hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7203c15114df58d6fe296e4cab56a7dfde9a7e6 | 7,137 | py | Python | sdk/python/pulumi_azure_nextgen/resources/v20190601preview/template_spec_version.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/resources/v20190601preview/template_spec_version.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/resources/v20190601preview/template_spec_version.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['TemplateSpecVersion']
class TemplateSpecVersion(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
artifacts: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TemplateSpecTemplateArtifactArgs']]]]] = None,
description: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
template: Optional[Any] = None,
template_spec_name: Optional[pulumi.Input[str]] = None,
template_spec_version: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Template Spec Version object.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TemplateSpecTemplateArtifactArgs']]]] artifacts: An array of Template Spec artifacts.
:param pulumi.Input[str] description: Template Spec version description.
:param pulumi.Input[str] location: The location of the Template Spec Version. It must match the location of the parent Template Spec.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param Any template: The Azure Resource Manager template content.
:param pulumi.Input[str] template_spec_name: Name of the Template Spec.
:param pulumi.Input[str] template_spec_version: The version of the Template Spec.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['artifacts'] = artifacts
__props__['description'] = description
__props__['location'] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['template'] = template
if template_spec_name is None and not opts.urn:
raise TypeError("Missing required property 'template_spec_name'")
__props__['template_spec_name'] = template_spec_name
__props__['template_spec_version'] = template_spec_version
__props__['name'] = None
__props__['system_data'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:resources:TemplateSpecVersion")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(TemplateSpecVersion, __self__).__init__(
'azure-nextgen:resources/v20190601preview:TemplateSpecVersion',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'TemplateSpecVersion':
"""
Get an existing TemplateSpecVersion resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return TemplateSpecVersion(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def artifacts(self) -> pulumi.Output[Optional[Sequence['outputs.TemplateSpecTemplateArtifactResponse']]]:
"""
An array of Template Spec artifacts.
"""
return pulumi.get(self, "artifacts")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Template Spec version description.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The location of the Template Spec Version. It must match the location of the parent Template Spec.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of this resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def template(self) -> pulumi.Output[Optional[Any]]:
"""
The Azure Resource Manager template content.
"""
return pulumi.get(self, "template")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Type of this resource.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 41.254335 | 153 | 0.644529 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['TemplateSpecVersion']
class TemplateSpecVersion(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
artifacts: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TemplateSpecTemplateArtifactArgs']]]]] = None,
description: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
template: Optional[Any] = None,
template_spec_name: Optional[pulumi.Input[str]] = None,
template_spec_version: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['artifacts'] = artifacts
__props__['description'] = description
__props__['location'] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['template'] = template
if template_spec_name is None and not opts.urn:
raise TypeError("Missing required property 'template_spec_name'")
__props__['template_spec_name'] = template_spec_name
__props__['template_spec_version'] = template_spec_version
__props__['name'] = None
__props__['system_data'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:resources:TemplateSpecVersion")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(TemplateSpecVersion, __self__).__init__(
'azure-nextgen:resources/v20190601preview:TemplateSpecVersion',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'TemplateSpecVersion':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return TemplateSpecVersion(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def artifacts(self) -> pulumi.Output[Optional[Sequence['outputs.TemplateSpecTemplateArtifactResponse']]]:
return pulumi.get(self, "artifacts")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "description")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
return pulumi.get(self, "tags")
@property
@pulumi.getter
def template(self) -> pulumi.Output[Optional[Any]]:
return pulumi.get(self, "template")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true | true |
f7203e25547d18f2b19333cb3e9e2cb96aaaadfa | 14,178 | py | Python | env/lib/python3.6/site-packages/pipenv/vendor/pip9/_vendor/html5lib/serializer.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | [
"MIT"
] | 2 | 2021-10-01T17:23:49.000Z | 2021-10-01T17:26:19.000Z | env/lib/python3.6/site-packages/pipenv/vendor/pip9/_vendor/html5lib/serializer.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | [
"MIT"
] | 1 | 2017-09-15T19:01:09.000Z | 2017-09-15T23:42:43.000Z | env/lib/python3.6/site-packages/pipenv/vendor/pip9/_vendor/html5lib/serializer.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | [
"MIT"
] | 2 | 2018-04-06T05:36:25.000Z | 2018-12-30T22:58:58.000Z | from __future__ import absolute_import, division, unicode_literals
from pip9._vendor.six import text_type
import re
from codecs import register_error, xmlcharrefreplace_errors
from .constants import voidElements, booleanAttributes, spaceCharacters
from .constants import rcdataElements, entities, xmlEntities
from . import treewalkers, _utils
from xml.sax.saxutils import escape
_quoteAttributeSpecChars = "".join(spaceCharacters) + "\"'=<>`"
_quoteAttributeSpec = re.compile("[" + _quoteAttributeSpecChars + "]")
_quoteAttributeLegacy = re.compile("[" + _quoteAttributeSpecChars +
"\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n"
"\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15"
"\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
"\x20\x2f\x60\xa0\u1680\u180e\u180f\u2000"
"\u2001\u2002\u2003\u2004\u2005\u2006\u2007"
"\u2008\u2009\u200a\u2028\u2029\u202f\u205f"
"\u3000]")
_encode_entity_map = {}
_is_ucs4 = len("\U0010FFFF") == 1
for k, v in list(entities.items()):
# skip multi-character entities
if ((_is_ucs4 and len(v) > 1) or
(not _is_ucs4 and len(v) > 2)):
continue
if v != "&":
if len(v) == 2:
v = _utils.surrogatePairToCodepoint(v)
else:
v = ord(v)
if v not in _encode_entity_map or k.islower():
# prefer < over < and similarly for &, >, etc.
_encode_entity_map[v] = k
def htmlentityreplace_errors(exc):
if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
res = []
codepoints = []
skip = False
for i, c in enumerate(exc.object[exc.start:exc.end]):
if skip:
skip = False
continue
index = i + exc.start
if _utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
codepoint = _utils.surrogatePairToCodepoint(exc.object[index:index + 2])
skip = True
else:
codepoint = ord(c)
codepoints.append(codepoint)
for cp in codepoints:
e = _encode_entity_map.get(cp)
if e:
res.append("&")
res.append(e)
if not e.endswith(";"):
res.append(";")
else:
res.append("&#x%s;" % (hex(cp)[2:]))
return ("".join(res), exc.end)
else:
return xmlcharrefreplace_errors(exc)
register_error("htmlentityreplace", htmlentityreplace_errors)
def serialize(input, tree="etree", encoding=None, **serializer_opts):
# XXX: Should we cache this?
walker = treewalkers.getTreeWalker(tree)
s = HTMLSerializer(**serializer_opts)
return s.render(walker(input), encoding)
class HTMLSerializer(object):
# attribute quoting options
quote_attr_values = "legacy" # be secure by default
quote_char = '"'
use_best_quote_char = True
# tag syntax options
omit_optional_tags = True
minimize_boolean_attributes = True
use_trailing_solidus = False
space_before_trailing_solidus = True
# escaping options
escape_lt_in_attrs = False
escape_rcdata = False
resolve_entities = True
# miscellaneous options
alphabetical_attributes = False
inject_meta_charset = True
strip_whitespace = False
sanitize = False
options = ("quote_attr_values", "quote_char", "use_best_quote_char",
"omit_optional_tags", "minimize_boolean_attributes",
"use_trailing_solidus", "space_before_trailing_solidus",
"escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
"alphabetical_attributes", "inject_meta_charset",
"strip_whitespace", "sanitize")
def __init__(self, **kwargs):
"""Initialize HTMLSerializer.
Keyword options (default given first unless specified) include:
inject_meta_charset=True|False
Whether it insert a meta element to define the character set of the
document.
quote_attr_values="legacy"|"spec"|"always"
Whether to quote attribute values that don't require quoting
per legacy browser behaviour, when required by the standard, or always.
quote_char=u'"'|u"'"
Use given quote character for attribute quoting. Default is to
use double quote unless attribute value contains a double quote,
in which case single quotes are used instead.
escape_lt_in_attrs=False|True
Whether to escape < in attribute values.
escape_rcdata=False|True
Whether to escape characters that need to be escaped within normal
elements within rcdata elements such as style.
resolve_entities=True|False
Whether to resolve named character entities that appear in the
source tree. The XML predefined entities < > & " '
are unaffected by this setting.
strip_whitespace=False|True
Whether to remove semantically meaningless whitespace. (This
compresses all whitespace to a single space except within pre.)
minimize_boolean_attributes=True|False
Shortens boolean attributes to give just the attribute value,
for example <input disabled="disabled"> becomes <input disabled>.
use_trailing_solidus=False|True
Includes a close-tag slash at the end of the start tag of void
elements (empty elements whose end tag is forbidden). E.g. <hr/>.
space_before_trailing_solidus=True|False
Places a space immediately before the closing slash in a tag
using a trailing solidus. E.g. <hr />. Requires use_trailing_solidus.
sanitize=False|True
Strip all unsafe or unknown constructs from output.
See `html5lib user documentation`_
omit_optional_tags=True|False
Omit start/end tags that are optional.
alphabetical_attributes=False|True
Reorder attributes to be in alphabetical order.
.. _html5lib user documentation: http://code.google.com/p/html5lib/wiki/UserDocumentation
"""
unexpected_args = frozenset(kwargs) - frozenset(self.options)
if len(unexpected_args) > 0:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % next(iter(unexpected_args)))
if 'quote_char' in kwargs:
self.use_best_quote_char = False
for attr in self.options:
setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
self.errors = []
self.strict = False
def encode(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "htmlentityreplace")
else:
return string
def encodeStrict(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "strict")
else:
return string
def serialize(self, treewalker, encoding=None):
# pylint:disable=too-many-nested-blocks
self.encoding = encoding
in_cdata = False
self.errors = []
if encoding and self.inject_meta_charset:
from .filters.inject_meta_charset import Filter
treewalker = Filter(treewalker, encoding)
# Alphabetical attributes is here under the assumption that none of
# the later filters add or change order of attributes; it needs to be
# before the sanitizer so escaped elements come out correctly
if self.alphabetical_attributes:
from .filters.alphabeticalattributes import Filter
treewalker = Filter(treewalker)
# WhitespaceFilter should be used before OptionalTagFilter
# for maximum efficiently of this latter filter
if self.strip_whitespace:
from .filters.whitespace import Filter
treewalker = Filter(treewalker)
if self.sanitize:
from .filters.sanitizer import Filter
treewalker = Filter(treewalker)
if self.omit_optional_tags:
from .filters.optionaltags import Filter
treewalker = Filter(treewalker)
for token in treewalker:
type = token["type"]
if type == "Doctype":
doctype = "<!DOCTYPE %s" % token["name"]
if token["publicId"]:
doctype += ' PUBLIC "%s"' % token["publicId"]
elif token["systemId"]:
doctype += " SYSTEM"
if token["systemId"]:
if token["systemId"].find('"') >= 0:
if token["systemId"].find("'") >= 0:
self.serializeError("System identifer contains both single and double quote characters")
quote_char = "'"
else:
quote_char = '"'
doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)
doctype += ">"
yield self.encodeStrict(doctype)
elif type in ("Characters", "SpaceCharacters"):
if type == "SpaceCharacters" or in_cdata:
if in_cdata and token["data"].find("</") >= 0:
self.serializeError("Unexpected </ in CDATA")
yield self.encode(token["data"])
else:
yield self.encode(escape(token["data"]))
elif type in ("StartTag", "EmptyTag"):
name = token["name"]
yield self.encodeStrict("<%s" % name)
if name in rcdataElements and not self.escape_rcdata:
in_cdata = True
elif in_cdata:
self.serializeError("Unexpected child element of a CDATA element")
for (_, attr_name), attr_value in token["data"].items():
# TODO: Add namespace support here
k = attr_name
v = attr_value
yield self.encodeStrict(' ')
yield self.encodeStrict(k)
if not self.minimize_boolean_attributes or \
(k not in booleanAttributes.get(name, tuple()) and
k not in booleanAttributes.get("", tuple())):
yield self.encodeStrict("=")
if self.quote_attr_values == "always" or len(v) == 0:
quote_attr = True
elif self.quote_attr_values == "spec":
quote_attr = _quoteAttributeSpec.search(v) is not None
elif self.quote_attr_values == "legacy":
quote_attr = _quoteAttributeLegacy.search(v) is not None
else:
raise ValueError("quote_attr_values must be one of: "
"'always', 'spec', or 'legacy'")
v = v.replace("&", "&")
if self.escape_lt_in_attrs:
v = v.replace("<", "<")
if quote_attr:
quote_char = self.quote_char
if self.use_best_quote_char:
if "'" in v and '"' not in v:
quote_char = '"'
elif '"' in v and "'" not in v:
quote_char = "'"
if quote_char == "'":
v = v.replace("'", "'")
else:
v = v.replace('"', """)
yield self.encodeStrict(quote_char)
yield self.encode(v)
yield self.encodeStrict(quote_char)
else:
yield self.encode(v)
if name in voidElements and self.use_trailing_solidus:
if self.space_before_trailing_solidus:
yield self.encodeStrict(" /")
else:
yield self.encodeStrict("/")
yield self.encode(">")
elif type == "EndTag":
name = token["name"]
if name in rcdataElements:
in_cdata = False
elif in_cdata:
self.serializeError("Unexpected child element of a CDATA element")
yield self.encodeStrict("</%s>" % name)
elif type == "Comment":
data = token["data"]
if data.find("--") >= 0:
self.serializeError("Comment contains --")
yield self.encodeStrict("<!--%s-->" % token["data"])
elif type == "Entity":
name = token["name"]
key = name + ";"
if key not in entities:
self.serializeError("Entity %s not recognized" % name)
if self.resolve_entities and key not in xmlEntities:
data = entities[key]
else:
data = "&%s;" % name
yield self.encodeStrict(data)
else:
self.serializeError(token["data"])
def render(self, treewalker, encoding=None):
if encoding:
return b"".join(list(self.serialize(treewalker, encoding)))
else:
return "".join(list(self.serialize(treewalker)))
def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
# XXX The idea is to make data mandatory.
self.errors.append(data)
if self.strict:
raise SerializeError
class SerializeError(Exception):
"""Error in serialized tree"""
pass
| 42.322388 | 116 | 0.554662 | from __future__ import absolute_import, division, unicode_literals
from pip9._vendor.six import text_type
import re
from codecs import register_error, xmlcharrefreplace_errors
from .constants import voidElements, booleanAttributes, spaceCharacters
from .constants import rcdataElements, entities, xmlEntities
from . import treewalkers, _utils
from xml.sax.saxutils import escape
_quoteAttributeSpecChars = "".join(spaceCharacters) + "\"'=<>`"
_quoteAttributeSpec = re.compile("[" + _quoteAttributeSpecChars + "]")
_quoteAttributeLegacy = re.compile("[" + _quoteAttributeSpecChars +
"\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n"
"\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15"
"\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
"\x20\x2f\x60\xa0\u1680\u180e\u180f\u2000"
"\u2001\u2002\u2003\u2004\u2005\u2006\u2007"
"\u2008\u2009\u200a\u2028\u2029\u202f\u205f"
"\u3000]")
_encode_entity_map = {}
_is_ucs4 = len("\U0010FFFF") == 1
for k, v in list(entities.items()):
# skip multi-character entities
if ((_is_ucs4 and len(v) > 1) or
(not _is_ucs4 and len(v) > 2)):
continue
if v != "&":
if len(v) == 2:
v = _utils.surrogatePairToCodepoint(v)
else:
v = ord(v)
if v not in _encode_entity_map or k.islower():
# prefer < over < and similarly for &, >, etc.
_encode_entity_map[v] = k
def htmlentityreplace_errors(exc):
if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
res = []
codepoints = []
skip = False
for i, c in enumerate(exc.object[exc.start:exc.end]):
if skip:
skip = False
continue
index = i + exc.start
if _utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
codepoint = _utils.surrogatePairToCodepoint(exc.object[index:index + 2])
skip = True
else:
codepoint = ord(c)
codepoints.append(codepoint)
for cp in codepoints:
e = _encode_entity_map.get(cp)
if e:
res.append("&")
res.append(e)
if not e.endswith(";"):
res.append(";")
else:
res.append("&#x%s;" % (hex(cp)[2:]))
return ("".join(res), exc.end)
else:
return xmlcharrefreplace_errors(exc)
register_error("htmlentityreplace", htmlentityreplace_errors)
def serialize(input, tree="etree", encoding=None, **serializer_opts):
# XXX: Should we cache this?
walker = treewalkers.getTreeWalker(tree)
s = HTMLSerializer(**serializer_opts)
return s.render(walker(input), encoding)
class HTMLSerializer(object):
# attribute quoting options
quote_attr_values = "legacy" # be secure by default
quote_char = '"'
use_best_quote_char = True
# tag syntax options
omit_optional_tags = True
minimize_boolean_attributes = True
use_trailing_solidus = False
space_before_trailing_solidus = True
# escaping options
escape_lt_in_attrs = False
escape_rcdata = False
resolve_entities = True
# miscellaneous options
alphabetical_attributes = False
inject_meta_charset = True
strip_whitespace = False
sanitize = False
options = ("quote_attr_values", "quote_char", "use_best_quote_char",
"omit_optional_tags", "minimize_boolean_attributes",
"use_trailing_solidus", "space_before_trailing_solidus",
"escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
"alphabetical_attributes", "inject_meta_charset",
"strip_whitespace", "sanitize")
def __init__(self, **kwargs):
unexpected_args = frozenset(kwargs) - frozenset(self.options)
if len(unexpected_args) > 0:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % next(iter(unexpected_args)))
if 'quote_char' in kwargs:
self.use_best_quote_char = False
for attr in self.options:
setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
self.errors = []
self.strict = False
def encode(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "htmlentityreplace")
else:
return string
def encodeStrict(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "strict")
else:
return string
def serialize(self, treewalker, encoding=None):
# pylint:disable=too-many-nested-blocks
self.encoding = encoding
in_cdata = False
self.errors = []
if encoding and self.inject_meta_charset:
from .filters.inject_meta_charset import Filter
treewalker = Filter(treewalker, encoding)
# Alphabetical attributes is here under the assumption that none of
# the later filters add or change order of attributes; it needs to be
# before the sanitizer so escaped elements come out correctly
if self.alphabetical_attributes:
from .filters.alphabeticalattributes import Filter
treewalker = Filter(treewalker)
# WhitespaceFilter should be used before OptionalTagFilter
# for maximum efficiently of this latter filter
if self.strip_whitespace:
from .filters.whitespace import Filter
treewalker = Filter(treewalker)
if self.sanitize:
from .filters.sanitizer import Filter
treewalker = Filter(treewalker)
if self.omit_optional_tags:
from .filters.optionaltags import Filter
treewalker = Filter(treewalker)
for token in treewalker:
type = token["type"]
if type == "Doctype":
doctype = "<!DOCTYPE %s" % token["name"]
if token["publicId"]:
doctype += ' PUBLIC "%s"' % token["publicId"]
elif token["systemId"]:
doctype += " SYSTEM"
if token["systemId"]:
if token["systemId"].find('"') >= 0:
if token["systemId"].find("'") >= 0:
self.serializeError("System identifer contains both single and double quote characters")
quote_char = "'"
else:
quote_char = '"'
doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)
doctype += ">"
yield self.encodeStrict(doctype)
elif type in ("Characters", "SpaceCharacters"):
if type == "SpaceCharacters" or in_cdata:
if in_cdata and token["data"].find("</") >= 0:
self.serializeError("Unexpected </ in CDATA")
yield self.encode(token["data"])
else:
yield self.encode(escape(token["data"]))
elif type in ("StartTag", "EmptyTag"):
name = token["name"]
yield self.encodeStrict("<%s" % name)
if name in rcdataElements and not self.escape_rcdata:
in_cdata = True
elif in_cdata:
self.serializeError("Unexpected child element of a CDATA element")
for (_, attr_name), attr_value in token["data"].items():
# TODO: Add namespace support here
k = attr_name
v = attr_value
yield self.encodeStrict(' ')
yield self.encodeStrict(k)
if not self.minimize_boolean_attributes or \
(k not in booleanAttributes.get(name, tuple()) and
k not in booleanAttributes.get("", tuple())):
yield self.encodeStrict("=")
if self.quote_attr_values == "always" or len(v) == 0:
quote_attr = True
elif self.quote_attr_values == "spec":
quote_attr = _quoteAttributeSpec.search(v) is not None
elif self.quote_attr_values == "legacy":
quote_attr = _quoteAttributeLegacy.search(v) is not None
else:
raise ValueError("quote_attr_values must be one of: "
"'always', 'spec', or 'legacy'")
v = v.replace("&", "&")
if self.escape_lt_in_attrs:
v = v.replace("<", "<")
if quote_attr:
quote_char = self.quote_char
if self.use_best_quote_char:
if "'" in v and '"' not in v:
quote_char = '"'
elif '"' in v and "'" not in v:
quote_char = "'"
if quote_char == "'":
v = v.replace("'", "&
else:
v = v.replace('"', """)
yield self.encodeStrict(quote_char)
yield self.encode(v)
yield self.encodeStrict(quote_char)
else:
yield self.encode(v)
if name in voidElements and self.use_trailing_solidus:
if self.space_before_trailing_solidus:
yield self.encodeStrict(" /")
else:
yield self.encodeStrict("/")
yield self.encode(">")
elif type == "EndTag":
name = token["name"]
if name in rcdataElements:
in_cdata = False
elif in_cdata:
self.serializeError("Unexpected child element of a CDATA element")
yield self.encodeStrict("</%s>" % name)
elif type == "Comment":
data = token["data"]
if data.find("--") >= 0:
self.serializeError("Comment contains --")
yield self.encodeStrict("<!--%s-->" % token["data"])
elif type == "Entity":
name = token["name"]
key = name + ";"
if key not in entities:
self.serializeError("Entity %s not recognized" % name)
if self.resolve_entities and key not in xmlEntities:
data = entities[key]
else:
data = "&%s;" % name
yield self.encodeStrict(data)
else:
self.serializeError(token["data"])
def render(self, treewalker, encoding=None):
if encoding:
return b"".join(list(self.serialize(treewalker, encoding)))
else:
return "".join(list(self.serialize(treewalker)))
def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
self.errors.append(data)
if self.strict:
raise SerializeError
class SerializeError(Exception):
    """Error raised by the serializer when ``strict`` mode is enabled."""
| true | true |
f7203e5539f2d789837ec3db4336641fa5cb95f8 | 24,459 | py | Python | pytorch_transformers/utils_glue.py | nguyenvo09/EACL2021 | 9d04d8954c1ded2110daac23117de11221f08cc6 | [
"MIT"
] | 27 | 2021-01-18T16:03:17.000Z | 2022-03-05T22:38:34.000Z | pytorch_transformers/utils_glue.py | Jason98Xu/GET | 6860c87425619954cacbf5a14ad20befd18ec818 | [
"MIT"
] | null | null | null | pytorch_transformers/utils_glue.py | Jason98Xu/GET | 6860c87425619954cacbf5a14ad20befd18ec818 | [
"MIT"
] | 2 | 2022-03-16T03:22:16.000Z | 2022-03-27T03:12:14.000Z | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BERT classification fine-tuning: utilities to work with GLUE tasks """
from __future__ import absolute_import, division, print_function
import csv
import logging
import os
import sys
from io import open
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef, f1_score
from .tokenization_utils import PreTrainedTokenizer
logger = logging.getLogger(__name__)
from typing import List
class InputExample(object):
    """A single training/test example for simple sequence classification."""

    def __init__(self, guid, text_a, text_b=None, label=None, tokenized_text_a: List[str]=None, tokenized_text_b: List[str]=None):
        """Constructs a InputExample.

        Args:
            guid: Unique id for the example.
            text_a: string. The untokenized text of the first sequence. For single
            sequence tasks, only this sequence must be specified.
            text_b: (Optional) string. The untokenized text of the second sequence.
            Only must be specified for sequence pair tasks.
            label: (Optional) string. The label of the example. This should be
            specified for train and dev examples, but not for test examples.
            tokenized_text_a: (Optional) pre-tokenized form of `text_a`.
            tokenized_text_b: (Optional) pre-tokenized form of `text_b`.
        """
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
        self.label = label
        # Bug fix: the original accepted these arguments but silently discarded
        # them; they are now stored so callers can pass pre-tokenized text.
        self.tokenized_text_a = tokenized_text_a
        self.tokenized_text_b = tokenized_text_b
class InputFeatures(object):
    """Model-ready features for one example: padded ids, mask, segments, label."""

    def __init__(self, input_ids, input_mask, segment_ids, label_id):
        """Store the padded token ids, attention mask, segment ids and label id."""
        self.input_ids, self.input_mask = input_ids, input_mask
        self.segment_ids, self.label_id = segment_ids, label_id
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab separated value file into a list of row lists."""
        with open(input_file, "r", encoding="utf-8-sig") as handle:
            rows = csv.reader(handle, delimiter="\t", quotechar=quotechar)
            if sys.version_info[0] == 2:
                # Python 2 only: decode each cell to unicode.
                return [[unicode(cell, 'utf-8') for cell in row] for row in rows]
            return list(rows)
class MrpcProcessor(DataProcessor):
    """Processor for the MRPC data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        train_file = os.path.join(data_dir, "train.tsv")
        logger.info("LOOKING AT {}".format(train_file))
        return self._create_examples(self._read_tsv(train_file), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        dev_file = os.path.join(data_dir, "dev.tsv")
        return self._create_examples(self._read_tsv(dev_file), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets (row 0 is the header)."""
        return [
            InputExample(guid="%s-%s" % (set_type, idx),
                         text_a=row[3], text_b=row[4], label=row[0])
            for idx, row in enumerate(lines) if idx > 0
        ]
class SearchProcessor(DataProcessor):
    """Processor for the Search data set (BEN version).

    Reads ``Snopes.train.tsv`` / ``Snopes.dev.tsv`` fact-checking pairs.
    """

    def get_train_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {}".format(os.path.join(data_dir, "Snopes.train.tsv")))
        return self._create_examples(
            self._read_tsv2(os.path.join(data_dir, "Snopes.train.tsv")), "train")

    def get_dev_examples(self, data_dir, tokenizer: PreTrainedTokenizer=None):
        """See base class. `tokenizer` is accepted for API compatibility but unused."""
        return self._create_examples(
            self._read_tsv2(os.path.join(data_dir, "Snopes.dev.tsv")), "dev", tokenizer=tokenizer)

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _read_tsv2(self, input_file, quotechar=None, tokenizer: PreTrainedTokenizer=None):
        """Reads a tab separated value file.

        Fix: the original duplicated `DataProcessor._read_tsv` line-for-line and
        misleadingly named its first parameter `cls` although it is called as an
        instance method; it now delegates to the base reader. `tokenizer` is
        kept for backward compatibility but unused.
        """
        return self._read_tsv(input_file, quotechar=quotechar)

    def _create_examples(self, lines, set_type, tokenizer: PreTrainedTokenizer=None):
        """Creates examples for the training and dev sets (row 0 is the header)."""
        examples = []
        from tqdm import tqdm
        for (i, line) in tqdm(enumerate(lines)):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, i)
            # Column layout: text_a at [1], text_b at [3], label at [4]
            # (presumably claim/evidence/label -- TODO confirm against the TSV schema).
            examples.append(
                InputExample(guid=guid, text_a=line[1], text_b=line[3], label=line[4]))
        return examples
class MnliProcessor(DataProcessor):
    """Processor for the MultiNLI data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets; skips the header row."""
        examples = []
        for row in lines[1:]:
            examples.append(InputExample(guid="%s-%s" % (set_type, row[0]),
                                         text_a=row[8], text_b=row[9],
                                         label=row[-1]))
        return examples
class MnliMismatchedProcessor(MnliProcessor):
    """Processor for the MultiNLI Mismatched data set (GLUE version)."""

    def get_dev_examples(self, data_dir):
        """See base class."""
        dev_file = os.path.join(data_dir, "dev_mismatched.tsv")
        return self._create_examples(self._read_tsv(dev_file), "dev_matched")
class ColaProcessor(DataProcessor):
    """Processor for the CoLA data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets.

        CoLA TSVs have no header row, so every line becomes an example.
        """
        return [
            InputExample(guid="%s-%s" % (set_type, idx),
                         text_a=row[3], text_b=None, label=row[1])
            for idx, row in enumerate(lines)
        ]
class Sst2Processor(DataProcessor):
    """Processor for the SST-2 data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets (row 0 is the header)."""
        return [
            InputExample(guid="%s-%s" % (set_type, idx),
                         text_a=row[0], text_b=None, label=row[1])
            for idx, row in enumerate(lines) if idx > 0
        ]
class StsbProcessor(DataProcessor):
    """Processor for the STS-B data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class. STS-B is a regression task, so there is no label set."""
        return [None]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets; skips the header row."""
        return [
            InputExample(guid="%s-%s" % (set_type, row[0]),
                         text_a=row[7], text_b=row[8], label=row[-1])
            for row in lines[1:]
        ]
class QqpProcessor(DataProcessor):
    """Processor for the QQP data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets; skips the header row.

        Rows that are too short to hold both questions and the label are dropped.
        """
        examples = []
        for row in lines[1:]:
            guid = "%s-%s" % (set_type, row[0])
            try:
                text_a, text_b, label = row[3], row[4], row[5]
            except IndexError:
                continue
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples
class QnliProcessor(DataProcessor):
    """Processor for the QNLI data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev_matched")

    def get_labels(self):
        """See base class."""
        return ["entailment", "not_entailment"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets; skips the header row."""
        return [
            InputExample(guid="%s-%s" % (set_type, row[0]),
                         text_a=row[1], text_b=row[2], label=row[-1])
            for row in lines[1:]
        ]
class RteProcessor(DataProcessor):
    """Processor for the RTE data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["entailment", "not_entailment"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets; skips the header row."""
        return [
            InputExample(guid="%s-%s" % (set_type, row[0]),
                         text_a=row[1], text_b=row[2], label=row[-1])
            for row in lines[1:]
        ]
class WnliProcessor(DataProcessor):
    """Processor for the WNLI data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets; skips the header row."""
        return [
            InputExample(guid="%s-%s" % (set_type, row[0]),
                         text_a=row[1], text_b=row[2], label=row[-1])
            for row in lines[1:]
        ]
def convert_examples_to_features(examples, label_list, max_seq_length,
                                 tokenizer, output_mode,
                                 cls_token_at_end=False,
                                 cls_token='[CLS]',
                                 cls_token_segment_id=1,
                                 sep_token='[SEP]',
                                 sep_token_extra=False,
                                 pad_on_left=False,
                                 pad_token=0,
                                 pad_token_segment_id=0,
                                 sequence_a_segment_id=0,
                                 sequence_b_segment_id=1,
                                 mask_padding_with_zero=True,
                                 tokenize_text=True):
    """ Loads a data file into a list of `InputBatch`s
        `cls_token_at_end` define the location of the CLS token:
            - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
            - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
        `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
        `output_mode` is "classification" (labels mapped through `label_list`)
            or "regression" (labels cast to float); anything else raises KeyError.
        `tokenize_text`: when False, `text_a`/`text_b` are assumed to be
            pre-tokenized and are only split on whitespace.
    """
    label_map = {label: i for i, label in enumerate(label_list)}

    from tqdm import tqdm
    features = []
    # Idiom fix: the original maintained `ex_index` by hand (-1 then += 1 at the
    # top of the loop); enumerate() produces the identical 0-based index.
    for ex_index, example in enumerate(tqdm(examples)):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d of %d" % (ex_index, len(examples)))

        if tokenize_text:
            tokens_a = tokenizer.tokenize(example.text_a)
        else:
            tokens_a = example.text_a.split()

        tokens_b = None
        if example.text_b:
            if tokenize_text:
                tokens_b = tokenizer.tokenize(example.text_b)
            else:
                tokens_b = example.text_b.split()
            # Modifies `tokens_a` and `tokens_b` in place so that the total
            # length is less than the specified length.
            # Account for [CLS], [SEP], [SEP] with "- 3". " -4" for RoBERTa.
            special_tokens_count = 4 if sep_token_extra else 3
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - special_tokens_count)
        else:
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = 3 if sep_token_extra else 2
            if len(tokens_a) > max_seq_length - special_tokens_count:
                tokens_a = tokens_a[:(max_seq_length - special_tokens_count)]

        # The convention in BERT is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids:   0   0   0   0  0     0   0
        # The `type_ids` (segment ids) disambiguate the two sequences; the [CLS]
        # vector is what classification heads consume.
        tokens = tokens_a + [sep_token]
        if sep_token_extra:
            # roberta uses an extra separator b/w pairs of sentences
            tokens += [sep_token]
        segment_ids = [sequence_a_segment_id] * len(tokens)

        if tokens_b:
            tokens += tokens_b + [sep_token]
            segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)

        if cls_token_at_end:
            tokens = tokens + [cls_token]
            segment_ids = segment_ids + [cls_token_segment_id]
        else:
            tokens = [cls_token] + tokens
            segment_ids = [cls_token_segment_id] + segment_ids

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

        # Zero-pad up to the sequence length.
        padding_length = max_seq_length - len(input_ids)
        if pad_on_left:
            input_ids = ([pad_token] * padding_length) + input_ids
            input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
            segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
        else:
            input_ids = input_ids + ([pad_token] * padding_length)
            input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
            segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)

        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length

        if output_mode == "classification":
            label_id = label_map[example.label]
        elif output_mode == "regression":
            label_id = float(example.label)
        else:
            raise KeyError(output_mode)

        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s" % (example.guid))
            logger.info("tokens: %s" % " ".join(
                    [str(x) for x in tokens]))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
            logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
            logger.info("label: %s (id = %d)" % (example.label, label_id))

        features.append(
                InputFeatures(input_ids=input_ids,
                              input_mask=input_mask,
                              segment_ids=segment_ids,
                              label_id=label_id))
    return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def simple_accuracy(preds, labels):
    """Fraction of positions where `preds` equals `labels`.

    Assumes numpy-array-like inputs (elementwise `==` returning an object
    with `.mean()`).
    """
    matches = (preds == labels)
    return matches.mean()
def acc_and_f1(preds, labels):
    """Accuracy, F1, and their average (used for the binary GLUE tasks)."""
    accuracy = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": accuracy,
        "f1": f1,
        "acc_and_f1": (accuracy + f1) / 2,
    }
def pearson_and_spearman(preds, labels):
    """Pearson/Spearman correlations and their average (STS-B regression)."""
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    combined = (pearson_corr + spearman_corr) / 2
    return {"pearson": pearson_corr,
            "spearmanr": spearman_corr,
            "corr": combined}
def compute_metrics(task_name, preds, labels):
    """Dispatch to the metric function for `task_name`.

    Raises KeyError for unknown task names, matching the original if/elif chain.
    """
    assert len(preds) == len(labels)

    def _acc(p, l):
        return {"acc": simple_accuracy(p, l)}

    def _mcc(p, l):
        return {"mcc": matthews_corrcoef(l, p)}

    metric_fns = {
        "cola": _mcc,
        "sst-2": _acc,
        "mrpc": acc_and_f1,
        "search": acc_and_f1,
        "sts-b": pearson_and_spearman,
        "qqp": acc_and_f1,
        "mnli": _acc,
        "mnli-mm": _acc,
        "qnli": _acc,
        "rte": _acc,
        "wnli": _acc,
    }
    if task_name not in metric_fns:
        raise KeyError(task_name)
    return metric_fns[task_name](preds, labels)
# Registry mapping each lowercase task name to the DataProcessor subclass
# that reads its data files.
processors = {
    "cola": ColaProcessor,
    "mnli": MnliProcessor,
    "mnli-mm": MnliMismatchedProcessor,
    "mrpc": MrpcProcessor,
    "search": SearchProcessor,
    "sst-2": Sst2Processor,
    "sts-b": StsbProcessor,
    "qqp": QqpProcessor,
    "qnli": QnliProcessor,
    "rte": RteProcessor,
    "wnli": WnliProcessor,
}

# How each task's labels are treated by convert_examples_to_features:
# "classification" maps labels through label_list, "regression" casts to float.
# STS-B is the only regression task.
output_modes = {
    "cola": "classification",
    "mnli": "classification",
    "mnli-mm": "classification",
    "mrpc": "classification",
    "search": "classification",
    "sst-2": "classification",
    "sts-b": "regression",
    "qqp": "classification",
    "qnli": "classification",
    "rte": "classification",
    "wnli": "classification",
}

# Number of output labels per task (1 for the STS-B regression target).
# NOTE(review): "search" and "mnli-mm" appear in the two registries above but
# are missing here -- confirm whether callers look them up in this dict.
GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}
| 36.343239 | 130 | 0.58874 |
from __future__ import absolute_import, division, print_function
import csv
import logging
import os
import sys
from io import open
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef, f1_score
from .tokenization_utils import PreTrainedTokenizer
logger = logging.getLogger(__name__)
from typing import List
class InputExample(object):
def __init__(self, guid, text_a, text_b=None, label=None, tokenized_text_a: List[str]=None, tokenized_text_b: List[str]=None):
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class DataProcessor(object):
def get_train_examples(self, data_dir):
raise NotImplementedError()
def get_dev_examples(self, data_dir):
raise NotImplementedError()
def get_labels(self):
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
with open(input_file, "r", encoding="utf-8-sig") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
class MrpcProcessor(DataProcessor):
def get_train_examples(self, data_dir):
logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv")))
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
return ["0", "1"]
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[3]
text_b = line[4]
label = line[0]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class SearchProcessor(DataProcessor):
def get_train_examples(self, data_dir):
logger.info("LOOKING AT {}".format(os.path.join(data_dir, "Snopes.train.tsv")))
return self._create_examples(
self._read_tsv2(os.path.join(data_dir, "Snopes.train.tsv")), "train")
def get_dev_examples(self, data_dir, tokenizer: PreTrainedTokenizer=None):
return self._create_examples(
self._read_tsv2(os.path.join(data_dir, "Snopes.dev.tsv")), "dev", tokenizer=tokenizer)
def get_labels(self):
return ["0", "1"]
def _read_tsv2(cls, input_file, quotechar=None, tokenizer:PreTrainedTokenizer=None):
with open(input_file, "r", encoding="utf-8-sig") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
def _create_examples(self, lines, set_type, tokenizer:PreTrainedTokenizer=None):
examples = []
from tqdm import tqdm
for (i, line) in tqdm(enumerate(lines)):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[1]
text_b = line[3]
label = line[4]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MnliProcessor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_labels(self):
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[8]
text_b = line[9]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MnliMismatchedProcessor(MnliProcessor):
def get_dev_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")),
"dev_matched")
class ColaProcessor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
return ["0", "1"]
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line[3]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class Sst2Processor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
return ["0", "1"]
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[0]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class StsbProcessor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
return [None]
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[7]
text_b = line[8]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QqpProcessor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
return ["0", "1"]
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
try:
text_a = line[3]
text_b = line[4]
label = line[5]
except IndexError:
continue
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QnliProcessor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")),
"dev_matched")
def get_labels(self):
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class RteProcessor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class WnliProcessor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
return ["0", "1"]
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer, output_mode,
cls_token_at_end=False,
cls_token='[CLS]',
cls_token_segment_id=1,
sep_token='[SEP]',
sep_token_extra=False,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
sequence_a_segment_id=0,
sequence_b_segment_id=1,
mask_padding_with_zero=True,
tokenize_text=True):
label_map = {label : i for i, label in enumerate(label_list)}
from tqdm import tqdm
features = []
ex_index = -1
for example in tqdm(examples):
ex_index += 1
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
if tokenize_text: tokens_a = tokenizer.tokenize(example.text_a)
else: tokens_a = example.text_a.split()
tokens_b = None
if example.text_b:
if tokenize_text: tokens_b = tokenizer.tokenize(example.text_b)
else: tokens_b = example.text_b.split()
special_tokens_count = 4 if sep_token_extra else 3
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - special_tokens_count)
else:
special_tokens_count = 3 if sep_token_extra else 2
if len(tokens_a) > max_seq_length - special_tokens_count:
tokens_a = tokens_a[:(max_seq_length - special_tokens_count)]
        tokens = tokens_a + [sep_token]
if sep_token_extra:
tokens += [sep_token]
segment_ids = [sequence_a_segment_id] * len(tokens)
if tokens_b:
tokens += tokens_b + [sep_token]
segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)
if cls_token_at_end:
tokens = tokens + [cls_token]
segment_ids = segment_ids + [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if output_mode == "classification":
label_id = label_map[example.label]
elif output_mode == "regression":
label_id = float(example.label)
else:
raise KeyError(output_mode)
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.info("label: %s (id = %d)" % (example.label, label_id))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id))
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def acc_and_f1(preds, labels):
    """Return accuracy, binary F1, and their arithmetic mean as a dict."""
    accuracy = simple_accuracy(preds, labels)
    f1_value = f1_score(y_true=labels, y_pred=preds)
    combined = (accuracy + f1_value) / 2
    return {
        "acc": accuracy,
        "f1": f1_value,
        "acc_and_f1": combined,
    }
def pearson_and_spearman(preds, labels):
    """Pearson and Spearman correlations plus their mean (used for STS-B)."""
    pearson, _ = pearsonr(preds, labels)
    spearman, _ = spearmanr(preds, labels)
    return {
        "pearson": pearson,
        "spearmanr": spearman,
        "corr": (pearson + spearman) / 2,
    }
def compute_metrics(task_name, preds, labels):
    """Dispatch to the metric function appropriate for ``task_name``.

    Raises:
        KeyError: if ``task_name`` is not a known task.
    """
    assert len(preds) == len(labels)
    # Tasks are grouped by the metric they report; membership tests replace
    # the long per-task if/elif chain.
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    if task_name in ("mrpc", "search", "qqp"):
        return acc_and_f1(preds, labels)
    if task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    if task_name in ("sst-2", "mnli", "mnli-mm", "qnli", "rte", "wnli"):
        return {"acc": simple_accuracy(preds, labels)}
    raise KeyError(task_name)
# Registry mapping each task name to the DataProcessor class that loads it.
processors = {
    "cola": ColaProcessor,
    "mnli": MnliProcessor,
    "mnli-mm": MnliMismatchedProcessor,
    "mrpc": MrpcProcessor,
    "search": SearchProcessor,
    "sst-2": Sst2Processor,
    "sts-b": StsbProcessor,
    "qqp": QqpProcessor,
    "qnli": QnliProcessor,
    "rte": RteProcessor,
    "wnli": WnliProcessor,
}
# Training/eval objective per task; STS-B is the only regression task.
output_modes = {
    "cola": "classification",
    "mnli": "classification",
    "mnli-mm": "classification",
    "mrpc": "classification",
    "search": "classification",
    "sst-2": "classification",
    "sts-b": "regression",
    "qqp": "classification",
    "qnli": "classification",
    "rte": "classification",
    "wnli": "classification",
}
# Number of output labels per GLUE task (STS-B is 1: it regresses a score).
# NOTE(review): 'search' and 'mnli-mm' have processors/output modes above but
# no entry here -- confirm whether that omission is intentional.
GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}
| true | true |
f7203f7c40d84ae4799af332cef766f88462c378 | 6,000 | py | Python | modules/aim_server/files/handler.py | mshuler/infrastructure-puppet | bb054d08e89f9bf4b804a7a453f02ae722519d0a | [
"Apache-2.0"
] | 1 | 2019-06-09T10:25:04.000Z | 2019-06-09T10:25:04.000Z | modules/aim_server/files/handler.py | mshuler/infrastructure-puppet | bb054d08e89f9bf4b804a7a453f02ae722519d0a | [
"Apache-2.0"
] | 1 | 2020-05-08T07:07:43.000Z | 2020-05-08T07:07:43.000Z | modules/aim_server/files/handler.py | mshuler/infrastructure-puppet | bb054d08e89f9bf4b804a7a453f02ae722519d0a | [
"Apache-2.0"
] | 1 | 2018-07-09T08:44:40.000Z | 2018-07-09T08:44:40.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This is the main WSGI handler file for AIM.
It compiles a list of valid URLs from the 'pages' library folder,
and if a URL matches it runs the specific submodule's run() function. It
also handles CGI parsing and exceptions in the applications.
"""
# Main imports
import cgi
import re
import sys
import traceback
import yaml
import json
import plugins.session
import plugins.database
import plugins.openapi
# Compile valid API URLs from the pages library
urls = []
if __name__ != '__main__':
import pages
for page in pages.handlers:
urls.append((r"^(/api/%s)(/.+)?$" % page, pages.handlers[page].run))
# Load Aim master configuration
config = yaml.load(open("yaml/aim.yaml"))
# Instantiate database connections
DB = plugins.database.AimAPIDatabase(config)
# Load Open API specifications
AimAPIOpenAPI = plugins.openapi.OpenAPI("yaml/openapi.yaml", ignore_extras = True)
class AimAPIHTTPError(Exception):
    """HTTP-style API error carrying a numeric status code and a reason.

    Raised by endpoint handlers and translated into a JSON error response
    by the surrounding middleware wrapper.
    """

    def __init__(self, code, message):
        # Stored as plain attributes; the wrapper reads both when building
        # the JSON error payload.
        self.message = message
        self.code = code
class AimAPIWrapper:
    """
    Middleware wrapper around a single API endpoint function.

    Parses the JSON request body, validates the request against the OpenAPI
    spec, invokes the endpoint, and converts exceptions into JSON error
    responses instead of letting them propagate to the WSGI server.
    """
    def __init__(self, path, func):
        # func: the endpoint's run() callable; API: shared OpenAPI validator.
        self.func = func
        self.API = AimAPIOpenAPI
        self.path = path
        self.exception = AimAPIHTTPError
    def __call__(self, environ, start_response, session):
        """Run the endpoint as a generator; on failure yield a JSON error
        (with a stacktrace for unexpected exceptions)."""
        response = None
        try:
            # Read JSON client data if any; a bad CONTENT_LENGTH counts as 0.
            try:
                request_size = int(environ.get('CONTENT_LENGTH', 0))
            except (ValueError):
                request_size = 0
            requestBody = environ['wsgi.input'].read(request_size)
            formdata = {}
            if requestBody and len(requestBody) > 0:
                try:
                    formdata = json.loads(requestBody.decode('utf-8'))
                except json.JSONDecodeError as err:
                    # Malformed JSON body -> 400 with the parser's message.
                    start_response('400 Invalid request', [
                        ('Content-Type', 'application/json')])
                    yield json.dumps({
                        "code": 400,
                        "reason": "Invalid JSON: %s" % err
                    })
                    return
            # Validate URL against OpenAPI specs before calling the endpoint.
            try:
                self.API.validate(environ['REQUEST_METHOD'], self.path, formdata)
            except plugins.openapi.OpenAPIException as err:
                start_response('400 Invalid request', [
                    ('Content-Type', 'application/json')])
                yield json.dumps({
                    "code": 400,
                    "reason": err.message
                })
                return
            # Call page with env, SR and form data; stream its output through.
            try:
                response = self.func(self, environ, formdata, session)
                if response:
                    for bucket in response:
                        yield bucket
            except AimAPIHTTPError as err:
                # Known, deliberate API errors map to proper status lines;
                # anything unlisted falls back to 400.
                errHeaders = {
                    403: '403 Authentication failed',
                    404: '404 Resource not found',
                    500: '500 Internal Server Error',
                    501: '501 Gateway error'
                }
                errHeader = errHeaders[err.code] if err.code in errHeaders else "400 Bad request"
                start_response(errHeader, [
                    ('Content-Type', 'application/json')])
                yield json.dumps({
                    "code": err.code,
                    "reason": err.message
                }, indent = 4) + "\n"
                return
        except:
            # Unexpected exception: respond 500 with a formatted traceback.
            err_type, err_value, tb = sys.exc_info()
            traceback_output = ['API traceback:']
            traceback_output += traceback.format_tb(tb)
            traceback_output.append('%s: %s' % (err_type.__name__, err_value))
            # We don't know if response has been given yet, try giving one, fail gracefully.
            try:
                start_response('500 Internal Server Error', [
                    ('Content-Type', 'application/json')])
            except:
                pass
            yield json.dumps({
                "code": "500",
                "reason": '\n'.join(traceback_output)
            })
def fourohfour(environ, start_response):
    """Generator producing a JSON 404 response for unmatched API paths."""
    start_response("404 Not Found", [
        ('Content-Type', 'application/json')])
    payload = {
        "code": 404,
        "reason": "API endpoint not found"
    }
    yield json.dumps(payload, indent = 4) + "\n"
def application(environ, start_response):
    """
    This is the main WSGI handler. Every API call goes through here.
    Checks the path against the compiled URL table, and if an endpoint
    matches, runs it (wrapped for validation/error handling) and streams
    its output; otherwise responds with a JSON 404.
    """
    path = environ.get('PATH_INFO', '')
    for regex, function in urls:
        m = re.match(regex, path)
        if m:
            callback = AimAPIWrapper(path, function)
            session = plugins.session.AimAPISession(DB, environ, config)
            a = 0
            # Protocol: the FIRST bucket an endpoint yields is appended to
            # session.headers before start_response is called. The isinstance
            # checks below only forward str/bytes to the body, so a tuple
            # header yielded first is not emitted as body content.
            # NOTE(review): if an endpoint yields a str/bytes first, that
            # chunk ends up in the headers list AND the body -- confirm all
            # endpoints yield a header tuple first.
            for bucket in callback(environ, start_response, session):
                if a == 0:
                    session.headers.append(bucket)
                    try:
                        start_response("200 Okay", session.headers)
                    except:
                        pass
                a += 1
                # WSGI prefers byte strings, so convert if regular py3 string
                if isinstance(bucket, str):
                    yield bytes(bucket, encoding = 'utf-8')
                elif isinstance(bucket, bytes):
                    yield bucket
            return
    # No endpoint matched: emit the generic 404 body.
    for bucket in fourohfour(environ, start_response):
        yield bytes(bucket, encoding = 'utf-8')
# Manual invocation: render the OpenAPI spec as HTML instead of serving.
if __name__ == '__main__':
    AimAPIOpenAPI.toHTML()
| 34.090909 | 97 | 0.534 |
import cgi
import re
import sys
import traceback
import yaml
import json
import plugins.session
import plugins.database
import plugins.openapi
urls = []
if __name__ != '__main__':
import pages
for page in pages.handlers:
urls.append((r"^(/api/%s)(/.+)?$" % page, pages.handlers[page].run))
config = yaml.load(open("yaml/aim.yaml"))
DB = plugins.database.AimAPIDatabase(config)
AimAPIOpenAPI = plugins.openapi.OpenAPI("yaml/openapi.yaml", ignore_extras = True)
class AimAPIHTTPError(Exception):
def __init__(self, code, message):
self.code = code
self.message = message
class AimAPIWrapper:
def __init__(self, path, func):
self.func = func
self.API = AimAPIOpenAPI
self.path = path
self.exception = AimAPIHTTPError
def __call__(self, environ, start_response, session):
response = None
try:
try:
request_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
request_size = 0
requestBody = environ['wsgi.input'].read(request_size)
formdata = {}
if requestBody and len(requestBody) > 0:
try:
formdata = json.loads(requestBody.decode('utf-8'))
except json.JSONDecodeError as err:
start_response('400 Invalid request', [
('Content-Type', 'application/json')])
yield json.dumps({
"code": 400,
"reason": "Invalid JSON: %s" % err
})
return
try:
self.API.validate(environ['REQUEST_METHOD'], self.path, formdata)
except plugins.openapi.OpenAPIException as err:
start_response('400 Invalid request', [
('Content-Type', 'application/json')])
yield json.dumps({
"code": 400,
"reason": err.message
})
return
try:
response = self.func(self, environ, formdata, session)
if response:
for bucket in response:
yield bucket
except AimAPIHTTPError as err:
errHeaders = {
403: '403 Authentication failed',
404: '404 Resource not found',
500: '500 Internal Server Error',
501: '501 Gateway error'
}
errHeader = errHeaders[err.code] if err.code in errHeaders else "400 Bad request"
start_response(errHeader, [
('Content-Type', 'application/json')])
yield json.dumps({
"code": err.code,
"reason": err.message
}, indent = 4) + "\n"
return
except:
err_type, err_value, tb = sys.exc_info()
traceback_output = ['API traceback:']
traceback_output += traceback.format_tb(tb)
traceback_output.append('%s: %s' % (err_type.__name__, err_value))
try:
start_response('500 Internal Server Error', [
('Content-Type', 'application/json')])
except:
pass
yield json.dumps({
"code": "500",
"reason": '\n'.join(traceback_output)
})
def fourohfour(environ, start_response):
start_response("404 Not Found", [
('Content-Type', 'application/json')])
yield json.dumps({
"code": 404,
"reason": "API endpoint not found"
}, indent = 4) + "\n"
return
def application(environ, start_response):
path = environ.get('PATH_INFO', '')
for regex, function in urls:
m = re.match(regex, path)
if m:
callback = AimAPIWrapper(path, function)
session = plugins.session.AimAPISession(DB, environ, config)
a = 0
for bucket in callback(environ, start_response, session):
if a == 0:
session.headers.append(bucket)
try:
start_response("200 Okay", session.headers)
except:
pass
a += 1
# WSGI prefers byte strings, so convert if regular py3 string
if isinstance(bucket, str):
yield bytes(bucket, encoding = 'utf-8')
elif isinstance(bucket, bytes):
yield bucket
return
for bucket in fourohfour(environ, start_response):
yield bytes(bucket, encoding = 'utf-8')
if __name__ == '__main__':
AimAPIOpenAPI.toHTML()
| true | true |
f72040ff1201f3604a63a0df3a8ef880ea9ea363 | 5,366 | py | Python | paasta_tools/mesos/mesos_file.py | sobolevn/paasta | 8b87e0b13816c09b3d063b6d3271e6c7627fd264 | [
"Apache-2.0"
] | 1,711 | 2015-11-10T18:04:56.000Z | 2022-03-23T08:53:16.000Z | paasta_tools/mesos/mesos_file.py | sobolevn/paasta | 8b87e0b13816c09b3d063b6d3271e6c7627fd264 | [
"Apache-2.0"
] | 1,689 | 2015-11-10T17:59:04.000Z | 2022-03-31T20:46:46.000Z | paasta_tools/mesos/mesos_file.py | sobolevn/paasta | 8b87e0b13816c09b3d063b6d3271e6c7627fd264 | [
"Apache-2.0"
] | 267 | 2015-11-10T19:17:16.000Z | 2022-02-08T20:59:52.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from . import exceptions
from paasta_tools.async_utils import async_ttl_cache
class File:
    """Chunked, lazily-fetched view of a file served by a Mesos slave.

    All reads go through the slave's ``/files/read.json`` HTTP endpoint.
    When a ``task`` is given, ``path`` is resolved inside that task's sandbox
    directory on first fetch; otherwise ``path`` is used as a host path.
    """

    # Number of bytes requested per /files/read.json call.
    chunk_size = 1024

    def __init__(self, host, task=None, path=None):
        self.host = host
        self.task = task
        self.path = path

        if self.task is None:
            self._host_path = self.path
        else:
            # Defer resolution until _fetch so __init__ makes no HTTP requests.
            self._host_path = None

        self._offset = 0

        # Used during fetch, class level so the dict isn't constantly alloc'd
        self._params = {
            "path": self._host_path,
            "offset": -1,
            "length": self.chunk_size,
        }

    def __eq__(self, y):
        return self.key() == y.key()

    def __hash__(self):
        return hash(self.__str__())

    def __repr__(self):
        return f"<open file '{self.path}', for '{self._where}'>"

    def __str__(self):
        return f"{self._where}:{self.path}"

    def key(self):
        """Stable identity string: slave key plus resolved host path."""
        return "{}:{}".format(self.host.key(), self._host_path)

    @property
    def _where(self):
        # Task id when reading from a sandbox, otherwise the slave's key.
        return self.task["id"] if self.task is not None else self.host.key()

    async def _fetch(self):
        """Hit /files/read.json with the current offset/length parameters."""
        # Fill in the sandbox path if it wasn't resolved in __init__.
        if self._params["path"] is None:
            self._params["path"] = os.path.join(await self.task.directory(), self.path)
        resp = await self.host.fetch("/files/read.json", params=self._params)
        if resp.status == 404:
            raise exceptions.FileDoesNotExist("No such file or directory.")
        return await resp.json()

    async def exists(self):
        """Return True when the file (and its slave) can currently be read."""
        try:
            await self.size()
            return True
        except exceptions.FileDoesNotExist:
            return False
        except exceptions.SlaveDoesNotExist:
            return False

    # When reading a file, it is common to first check whether it exists, then
    # look at the size to determine where to seek. Instead of requiring
    # multiple requests to the slave, the size is cached for a very short
    # period of time.
    @async_ttl_cache(ttl=0.5, cleanup_self=True)
    async def size(self):
        # With offset=-1 the slave reports the file's size in "offset".
        return (await self._fetch())["offset"]

    async def seek(self, offset, whence=os.SEEK_SET):
        """Move the read offset (async because SEEK_END needs the size)."""
        if whence == os.SEEK_SET:
            self._offset = 0 + offset
        elif whence == os.SEEK_CUR:
            self._offset += offset
        elif whence == os.SEEK_END:
            self._offset = await self.size() + offset

    def tell(self):
        """Return the current read offset."""
        return self._offset

    def _length(self, start, size):
        # Clamp the final chunk so we never read past the requested size.
        if size and self.tell() - start + self.chunk_size > size:
            return size - (self.tell() - start)
        return self.chunk_size

    async def _get_chunk(self, loc, size=None):
        """Fetch ``size`` bytes at ``loc`` and advance the offset."""
        if size is None:
            size = self.chunk_size

        await self.seek(loc, os.SEEK_SET)
        self._params["offset"] = loc
        self._params["length"] = size

        data = (await self._fetch())["data"]
        await self.seek(len(data), os.SEEK_CUR)
        return data

    async def _read(self, size=None):
        """Yield chunks from the current offset until EOF or ``size`` bytes."""
        start = self.tell()
        # An empty chunk signals EOF. (Removed two unused inner helper
        # functions 'pre'/'post' that were dead code in the original.)
        blob = None
        while blob != "" and not (size and (self.tell() - start) >= size):
            blob = await self._get_chunk(self.tell(), size=self._length(start, size))
            yield blob

    async def _read_reverse(self, size=None):
        """Yield chunks starting from the end of the file, moving backwards."""
        fsize = await self.size()
        if not size:
            size = fsize

        def next_block():
            # Chunk-aligned positions from EOF down to (fsize - size).
            current = fsize
            while (current - self.chunk_size) > (fsize - size):
                current -= self.chunk_size
                yield current

        for pos in next_block():
            yield await self._get_chunk(pos)

        # Final (possibly partial) chunk at the start of the window.
        yield await self._get_chunk(fsize - size, size % self.chunk_size)

    async def _readlines(self, size=None):
        """Yield complete lines; a trailing partial line is carried forward."""
        last = ""
        async for blob in self._read(size):

            # This is not streaming and assumes small chunk sizes
            blob_lines = (last + blob).split("\n")
            for line in blob_lines[: len(blob_lines) - 1]:
                yield line

            last = blob_lines[-1]

    async def _readlines_reverse(self, size=None):
        """Yield lines from the end of the file toward the start."""
        buf = ""
        async for blob in self._read_reverse(size):
            blob_lines = (blob + buf).split("\n")
            for line in reversed(blob_lines[1:]):
                yield line

            buf = blob_lines[0]
        yield buf
| 31.564706 | 108 | 0.602124 |
import os
from . import exceptions
from paasta_tools.async_utils import async_ttl_cache
class File:
chunk_size = 1024
def __init__(self, host, task=None, path=None):
self.host = host
self.task = task
self.path = path
if self.task is None:
self._host_path = self.path
else:
self._host_path = None
self._offset = 0
# Used during fetch, class level so the dict isn't constantly alloc'd
self._params = {
"path": self._host_path,
"offset": -1,
"length": self.chunk_size,
}
def __eq__(self, y):
return self.key() == y.key()
def __hash__(self):
return hash(self.__str__())
def __repr__(self):
return f"<open file '{self.path}', for '{self._where}'>"
def __str__(self):
return f"{self._where}:{self.path}"
def key(self):
return "{}:{}".format(self.host.key(), self._host_path)
@property
def _where(self):
return self.task["id"] if self.task is not None else self.host.key()
async def _fetch(self):
# fill in path if it wasn't set in __init__
if self._params["path"] is None:
self._params["path"] = os.path.join(await self.task.directory(), self.path)
resp = await self.host.fetch("/files/read.json", params=self._params)
if resp.status == 404:
raise exceptions.FileDoesNotExist("No such file or directory.")
return await resp.json()
async def exists(self):
try:
await self.size()
return True
except exceptions.FileDoesNotExist:
return False
except exceptions.SlaveDoesNotExist:
return False
@async_ttl_cache(ttl=0.5, cleanup_self=True)
async def size(self):
return (await self._fetch())["offset"]
async def seek(self, offset, whence=os.SEEK_SET):
if whence == os.SEEK_SET:
self._offset = 0 + offset
elif whence == os.SEEK_CUR:
self._offset += offset
elif whence == os.SEEK_END:
self._offset = await self.size() + offset
def tell(self):
return self._offset
def _length(self, start, size):
if size and self.tell() - start + self.chunk_size > size:
return size - (self.tell() - start)
return self.chunk_size
async def _get_chunk(self, loc, size=None):
if size is None:
size = self.chunk_size
await self.seek(loc, os.SEEK_SET)
self._params["offset"] = loc
self._params["length"] = size
data = (await self._fetch())["data"]
await self.seek(len(data), os.SEEK_CUR)
return data
async def _read(self, size=None):
start = self.tell()
def pre(x):
return x == ""
def post(x):
return size and (self.tell() - start) >= size
blob = None
while blob != "" and not (size and (self.tell() - start) >= size):
blob = await self._get_chunk(self.tell(), size=self._length(start, size))
yield blob
async def _read_reverse(self, size=None):
fsize = await self.size()
if not size:
size = fsize
def next_block():
current = fsize
while (current - self.chunk_size) > (fsize - size):
current -= self.chunk_size
yield current
for pos in next_block():
yield await self._get_chunk(pos)
yield await self._get_chunk(fsize - size, size % self.chunk_size)
async def _readlines(self, size=None):
last = ""
async for blob in self._read(size):
blob_lines = (last + blob).split("\n")
for line in blob_lines[: len(blob_lines) - 1]:
yield line
last = blob_lines[-1]
async def _readlines_reverse(self, size=None):
buf = ""
async for blob in self._read_reverse(size):
blob_lines = (blob + buf).split("\n")
for line in reversed(blob_lines[1:]):
yield line
buf = blob_lines[0]
yield buf
| true | true |
f720411ef892790745b4b66daf03db60907dd920 | 1,203 | py | Python | techk/apps/rest/views.py | felipesantander/fullstack-challenge | d2a6ff1e518199a34eb9d095275adb9cbd8a1cc2 | [
"MIT"
] | null | null | null | techk/apps/rest/views.py | felipesantander/fullstack-challenge | d2a6ff1e518199a34eb9d095275adb9cbd8a1cc2 | [
"MIT"
] | null | null | null | techk/apps/rest/views.py | felipesantander/fullstack-challenge | d2a6ff1e518199a34eb9d095275adb9cbd8a1cc2 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from rest_framework import serializers, viewsets, generics
from apps.scraper.models import Libros, Categorias
from django.http import JsonResponse
# Create your views here.
# NOTE: snake_case class names are kept for backward compatibility with the
# existing URL conf/imports; PEP 8 would prefer PascalCase.
class libros_serializer(serializers.HyperlinkedModelSerializer):
    """Serializes a ``Libros`` (book) row, exposing its category FK as an id."""
    class Meta:
        model = Libros
        fields = ['id', 'categoria_id', 'titulo', 'miniatura_url', 'precio',
            'cantidad', 'libro_descripcion', 'upc']
class categorias_serializer(serializers.HyperlinkedModelSerializer):
    """Serializes a ``Categorias`` row along with the ids of its books."""
    # Read-only reverse relation: primary keys of related Libros rows.
    libros = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
    class Meta:
        model = Categorias
        fields = ['id', 'nombre_categoria','url','libros']
class libros_view_set(viewsets.ModelViewSet):
    """Full CRUD endpoints (list/retrieve/create/update/delete) for books."""
    queryset = Libros.objects.all()
    serializer_class = libros_serializer
class libros_de_categoria(generics.ListAPIView):
    """Read-only list of the books belonging to one category.

    The category id is taken from the ``categoria`` URL kwarg.
    """
    serializer_class = libros_serializer
    def get_queryset(self):
        # NOTE(review): filters on ``categoria`` while the serializer exposes
        # ``categoria_id`` -- assumes the FK field on Libros is named
        # ``categoria``; confirm against the model definition.
        id_categoria = self.kwargs['categoria']
        return Libros.objects.filter(categoria=id_categoria)
class Categorias_view_set(viewsets.ModelViewSet):
    """Full CRUD endpoints for categories."""
    queryset = Categorias.objects.all()
    serializer_class = categorias_serializer
| 32.513514 | 76 | 0.736492 | from django.shortcuts import render
from rest_framework import serializers, viewsets, generics
from apps.scraper.models import Libros, Categorias
from django.http import JsonResponse
class libros_serializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Libros
fields = ['id', 'categoria_id', 'titulo', 'miniatura_url', 'precio',
'cantidad', 'libro_descripcion', 'upc']
class categorias_serializer(serializers.HyperlinkedModelSerializer):
libros = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
class Meta:
model = Categorias
fields = ['id', 'nombre_categoria','url','libros']
class libros_view_set(viewsets.ModelViewSet):
queryset = Libros.objects.all()
serializer_class = libros_serializer
class libros_de_categoria(generics.ListAPIView):
serializer_class = libros_serializer
def get_queryset(self):
id_categoria = self.kwargs['categoria']
return Libros.objects.filter(categoria=id_categoria)
class Categorias_view_set(viewsets.ModelViewSet):
queryset = Categorias.objects.all()
serializer_class = categorias_serializer
| true | true |
f7204141db43a3754031bc175c87876a2d7df3e5 | 34,936 | py | Python | mmdet/models/dense_heads/reppoints_head.py | Dopamine0717/mmdetection | 40a6fddae20978de98a335cbb45e227db782f72b | [
"Apache-2.0"
] | 20,190 | 2018-09-10T01:11:53.000Z | 2022-03-31T22:31:33.000Z | mmdet/models/dense_heads/reppoints_head.py | Joker-co/mmdet_pro | 96abfd90cf0e38c5ce398795f949e9328eb85c1b | [
"Apache-2.0"
] | 6,736 | 2018-09-17T09:45:51.000Z | 2022-03-31T22:54:10.000Z | mmdet/models/dense_heads/reppoints_head.py | Joker-co/mmdet_pro | 96abfd90cf0e38c5ce398795f949e9328eb85c1b | [
"Apache-2.0"
] | 7,837 | 2018-09-11T02:58:23.000Z | 2022-03-31T22:31:38.000Z | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.ops import DeformConv2d
from mmdet.core import (build_assigner, build_sampler, images_to_levels,
multi_apply, unmap)
from mmdet.core.anchor.point_generator import MlvlPointGenerator
from mmdet.core.utils import filter_scores_and_topk
from ..builder import HEADS, build_loss
from .anchor_free_head import AnchorFreeHead
@HEADS.register_module()
class RepPointsHead(AnchorFreeHead):
"""RepPoint head.
Args:
point_feat_channels (int): Number of channels of points features.
gradient_mul (float): The multiplier to gradients from
points refinement and recognition.
point_strides (Iterable): points strides.
point_base_scale (int): bbox scale for assigning labels.
loss_cls (dict): Config of classification loss.
loss_bbox_init (dict): Config of initial points loss.
loss_bbox_refine (dict): Config of points loss in refinement.
use_grid_points (bool): If we use bounding box representation, the
reppoints is represented as grid points on the bounding box.
center_init (bool): Whether to use center point assignment.
transform_method (str): The methods to transform RepPoints to bbox.
init_cfg (dict or list[dict], optional): Initialization config dict.
""" # noqa: W605
    def __init__(self,
                 num_classes,
                 in_channels,
                 point_feat_channels=256,
                 num_points=9,
                 gradient_mul=0.1,
                 point_strides=[8, 16, 32, 64, 128],
                 point_base_scale=4,
                 loss_cls=dict(
                     type='FocalLoss',
                     use_sigmoid=True,
                     gamma=2.0,
                     alpha=0.25,
                     loss_weight=1.0),
                 loss_bbox_init=dict(
                     type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5),
                 loss_bbox_refine=dict(
                     type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
                 use_grid_points=False,
                 center_init=True,
                 transform_method='moment',
                 moment_mul=0.01,
                 init_cfg=dict(
                     type='Normal',
                     layer='Conv2d',
                     std=0.01,
                     override=dict(
                         type='Normal',
                         name='reppoints_cls_out',
                         std=0.01,
                         bias_prob=0.01)),
                 **kwargs):
        self.num_points = num_points
        self.point_feat_channels = point_feat_channels
        self.use_grid_points = use_grid_points
        self.center_init = center_init
        # we use deform conv to extract points features, so num_points must
        # form an odd square kernel (e.g. 9 -> 3x3 DCN kernel).
        self.dcn_kernel = int(np.sqrt(num_points))
        self.dcn_pad = int((self.dcn_kernel - 1) / 2)
        assert self.dcn_kernel * self.dcn_kernel == num_points, \
            'The points number should be a square number.'
        assert self.dcn_kernel % 2 == 1, \
            'The points number should be an odd square number.'
        # Base offsets of a regular dcn_kernel x dcn_kernel grid, stored as
        # (1, 2*num_points, 1, 1) in (y, x) interleaved order.
        dcn_base = np.arange(-self.dcn_pad,
                             self.dcn_pad + 1).astype(np.float64)
        dcn_base_y = np.repeat(dcn_base, self.dcn_kernel)
        dcn_base_x = np.tile(dcn_base, self.dcn_kernel)
        dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape(
            (-1))
        self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1)
        super().__init__(
            num_classes,
            in_channels,
            loss_cls=loss_cls,
            init_cfg=init_cfg,
            **kwargs)
        self.gradient_mul = gradient_mul
        self.point_base_scale = point_base_scale
        self.point_strides = point_strides
        self.prior_generator = MlvlPointGenerator(
            self.point_strides, offset=0.)
        # FocalLoss handles all locations itself; other losses need sampling.
        self.sampling = loss_cls['type'] not in ['FocalLoss']
        if self.train_cfg:
            # Separate assigners for the init and refine stages.
            self.init_assigner = build_assigner(self.train_cfg.init.assigner)
            self.refine_assigner = build_assigner(
                self.train_cfg.refine.assigner)
            # use PseudoSampler when sampling is False
            if self.sampling and hasattr(self.train_cfg, 'sampler'):
                sampler_cfg = self.train_cfg.sampler
            else:
                sampler_cfg = dict(type='PseudoSampler')
            self.sampler = build_sampler(sampler_cfg, context=self)
        self.transform_method = transform_method
        if self.transform_method == 'moment':
            # Learnable log-scale factors used by the 'moment' transform.
            self.moment_transfer = nn.Parameter(
                data=torch.zeros(2), requires_grad=True)
            self.moment_mul = moment_mul
        self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
        if self.use_sigmoid_cls:
            self.cls_out_channels = self.num_classes
        else:
            # Softmax classification needs an extra background channel.
            self.cls_out_channels = self.num_classes + 1
        self.loss_bbox_init = build_loss(loss_bbox_init)
        self.loss_bbox_refine = build_loss(loss_bbox_refine)
def _init_layers(self):
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
pts_out_dim = 4 if self.use_grid_points else 2 * self.num_points
self.reppoints_cls_conv = DeformConv2d(self.feat_channels,
self.point_feat_channels,
self.dcn_kernel, 1,
self.dcn_pad)
self.reppoints_cls_out = nn.Conv2d(self.point_feat_channels,
self.cls_out_channels, 1, 1, 0)
self.reppoints_pts_init_conv = nn.Conv2d(self.feat_channels,
self.point_feat_channels, 3,
1, 1)
self.reppoints_pts_init_out = nn.Conv2d(self.point_feat_channels,
pts_out_dim, 1, 1, 0)
self.reppoints_pts_refine_conv = DeformConv2d(self.feat_channels,
self.point_feat_channels,
self.dcn_kernel, 1,
self.dcn_pad)
self.reppoints_pts_refine_out = nn.Conv2d(self.point_feat_channels,
pts_out_dim, 1, 1, 0)
    def points2bbox(self, pts, y_first=True):
        """Converting the points set into bounding box.

        :param pts: the input points sets (fields), each points
            set (fields) is represented as 2n scalar.
        :param y_first: if y_first=True, the point set is represented as
            [y1, x1, y2, x2 ... yn, xn], otherwise the point set is
            represented as [x1, y1, x2, y2 ... xn, yn].
        :return: each points set is converting to a bbox [x1, y1, x2, y2].
        """
        # Split the interleaved channel dim into per-point (coord, coord).
        pts_reshape = pts.view(pts.shape[0], -1, 2, *pts.shape[2:])
        pts_y = pts_reshape[:, :, 0, ...] if y_first else pts_reshape[:, :, 1,
                                                                      ...]
        pts_x = pts_reshape[:, :, 1, ...] if y_first else pts_reshape[:, :, 0,
                                                                      ...]
        if self.transform_method == 'minmax':
            # Tight axis-aligned box over all points.
            bbox_left = pts_x.min(dim=1, keepdim=True)[0]
            bbox_right = pts_x.max(dim=1, keepdim=True)[0]
            bbox_up = pts_y.min(dim=1, keepdim=True)[0]
            bbox_bottom = pts_y.max(dim=1, keepdim=True)[0]
            bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom],
                             dim=1)
        elif self.transform_method == 'partial_minmax':
            # Same as 'minmax' but using only the first four points.
            pts_y = pts_y[:, :4, ...]
            pts_x = pts_x[:, :4, ...]
            bbox_left = pts_x.min(dim=1, keepdim=True)[0]
            bbox_right = pts_x.max(dim=1, keepdim=True)[0]
            bbox_up = pts_y.min(dim=1, keepdim=True)[0]
            bbox_bottom = pts_y.max(dim=1, keepdim=True)[0]
            bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom],
                             dim=1)
        elif self.transform_method == 'moment':
            # Box centered on the point mean, sized by the point std scaled
            # by learnable (partially detached) log factors.
            pts_y_mean = pts_y.mean(dim=1, keepdim=True)
            pts_x_mean = pts_x.mean(dim=1, keepdim=True)
            pts_y_std = torch.std(pts_y - pts_y_mean, dim=1, keepdim=True)
            pts_x_std = torch.std(pts_x - pts_x_mean, dim=1, keepdim=True)
            # moment_mul damps the gradient into moment_transfer.
            moment_transfer = (self.moment_transfer * self.moment_mul) + (
                self.moment_transfer.detach() * (1 - self.moment_mul))
            moment_width_transfer = moment_transfer[0]
            moment_height_transfer = moment_transfer[1]
            half_width = pts_x_std * torch.exp(moment_width_transfer)
            half_height = pts_y_std * torch.exp(moment_height_transfer)
            bbox = torch.cat([
                pts_x_mean - half_width, pts_y_mean - half_height,
                pts_x_mean + half_width, pts_y_mean + half_height
            ],
                             dim=1)
        else:
            raise NotImplementedError
        return bbox
    def gen_grid_from_reg(self, reg, previous_boxes):
        """Base on the previous bboxes and regression values, we compute the
        regressed bboxes and generate the grids on the bboxes.

        :param reg: the regression value to previous bboxes.
        :param previous_boxes: previous bboxes.
        :return: generate grids on the regressed bboxes.
        """
        b, _, h, w = reg.shape
        # Decode the regression: delta-xy on the center, log-scale on wh.
        bxy = (previous_boxes[:, :2, ...] + previous_boxes[:, 2:, ...]) / 2.
        bwh = (previous_boxes[:, 2:, ...] -
               previous_boxes[:, :2, ...]).clamp(min=1e-6)
        grid_topleft = bxy + bwh * reg[:, :2, ...] - 0.5 * bwh * torch.exp(
            reg[:, 2:, ...])
        grid_wh = bwh * torch.exp(reg[:, 2:, ...])
        grid_left = grid_topleft[:, [0], ...]
        grid_top = grid_topleft[:, [1], ...]
        grid_width = grid_wh[:, [0], ...]
        grid_height = grid_wh[:, [1], ...]
        # Evenly spaced fractions in [0, 1] along each box edge.
        intervel = torch.linspace(0., 1., self.dcn_kernel).view(
            1, self.dcn_kernel, 1, 1).type_as(reg)
        # Build a dcn_kernel x dcn_kernel lattice of points inside the box.
        grid_x = grid_left + grid_width * intervel
        grid_x = grid_x.unsqueeze(1).repeat(1, self.dcn_kernel, 1, 1, 1)
        grid_x = grid_x.view(b, -1, h, w)
        grid_y = grid_top + grid_height * intervel
        grid_y = grid_y.unsqueeze(2).repeat(1, 1, self.dcn_kernel, 1, 1)
        grid_y = grid_y.view(b, -1, h, w)
        # Interleave as (y, x) pairs to match the DCN offset layout.
        grid_yx = torch.stack([grid_y, grid_x], dim=2)
        grid_yx = grid_yx.view(b, -1, h, w)
        regressed_bbox = torch.cat([
            grid_left, grid_top, grid_left + grid_width, grid_top + grid_height
        ], 1)
        return grid_yx, regressed_bbox
    def forward(self, feats):
        """Apply ``forward_single`` to each FPN level independently."""
        return multi_apply(self.forward_single, feats)
    def forward_single(self, x):
        """Forward feature map of a single FPN level.

        Returns ``(cls_out, pts_out_init, pts_out_refine)`` in training mode,
        or ``(cls_out, decoded bboxes)`` in eval mode.
        """
        dcn_base_offset = self.dcn_base_offset.type_as(x)
        # If we use center_init, the initial reppoints is from center points.
        # If we use bounding bbox representation, the initial reppoints is
        # from regular grid placed on a pre-defined bbox.
        if self.use_grid_points or not self.center_init:
            scale = self.point_base_scale / 2
            points_init = dcn_base_offset / dcn_base_offset.max() * scale
            bbox_init = x.new_tensor([-scale, -scale, scale,
                                      scale]).view(1, 4, 1, 1)
        else:
            points_init = 0
        cls_feat = x
        pts_feat = x
        for cls_conv in self.cls_convs:
            cls_feat = cls_conv(cls_feat)
        for reg_conv in self.reg_convs:
            pts_feat = reg_conv(pts_feat)
        # initialize reppoints
        pts_out_init = self.reppoints_pts_init_out(
            self.relu(self.reppoints_pts_init_conv(pts_feat)))
        if self.use_grid_points:
            pts_out_init, bbox_out_init = self.gen_grid_from_reg(
                pts_out_init, bbox_init.detach())
        else:
            pts_out_init = pts_out_init + points_init
        # refine and classify reppoints.
        # gradient_mul scales down the gradient that flows from the refine
        # stage back into the init-stage point predictions.
        pts_out_init_grad_mul = (1 - self.gradient_mul) * pts_out_init.detach(
        ) + self.gradient_mul * pts_out_init
        # DCN offsets are expressed relative to the regular kernel grid.
        dcn_offset = pts_out_init_grad_mul - dcn_base_offset
        cls_out = self.reppoints_cls_out(
            self.relu(self.reppoints_cls_conv(cls_feat, dcn_offset)))
        pts_out_refine = self.reppoints_pts_refine_out(
            self.relu(self.reppoints_pts_refine_conv(pts_feat, dcn_offset)))
        if self.use_grid_points:
            pts_out_refine, bbox_out_refine = self.gen_grid_from_reg(
                pts_out_refine, bbox_out_init.detach())
        else:
            # Refinement is a residual on top of the (detached) init points.
            pts_out_refine = pts_out_refine + pts_out_init.detach()
        if self.training:
            return cls_out, pts_out_init, pts_out_refine
        else:
            return cls_out, self.points2bbox(pts_out_refine)
def get_points(self, featmap_sizes, img_metas, device):
"""Get points according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
img_metas (list[dict]): Image meta info.
Returns:
tuple: points of each image, valid flags of each image
"""
num_imgs = len(img_metas)
# since feature map sizes of all images are the same, we only compute
# points center for one time
multi_level_points = self.prior_generator.grid_priors(
featmap_sizes, device=device, with_stride=True)
points_list = [[point.clone() for point in multi_level_points]
for _ in range(num_imgs)]
# for each image, we compute valid flags of multi level grids
valid_flag_list = []
for img_id, img_meta in enumerate(img_metas):
multi_level_flags = self.prior_generator.valid_flags(
featmap_sizes, img_meta['pad_shape'])
valid_flag_list.append(multi_level_flags)
return points_list, valid_flag_list
def centers_to_bboxes(self, point_list):
"""Get bboxes according to center points.
Only used in :class:`MaxIoUAssigner`.
"""
bbox_list = []
for i_img, point in enumerate(point_list):
bbox = []
for i_lvl in range(len(self.point_strides)):
scale = self.point_base_scale * self.point_strides[i_lvl] * 0.5
bbox_shift = torch.Tensor([-scale, -scale, scale,
scale]).view(1, 4).type_as(point[0])
bbox_center = torch.cat(
[point[i_lvl][:, :2], point[i_lvl][:, :2]], dim=1)
bbox.append(bbox_center + bbox_shift)
bbox_list.append(bbox)
return bbox_list
def offset_to_pts(self, center_list, pred_list):
"""Change from point offset to point coordinate."""
pts_list = []
for i_lvl in range(len(self.point_strides)):
pts_lvl = []
for i_img in range(len(center_list)):
pts_center = center_list[i_img][i_lvl][:, :2].repeat(
1, self.num_points)
pts_shift = pred_list[i_lvl][i_img]
yx_pts_shift = pts_shift.permute(1, 2, 0).view(
-1, 2 * self.num_points)
y_pts_shift = yx_pts_shift[..., 0::2]
x_pts_shift = yx_pts_shift[..., 1::2]
xy_pts_shift = torch.stack([x_pts_shift, y_pts_shift], -1)
xy_pts_shift = xy_pts_shift.view(*yx_pts_shift.shape[:-1], -1)
pts = xy_pts_shift * self.point_strides[i_lvl] + pts_center
pts_lvl.append(pts)
pts_lvl = torch.stack(pts_lvl, 0)
pts_list.append(pts_lvl)
return pts_list
    def _point_target_single(self,
                             flat_proposals,
                             valid_flags,
                             gt_bboxes,
                             gt_bboxes_ignore,
                             gt_labels,
                             stage='init',
                             unmap_outputs=True):
        """Compute targets for the proposals of a single image.

        Args:
            flat_proposals (Tensor): Concatenated multi-level proposals
                (points or bboxes) of one image.
            valid_flags (Tensor): Boolean mask marking usable proposals.
            gt_bboxes (Tensor): Ground-truth bboxes of the image.
            gt_bboxes_ignore (Tensor): GT bboxes to be ignored during
                assignment.
            gt_labels (Tensor | None): GT labels; ``None`` only for RPN.
            stage (str): ``'init'`` or ``'refine'``; selects the assigner
                and the positive-sample weight from ``train_cfg``.
            unmap_outputs (bool): Whether to map results back onto the
                full (unfiltered) proposal set.

        Returns:
            tuple: ``(labels, label_weights, bbox_gt, pos_proposals,
                proposals_weights, pos_inds, neg_inds)``; seven ``None``
                values when no proposal is valid.
        """
        inside_flags = valid_flags
        if not inside_flags.any():
            return (None, ) * 7
        # assign gt and sample proposals
        proposals = flat_proposals[inside_flags, :]
        if stage == 'init':
            assigner = self.init_assigner
            pos_weight = self.train_cfg.init.pos_weight
        else:
            assigner = self.refine_assigner
            pos_weight = self.train_cfg.refine.pos_weight
        assign_result = assigner.assign(proposals, gt_bboxes, gt_bboxes_ignore,
                                        None if self.sampling else gt_labels)
        sampling_result = self.sampler.sample(assign_result, proposals,
                                              gt_bboxes)
        num_valid_proposals = proposals.shape[0]
        bbox_gt = proposals.new_zeros([num_valid_proposals, 4])
        pos_proposals = torch.zeros_like(proposals)
        proposals_weights = proposals.new_zeros([num_valid_proposals, 4])
        # default label is the background class index (== num_classes)
        labels = proposals.new_full((num_valid_proposals, ),
                                    self.num_classes,
                                    dtype=torch.long)
        label_weights = proposals.new_zeros(
            num_valid_proposals, dtype=torch.float)
        pos_inds = sampling_result.pos_inds
        neg_inds = sampling_result.neg_inds
        if len(pos_inds) > 0:
            pos_gt_bboxes = sampling_result.pos_gt_bboxes
            bbox_gt[pos_inds, :] = pos_gt_bboxes
            pos_proposals[pos_inds, :] = proposals[pos_inds, :]
            proposals_weights[pos_inds, :] = 1.0
            if gt_labels is None:
                # Only rpn gives gt_labels as None
                # Foreground is the first class
                labels[pos_inds] = 0
            else:
                labels[pos_inds] = gt_labels[
                    sampling_result.pos_assigned_gt_inds]
            if pos_weight <= 0:
                label_weights[pos_inds] = 1.0
            else:
                label_weights[pos_inds] = pos_weight
        if len(neg_inds) > 0:
            label_weights[neg_inds] = 1.0
        # map up to original set of proposals
        if unmap_outputs:
            num_total_proposals = flat_proposals.size(0)
            labels = unmap(labels, num_total_proposals, inside_flags)
            label_weights = unmap(label_weights, num_total_proposals,
                                  inside_flags)
            bbox_gt = unmap(bbox_gt, num_total_proposals, inside_flags)
            pos_proposals = unmap(pos_proposals, num_total_proposals,
                                  inside_flags)
            proposals_weights = unmap(proposals_weights, num_total_proposals,
                                      inside_flags)
        return (labels, label_weights, bbox_gt, pos_proposals,
                proposals_weights, pos_inds, neg_inds)
def get_targets(self,
proposals_list,
valid_flag_list,
gt_bboxes_list,
img_metas,
gt_bboxes_ignore_list=None,
gt_labels_list=None,
stage='init',
label_channels=1,
unmap_outputs=True):
"""Compute corresponding GT box and classification targets for
proposals.
Args:
proposals_list (list[list]): Multi level points/bboxes of each
image.
valid_flag_list (list[list]): Multi level valid flags of each
image.
gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
img_metas (list[dict]): Meta info of each image.
gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be
ignored.
gt_bboxes_list (list[Tensor]): Ground truth labels of each box.
stage (str): `init` or `refine`. Generate target for init stage or
refine stage
label_channels (int): Channel of label.
unmap_outputs (bool): Whether to map outputs back to the original
set of anchors.
Returns:
tuple:
- labels_list (list[Tensor]): Labels of each level.
- label_weights_list (list[Tensor]): Label weights of each level. # noqa: E501
- bbox_gt_list (list[Tensor]): Ground truth bbox of each level.
- proposal_list (list[Tensor]): Proposals(points/bboxes) of each level. # noqa: E501
- proposal_weights_list (list[Tensor]): Proposal weights of each level. # noqa: E501
- num_total_pos (int): Number of positive samples in all images. # noqa: E501
- num_total_neg (int): Number of negative samples in all images. # noqa: E501
"""
assert stage in ['init', 'refine']
num_imgs = len(img_metas)
assert len(proposals_list) == len(valid_flag_list) == num_imgs
# points number of multi levels
num_level_proposals = [points.size(0) for points in proposals_list[0]]
# concat all level points and flags to a single tensor
for i in range(num_imgs):
assert len(proposals_list[i]) == len(valid_flag_list[i])
proposals_list[i] = torch.cat(proposals_list[i])
valid_flag_list[i] = torch.cat(valid_flag_list[i])
# compute targets for each image
if gt_bboxes_ignore_list is None:
gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
if gt_labels_list is None:
gt_labels_list = [None for _ in range(num_imgs)]
(all_labels, all_label_weights, all_bbox_gt, all_proposals,
all_proposal_weights, pos_inds_list, neg_inds_list) = multi_apply(
self._point_target_single,
proposals_list,
valid_flag_list,
gt_bboxes_list,
gt_bboxes_ignore_list,
gt_labels_list,
stage=stage,
unmap_outputs=unmap_outputs)
# no valid points
if any([labels is None for labels in all_labels]):
return None
# sampled points of all images
num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
labels_list = images_to_levels(all_labels, num_level_proposals)
label_weights_list = images_to_levels(all_label_weights,
num_level_proposals)
bbox_gt_list = images_to_levels(all_bbox_gt, num_level_proposals)
proposals_list = images_to_levels(all_proposals, num_level_proposals)
proposal_weights_list = images_to_levels(all_proposal_weights,
num_level_proposals)
return (labels_list, label_weights_list, bbox_gt_list, proposals_list,
proposal_weights_list, num_total_pos, num_total_neg)
def loss_single(self, cls_score, pts_pred_init, pts_pred_refine, labels,
label_weights, bbox_gt_init, bbox_weights_init,
bbox_gt_refine, bbox_weights_refine, stride,
num_total_samples_init, num_total_samples_refine):
# classification loss
labels = labels.reshape(-1)
label_weights = label_weights.reshape(-1)
cls_score = cls_score.permute(0, 2, 3,
1).reshape(-1, self.cls_out_channels)
cls_score = cls_score.contiguous()
loss_cls = self.loss_cls(
cls_score,
labels,
label_weights,
avg_factor=num_total_samples_refine)
# points loss
bbox_gt_init = bbox_gt_init.reshape(-1, 4)
bbox_weights_init = bbox_weights_init.reshape(-1, 4)
bbox_pred_init = self.points2bbox(
pts_pred_init.reshape(-1, 2 * self.num_points), y_first=False)
bbox_gt_refine = bbox_gt_refine.reshape(-1, 4)
bbox_weights_refine = bbox_weights_refine.reshape(-1, 4)
bbox_pred_refine = self.points2bbox(
pts_pred_refine.reshape(-1, 2 * self.num_points), y_first=False)
normalize_term = self.point_base_scale * stride
loss_pts_init = self.loss_bbox_init(
bbox_pred_init / normalize_term,
bbox_gt_init / normalize_term,
bbox_weights_init,
avg_factor=num_total_samples_init)
loss_pts_refine = self.loss_bbox_refine(
bbox_pred_refine / normalize_term,
bbox_gt_refine / normalize_term,
bbox_weights_refine,
avg_factor=num_total_samples_refine)
return loss_cls, loss_pts_init, loss_pts_refine
    def loss(self,
             cls_scores,
             pts_preds_init,
             pts_preds_refine,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute the losses of the head.

        Args:
            cls_scores (list[Tensor]): Per-level classification scores.
            pts_preds_init (list[Tensor]): Per-level initial point offsets.
            pts_preds_refine (list[Tensor]): Per-level refined point offsets.
            gt_bboxes (list[Tensor]): Per-image ground-truth bboxes.
            gt_labels (list[Tensor]): Per-image ground-truth labels.
            img_metas (list[dict]): Per-image meta info.
            gt_bboxes_ignore (list[Tensor] | None): GT bboxes to be ignored.

        Returns:
            dict[str, list[Tensor]]: Per-level ``loss_cls``,
                ``loss_pts_init`` and ``loss_pts_refine``.
        """
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        device = cls_scores[0].device
        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
        # target for initial stage
        center_list, valid_flag_list = self.get_points(featmap_sizes,
                                                       img_metas, device)
        pts_coordinate_preds_init = self.offset_to_pts(center_list,
                                                       pts_preds_init)
        if self.train_cfg.init.assigner['type'] == 'PointAssigner':
            # Assign target for center list
            candidate_list = center_list
        else:
            # transform center list to bbox list and
            # assign target for bbox list
            bbox_list = self.centers_to_bboxes(center_list)
            candidate_list = bbox_list
        cls_reg_targets_init = self.get_targets(
            candidate_list,
            valid_flag_list,
            gt_bboxes,
            img_metas,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            stage='init',
            label_channels=label_channels)
        (*_, bbox_gt_list_init, candidate_list_init, bbox_weights_list_init,
         num_total_pos_init, num_total_neg_init) = cls_reg_targets_init
        num_total_samples_init = (
            num_total_pos_init +
            num_total_neg_init if self.sampling else num_total_pos_init)
        # target for refinement stage
        # get_points is called again because get_targets mutated the first
        # center_list in place (levels were concatenated per image)
        center_list, valid_flag_list = self.get_points(featmap_sizes,
                                                       img_metas, device)
        pts_coordinate_preds_refine = self.offset_to_pts(
            center_list, pts_preds_refine)
        bbox_list = []
        for i_img, center in enumerate(center_list):
            bbox = []
            for i_lvl in range(len(pts_preds_refine)):
                # decode the (detached) init-stage points into bboxes and
                # shift them to image coordinates for refine-stage assignment
                bbox_preds_init = self.points2bbox(
                    pts_preds_init[i_lvl].detach())
                bbox_shift = bbox_preds_init * self.point_strides[i_lvl]
                bbox_center = torch.cat(
                    [center[i_lvl][:, :2], center[i_lvl][:, :2]], dim=1)
                bbox.append(bbox_center +
                            bbox_shift[i_img].permute(1, 2, 0).reshape(-1, 4))
            bbox_list.append(bbox)
        cls_reg_targets_refine = self.get_targets(
            bbox_list,
            valid_flag_list,
            gt_bboxes,
            img_metas,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            stage='refine',
            label_channels=label_channels)
        (labels_list, label_weights_list, bbox_gt_list_refine,
         candidate_list_refine, bbox_weights_list_refine, num_total_pos_refine,
         num_total_neg_refine) = cls_reg_targets_refine
        num_total_samples_refine = (
            num_total_pos_refine +
            num_total_neg_refine if self.sampling else num_total_pos_refine)
        # compute loss
        losses_cls, losses_pts_init, losses_pts_refine = multi_apply(
            self.loss_single,
            cls_scores,
            pts_coordinate_preds_init,
            pts_coordinate_preds_refine,
            labels_list,
            label_weights_list,
            bbox_gt_list_init,
            bbox_weights_list_init,
            bbox_gt_list_refine,
            bbox_weights_list_refine,
            self.point_strides,
            num_total_samples_init=num_total_samples_init,
            num_total_samples_refine=num_total_samples_refine)
        loss_dict_all = {
            'loss_cls': losses_cls,
            'loss_pts_init': losses_pts_init,
            'loss_pts_refine': losses_pts_refine
        }
        return loss_dict_all
    # Same as base_dense_head/_get_bboxes_single except self._bbox_decode
    def _get_bboxes_single(self,
                           cls_score_list,
                           bbox_pred_list,
                           score_factor_list,
                           mlvl_priors,
                           img_meta,
                           cfg,
                           rescale=False,
                           with_nms=True,
                           **kwargs):
        """Transform outputs of a single image into bbox predictions.
        Args:
            cls_score_list (list[Tensor]): Box scores from all scale
                levels of a single image, each item has shape
                (num_priors * num_classes, H, W).
            bbox_pred_list (list[Tensor]): Box energies / deltas from
                all scale levels of a single image, each item has shape
                (num_priors * 4, H, W).
            score_factor_list (list[Tensor]): Score factor from all scale
                levels of a single image. RepPoints head does not need
                this value.
            mlvl_priors (list[Tensor]): Each element in the list is
                the priors of a single level in feature pyramid, has shape
                (num_priors, 2).
            img_meta (dict): Image meta info.
            cfg (mmcv.Config): Test / postprocessing configuration,
                if None, test_cfg would be used.
            rescale (bool): If True, return boxes in original image space.
                Default: False.
            with_nms (bool): If True, do nms before return boxes.
                Default: True.
        Returns:
            tuple[Tensor]: Results of detected bboxes and labels. If with_nms
                is False and mlvl_score_factor is None, return mlvl_bboxes and
                mlvl_scores, else return mlvl_bboxes, mlvl_scores and
                mlvl_score_factor. Usually with_nms is False is used for aug
                test. If with_nms is True, then return the following format
                - det_bboxes (Tensor): Predicted bboxes with shape \
                    [num_bboxes, 5], where the first 4 columns are bounding \
                    box positions (tl_x, tl_y, br_x, br_y) and the 5-th \
                    column are scores between 0 and 1.
                - det_labels (Tensor): Predicted labels of the corresponding \
                    box with shape [num_bboxes].
        """
        cfg = self.test_cfg if cfg is None else cfg
        assert len(cls_score_list) == len(bbox_pred_list)
        img_shape = img_meta['img_shape']
        nms_pre = cfg.get('nms_pre', -1)
        mlvl_bboxes = []
        mlvl_scores = []
        mlvl_labels = []
        for level_idx, (cls_score, bbox_pred, priors) in enumerate(
                zip(cls_score_list, bbox_pred_list, mlvl_priors)):
            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
            # flatten spatial dims: (C, H, W) -> (H*W*num_priors, C)
            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
            cls_score = cls_score.permute(1, 2,
                                          0).reshape(-1, self.cls_out_channels)
            if self.use_sigmoid_cls:
                scores = cls_score.sigmoid()
            else:
                # drop the trailing background column for softmax heads
                scores = cls_score.softmax(-1)[:, :-1]

            # After https://github.com/open-mmlab/mmdetection/pull/6268/,
            # this operation keeps fewer bboxes under the same `nms_pre`.
            # There is no difference in performance for most models. If you
            # find a slight drop in performance, you can set a larger
            # `nms_pre` than before.
            results = filter_scores_and_topk(
                scores, cfg.score_thr, nms_pre,
                dict(bbox_pred=bbox_pred, priors=priors))
            scores, labels, _, filtered_results = results

            bbox_pred = filtered_results['bbox_pred']
            priors = filtered_results['priors']

            bboxes = self._bbox_decode(priors, bbox_pred,
                                       self.point_strides[level_idx],
                                       img_shape)

            mlvl_bboxes.append(bboxes)
            mlvl_scores.append(scores)
            mlvl_labels.append(labels)
        return self._bbox_post_process(
            mlvl_scores,
            mlvl_labels,
            mlvl_bboxes,
            img_meta['scale_factor'],
            cfg,
            rescale=rescale,
            with_nms=with_nms)
def _bbox_decode(self, points, bbox_pred, stride, max_shape):
bbox_pos_center = torch.cat([points[:, :2], points[:, :2]], dim=1)
bboxes = bbox_pred * stride + bbox_pos_center
x1 = bboxes[:, 0].clamp(min=0, max=max_shape[1])
y1 = bboxes[:, 1].clamp(min=0, max=max_shape[0])
x2 = bboxes[:, 2].clamp(min=0, max=max_shape[1])
y2 = bboxes[:, 3].clamp(min=0, max=max_shape[0])
decoded_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
return decoded_bboxes
| 45.667974 | 101 | 0.570901 |
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.ops import DeformConv2d
from mmdet.core import (build_assigner, build_sampler, images_to_levels,
multi_apply, unmap)
from mmdet.core.anchor.point_generator import MlvlPointGenerator
from mmdet.core.utils import filter_scores_and_topk
from ..builder import HEADS, build_loss
from .anchor_free_head import AnchorFreeHead
@HEADS.register_module()
class RepPointsHead(AnchorFreeHead):
def __init__(self,
num_classes,
in_channels,
point_feat_channels=256,
num_points=9,
gradient_mul=0.1,
point_strides=[8, 16, 32, 64, 128],
point_base_scale=4,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_init=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5),
loss_bbox_refine=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
use_grid_points=False,
center_init=True,
transform_method='moment',
moment_mul=0.01,
init_cfg=dict(
type='Normal',
layer='Conv2d',
std=0.01,
override=dict(
type='Normal',
name='reppoints_cls_out',
std=0.01,
bias_prob=0.01)),
**kwargs):
self.num_points = num_points
self.point_feat_channels = point_feat_channels
self.use_grid_points = use_grid_points
self.center_init = center_init
self.dcn_kernel = int(np.sqrt(num_points))
self.dcn_pad = int((self.dcn_kernel - 1) / 2)
assert self.dcn_kernel * self.dcn_kernel == num_points, \
'The points number should be a square number.'
assert self.dcn_kernel % 2 == 1, \
'The points number should be an odd square number.'
dcn_base = np.arange(-self.dcn_pad,
self.dcn_pad + 1).astype(np.float64)
dcn_base_y = np.repeat(dcn_base, self.dcn_kernel)
dcn_base_x = np.tile(dcn_base, self.dcn_kernel)
dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape(
(-1))
self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1)
super().__init__(
num_classes,
in_channels,
loss_cls=loss_cls,
init_cfg=init_cfg,
**kwargs)
self.gradient_mul = gradient_mul
self.point_base_scale = point_base_scale
self.point_strides = point_strides
self.prior_generator = MlvlPointGenerator(
self.point_strides, offset=0.)
self.sampling = loss_cls['type'] not in ['FocalLoss']
if self.train_cfg:
self.init_assigner = build_assigner(self.train_cfg.init.assigner)
self.refine_assigner = build_assigner(
self.train_cfg.refine.assigner)
if self.sampling and hasattr(self.train_cfg, 'sampler'):
sampler_cfg = self.train_cfg.sampler
else:
sampler_cfg = dict(type='PseudoSampler')
self.sampler = build_sampler(sampler_cfg, context=self)
self.transform_method = transform_method
if self.transform_method == 'moment':
self.moment_transfer = nn.Parameter(
data=torch.zeros(2), requires_grad=True)
self.moment_mul = moment_mul
self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
if self.use_sigmoid_cls:
self.cls_out_channels = self.num_classes
else:
self.cls_out_channels = self.num_classes + 1
self.loss_bbox_init = build_loss(loss_bbox_init)
self.loss_bbox_refine = build_loss(loss_bbox_refine)
def _init_layers(self):
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
pts_out_dim = 4 if self.use_grid_points else 2 * self.num_points
self.reppoints_cls_conv = DeformConv2d(self.feat_channels,
self.point_feat_channels,
self.dcn_kernel, 1,
self.dcn_pad)
self.reppoints_cls_out = nn.Conv2d(self.point_feat_channels,
self.cls_out_channels, 1, 1, 0)
self.reppoints_pts_init_conv = nn.Conv2d(self.feat_channels,
self.point_feat_channels, 3,
1, 1)
self.reppoints_pts_init_out = nn.Conv2d(self.point_feat_channels,
pts_out_dim, 1, 1, 0)
self.reppoints_pts_refine_conv = DeformConv2d(self.feat_channels,
self.point_feat_channels,
self.dcn_kernel, 1,
self.dcn_pad)
self.reppoints_pts_refine_out = nn.Conv2d(self.point_feat_channels,
pts_out_dim, 1, 1, 0)
def points2bbox(self, pts, y_first=True):
pts_reshape = pts.view(pts.shape[0], -1, 2, *pts.shape[2:])
pts_y = pts_reshape[:, :, 0, ...] if y_first else pts_reshape[:, :, 1,
...]
pts_x = pts_reshape[:, :, 1, ...] if y_first else pts_reshape[:, :, 0,
...]
if self.transform_method == 'minmax':
bbox_left = pts_x.min(dim=1, keepdim=True)[0]
bbox_right = pts_x.max(dim=1, keepdim=True)[0]
bbox_up = pts_y.min(dim=1, keepdim=True)[0]
bbox_bottom = pts_y.max(dim=1, keepdim=True)[0]
bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom],
dim=1)
elif self.transform_method == 'partial_minmax':
pts_y = pts_y[:, :4, ...]
pts_x = pts_x[:, :4, ...]
bbox_left = pts_x.min(dim=1, keepdim=True)[0]
bbox_right = pts_x.max(dim=1, keepdim=True)[0]
bbox_up = pts_y.min(dim=1, keepdim=True)[0]
bbox_bottom = pts_y.max(dim=1, keepdim=True)[0]
bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom],
dim=1)
elif self.transform_method == 'moment':
pts_y_mean = pts_y.mean(dim=1, keepdim=True)
pts_x_mean = pts_x.mean(dim=1, keepdim=True)
pts_y_std = torch.std(pts_y - pts_y_mean, dim=1, keepdim=True)
pts_x_std = torch.std(pts_x - pts_x_mean, dim=1, keepdim=True)
moment_transfer = (self.moment_transfer * self.moment_mul) + (
self.moment_transfer.detach() * (1 - self.moment_mul))
moment_width_transfer = moment_transfer[0]
moment_height_transfer = moment_transfer[1]
half_width = pts_x_std * torch.exp(moment_width_transfer)
half_height = pts_y_std * torch.exp(moment_height_transfer)
bbox = torch.cat([
pts_x_mean - half_width, pts_y_mean - half_height,
pts_x_mean + half_width, pts_y_mean + half_height
],
dim=1)
else:
raise NotImplementedError
return bbox
def gen_grid_from_reg(self, reg, previous_boxes):
b, _, h, w = reg.shape
bxy = (previous_boxes[:, :2, ...] + previous_boxes[:, 2:, ...]) / 2.
bwh = (previous_boxes[:, 2:, ...] -
previous_boxes[:, :2, ...]).clamp(min=1e-6)
grid_topleft = bxy + bwh * reg[:, :2, ...] - 0.5 * bwh * torch.exp(
reg[:, 2:, ...])
grid_wh = bwh * torch.exp(reg[:, 2:, ...])
grid_left = grid_topleft[:, [0], ...]
grid_top = grid_topleft[:, [1], ...]
grid_width = grid_wh[:, [0], ...]
grid_height = grid_wh[:, [1], ...]
intervel = torch.linspace(0., 1., self.dcn_kernel).view(
1, self.dcn_kernel, 1, 1).type_as(reg)
grid_x = grid_left + grid_width * intervel
grid_x = grid_x.unsqueeze(1).repeat(1, self.dcn_kernel, 1, 1, 1)
grid_x = grid_x.view(b, -1, h, w)
grid_y = grid_top + grid_height * intervel
grid_y = grid_y.unsqueeze(2).repeat(1, 1, self.dcn_kernel, 1, 1)
grid_y = grid_y.view(b, -1, h, w)
grid_yx = torch.stack([grid_y, grid_x], dim=2)
grid_yx = grid_yx.view(b, -1, h, w)
regressed_bbox = torch.cat([
grid_left, grid_top, grid_left + grid_width, grid_top + grid_height
], 1)
return grid_yx, regressed_bbox
def forward(self, feats):
return multi_apply(self.forward_single, feats)
def forward_single(self, x):
dcn_base_offset = self.dcn_base_offset.type_as(x)
if self.use_grid_points or not self.center_init:
scale = self.point_base_scale / 2
points_init = dcn_base_offset / dcn_base_offset.max() * scale
bbox_init = x.new_tensor([-scale, -scale, scale,
scale]).view(1, 4, 1, 1)
else:
points_init = 0
cls_feat = x
pts_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
pts_feat = reg_conv(pts_feat)
pts_out_init = self.reppoints_pts_init_out(
self.relu(self.reppoints_pts_init_conv(pts_feat)))
if self.use_grid_points:
pts_out_init, bbox_out_init = self.gen_grid_from_reg(
pts_out_init, bbox_init.detach())
else:
pts_out_init = pts_out_init + points_init
pts_out_init_grad_mul = (1 - self.gradient_mul) * pts_out_init.detach(
) + self.gradient_mul * pts_out_init
dcn_offset = pts_out_init_grad_mul - dcn_base_offset
cls_out = self.reppoints_cls_out(
self.relu(self.reppoints_cls_conv(cls_feat, dcn_offset)))
pts_out_refine = self.reppoints_pts_refine_out(
self.relu(self.reppoints_pts_refine_conv(pts_feat, dcn_offset)))
if self.use_grid_points:
pts_out_refine, bbox_out_refine = self.gen_grid_from_reg(
pts_out_refine, bbox_out_init.detach())
else:
pts_out_refine = pts_out_refine + pts_out_init.detach()
if self.training:
return cls_out, pts_out_init, pts_out_refine
else:
return cls_out, self.points2bbox(pts_out_refine)
def get_points(self, featmap_sizes, img_metas, device):
num_imgs = len(img_metas)
multi_level_points = self.prior_generator.grid_priors(
featmap_sizes, device=device, with_stride=True)
points_list = [[point.clone() for point in multi_level_points]
for _ in range(num_imgs)]
valid_flag_list = []
for img_id, img_meta in enumerate(img_metas):
multi_level_flags = self.prior_generator.valid_flags(
featmap_sizes, img_meta['pad_shape'])
valid_flag_list.append(multi_level_flags)
return points_list, valid_flag_list
def centers_to_bboxes(self, point_list):
bbox_list = []
for i_img, point in enumerate(point_list):
bbox = []
for i_lvl in range(len(self.point_strides)):
scale = self.point_base_scale * self.point_strides[i_lvl] * 0.5
bbox_shift = torch.Tensor([-scale, -scale, scale,
scale]).view(1, 4).type_as(point[0])
bbox_center = torch.cat(
[point[i_lvl][:, :2], point[i_lvl][:, :2]], dim=1)
bbox.append(bbox_center + bbox_shift)
bbox_list.append(bbox)
return bbox_list
def offset_to_pts(self, center_list, pred_list):
pts_list = []
for i_lvl in range(len(self.point_strides)):
pts_lvl = []
for i_img in range(len(center_list)):
pts_center = center_list[i_img][i_lvl][:, :2].repeat(
1, self.num_points)
pts_shift = pred_list[i_lvl][i_img]
yx_pts_shift = pts_shift.permute(1, 2, 0).view(
-1, 2 * self.num_points)
y_pts_shift = yx_pts_shift[..., 0::2]
x_pts_shift = yx_pts_shift[..., 1::2]
xy_pts_shift = torch.stack([x_pts_shift, y_pts_shift], -1)
xy_pts_shift = xy_pts_shift.view(*yx_pts_shift.shape[:-1], -1)
pts = xy_pts_shift * self.point_strides[i_lvl] + pts_center
pts_lvl.append(pts)
pts_lvl = torch.stack(pts_lvl, 0)
pts_list.append(pts_lvl)
return pts_list
def _point_target_single(self,
flat_proposals,
valid_flags,
gt_bboxes,
gt_bboxes_ignore,
gt_labels,
stage='init',
unmap_outputs=True):
inside_flags = valid_flags
if not inside_flags.any():
return (None, ) * 7
proposals = flat_proposals[inside_flags, :]
if stage == 'init':
assigner = self.init_assigner
pos_weight = self.train_cfg.init.pos_weight
else:
assigner = self.refine_assigner
pos_weight = self.train_cfg.refine.pos_weight
assign_result = assigner.assign(proposals, gt_bboxes, gt_bboxes_ignore,
None if self.sampling else gt_labels)
sampling_result = self.sampler.sample(assign_result, proposals,
gt_bboxes)
num_valid_proposals = proposals.shape[0]
bbox_gt = proposals.new_zeros([num_valid_proposals, 4])
pos_proposals = torch.zeros_like(proposals)
proposals_weights = proposals.new_zeros([num_valid_proposals, 4])
labels = proposals.new_full((num_valid_proposals, ),
self.num_classes,
dtype=torch.long)
label_weights = proposals.new_zeros(
num_valid_proposals, dtype=torch.float)
pos_inds = sampling_result.pos_inds
neg_inds = sampling_result.neg_inds
if len(pos_inds) > 0:
pos_gt_bboxes = sampling_result.pos_gt_bboxes
bbox_gt[pos_inds, :] = pos_gt_bboxes
pos_proposals[pos_inds, :] = proposals[pos_inds, :]
proposals_weights[pos_inds, :] = 1.0
if gt_labels is None:
labels[pos_inds] = 0
else:
labels[pos_inds] = gt_labels[
sampling_result.pos_assigned_gt_inds]
if pos_weight <= 0:
label_weights[pos_inds] = 1.0
else:
label_weights[pos_inds] = pos_weight
if len(neg_inds) > 0:
label_weights[neg_inds] = 1.0
if unmap_outputs:
num_total_proposals = flat_proposals.size(0)
labels = unmap(labels, num_total_proposals, inside_flags)
label_weights = unmap(label_weights, num_total_proposals,
inside_flags)
bbox_gt = unmap(bbox_gt, num_total_proposals, inside_flags)
pos_proposals = unmap(pos_proposals, num_total_proposals,
inside_flags)
proposals_weights = unmap(proposals_weights, num_total_proposals,
inside_flags)
return (labels, label_weights, bbox_gt, pos_proposals,
proposals_weights, pos_inds, neg_inds)
def get_targets(self,
proposals_list,
valid_flag_list,
gt_bboxes_list,
img_metas,
gt_bboxes_ignore_list=None,
gt_labels_list=None,
stage='init',
label_channels=1,
unmap_outputs=True):
assert stage in ['init', 'refine']
num_imgs = len(img_metas)
assert len(proposals_list) == len(valid_flag_list) == num_imgs
num_level_proposals = [points.size(0) for points in proposals_list[0]]
for i in range(num_imgs):
assert len(proposals_list[i]) == len(valid_flag_list[i])
proposals_list[i] = torch.cat(proposals_list[i])
valid_flag_list[i] = torch.cat(valid_flag_list[i])
if gt_bboxes_ignore_list is None:
gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
if gt_labels_list is None:
gt_labels_list = [None for _ in range(num_imgs)]
(all_labels, all_label_weights, all_bbox_gt, all_proposals,
all_proposal_weights, pos_inds_list, neg_inds_list) = multi_apply(
self._point_target_single,
proposals_list,
valid_flag_list,
gt_bboxes_list,
gt_bboxes_ignore_list,
gt_labels_list,
stage=stage,
unmap_outputs=unmap_outputs)
if any([labels is None for labels in all_labels]):
return None
num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
labels_list = images_to_levels(all_labels, num_level_proposals)
label_weights_list = images_to_levels(all_label_weights,
num_level_proposals)
bbox_gt_list = images_to_levels(all_bbox_gt, num_level_proposals)
proposals_list = images_to_levels(all_proposals, num_level_proposals)
proposal_weights_list = images_to_levels(all_proposal_weights,
num_level_proposals)
return (labels_list, label_weights_list, bbox_gt_list, proposals_list,
proposal_weights_list, num_total_pos, num_total_neg)
def loss_single(self, cls_score, pts_pred_init, pts_pred_refine, labels,
label_weights, bbox_gt_init, bbox_weights_init,
bbox_gt_refine, bbox_weights_refine, stride,
num_total_samples_init, num_total_samples_refine):
labels = labels.reshape(-1)
label_weights = label_weights.reshape(-1)
cls_score = cls_score.permute(0, 2, 3,
1).reshape(-1, self.cls_out_channels)
cls_score = cls_score.contiguous()
loss_cls = self.loss_cls(
cls_score,
labels,
label_weights,
avg_factor=num_total_samples_refine)
bbox_gt_init = bbox_gt_init.reshape(-1, 4)
bbox_weights_init = bbox_weights_init.reshape(-1, 4)
bbox_pred_init = self.points2bbox(
pts_pred_init.reshape(-1, 2 * self.num_points), y_first=False)
bbox_gt_refine = bbox_gt_refine.reshape(-1, 4)
bbox_weights_refine = bbox_weights_refine.reshape(-1, 4)
bbox_pred_refine = self.points2bbox(
pts_pred_refine.reshape(-1, 2 * self.num_points), y_first=False)
normalize_term = self.point_base_scale * stride
loss_pts_init = self.loss_bbox_init(
bbox_pred_init / normalize_term,
bbox_gt_init / normalize_term,
bbox_weights_init,
avg_factor=num_total_samples_init)
loss_pts_refine = self.loss_bbox_refine(
bbox_pred_refine / normalize_term,
bbox_gt_refine / normalize_term,
bbox_weights_refine,
avg_factor=num_total_samples_refine)
return loss_cls, loss_pts_init, loss_pts_refine
def loss(self,
cls_scores,
pts_preds_init,
pts_preds_refine,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=None):
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
device = cls_scores[0].device
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
center_list, valid_flag_list = self.get_points(featmap_sizes,
img_metas, device)
pts_coordinate_preds_init = self.offset_to_pts(center_list,
pts_preds_init)
if self.train_cfg.init.assigner['type'] == 'PointAssigner':
candidate_list = center_list
else:
bbox_list = self.centers_to_bboxes(center_list)
candidate_list = bbox_list
cls_reg_targets_init = self.get_targets(
candidate_list,
valid_flag_list,
gt_bboxes,
img_metas,
gt_bboxes_ignore_list=gt_bboxes_ignore,
gt_labels_list=gt_labels,
stage='init',
label_channels=label_channels)
(*_, bbox_gt_list_init, candidate_list_init, bbox_weights_list_init,
num_total_pos_init, num_total_neg_init) = cls_reg_targets_init
num_total_samples_init = (
num_total_pos_init +
num_total_neg_init if self.sampling else num_total_pos_init)
center_list, valid_flag_list = self.get_points(featmap_sizes,
img_metas, device)
pts_coordinate_preds_refine = self.offset_to_pts(
center_list, pts_preds_refine)
bbox_list = []
for i_img, center in enumerate(center_list):
bbox = []
for i_lvl in range(len(pts_preds_refine)):
bbox_preds_init = self.points2bbox(
pts_preds_init[i_lvl].detach())
bbox_shift = bbox_preds_init * self.point_strides[i_lvl]
bbox_center = torch.cat(
[center[i_lvl][:, :2], center[i_lvl][:, :2]], dim=1)
bbox.append(bbox_center +
bbox_shift[i_img].permute(1, 2, 0).reshape(-1, 4))
bbox_list.append(bbox)
cls_reg_targets_refine = self.get_targets(
bbox_list,
valid_flag_list,
gt_bboxes,
img_metas,
gt_bboxes_ignore_list=gt_bboxes_ignore,
gt_labels_list=gt_labels,
stage='refine',
label_channels=label_channels)
(labels_list, label_weights_list, bbox_gt_list_refine,
candidate_list_refine, bbox_weights_list_refine, num_total_pos_refine,
num_total_neg_refine) = cls_reg_targets_refine
num_total_samples_refine = (
num_total_pos_refine +
num_total_neg_refine if self.sampling else num_total_pos_refine)
losses_cls, losses_pts_init, losses_pts_refine = multi_apply(
self.loss_single,
cls_scores,
pts_coordinate_preds_init,
pts_coordinate_preds_refine,
labels_list,
label_weights_list,
bbox_gt_list_init,
bbox_weights_list_init,
bbox_gt_list_refine,
bbox_weights_list_refine,
self.point_strides,
num_total_samples_init=num_total_samples_init,
num_total_samples_refine=num_total_samples_refine)
loss_dict_all = {
'loss_cls': losses_cls,
'loss_pts_init': losses_pts_init,
'loss_pts_refine': losses_pts_refine
}
return loss_dict_all
    def _get_bboxes_single(self,
                           cls_score_list,
                           bbox_pred_list,
                           score_factor_list,
                           mlvl_priors,
                           img_meta,
                           cfg,
                           rescale=False,
                           with_nms=True,
                           **kwargs):
        """Transform per-level head outputs for one image into detections.

        For each feature level the classification map is flattened and turned
        into per-point scores, low scores are filtered out (and the remainder
        capped at ``nms_pre`` candidates), the surviving regression offsets are
        decoded into boxes, and the per-level results are handed to
        ``self._bbox_post_process`` for rescaling/NMS.

        NOTE(review): ``score_factor_list`` and ``**kwargs`` are accepted but
        unused here — presumably kept for interface compatibility with the
        base class; confirm against the caller.
        """
        # Fall back to the configured test cfg when none is passed in.
        cfg = self.test_cfg if cfg is None else cfg
        assert len(cls_score_list) == len(bbox_pred_list)
        img_shape = img_meta['img_shape']
        nms_pre = cfg.get('nms_pre', -1)
        mlvl_bboxes = []
        mlvl_scores = []
        mlvl_labels = []
        for level_idx, (cls_score, bbox_pred, priors) in enumerate(
                zip(cls_score_list, bbox_pred_list, mlvl_priors)):
            # Score and regression maps must cover the same spatial grid.
            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
            # (4, H, W) -> (H*W, 4): one box prediction per grid point.
            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
            # (C, H, W) -> (H*W, C): one score vector per grid point.
            cls_score = cls_score.permute(1, 2,
                                          0).reshape(-1, self.cls_out_channels)
            if self.use_sigmoid_cls:
                scores = cls_score.sigmoid()
            else:
                # Softmax scores; the last channel is the background class
                # and is dropped.
                scores = cls_score.softmax(-1)[:, :-1]
            # Drop candidates below score_thr and keep at most nms_pre of the
            # rest; the dict keeps bbox_pred/priors aligned with the survivors.
            results = filter_scores_and_topk(
                scores, cfg.score_thr, nms_pre,
                dict(bbox_pred=bbox_pred, priors=priors))
            scores, labels, _, filtered_results = results
            bbox_pred = filtered_results['bbox_pred']
            priors = filtered_results['priors']
            # Decode offsets to absolute boxes, clipped to the image shape.
            bboxes = self._bbox_decode(priors, bbox_pred,
                                       self.point_strides[level_idx],
                                       img_shape)
            mlvl_bboxes.append(bboxes)
            mlvl_scores.append(scores)
            mlvl_labels.append(labels)
        # Merge levels, rescale to the original image and (optionally) run NMS.
        return self._bbox_post_process(
            mlvl_scores,
            mlvl_labels,
            mlvl_bboxes,
            img_meta['scale_factor'],
            cfg,
            rescale=rescale,
            with_nms=with_nms)
def _bbox_decode(self, points, bbox_pred, stride, max_shape):
bbox_pos_center = torch.cat([points[:, :2], points[:, :2]], dim=1)
bboxes = bbox_pred * stride + bbox_pos_center
x1 = bboxes[:, 0].clamp(min=0, max=max_shape[1])
y1 = bboxes[:, 1].clamp(min=0, max=max_shape[0])
x2 = bboxes[:, 2].clamp(min=0, max=max_shape[1])
y2 = bboxes[:, 3].clamp(min=0, max=max_shape[0])
decoded_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
return decoded_bboxes
| true | true |
f72041f40d309616756fd6cdeeec8f6b8869269e | 1,839 | py | Python | deeppavlov/__init__.py | techthiyanes/DeepPavlov | 08555428388fed3c7b036c0a82a70a25efcabcff | [
"Apache-2.0"
] | 5,893 | 2018-02-01T18:13:20.000Z | 2022-03-31T19:22:21.000Z | deeppavlov/__init__.py | Aniket27100709/DeepPavlov | d73f45733d6b23347871aa293309730303b64450 | [
"Apache-2.0"
] | 749 | 2018-01-31T11:36:02.000Z | 2022-03-30T07:24:22.000Z | deeppavlov/__init__.py | Aniket27100709/DeepPavlov | d73f45733d6b23347871aa293309730303b64450 | [
"Apache-2.0"
] | 1,155 | 2018-02-01T10:52:15.000Z | 2022-03-29T02:12:15.000Z | # Copyright 2017 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from pathlib import Path
from typing import Union

from ._meta import __author__, __description__, __email__, __keywords__, __license__, __version__
from .configs import configs
from .core.commands.infer import build_model
from .core.commands.train import train_evaluate_model_from_config
from .core.common.base import Element, Model
from .core.common.chainer import Chainer
from .core.common.log import init_logger
from .download import deep_download
# TODO: make better
def train_model(config: Union[str, Path, dict], download: bool = False, recursive: bool = False) -> Chainer:
    """Train the pipeline described by ``config`` and return the trained model.

    Args:
        config: path to a JSON config file (``str`` or ``Path``) or an
            already-parsed config ``dict``.
        download: passed through to ``train_evaluate_model_from_config``;
            when True, required data/model files are downloaded first.
        recursive: passed through to ``train_evaluate_model_from_config``.

    Returns:
        The pipeline built from ``config`` with trained weights loaded
        (``load_trained=True``).
    """
    # NOTE: the original annotation used a list literal ``[str, Path, dict]``,
    # which is not a valid type expression; ``Union`` expresses the intent.
    train_evaluate_model_from_config(config, download=download, recursive=recursive)
    return build_model(config, load_trained=True)
def evaluate_model(config: Union[str, Path, dict], download: bool = False, recursive: bool = False) -> dict:
    """Evaluate (without training) the pipeline described by ``config``.

    Args:
        config: path to a JSON config file (``str`` or ``Path``) or an
            already-parsed config ``dict``.
        download: passed through to ``train_evaluate_model_from_config``.
        recursive: passed through to ``train_evaluate_model_from_config``.

    Returns:
        Whatever ``train_evaluate_model_from_config`` returns when run with
        ``to_train=False`` (the evaluation results).
    """
    # Annotation fixed from the invalid list literal ``[str, Path, dict]``.
    return train_evaluate_model_from_config(config, to_train=False, download=download, recursive=recursive)
# Refuse to run on unsupported interpreters.  An ``assert`` is stripped under
# ``python -O``, so raise explicitly instead (0x3060000 == CPython 3.6.0).
if sys.hexversion < 0x3060000:
    raise RuntimeError('Does not work in python3.5 or lower')

# Resolve conflicts with previous DeepPavlov installations versioned up to
# 0.0.9: they created ~/.deeppavlov as a plain file, which must be removed.
dot_dp_path = Path('~/.deeppavlov').expanduser().resolve()
if dot_dp_path.is_file():
    dot_dp_path.unlink()

# Initiate logging.
init_logger()
| 38.3125 | 107 | 0.780859 |
import sys
from pathlib import Path
from ._meta import __author__, __description__, __email__, __keywords__, __license__, __version__
from .configs import configs
from .core.commands.infer import build_model
from .core.commands.train import train_evaluate_model_from_config
from .core.common.base import Element, Model
from .core.common.chainer import Chainer
from .core.common.log import init_logger
from .download import deep_download
def train_model(config: [str, Path, dict], download: bool = False, recursive: bool = False) -> Chainer:
    """Train the pipeline described by ``config`` (a config path or dict),
    then load and return the trained model (``load_trained=True``)."""
    train_evaluate_model_from_config(config, download=download, recursive=recursive)
    return build_model(config, load_trained=True)
def evaluate_model(config: [str, Path, dict], download: bool = False, recursive: bool = False) -> dict:
    """Evaluate the pipeline described by ``config`` without training it
    (``to_train=False``) and return the evaluation results."""
    return train_evaluate_model_from_config(config, to_train=False, download=download, recursive=recursive)
# Guard: requires Python >= 3.6 (0x3060000 == CPython 3.6.0).
assert sys.hexversion >= 0x3060000, 'Does not work in python3.5 or lower'
# Resolve conflicts with previous DeepPavlov installations versioned up to
# 0.0.9, which used ~/.deeppavlov as a plain file: remove it if present.
dot_dp_path = Path('~/.deeppavlov').expanduser().resolve()
if dot_dp_path.is_file():
    dot_dp_path.unlink()
# Set up the package's default logging configuration.
init_logger()
| true | true |
f720423411e3b69f1160d587bd41dbe72cd1922f | 66,041 | py | Python | tests/system_tests_two_routers.py | overmeulen/qpid-dispatch | a56b28ccb1b552c5b2dc0872dcde1fa09c725cab | [
"Apache-2.0"
] | null | null | null | tests/system_tests_two_routers.py | overmeulen/qpid-dispatch | a56b28ccb1b552c5b2dc0872dcde1fa09c725cab | [
"Apache-2.0"
] | 3 | 2019-09-30T03:11:04.000Z | 2020-03-06T17:15:54.000Z | tests/system_tests_two_routers.py | irinabov/debian-qpid-dispatch | 42fb2ffb65f8e8c8d616633c0b4308d6531a281d | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from time import sleep
import json, os
import logging
from threading import Timer
from subprocess import PIPE, STDOUT
from proton import Message, Timeout, Delivery
from system_test import TestCase, Process, Qdrouterd, main_module, TIMEOUT, DIR
from system_test import AsyncTestReceiver
from system_test import AsyncTestSender
from system_test import unittest
from proton.handlers import MessagingHandler
from proton.reactor import Container, AtLeastOnce
from proton.utils import BlockingConnection
from qpid_dispatch.management.client import Node
# Connection properties used to tag test connections (unicode keys plus an int
# value) so they can be singled out in management QUERY results by the tests
# that delete connections.
CONNECTION_PROPERTIES_UNICODE_STRING = {u'connection': u'properties', u'int_property': 6451}
class TwoRouterTest(TestCase):
    """System tests run against a two-router interior network (QDR.A <-> QDR.B)."""

    inter_router_port = None

    @classmethod
    def setUpClass(cls):
        """Start two connected routers: A listens on the inter-router port, B connects to it."""
        super(TwoRouterTest, cls).setUpClass()

        def router(name, client_server, connection):
            config = [
                # Use the deprecated attributes helloInterval, raInterval, raIntervalFlux, remoteLsMaxAge
                # The routers should still start successfully after using these deprecated entities.
                ('router', {'remoteLsMaxAge': 60, 'helloInterval': 1, 'raInterval': 30, 'raIntervalFlux': 4,
                            'mode': 'interior', 'id': 'QDR.%s' % name, 'allowUnsettledMulticast': 'yes'}),
                ('listener', {'port': cls.tester.get_port(), 'stripAnnotations': 'no', 'linkCapacity': 500}),
                ('listener', {'port': cls.tester.get_port(), 'stripAnnotations': 'no'}),
                ('listener', {'port': cls.tester.get_port(), 'stripAnnotations': 'both'}),
                ('listener', {'port': cls.tester.get_port(), 'stripAnnotations': 'out'}),
                ('listener', {'port': cls.tester.get_port(), 'stripAnnotations': 'in'}),
                ('address', {'prefix': 'closest', 'distribution': 'closest'}),
                ('address', {'prefix': 'balanced', 'distribution': 'balanced'}),
                ('address', {'prefix': 'multicast', 'distribution': 'multicast'}),
                # for testing pattern matching
                ('address', {'pattern': 'a.b.c.d',
                             'distribution': 'closest'}),
                ('address', {'pattern': '#.b.c.d',
                             'distribution': 'multicast'}),
                ('address', {'pattern': 'a/*/#/d',
                             'distribution': 'closest'}),
                ('address', {'pattern': '*/b/c/d',
                             'distribution': 'multicast'}),
                ('address', {'pattern': 'a.x.d',
                             'distribution': 'closest'}),
                ('address', {'pattern': 'a.*.d',
                             'distribution': 'multicast'}),
                connection
            ]

            config = Qdrouterd.Config(config)
            cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))

        cls.routers = []

        inter_router_port = cls.tester.get_port()

        router('A', 'server',
               ('listener', {'role': 'inter-router', 'port': inter_router_port}))
        router('B', 'client',
               ('connector', {'name': 'connectorToA', 'role': 'inter-router', 'port': inter_router_port,
                              'verifyHostname': 'no'}))

        # Wait until both routers see each other before running any test.
        cls.routers[0].wait_router_connected('QDR.B')
        cls.routers[1].wait_router_connected('QDR.A')

    def address(self):
        """Default client address: the first listener of router A."""
        return self.routers[0].addresses[0]

    def run_qdmanage(self, cmd, input=None, expect=Process.EXIT_OK, address=None):
        """Run the qdmanage CLI against ``address`` (router A by default) and return its output."""
        p = self.popen(
            ['qdmanage'] + cmd.split(' ') + ['--bus', address or self.address(), '--indent=-1', '--timeout', str(TIMEOUT)],
            stdin=PIPE, stdout=PIPE, stderr=STDOUT, expect=expect,
            universal_newlines=True)
        out = p.communicate(input)[0]
        try:
            p.teardown()
        except Exception as e:
            raise Exception(out if out else str(e))
        return out

    def test_01_pre_settled(self):
        """Pre-settled deliveries must transit A -> B and bump deliveriesTransit."""
        test = DeliveriesInTransit(self.routers[0].addresses[0], self.routers[1].addresses[0])
        test.run()
        self.assertEqual(None, test.error)

        local_node = Node.connect(self.routers[0].addresses[0], timeout=TIMEOUT)
        outs = local_node.query(type='org.apache.qpid.dispatch.router')
        # deliveriesTransit must most surely be greater than num_msgs
        pos = outs.attribute_names.index("deliveriesTransit")
        results = outs.results[0]
        self.assertTrue(results[pos] > 104)

    def test_02a_multicast_unsettled(self):
        test = MulticastUnsettled(self.routers[0].addresses[0])
        test.run()
        self.assertEqual(None, test.error)

    def test_02c_sender_settles_first(self):
        test = SenderSettlesFirst(self.routers[0].addresses[0], self.routers[1].addresses[0])
        test.run()
        self.assertEqual(None, test.error)

    def test_03_message_annotations(self):
        test = MessageAnnotationsTest(self.routers[0].addresses[0], self.routers[1].addresses[0])
        test.run()
        self.assertEqual(None, test.error)

    def test_03a_test_strip_message_annotations_no(self):
        test = MessageAnnotationsStripTest(self.routers[0].addresses[1], self.routers[1].addresses[1])
        test.run()
        self.assertEqual(None, test.error)

    def test_03a_test_strip_message_annotations_no_add_trace(self):
        test = MessageAnnotationsStripAddTraceTest(self.routers[0].addresses[1], self.routers[1].addresses[1])
        test.run()
        self.assertEqual(None, test.error)

    def test_03a_test_strip_message_annotations_both_add_ingress_trace(self):
        test = MessageAnnotationsStripBothAddIngressTrace(self.routers[0].addresses[2], self.routers[1].addresses[2])
        test.run()
        self.assertEqual(None, test.error)

    def test_03a_test_strip_message_annotations_out(self):
        test = MessageAnnotationsStripMessageAnnotationsOut(self.routers[0].addresses[3], self.routers[1].addresses[3])
        test.run()
        self.assertEqual(None, test.error)

    def test_03a_test_strip_message_annotations_in(self):
        test = MessageAnnotationStripMessageAnnotationsIn(self.routers[0].addresses[4], self.routers[1].addresses[4])
        test.run()
        self.assertEqual(None, test.error)

    def test_04_management(self):
        test = ManagementTest(self.routers[0].addresses[0])
        test.run()
        self.assertEqual(None, test.error)

    def test_06_semantics_closest_is_local(self):
        test = SemanticsClosestIsLocal(self.routers[0].addresses[0], self.routers[1].addresses[0])
        test.run()
        self.assertEqual(None, test.error)

    def test_07_semantics_closest_is_remote(self):
        test = SemanticsClosestIsRemote(self.routers[0].addresses[0], self.routers[1].addresses[0])
        test.run()
        self.assertEqual(None, test.error)

    def test_08_semantics_balanced(self):
        test = SemanticsBalanced(self.routers[0].addresses[0], self.routers[0].addresses[1],
                                 self.routers[1].addresses[0])
        test.run()
        self.assertEqual(None, test.error)

    def test_09_to_override(self):
        test = MessageAnnotaionsPreExistingOverride(self.routers[0].addresses[0], self.routers[1].addresses[0])
        test.run()
        self.assertEqual(None, test.error)

    def test_10_propagated_disposition(self):
        test = PropagatedDisposition(self, self.routers[0].addresses[0], self.routers[1].addresses[0])
        test.run()
        self.assertTrue(test.passed)

    def test_11_three_ack(self):
        test = ThreeAck(self, self.routers[0].addresses[0], self.routers[1].addresses[0])
        test.run()

    def test_12_excess_deliveries_released(self):
        """
        Message-route a series of deliveries where the receiver provides credit for a subset and
        once received, closes the link. The remaining deliveries should be released back to the sender.
        """
        test = ExcessDeliveriesReleasedTest(self.routers[0].addresses[0], self.routers[1].addresses[0])
        test.run()
        self.assertEqual(None, test.error)

    def test_15_attach_on_inter_router(self):
        test = AttachOnInterRouterTest(self.routers[0].addresses[5])
        test.run()
        self.assertEqual(None, test.error)

    def test_17_address_wildcard(self):
        """Verify the proper distribution is selected by each configured address pattern."""
        addresses = [
            # (address, count of messages expected to be received)
            ('a.b.c.d', 1),    # closest 'a.b.c.d'
            ('b.c.d', 2),      # multi   '#.b.c.d'
            ('f.a.b.c.d', 2),  # multi   '#.b.c.d
            ('a.c.d', 2),      # multi   'a.*.d'
            ('a/c/c/d', 1),    # closest 'a/*/#.d
            ('a/x/z/z/d', 1),  # closest 'a/*/#.d
            ('a/x/d', 1),      # closest 'a.x.d'
            ('a.x.e', 1),      # balanced ----
            ('m.b.c.d', 2)     # multi   '*/b/c/d'
        ]

        # two receivers per address - one for each router
        receivers = []
        for a in addresses:
            for x in range(2):
                ar = AsyncTestReceiver(address=self.routers[x].addresses[0],
                                       source=a[0])
                receivers.append(ar)

        # wait for the consumer info to propagate
        for a in addresses:
            self.routers[0].wait_address(a[0], 1, 1)
            self.routers[1].wait_address(a[0], 1, 1)

        # send one message to each address
        conn = BlockingConnection(self.routers[0].addresses[0])
        sender = conn.create_sender(address=None, options=AtLeastOnce())
        for a in addresses:
            sender.send(Message(address=a[0], body={'address': a[0]}))

        # count received messages by address
        msgs_recvd = {}
        for M in receivers:
            try:
                while True:
                    i = M.queue.get(timeout=0.2).body.get('address', "ERROR")
                    if i not in msgs_recvd:
                        msgs_recvd[i] = 0
                    msgs_recvd[i] += 1
            except AsyncTestReceiver.Empty:
                pass

        # verify expected count == actual count
        self.assertTrue("ERROR" not in msgs_recvd)
        for a in addresses:
            self.assertTrue(a[0] in msgs_recvd)
            self.assertEqual(a[1], msgs_recvd[a[0]])

        for M in receivers:
            M.stop()
        conn.close()

    def test_17_large_streaming_test(self):
        test = LargeMessageStreamTest(self.routers[0].addresses[0], self.routers[1].addresses[0])
        test.run()
        self.assertEqual(None, test.error)

    def test_18_single_char_dest_test(self):
        test = SingleCharacterDestinationTest(self.routers[0].addresses[0], self.routers[1].addresses[0])
        test.run()
        self.assertEqual(None, test.error)

    def test_19_delete_inter_router_connection(self):
        """
        This test tries to delete an inter-router connection but is
        prevented from doing so.
        """
        query_command = 'QUERY --type=connection'
        outputs = json.loads(self.run_qdmanage(query_command))
        identity = None
        passed = False

        for output in outputs:
            if "inter-router" == output['role']:
                identity = output['identity']

        if identity:
            update_command = 'UPDATE --type=connection adminStatus=deleted --id=' + identity
            try:
                json.loads(self.run_qdmanage(update_command))
            except Exception as e:
                if "Forbidden" in str(e):
                    passed = True

        # The test has passed since we were forbidden from deleting
        # inter-router connections even though we are allowed to update the adminStatus field.
        self.assertTrue(passed)

    def test_20_delete_connection(self):
        """
        This test creates a blocking connection and tries to delete that connection.
        Since there is no policy associated with this router, the default for allowAdminStatusUpdate is true,
        the delete operation will be permitted.
        """
        # Create a connection with some properties so we can easily identify
        # the connection.  Keep a reference so the connection stays open while
        # we query and delete it.
        connection = BlockingConnection(self.address(),
                                        properties=CONNECTION_PROPERTIES_UNICODE_STRING)
        query_command = 'QUERY --type=connection'
        outputs = json.loads(self.run_qdmanage(query_command))
        identity = None
        passed = False

        for output in outputs:
            if output.get('properties'):
                conn_properties = output['properties']
                # Find the connection that has our properties - CONNECTION_PROPERTIES_UNICODE_STRING
                # Delete that connection and run another qdmanage to see
                # if the connection is gone.
                if conn_properties.get('int_property'):
                    identity = output.get("identity")
                    if identity:
                        update_command = 'UPDATE --type=connection adminStatus=deleted --id=' + identity
                        try:
                            self.run_qdmanage(update_command)
                            query_command = 'QUERY --type=connection'
                            outputs = json.loads(
                                self.run_qdmanage(query_command))
                            no_properties = True
                            for output in outputs:
                                if output.get('properties'):
                                    no_properties = False
                                    conn_properties = output['properties']
                                    if conn_properties.get('int_property'):
                                        passed = False
                                        break
                                    else:
                                        passed = True
                            if no_properties:
                                passed = True
                        except Exception as e:
                            passed = False

        # The test has passed since we were allowed to delete a connection
        # because we have the policy permission to do so.
        self.assertTrue(passed)

    def test_21_delete_connection_with_receiver(self):
        test = DeleteConnectionWithReceiver(self.routers[0].addresses[0])
        test.run()
        # BUGFIX: the error must be checked *after* run(); the original
        # asserted before the handler ran, which always passed trivially.
        self.assertEqual(test.error, None)

    def test_30_huge_address(self):
        # try a link with an extremely long address
        # DISPATCH-1461
        addr = "A" * 2019
        rx = AsyncTestReceiver(self.routers[0].addresses[0],
                               source=addr)
        tx = AsyncTestSender(self.routers[1].addresses[0],
                             target=addr,
                             count=100)
        tx.wait()

        i = 100
        while i:
            try:
                rx.queue.get(timeout=TIMEOUT)
                i -= 1
            except AsyncTestReceiver.Empty:
                break
        self.assertEqual(0, i)
        rx.stop()
class DeleteConnectionWithReceiver(MessagingHandler):
    """Open a tagged receiver connection, then use the management API to find
    it, delete it, and verify that a subsequent QUERY no longer lists it."""

    def __init__(self, address):
        super(DeleteConnectionWithReceiver, self).__init__()
        self.address = address
        self.mgmt_receiver = None    # reply link for the initial QUERY
        self.mgmt_receiver_1 = None  # reply link for the UPDATE (delete)
        self.mgmt_receiver_2 = None  # reply link for the verification QUERY
        self.conn_to_kill = None
        self.mgmt_conn = None
        self.mgmt_sender = None
        self.success = False
        self.error = None

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        # Create a receiver connection with some properties so it
        # can be easily identified.
        self.conn_to_kill = event.container.connect(self.address, properties=CONNECTION_PROPERTIES_UNICODE_STRING)
        self.receiver_to_kill = event.container.create_receiver(self.conn_to_kill, "hello_world")
        self.mgmt_conn = event.container.connect(self.address)
        self.mgmt_sender = event.container.create_sender(self.mgmt_conn)
        self.mgmt_receiver = event.container.create_receiver(self.mgmt_conn, None, dynamic=True)
        self.mgmt_receiver_1 = event.container.create_receiver(self.mgmt_conn,
                                                               None,
                                                               dynamic=True)
        self.mgmt_receiver_2 = event.container.create_receiver(self.mgmt_conn,
                                                               None,
                                                               dynamic=True)

    def timeout(self):
        # BUGFIX: the original formatted self.n_sent/self.n_received, which
        # are never defined on this class and raised AttributeError here.
        self.error = "Timeout Expired"
        self.mgmt_conn.close()
        self.conn_to_kill.close()

    def bail(self, error):
        """Finish the test: record ``error`` (None == success) and tear down."""
        self.error = error
        self.timer.cancel()
        self.mgmt_conn.close()
        self.conn_to_kill.close()

    def on_link_opened(self, event):
        if event.receiver == self.mgmt_receiver:
            # First reply link is up: query all connections.
            request = Message()
            request.address = "amqp:/_local/$management"
            request.properties = {
                u'type': u'org.apache.qpid.dispatch.connection',
                u'operation': u'QUERY'}
            request.reply_to = self.mgmt_receiver.remote_source.address
            self.mgmt_sender.send(request)

    def on_message(self, event):
        if event.receiver == self.mgmt_receiver:
            # Initial QUERY reply: locate the tagged connection and delete it.
            attribute_names = event.message.body['attributeNames']
            property_index = attribute_names.index('properties')
            identity_index = attribute_names.index('identity')

            for result in event.message.body['results']:
                if result[property_index]:
                    properties = result[property_index]
                    if properties.get('int_property'):
                        identity = result[identity_index]
                        if identity:
                            request = Message()
                            request.address = "amqp:/_local/$management"
                            request.properties = {
                                u'identity': identity,
                                u'type': u'org.apache.qpid.dispatch.connection',
                                u'operation': u'UPDATE'
                            }
                            request.body = {
                                u'adminStatus': u'deleted'}
                            request.reply_to = self.mgmt_receiver_1.remote_source.address
                            self.mgmt_sender.send(request)
        elif event.receiver == self.mgmt_receiver_1:
            # UPDATE reply: the delete succeeded, so re-query to verify.
            if event.message.properties['statusDescription'] == 'OK' and event.message.body['adminStatus'] == 'deleted':
                request = Message()
                request.address = "amqp:/_local/$management"
                request.properties = {u'type': u'org.apache.qpid.dispatch.connection',
                                      u'operation': u'QUERY'}
                request.reply_to = self.mgmt_receiver_2.remote_source.address
                self.mgmt_sender.send(request)
        elif event.receiver == self.mgmt_receiver_2:
            # Verification QUERY reply: the tagged connection must be gone.
            attribute_names = event.message.body['attributeNames']
            property_index = attribute_names.index('properties')
            identity_index = attribute_names.index('identity')
            for result in event.message.body['results']:
                if result[property_index]:
                    properties = result[property_index]
                    if properties and properties.get('int_property'):
                        self.bail("Connection not deleted")
            self.bail(None)

    def run(self):
        Container(self).run()
class Timeout(object):
    """Adapter handed to ``reactor.schedule``: when the timer fires, it
    notifies the owning test by calling its ``timeout()`` method."""

    def __init__(self, parent):
        # The test object to notify on expiry.
        self._owner = parent

    def on_timer_task(self, event):
        # ``event`` is unused; simply tell the owner that time is up.
        self._owner.timeout()
class SingleCharacterDestinationTest(MessagingHandler):
    """Send a single message across the two routers to an address that is a
    single character ("x") and verify it is delivered."""
    def __init__(self, address1, address2):
        super(SingleCharacterDestinationTest, self).__init__()
        self.address1 = address1   # router the sender attaches to
        self.address2 = address2   # router the receiver attaches to
        self.dest = "x"
        self.error = None          # stays None on success
        self.conn1 = None
        self.conn2 = None
        self.count = 1
        self.n_sent = 0
        self.timer = None
        self.sender = None
        self.receiver = None
        self.n_received = 0
        self.body = "xyz"
    def check_if_done(self):
        # Tear down once every expected message has arrived.
        if self.n_received == self.count:
            self.timer.cancel()
            self.conn1.close()
            self.conn2.close()
    def timeout(self):
        # Fired by the scheduled Timeout if the test does not finish in time.
        self.error = "Timeout Expired: sent=%d, received=%d" % (self.n_sent, self.n_received)
        self.conn1.close()
        self.conn2.close()
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn1 = event.container.connect(self.address1)
        self.conn2 = event.container.connect(self.address2)
        self.sender = event.container.create_sender(self.conn1, self.dest)
        self.receiver = event.container.create_receiver(self.conn2, self.dest)
    def on_sendable(self, event):
        # Send until the configured count is reached.
        if self.n_sent < self.count:
            msg = Message(body=self.body)
            event.sender.send(msg)
            self.n_sent += 1
    def on_message(self, event):
        self.n_received += 1
        self.check_if_done()
    def run(self):
        Container(self).run()
class LargeMessageStreamTest(MessagingHandler):
    """Stream 10 large messages (~220,000-character bodies) between the two
    routers and verify they are all delivered."""

    def __init__(self, address1, address2):
        super(LargeMessageStreamTest, self).__init__()
        self.address1 = address1   # sender-side router
        self.address2 = address2   # receiver-side router
        self.dest = "LargeMessageStreamTest"
        self.error = None          # stays None on success
        self.conn1 = None
        self.conn2 = None
        self.count = 10
        self.n_sent = 0
        self.timer = None
        self.sender = None
        self.receiver = None
        self.n_received = 0
        # Build the large payload in one shot: repeating the 22-character
        # chunk 10000 times replaces the original quadratic ``+=`` loop.
        self.body = "0123456789101112131415" * 10000

    def check_if_done(self):
        # Tear down once every expected message has arrived.
        if self.n_received == self.count:
            self.timer.cancel()
            self.conn1.close()
            self.conn2.close()

    def timeout(self):
        # Fired by the scheduled Timeout if the test does not finish in time.
        self.error = "Timeout Expired: sent=%d, received=%d" % (self.n_sent, self.n_received)
        self.conn1.close()
        self.conn2.close()

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn1 = event.container.connect(self.address1)
        self.conn2 = event.container.connect(self.address2)
        self.sender = event.container.create_sender(self.conn1, self.dest)
        self.receiver = event.container.create_receiver(self.conn2, self.dest)

    def on_sendable(self, event):
        if self.n_sent < self.count:
            msg = Message(body=self.body)
            # send(msg) calls the stream function which streams data from
            # sender to the router
            event.sender.send(msg)
            self.n_sent += 1

    def on_message(self, event):
        self.n_received += 1
        self.check_if_done()

    def run(self):
        Container(self).run()
class ExcessDeliveriesReleasedTest(MessagingHandler):
    """Send 10 deliveries to a receiver that grants credit for only 6 and then
    closes its link: expect 6 accepted/received and the remaining 4 released."""
    def __init__(self, address1, address2):
        super(ExcessDeliveriesReleasedTest, self).__init__(prefetch=0)
        self.address1 = address1   # sender-side router
        self.address2 = address2   # receiver-side router
        self.dest = "closest.EDRtest"
        self.error = None          # stays None on success
        self.sender = None
        self.receiver = None
        self.n_sent = 0
        self.n_received = 0
        self.n_accepted = 0
        self.n_released = 0
        self.timer = None
        self.conn1 = None
        self.conn2 = None
    def timeout(self):
        # Fired by the scheduled Timeout if the test does not finish in time.
        self.error = "Timeout Expired"
        self.conn1.close()
        self.conn2.close()
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn1 = event.container.connect(self.address1)
        self.conn2 = event.container.connect(self.address2)
        self.sender = event.container.create_sender(self.conn1, self.dest)
        self.receiver = event.container.create_receiver(self.conn2, self.dest)
        # Grant credit for only 6 of the 10 deliveries.
        self.receiver.flow(6)
    def on_sendable(self, event):
        # Top up to 10 total sends regardless of how often this fires.
        for i in range(10 - self.n_sent):
            msg = Message(body=i)
            event.sender.send(msg)
            self.n_sent += 1
    def on_accepted(self, event):
        self.n_accepted += 1
    def on_released(self, event):
        self.n_released += 1
        # Once all 4 excess deliveries have been released, verify the counts.
        if self.n_released == 4:
            if self.n_accepted != 6:
                self.error = "Expected 6 accepted, got %d" % self.n_accepted
            if self.n_received != 6:
                self.error = "Expected 6 received, got %d" % self.n_received
            self.conn1.close()
            self.conn2.close()
            self.timer.cancel()
    def on_message(self, event):
        self.n_received += 1
        # Close the link after the 6 credited messages; the rest must be
        # released back to the sender.
        if self.n_received == 6:
            self.receiver.close()
    def run(self):
        Container(self).run()
class AttachOnInterRouterTest(MessagingHandler):
    """Expect an error when attaching a link to an inter-router listener"""
    def __init__(self, address):
        super(AttachOnInterRouterTest, self).__init__(prefetch=0)
        self.address = address   # the inter-router listener's address
        self.dest = "AOIRtest"
        self.error = None        # stays None on success
        self.sender = None
        self.timer = None
        self.conn = None
    def timeout(self):
        # Fired by the scheduled Timeout if the router never rejects the link.
        self.error = "Timeout Expired"
        self.conn.close()
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn = event.container.connect(self.address)
        self.sender = event.container.create_sender(self.conn, self.dest)
    def on_link_remote_close(self, event):
        # The router closing the link is the expected (successful) outcome.
        self.conn.close()
        self.timer.cancel()
    def run(self):
        logging.disable(logging.ERROR) # Hide expected log errors
        try:
            Container(self).run()
        finally:
            logging.disable(logging.NOTSET) # Restore to normal
class DeliveriesInTransit(MessagingHandler):
    """Send 104 messages across the two-router network and verify every one
    arrives (used by test_01 to drive the deliveriesTransit counter)."""

    def __init__(self, address1, address2):
        super(DeliveriesInTransit, self).__init__()
        self.address1 = address1   # sender-side router
        self.address2 = address2   # receiver-side router
        self.dest = "pre_settled.1"
        # Cleared only when all messages have been received.
        self.error = "All messages not received"
        self.n_sent = 0
        self.timer = None
        self.conn1 = None
        self.conn2 = None
        self.sender = None
        self.num_msgs = 104
        self.sent_count = 0
        self.received_count = 0
        self.receiver = None

    def timeout(self):
        # BUGFIX: this method was missing, so a firing timer raised
        # AttributeError inside Timeout.on_timer_task instead of failing
        # the test cleanly.
        self.error = "Timeout Expired: sent=%d, received=%d" % (self.n_sent, self.received_count)
        self.conn1.close()
        self.conn2.close()

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn1 = event.container.connect(self.address1)
        self.sender = event.container.create_sender(self.conn1, self.dest)
        self.conn2 = event.container.connect(self.address2)
        self.receiver = event.container.create_receiver(self.conn2, self.dest)

    def on_sendable(self, event):
        if self.n_sent < self.num_msgs:
            msg = Message(body="Hello World")
            self.sender.send(msg)
            self.n_sent += 1

    def check_if_done(self):
        # Success: everything sent has been received.
        if self.n_sent == self.received_count:
            self.error = None
            self.timer.cancel()
            self.conn1.close()
            self.conn2.close()

    def on_message(self, event):
        self.received_count += 1
        self.check_if_done()

    def run(self):
        Container(self).run()
class MessageAnnotationsTest(MessagingHandler):
    """Send one message A -> B and verify the routers stamp the expected
    x-opt-qd.ingress / x-opt-qd.trace message annotations."""

    def __init__(self, address1, address2):
        super(MessageAnnotationsTest, self).__init__()
        self.address1 = address1   # sender-side router (QDR.A)
        self.address2 = address2   # receiver-side router (QDR.B)
        self.dest = "ma/1"
        # Cleared only when the expected annotations are observed.
        self.error = "Message annotations not found"
        self.timer = None
        self.conn1 = None
        self.conn2 = None
        self.sender = None
        self.receiver = None
        self.sent_count = 0
        self.msg_not_sent = True

    def timeout(self):
        # BUGFIX: this method was missing, so a firing timer raised
        # AttributeError inside Timeout.on_timer_task. Leave self.error at its
        # "not found" default and tear down.
        self.conn1.close()
        self.conn2.close()

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn1 = event.container.connect(self.address1)
        self.sender = event.container.create_sender(self.conn1, self.dest)
        self.conn2 = event.container.connect(self.address2)
        self.receiver = event.container.create_receiver(self.conn2, self.dest)

    def on_sendable(self, event):
        # Send exactly one message.
        if self.msg_not_sent:
            msg = Message(body={'number': 0})
            event.sender.send(msg)
            self.msg_not_sent = False

    def on_message(self, event):
        if 0 == event.message.body['number']:
            ma = event.message.annotations
            # The ingress router and the full trace must both be present.
            if ma['x-opt-qd.ingress'] == '0/QDR.A' and ma['x-opt-qd.trace'] == ['0/QDR.A', '0/QDR.B']:
                self.error = None
        self.accept(event.delivery)
        self.timer.cancel()
        self.conn1.close()
        self.conn2.close()

    def run(self):
        Container(self).run()
class MessageAnnotationsStripTest(MessagingHandler):
    """Send a message carrying user annotations through listeners configured
    with stripAnnotations='no' and verify both the user annotations and the
    router-added ingress/trace annotations survive end to end."""

    def __init__(self, address1, address2):
        super(MessageAnnotationsStripTest, self).__init__()
        self.address1 = address1   # sender-side router (QDR.A)
        self.address2 = address2   # receiver-side router (QDR.B)
        self.dest = "message_annotations_strip_no/1"
        # Cleared only when all expected annotations are observed.
        self.error = "Message annotations not found"
        self.timer = None
        self.conn1 = None
        self.conn2 = None
        self.sender = None
        self.receiver = None
        self.sent_count = 0
        self.msg_not_sent = True

    def timeout(self):
        # BUGFIX: this method was missing, so a firing timer raised
        # AttributeError inside Timeout.on_timer_task. Leave self.error at its
        # "not found" default and tear down.
        self.conn1.close()
        self.conn2.close()

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn1 = event.container.connect(self.address1)
        self.sender = event.container.create_sender(self.conn1, self.dest)
        self.conn2 = event.container.connect(self.address2)
        self.receiver = event.container.create_receiver(self.conn2, self.dest)

    def on_sendable(self, event):
        # Send exactly one message carrying user annotations.
        if self.msg_not_sent:
            msg = Message(body={'number': 0})
            ingress_message_annotations = {'work': 'hard', 'stay': 'humble'}
            msg.annotations = ingress_message_annotations
            event.sender.send(msg)
            self.msg_not_sent = False

    def on_message(self, event):
        if 0 == event.message.body['number']:
            ma = event.message.annotations
            # Router annotations AND the original user annotations must
            # all be present since nothing was stripped.
            if ma['x-opt-qd.ingress'] == '0/QDR.A' and ma['x-opt-qd.trace'] == ['0/QDR.A', '0/QDR.B'] \
                    and ma['work'] == 'hard' and ma['stay'] == 'humble':
                self.error = None
        self.accept(event.delivery)
        self.timer.cancel()
        self.conn1.close()
        self.conn2.close()

    def run(self):
        Container(self).run()
class ManagementTest(MessagingHandler):
    """Issue GET-MGMT-NODES both to the local router ($management) and to the
    remote router (_topo/0/QDR.B) and verify each reply lists the other
    router's management address."""

    def __init__(self, address):
        super(ManagementTest, self).__init__()
        self.address = address
        self.timer = None
        self.conn = None
        self.sender = None
        self.receiver = None
        self.sent_count = 0
        self.msg_not_sent = True
        self.error = None
        self.response1 = False  # valid reply seen for correlation id C1
        self.response2 = False  # valid reply seen for correlation id C2

    def timeout(self):
        """Record which of the two expected replies never arrived."""
        # BUGFIX: the original tested response1 twice (copy-paste) so a
        # missing C2 reply was never reported, and the string concatenation
        # could raise TypeError on a None error.
        missing = []
        if not self.response1:
            missing.append("Incorrect response received for message with correlation id C1")
        if not self.response2:
            missing.append("Incorrect response received for message with correlation id C2")
        self.error = " ".join(missing) or "Timeout Expired"
        self.conn.close()

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn = event.container.connect(self.address)
        self.sender = event.container.create_sender(self.conn)
        self.receiver = event.container.create_receiver(self.conn, None, dynamic=True)

    def on_link_opened(self, event):
        if event.receiver == self.receiver:
            # C1: query the local management node.
            request = Message()
            request.correlation_id = "C1"
            request.address = "amqp:/_local/$management"
            request.properties = {u'type': u'org.amqp.management', u'name': u'self', u'operation': u'GET-MGMT-NODES'}
            request.reply_to = self.receiver.remote_source.address
            self.sender.send(request)

            # C2: query the remote router's management node.
            request = Message()
            request.address = "amqp:/_topo/0/QDR.B/$management"
            request.correlation_id = "C2"
            request.reply_to = self.receiver.remote_source.address
            request.properties = {u'type': u'org.amqp.management', u'name': u'self', u'operation': u'GET-MGMT-NODES'}
            self.sender.send(request)

    def on_message(self, event):
        if event.receiver == self.receiver:
            if event.message.correlation_id == "C1":
                if event.message.properties['statusCode'] == 200 and \
                        event.message.properties['statusDescription'] is not None \
                        and 'amqp:/_topo/0/QDR.B/$management' in event.message.body:
                    self.response1 = True
            elif event.message.correlation_id == "C2":
                if event.message.properties['statusCode'] == 200 and \
                        event.message.properties['statusDescription'] is not None \
                        and 'amqp:/_topo/0/QDR.A/$management' in event.message.body:
                    self.response2 = True
            # BUGFIX: finish only once *both* replies have been validated.
            # The original closed on the first reply (self.error started as
            # None), leaving the C2 reply unverified.
            if self.response1 and self.response2:
                self.error = None
                self.timer.cancel()
                self.conn.close()

    def run(self):
        Container(self).run()
class MessageAnnotationStripMessageAnnotationsIn(MessagingHandler):
    """Send a message carrying faked qd ingress/trace annotations through
    listeners configured with stripAnnotations='in' and verify the routers
    replaced them with their own values ('0/QDR.A' ingress, A->B trace).
    """
    def __init__(self, address1, address2):
        super(MessageAnnotationStripMessageAnnotationsIn, self).__init__()
        self.address1 = address1
        self.address2 = address2
        self.dest = "strip_message_annotations_in/1"
        # Default error; cleared only when the expected annotations arrive.
        self.error = "Message annotations not found"
        self.timer = None
        self.conn1 = None
        self.conn2 = None
        self.sender = None
        self.receiver = None
        self.sent_count = 0
        self.msg_not_sent = True  # guard: send exactly one message
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn1 = event.container.connect(self.address1)
        self.sender = event.container.create_sender(self.conn1, self.dest)
        self.conn2 = event.container.connect(self.address2)
        self.receiver = event.container.create_receiver(self.conn2, self.dest)
    def on_sendable(self, event):
        if self.msg_not_sent:
            msg = Message(body={'number': 0})
            # Pre-existing (faked) ingress and trace annotations that the
            # ingress router is expected to strip and replace.
            ingress_message_annotations = {'x-opt-qd.ingress': 'ingress-router', 'x-opt-qd.trace': ['X/QDR']}
            msg.annotations = ingress_message_annotations
            event.sender.send(msg)
            self.msg_not_sent = False
    def on_message(self, event):
        if 0 == event.message.body['number']:
            # The faked values must be gone; the routers' own ingress and
            # trace annotations must be present instead.
            if event.message.annotations['x-opt-qd.ingress'] == '0/QDR.A' \
                    and event.message.annotations['x-opt-qd.trace'] == ['0/QDR.A', '0/QDR.B']:
                self.error = None
                self.timer.cancel()
                self.conn1.close()
                self.conn2.close()
    def run(self):
        Container(self).run()
class MessageAnnotaionsPreExistingOverride(MessagingHandler):
    """Send a message with a pre-set 'x-opt-qd.to' (to-override) annotation
    through the default (no-strip) listeners and verify it survives to the
    receiver unchanged.
    NOTE(review): class name misspells "Annotations"; kept as-is since it is
    referenced by callers (test_09_to_override).
    """
    def __init__(self, address1, address2):
        super(MessageAnnotaionsPreExistingOverride, self).__init__()
        self.address1 = address1
        self.address2 = address2
        self.dest = "toov/1"
        # Default error; cleared when the annotation arrives intact.
        self.error = "Pre-existing x-opt-qd.to has been stripped"
        self.timer = None
        self.conn1 = None
        self.conn2 = None
        self.sender = None
        self.receiver = None
        self.msg_not_sent = True  # guard: send exactly one message
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn1 = event.container.connect(self.address1)
        self.sender = event.container.create_sender(self.conn1, self.dest)
        self.conn2 = event.container.connect(self.address2)
        self.receiver = event.container.create_receiver(self.conn2, self.dest)
    def on_sendable(self, event):
        if self.msg_not_sent:
            msg = Message(body={'number': 0})
            msg.annotations = {'x-opt-qd.to': 'toov/1'}
            event.sender.send(msg)
            self.msg_not_sent = False
    def on_message(self, event):
        if 0 == event.message.body['number']:
            ma = event.message.annotations
            if ma['x-opt-qd.to'] == 'toov/1':
                self.error = None
                self.timer.cancel()
                self.conn1.close()
                self.conn2.close()
    def run(self):
        Container(self).run()
class MessageAnnotationsStripMessageAnnotationsOut(MessagingHandler):
    """Send a plain message through listeners configured with
    stripAnnotations='out' and verify the receiver sees NO annotations:
    the router-added ones must be stripped on egress.
    """
    def __init__(self, address1, address2):
        super(MessageAnnotationsStripMessageAnnotationsOut, self).__init__()
        self.address1 = address1
        self.address2 = address2
        self.dest = "strip_message_annotations_out/1"
        # Default error; cleared when the message arrives annotation-free.
        self.error = "Message annotations not found"
        self.timer = None
        self.conn1 = None
        self.conn2 = None
        self.sender = None
        self.receiver = None
        self.sent_count = 0
        self.msg_not_sent = True  # guard: send exactly one message
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn1 = event.container.connect(self.address1)
        self.sender = event.container.create_sender(self.conn1, self.dest)
        self.conn2 = event.container.connect(self.address2)
        self.receiver = event.container.create_receiver(self.conn2, self.dest)
    def on_sendable(self, event):
        if self.msg_not_sent:
            # No annotations supplied by the sender.
            msg = Message(body={'number': 0})
            event.sender.send(msg)
            self.msg_not_sent = False
    def on_message(self, event):
        if 0 == event.message.body['number']:
            # stripAnnotations='out' must leave the message with no
            # annotations at all on delivery.
            if event.message.annotations is None:
                self.error = None
                self.timer.cancel()
                self.conn1.close()
                self.conn2.close()
    def run(self):
        Container(self).run()
class MessageAnnotationsStripBothAddIngressTrace(MessagingHandler):
    """With stripAnnotations='both' listeners: the dispatch-owned
    'x-opt-qd.ingress'/'x-opt-qd.trace' annotations must be stripped, while
    arbitrary user annotations ('work', and the plain 'x-opt-qd' key, which
    is not a dispatch annotation) must pass through untouched.
    """
    def __init__(self, address1, address2):
        super(MessageAnnotationsStripBothAddIngressTrace, self).__init__()
        self.address1 = address1
        self.address2 = address2
        self.dest = "strip_message_annotations_both_add_ingress_trace/1"
        # Default error; cleared when the expected annotations arrive.
        self.error = "Message annotations not found"
        self.timer = None
        self.conn1 = None
        self.conn2 = None
        self.sender = None
        self.receiver = None
        self.sent_count = 0
        self.msg_not_sent = True  # guard: send exactly one message
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn1 = event.container.connect(self.address1)
        self.sender = event.container.create_sender(self.conn1, self.dest)
        self.conn2 = event.container.connect(self.address2)
        self.receiver = event.container.create_receiver(self.conn2, self.dest)
    def on_sendable(self, event):
        if self.msg_not_sent:
            msg = Message(body={'number': 0})
            # Mix of user annotations and (to-be-stripped) qd annotations.
            ingress_message_annotations = {'work': 'hard',
                                           'x-opt-qd': 'humble',
                                           'x-opt-qd.ingress': 'ingress-router',
                                           'x-opt-qd.trace': ['0/QDR.A']}
            msg.annotations = ingress_message_annotations
            event.sender.send(msg)
            self.msg_not_sent = False
    def on_message(self, event):
        if 0 == event.message.body['number']:
            # Only the user annotations must remain.
            if event.message.annotations == {'work': 'hard', 'x-opt-qd': 'humble'}:
                self.error = None
                self.timer.cancel()
                self.conn1.close()
                self.conn2.close()
    def run(self):
        Container(self).run()
class MessageAnnotationsStripAddTraceTest(MessagingHandler):
    """With no-strip listeners, a pre-existing trace list must be preserved
    and each router must append its own id; the ingress annotation is set
    by the first router ('0/QDR.A').
    """
    def __init__(self, address1, address2):
        super(MessageAnnotationsStripAddTraceTest, self).__init__()
        self.address1 = address1
        self.address2 = address2
        self.dest = "message_annotations_strip_no/1"
        # Default error; cleared when the expected annotations arrive.
        self.error = "Message annotations not found"
        self.timer = None
        self.conn1 = None
        self.conn2 = None
        self.sender = None
        self.receiver = None
        self.sent_count = 0
        self.msg_not_sent = True  # guard: send exactly one message
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn1 = event.container.connect(self.address1)
        self.sender = event.container.create_sender(self.conn1, self.dest)
        self.conn2 = event.container.connect(self.address2)
        self.receiver = event.container.create_receiver(self.conn2, self.dest)
    def on_sendable(self, event):
        if self.msg_not_sent:
            msg = Message(body={'number': 0})
            # Seed the trace with a fake hop; routers should append to it.
            ingress_message_annotations = {'x-opt-qd.trace': ['0/QDR.1']}
            msg.annotations = ingress_message_annotations
            event.sender.send(msg)
            self.msg_not_sent = False
    def on_message(self, event):
        if 0 == event.message.body['number']:
            ma = event.message.annotations
            # Original hop retained, then the two real routers appended.
            if ma['x-opt-qd.ingress'] == '0/QDR.A' and ma['x-opt-qd.trace'] == ['0/QDR.1', '0/QDR.A', '0/QDR.B']:
                self.error = None
                self.accept(event.delivery)
                self.timer.cancel()
                self.conn1.close()
                self.conn2.close()
    def run(self):
        Container(self).run()
class SenderSettlesFirst(MessagingHandler):
    """Send a single message that the sender settles immediately (before any
    disposition from the peer) and verify it still reaches the receiver on
    the other router.
    """
    def __init__(self, address1, address2):
        # auto_accept=False: this handler issues the accept itself.
        super(SenderSettlesFirst, self).__init__(auto_accept=False)
        self.address1 = address1
        self.address2 = address2
        self.dest = "closest.senderfirst.1"
        # Default error; cleared once the expected message body arrives.
        self.error = "Message body received differs from the one sent"
        self.timer = None
        self.conn1 = None
        self.conn2 = None
        self.sender = None
        self.receiver = None
        # Removed unused counters n_sent/sent_count/received_count (dead state).
        self.msg_not_sent = True  # guard: send exactly one message
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn1 = event.container.connect(self.address1)
        self.sender = event.container.create_sender(self.conn1, self.dest)
        self.conn2 = event.container.connect(self.address2)
        self.receiver = event.container.create_receiver(self.conn2, self.dest)
    def on_sendable(self, event):
        if self.msg_not_sent:
            msg = Message(body={'number': 0})
            # Settle on the sending side right away (pre-settled transfer).
            dlv = event.sender.send(msg)
            dlv.settle()
            self.msg_not_sent = False
    def on_message(self, event):
        if 0 == event.message.body['number']:
            self.error = None
            self.accept(event.delivery)
            self.timer.cancel()
            self.conn1.close()
            self.conn2.close()
    def run(self):
        Container(self).run()
class MulticastUnsettled(MessagingHandler):
    """Publish one message to a multicast address on a single router and
    verify that each of the three attached receivers gets a copy.
    """
    def __init__(self, address):
        super(MulticastUnsettled, self).__init__()
        self.address = address
        self.dest = "multicast.2"
        self.error = None
        self.n_sent = 0
        self.count = 3  # one accepted copy expected per receiver
        self.n_received_a = 0
        self.n_received_b = 0
        self.n_received_c = 0
        self.timer = None
        self.conn = None
        self.sender = None
        self.receiver_a = None
        self.receiver_b = None
        self.receiver_c = None
    def on_start(self, event):
        container = event.container
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn = container.connect(self.address)
        self.sender = container.create_sender(self.conn, self.dest)
        # One sender fans out to three receivers on the same multicast address.
        make_receiver = container.create_receiver
        self.receiver_a = make_receiver(self.conn, self.dest, name="A")
        self.receiver_b = make_receiver(self.conn, self.dest, name="B")
        self.receiver_c = make_receiver(self.conn, self.dest, name="C")
    def timeout(self):
        counts = (self.n_sent, self.n_received_a, self.n_received_b, self.n_received_c)
        self.error = "Timeout Expired: sent=%d rcvd=%d/%d/%d" % counts
        self.conn.close()
    def check_if_done(self):
        total = self.n_received_a + self.n_received_b + self.n_received_c
        if total == self.count:
            self.timer.cancel()
            self.conn.close()
    def on_sendable(self, event):
        if not self.n_sent:
            self.sender.send(Message(body="MulticastUnsettled-Test"))
            self.n_sent += 1
    def on_message(self, event):
        # A delivery arrives on exactly one of the three receivers.
        source = event.receiver
        if source == self.receiver_a:
            self.n_received_a += 1
        elif source == self.receiver_b:
            self.n_received_b += 1
        elif source == self.receiver_c:
            self.n_received_c += 1
    def on_accepted(self, event):
        self.check_if_done()
    def run(self):
        Container(self).run()
class SemanticsClosestIsLocal(MessagingHandler):
    """With 'closest' distribution, the receiver attached to the same router
    as the sender must get ALL messages; the two receivers on the remote
    router must get none.
    """
    def __init__(self, address1, address2):
        super(SemanticsClosestIsLocal, self).__init__()
        self.address1 = address1
        self.address2 = address2
        self.dest = "closest.1"
        self.timer = None
        self.conn1 = None
        self.conn2 = None
        self.sender = None
        self.receiver_a = None
        self.receiver_b = None
        self.receiver_c = None
        self.num_messages = 100
        self.n_received_a = 0
        self.n_received_b = 0
        self.n_received_c = 0
        self.error = None
        self.n_sent = 0
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn1 = event.container.connect(self.address1)
        self.conn2 = event.container.connect(self.address2)
        self.sender = event.container.create_sender(self.conn1, self.dest)
        # Receiver on same router as the sender must receive all the messages.
        # The other two receivers are on the other router.
        self.receiver_a = event.container.create_receiver(self.conn1, self.dest, name="A")
        self.receiver_b = event.container.create_receiver(self.conn2, self.dest, name="B")
        self.receiver_c = event.container.create_receiver(self.conn2, self.dest, name="C")
    def timeout(self):
        self.error = "Timeout Expired: sent=%d rcvd=%d/%d/%d" % \
            (self.n_sent, self.n_received_a, self.n_received_b, self.n_received_c)
        self.conn1.close()
        self.conn2.close()
    def check_if_done(self):
        # Fixed: was hard-coded to 100; use num_messages so the threshold
        # stays in sync with the configured message count.
        if self.n_received_a == self.num_messages and self.n_received_b + self.n_received_c == 0:
            self.timer.cancel()
            self.conn1.close()
            self.conn2.close()
    def on_sendable(self, event):
        if self.n_sent < self.num_messages:
            msg = Message(body="SemanticsClosestIsLocal-Test")
            self.sender.send(msg)
            self.n_sent += 1
    def on_message(self, event):
        if event.receiver == self.receiver_a:
            self.n_received_a += 1
        if event.receiver == self.receiver_b:
            self.n_received_b += 1
        if event.receiver == self.receiver_c:
            self.n_received_c += 1
    def on_accepted(self, event):
        self.check_if_done()
    def run(self):
        Container(self).run()
class SemanticsClosestIsRemote(MessagingHandler):
    """With 'closest' distribution and both receivers on the REMOTE router,
    all messages must be delivered and shared between the two receivers
    (each must get at least one).
    """
    def __init__(self, address1, address2):
        super(SemanticsClosestIsRemote, self).__init__()
        self.address1 = address1
        self.address2 = address2
        self.dest = "closest.1"
        self.timer = None
        self.conn1 = None
        self.conn2 = None
        self.sender = None
        self.receiver_a = None
        self.receiver_b = None
        # Removed unused receiver_c attribute (never created in this test).
        self.num_messages = 100
        self.n_received_a = 0
        self.n_received_b = 0
        self.error = None
        self.n_sent = 0
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn1 = event.container.connect(self.address1)
        self.conn2 = event.container.connect(self.address2)
        self.sender = event.container.create_sender(self.conn1, self.dest)
        # Both receivers are connected to the other router than the sender.
        self.receiver_a = event.container.create_receiver(self.conn2, self.dest, name="A")
        self.receiver_b = event.container.create_receiver(self.conn2, self.dest, name="B")
    def timeout(self):
        self.error = "Timeout Expired: sent=%d rcvd=%d/%d" % \
            (self.n_sent, self.n_received_a, self.n_received_b)
        self.conn1.close()
        self.conn2.close()
    def check_if_done(self):
        # Fixed: was hard-coded to 100; use num_messages so the threshold
        # stays in sync with the configured message count.
        if self.n_received_a + self.n_received_b == self.num_messages and self.n_received_a > 0 and self.n_received_b > 0:
            self.timer.cancel()
            self.conn1.close()
            self.conn2.close()
    def on_sendable(self, event):
        if self.n_sent < self.num_messages:
            msg = Message(body="SemanticsClosestIsRemote-Test")
            self.sender.send(msg)
            self.n_sent += 1
    def on_message(self, event):
        if event.receiver == self.receiver_a:
            self.n_received_a += 1
        if event.receiver == self.receiver_b:
            self.n_received_b += 1
    def on_accepted(self, event):
        self.check_if_done()
    def run(self):
        Container(self).run()
class CustomTimeout(object):
    """Timer task that polls router A's address table until 'balanced.1'
    appears, then tells the parent handler to create its sender; otherwise
    it re-schedules itself.
    """
    def __init__(self, parent):
        self.parent = parent
    def addr_text(self, addr):
        """Strip the leading type prefix from a router address record
        ('M0foo' -> 'foo', '0bar' -> 'bar'); empty/None yields ''."""
        if not addr:
            return ""
        return addr[2:] if addr[0] == 'M' else addr[1:]
    def on_timer_task(self, event):
        node = Node.connect(self.parent.address1, timeout=TIMEOUT)
        response = node.query('org.apache.qpid.dispatch.router.address')
        name_index = response.attribute_names.index('name')
        found = any(self.addr_text(row[name_index]) == "balanced.1"
                    for row in response.results)
        if found:
            self.parent.cancel_custom()
            self.parent.create_sender(event)
        else:
            # Address not visible yet; poll again in 2 seconds.
            event.reactor.schedule(2, self)
class SemanticsBalanced(MessagingHandler):
    """With 'balanced' distribution, messages sent on router A must be split
    among one receiver local to the sender's router and two receivers on the
    other router, with every message number 0..399 delivered exactly once.
    """
    def __init__(self, address1, address2, address3):
        # prefetch=0: credit is granted manually in on_start so delivery
        # distribution is driven by the routers, not the client library.
        super(SemanticsBalanced, self).__init__(auto_accept=False, prefetch=0)
        self.address1 = address1
        self.address2 = address2
        self.address3 = address3
        self.dest = "balanced.1"
        self.timer = None
        self.conn1 = None
        self.conn2 = None
        self.conn3 = None
        self.sender = None
        self.receiver_a = None
        self.receiver_b = None
        self.receiver_c = None
        self.num_messages = 400
        self.n_received_a = 0
        self.n_received_b = 0
        self.n_received_c = 0
        self.error = None
        self.n_sent = 0
        self.rx_set = []  # message numbers received, across all receivers
        self.custom_timer = None
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        # CustomTimeout polls until 'balanced.1' shows up in router A's
        # address table, then calls create_sender() below.
        self.custom_timer = event.reactor.schedule(2, CustomTimeout(self))
        self.conn1 = event.container.connect(self.address1)
        self.conn2 = event.container.connect(self.address2)
        self.conn3 = event.container.connect(self.address3)
        # This receiver is on the same router as the sender
        self.receiver_a = event.container.create_receiver(self.conn2, self.dest, name="A")
        # These two receivers are connected to a different router than the sender
        self.receiver_b = event.container.create_receiver(self.conn3, self.dest, name="B")
        self.receiver_c = event.container.create_receiver(self.conn3, self.dest, name="C")
        # Grant ample credit on each receiver up front.
        self.receiver_a.flow(300)
        self.receiver_b.flow(300)
        self.receiver_c.flow(300)
    def cancel_custom(self):
        self.custom_timer.cancel()
    def create_sender(self, event):
        # Invoked by CustomTimeout once the address is known to router A.
        self.sender = event.container.create_sender(self.conn1, self.dest)
    def timeout(self):
        self.error = "Timeout Expired: sent=%d rcvd=%d/%d/%d" % \
            (self.n_sent, self.n_received_a, self.n_received_b, self.n_received_c)
        self.conn1.close()
        self.conn2.close()
        self.conn3.close()
    def check_if_done(self):
        # Done when all messages arrived, every receiver got at least one,
        # and the sorted set of received numbers is exactly 0..num_messages-1.
        if self.n_received_a + self.n_received_b + self.n_received_c == self.num_messages and \
                self.n_received_a > 0 and self.n_received_b > 0 and self.n_received_c > 0:
            self.rx_set.sort()
            all_messages_received = True
            for i in range(self.num_messages):
                if not i == self.rx_set[i]:
                    all_messages_received = False
            if all_messages_received:
                self.timer.cancel()
                self.conn1.close()
                self.conn2.close()
                self.conn3.close()
    def on_sendable(self, event):
        if self.n_sent < self.num_messages:
            msg = Message(body={'number': self.n_sent})
            self.sender.send(msg)
            self.n_sent += 1
    def on_message(self, event):
        if event.receiver == self.receiver_a:
            self.n_received_a += 1
            self.rx_set.append(event.message.body['number'])
        elif event.receiver == self.receiver_b:
            self.n_received_b += 1
            self.rx_set.append(event.message.body['number'])
        elif event.receiver == self.receiver_c:
            self.n_received_c += 1
            self.rx_set.append(event.message.body['number'])
        self.check_if_done()
    def run(self):
        Container(self).run()
class PropagatedDisposition(MessagingHandler):
    """Verify dispositions (accept/reject) applied by the receiver propagate
    back across the inter-router link to the original sender.
    """
    def __init__(self, test, address1, address2):
        super(PropagatedDisposition, self).__init__(auto_accept=False)
        self.address1 = address1
        self.address2 = address2
        self.settled = []  # disposition names observed back at the sender
        self.test = test
        self.sender_conn = None
        self.receiver_conn = None
        self.passed = False
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.sender_conn = event.container.connect(self.address1)
        self.receiver_conn = event.container.connect(self.address2)
        addr = "unsettled/2"
        self.sender = event.container.create_sender(self.sender_conn, addr)
        self.receiver = event.container.create_receiver(self.receiver_conn, addr)
        self.receiver.flow(2)
        # Map each delivery tracker to its expected disposition; the message
        # body doubles as the instruction for the receiver side.
        self.trackers = {}
        for b in ['accept', 'reject']:
            self.trackers[self.sender.send(Message(body=b))] = b
    def timeout(self):
        unique_list = sorted(list(dict.fromkeys(self.settled)))
        self.error = "Timeout Expired: Expected ['accept', 'reject'] got %s" % unique_list
        self.sender_conn.close()
        self.receiver_conn.close()
    def check(self):
        # Done once both dispositions have come back to the sender.
        unique_list = sorted(list(dict.fromkeys(self.settled)))
        if unique_list == [u'accept', u'reject']:
            self.passed = True
            self.sender_conn.close()
            self.receiver_conn.close()
            self.timer.cancel()
    def on_message(self, event):
        # Receiver side: dispose of each message as its body instructs.
        if event.message.body == u'accept':
            event.delivery.update(Delivery.ACCEPTED)
            event.delivery.settle()
        elif event.message.body == u'reject':
            event.delivery.update(Delivery.REJECTED)
            event.delivery.settle()
    def on_accepted(self, event):
        # Sender side: the accept disposition traveled back over the network.
        self.test.assertEqual(Delivery.ACCEPTED, event.delivery.remote_state)
        self.test.assertEqual('accept', self.trackers[event.delivery])
        self.settled.append('accept')
        self.check()
    def on_rejected(self, event):
        # Sender side: the reject disposition traveled back over the network.
        self.test.assertEqual(Delivery.REJECTED, event.delivery.remote_state)
        self.test.assertEqual('reject', self.trackers[event.delivery])
        self.settled.append('reject')
        self.check()
    def run(self):
        Container(self).run()
class ThreeAck(MessagingHandler):
    """Walk one delivery through the full three-ack handshake across the two
    routers; `phase` tracks progress and is asserted at every step:
    0 = message sent, 1 = receiver accepted (unsettled),
    2 = sender observed the accept and settled, 3 = receiver saw the settle.
    """
    def __init__(self, test, address1, address2):
        # Manual accept AND manual settle: this test drives each ack itself.
        super(ThreeAck, self).__init__(auto_accept=False, auto_settle=False)
        self.addrs = [address1, address2]
        self.settled = []
        self.test = test
        self.phase = 0
    def on_start(self, event):
        connections = [event.container.connect(a) for a in self.addrs]
        addr = "three_ack/1"
        self.sender = event.container.create_sender(connections[0], addr)
        self.receiver = event.container.create_receiver(connections[1], addr)
        self.receiver.flow(1)
        self.tracker = self.sender.send(Message('hello'))
    def on_message(self, event):
        self.test.assertEqual(0, self.phase)
        self.phase = 1
        self.test.assertFalse(event.delivery.settled)
        self.test.assertEqual(0, self.tracker.local_state)
        self.test.assertEqual(0, self.tracker.remote_state)
        event.delivery.update(Delivery.ACCEPTED)
        # NOTE: we don't settle yet for 3-ack
    def on_accepted(self, event):
        self.test.assertTrue(event.sender)
        self.test.assertEqual(1, self.phase)
        self.phase = 2
        self.test.assertEqual(Delivery.ACCEPTED, event.delivery.remote_state)
        self.test.assertFalse(event.delivery.settled)
        self.test.assertEqual(0, event.delivery.local_state)
        # Settle on the sender; the delivery's local settled flag is still
        # False immediately afterwards (asserted below).
        event.delivery.settle()
        self.test.assertFalse(event.delivery.settled)
        event.connection.close()
    def on_settled(self, event):
        self.test.assertTrue(event.receiver)
        self.test.assertEqual(2, self.phase)
        self.phase = 3
        event.connection.close()
    def run(self):
        Container(self).run()
        # All three acks must have been observed by the time the reactor exits.
        self.test.assertEqual(3, self.phase)
class TwoRouterConnection(TestCase):
    """Regression test for DISPATCH-1093: creating two connectors from A to B
    must result in exactly 3 connections on router A, with no spurious extra
    connection.
    """
    def __init__(self, test_method):
        TestCase.__init__(self, test_method)
        self.success = False
        self.timer_delay = 4    # seconds between connection-count polls
        self.max_attempts = 2   # number of polls before giving up
        self.attempts = 0
        self.local_node = None
    @classmethod
    def router(cls, name, config):
        # Start one router with the given raw configuration.
        config = Qdrouterd.Config(config)
        cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))
    @classmethod
    def setUpClass(cls):
        super(TwoRouterConnection, cls).setUpClass()
        cls.routers = []
        cls.B_normal_port_1 = cls.tester.get_port()
        cls.B_normal_port_2 = cls.tester.get_port()
        TwoRouterConnection.router('A', [
            ('router', {'mode': 'interior', 'id': 'A'}),
            ('listener', {'host': '0.0.0.0', 'role': 'normal',
                          'port': cls.tester.get_port()}),
        ]
        )
        TwoRouterConnection.router('B',
                                   [
                                       ('router', {'mode': 'interior', 'id': 'B'}),
                                       ('listener', {'host': '0.0.0.0', 'role': 'normal',
                                                     'port': cls.B_normal_port_1}),
                                       ('listener', {'host': '0.0.0.0', 'role': 'normal',
                                                     'port': cls.B_normal_port_2}),
                                   ]
                                   )
    def address(self):
        # Management/client address of router A.
        return self.routers[0].addresses[0]
    def run_qdmanage(self, cmd, input=None, expect=Process.EXIT_OK, address=None):
        """Run the qdmanage CLI against `address` (default: router A) and
        return its combined stdout/stderr output."""
        p = self.popen(
            ['qdmanage'] + cmd.split(' ') + ['--bus', address or self.address(), '--indent=-1', '--timeout', str(TIMEOUT)],
            stdin=PIPE, stdout=PIPE, stderr=STDOUT, expect=expect,
            universal_newlines=True)
        out = p.communicate(input)[0]
        try:
            p.teardown()
        except Exception as e:
            raise Exception(out if out else str(e))
        return out
    def can_terminate(self):
        # Done polling when out of attempts or the expected count was seen.
        return self.attempts == self.max_attempts or self.success
    def check_connections(self):
        res = self.local_node.query(type='org.apache.qpid.dispatch.connection')
        results = res.results
        # If DISPATCH-1093 was not fixed, there would be an additional
        # connection created and hence len(results) would be 4.
        # Since DISPATCH-1093 is fixed, len(results) is 3, which is what
        # we expect.
        if len(results) != 3:
            self.schedule_num_connections_test()
        else:
            self.success = True
    def schedule_num_connections_test(self):
        # Re-check the connection count on a background timer thread.
        if self.attempts < self.max_attempts:
            if not self.success:
                Timer(self.timer_delay, self.check_connections).start()
                self.attempts += 1
    def test_create_connectors(self):
        self.local_node = Node.connect(self.routers[0].addresses[0],
                                       timeout=TIMEOUT)
        res = self.local_node.query(type='org.apache.qpid.dispatch.connection')
        results = res.results
        self.assertEqual(1, len(results))
        # (Dropped a stray '' literal that was implicitly concatenated here.)
        long_type = 'org.apache.qpid.dispatch.connector'
        create_command = 'CREATE --type=' + long_type + ' --name=foo' + ' host=0.0.0.0 port=' + str(TwoRouterConnection.B_normal_port_1)
        self.run_qdmanage(create_command)
        create_command = 'CREATE --type=' + long_type + ' --name=bar' + ' host=0.0.0.0 port=' + str(TwoRouterConnection.B_normal_port_2)
        self.run_qdmanage(create_command)
        self.schedule_num_connections_test()
        # Poll politely instead of busy-spinning the CPU while the background
        # Timer threads check the connection count.
        while not self.can_terminate():
            sleep(0.1)
        self.assertTrue(self.success)
class PropagationTest(TestCase):
    inter_router_port = None
    @classmethod
    def setUpClass(cls):
        """Start two connected interior routers; only router A configures the
        'multicast' address prefix, B relies on propagation."""
        super(PropagationTest, cls).setUpClass()
        def router(name, extra_config):
            # Base config (id + client listener) plus per-router extras.
            config = [
                ('router', {'mode': 'interior', 'id': 'QDR.%s'%name}),
                ('listener', {'port': cls.tester.get_port()}),
            ] + extra_config
            config = Qdrouterd.Config(config)
            cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))
        cls.routers = []
        inter_router_port = cls.tester.get_port()
        router('A', [('listener', {'role': 'inter-router', 'port': inter_router_port}), ('address', {'prefix': 'multicast', 'distribution': 'multicast'})])
        router('B', [('connector', {'role': 'inter-router', 'port': inter_router_port})])
        cls.routers[0].wait_router_connected('QDR.B')
        cls.routers[1].wait_router_connected('QDR.A')
    def test_propagation_of_locally_undefined_address(self):
        # The address is only configured on router A; a message sent via
        # router B must still reach the receivers on both routers.
        test = MulticastTestClient(self.routers[0].addresses[0], self.routers[1].addresses[0])
        test.run()
        self.assertEqual(None, test.error)
        self.assertEqual(test.received, 2)
class CreateReceiver(MessagingHandler):
    """One-shot timer task: when fired, attaches a receiver for the given
    address on an already-open connection.
    """
    def __init__(self, connection, address):
        super(CreateReceiver, self).__init__()
        self._conn = connection
        self._addr = address
    def on_timer_task(self, event):
        event.container.create_receiver(self._conn, self._addr)
class DelayedSend(MessagingHandler):
    """One-shot timer task: when fired, creates a sender for the given
    address on an already-open connection and sends one message on it.
    """
    def __init__(self, connection, address, message):
        super(DelayedSend, self).__init__()
        self._conn = connection
        self._addr = address
        self._msg = message
    def on_timer_task(self, event):
        event.container.create_sender(self._conn, self._addr).send(self._msg)
class MulticastTestClient(MessagingHandler):
    """Check that knowledge of a multicast address propagates between the two
    routers: one receiver per router, one message sent via the second router,
    and each receiver must get a copy (received == 2).
    """
    def __init__(self, router1, router2):
        super(MulticastTestClient, self).__init__()
        self.routers = [router1, router2]
        self.received = 0
        self.error = None
    def on_start(self, event):
        self.connections = [event.container.connect(r) for r in self.routers]
        event.container.create_receiver(self.connections[0], "multicast")
        # wait for knowledge of receiver1 to propagate to second router
        event.container.schedule(5, CreateReceiver(self.connections[1], "multicast"))
        event.container.schedule(7, DelayedSend(self.connections[1], "multicast", Message(body="testing1,2,3")))
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
    def on_message(self, event):
        self.received += 1
        # Each connection delivers exactly one copy; close it once its copy
        # has arrived.
        event.connection.close()
        if self.received == 2:
            self.timer.cancel()
    def timeout(self):
        self.error = "Timeout Expired:received=%d" % self.received
        for c in self.connections:
            c.close()
    def run(self):
        Container(self).run()
# Allow running this module directly; main_module() (from system_test)
# supplies the module reference to unittest's runner.
if __name__ == '__main__':
    unittest.main(main_module())
| 37.565984 | 155 | 0.601687 |
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from time import sleep
import json, os
import logging
from threading import Timer
from subprocess import PIPE, STDOUT
from proton import Message, Timeout, Delivery
from system_test import TestCase, Process, Qdrouterd, main_module, TIMEOUT, DIR
from system_test import AsyncTestReceiver
from system_test import AsyncTestSender
from system_test import unittest
from proton.handlers import MessagingHandler
from proton.reactor import Container, AtLeastOnce
from proton.utils import BlockingConnection
from qpid_dispatch.management.client import Node
CONNECTION_PROPERTIES_UNICODE_STRING = {u'connection': u'properties', u'int_property': 6451}
class TwoRouterTest(TestCase):
inter_router_port = None
    @classmethod
    def setUpClass(cls):
        """Start two interior routers, A and B, joined by an inter-router
        link; each router exposes one listener per stripAnnotations mode
        (indices: [0] no-strip w/ linkCapacity 500, [1] no, [2] both,
        [3] out, [4] in — order assumed by the test_03a_* tests)."""
        super(TwoRouterTest, cls).setUpClass()
        def router(name, client_server, connection):
            # NOTE(review): client_server and policy_config_path are unused
            # within this function — confirm before removing.
            policy_config_path = os.path.join(DIR, 'two-router-policy')
            config = [
                ('router', {'remoteLsMaxAge': 60, 'helloInterval': 1, 'raInterval': 30, 'raIntervalFlux': 4,
                            'mode': 'interior', 'id': 'QDR.%s'%name, 'allowUnsettledMulticast': 'yes'}),
                ('listener', {'port': cls.tester.get_port(), 'stripAnnotations': 'no', 'linkCapacity': 500}),
                ('listener', {'port': cls.tester.get_port(), 'stripAnnotations': 'no'}),
                ('listener', {'port': cls.tester.get_port(), 'stripAnnotations': 'both'}),
                ('listener', {'port': cls.tester.get_port(), 'stripAnnotations': 'out'}),
                ('listener', {'port': cls.tester.get_port(), 'stripAnnotations': 'in'}),
                ('address', {'prefix': 'closest', 'distribution': 'closest'}),
                ('address', {'prefix': 'balanced', 'distribution': 'balanced'}),
                ('address', {'prefix': 'multicast', 'distribution': 'multicast'}),
                ('address', {'pattern': 'a.b.c.d',
                             'distribution': 'closest'}),
                ('address', {'pattern': '#.b.c.d',
                             'distribution': 'multicast'}),
                ('address', {'pattern': 'a/*/#/d',
                             'distribution': 'closest'}),
                ('address', {'pattern': '*/b/c/d',
                             'distribution': 'multicast'}),
                ('address', {'pattern': 'a.x.d',
                             'distribution': 'closest'}),
                ('address', {'pattern': 'a.*.d',
                             'distribution': 'multicast'}),
                connection
            ]
            config = Qdrouterd.Config(config)
            cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))
        cls.routers = []
        inter_router_port = cls.tester.get_port()
        router('A', 'server',
               ('listener', {'role': 'inter-router', 'port': inter_router_port}))
        router('B', 'client',
               ('connector', {'name': 'connectorToA', 'role': 'inter-router', 'port': inter_router_port,
                              'verifyHostname': 'no'}))
        cls.routers[0].wait_router_connected('QDR.B')
        cls.routers[1].wait_router_connected('QDR.A')
    def address(self):
        # Default client/management address: router A's first listener.
        return self.routers[0].addresses[0]
    def run_qdmanage(self, cmd, input=None, expect=Process.EXIT_OK, address=None):
        """Run the qdmanage CLI against `address` (default: router A) and
        return its combined stdout/stderr output; raises with that output
        on teardown failure."""
        p = self.popen(
            ['qdmanage'] + cmd.split(' ') + ['--bus', address or self.address(), '--indent=-1', '--timeout', str(TIMEOUT)],
            stdin=PIPE, stdout=PIPE, stderr=STDOUT, expect=expect,
            universal_newlines=True)
        out = p.communicate(input)[0]
        try:
            p.teardown()
        except Exception as e:
            raise Exception(out if out else str(e))
        return out
    def test_01_pre_settled(self):
        # Run the transfer scenario, then verify router A's transit-delivery
        # counter advanced past the number of messages moved.
        test = DeliveriesInTransit(self.routers[0].addresses[0], self.routers[1].addresses[0])
        test.run()
        self.assertEqual(None, test.error)
        local_node = Node.connect(self.routers[0].addresses[0], timeout=TIMEOUT)
        outs = local_node.query(type='org.apache.qpid.dispatch.router')
        pos = outs.attribute_names.index("deliveriesTransit")
        results = outs.results[0]
        self.assertTrue(results[pos] > 104)
    def test_02a_multicast_unsettled(self):
        test = MulticastUnsettled(self.routers[0].addresses[0])
        test.run()
        self.assertEqual(None, test.error)
    def test_02c_sender_settles_first(self):
        test = SenderSettlesFirst(self.routers[0].addresses[0], self.routers[1].addresses[0])
        test.run()
        self.assertEqual(None, test.error)
    def test_03_message_annotations(self):
        test = MessageAnnotationsTest(self.routers[0].addresses[0], self.routers[1].addresses[0])
        test.run()
        self.assertEqual(None, test.error)
    # The test_03a_* tests pick addresses[1..4], which correspond to the
    # listeners configured with stripAnnotations 'no'/'both'/'out'/'in'
    # respectively (see setUpClass listener order).
    def test_03a_test_strip_message_annotations_no(self):
        test = MessageAnnotationsStripTest(self.routers[0].addresses[1], self.routers[1].addresses[1])
        test.run()
        self.assertEqual(None, test.error)
    def test_03a_test_strip_message_annotations_no_add_trace(self):
        test = MessageAnnotationsStripAddTraceTest(self.routers[0].addresses[1], self.routers[1].addresses[1])
        test.run()
        self.assertEqual(None, test.error)
    def test_03a_test_strip_message_annotations_both_add_ingress_trace(self):
        test = MessageAnnotationsStripBothAddIngressTrace(self.routers[0].addresses[2], self.routers[1].addresses[2])
        test.run()
        self.assertEqual(None, test.error)
    def test_03a_test_strip_message_annotations_out(self):
        test = MessageAnnotationsStripMessageAnnotationsOut(self.routers[0].addresses[3], self.routers[1].addresses[3])
        test.run()
        self.assertEqual(None, test.error)
    def test_03a_test_strip_message_annotations_in(self):
        test = MessageAnnotationStripMessageAnnotationsIn(self.routers[0].addresses[4], self.routers[1].addresses[4])
        test.run()
        self.assertEqual(None, test.error)
    def test_04_management(self):
        test = ManagementTest(self.routers[0].addresses[0])
        test.run()
        self.assertEqual(None, test.error)
    def test_06_semantics_closest_is_local(self):
        test = SemanticsClosestIsLocal(self.routers[0].addresses[0], self.routers[1].addresses[0])
        test.run()
        self.assertEqual(None, test.error)
    def test_07_semantics_closest_is_remote(self):
        test = SemanticsClosestIsRemote(self.routers[0].addresses[0], self.routers[1].addresses[0])
        test.run()
        self.assertEqual(None, test.error)
    def test_08_semantics_balanced(self):
        test = SemanticsBalanced(self.routers[0].addresses[0], self.routers[0].addresses[1],
                                 self.routers[1].addresses[0])
        test.run()
        self.assertEqual(None, test.error)
    def test_09_to_override(self):
        test = MessageAnnotaionsPreExistingOverride(self.routers[0].addresses[0], self.routers[1].addresses[0])
        test.run()
        self.assertEqual(None, test.error)
    def test_10_propagated_disposition(self):
        test = PropagatedDisposition(self, self.routers[0].addresses[0], self.routers[1].addresses[0])
        test.run()
        self.assertTrue(test.passed)
    def test_11_three_ack(self):
        # ThreeAck performs its assertions internally via the `self` it is
        # handed; no external assertion needed here.
        test = ThreeAck(self, self.routers[0].addresses[0], self.routers[1].addresses[0])
        test.run()
    def test_12_excess_deliveries_released(self):
        test = ExcessDeliveriesReleasedTest(self.routers[0].addresses[0], self.routers[1].addresses[0])
        test.run()
        self.assertEqual(None, test.error)
    def test_15_attach_on_inter_router(self):
        # addresses[5] is the inter-router listener on router A.
        test = AttachOnInterRouterTest(self.routers[0].addresses[5])
        test.run()
        self.assertEqual(None, test.error)
    def test_17_address_wildcard(self):
        """Verify wildcard address prefixes select the intended distribution.

        Each tuple is (address to send to, total messages expected across
        the two receivers for that address).  Per the inline notes, a count
        of 1 corresponds to a closest/balanced match (one receiver gets the
        message) and 2 to a multicast match (both receivers get it).
        """
        addresses = [
            ('a.b.c.d', 1),
            ('b.c.d', 2),
            ('f.a.b.c.d', 2),
            ('a.c.d', 2),       # multi 'a.*.d'
            ('a/c/c/d', 1),     # closest 'a/*/#/d'
            ('a/x/z/z/d', 1),
            ('a/x/d', 1),       # closest 'a.x.d'
            ('a.x.e', 1),       # balanced ----
            ('m.b.c.d', 2)      # multi '*/b/c/d'
        ]
        # two receivers per address - one for each router
        receivers = []
        for a in addresses:
            for x in range(2):
                ar = AsyncTestReceiver(address=self.routers[x].addresses[0],
                                       source=a[0])
                receivers.append(ar)
        # wait for the consumer info to propagate
        for a in addresses:
            self.routers[0].wait_address(a[0], 1, 1)
            self.routers[1].wait_address(a[0], 1, 1)
        # send one message to each address
        conn = BlockingConnection(self.routers[0].addresses[0])
        sender = conn.create_sender(address=None, options=AtLeastOnce())
        for a in addresses:
            sender.send(Message(address=a[0], body={'address': a[0]}))
        # count received messages by address; "ERROR" marks a body without one
        msgs_recvd = {}
        for M in receivers:
            try:
                while True:
                    i = M.queue.get(timeout=0.2).body.get('address', "ERROR")
                    if i not in msgs_recvd:
                        msgs_recvd[i] = 0
                    msgs_recvd[i] += 1
            except AsyncTestReceiver.Empty:
                pass
        # verify expected count == actual count
        self.assertTrue("ERROR" not in msgs_recvd)
        for a in addresses:
            self.assertTrue(a[0] in msgs_recvd)
            self.assertEqual(a[1], msgs_recvd[a[0]])
        for M in receivers:
            M.stop()
        conn.close()
def test_17_large_streaming_test(self):
test = LargeMessageStreamTest(self.routers[0].addresses[0], self.routers[1].addresses[0])
test.run()
self.assertEqual(None, test.error)
def test_18_single_char_dest_test(self):
test = SingleCharacterDestinationTest(self.routers[0].addresses[0], self.routers[1].addresses[0])
test.run()
self.assertEqual(None, test.error)
    def test_19_delete_inter_router_connection(self):
        """Deleting an inter-router connection via management must be Forbidden.

        Locates an inter-router connection through a qdmanage QUERY, then
        attempts an UPDATE setting adminStatus=deleted; the attempt must
        fail with a 'Forbidden' error for the test to pass.
        """
        query_command = 'QUERY --type=connection'
        outputs = json.loads(self.run_qdmanage(query_command))
        identity = None
        passed = False
        for output in outputs:
            # keep the last inter-router connection seen (any one will do)
            if "inter-router" == output['role']:
                identity = output['identity']
        if identity:
            update_command = 'UPDATE --type=connection adminStatus=deleted --id=' + identity
            try:
                json.loads(self.run_qdmanage(update_command))
            except Exception as e:
                if "Forbidden" in str(e):
                    passed = True
        # The test has passed since we were forbidden from deleting
        # inter-router connections even though we are allowed to update the adminStatus field.
        self.assertTrue(passed)
def test_20_delete_connection(self):
# Create a connection with some properties so we can easily identify the connection
connection = BlockingConnection(self.address(),
properties=CONNECTION_PROPERTIES_UNICODE_STRING)
query_command = 'QUERY --type=connection'
outputs = json.loads(self.run_qdmanage(query_command))
identity = None
passed = False
print ()
for output in outputs:
if output.get('properties'):
conn_properties = output['properties']
# Find the connection that has our properties - CONNECTION_PROPERTIES_UNICODE_STRING
# Delete that connection and run another qdmanage to see
# if the connection is gone.
if conn_properties.get('int_property'):
identity = output.get("identity")
if identity:
update_command = 'UPDATE --type=connection adminStatus=deleted --id=' + identity
try:
self.run_qdmanage(update_command)
query_command = 'QUERY --type=connection'
outputs = json.loads(
self.run_qdmanage(query_command))
no_properties = True
for output in outputs:
if output.get('properties'):
no_properties = False
conn_properties = output['properties']
if conn_properties.get('int_property'):
passed = False
break
else:
passed = True
if no_properties:
passed = True
except Exception as e:
passed = False
# The test has passed since we were allowed to delete a connection
# because we have the policy permission to do so.
self.assertTrue(passed)
def test_21_delete_connection_with_receiver(self):
test = DeleteConnectionWithReceiver(self.routers[0].addresses[0])
self.assertEqual(test.error, None)
test.run()
def test_30_huge_address(self):
# try a link with an extremely long address
# DISPATCH-1461
addr = "A" * 2019
rx = AsyncTestReceiver(self.routers[0].addresses[0],
source=addr)
tx = AsyncTestSender(self.routers[1].addresses[0],
target=addr,
count=100)
tx.wait()
i = 100
while i:
try:
rx.queue.get(timeout=TIMEOUT)
i -= 1
except AsyncTestReceiver.Empty:
break;
self.assertEqual(0, i)
rx.stop()
class DeleteConnectionWithReceiver(MessagingHandler):
    """Delete, via management, a connection that holds an active receiver.

    Flow: open a victim connection tagged with distinctive properties plus
    a receiver on it; over a separate management connection QUERY all
    connections, locate the tagged one, UPDATE it to adminStatus=deleted,
    then QUERY again to confirm it is gone.  self.error is None on success.

    Bug fix: timeout() previously formatted undefined self.n_sent /
    self.n_received attributes, raising AttributeError instead of
    reporting the timeout.
    """
    def __init__(self, address):
        super(DeleteConnectionWithReceiver, self).__init__()
        self.address = address
        self.timer = None
        self.mgmt_receiver = None    # reply link for the initial QUERY
        self.mgmt_receiver_1 = None  # reply link for the UPDATE (delete)
        self.mgmt_receiver_2 = None  # reply link for the confirming QUERY
        self.conn_to_kill = None
        self.mgmt_conn = None
        self.mgmt_sender = None
        self.success = False
        self.error = None

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        # Create a receiver connection with some properties so it
        # can be easily identified.
        self.conn_to_kill = event.container.connect(self.address, properties=CONNECTION_PROPERTIES_UNICODE_STRING)
        self.receiver_to_kill = event.container.create_receiver(self.conn_to_kill, "hello_world")
        self.mgmt_conn = event.container.connect(self.address)
        self.mgmt_sender = event.container.create_sender(self.mgmt_conn)
        self.mgmt_receiver = event.container.create_receiver(self.mgmt_conn, None, dynamic=True)
        self.mgmt_receiver_1 = event.container.create_receiver(self.mgmt_conn,
                                                               None,
                                                               dynamic=True)
        self.mgmt_receiver_2 = event.container.create_receiver(self.mgmt_conn,
                                                               None,
                                                               dynamic=True)

    def timeout(self):
        # Bug fix: do not reference per-message counters this handler
        # never maintains; just record the expiry and shut down.
        self.error = "Timeout Expired while attempting to delete connection"
        self.mgmt_conn.close()

    def bail(self, error):
        """Record the outcome (None == success) and tear everything down."""
        self.error = error
        self.timer.cancel()
        self.mgmt_conn.close()
        self.conn_to_kill.close()

    def on_link_opened(self, event):
        if event.receiver == self.mgmt_receiver:
            # Reply link ready: ask for every connection on the router.
            request = Message()
            request.address = "amqp:/_local/$management"
            request.properties = {
                u'type': u'org.apache.qpid.dispatch.connection',
                u'operation': u'QUERY'}
            request.reply_to = self.mgmt_receiver.remote_source.address
            self.mgmt_sender.send(request)

    def on_message(self, event):
        if event.receiver == self.mgmt_receiver:
            # QUERY reply: find the tagged connection and request deletion.
            attribute_names = event.message.body['attributeNames']
            property_index = attribute_names.index('properties')
            identity_index = attribute_names.index('identity')
            for result in event.message.body['results']:
                if result[property_index]:
                    properties = result[property_index]
                    if properties.get('int_property'):
                        identity = result[identity_index]
                        if identity:
                            request = Message()
                            request.address = "amqp:/_local/$management"
                            request.properties = {
                                u'identity': identity,
                                u'type': u'org.apache.qpid.dispatch.connection',
                                u'operation': u'UPDATE'
                            }
                            request.body = {
                                u'adminStatus': u'deleted'}
                            request.reply_to = self.mgmt_receiver_1.remote_source.address
                            self.mgmt_sender.send(request)
        elif event.receiver == self.mgmt_receiver_1:
            # UPDATE acknowledged: re-query to confirm the connection is gone.
            if event.message.properties['statusDescription'] == 'OK' and event.message.body['adminStatus'] == 'deleted':
                request = Message()
                request.address = "amqp:/_local/$management"
                request.properties = {u'type': u'org.apache.qpid.dispatch.connection',
                                      u'operation': u'QUERY'}
                request.reply_to = self.mgmt_receiver_2.remote_source.address
                self.mgmt_sender.send(request)
        elif event.receiver == self.mgmt_receiver_2:
            # Confirming QUERY reply: the tagged connection must be absent.
            attribute_names = event.message.body['attributeNames']
            property_index = attribute_names.index('properties')
            identity_index = attribute_names.index('identity')
            for result in event.message.body['results']:
                if result[property_index]:
                    properties = result[property_index]
                    if properties and properties.get('int_property'):
                        self.bail("Connection not deleted")
            self.bail(None)

    def run(self):
        Container(self).run()
class Timeout(object):
    """Reactor timer callback that forwards expiry to its parent's timeout()."""

    def __init__(self, parent):
        self.parent = parent

    def on_timer_task(self, event):
        # Delegate entirely: the owning handler decides what expiry means.
        self.parent.timeout()
class SingleCharacterDestinationTest(MessagingHandler):
    """Verify routing works for a single-character destination address.

    One message is sent to address "x" through one router and must be
    received via the other; self.error is None on success.
    """
    def __init__(self, address1, address2):
        super(SingleCharacterDestinationTest, self).__init__()
        self.address1 = address1  # router the sender attaches to
        self.address2 = address2  # router the receiver attaches to
        self.dest = "x"
        self.error = None
        self.conn1 = None
        self.conn2 = None
        self.count = 1
        self.n_sent = 0
        self.timer = None
        self.sender = None
        self.receiver = None
        self.n_received = 0
        self.body = "xyz"
    def check_if_done(self):
        # Success: everything sent has been received; stop cleanly.
        if self.n_received == self.count:
            self.timer.cancel()
            self.conn1.close()
            self.conn2.close()
    def timeout(self):
        # Leaves self.error set; the owning test reports it as a failure.
        self.error = "Timeout Expired: sent=%d, received=%d" % (self.n_sent, self.n_received)
        self.conn1.close()
        self.conn2.close()
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn1 = event.container.connect(self.address1)
        self.conn2 = event.container.connect(self.address2)
        self.sender = event.container.create_sender(self.conn1, self.dest)
        self.receiver = event.container.create_receiver(self.conn2, self.dest)
    def on_sendable(self, event):
        if self.n_sent < self.count:
            msg = Message(body=self.body)
            event.sender.send(msg)
            self.n_sent += 1
    def on_message(self, event):
        self.n_received += 1
        self.check_if_done()
    def run(self):
        Container(self).run()
class LargeMessageStreamTest(MessagingHandler):
    """Stream 10 large messages (~220 KB body each) across the router pair.

    self.error is None once all 10 messages are received.

    Improvement: the body was previously built with 10,000 successive
    ``+=`` concatenations; a single string multiplication produces the
    identical value directly.
    """
    def __init__(self, address1, address2):
        super(LargeMessageStreamTest, self).__init__()
        self.address1 = address1
        self.address2 = address2
        self.dest = "LargeMessageStreamTest"
        self.error = None
        self.conn1 = None
        self.conn2 = None
        self.count = 10
        self.n_sent = 0
        self.timer = None
        self.sender = None
        self.receiver = None
        self.n_received = 0
        # Same value as the original 10000-iteration += loop.
        self.body = "0123456789101112131415" * 10000

    def check_if_done(self):
        if self.n_received == self.count:
            self.timer.cancel()
            self.conn1.close()
            self.conn2.close()

    def timeout(self):
        self.error = "Timeout Expired: sent=%d, received=%d" % (self.n_sent, self.n_received)
        self.conn1.close()
        self.conn2.close()

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn1 = event.container.connect(self.address1)
        self.conn2 = event.container.connect(self.address2)
        self.sender = event.container.create_sender(self.conn1, self.dest)
        self.receiver = event.container.create_receiver(self.conn2, self.dest)

    def on_sendable(self, event):
        if self.n_sent < self.count:
            msg = Message(body=self.body)
            # send(msg) calls the stream function which streams data from sender to the router
            event.sender.send(msg)
            self.n_sent += 1

    def on_message(self, event):
        self.n_received += 1
        self.check_if_done()

    def run(self):
        Container(self).run()
class ExcessDeliveriesReleasedTest(MessagingHandler):
    """Send 10 messages to a receiver with only 6 credits.

    The receiver closes its link after 6 messages; the remaining 4
    deliveries must come back RELEASED and exactly 6 must be ACCEPTED.
    self.error is None on success.
    """
    def __init__(self, address1, address2):
        super(ExcessDeliveriesReleasedTest, self).__init__(prefetch=0)
        self.address1 = address1
        self.address2 = address2
        self.dest = "closest.EDRtest"
        self.error = None
        self.sender = None
        self.receiver = None
        self.n_sent = 0
        self.n_received = 0
        self.n_accepted = 0
        self.n_released = 0
        self.timer = None
        self.conn1 = None
        self.conn2 = None
    def timeout(self):
        self.error = "Timeout Expired"
        self.conn1.close()
        self.conn2.close()
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn1 = event.container.connect(self.address1)
        self.conn2 = event.container.connect(self.address2)
        self.sender = event.container.create_sender(self.conn1, self.dest)
        self.receiver = event.container.create_receiver(self.conn2, self.dest)
        # Grant only 6 credits for the 10 messages we plan to send.
        self.receiver.flow(6)
    def on_sendable(self, event):
        # Top up to 10 sent in total, however many sendable events arrive.
        for i in range(10 - self.n_sent):
            msg = Message(body=i)
            event.sender.send(msg)
            self.n_sent += 1
    def on_accepted(self, event):
        self.n_accepted += 1
    def on_released(self, event):
        self.n_released += 1
        if self.n_released == 4:
            # All non-credited deliveries are back; validate final tallies.
            if self.n_accepted != 6:
                self.error = "Expected 6 accepted, got %d" % self.n_accepted
            if self.n_received != 6:
                self.error = "Expected 6 received, got %d" % self.n_received
            self.conn1.close()
            self.conn2.close()
            self.timer.cancel()
    def on_message(self, event):
        self.n_received += 1
        if self.n_received == 6:
            # Credit exhausted: close so the rest are released.
            self.receiver.close()
    def run(self):
        Container(self).run()
class AttachOnInterRouterTest(MessagingHandler):
    """Attach a normal sender link on the inter-router listener.

    The router is expected to remotely close the link; the handler then
    shuts down with self.error still None.  A timeout instead sets
    self.error.
    """
    def __init__(self, address):
        super(AttachOnInterRouterTest, self).__init__(prefetch=0)
        self.address = address  # inter-router listener address
        self.dest = "AOIRtest"
        self.error = None
        self.sender = None
        self.timer = None
        self.conn = None
    def timeout(self):
        self.error = "Timeout Expired"
        self.conn.close()
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn = event.container.connect(self.address)
        self.sender = event.container.create_sender(self.conn, self.dest)
    def on_link_remote_close(self, event):
        # Expected outcome: the router rejects the attach by closing the link.
        self.conn.close()
        self.timer.cancel()
    def run(self):
        logging.disable(logging.ERROR) # Hide expected log errors
        try:
            Container(self).run()
        finally:
            logging.disable(logging.NOTSET) # Restore to normal
class DeliveriesInTransit(MessagingHandler):
    """Send 104 messages and verify every one of them is received.

    self.error starts non-None and is cleared only when the received
    count matches the sent count.

    Bug fix: a timeout() method was missing.  on_start() schedules
    Timeout(self), whose on_timer_task() invokes self.parent.timeout();
    with no such method a timer expiry raised AttributeError instead of
    closing the connections and reporting the failure.
    """
    def __init__(self, address1, address2):
        super(DeliveriesInTransit, self).__init__()
        self.address1 = address1
        self.address2 = address2
        self.dest = "pre_settled.1"
        self.error = "All messages not received"
        self.n_sent = 0
        self.timer = None
        self.conn1 = None
        self.conn2 = None
        self.sender = None
        self.num_msgs = 104
        self.sent_count = 0
        self.received_count = 0
        self.receiver = None

    def timeout(self):
        # Report progress and shut down; self.error remains non-None.
        self.error = "Timeout Expired: sent=%d, received=%d" % (self.n_sent, self.received_count)
        self.conn1.close()
        self.conn2.close()

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn1 = event.container.connect(self.address1)
        self.sender = event.container.create_sender(self.conn1, self.dest)
        self.conn2 = event.container.connect(self.address2)
        self.receiver = event.container.create_receiver(self.conn2, self.dest)

    def on_sendable(self, event):
        if self.n_sent <= self.num_msgs - 1:
            msg = Message(body="Hello World")
            self.sender.send(msg)
            self.n_sent += 1

    def check_if_done(self):
        if self.n_sent == self.received_count:
            self.error = None
            self.timer.cancel()
            self.conn1.close()
            self.conn2.close()

    def on_message(self, event):
        self.received_count += 1
        self.check_if_done()

    def run(self):
        Container(self).run()
class MessageAnnotationsTest(MessagingHandler):
    """Verify the routers add ingress and trace message annotations.

    A message sent with no annotations must arrive carrying
    x-opt-qd.ingress == '0/QDR.A' and x-opt-qd.trace == ['0/QDR.A',
    '0/QDR.B'].  self.error is None on success.
    """
    def __init__(self, address1, address2):
        super(MessageAnnotationsTest, self).__init__()
        self.address1 = address1
        self.address2 = address2
        self.dest = "ma/1"
        self.error = "Message annotations not found"
        self.timer = None
        self.conn1 = None
        self.conn2 = None
        self.sender = None
        self.receiver = None
        self.sent_count = 0
        self.msg_not_sent = True
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn1 = event.container.connect(self.address1)
        self.sender = event.container.create_sender(self.conn1, self.dest)
        self.conn2 = event.container.connect(self.address2)
        self.receiver = event.container.create_receiver(self.conn2, self.dest)
    def on_sendable(self, event):
        if self.msg_not_sent:
            msg = Message(body={'number': 0})
            event.sender.send(msg)
            self.msg_not_sent = False
    def on_message(self, event):
        if 0 == event.message.body['number']:
            ma = event.message.annotations
            # Both router hops must appear in the trace, in order.
            if ma['x-opt-qd.ingress'] == '0/QDR.A' and ma['x-opt-qd.trace'] == ['0/QDR.A', '0/QDR.B']:
                self.error = None
        self.accept(event.delivery)
        self.timer.cancel()
        self.conn1.close()
        self.conn2.close()
    def run(self):
        Container(self).run()
class MessageAnnotationsStripTest(MessagingHandler):
    """With stripping disabled, user annotations AND router-added
    ingress/trace annotations must all be present on delivery.
    self.error is None on success.
    """
    def __init__(self, address1, address2):
        super(MessageAnnotationsStripTest, self).__init__()
        self.address1 = address1
        self.address2 = address2
        self.dest = "message_annotations_strip_no/1"
        self.error = "Message annotations not found"
        self.timer = None
        self.conn1 = None
        self.conn2 = None
        self.sender = None
        self.receiver = None
        self.sent_count = 0
        self.msg_not_sent = True
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn1 = event.container.connect(self.address1)
        self.sender = event.container.create_sender(self.conn1, self.dest)
        self.conn2 = event.container.connect(self.address2)
        self.receiver = event.container.create_receiver(self.conn2, self.dest)
    def on_sendable(self, event):
        if self.msg_not_sent:
            msg = Message(body={'number': 0})
            # User-supplied annotations that must survive the trip.
            ingress_message_annotations = {'work': 'hard', 'stay': 'humble'}
            msg.annotations = ingress_message_annotations
            event.sender.send(msg)
            self.msg_not_sent = False
    def on_message(self, event):
        if 0 == event.message.body['number']:
            ma = event.message.annotations
            # Router-added ingress/trace coexist with the user annotations.
            if ma['x-opt-qd.ingress'] == '0/QDR.A' and ma['x-opt-qd.trace'] == ['0/QDR.A', '0/QDR.B'] \
                    and ma['work'] == 'hard' and ma['stay'] == 'humble':
                self.error = None
        self.accept(event.delivery)
        self.timer.cancel()
        self.conn1.close()
        self.conn2.close()
    def run(self):
        Container(self).run()
class ManagementTest(MessagingHandler):
    """Exercise management node discovery (GET-MGMT-NODES).

    Two requests are sent: one to the local management node (correlation
    id C1) and one addressed across the network to QDR.B's management
    node (correlation id C2).  Each reply must carry statusCode 200 and
    list the *other* router's management address.  self.error is None on
    success.

    Bug fixes:
    - timeout() tested response1 twice (response2 was never checked) and
      concatenated onto self.error while it was still None (TypeError).
    - on_message() previously tore down after the FIRST reply because
      self.error starts as None, so the test could pass without ever
      validating the second response; teardown now requires both.
    """
    def __init__(self, address):
        super(ManagementTest, self).__init__()
        self.address = address
        self.timer = None
        self.conn = None
        self.sender = None
        self.receiver = None
        self.sent_count = 0
        self.msg_not_sent = True
        self.error = None
        self.response1 = False  # valid reply seen for correlation id C1
        self.response2 = False  # valid reply seen for correlation id C2

    def timeout(self):
        missing = []
        if not self.response1:
            missing.append("Incorrect response received for message with correlation id C1")
        if not self.response2:
            missing.append("Incorrect response received for message with correlation id C2")
        if missing:
            self.error = " ".join(missing)
        self.conn.close()

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn = event.container.connect(self.address)
        self.sender = event.container.create_sender(self.conn)
        self.receiver = event.container.create_receiver(self.conn, None, dynamic=True)

    def on_link_opened(self, event):
        if event.receiver == self.receiver:
            # Reply link is up: fire both management requests.
            request = Message()
            request.correlation_id = "C1"
            request.address = "amqp:/_local/$management"
            request.properties = {u'type': u'org.amqp.management', u'name': u'self', u'operation': u'GET-MGMT-NODES'}
            request.reply_to = self.receiver.remote_source.address
            self.sender.send(request)

            request = Message()
            request.address = "amqp:/_topo/0/QDR.B/$management"
            request.correlation_id = "C2"
            request.reply_to = self.receiver.remote_source.address
            request.properties = {u'type': u'org.amqp.management', u'name': u'self', u'operation': u'GET-MGMT-NODES'}
            self.sender.send(request)

    def on_message(self, event):
        if event.receiver == self.receiver:
            if event.message.correlation_id == "C1":
                if event.message.properties['statusCode'] == 200 and \
                        event.message.properties['statusDescription'] is not None \
                        and 'amqp:/_topo/0/QDR.B/$management' in event.message.body:
                    self.response1 = True
            elif event.message.correlation_id == "C2":
                if event.message.properties['statusCode'] == 200 and \
                        event.message.properties['statusDescription'] is not None \
                        and 'amqp:/_topo/0/QDR.A/$management' in event.message.body:
                    self.response2 = True
        # Only finish once BOTH replies have been validated.
        if self.response1 and self.response2:
            self.error = None
            self.timer.cancel()
            self.conn.close()

    def run(self):
        Container(self).run()
class MessageAnnotationStripMessageAnnotationsIn(MessagingHandler):
    """With inbound stripping enabled, user-supplied x-opt-qd.ingress and
    x-opt-qd.trace must be discarded and replaced by the router-generated
    values ('0/QDR.A' / ['0/QDR.A', '0/QDR.B']).  self.error is None on
    success.
    """
    def __init__(self, address1, address2):
        super(MessageAnnotationStripMessageAnnotationsIn, self).__init__()
        self.address1 = address1
        self.address2 = address2
        self.dest = "strip_message_annotations_in/1"
        self.error = "Message annotations not found"
        self.timer = None
        self.conn1 = None
        self.conn2 = None
        self.sender = None
        self.receiver = None
        self.sent_count = 0
        self.msg_not_sent = True
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn1 = event.container.connect(self.address1)
        self.sender = event.container.create_sender(self.conn1, self.dest)
        self.conn2 = event.container.connect(self.address2)
        self.receiver = event.container.create_receiver(self.conn2, self.dest)
    def on_sendable(self, event):
        if self.msg_not_sent:
            msg = Message(body={'number': 0})
            #
            # Pre-existing ingress and trace
            #
            ingress_message_annotations = {'x-opt-qd.ingress': 'ingress-router', 'x-opt-qd.trace': ['X/QDR']}
            msg.annotations = ingress_message_annotations
            event.sender.send(msg)
            self.msg_not_sent = False
    def on_message(self, event):
        if 0 == event.message.body['number']:
            # The forged values must have been replaced, not preserved.
            if event.message.annotations['x-opt-qd.ingress'] == '0/QDR.A' \
                    and event.message.annotations['x-opt-qd.trace'] == ['0/QDR.A', '0/QDR.B']:
                self.error = None
        self.timer.cancel()
        self.conn1.close()
        self.conn2.close()
    def run(self):
        Container(self).run()
class MessageAnnotaionsPreExistingOverride(MessagingHandler):
    """A pre-existing x-opt-qd.to annotation must survive the trip intact.

    NOTE: the class name misspells 'Annotations'; it is kept as-is because
    callers reference it by this name.  self.error is None on success.
    """
    def __init__(self, address1, address2):
        super(MessageAnnotaionsPreExistingOverride, self).__init__()
        self.address1 = address1
        self.address2 = address2
        self.dest = "toov/1"
        self.error = "Pre-existing x-opt-qd.to has been stripped"
        self.timer = None
        self.conn1 = None
        self.conn2 = None
        self.sender = None
        self.receiver = None
        self.msg_not_sent = True
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn1 = event.container.connect(self.address1)
        self.sender = event.container.create_sender(self.conn1, self.dest)
        self.conn2 = event.container.connect(self.address2)
        self.receiver = event.container.create_receiver(self.conn2, self.dest)
    def on_sendable(self, event):
        if self.msg_not_sent:
            msg = Message(body={'number': 0})
            # Seed the annotation the routers must not remove.
            msg.annotations = {'x-opt-qd.to': 'toov/1'}
            event.sender.send(msg)
            self.msg_not_sent = False
    def on_message(self, event):
        if 0 == event.message.body['number']:
            ma = event.message.annotations
            if ma['x-opt-qd.to'] == 'toov/1':
                self.error = None
        self.timer.cancel()
        self.conn1.close()
        self.conn2.close()
    def run(self):
        Container(self).run()
class MessageAnnotationsStripMessageAnnotationsOut(MessagingHandler):
    """With outbound stripping enabled, the delivered message must carry
    NO annotations at all.  self.error is None on success.
    """
    def __init__(self, address1, address2):
        super(MessageAnnotationsStripMessageAnnotationsOut, self).__init__()
        self.address1 = address1
        self.address2 = address2
        self.dest = "strip_message_annotations_out/1"
        self.error = "Message annotations not found"
        self.timer = None
        self.conn1 = None
        self.conn2 = None
        self.sender = None
        self.receiver = None
        self.sent_count = 0
        self.msg_not_sent = True
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn1 = event.container.connect(self.address1)
        self.sender = event.container.create_sender(self.conn1, self.dest)
        self.conn2 = event.container.connect(self.address2)
        self.receiver = event.container.create_receiver(self.conn2, self.dest)
    def on_sendable(self, event):
        if self.msg_not_sent:
            msg = Message(body={'number': 0})
            event.sender.send(msg)
            self.msg_not_sent = False
    def on_message(self, event):
        if 0 == event.message.body['number']:
            # Everything, including router-added annotations, must be gone.
            if event.message.annotations is None:
                self.error = None
        self.timer.cancel()
        self.conn1.close()
        self.conn2.close()
    def run(self):
        Container(self).run()
class MessageAnnotationsStripBothAddIngressTrace(MessagingHandler):
    """With stripping on both directions, user annotations must survive
    while every router-owned key (x-opt-qd.ingress / x-opt-qd.trace) is
    removed.  self.error is None on success.
    """
    def __init__(self, address1, address2):
        super(MessageAnnotationsStripBothAddIngressTrace, self).__init__()
        self.address1 = address1
        self.address2 = address2
        self.dest = "strip_message_annotations_both_add_ingress_trace/1"
        self.error = "Message annotations not found"
        self.timer = None
        self.conn1 = None
        self.conn2 = None
        self.sender = None
        self.receiver = None
        self.sent_count = 0
        self.msg_not_sent = True
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn1 = event.container.connect(self.address1)
        self.sender = event.container.create_sender(self.conn1, self.dest)
        self.conn2 = event.container.connect(self.address2)
        self.receiver = event.container.create_receiver(self.conn2, self.dest)
    def on_sendable(self, event):
        if self.msg_not_sent:
            msg = Message(body={'number': 0})
            # Mix of user keys and forged router keys; only the user keys
            # ('work' and the plain 'x-opt-qd') should survive.
            ingress_message_annotations = {'work': 'hard',
                                           'x-opt-qd': 'humble',
                                           'x-opt-qd.ingress': 'ingress-router',
                                           'x-opt-qd.trace': ['0/QDR.A']}
            msg.annotations = ingress_message_annotations
            event.sender.send(msg)
            self.msg_not_sent = False
    def on_message(self, event):
        if 0 == event.message.body['number']:
            if event.message.annotations == {'work': 'hard', 'x-opt-qd': 'humble'}:
                self.error = None
        self.timer.cancel()
        self.conn1.close()
        self.conn2.close()
    def run(self):
        Container(self).run()
class MessageAnnotationsStripAddTraceTest(MessagingHandler):
    """A user-supplied x-opt-qd.trace entry must be EXTENDED (not replaced)
    by the routers: '0/QDR.1' is expected to remain at the head of the
    trace list.  self.error is None on success.
    """
    def __init__(self, address1, address2):
        super(MessageAnnotationsStripAddTraceTest, self).__init__()
        self.address1 = address1
        self.address2 = address2
        self.dest = "message_annotations_strip_no/1"
        self.error = "Message annotations not found"
        self.timer = None
        self.conn1 = None
        self.conn2 = None
        self.sender = None
        self.receiver = None
        self.sent_count = 0
        self.msg_not_sent = True
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn1 = event.container.connect(self.address1)
        self.sender = event.container.create_sender(self.conn1, self.dest)
        self.conn2 = event.container.connect(self.address2)
        self.receiver = event.container.create_receiver(self.conn2, self.dest)
    def on_sendable(self, event):
        if self.msg_not_sent:
            msg = Message(body={'number': 0})
            # Pre-seed a trace entry the routers should append to.
            ingress_message_annotations = {'x-opt-qd.trace': ['0/QDR.1']}
            msg.annotations = ingress_message_annotations
            event.sender.send(msg)
            self.msg_not_sent = False
    def on_message(self, event):
        if 0 == event.message.body['number']:
            ma = event.message.annotations
            if ma['x-opt-qd.ingress'] == '0/QDR.A' and ma['x-opt-qd.trace'] == ['0/QDR.1', '0/QDR.A', '0/QDR.B']:
                self.error = None
        self.accept(event.delivery)
        self.timer.cancel()
        self.conn1.close()
        self.conn2.close()
    def run(self):
        Container(self).run()
class SenderSettlesFirst(MessagingHandler):
    """The sender settles its delivery immediately after sending; the
    message must still be delivered and accepted by the receiver.
    self.error is None on success.
    """
    def __init__(self, address1, address2):
        # auto_accept=False: the receiver accepts explicitly in on_message.
        super(SenderSettlesFirst, self).__init__(auto_accept=False)
        self.address1 = address1
        self.address2 = address2
        self.dest = "closest.senderfirst.1"
        self.error = "Message body received differs from the one sent"
        self.n_sent = 0
        self.timer = None
        self.conn1 = None
        self.conn2 = None
        self.sender = None
        self.sent_count = 0
        self.received_count = 0
        self.receiver = None
        self.msg_not_sent = True
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn1 = event.container.connect(self.address1)
        self.sender = event.container.create_sender(self.conn1, self.dest)
        self.conn2 = event.container.connect(self.address2)
        self.receiver = event.container.create_receiver(self.conn2, self.dest)
    def on_sendable(self, event):
        if self.msg_not_sent:
            msg = Message(body={'number': 0})
            # Settle on the sender side before any disposition comes back.
            dlv = event.sender.send(msg)
            dlv.settle()
            self.msg_not_sent = False
    def on_message(self, event):
        if 0 == event.message.body['number']:
            self.error = None
        self.accept(event.delivery)
        self.timer.cancel()
        self.conn1.close()
        self.conn2.close()
    def run(self):
        Container(self).run()
class MulticastUnsettled(MessagingHandler):
    """One message sent to a multicast address must be delivered to all
    three attached receivers (count == 3 total).  self.error is None on
    success.
    """
    def __init__(self, address):
        super(MulticastUnsettled, self).__init__()
        self.address = address
        self.dest = "multicast.2"
        self.error = None
        self.n_sent = 0
        self.count = 3  # one copy expected per receiver
        self.n_received_a = 0
        self.n_received_b = 0
        self.n_received_c = 0
        self.timer = None
        self.conn = None
        self.sender = None
        self.receiver_a = None
        self.receiver_b = None
        self.receiver_c = None
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn = event.container.connect(self.address)
        self.sender = event.container.create_sender(self.conn, self.dest)
        self.receiver_a = event.container.create_receiver(self.conn, self.dest, name="A")
        self.receiver_b = event.container.create_receiver(self.conn, self.dest, name="B")
        self.receiver_c = event.container.create_receiver(self.conn, self.dest, name="C")
    def timeout(self):
        self.error = "Timeout Expired: sent=%d rcvd=%d/%d/%d" % \
                     (self.n_sent, self.n_received_a, self.n_received_b, self.n_received_c)
        self.conn.close()
    def check_if_done(self):
        if self.n_received_a + self.n_received_b + self.n_received_c == self.count:
            self.timer.cancel()
            self.conn.close()
    def on_sendable(self, event):
        if self.n_sent == 0:
            msg = Message(body="MulticastUnsettled-Test")
            self.sender.send(msg)
            self.n_sent += 1
    def on_message(self, event):
        if event.receiver == self.receiver_a:
            self.n_received_a += 1
        if event.receiver == self.receiver_b:
            self.n_received_b += 1
        if event.receiver == self.receiver_c:
            self.n_received_c += 1
    def on_accepted(self, event):
        self.check_if_done()
    def run(self):
        Container(self).run()
class SemanticsClosestIsLocal(MessagingHandler):
    """Closest distribution with one local and two remote receivers:
    all 100 messages must land on the local receiver (A) and none on the
    remote ones (B, C).  self.error is None on success.
    """
    def __init__(self, address1, address2):
        super(SemanticsClosestIsLocal, self).__init__()
        self.address1 = address1
        self.address2 = address2
        self.dest = "closest.1"
        self.timer = None
        self.conn1 = None
        self.conn2 = None
        self.sender = None
        self.receiver_a = None
        self.receiver_b = None
        self.receiver_c = None
        self.num_messages = 100
        self.n_received_a = 0
        self.n_received_b = 0
        self.n_received_c = 0
        self.error = None
        self.n_sent = 0
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn1 = event.container.connect(self.address1)
        self.conn2 = event.container.connect(self.address2)
        self.sender = event.container.create_sender(self.conn1, self.dest)
        # Receiver on same router as the sender must receive all the messages. The other two
        # receivers are on the other router
        self.receiver_a = event.container.create_receiver(self.conn1, self.dest, name="A")
        self.receiver_b = event.container.create_receiver(self.conn2, self.dest, name="B")
        self.receiver_c = event.container.create_receiver(self.conn2, self.dest, name="C")
    def timeout(self):
        self.error = "Timeout Expired: sent=%d rcvd=%d/%d/%d" % \
                     (self.n_sent, self.n_received_a, self.n_received_b, self.n_received_c)
        self.conn1.close()
        self.conn2.close()
    def check_if_done(self):
        # Strict: the local receiver got everything, the remote ones nothing.
        if self.n_received_a == 100 and self.n_received_b + self.n_received_c == 0:
            self.timer.cancel()
            self.conn1.close()
            self.conn2.close()
    def on_sendable(self, event):
        if self.n_sent < self.num_messages:
            msg = Message(body="SemanticsClosestIsLocal-Test")
            self.sender.send(msg)
            self.n_sent += 1
    def on_message(self, event):
        if event.receiver == self.receiver_a:
            self.n_received_a += 1
        if event.receiver == self.receiver_b:
            self.n_received_b += 1
        if event.receiver == self.receiver_c:
            self.n_received_c += 1
    def on_accepted(self, event):
        self.check_if_done()
    def run(self):
        Container(self).run()
class SemanticsClosestIsRemote(MessagingHandler):
    """Closest distribution with only remote receivers: the 100 messages
    must all arrive, split across receivers A and B with each getting at
    least one.  self.error is None on success.
    """
    def __init__(self, address1, address2):
        super(SemanticsClosestIsRemote, self).__init__()
        self.address1 = address1
        self.address2 = address2
        self.dest = "closest.1"
        self.timer = None
        self.conn1 = None
        self.conn2 = None
        self.sender = None
        self.receiver_a = None
        self.receiver_b = None
        self.receiver_c = None
        self.num_messages = 100
        self.n_received_a = 0
        self.n_received_b = 0
        self.error = None
        self.n_sent = 0
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn1 = event.container.connect(self.address1)
        self.conn2 = event.container.connect(self.address2)
        self.sender = event.container.create_sender(self.conn1, self.dest)
        # Both receivers are connected to a different router than the sender.
        self.receiver_a = event.container.create_receiver(self.conn2, self.dest, name="A")
        self.receiver_b = event.container.create_receiver(self.conn2, self.dest, name="B")
    def timeout(self):
        self.error = "Timeout Expired: sent=%d rcvd=%d/%d" % \
                     (self.n_sent, self.n_received_a, self.n_received_b)
        self.conn1.close()
        self.conn2.close()
    def check_if_done(self):
        # All delivered and both receivers participated.
        if self.n_received_a + self.n_received_b == 100 and self.n_received_a > 0 and self.n_received_b > 0:
            self.timer.cancel()
            self.conn1.close()
            self.conn2.close()
    def on_sendable(self, event):
        if self.n_sent < self.num_messages:
            msg = Message(body="SemanticsClosestIsRemote-Test")
            self.sender.send(msg)
            self.n_sent += 1
    def on_message(self, event):
        if event.receiver == self.receiver_a:
            self.n_received_a += 1
        if event.receiver == self.receiver_b:
            self.n_received_b += 1
    def on_accepted(self, event):
        self.check_if_done()
    def run(self):
        Container(self).run()
class CustomTimeout(object):
    """Polling timer: every 2 seconds, check the router's address table for
    'balanced.1'; once it appears, cancel the poller and let the parent
    create its sender."""

    def __init__(self, parent):
        self.parent = parent

    def addr_text(self, addr):
        """Strip the router-internal class prefix from an address name.

        Mobile addresses (leading 'M' plus a phase character) lose two
        leading characters; every other class loses one.  Falsy input
        yields the empty string.
        """
        if not addr:
            return ""
        return addr[2:] if addr[0] == 'M' else addr[1:]

    def on_timer_task(self, event):
        node = Node.connect(self.parent.address1, timeout=TIMEOUT)
        response = node.query('org.apache.qpid.dispatch.router.address')
        name_col = response.attribute_names.index('name')
        present = any("balanced.1" == self.addr_text(row[name_col])
                      for row in response.results)
        if present:
            self.parent.cancel_custom()
            self.parent.create_sender(event)
        else:
            # Not there yet: poll again in 2 seconds.
            event.reactor.schedule(2, self)
class SemanticsBalanced(MessagingHandler):
    """Verify 'balanced' distribution across two routers: one receiver is
    local to the sender and two are remote.  All messages must be
    delivered exactly once and every receiver must get at least one.
    """
    def __init__(self, address1, address2, address3):
        super(SemanticsBalanced, self).__init__(auto_accept=False, prefetch=0)
        self.address1 = address1
        self.address2 = address2
        self.address3 = address3
        self.dest = "balanced.1"
        self.timer = None
        self.conn1 = None
        self.conn2 = None
        self.conn3 = None
        self.sender = None
        self.receiver_a = None
        self.receiver_b = None
        self.receiver_c = None
        self.num_messages = 400
        self.n_received_a = 0
        self.n_received_b = 0
        self.n_received_c = 0
        self.error = None
        self.n_sent = 0
        # Sequence numbers collected from all receivers; used to verify
        # each message was delivered exactly once.
        self.rx_set = []
        self.custom_timer = None
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        # CustomTimeout delays sender creation until the 'balanced.1'
        # address has shown up in the first router's address table.
        self.custom_timer = event.reactor.schedule(2, CustomTimeout(self))
        self.conn1 = event.container.connect(self.address1)
        self.conn2 = event.container.connect(self.address2)
        self.conn3 = event.container.connect(self.address3)
        # This receiver is on the same router as the sender.
        self.receiver_a = event.container.create_receiver(self.conn2, self.dest, name="A")
        # These two receivers are connected to a different router than the sender.
        self.receiver_b = event.container.create_receiver(self.conn3, self.dest, name="B")
        self.receiver_c = event.container.create_receiver(self.conn3, self.dest, name="C")
        # Grant each receiver 300 credits so no single receiver can absorb
        # all 400 messages by itself.
        self.receiver_a.flow(300)
        self.receiver_b.flow(300)
        self.receiver_c.flow(300)
    def cancel_custom(self):
        self.custom_timer.cancel()
    def create_sender(self, event):
        # Invoked by CustomTimeout once the address is known to router 1.
        self.sender = event.container.create_sender(self.conn1, self.dest)
    def timeout(self):
        self.error = "Timeout Expired: sent=%d rcvd=%d/%d/%d" % \
            (self.n_sent, self.n_received_a, self.n_received_b, self.n_received_c)
        self.conn1.close()
        self.conn2.close()
        self.conn3.close()
    def check_if_done(self):
        # Done when all messages arrived, every receiver got at least one,
        # and the sorted sequence numbers form exactly 0..num_messages-1
        # (i.e. each message was delivered once and only once).
        if self.n_received_a + self.n_received_b + self.n_received_c == self.num_messages and \
                self.n_received_a > 0 and self.n_received_b > 0 and self.n_received_c > 0:
            self.rx_set.sort()
            all_messages_received = True
            for i in range(self.num_messages):
                if not i == self.rx_set[i]:
                    all_messages_received = False
            if all_messages_received:
                self.timer.cancel()
                self.conn1.close()
                self.conn2.close()
                self.conn3.close()
    def on_sendable(self, event):
        if self.n_sent < self.num_messages:
            msg = Message(body={'number': self.n_sent})
            self.sender.send(msg)
            self.n_sent += 1
    def on_message(self, event):
        if event.receiver == self.receiver_a:
            self.n_received_a += 1
            self.rx_set.append(event.message.body['number'])
        elif event.receiver == self.receiver_b:
            self.n_received_b += 1
            self.rx_set.append(event.message.body['number'])
        elif event.receiver == self.receiver_c:
            self.n_received_c += 1
            self.rx_set.append(event.message.body['number'])
        self.check_if_done()
    def run(self):
        Container(self).run()
class PropagatedDisposition(MessagingHandler):
    """Check that delivery dispositions (accept/reject) applied by the
    receiver propagate back across the routers to the sender unchanged.
    """

    def __init__(self, test, address1, address2):
        super(PropagatedDisposition, self).__init__(auto_accept=False)
        self.address1 = address1
        self.address2 = address2
        self.settled = []
        self.test = test
        self.sender_conn = None
        self.receiver_conn = None
        self.passed = False
        # Previously these were only created later (in on_start/timeout),
        # so reading them before then raised AttributeError.
        self.timer = None
        self.error = None

    def _unique_settled(self):
        # Sorted list of the distinct dispositions seen so far.
        return sorted(list(dict.fromkeys(self.settled)))

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.sender_conn = event.container.connect(self.address1)
        self.receiver_conn = event.container.connect(self.address2)
        addr = "unsettled/2"
        self.sender = event.container.create_sender(self.sender_conn, addr)
        self.receiver = event.container.create_receiver(self.receiver_conn, addr)
        self.receiver.flow(2)
        # Send one message to be accepted and one to be rejected; remember
        # which tracker corresponds to which expected outcome.
        self.trackers = {}
        for b in ['accept', 'reject']:
            self.trackers[self.sender.send(Message(body=b))] = b

    def timeout(self):
        self.error = "Timeout Expired: Expected ['accept', 'reject'] got %s" % self._unique_settled()
        self.sender_conn.close()
        self.receiver_conn.close()

    def check(self):
        if self._unique_settled() == [u'accept', u'reject']:
            self.passed = True
            self.sender_conn.close()
            self.receiver_conn.close()
            self.timer.cancel()

    def on_message(self, event):
        # Receiver side: settle with the disposition named in the body.
        if event.message.body == u'accept':
            event.delivery.update(Delivery.ACCEPTED)
            event.delivery.settle()
        elif event.message.body == u'reject':
            event.delivery.update(Delivery.REJECTED)
            event.delivery.settle()

    def on_accepted(self, event):
        # Sender side: the propagated disposition must match the one the
        # tracker was recorded against.
        self.test.assertEqual(Delivery.ACCEPTED, event.delivery.remote_state)
        self.test.assertEqual('accept', self.trackers[event.delivery])
        self.settled.append('accept')
        self.check()

    def on_rejected(self, event):
        self.test.assertEqual(Delivery.REJECTED, event.delivery.remote_state)
        self.test.assertEqual('reject', self.trackers[event.delivery])
        self.settled.append('reject')
        self.check()

    def run(self):
        Container(self).run()
class ThreeAck(MessagingHandler):
    """Exercise the three-ack (settle-after-disposition) transfer pattern:
    the receiver accepts without settling, the sender settles after seeing
    the accept, and the receiver then observes the settlement.
    ``phase`` tracks progress: 0 = message in flight, 1 = accepted but
    unsettled, 2 = sender settled, 3 = receiver saw the settlement.
    """
    def __init__(self, test, address1, address2):
        super(ThreeAck, self).__init__(auto_accept=False, auto_settle=False)
        self.addrs = [address1, address2]
        self.settled = []
        self.test = test
        self.phase = 0
    def on_start(self, event):
        connections = [event.container.connect(a) for a in self.addrs]
        addr = "three_ack/1"
        self.sender = event.container.create_sender(connections[0], addr)
        self.receiver = event.container.create_receiver(connections[1], addr)
        self.receiver.flow(1)
        self.tracker = self.sender.send(Message('hello'))
    def on_message(self, event):
        # Phase 0 -> 1: receiver got the message; accept but do not settle.
        self.test.assertEqual(0, self.phase)
        self.phase = 1
        self.test.assertFalse(event.delivery.settled)
        self.test.assertEqual(0, self.tracker.local_state)
        self.test.assertEqual(0, self.tracker.remote_state)
        event.delivery.update(Delivery.ACCEPTED)
        # NOTE: we don't settle yet for 3-ack
    def on_accepted(self, event):
        # Phase 1 -> 2: sender saw the accept disposition; settle now.
        self.test.assertTrue(event.sender)
        self.test.assertEqual(1, self.phase)
        self.phase = 2
        self.test.assertEqual(Delivery.ACCEPTED, event.delivery.remote_state)
        self.test.assertFalse(event.delivery.settled)
        self.test.assertEqual(0, event.delivery.local_state)
        event.delivery.settle()
        self.test.assertFalse(event.delivery.settled)
        event.connection.close()
    def on_settled(self, event):
        # Phase 2 -> 3: receiver observed the sender's settlement.
        self.test.assertTrue(event.receiver)
        self.test.assertEqual(2, self.phase)
        self.phase = 3
        event.connection.close()
    def run(self):
        Container(self).run()
        # All three acknowledgement steps must have completed.
        self.test.assertEqual(3, self.phase)
class TwoRouterConnection(TestCase):
    """Tests dynamic creation of inter-router connectors via qdmanage."""

    def __init__(self, test_method):
        TestCase.__init__(self, test_method)
        self.success = False
        self.timer_delay = 4
        self.max_attempts = 2
        self.attempts = 0
        self.local_node = None

    @classmethod
    def router(cls, name, config):
        config = Qdrouterd.Config(config)
        cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))

    @classmethod
    def setUpClass(cls):
        """Start router A plus router B with two normal listeners that
        connectors will later be pointed at.
        """
        super(TwoRouterConnection, cls).setUpClass()
        cls.routers = []
        cls.B_normal_port_1 = cls.tester.get_port()
        cls.B_normal_port_2 = cls.tester.get_port()
        TwoRouterConnection.router('A', [
            ('router', {'mode': 'interior', 'id': 'A'}),
            ('listener', {'host': '0.0.0.0', 'role': 'normal',
                          'port': cls.tester.get_port()}),
        ])
        TwoRouterConnection.router('B', [
            ('router', {'mode': 'interior', 'id': 'B'}),
            ('listener', {'host': '0.0.0.0', 'role': 'normal',
                          'port': cls.B_normal_port_1}),
            ('listener', {'host': '0.0.0.0', 'role': 'normal',
                          'port': cls.B_normal_port_2}),
        ])

    def address(self):
        return self.routers[0].addresses[0]

    def run_qdmanage(self, cmd, input=None, expect=Process.EXIT_OK, address=None):
        """Run a qdmanage command against the given (or default) router and
        return its output; raises with the output on teardown failure.
        """
        p = self.popen(
            ['qdmanage'] + cmd.split(' ') + ['--bus', address or self.address(), '--indent=-1', '--timeout', str(TIMEOUT)],
            stdin=PIPE, stdout=PIPE, stderr=STDOUT, expect=expect,
            universal_newlines=True)
        out = p.communicate(input)[0]
        try:
            p.teardown()
        except Exception as e:
            raise Exception(out if out else str(e))
        return out

    def can_terminate(self):
        # Stop when the check succeeded or all polling attempts are used.
        return self.success or self.attempts == self.max_attempts

    def check_connections(self):
        res = self.local_node.query(type='org.apache.qpid.dispatch.connection')
        results = res.results
        if len(results) != 3:
            # Not all three connections are up yet; poll again.
            self.schedule_num_connections_test()
        else:
            self.success = True

    def schedule_num_connections_test(self):
        # Re-run check_connections after timer_delay seconds, up to
        # max_attempts times.
        if self.attempts < self.max_attempts:
            if not self.success:
                Timer(self.timer_delay, self.check_connections).start()
                self.attempts += 1

    def test_create_connectors(self):
        self.local_node = Node.connect(self.routers[0].addresses[0],
                                       timeout=TIMEOUT)

        res = self.local_node.query(type='org.apache.qpid.dispatch.connection')
        results = res.results
        # Initially only the management connection exists.
        self.assertEqual(1, len(results))

        # Was "'org.apache.qpid.dispatch.connector' ''" — the trailing
        # empty literal was a no-op concatenation and has been removed.
        long_type = 'org.apache.qpid.dispatch.connector'
        create_command = 'CREATE --type=' + long_type + ' --name=foo' + ' host=0.0.0.0 port=' + str(TwoRouterConnection.B_normal_port_1)
        self.run_qdmanage(create_command)

        create_command = 'CREATE --type=' + long_type + ' --name=bar' + ' host=0.0.0.0 port=' + str(TwoRouterConnection.B_normal_port_2)
        self.run_qdmanage(create_command)

        self.schedule_num_connections_test()

        # Sleep briefly between checks instead of busy-spinning a CPU
        # core while the background Timer polls the connection count.
        import time
        while not self.can_terminate():
            time.sleep(0.1)

        self.assertTrue(self.success)
class PropagationTest(TestCase):
    """Check that a subscription to an address that is initially undefined
    on one router propagates to the connected router.
    """
    inter_router_port = None
    @classmethod
    def setUpClass(cls):
        """Start a pair of connected interior routers A and B."""
        super(PropagationTest, cls).setUpClass()
        def router(name, extra_config):
            # Base config: an interior router with one client listener,
            # plus whatever the caller supplies.
            config = [
                ('router', {'mode': 'interior', 'id': 'QDR.%s'%name}),
                ('listener', {'port': cls.tester.get_port()}),
            ] + extra_config
            config = Qdrouterd.Config(config)
            cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))
        cls.routers = []
        inter_router_port = cls.tester.get_port()
        router('A', [('listener', {'role': 'inter-router', 'port': inter_router_port}), ('address', {'prefix': 'multicast', 'distribution': 'multicast'})])
        router('B', [('connector', {'role': 'inter-router', 'port': inter_router_port})])
        cls.routers[0].wait_router_connected('QDR.B')
        cls.routers[1].wait_router_connected('QDR.A')
    def test_propagation_of_locally_undefined_address(self):
        # Both receivers (one per router) must get the multicast message.
        test = MulticastTestClient(self.routers[0].addresses[0], self.routers[1].addresses[0])
        test.run()
        self.assertEqual(None, test.error)
        self.assertEqual(test.received, 2)
class CreateReceiver(MessagingHandler):
    """Timer task: when it fires, attach a receiver for ``address`` on
    ``connection``, allowing receiver creation to be deferred.
    """

    def __init__(self, connection, address):
        super(CreateReceiver, self).__init__()
        self.connection = connection
        self.address = address

    def on_timer_task(self, event):
        # The scheduled delay has elapsed; create the receiver now.
        container = event.container
        container.create_receiver(self.connection, self.address)
class DelayedSend(MessagingHandler):
    """Timer task: when it fires, open a sender on ``connection`` for
    ``address`` and immediately send ``message`` through it.
    """

    def __init__(self, connection, address, message):
        super(DelayedSend, self).__init__()
        self.connection = connection
        self.address = address
        self.message = message

    def on_timer_task(self, event):
        sender = event.container.create_sender(self.connection, self.address)
        sender.send(self.message)
class MulticastTestClient(MessagingHandler):
    """Client for PropagationTest: attaches one multicast receiver per
    router (the second one after a delay), then sends a single message
    that must be delivered to both receivers.
    """
    def __init__(self, router1, router2):
        super(MulticastTestClient, self).__init__()
        self.routers = [router1, router2]
        self.received = 0
        self.error = None
    def on_start(self, event):
        self.connections = [event.container.connect(r) for r in self.routers]
        event.container.create_receiver(self.connections[0], "multicast")
        # Attach the second receiver after 5s and send after 7s, so the
        # send only happens once both subscriptions should exist.
        event.container.schedule(5, CreateReceiver(self.connections[1], "multicast"))
        event.container.schedule(7, DelayedSend(self.connections[1], "multicast", Message(body="testing1,2,3")))
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
    def on_message(self, event):
        self.received += 1
        event.connection.close()
        # Success once both receivers have seen the multicast message.
        if self.received == 2:
            self.timer.cancel()
    def timeout(self):
        self.error = "Timeout Expired:received=%d" % self.received
        for c in self.connections:
            c.close()
    def run(self):
        Container(self).run()
# Entry point: run this module's tests under the shared test runner.
if __name__ == '__main__':
    unittest.main(main_module())
| true | true |
f720425769262bb20bd711e6f74901a646158501 | 45,642 | py | Python | core/controllers/profile_test.py | mohitkh7/oppia | d322e6ed8f9d018cc95335544c4fac7290b89af0 | [
"Apache-2.0"
] | null | null | null | core/controllers/profile_test.py | mohitkh7/oppia | d322e6ed8f9d018cc95335544c4fac7290b89af0 | [
"Apache-2.0"
] | null | null | null | core/controllers/profile_test.py | mohitkh7/oppia | d322e6ed8f9d018cc95335544c4fac7290b89af0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the profile page."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import datetime
import re
from constants import constants
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import rights_manager
from core.domain import subscription_services
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
import feconf
import utils
(user_models,) = models.Registry.import_models([models.NAMES.user])
class ProfilePageTests(test_utils.GenericTestBase):
    """Tests for rendering the user profile page."""

    def test_get_profile_page_of_non_existing_user_raises_status_404(self):
        profile_url = '/profile/%s' % self.OWNER_USERNAME
        self.get_html_response(profile_url, expected_status_int=404)

    def test_get_profile_page_of_existing_user(self):
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        profile_url = '/profile/%s' % self.OWNER_USERNAME
        response = self.get_html_response(profile_url)
        self.assertIn('<profile-page></profile-page>', response.body)
class ProfileDataHandlerTests(test_utils.GenericTestBase):
    """Tests for the handlers that serve profile and preference data."""
    def test_preference_page_updates(self):
        """PUT updates to preferences are reflected in subsequent GETs."""
        self.signup(self.EDITOR_EMAIL, username=self.EDITOR_USERNAME)
        self.login(self.EDITOR_EMAIL)
        csrf_token = self.get_new_csrf_token()
        original_preferences = self.get_json('/preferenceshandler/data')
        self.assertEqual(
            ['en'], original_preferences['preferred_language_codes'])
        self.assertIsNone(original_preferences['preferred_site_language_code'])
        self.assertIsNone(original_preferences['preferred_audio_language_code'])
        self.put_json(
            '/preferenceshandler/data',
            {'update_type': 'preferred_site_language_code', 'data': 'en'},
            csrf_token=csrf_token)
        self.put_json(
            '/preferenceshandler/data',
            {'update_type': 'preferred_audio_language_code', 'data': 'hi-en'},
            csrf_token=csrf_token)
        self.put_json(
            '/preferenceshandler/data',
            {'update_type': 'preferred_language_codes', 'data': ['de']},
            csrf_token=csrf_token)
        new_preferences = self.get_json('/preferenceshandler/data')
        self.assertEqual(new_preferences['preferred_language_codes'], ['de'])
        self.assertEqual(new_preferences['preferred_site_language_code'], 'en')
        self.assertEqual(
            new_preferences['preferred_audio_language_code'], 'hi-en')
    def test_profile_data_is_independent_of_currently_logged_in_user(self):
        """A user's profile data is the same no matter who views it."""
        self.signup(self.EDITOR_EMAIL, username=self.EDITOR_USERNAME)
        self.login(self.EDITOR_EMAIL)
        csrf_token = self.get_new_csrf_token()
        self.put_json(
            '/preferenceshandler/data',
            {'update_type': 'user_bio', 'data': 'My new editor bio'},
            csrf_token=csrf_token)
        self.put_json(
            '/preferenceshandler/data',
            {'update_type': 'subject_interests', 'data': ['editor', 'editing']},
            csrf_token=csrf_token)
        self.logout()
        self.signup(self.VIEWER_EMAIL, username=self.VIEWER_USERNAME)
        self.login(self.VIEWER_EMAIL)
        csrf_token = self.get_new_csrf_token()
        self.put_json(
            '/preferenceshandler/data',
            {'update_type': 'user_bio', 'data': 'My new viewer bio'},
            csrf_token=csrf_token)
        self.put_json(
            '/preferenceshandler/data',
            {'update_type': 'subject_interests', 'data': ['viewer', 'viewing']},
            csrf_token=csrf_token)
        self.logout()
        # Viewer looks at editor's profile page.
        self.login(self.VIEWER_EMAIL)
        response = self.get_json(
            '/profilehandler/data/%s' % self.EDITOR_USERNAME)
        self.assertEqual(response['user_bio'], 'My new editor bio')
        self.assertEqual(response['subject_interests'], ['editor', 'editing'])
        self.logout()
        # Editor looks at their own profile page.
        self.login(self.EDITOR_EMAIL)
        response = self.get_json(
            '/profilehandler/data/%s' % self.EDITOR_USERNAME)
        self.assertEqual(response['user_bio'], 'My new editor bio')
        self.assertEqual(response['subject_interests'], ['editor', 'editing'])
        self.logout()
        # Logged-out user looks at editor's profile page.
        response = self.get_json(
            '/profilehandler/data/%s' % self.EDITOR_USERNAME)
        self.assertEqual(response['user_bio'], 'My new editor bio')
        self.assertEqual(response['subject_interests'], ['editor', 'editing'])
    def test_preferences_page(self):
        """The preferences page renders with the expected title."""
        self.signup(self.EDITOR_EMAIL, username=self.EDITOR_USERNAME)
        self.login(self.EDITOR_EMAIL)
        response = self.get_html_response(feconf.PREFERENCES_URL)
        self.assertIn('{"title": "Preferences - Oppia"})', response.body)
        self.logout()
class UserContributionsTests(test_utils.GenericTestBase):
    """Tests for the created/edited exploration lists on a profile."""
    USERNAME_A = 'a'
    EMAIL_A = 'a@example.com'
    USERNAME_B = 'b'
    EMAIL_B = 'b@example.com'
    EXP_ID_1 = 'exp_id_1'
    def test_null_case(self):
        # Check that the profile page for a user with no contributions shows
        # that they have 0 created/edited explorations.
        self.signup(self.EMAIL_A, self.USERNAME_A)
        response_dict = self.get_json(
            '/profilehandler/data/%s' % self.USERNAME_A)
        self.assertEqual(
            response_dict['created_exp_summary_dicts'], [])
        self.assertEqual(
            response_dict['edited_exp_summary_dicts'], [])
    def test_created(self):
        # Check that the profile page for a user who has created
        # a single exploration shows 1 created and 1 edited exploration.
        self.signup(self.EMAIL_A, self.USERNAME_A)
        user_a_id = self.get_user_id_from_email(self.EMAIL_A)
        user_a = user_services.UserActionsInfo(user_a_id)
        self.save_new_valid_exploration(
            self.EXP_ID_1, user_a_id, end_state_name='End')
        rights_manager.publish_exploration(user_a, self.EXP_ID_1)
        response_dict = self.get_json(
            '/profilehandler/data/%s' % self.USERNAME_A)
        self.assertEqual(len(
            response_dict['created_exp_summary_dicts']), 1)
        self.assertEqual(len(
            response_dict['edited_exp_summary_dicts']), 1)
        self.assertEqual(
            response_dict['created_exp_summary_dicts'][0]['id'],
            self.EXP_ID_1)
        self.assertEqual(
            response_dict['edited_exp_summary_dicts'][0]['id'],
            self.EXP_ID_1)
    def test_edited(self):
        # Check that the profile page for a user who has only edited (not
        # created) a single exploration shows 0 created and 1 edited
        # exploration.
        self.signup(self.EMAIL_A, self.USERNAME_A)
        user_a_id = self.get_user_id_from_email(self.EMAIL_A)
        self.signup(self.EMAIL_B, self.USERNAME_B)
        user_b_id = self.get_user_id_from_email(self.EMAIL_B)
        user_a = user_services.UserActionsInfo(user_a_id)
        self.save_new_valid_exploration(
            self.EXP_ID_1, user_a_id, end_state_name='End')
        rights_manager.publish_exploration(user_a, self.EXP_ID_1)
        exp_services.update_exploration(
            user_b_id, self.EXP_ID_1, [exp_domain.ExplorationChange({
                'cmd': 'edit_exploration_property',
                'property_name': 'objective',
                'new_value': 'the objective'
            })], 'Test edit')
        response_dict = self.get_json(
            '/profilehandler/data/%s' % self.USERNAME_B)
        self.assertEqual(len(
            response_dict['created_exp_summary_dicts']), 0)
        self.assertEqual(len(
            response_dict['edited_exp_summary_dicts']), 1)
        self.assertEqual(
            response_dict['edited_exp_summary_dicts'][0]['id'],
            self.EXP_ID_1)
        self.assertEqual(
            response_dict['edited_exp_summary_dicts'][0]['objective'],
            'the objective')
class FirstContributionDateTests(test_utils.GenericTestBase):
    """Tests that first_contribution_msec is set once and never changed."""
    USERNAME = 'abc123'
    EMAIL = 'abc123@gmail.com'
    def test_contribution_msec(self):
        # Test the contribution time shows up correctly as None.
        self.signup(self.EMAIL, self.USERNAME)
        self.login(self.EMAIL)
        user_id = self.get_user_id_from_email(self.EMAIL)
        response_dict = self.get_json(
            '/profilehandler/data/%s' % self.USERNAME)
        self.assertIsNone(response_dict['first_contribution_msec'])
        # Update the first_contribution_msec to the current time in
        # milliseconds.
        first_time_in_msecs = utils.get_current_time_in_millisecs()
        user_services.update_first_contribution_msec_if_not_set(
            user_id, first_time_in_msecs)
        # Test the contribution date correctly changes to current_time_in_msecs.
        response_dict = self.get_json(
            '/profilehandler/data/%s' % self.USERNAME)
        self.assertEqual(
            response_dict['first_contribution_msec'],
            first_time_in_msecs)
        # Test that the contribution date is not changed after the first time it
        # is set.
        second_time_in_msecs = utils.get_current_time_in_millisecs()
        user_services.update_first_contribution_msec_if_not_set(
            user_id, second_time_in_msecs)
        response_dict = self.get_json(
            '/profilehandler/data/%s' % self.USERNAME)
        self.assertEqual(
            response_dict['first_contribution_msec'],
            first_time_in_msecs)
class PreferencesHandlerTests(test_utils.GenericTestBase):
    """Tests for reading and updating user preferences."""
    EXP_ID = 'exp_id'
    EXP_TITLE = 'Exploration title'
    def setUp(self):
        super(PreferencesHandlerTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)
    def test_can_see_subscriptions(self):
        """Subscribing/unsubscribing is reflected in the preferences data."""
        self.login(self.VIEWER_EMAIL)
        response = self.get_json(feconf.PREFERENCES_DATA_URL)
        self.assertEqual(len(response['subscription_list']), 0)
        # Subscribe to user.
        subscription_services.subscribe_to_creator(
            self.viewer_id, self.owner_id)
        response = self.get_json(feconf.PREFERENCES_DATA_URL)
        self.assertEqual(len(response['subscription_list']), 1)
        self.assertEqual(
            response['subscription_list'][0]['creator_username'],
            self.OWNER_USERNAME)
        # Unsubscribe from user.
        subscription_services.unsubscribe_from_creator(
            self.viewer_id, self.owner_id)
        response = self.get_json(feconf.PREFERENCES_DATA_URL)
        self.assertEqual(len(response['subscription_list']), 0)
        self.logout()
    def test_can_update_profile_picture_data_url(self):
        """The profile picture can be replaced via the preferences PUT."""
        self.login(self.OWNER_EMAIL)
        csrf_token = self.get_new_csrf_token()
        user_settings = user_services.get_user_settings(self.owner_id)
        # A new user starts out with a generated PNG identicon.
        self.assertTrue(
            user_settings.profile_picture_data_url.startswith(
                'data:image/png;'))
        self.put_json(
            feconf.PREFERENCES_DATA_URL,
            payload={'update_type': 'profile_picture_data_url',
                     'data': 'new_profile_picture_data_url'},
            csrf_token=csrf_token)
        user_settings = user_services.get_user_settings(self.owner_id)
        self.assertEqual(
            user_settings.profile_picture_data_url,
            'new_profile_picture_data_url')
        self.logout()
    def test_can_update_default_dashboard(self):
        """The default dashboard preference can be set via PUT."""
        self.login(self.OWNER_EMAIL)
        csrf_token = self.get_new_csrf_token()
        user_settings = user_services.get_user_settings(self.owner_id)
        self.assertIsNone(user_settings.default_dashboard)
        self.put_json(
            feconf.PREFERENCES_DATA_URL,
            payload={'update_type': 'default_dashboard',
                     'data': constants.DASHBOARD_TYPE_CREATOR},
            csrf_token=csrf_token)
        user_settings = user_services.get_user_settings(self.owner_id)
        self.assertEqual(
            user_settings.default_dashboard, constants.DASHBOARD_TYPE_CREATOR)
        self.logout()
    def test_update_preferences_with_invalid_update_type_raises_exception(self):
        self.login(self.OWNER_EMAIL)
        csrf_token = self.get_new_csrf_token()
        # An unrecognized update_type must be rejected by the handler.
        with self.assertRaisesRegexp(Exception, 'Invalid update type:'):
            self.put_json(
                feconf.PREFERENCES_DATA_URL,
                payload={'update_type': 'invalid_update_type'},
                csrf_token=csrf_token)
        self.logout()
class LongUserBioHandlerTests(test_utils.GenericTestBase):
    """Tests for the 2000-character limit on user bios."""
    USERNAME_A = 'a'
    EMAIL_A = 'a@example.com'
    USERNAME_B = 'b'
    EMAIL_B = 'b@example.com'
    def test_userbio_within_limit(self):
        """A bio at or under the limit is accepted and stored."""
        self.signup(self.EMAIL_A, self.USERNAME_A)
        self.login(self.EMAIL_A)
        csrf_token = self.get_new_csrf_token()
        self.put_json(
            '/preferenceshandler/data', {
                'update_type': 'user_bio',
                'data': 'I am within 2000 char limit',
            }, csrf_token=csrf_token)
        preferences = self.get_json('/preferenceshandler/data')
        self.assertIsNotNone(preferences)
        self.assertEqual(
            preferences['user_bio'], 'I am within 2000 char limit')
        self.logout()
    def test_user_bio_exceeds_limit(self):
        """A bio over the limit is rejected with a 400 response."""
        self.signup(self.EMAIL_B, self.USERNAME_B)
        self.login(self.EMAIL_B)
        csrf_token = self.get_new_csrf_token()
        # 31 characters repeated 200 times is well over 2000 characters.
        user_bio_response = self.put_json(
            '/preferenceshandler/data', {
                'update_type': 'user_bio',
                'data': 'I am not within 2000 char limit' * 200
            },
            csrf_token=csrf_token, expected_status_int=400)
        self.assertEqual(user_bio_response['status_code'], 400)
        self.assertIn('User bio exceeds maximum character limit: 2000',
                      user_bio_response['error'])
        self.logout()
class ProfileLinkTests(test_utils.GenericTestBase):
    """Tests for fetching a profile picture by username."""

    USERNAME = 'abc123'
    EMAIL = 'abc123@gmail.com'
    PROFILE_PIC_URL = '/preferenceshandler/profile_picture_by_username/'

    def test_get_profile_picture_invalid_username(self):
        url = self.PROFILE_PIC_URL + self.USERNAME
        self.get_json(url, expected_status_int=404)

    def test_get_profile_picture_valid_username(self):
        self.signup(self.EMAIL, self.USERNAME)
        url = self.PROFILE_PIC_URL + self.USERNAME
        response_dict = self.get_json(url)
        # Every user must have a profile picture; new users get the
        # default identicon.
        self.assertEqual(
            response_dict['profile_picture_data_url_for_username'],
            user_services.DEFAULT_IDENTICON_DATA_URL)
class EmailPreferencesTests(test_utils.GenericTestBase):
    """Tests for the email preferences chosen at signup and updated later."""

    def _assert_email_preferences(self, user_id, expected_updates_pref):
        """Checks that the user's can_receive_email_updates preference
        equals expected_updates_pref and that all other email preferences
        still carry their feconf default values.

        The previous version repeated this four-assertion block inline
        eight times across the signup tests.
        """
        email_preferences = user_services.get_email_preferences(user_id)
        self.assertEqual(
            email_preferences.can_receive_email_updates,
            expected_updates_pref)
        self.assertEqual(
            email_preferences.can_receive_editor_role_email,
            feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE)
        self.assertEqual(
            email_preferences.can_receive_feedback_message_email,
            feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
        self.assertEqual(
            email_preferences.can_receive_subscription_email,
            feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)

    def test_user_not_setting_email_prefs_on_signup(self):
        self.login(self.EDITOR_EMAIL)
        csrf_token = self.get_new_csrf_token()
        self.post_json(
            feconf.SIGNUP_DATA_URL,
            {'username': 'abc', 'agreed_to_terms': True},
            csrf_token=csrf_token)

        # The email update preference should be whatever the setting in
        # feconf is.
        editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
        with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', True):
            self._assert_email_preferences(editor_id, True)
        with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', False):
            self._assert_email_preferences(editor_id, False)

    def test_user_allowing_emails_on_signup(self):
        self.login(self.EDITOR_EMAIL)
        csrf_token = self.get_new_csrf_token()
        self.post_json(
            feconf.SIGNUP_DATA_URL,
            {'username': 'abc', 'agreed_to_terms': True,
             'can_receive_email_updates': True},
            csrf_token=csrf_token)

        # The email update preference should be True in all cases.
        editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
        with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', True):
            self._assert_email_preferences(editor_id, True)
        with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', False):
            self._assert_email_preferences(editor_id, True)

    def test_user_disallowing_emails_on_signup(self):
        self.login(self.EDITOR_EMAIL)
        csrf_token = self.get_new_csrf_token()
        self.post_json(
            feconf.SIGNUP_DATA_URL,
            {'username': 'abc', 'agreed_to_terms': True,
             'can_receive_email_updates': False},
            csrf_token=csrf_token)

        # The email update preference should be False in all cases.
        editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
        with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', True):
            self._assert_email_preferences(editor_id, False)
        with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', False):
            self._assert_email_preferences(editor_id, False)

    def test_email_preferences_updates(self):
        """Test that Preferences Handler correctly updates the email
        preferences of the user.
        """
        self.signup(self.EDITOR_EMAIL, username=self.EDITOR_USERNAME)
        editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
        self.login(self.EDITOR_EMAIL)
        csrf_token = self.get_new_csrf_token()

        payload = {
            'update_type': 'email_preferences',
            'data': {
                'can_receive_email_updates': True,
                'can_receive_editor_role_email': True,
                'can_receive_feedback_message_email': True,
                'can_receive_subscription_email': True
            }
        }

        # Allow all emails.
        self.put_json(
            '/preferenceshandler/data', payload, csrf_token=csrf_token)

        email_preferences = user_services.get_email_preferences(editor_id)
        self.assertTrue(email_preferences.can_receive_email_updates)
        self.assertTrue(email_preferences.can_receive_editor_role_email)
        self.assertTrue(email_preferences.can_receive_feedback_message_email)
        self.assertTrue(email_preferences.can_receive_subscription_email)

        payload = {
            'update_type': 'email_preferences',
            'data': {
                'can_receive_email_updates': False,
                'can_receive_editor_role_email': False,
                'can_receive_feedback_message_email': False,
                'can_receive_subscription_email': False
            }
        }

        # Disallow all emails.
        self.put_json(
            '/preferenceshandler/data', payload, csrf_token=csrf_token)

        email_preferences = user_services.get_email_preferences(editor_id)
        self.assertFalse(email_preferences.can_receive_email_updates)
        self.assertFalse(email_preferences.can_receive_editor_role_email)
        self.assertFalse(email_preferences.can_receive_feedback_message_email)
        self.assertFalse(email_preferences.can_receive_subscription_email)
class ProfilePictureHandlerTests(test_utils.GenericTestBase):
    """Tests for the logged-in user's profile picture handler."""

    def test_get_profile_picture_with_updated_value(self):
        handler_url = '/preferenceshandler/profile_picture'
        # Anonymous access is rejected.
        self.get_json(handler_url, expected_status_int=401)
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.login(self.OWNER_EMAIL)
        user_settings = user_services.get_user_settings(owner_id)
        response = self.get_json(handler_url)
        # The handler serves whatever picture is stored in the settings.
        self.assertEqual(
            response['profile_picture_data_url'],
            user_settings.profile_picture_data_url)
        user_services.update_profile_picture_data_url(
            owner_id, 'new_profile_picture')
        response = self.get_json(handler_url)
        self.assertEqual(
            response['profile_picture_data_url'], 'new_profile_picture')
        self.logout()
class SignupTests(test_utils.GenericTestBase):
    """Tests for the signup page and the signup data handler."""
    def test_signup_page_does_not_have_top_right_menu(self):
        """The signup page must not expose the logged-in user's menu."""
        self.login(self.EDITOR_EMAIL)
        response = self.get_html_response(feconf.SIGNUP_URL)
        # Sign in can't be inside an html tag, but can appear inside js code.
        response.mustcontain(no=['Logout'])
        self.logout()
    def test_going_somewhere_else_while_signing_in_logs_user_out(self):
        """Leaving the signup flow redirects through logout back to the
        requested page.
        """
        exp_services.load_demo('0')
        self.login(self.EDITOR_EMAIL)
        response = self.get_html_response(feconf.SIGNUP_URL)
        response = self.get_html_response('/create/0', expected_status_int=302)
        # The redirect target both logs the user out and carries the
        # originally requested 'create' destination.
        self.assertIn('logout', response.headers['location'])
        self.assertIn('create', response.headers['location'])
        self.logout()
    def test_to_check_url_redirection_in_signup(self):
        """To validate the redirections from return_url."""
        self.login(self.EDITOR_EMAIL)
        csrf_token = self.get_new_csrf_token()
        # Registering this user fully.
        self.post_json(
            feconf.SIGNUP_DATA_URL,
            {'username': 'abc', 'agreed_to_terms': True},
            csrf_token=csrf_token)
        def strip_domain_from_location_header(url):
            """To strip the domain from the location url."""
            splitted_url = re.match(r'(http[s]?:\/\/)?([^\/\s]+\/)(.*)', url)
            return splitted_url.group(3)
        # Absolute external URLs must not be honoured as redirect targets.
        response = self.get_html_response(
            '/signup?return_url=https://google.com', expected_status_int=302)
        self.assertEqual('', strip_domain_from_location_header(
            response.headers['location']))
        # Protocol-relative URLs are likewise rejected.
        response = self.get_html_response(
            '/signup?return_url=//google.com', expected_status_int=302)
        self.assertEqual('', strip_domain_from_location_header(
            response.headers['location']))
        # Relative paths are preserved; the fragment is not part of the
        # redirect.
        response = self.get_html_response(
            '/signup?return_url=/page#hello', expected_status_int=302)
        self.assertEqual('page', strip_domain_from_location_header(
            response.headers['location']))
        response = self.get_html_response(
            '/signup?return_url=/page/hello', expected_status_int=302)
        self.assertEqual('page/hello', strip_domain_from_location_header(
            response.headers['location']))
        # Query strings on relative paths survive the redirect.
        response = self.get_html_response(
            '/signup?return_url=/page/hello?id=tests', expected_status_int=302)
        self.assertEqual(
            'page/hello?id=tests', strip_domain_from_location_header(
                response.headers['location']))
        self.logout()
    def test_accepting_terms_is_handled_correctly(self):
        """Signup is rejected with 400 until the terms are accepted."""
        self.login(self.EDITOR_EMAIL)
        csrf_token = self.get_new_csrf_token()
        response_dict = self.post_json(
            feconf.SIGNUP_DATA_URL, {'agreed_to_terms': False},
            csrf_token=csrf_token, expected_status_int=400)
        self.assertIn('you will need to accept', response_dict['error'])
        # Non-boolean values are not treated as acceptance.
        response_dict = self.post_json(
            feconf.SIGNUP_DATA_URL,
            {'agreed_to_terms': 'Hasta la vista!'},
            csrf_token=csrf_token, expected_status_int=400)
        self.assertIn('you will need to accept', response_dict['error'])
        # Explicit True acceptance lets the signup through.
        self.post_json(
            feconf.SIGNUP_DATA_URL,
            {'agreed_to_terms': True, 'username': 'myusername'},
            csrf_token=csrf_token)
        self.logout()
    def test_username_is_handled_correctly(self):
        """Missing, empty and non-alphanumeric usernames are rejected."""
        self.login(self.EDITOR_EMAIL)
        csrf_token = self.get_new_csrf_token()
        response_dict = self.post_json(
            feconf.SIGNUP_DATA_URL, {'agreed_to_terms': True},
            csrf_token=csrf_token, expected_status_int=400)
        self.assertIn('Empty username supplied', response_dict['error'])
        response_dict = self.post_json(
            feconf.SIGNUP_DATA_URL,
            {'username': '', 'agreed_to_terms': True},
            csrf_token=csrf_token, expected_status_int=400)
        self.assertIn('Empty username supplied', response_dict['error'])
        response_dict = self.post_json(
            feconf.SIGNUP_DATA_URL,
            {'username': '!a!', 'agreed_to_terms': True},
            csrf_token=csrf_token, expected_status_int=400)
        self.assertIn(
            'can only have alphanumeric characters', response_dict['error'])
        response_dict = self.post_json(
            feconf.SIGNUP_DATA_URL,
            {'username': self.UNICODE_TEST_STRING, 'agreed_to_terms': True},
            csrf_token=csrf_token, expected_status_int=400)
        self.assertIn(
            'can only have alphanumeric characters', response_dict['error'])
        # A plain alphanumeric username is accepted.
        response_dict = self.post_json(
            feconf.SIGNUP_DATA_URL,
            {'username': 'abcde', 'agreed_to_terms': True},
            csrf_token=csrf_token)
        self.logout()
    def test_default_dashboard_for_new_users(self):
        """The default_dashboard chosen at signup is persisted."""
        self.login(self.EDITOR_EMAIL)
        csrf_token = self.get_new_csrf_token()
        # This user should have the creator dashboard as default.
        self.post_json(
            feconf.SIGNUP_DATA_URL,
            {'agreed_to_terms': True, 'username': 'creatoruser',
             'default_dashboard': constants.DASHBOARD_TYPE_CREATOR,
             'can_receive_email_updates': None},
            csrf_token=csrf_token)
        user_id = user_services.get_user_id_from_username('creatoruser')
        user_settings = user_services.get_user_settings(user_id)
        self.assertEqual(
            user_settings.default_dashboard, constants.DASHBOARD_TYPE_CREATOR)
        self.logout()
        self.login(self.VIEWER_EMAIL)
        csrf_token = self.get_new_csrf_token()
        # This user should have the learner dashboard as default.
        self.post_json(
            feconf.SIGNUP_DATA_URL,
            {'agreed_to_terms': True, 'username': 'learneruser',
             'default_dashboard': constants.DASHBOARD_TYPE_LEARNER,
             'can_receive_email_updates': None},
            csrf_token=csrf_token)
        user_id = user_services.get_user_id_from_username('learneruser')
        user_settings = user_services.get_user_settings(user_id)
        self.assertEqual(
            user_settings.default_dashboard, constants.DASHBOARD_TYPE_LEARNER)
        self.logout()
    def test_user_settings_of_non_existing_user(self):
        """Before registration, the handler reports default settings."""
        self.login(self.OWNER_EMAIL)
        values_dict = {
            'can_send_emails': False,
            'has_agreed_to_latest_terms': False,
            'has_ever_registered': False,
            'username': None,
        }
        response = self.get_json(feconf.SIGNUP_DATA_URL)
        self.assertDictEqual(values_dict, response)
        self.logout()
    def test_user_settings_of_existing_user(self):
        """After registration, the handler reports the stored settings."""
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.login(self.OWNER_EMAIL)
        values_dict = {
            'can_send_emails': True,
            'has_agreed_to_latest_terms': True,
            'has_ever_registered': True,
            'username': 'owner',
        }
        # can_send_emails mirrors the server-wide CAN_SEND_EMAILS flag.
        with self.swap(feconf, 'CAN_SEND_EMAILS', True):
            response = self.get_json(feconf.SIGNUP_DATA_URL)
            self.assertDictEqual(values_dict, response)
        self.logout()
class DeleteAccountPageTests(test_utils.GenericTestBase):
    """Tests for the /delete-account page."""
    def setUp(self):
        super(DeleteAccountPageTests, self).setUp()
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.login(self.EDITOR_EMAIL)
    def test_get_delete_account_page(self):
        """The page is served when account deletion is enabled."""
        with self.swap(constants, 'ENABLE_ACCOUNT_DELETION', True):
            response = self.get_html_response('/delete-account')
            self.assertIn(
                '<delete-account-page></delete-account-page>', response.body)
    def test_get_delete_account_page_disabled(self):
        """The page 404s when account deletion is disabled."""
        with self.swap(constants, 'ENABLE_ACCOUNT_DELETION', False):
            self.get_html_response('/delete-account', expected_status_int=404)
class DeleteAccountHandlerTests(test_utils.GenericTestBase):
    """Tests for the /delete-account-handler endpoint."""
    def setUp(self):
        super(DeleteAccountHandlerTests, self).setUp()
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.login(self.EDITOR_EMAIL)
    def test_delete_delete_account_page(self):
        """Deletion succeeds when the feature flag is enabled."""
        with self.swap(constants, 'ENABLE_ACCOUNT_DELETION', True):
            data = self.delete_json('/delete-account-handler')
            self.assertEqual(data, {'success': True})
    def test_delete_delete_account_page_disabled(self):
        """The handler 404s when the feature flag is disabled."""
        with self.swap(constants, 'ENABLE_ACCOUNT_DELETION', False):
            self.delete_json('/delete-account-handler', expected_status_int=404)
class ExportAccountHandlerTests(test_utils.GenericTestBase):
    """Tests for the /export-account-handler (GDPR-style takeout) endpoint."""
    # Fixed timestamp so the exported epoch values are deterministic.
    GENERIC_DATE = datetime.datetime(2019, 5, 20)
    GENERIC_EPOCH = utils.get_time_in_millisecs(GENERIC_DATE)
    def setUp(self):
        super(ExportAccountHandlerTests, self).setUp()
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.login(self.EDITOR_EMAIL)
        # Seed an empty subscriptions model so the export includes a
        # user_subscriptions_data section.
        user_models.UserSubscriptionsModel(
            id=self.get_user_id_from_email(self.EDITOR_EMAIL),
            creator_ids=[],
            collection_ids=[],
            activity_ids=[],
            general_feedback_thread_ids=[]).put()
    def test_export_account_handler(self):
        """The export contains all per-user sections with expected values."""
        # Update user settings to constants.
        user_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
        user_settings = user_services.get_user_settings(user_id)
        user_settings.last_agreed_to_terms = self.GENERIC_DATE
        user_settings.last_logged_in = self.GENERIC_DATE
        user_settings.validate()
        # Re-persist the settings model field-by-field with the pinned dates.
        user_models.UserSettingsModel(
            id=user_settings.user_id,
            gae_id=user_settings.gae_id,
            email=user_settings.email,
            role=user_settings.role,
            username=user_settings.username,
            normalized_username=user_settings.normalized_username,
            last_agreed_to_terms=user_settings.last_agreed_to_terms,
            last_started_state_editor_tutorial=(
                user_settings.last_started_state_editor_tutorial),
            last_started_state_translation_tutorial=(
                user_settings.last_started_state_translation_tutorial),
            last_logged_in=user_settings.last_logged_in,
            last_edited_an_exploration=user_settings.last_edited_an_exploration,
            last_created_an_exploration=(
                user_settings.last_created_an_exploration),
            profile_picture_data_url=user_settings.profile_picture_data_url,
            default_dashboard=user_settings.default_dashboard,
            creator_dashboard_display_pref=(
                user_settings.creator_dashboard_display_pref),
            user_bio=user_settings.user_bio,
            subject_interests=user_settings.subject_interests,
            first_contribution_msec=user_settings.first_contribution_msec,
            preferred_language_codes=user_settings.preferred_language_codes,
            preferred_site_language_code=(
                user_settings.preferred_site_language_code),
            preferred_audio_language_code=(
                user_settings.preferred_audio_language_code),
            deleted=user_settings.deleted
        ).put()
        constants_swap = self.swap(constants, 'ENABLE_ACCOUNT_EXPORT', True)
        # Stub out login recording so fetching the export does not bump
        # last_logged_in and break the pinned expected value.
        time_swap = self.swap(
            user_services, 'record_user_logged_in', lambda *args: None)
        with constants_swap, time_swap:
            data = self.get_json('/export-account-handler')
            expected_data = {
                u'topic_rights_data': {
                    u'managed_topic_ids': []
                },
                u'subtopic_page_snapshot_metadata_data': {},
                u'general_voiceover_application_data': {},
                u'collection_progress_data': {},
                u'story_snapshot_metadata_data': {},
                u'user_community_rights_data': {},
                u'user_contributions_data': {
                    u'edited_exploration_ids': [],
                    u'created_exploration_ids': []
                },
                u'general_feedback_thread_user_data': {},
                u'question_snapshot_metadata_data': {},
                u'general_feedback_message_data': {},
                u'story_progress_data': {},
                u'learner_playlist_data': {},
                u'collection_rights_data': {
                    u'voiced_collection_ids': [],
                    u'owned_collection_ids': [],
                    u'viewable_collection_ids': [],
                    u'editable_collection_ids': []
                },
                u'skill_snapshot_metadata_data': {},
                u'exploration_user_data_data': {},
                u'collection_snapshot_metadata_data': {},
                u'exploration_rights_data': {
                    u'viewable_exploration_ids': [],
                    u'owned_exploration_ids': [],
                    u'voiced_exploration_ids': [],
                    u'editable_exploration_ids': []
                },
                u'topic_snapshot_metadata_data': {},
                u'completed_activities_data': {},
                u'general_feedback_thread_data': {},
                u'topic_rights_snapshot_metadata_data': {},
                u'user_stats_data': {},
                u'exploration_rights_snapshot_metadata_data': {},
                u'user_subscriptions_data': {
                    u'creator_usernames': [],
                    u'collection_ids': [],
                    u'activity_ids': [],
                    u'general_feedback_thread_ids': [],
                    u'last_checked': None
                },
                u'config_property_snapshot_metadata_data': {},
                u'exploration_snapshot_metadata_data': {},
                u'incomplete_activities_data': {},
                u'user_skill_mastery_data': {},
                u'exp_user_last_playthrough_data': {},
                u'user_settings_data': {
                    u'username': u'editor',
                    u'last_agreed_to_terms': self.GENERIC_EPOCH,
                    u'last_started_state_translation_tutorial': None,
                    u'last_started_state_editor_tutorial': None,
                    u'normalized_username': u'editor',
                    u'first_contribution_msec': None,
                    u'preferred_language_codes': [
                        u'en'
                    ],
                    u'creator_dashboard_display_pref': u'card',
                    u'subject_interests': [],
                    u'default_dashboard': None,
                    u'preferred_site_language_code': None,
                    u'user_bio': u'',
                    u'profile_picture_data_url':
                        user_services.DEFAULT_IDENTICON_DATA_URL,
                    u'role': u'EXPLORATION_EDITOR',
                    u'last_edited_an_exploration': None,
                    u'email': u'editor@example.com',
                    u'preferred_audio_language_code': None,
                    u'last_logged_in': self.GENERIC_EPOCH
                },
                u'general_suggestion_data': {},
                u'user_contribution_scoring_data': {},
                u'general_feedback_email_reply_to_id_data': {},
                u'collection_rights_snapshot_metadata_data': {}
            }
            self.assertEqual(
                data,
                expected_data
            )
    def test_export_account_handler_disabled_logged_in(self):
        """The handler 404s when export is disabled, even if logged in."""
        with self.swap(constants, 'ENABLE_ACCOUNT_EXPORT', False):
            self.get_json('/export-account-handler', expected_status_int=404)
    # NOTE(review): 'hander' in the method name below looks like a typo for
    # 'handler'; renaming would keep the test discoverable ('test_' prefix).
    def test_export_account_hander_disabled_logged_out(self):
        """A logged-out request gets 401 when export is disabled."""
        self.logout()
        with self.swap(constants, 'ENABLE_ACCOUNT_EXPORT', False):
            self.get_json('/export-account-handler', expected_status_int=401)
    def test_export_account_handler_enabled_logged_out(self):
        """A logged-out request gets 401 even when export is enabled."""
        self.logout()
        with self.swap(constants, 'ENABLE_ACCOUNT_EXPORT', True):
            self.get_json('/export-account-handler', expected_status_int=401)
class PendingAccountDeletionPageTests(test_utils.GenericTestBase):
    """Tests for the /pending-account-deletion page."""
    def test_get_pending_account_deletion_page(self):
        """The page is served when account deletion is enabled."""
        with self.swap(constants, 'ENABLE_ACCOUNT_DELETION', True):
            response = self.get_html_response('/pending-account-deletion')
            self.assertIn('Pending Account Deletion', response.body)
    def test_get_pending_account_deletion_page_disabled(self):
        """The page 404s when account deletion is disabled."""
        with self.swap(constants, 'ENABLE_ACCOUNT_DELETION', False):
            self.get_html_response('/pending-account-deletion',
                                   expected_status_int=404)
class UsernameCheckHandlerTests(test_utils.GenericTestBase):
    """Tests for the username-availability check endpoint."""
    def test_username_check(self):
        """Taken, free and invalid usernames are reported correctly."""
        self.signup('abc@example.com', username='abc')
        self.login(self.EDITOR_EMAIL)
        csrf_token = self.get_new_csrf_token()
        # 'abc' was just registered, so it is taken.
        response_dict = self.post_json(
            feconf.USERNAME_CHECK_DATA_URL, {'username': 'abc'},
            csrf_token=csrf_token)
        self.assertEqual(
            response_dict, {
                'username_is_taken': True
            })
        # 'def' has not been registered, so it is free.
        response_dict = self.post_json(
            feconf.USERNAME_CHECK_DATA_URL, {'username': 'def'},
            csrf_token=csrf_token)
        self.assertEqual(
            response_dict, {
                'username_is_taken': False
            })
        # Non-alphanumeric usernames are rejected with 400.
        response_dict = self.post_json(
            feconf.USERNAME_CHECK_DATA_URL, {'username': '!!!INVALID!!!'},
            csrf_token=csrf_token, expected_status_int=400)
        self.assertIn(
            'can only have alphanumeric characters', response_dict['error'])
        response_dict = self.post_json(
            feconf.USERNAME_CHECK_DATA_URL,
            {'username': self.UNICODE_TEST_STRING},
            csrf_token=csrf_token, expected_status_int=400)
        self.assertIn(
            'can only have alphanumeric characters', response_dict['error'])
        self.logout()
class SiteLanguageHandlerTests(test_utils.GenericTestBase):
    """Tests for saving the preferred site language."""
    def setUp(self):
        super(SiteLanguageHandlerTests, self).setUp()
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
    def test_save_site_language_handler(self):
        """Test the language is saved in the preferences when handler is
        called.
        """
        language_code = 'es'
        self.login(self.EDITOR_EMAIL)
        csrf_token = self.get_new_csrf_token()
        self.put_json(
            '/preferenceshandler/data', {
                'update_type': 'preferred_site_language_code',
                'data': language_code,
            }, csrf_token=csrf_token)
        preferences = self.get_json('/preferenceshandler/data')
        self.assertIsNotNone(preferences)
        self.assertEqual(
            preferences['preferred_site_language_code'], language_code)
        self.logout()
    def test_can_update_site_language_code(self):
        """The dedicated site-language endpoint persists the new code."""
        self.login(self.EDITOR_EMAIL)
        user_settings = user_services.get_user_settings(
            self.editor_id, strict=True)
        # No preference is set before the update.
        self.assertIsNone(user_settings.preferred_site_language_code)
        csrf_token = self.get_new_csrf_token()
        self.put_json(
            feconf.SITE_LANGUAGE_DATA_URL, payload={'site_language_code': 'en'},
            csrf_token=csrf_token)
        user_settings = user_services.get_user_settings(
            self.editor_id, strict=True)
        self.assertEqual(user_settings.preferred_site_language_code, 'en')
        self.logout()
class UserInfoHandlerTests(test_utils.GenericTestBase):
    """Tests for the /userinfohandler endpoint."""
    def test_user_info_handler(self):
        """The handler returns identity and role flags for a logged-in
        session, and only a logged-out marker otherwise.
        """
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.login(self.EDITOR_EMAIL)
        json_response = self.get_json('/userinfohandler')
        self.assertDictEqual({
            'is_moderator': False,
            'is_admin': False,
            'is_topic_manager': False,
            'is_super_admin': False,
            'can_create_collections': False,
            'preferred_site_language_code': None,
            'username': self.EDITOR_USERNAME,
            'email': self.EDITOR_EMAIL,
            'user_is_logged_in': True}, json_response)
        self.logout()
        # Once logged out, only the login state is reported.
        json_response = self.get_json('/userinfohandler')
        self.assertDictEqual({
            'user_is_logged_in': False
        }, json_response)
class UrlHandlerTests(test_utils.GenericTestBase):
    """Tests for the /url_handler login-URL endpoint."""
    def test_login_url_is_none_for_signed_in_user(self):
        """A signed-in user needs no login URL."""
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.login(self.EDITOR_EMAIL)
        response = self.get_json('/url_handler')
        self.assertIsNone(response['login_url'])
        self.logout()
    def test_login_url_gets_created_for_signed_out_users(self):
        """A signed-out user gets a login URL targeting current_url."""
        response = self.get_json(
            '/url_handler', params={'current_url': 'random_url'})
        self.assertTrue(response['login_url'].endswith('random_url'))
| 41.796703 | 80 | 0.655208 |
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
import re
from constants import constants
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import rights_manager
from core.domain import subscription_services
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
import feconf
import utils
# Storage model classes are loaded through the platform registry so the
# tests use whichever backend the platform layer selects.
(user_models,) = models.Registry.import_models([models.NAMES.user])
class ProfilePageTests(test_utils.GenericTestBase):
    """Tests for serving the /profile/<username> page."""
    def test_get_profile_page_of_non_existing_user_raises_status_404(self):
        """An unregistered username yields a 404."""
        self.get_html_response(
            '/profile/%s' % self.OWNER_USERNAME, expected_status_int=404)
    def test_get_profile_page_of_existing_user(self):
        """A registered username serves the profile-page component."""
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        response = self.get_html_response('/profile/%s' % self.OWNER_USERNAME)
        self.assertIn(
            '<profile-page></profile-page>', response.body)
class ProfileDataHandlerTests(test_utils.GenericTestBase):
    """Tests for the preferences and profile data handlers."""
    def test_preference_page_updates(self):
        """Language preference updates are persisted and read back."""
        self.signup(self.EDITOR_EMAIL, username=self.EDITOR_USERNAME)
        self.login(self.EDITOR_EMAIL)
        csrf_token = self.get_new_csrf_token()
        original_preferences = self.get_json('/preferenceshandler/data')
        # Defaults before any update: 'en' content language, no site or
        # audio language preference.
        self.assertEqual(
            ['en'], original_preferences['preferred_language_codes'])
        self.assertIsNone(original_preferences['preferred_site_language_code'])
        self.assertIsNone(original_preferences['preferred_audio_language_code'])
        self.put_json(
            '/preferenceshandler/data',
            {'update_type': 'preferred_site_language_code', 'data': 'en'},
            csrf_token=csrf_token)
        self.put_json(
            '/preferenceshandler/data',
            {'update_type': 'preferred_audio_language_code', 'data': 'hi-en'},
            csrf_token=csrf_token)
        self.put_json(
            '/preferenceshandler/data',
            {'update_type': 'preferred_language_codes', 'data': ['de']},
            csrf_token=csrf_token)
        new_preferences = self.get_json('/preferenceshandler/data')
        self.assertEqual(new_preferences['preferred_language_codes'], ['de'])
        self.assertEqual(new_preferences['preferred_site_language_code'], 'en')
        self.assertEqual(
            new_preferences['preferred_audio_language_code'], 'hi-en')
    def test_profile_data_is_independent_of_currently_logged_in_user(self):
        """Profile data served depends on the profile owner, not on who
        (if anyone) is logged in.
        """
        self.signup(self.EDITOR_EMAIL, username=self.EDITOR_USERNAME)
        self.login(self.EDITOR_EMAIL)
        csrf_token = self.get_new_csrf_token()
        self.put_json(
            '/preferenceshandler/data',
            {'update_type': 'user_bio', 'data': 'My new editor bio'},
            csrf_token=csrf_token)
        self.put_json(
            '/preferenceshandler/data',
            {'update_type': 'subject_interests', 'data': ['editor', 'editing']},
            csrf_token=csrf_token)
        self.logout()
        self.signup(self.VIEWER_EMAIL, username=self.VIEWER_USERNAME)
        self.login(self.VIEWER_EMAIL)
        csrf_token = self.get_new_csrf_token()
        self.put_json(
            '/preferenceshandler/data',
            {'update_type': 'user_bio', 'data': 'My new viewer bio'},
            csrf_token=csrf_token)
        self.put_json(
            '/preferenceshandler/data',
            {'update_type': 'subject_interests', 'data': ['viewer', 'viewing']},
            csrf_token=csrf_token)
        self.logout()
        # Viewer looks at editor's profile page.
        self.login(self.VIEWER_EMAIL)
        response = self.get_json(
            '/profilehandler/data/%s' % self.EDITOR_USERNAME)
        self.assertEqual(response['user_bio'], 'My new editor bio')
        self.assertEqual(response['subject_interests'], ['editor', 'editing'])
        self.logout()
        # Editor looks at their own profile page.
        self.login(self.EDITOR_EMAIL)
        response = self.get_json(
            '/profilehandler/data/%s' % self.EDITOR_USERNAME)
        self.assertEqual(response['user_bio'], 'My new editor bio')
        self.assertEqual(response['subject_interests'], ['editor', 'editing'])
        self.logout()
        # Logged-out user looks at editor's profile page.
        response = self.get_json(
            '/profilehandler/data/%s' % self.EDITOR_USERNAME)
        self.assertEqual(response['user_bio'], 'My new editor bio')
        self.assertEqual(response['subject_interests'], ['editor', 'editing'])
    def test_preferences_page(self):
        """The preferences page serves with its expected title."""
        self.signup(self.EDITOR_EMAIL, username=self.EDITOR_USERNAME)
        self.login(self.EDITOR_EMAIL)
        response = self.get_html_response(feconf.PREFERENCES_URL)
        self.assertIn('{"title": "Preferences - Oppia"})', response.body)
        self.logout()
class UserContributionsTests(test_utils.GenericTestBase):
    """Tests for created/edited exploration summaries in profile data."""
    USERNAME_A = 'a'
    EMAIL_A = 'a@example.com'
    USERNAME_B = 'b'
    EMAIL_B = 'b@example.com'
    EXP_ID_1 = 'exp_id_1'
    def test_null_case(self):
        """A user with no contributions has empty summary lists."""
        self.signup(self.EMAIL_A, self.USERNAME_A)
        response_dict = self.get_json(
            '/profilehandler/data/%s' % self.USERNAME_A)
        self.assertEqual(
            response_dict['created_exp_summary_dicts'], [])
        self.assertEqual(
            response_dict['edited_exp_summary_dicts'], [])
    def test_created(self):
        """Creating (and publishing) an exploration counts as both a
        creation and an edit for its author.
        """
        self.signup(self.EMAIL_A, self.USERNAME_A)
        user_a_id = self.get_user_id_from_email(self.EMAIL_A)
        user_a = user_services.UserActionsInfo(user_a_id)
        self.save_new_valid_exploration(
            self.EXP_ID_1, user_a_id, end_state_name='End')
        rights_manager.publish_exploration(user_a, self.EXP_ID_1)
        response_dict = self.get_json(
            '/profilehandler/data/%s' % self.USERNAME_A)
        self.assertEqual(len(
            response_dict['created_exp_summary_dicts']), 1)
        self.assertEqual(len(
            response_dict['edited_exp_summary_dicts']), 1)
        self.assertEqual(
            response_dict['created_exp_summary_dicts'][0]['id'],
            self.EXP_ID_1)
        self.assertEqual(
            response_dict['edited_exp_summary_dicts'][0]['id'],
            self.EXP_ID_1)
    def test_edited(self):
        """Editing someone else's exploration counts only as an edit."""
        self.signup(self.EMAIL_A, self.USERNAME_A)
        user_a_id = self.get_user_id_from_email(self.EMAIL_A)
        self.signup(self.EMAIL_B, self.USERNAME_B)
        user_b_id = self.get_user_id_from_email(self.EMAIL_B)
        user_a = user_services.UserActionsInfo(user_a_id)
        self.save_new_valid_exploration(
            self.EXP_ID_1, user_a_id, end_state_name='End')
        rights_manager.publish_exploration(user_a, self.EXP_ID_1)
        # User B edits user A's exploration.
        exp_services.update_exploration(
            user_b_id, self.EXP_ID_1, [exp_domain.ExplorationChange({
                'cmd': 'edit_exploration_property',
                'property_name': 'objective',
                'new_value': 'the objective'
            })], 'Test edit')
        response_dict = self.get_json(
            '/profilehandler/data/%s' % self.USERNAME_B)
        self.assertEqual(len(
            response_dict['created_exp_summary_dicts']), 0)
        self.assertEqual(len(
            response_dict['edited_exp_summary_dicts']), 1)
        self.assertEqual(
            response_dict['edited_exp_summary_dicts'][0]['id'],
            self.EXP_ID_1)
        self.assertEqual(
            response_dict['edited_exp_summary_dicts'][0]['objective'],
            'the objective')
class FirstContributionDateTests(test_utils.GenericTestBase):
    """Tests for the first_contribution_msec field in profile data."""
    USERNAME = 'abc123'
    EMAIL = 'abc123@gmail.com'
    def test_contribution_msec(self):
        """The first-contribution time is set once and never overwritten."""
        self.signup(self.EMAIL, self.USERNAME)
        self.login(self.EMAIL)
        user_id = self.get_user_id_from_email(self.EMAIL)
        # Unset before any contribution is recorded.
        response_dict = self.get_json(
            '/profilehandler/data/%s' % self.USERNAME)
        self.assertIsNone(response_dict['first_contribution_msec'])
        first_time_in_msecs = utils.get_current_time_in_millisecs()
        user_services.update_first_contribution_msec_if_not_set(
            user_id, first_time_in_msecs)
        response_dict = self.get_json(
            '/profilehandler/data/%s' % self.USERNAME)
        self.assertEqual(
            response_dict['first_contribution_msec'],
            first_time_in_msecs)
        # A second call must not overwrite the already-set value.
        second_time_in_msecs = utils.get_current_time_in_millisecs()
        user_services.update_first_contribution_msec_if_not_set(
            user_id, second_time_in_msecs)
        response_dict = self.get_json(
            '/profilehandler/data/%s' % self.USERNAME)
        self.assertEqual(
            response_dict['first_contribution_msec'],
            first_time_in_msecs)
class PreferencesHandlerTests(test_utils.GenericTestBase):
    """Tests for updates made through the preferences data handler."""
    EXP_ID = 'exp_id'
    EXP_TITLE = 'Exploration title'
    def setUp(self):
        super(PreferencesHandlerTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)
    def test_can_see_subscriptions(self):
        """The subscription list tracks subscribe/unsubscribe actions."""
        self.login(self.VIEWER_EMAIL)
        response = self.get_json(feconf.PREFERENCES_DATA_URL)
        self.assertEqual(len(response['subscription_list']), 0)
        subscription_services.subscribe_to_creator(
            self.viewer_id, self.owner_id)
        response = self.get_json(feconf.PREFERENCES_DATA_URL)
        self.assertEqual(len(response['subscription_list']), 1)
        self.assertEqual(
            response['subscription_list'][0]['creator_username'],
            self.OWNER_USERNAME)
        subscription_services.unsubscribe_from_creator(
            self.viewer_id, self.owner_id)
        response = self.get_json(feconf.PREFERENCES_DATA_URL)
        self.assertEqual(len(response['subscription_list']), 0)
        self.logout()
    def test_can_update_profile_picture_data_url(self):
        """A profile_picture_data_url update replaces the default image."""
        self.login(self.OWNER_EMAIL)
        csrf_token = self.get_new_csrf_token()
        user_settings = user_services.get_user_settings(self.owner_id)
        # The default picture is a data-URL-encoded PNG.
        self.assertTrue(
            user_settings.profile_picture_data_url.startswith(
                'data:image/png;'))
        self.put_json(
            feconf.PREFERENCES_DATA_URL,
            payload={'update_type': 'profile_picture_data_url',
                     'data': 'new_profile_picture_data_url'},
            csrf_token=csrf_token)
        user_settings = user_services.get_user_settings(self.owner_id)
        self.assertEqual(
            user_settings.profile_picture_data_url,
            'new_profile_picture_data_url')
        self.logout()
    def test_can_update_default_dashboard(self):
        """A default_dashboard update is persisted to user settings."""
        self.login(self.OWNER_EMAIL)
        csrf_token = self.get_new_csrf_token()
        user_settings = user_services.get_user_settings(self.owner_id)
        self.assertIsNone(user_settings.default_dashboard)
        self.put_json(
            feconf.PREFERENCES_DATA_URL,
            payload={'update_type': 'default_dashboard',
                     'data': constants.DASHBOARD_TYPE_CREATOR},
            csrf_token=csrf_token)
        user_settings = user_services.get_user_settings(self.owner_id)
        self.assertEqual(
            user_settings.default_dashboard, constants.DASHBOARD_TYPE_CREATOR)
        self.logout()
    def test_update_preferences_with_invalid_update_type_raises_exception(self):
        """Unknown update types raise an 'Invalid update type' error."""
        self.login(self.OWNER_EMAIL)
        csrf_token = self.get_new_csrf_token()
        with self.assertRaisesRegexp(Exception, 'Invalid update type:'):
            self.put_json(
                feconf.PREFERENCES_DATA_URL,
                payload={'update_type': 'invalid_update_type'},
                csrf_token=csrf_token)
        self.logout()
class LongUserBioHandlerTests(test_utils.GenericTestBase):
    """Tests for the 2000-character limit on the user bio."""
    USERNAME_A = 'a'
    EMAIL_A = 'a@example.com'
    USERNAME_B = 'b'
    EMAIL_B = 'b@example.com'
    def test_userbio_within_limit(self):
        """A bio within the limit is accepted and stored."""
        self.signup(self.EMAIL_A, self.USERNAME_A)
        self.login(self.EMAIL_A)
        csrf_token = self.get_new_csrf_token()
        self.put_json(
            '/preferenceshandler/data', {
                'update_type': 'user_bio',
                'data': 'I am within 2000 char limit',
            }, csrf_token=csrf_token)
        preferences = self.get_json('/preferenceshandler/data')
        self.assertIsNotNone(preferences)
        self.assertEqual(
            preferences['user_bio'], 'I am within 2000 char limit')
        self.logout()
    def test_user_bio_exceeds_limit(self):
        """A bio over the limit is rejected with a 400 and an error."""
        self.signup(self.EMAIL_B, self.USERNAME_B)
        self.login(self.EMAIL_B)
        csrf_token = self.get_new_csrf_token()
        # 31 chars * 200 repetitions is well over the 2000-char limit.
        user_bio_response = self.put_json(
            '/preferenceshandler/data', {
                'update_type': 'user_bio',
                'data': 'I am not within 2000 char limit' * 200
            },
            csrf_token=csrf_token, expected_status_int=400)
        self.assertEqual(user_bio_response['status_code'], 400)
        self.assertIn('User bio exceeds maximum character limit: 2000',
                      user_bio_response['error'])
        self.logout()
class ProfileLinkTests(test_utils.GenericTestBase):
    """Tests for fetching a profile picture by username."""
    USERNAME = 'abc123'
    EMAIL = 'abc123@gmail.com'
    PROFILE_PIC_URL = '/preferenceshandler/profile_picture_by_username/'
    def test_get_profile_picture_invalid_username(self):
        """An unregistered username yields a 404."""
        self.get_json(
            '%s%s' % (self.PROFILE_PIC_URL, self.USERNAME),
            expected_status_int=404)
    def test_get_profile_picture_valid_username(self):
        """A fresh user gets the default identicon picture."""
        self.signup(self.EMAIL, self.USERNAME)
        response_dict = self.get_json(
            '%s%s' % (self.PROFILE_PIC_URL, self.USERNAME)
        )
        self.assertEqual(
            response_dict['profile_picture_data_url_for_username'],
            user_services.DEFAULT_IDENTICON_DATA_URL)
class EmailPreferencesTests(test_utils.GenericTestBase):
def test_user_not_setting_email_prefs_on_signup(self):
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': 'abc', 'agreed_to_terms': True},
csrf_token=csrf_token)
editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', True):
email_preferences = user_services.get_email_preferences(editor_id)
self.assertEqual(email_preferences.can_receive_email_updates, True)
self.assertEqual(
email_preferences.can_receive_editor_role_email,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_feedback_message_email,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_subscription_email,
feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', False):
email_preferences = user_services.get_email_preferences(editor_id)
self.assertEqual(email_preferences.can_receive_email_updates, False)
self.assertEqual(
email_preferences.can_receive_editor_role_email,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_feedback_message_email,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_subscription_email,
feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
def test_user_allowing_emails_on_signup(self):
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': 'abc', 'agreed_to_terms': True,
'can_receive_email_updates': True},
csrf_token=csrf_token)
editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', True):
email_preferences = user_services.get_email_preferences(editor_id)
self.assertEqual(email_preferences.can_receive_email_updates, True)
self.assertEqual(
email_preferences.can_receive_editor_role_email,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_feedback_message_email,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_subscription_email,
feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', False):
email_preferences = user_services.get_email_preferences(editor_id)
self.assertEqual(email_preferences.can_receive_email_updates, True)
self.assertEqual(
email_preferences.can_receive_editor_role_email,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_feedback_message_email,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_subscription_email,
feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
def test_user_disallowing_emails_on_signup(self):
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': 'abc', 'agreed_to_terms': True,
'can_receive_email_updates': False},
csrf_token=csrf_token)
editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', True):
email_preferences = user_services.get_email_preferences(editor_id)
self.assertEqual(email_preferences.can_receive_email_updates, False)
self.assertEqual(
email_preferences.can_receive_editor_role_email,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_feedback_message_email,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_subscription_email,
feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', False):
email_preferences = user_services.get_email_preferences(editor_id)
self.assertEqual(email_preferences.can_receive_email_updates, False)
self.assertEqual(
email_preferences.can_receive_editor_role_email,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_feedback_message_email,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_subscription_email,
feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
def test_email_preferences_updates(self):
    """Checks that the preferences handler can flip all four email
    preference flags on and then off via an 'email_preferences' update.
    """
    self.signup(self.EDITOR_EMAIL, username=self.EDITOR_USERNAME)
    editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
    self.login(self.EDITOR_EMAIL)
    csrf_token = self.get_new_csrf_token()

    # Enable all email preferences.
    payload = {
        'update_type': 'email_preferences',
        'data': {
            'can_receive_email_updates': True,
            'can_receive_editor_role_email': True,
            'can_receive_feedback_message_email': True,
            'can_receive_subscription_email': True
        }
    }
    self.put_json(
        '/preferenceshandler/data', payload, csrf_token=csrf_token)
    email_preferences = user_services.get_email_preferences(editor_id)
    self.assertTrue(email_preferences.can_receive_email_updates)
    self.assertTrue(email_preferences.can_receive_editor_role_email)
    self.assertTrue(email_preferences.can_receive_feedback_message_email)
    self.assertTrue(email_preferences.can_receive_subscription_email)

    # Now disable all of them and check the stored values again.
    payload = {
        'update_type': 'email_preferences',
        'data': {
            'can_receive_email_updates': False,
            'can_receive_editor_role_email': False,
            'can_receive_feedback_message_email': False,
            'can_receive_subscription_email': False
        }
    }
    self.put_json(
        '/preferenceshandler/data', payload, csrf_token=csrf_token)
    email_preferences = user_services.get_email_preferences(editor_id)
    self.assertFalse(email_preferences.can_receive_email_updates)
    self.assertFalse(email_preferences.can_receive_editor_role_email)
    self.assertFalse(email_preferences.can_receive_feedback_message_email)
    self.assertFalse(email_preferences.can_receive_subscription_email)
class ProfilePictureHandlerTests(test_utils.GenericTestBase):
    """Tests for the /preferenceshandler/profile_picture endpoint."""

    def test_get_profile_picture_with_updated_value(self):
        # Unauthenticated requests are rejected with a 401.
        self.get_json(
            '/preferenceshandler/profile_picture', expected_status_int=401)
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.login(self.OWNER_EMAIL)
        user_settings = user_services.get_user_settings(owner_id)
        # The handler initially returns the stored profile picture.
        response = self.get_json('/preferenceshandler/profile_picture')
        self.assertEqual(
            response['profile_picture_data_url'],
            user_settings.profile_picture_data_url)
        # After an update, the handler reflects the new value.
        user_services.update_profile_picture_data_url(
            owner_id, 'new_profile_picture')
        response = self.get_json('/preferenceshandler/profile_picture')
        self.assertEqual(
            response['profile_picture_data_url'], 'new_profile_picture')
        self.logout()
class SignupTests(test_utils.GenericTestBase):
    """Tests for the signup page and the signup data handler.

    Fix: the '/signup?return_url=/page' request previously had a truncated
    string literal (missing closing quote and ``expected_status_int=302``),
    which made this module unparseable; the call has been restored.
    """

    def test_signup_page_does_not_have_top_right_menu(self):
        self.login(self.EDITOR_EMAIL)
        response = self.get_html_response(feconf.SIGNUP_URL)
        # mustcontain(no=[...]) asserts these strings are absent.
        response.mustcontain(no=['Logout'])
        self.logout()

    def test_going_somewhere_else_while_signing_in_logs_user_out(self):
        exp_services.load_demo('0')

        self.login(self.EDITOR_EMAIL)
        response = self.get_html_response(feconf.SIGNUP_URL)
        # Navigating away mid-signup redirects through logout back to the
        # requested page.
        response = self.get_html_response('/create/0', expected_status_int=302)
        self.assertIn('logout', response.headers['location'])
        self.assertIn('create', response.headers['location'])

        self.logout()

    def test_to_check_url_redirection_in_signup(self):
        """Checks that an external return_url is never honoured; only
        same-site relative paths survive the redirect.
        """
        self.login(self.EDITOR_EMAIL)
        csrf_token = self.get_new_csrf_token()

        # Registering this user fully.
        self.post_json(
            feconf.SIGNUP_DATA_URL,
            {'username': 'abc', 'agreed_to_terms': True},
            csrf_token=csrf_token)

        def strip_domain_from_location_header(url):
            """Returns the path component of a Location header value."""
            splitted_url = re.match(r'(http[s]?:\/\/)?([^\/\s]+\/)(.*)', url)
            return splitted_url.group(3)

        # Absolute external URLs collapse to the site root.
        response = self.get_html_response(
            '/signup?return_url=https://google.com', expected_status_int=302)
        self.assertEqual('', strip_domain_from_location_header(
            response.headers['location']))

        # Protocol-relative external URLs collapse to the site root too.
        response = self.get_html_response(
            '/signup?return_url=//google.com', expected_status_int=302)
        self.assertEqual('', strip_domain_from_location_header(
            response.headers['location']))

        # Relative paths are preserved. (This call previously contained a
        # truncated string literal; restored here.)
        response = self.get_html_response(
            '/signup?return_url=/page', expected_status_int=302)
        self.assertEqual('page', strip_domain_from_location_header(
            response.headers['location']))

        response = self.get_html_response(
            '/signup?return_url=/page/hello', expected_status_int=302)
        self.assertEqual('page/hello', strip_domain_from_location_header(
            response.headers['location']))

        response = self.get_html_response(
            '/signup?return_url=/page/hello?id=tests', expected_status_int=302)
        self.assertEqual(
            'page/hello?id=tests', strip_domain_from_location_header(
                response.headers['location']))

        self.logout()

    def test_accepting_terms_is_handled_correctly(self):
        self.login(self.EDITOR_EMAIL)
        csrf_token = self.get_new_csrf_token()

        # Declining the terms, or sending a non-boolean, is a 400.
        response_dict = self.post_json(
            feconf.SIGNUP_DATA_URL, {'agreed_to_terms': False},
            csrf_token=csrf_token, expected_status_int=400)
        self.assertIn('you will need to accept', response_dict['error'])

        response_dict = self.post_json(
            feconf.SIGNUP_DATA_URL,
            {'agreed_to_terms': 'Hasta la vista!'},
            csrf_token=csrf_token, expected_status_int=400)
        self.assertIn('you will need to accept', response_dict['error'])

        self.post_json(
            feconf.SIGNUP_DATA_URL,
            {'agreed_to_terms': True, 'username': 'myusername'},
            csrf_token=csrf_token)

        self.logout()

    def test_username_is_handled_correctly(self):
        self.login(self.EDITOR_EMAIL)
        csrf_token = self.get_new_csrf_token()

        # Missing or empty usernames are rejected.
        response_dict = self.post_json(
            feconf.SIGNUP_DATA_URL, {'agreed_to_terms': True},
            csrf_token=csrf_token, expected_status_int=400)
        self.assertIn('Empty username supplied', response_dict['error'])

        response_dict = self.post_json(
            feconf.SIGNUP_DATA_URL,
            {'username': '', 'agreed_to_terms': True},
            csrf_token=csrf_token, expected_status_int=400)
        self.assertIn('Empty username supplied', response_dict['error'])

        # Non-alphanumeric usernames are rejected.
        response_dict = self.post_json(
            feconf.SIGNUP_DATA_URL,
            {'username': '!a!', 'agreed_to_terms': True},
            csrf_token=csrf_token, expected_status_int=400)
        self.assertIn(
            'can only have alphanumeric characters', response_dict['error'])

        response_dict = self.post_json(
            feconf.SIGNUP_DATA_URL,
            {'username': self.UNICODE_TEST_STRING, 'agreed_to_terms': True},
            csrf_token=csrf_token, expected_status_int=400)
        self.assertIn(
            'can only have alphanumeric characters', response_dict['error'])

        # A valid alphanumeric username is accepted.
        response_dict = self.post_json(
            feconf.SIGNUP_DATA_URL,
            {'username': 'abcde', 'agreed_to_terms': True},
            csrf_token=csrf_token)
        self.logout()

    def test_default_dashboard_for_new_users(self):
        self.login(self.EDITOR_EMAIL)
        csrf_token = self.get_new_csrf_token()

        # This user should have the creator dashboard as default.
        self.post_json(
            feconf.SIGNUP_DATA_URL,
            {'agreed_to_terms': True, 'username': 'creatoruser',
             'default_dashboard': constants.DASHBOARD_TYPE_CREATOR,
             'can_receive_email_updates': None},
            csrf_token=csrf_token)

        user_id = user_services.get_user_id_from_username('creatoruser')
        user_settings = user_services.get_user_settings(user_id)
        self.assertEqual(
            user_settings.default_dashboard, constants.DASHBOARD_TYPE_CREATOR)
        self.logout()

        self.login(self.VIEWER_EMAIL)
        csrf_token = self.get_new_csrf_token()

        # This user should have the learner dashboard as default.
        self.post_json(
            feconf.SIGNUP_DATA_URL,
            {'agreed_to_terms': True, 'username': 'learneruser',
             'default_dashboard': constants.DASHBOARD_TYPE_LEARNER,
             'can_receive_email_updates': None},
            csrf_token=csrf_token)

        user_id = user_services.get_user_id_from_username('learneruser')
        user_settings = user_services.get_user_settings(user_id)
        self.assertEqual(
            user_settings.default_dashboard, constants.DASHBOARD_TYPE_LEARNER)
        self.logout()

    def test_user_settings_of_non_existing_user(self):
        # Before registration, the handler reports default (empty) settings.
        self.login(self.OWNER_EMAIL)
        values_dict = {
            'can_send_emails': False,
            'has_agreed_to_latest_terms': False,
            'has_ever_registered': False,
            'username': None,
        }
        response = self.get_json(feconf.SIGNUP_DATA_URL)
        self.assertDictEqual(values_dict, response)
        self.logout()

    def test_user_settings_of_existing_user(self):
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.login(self.OWNER_EMAIL)
        values_dict = {
            'can_send_emails': True,
            'has_agreed_to_latest_terms': True,
            'has_ever_registered': True,
            'username': 'owner',
        }
        with self.swap(feconf, 'CAN_SEND_EMAILS', True):
            response = self.get_json(feconf.SIGNUP_DATA_URL)
            self.assertDictEqual(values_dict, response)
        self.logout()
class DeleteAccountPageTests(test_utils.GenericTestBase):
    """Tests for the /delete-account page, gated on ENABLE_ACCOUNT_DELETION."""

    def setUp(self):
        super(DeleteAccountPageTests, self).setUp()
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.login(self.EDITOR_EMAIL)

    def test_get_delete_account_page(self):
        with self.swap(constants, 'ENABLE_ACCOUNT_DELETION', True):
            response = self.get_html_response('/delete-account')
            self.assertIn(
                '<delete-account-page></delete-account-page>', response.body)

    def test_get_delete_account_page_disabled(self):
        # The page 404s while the feature flag is off.
        with self.swap(constants, 'ENABLE_ACCOUNT_DELETION', False):
            self.get_html_response('/delete-account', expected_status_int=404)
class DeleteAccountHandlerTests(test_utils.GenericTestBase):
    """Tests for the /delete-account-handler endpoint."""

    def setUp(self):
        super(DeleteAccountHandlerTests, self).setUp()
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.login(self.EDITOR_EMAIL)

    def test_delete_delete_account_page(self):
        with self.swap(constants, 'ENABLE_ACCOUNT_DELETION', True):
            data = self.delete_json('/delete-account-handler')
            self.assertEqual(data, {'success': True})

    def test_delete_delete_account_page_disabled(self):
        # The handler 404s while the feature flag is off.
        with self.swap(constants, 'ENABLE_ACCOUNT_DELETION', False):
            self.delete_json('/delete-account-handler', expected_status_int=404)
class ExportAccountHandlerTests(test_utils.GenericTestBase):
    """Tests for /export-account-handler (user data takeout export)."""

    # Fixed timestamp so exported epoch values are predictable.
    GENERIC_DATE = datetime.datetime(2019, 5, 20)
    GENERIC_EPOCH = utils.get_time_in_millisecs(GENERIC_DATE)

    def setUp(self):
        super(ExportAccountHandlerTests, self).setUp()
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.login(self.EDITOR_EMAIL)

        user_models.UserSubscriptionsModel(
            id=self.get_user_id_from_email(self.EDITOR_EMAIL),
            creator_ids=[],
            collection_ids=[],
            activity_ids=[],
            general_feedback_thread_ids=[]).put()

    def test_export_account_handler(self):
        # Update user settings to constants.
        user_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
        user_settings = user_services.get_user_settings(user_id)
        user_settings.last_agreed_to_terms = self.GENERIC_DATE
        user_settings.last_logged_in = self.GENERIC_DATE
        user_settings.validate()
        # Re-persist the settings model field-by-field with the fixed dates.
        user_models.UserSettingsModel(
            id=user_settings.user_id,
            gae_id=user_settings.gae_id,
            email=user_settings.email,
            role=user_settings.role,
            username=user_settings.username,
            normalized_username=user_settings.normalized_username,
            last_agreed_to_terms=user_settings.last_agreed_to_terms,
            last_started_state_editor_tutorial=(
                user_settings.last_started_state_editor_tutorial),
            last_started_state_translation_tutorial=(
                user_settings.last_started_state_translation_tutorial),
            last_logged_in=user_settings.last_logged_in,
            last_edited_an_exploration=user_settings.last_edited_an_exploration,
            last_created_an_exploration=(
                user_settings.last_created_an_exploration),
            profile_picture_data_url=user_settings.profile_picture_data_url,
            default_dashboard=user_settings.default_dashboard,
            creator_dashboard_display_pref=(
                user_settings.creator_dashboard_display_pref),
            user_bio=user_settings.user_bio,
            subject_interests=user_settings.subject_interests,
            first_contribution_msec=user_settings.first_contribution_msec,
            preferred_language_codes=user_settings.preferred_language_codes,
            preferred_site_language_code=(
                user_settings.preferred_site_language_code),
            preferred_audio_language_code=(
                user_settings.preferred_audio_language_code),
            deleted=user_settings.deleted
        ).put()

        constants_swap = self.swap(constants, 'ENABLE_ACCOUNT_EXPORT', True)
        # Stop the handler from refreshing last_logged_in so the exported
        # value stays at GENERIC_EPOCH.
        time_swap = self.swap(
            user_services, 'record_user_logged_in', lambda *args: None)

        with constants_swap, time_swap:
            data = self.get_json('/export-account-handler')

        # Full expected takeout payload for a fresh editor account.
        expected_data = {
            u'topic_rights_data': {
                u'managed_topic_ids': []
            },
            u'subtopic_page_snapshot_metadata_data': {},
            u'general_voiceover_application_data': {},
            u'collection_progress_data': {},
            u'story_snapshot_metadata_data': {},
            u'user_community_rights_data': {},
            u'user_contributions_data': {
                u'edited_exploration_ids': [],
                u'created_exploration_ids': []
            },
            u'general_feedback_thread_user_data': {},
            u'question_snapshot_metadata_data': {},
            u'general_feedback_message_data': {},
            u'story_progress_data': {},
            u'learner_playlist_data': {},
            u'collection_rights_data': {
                u'voiced_collection_ids': [],
                u'owned_collection_ids': [],
                u'viewable_collection_ids': [],
                u'editable_collection_ids': []
            },
            u'skill_snapshot_metadata_data': {},
            u'exploration_user_data_data': {},
            u'collection_snapshot_metadata_data': {},
            u'exploration_rights_data': {
                u'viewable_exploration_ids': [],
                u'owned_exploration_ids': [],
                u'voiced_exploration_ids': [],
                u'editable_exploration_ids': []
            },
            u'topic_snapshot_metadata_data': {},
            u'completed_activities_data': {},
            u'general_feedback_thread_data': {},
            u'topic_rights_snapshot_metadata_data': {},
            u'user_stats_data': {},
            u'exploration_rights_snapshot_metadata_data': {},
            u'user_subscriptions_data': {
                u'creator_usernames': [],
                u'collection_ids': [],
                u'activity_ids': [],
                u'general_feedback_thread_ids': [],
                u'last_checked': None
            },
            u'config_property_snapshot_metadata_data': {},
            u'exploration_snapshot_metadata_data': {},
            u'incomplete_activities_data': {},
            u'user_skill_mastery_data': {},
            u'exp_user_last_playthrough_data': {},
            u'user_settings_data': {
                u'username': u'editor',
                u'last_agreed_to_terms': self.GENERIC_EPOCH,
                u'last_started_state_translation_tutorial': None,
                u'last_started_state_editor_tutorial': None,
                u'normalized_username': u'editor',
                u'first_contribution_msec': None,
                u'preferred_language_codes': [
                    u'en'
                ],
                u'creator_dashboard_display_pref': u'card',
                u'subject_interests': [],
                u'default_dashboard': None,
                u'preferred_site_language_code': None,
                u'user_bio': u'',
                u'profile_picture_data_url':
                    user_services.DEFAULT_IDENTICON_DATA_URL,
                u'role': u'EXPLORATION_EDITOR',
                u'last_edited_an_exploration': None,
                u'email': u'editor@example.com',
                u'preferred_audio_language_code': None,
                u'last_logged_in': self.GENERIC_EPOCH
            },
            u'general_suggestion_data': {},
            u'user_contribution_scoring_data': {},
            u'general_feedback_email_reply_to_id_data': {},
            u'collection_rights_snapshot_metadata_data': {}
        }
        self.assertEqual(
            data,
            expected_data
        )

    def test_export_account_handler_disabled_logged_in(self):
        with self.swap(constants, 'ENABLE_ACCOUNT_EXPORT', False):
            self.get_json('/export-account-handler', expected_status_int=404)

    def test_export_account_hander_disabled_logged_out(self):
        # Logged-out users get a 401 regardless of the flag.
        self.logout()
        with self.swap(constants, 'ENABLE_ACCOUNT_EXPORT', False):
            self.get_json('/export-account-handler', expected_status_int=401)

    def test_export_account_handler_enabled_logged_out(self):
        self.logout()
        with self.swap(constants, 'ENABLE_ACCOUNT_EXPORT', True):
            self.get_json('/export-account-handler', expected_status_int=401)
class PendingAccountDeletionPageTests(test_utils.GenericTestBase):
    """Tests for the /pending-account-deletion page."""

    def test_get_pending_account_deletion_page(self):
        with self.swap(constants, 'ENABLE_ACCOUNT_DELETION', True):
            response = self.get_html_response('/pending-account-deletion')
            self.assertIn('Pending Account Deletion', response.body)

    def test_get_pending_account_deletion_page_disabled(self):
        # The page 404s while the feature flag is off.
        with self.swap(constants, 'ENABLE_ACCOUNT_DELETION', False):
            self.get_html_response('/pending-account-deletion',
                                   expected_status_int=404)
class UsernameCheckHandlerTests(test_utils.GenericTestBase):
    """Tests for the username availability check endpoint."""

    def test_username_check(self):
        self.signup('abc@example.com', username='abc')
        self.login(self.EDITOR_EMAIL)
        csrf_token = self.get_new_csrf_token()

        # An already-registered username is reported as taken.
        response_dict = self.post_json(
            feconf.USERNAME_CHECK_DATA_URL, {'username': 'abc'},
            csrf_token=csrf_token)
        self.assertEqual(
            response_dict, {
                'username_is_taken': True
            })

        # A fresh username is reported as available.
        response_dict = self.post_json(
            feconf.USERNAME_CHECK_DATA_URL, {'username': 'def'},
            csrf_token=csrf_token)
        self.assertEqual(
            response_dict, {
                'username_is_taken': False
            })

        # Non-alphanumeric usernames are rejected with a 400.
        response_dict = self.post_json(
            feconf.USERNAME_CHECK_DATA_URL, {'username': '!!!INVALID!!!'},
            csrf_token=csrf_token, expected_status_int=400)
        self.assertIn(
            'can only have alphanumeric characters', response_dict['error'])

        response_dict = self.post_json(
            feconf.USERNAME_CHECK_DATA_URL,
            {'username': self.UNICODE_TEST_STRING},
            csrf_token=csrf_token, expected_status_int=400)
        self.assertIn(
            'can only have alphanumeric characters', response_dict['error'])

        self.logout()
class SiteLanguageHandlerTests(test_utils.GenericTestBase):
    """Tests for persisting the user's preferred site language."""

    def setUp(self):
        super(SiteLanguageHandlerTests, self).setUp()
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)

    def test_save_site_language_handler(self):
        # Setting the language through the preferences handler round-trips.
        language_code = 'es'
        self.login(self.EDITOR_EMAIL)
        csrf_token = self.get_new_csrf_token()
        self.put_json(
            '/preferenceshandler/data', {
                'update_type': 'preferred_site_language_code',
                'data': language_code,
            }, csrf_token=csrf_token)

        preferences = self.get_json('/preferenceshandler/data')
        self.assertIsNotNone(preferences)
        self.assertEqual(
            preferences['preferred_site_language_code'], language_code)

        self.logout()

    def test_can_update_site_language_code(self):
        # The dedicated site-language endpoint also updates the setting.
        self.login(self.EDITOR_EMAIL)
        user_settings = user_services.get_user_settings(
            self.editor_id, strict=True)
        self.assertIsNone(user_settings.preferred_site_language_code)
        csrf_token = self.get_new_csrf_token()
        self.put_json(
            feconf.SITE_LANGUAGE_DATA_URL, payload={'site_language_code': 'en'},
            csrf_token=csrf_token)
        user_settings = user_services.get_user_settings(
            self.editor_id, strict=True)
        self.assertEqual(user_settings.preferred_site_language_code, 'en')
        self.logout()
class UserInfoHandlerTests(test_utils.GenericTestBase):
    """Tests for the /userinfohandler endpoint."""

    def test_user_info_handler(self):
        # A logged-in user receives the full info payload.
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.login(self.EDITOR_EMAIL)
        json_response = self.get_json('/userinfohandler')
        self.assertDictEqual({
            'is_moderator': False,
            'is_admin': False,
            'is_topic_manager': False,
            'is_super_admin': False,
            'can_create_collections': False,
            'preferred_site_language_code': None,
            'username': self.EDITOR_USERNAME,
            'email': self.EDITOR_EMAIL,
            'user_is_logged_in': True}, json_response)
        self.logout()

        # A logged-out user only learns that they are logged out.
        json_response = self.get_json('/userinfohandler')
        self.assertDictEqual({
            'user_is_logged_in': False
        }, json_response)
class UrlHandlerTests(test_utils.GenericTestBase):
    """Tests for the /url_handler login-URL endpoint."""

    def test_login_url_is_none_for_signed_in_user(self):
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.login(self.EDITOR_EMAIL)
        response = self.get_json('/url_handler')
        # Signed-in users need no login URL.
        self.assertIsNone(response['login_url'])
        self.logout()

    def test_login_url_gets_created_for_signed_out_users(self):
        response = self.get_json(
            '/url_handler', params={'current_url': 'random_url'})
        # The generated login URL redirects back to the requested page.
        self.assertTrue(response['login_url'].endswith('random_url'))
| true | true |
f720429c5f9f58e91b08613e41ef82569fb3831b | 20,741 | py | Python | source/appModules/explorer.py | krzysz00/nvda | d34444242a529098499131165a3e60d5a05ac96f | [
"bzip2-1.0.6"
] | 1 | 2016-07-10T00:23:14.000Z | 2016-07-10T00:23:14.000Z | source/appModules/explorer.py | krzysz00/nvda | d34444242a529098499131165a3e60d5a05ac96f | [
"bzip2-1.0.6"
] | null | null | null | source/appModules/explorer.py | krzysz00/nvda | d34444242a529098499131165a3e60d5a05ac96f | [
"bzip2-1.0.6"
] | 1 | 2017-08-04T09:00:01.000Z | 2017-08-04T09:00:01.000Z | # -*- coding: UTF-8 -*-
# A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2006-2021 NV Access Limited, Joseph Lee, Łukasz Golonka, Julien Cochuyt
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
"""App module for Windows Explorer (aka Windows shell and renamed to File Explorer in Windows 8).
Provides workarounds for controls such as identifying Start button, notification area and others.
"""
from comtypes import COMError
import time
import appModuleHandler
import controlTypes
import winUser
import winVersion
import api
import speech
import eventHandler
import mouseHandler
from NVDAObjects.window import Window
from NVDAObjects.IAccessible import IAccessible, List
from NVDAObjects.UIA import UIA
from NVDAObjects.behaviors import ToolTip
from NVDAObjects.window.edit import RichEdit50, EditTextInfo
import config
# Suppress incorrect Win 10 Task switching window focus
class MultitaskingViewFrameWindow(UIA):
    """Windows 10 task switching view frame window.

    Suppresses the incorrect focus event fired on this window itself.
    """
    shouldAllowUIAFocusEvent = False
# Suppress focus ancestry for task switching list items if alt is held down (alt+tab)
class MultitaskingViewFrameListItem(UIA):
    """Task switching (alt+tab) list item.

    While the alt key is held down, report the desktop as the container so
    that focus ancestry is not announced for each switch.
    """

    def _get_container(self):
        altHeld = winUser.getAsyncKeyState(winUser.VK_MENU) & 32768
        if altHeld:
            return api.getDesktopObject()
        return super().container
# Support for Win8 start screen search suggestions.
class SuggestionListItem(UIA):
    """Windows 8 start screen search suggestion list item."""

    def event_UIA_elementSelected(self):
        # Selection moves as the user types; treat the newly selected
        # suggestion as focus so it is reported immediately.
        speech.cancelSpeech()
        api.setNavigatorObject(self, isFocus=True)
        self.reportFocus()
        super(SuggestionListItem, self).event_UIA_elementSelected()
# Windows 8 hack: Class to disable incorrect focus on windows 8 search box (containing the already correctly focused edit field)
class SearchBoxClient(IAccessible):
    """Windows 8 search box client pane.

    Suppresses the incorrect focus event on this pane, which contains the
    already correctly focused edit field.
    """
    shouldAllowIAccessibleFocusEvent = False
# Class for menu items for Windows Places and Frequently used Programs (in start menu)
# Also used for desktop items
class SysListView32EmittingDuplicateFocusEvents(IAccessible):
    """Filters duplicate focus events for start menu and desktop items.

    #474: when these items gain focus an extra focus event fires on the
    parent, which NVDA redirects to the real focus — yielding a second,
    identical focus event on the item that must be dropped.
    #2988: the same happens when returning to the Windows 7 desktop from
    other applications.
    """

    def _get_shouldAllowIAccessibleFocusEvent(self):
        if not super().shouldAllowIAccessibleFocusEvent:
            return False
        focus = eventHandler.lastQueuedFocusObject
        if type(focus) != type(self):
            return True
        selfEventKey = (
            self.event_windowHandle, self.event_objectID, self.event_childID)
        focusEventKey = (
            focus.event_windowHandle, focus.event_objectID, focus.event_childID)
        # Identical window/object/child to the queued focus: a duplicate.
        return selfEventKey != focusEventKey
class NotificationArea(IAccessible):
    """The Windows notification area, a.k.a. system tray.
    """

    # Most recent location of the systray, remembered so ExplorerToolTip can
    # tell whether the mouse is hovering over it.
    lastKnownLocation = None

    def event_gainFocus(self):
        NotificationArea.lastKnownLocation = self.location
        if mouseHandler.lastMouseEventTime < time.time() - 0.2:
            # This focus change was not caused by a mouse event.
            # If the mouse is on another systray control, the notification area toolbar will rudely
            # bounce the focus back to the object under the mouse after a brief pause.
            # Moving the mouse to the focus object isn't a good solution because
            # sometimes, the focus can't be moved away from the object under the mouse.
            # Therefore, move the mouse out of the way.
            if self.location:
                systrayLeft, systrayTop, systrayWidth, systrayHeight = self.location
                mouseLeft, mouseTop = winUser.getCursorPos()
                if (
                    systrayLeft <= mouseLeft <= systrayLeft + systrayWidth
                    and systrayTop <= mouseTop <= systrayTop + systrayHeight
                ):
                    winUser.setCursorPos(0, 0)

        if self.role == controlTypes.Role.TOOLBAR:
            # Sometimes, the toolbar itself receives the focus instead of the focused child.
            # However, the focused child still has the focused state.
            for child in self.children:
                if child.hasFocus:
                    # Redirect the focus to the focused child.
                    eventHandler.executeEvent("gainFocus", child)
                    return
            # We've really landed on the toolbar itself.
            # This was probably caused by moving the mouse out of the way in a previous focus event.
            # This previous focus event is no longer useful, so cancel speech.
            speech.cancelSpeech()

        if eventHandler.isPendingEvents("gainFocus"):
            return
        super(NotificationArea, self).event_gainFocus()
class ExplorerToolTip(ToolTip):
    """Tool-tip in Windows Explorer, with systray duplicate filtering."""

    def shouldReport(self):
        """Returns whether this tool-tip should be spoken.

        Avoids reporting systray tool-tips whose text merely repeats the
        name of the focused systray icon (#6656).
        """
        # Don't bother checking if reporting of tool-tips is disabled
        if not config.conf["presentation"]["reportTooltips"]:
            return False
        focus = api.getFocusObject()
        # Report if either
        # - the mouse has just moved
        # - the focus is not in the systray
        # - we do not know (yet) where the systray is located
        if (
            mouseHandler.lastMouseEventTime >= time.time() - 0.2
            or not isinstance(focus, NotificationArea)
            or NotificationArea.lastKnownLocation is None
        ):
            return True
        # Report if the mouse is indeed located in the systray
        systrayLeft, systrayTop, systrayWidth, systrayHeight = NotificationArea.lastKnownLocation
        mouseLeft, mouseTop = winUser.getCursorPos()
        if (
            systrayLeft <= mouseLeft <= systrayLeft + systrayWidth
            and systrayTop <= mouseTop <= systrayTop + systrayHeight
        ):
            return True
        # Report if the names differ (i.e. the tip adds information).
        if focus.name != self.name:
            return True
        # Do not report otherwise
        return False

    def event_show(self):
        if self.shouldReport():
            super().event_show()
class GridTileElement(UIA):
    """A tile on the Windows 8 start screen."""

    role = controlTypes.Role.TABLECELL

    def _get_description(self):
        """Joins the basic text of all children, skipping empty strings and
        any text that merely duplicates the tile's name.
        """
        name = self.name
        descriptionStrings = []
        for child in self.children:
            description = child.basicText
            if not description or description == name:
                continue
            descriptionStrings.append(description)
        # Fix: an unreachable second "return description" statement that
        # followed this return has been removed (dead code).
        return " ".join(descriptionStrings)
class GridListTileElement(UIA):
    """A tile in a Windows 8 start screen list; suppresses its description."""
    role = controlTypes.Role.TABLECELL
    description = None
class GridGroup(UIA):
    """A group in the Windows 8 Start Menu.
    """
    presentationType = UIA.presType_content

    # Normally the name is the first tile which is rather redundant
    # However some groups have custom header text which should be read instead
    def _get_name(self):
        # Implicitly returns None (no name) unless the first child is a
        # custom group header.
        child = self.firstChild
        if isinstance(child, UIA):
            try:
                automationID = child.UIAElement.currentAutomationID
            except COMError:
                # The element may have died; treat it as having no ID.
                automationID = None
            if automationID == "GridListGroupHeader":
                return child.name
class ImmersiveLauncher(UIA):
    # When the Windows 8 start screen opens, focus correctly goes to the first tile, but then incorrectly back to the root of the window.
    # Ignore focus events on this object.
    shouldAllowUIAFocusEvent = False
class StartButton(IAccessible):
    """Windows 8.1 and 10 Start button.

    Forces the button role and suppresses selection announcement (#5178).
    """

    role = controlTypes.Role.BUTTON

    def _get_states(self):
        # Same approach as Mozilla objects in NVDAObjects/IAccessible/Mozilla.py:
        # take the inherited state set and strip SELECTED from it.
        stateSet = super().states
        stateSet.discard(controlTypes.State.SELECTED)
        return stateSet
# Unicode directionality marks that Windows Explorer embeds in date strings;
# these are stripped before values are reported (see UIProperty and
# ReadOnlyEditBox below).
CHAR_LTR_MARK = u'\u200E'  # LEFT-TO-RIGHT MARK
CHAR_RTL_MARK = u'\u200F'  # RIGHT-TO-LEFT MARK
class UIProperty(UIA):
    """A column value in Windows Explorer's details view.

    Dates in these columns embed left-to-right / right-to-left direction
    marks which should not be reported; strip them from the value.
    """

    def _get_value(self):
        rawValue = super(UIProperty, self).value
        if rawValue is None:
            return rawValue
        return rawValue.replace(CHAR_LTR_MARK, '').replace(CHAR_RTL_MARK, '')
class ReadOnlyEditBox(IAccessible):
    """Read-only edit box in a properties window.

    Such boxes can contain dates with embedded left-to-right /
    right-to-left direction marks; remove them from the window text.
    """

    def _get_windowText(self):
        text = super(ReadOnlyEditBox, self).windowText
        if text is None:
            return text
        return text.replace(CHAR_LTR_MARK, '').replace(CHAR_RTL_MARK, '')
class MetadataEditField(RichEdit50):
    """ Used for metadata edit fields in Windows Explorer in Windows 7.
    By default these fields would use ITextDocumentTextInfo ,
    but to avoid Windows Explorer crashes we need to use EditTextInfo here. """

    @classmethod
    def _get_TextInfo(cls):
        # Cache the chosen implementation on the class attribute so the
        # Windows version check only runs the first time.
        if winVersion.getWinVer() <= winVersion.WIN7_SP1:
            cls.TextInfo = EditTextInfo
        else:
            cls.TextInfo = super().TextInfo
        return cls.TextInfo
class WorkerW(IAccessible):
    """The WorkerW pane, which can incorrectly receive focus when closing
    the Windows 7 Start Menu (#6671)."""

    def event_gainFocus(self):
        # #6671: Normally we do not allow WorkerW thread to send gain focus event,
        # as it causes 'pane" to be announced when minimizing windows or moving to desktop.
        # However when closing Windows 7 Start Menu in some cases
        # focus lands on it instead of the focused desktop item.
        # Simply ignore the event if running on anything other than Win 7.
        if winVersion.getWinVer() > winVersion.WIN7_SP1:
            return
        if eventHandler.isPendingEvents("gainFocus"):
            return
        if self.simpleFirstChild:
            # If focus is not going to be moved automatically
            # we need to forcefully move it to the focused desktop item.
            # As we are interested in the first focusable object below the pane use simpleFirstChild.
            self.simpleFirstChild.setFocus()
            return
        super().event_gainFocus()
class AppModule(appModuleHandler.AppModule):
def chooseNVDAObjectOverlayClasses(self, obj, clsList):
    """Inserts Explorer-specific overlay classes at the front of clsList.

    Earlier entries take precedence. Most branches return as soon as a
    decision is made, as an optimization to avoid further comparisons.
    """
    windowClass = obj.windowClassName
    role = obj.role

    if windowClass in ("Search Box", "UniversalSearchBand") and role == controlTypes.Role.PANE and isinstance(obj, IAccessible):
        clsList.insert(0, SearchBoxClient)
        return  # Optimization: return early to avoid comparing class names and roles that will never match.

    if windowClass == "ToolbarWindow32":
        if role != controlTypes.Role.POPUPMENU:
            try:
                # The toolbar's immediate parent is its window object, so we need to go one further.
                toolbarParent = obj.parent.parent
                if role != controlTypes.Role.TOOLBAR:
                    # Toolbar item.
                    toolbarParent = toolbarParent.parent
            except AttributeError:
                toolbarParent = None
            if toolbarParent and toolbarParent.windowClassName == "SysPager":
                clsList.insert(0, NotificationArea)
                return

    if obj.role == controlTypes.Role.TOOLTIP:
        clsList.insert(0, ExplorerToolTip)
        return

    if windowClass == "Edit" and controlTypes.State.READONLY in obj.states:
        clsList.insert(0, ReadOnlyEditBox)
        return  # Optimization: return early to avoid comparing class names and roles that will never match.

    if windowClass == "SysListView32":
        # Start menu items and desktop icons emit duplicate focus events.
        if (
            role == controlTypes.Role.MENUITEM
            or (
                role == controlTypes.Role.LISTITEM
                and obj.simpleParent
                and obj.simpleParent.simpleParent
                and obj.simpleParent.simpleParent == api.getDesktopObject()
            )
        ):
            clsList.insert(0, SysListView32EmittingDuplicateFocusEvents)
        return  # Optimization: return early to avoid comparing class names and roles that will never match.

    # #5178: Start button in Windows 8.1 and 10 should not have been a list in the first place.
    if windowClass == "Start" and role in (controlTypes.Role.LIST, controlTypes.Role.BUTTON):
        if role == controlTypes.Role.LIST:
            clsList.remove(List)
        clsList.insert(0, StartButton)
        return  # Optimization: return early to avoid comparing class names and roles that will never match.

    if windowClass == 'RICHEDIT50W' and obj.windowControlID == 256:
        clsList.insert(0, MetadataEditField)
        return  # Optimization: return early to avoid comparing class names and roles that will never match.

    if windowClass == "WorkerW" and role == controlTypes.Role.PANE and obj.name is None:
        clsList.insert(0, WorkerW)
        return  # Optimization: return early to avoid comparing class names and roles that will never match.

    if isinstance(obj, UIA):
        uiaClassName = obj.UIAElement.cachedClassName
        if uiaClassName == "GridTileElement":
            clsList.insert(0, GridTileElement)
        elif uiaClassName == "GridListTileElement":
            clsList.insert(0, GridListTileElement)
        elif uiaClassName == "GridGroup":
            clsList.insert(0, GridGroup)
        elif uiaClassName == "ImmersiveLauncher" and role == controlTypes.Role.PANE:
            clsList.insert(0, ImmersiveLauncher)
        elif uiaClassName == "ListViewItem" and obj.UIAElement.cachedAutomationId.startswith('Suggestion_'):
            clsList.insert(0, SuggestionListItem)
        # Multitasking view frame window
        elif (
            # Windows 10 and earlier
            (uiaClassName == "MultitaskingViewFrame" and role == controlTypes.Role.WINDOW)
            # Windows 11 where a pane window receives focus when switching tasks
            or (uiaClassName == "Windows.UI.Input.InputSite.WindowClass" and role == controlTypes.Role.PANE)
        ):
            clsList.insert(0, MultitaskingViewFrameWindow)
        # Windows 10 task switch list
        elif role == controlTypes.Role.LISTITEM and (
            # RS4 and below we can match on a window class
            windowClass == "MultitaskingViewFrame" or
            # RS5 and above we must look for a particular UIA automationID on the list
            isinstance(obj.parent, UIA) and obj.parent.UIAElement.cachedAutomationID == "SwitchItemListControl"
        ):
            clsList.insert(0, MultitaskingViewFrameListItem)
        elif uiaClassName == "UIProperty" and role == controlTypes.Role.EDITABLETEXT:
            clsList.insert(0, UIProperty)
def _get_statusBar(self):
    """Locates the UIA status bar of the foreground File Explorer window.

    Raises NotImplementedError when the foreground window is not File
    Explorer or no status bar element can be found, so that NVDA's
    standard status bar lookup takes over.
    """
    foreground = api.getForegroundObject()
    if not isinstance(foreground, UIA) or not foreground.windowClassName == "CabinetWClass":
        # This is not the file explorer window. Resort to standard behavior.
        raise NotImplementedError
    # Imported here to avoid a module-level dependency on UIAHandler.
    import UIAHandler
    clientObject = UIAHandler.handler.clientObject
    condition = clientObject.createPropertyCondition(
        UIAHandler.UIA_ControlTypePropertyId,
        UIAHandler.UIA_StatusBarControlTypeId
    )
    walker = clientObject.createTreeWalker(condition)
    try:
        element = walker.getFirstChildElement(foreground.UIAElement)
    except COMError:
        # We could not find the expected object. Resort to standard behavior.
        raise NotImplementedError()
    element = element.buildUpdatedCache(UIAHandler.handler.baseCacheRequest)
    statusBar = UIA(UIAElement=element)
    return statusBar
@staticmethod
def _getStatusBarTextWin7(obj) -> str:
"""For status bar in Windows 7 Windows Explorer we're interested only in the name of the first child
the rest are either empty or contain garbage."""
if obj.firstChild and obj.firstChild.name:
return obj.firstChild.name
raise NotImplementedError
@staticmethod
def _getStatusBarTextPostWin7(obj) -> str:
# The expected status bar, as of Windows 10 20H2 at least, contains:
# - A grouping with a single static text child presenting the total number of elements
# - Optionally, a grouping with a single static text child presenting the number of
# selected elements and their total size, missing if no element is selected.
# - A grouping with two radio buttons to control the display mode.
parts = []
for index, child in enumerate(obj.children):
if (
child.role == controlTypes.Role.GROUPING
and child.childCount == 1
and child.firstChild.role == controlTypes.Role.STATICTEXT
):
parts.append(child.firstChild.name)
elif (
child.role == controlTypes.Role.GROUPING
and child.childCount > 1
and not any(
grandChild for grandChild in child.children
if grandChild.role != controlTypes.Role.RADIOBUTTON
)
):
selected = next(iter(
grandChild for grandChild in child.children
if controlTypes.State.CHECKED in grandChild.states
), None)
if selected is not None:
parts.append(" ".join(
[child.name]
+ ([selected.name] if selected is not None else [])
))
else:
# Unexpected child, try to retrieve something useful.
parts.append(" ".join(
chunk
for chunk in (child.name, child.value)
if chunk and isinstance(chunk, str) and not chunk.isspace()
))
if not parts:
# We couldn't retrieve anything. Resort to standard behavior.
raise NotImplementedError
return ", ".join(parts)
def getStatusBarText(self, obj) -> str:
if obj.windowClassName == "msctls_statusbar32": # Windows 7
return self._getStatusBarTextWin7(obj)
if (
isinstance(obj, UIA) or obj.UIAElement.cachedClassname == "StatusBarModuleInner"
): # Windows 8 or later
return self._getStatusBarTextPostWin7(obj)
else:
# This is not the file explorer status bar. Resort to standard behavior.
raise NotImplementedError
def event_NVDAObject_init(self, obj):
windowClass = obj.windowClassName
role = obj.role
if windowClass == "ToolbarWindow32" and role == controlTypes.Role.POPUPMENU:
parent = obj.parent
if parent and parent.windowClassName == "SysPager" and not (obj.windowStyle & 0x80):
# This is the menu for a group of icons on the task bar, which Windows stupidly names "Application".
obj.name = None
return
if windowClass == "#32768":
# Standard menu.
parent = obj.parent
if parent and not parent.parent:
# Context menu.
# We don't trust the names that Explorer gives to context menus, so better to have no name at all.
obj.name = None
return
if windowClass == "DV2ControlHost" and role == controlTypes.Role.PANE:
# Windows 7 start menu.
obj.presentationType=obj.presType_content
obj.isPresentableFocusAncestor = True
# In Windows 7, the description of this pane is extremely verbose help text, so nuke it.
obj.description = None
return
# The Address bar is embedded inside a progressbar, how strange.
# Lets hide that
if windowClass=="msctls_progress32" and winUser.getClassName(winUser.getAncestor(obj.windowHandle,winUser.GA_PARENT))=="Address Band Root":
obj.presentationType=obj.presType_layout
return
if windowClass == "DirectUIHWND" and role == controlTypes.Role.LIST:
# Is this a list containing search results in Windows 7 start menu?
isWin7SearchResultsList = False
try:
if obj.parent and obj.parent.parent:
parent = obj.parent.parent.parent
isWin7SearchResultsList = parent is not None and parent.windowClassName == "Desktop Search Open View"
except AttributeError:
isWin7SearchResultsList = False
if isWin7SearchResultsList:
# Namae of this list is not useful and should be discarded.
obj.name = None
return
def event_gainFocus(self, obj, nextHandler):
wClass = obj.windowClassName
if wClass == "ToolbarWindow32" and obj.role == controlTypes.Role.MENUITEM and obj.parent.role == controlTypes.Role.MENUBAR and eventHandler.isPendingEvents("gainFocus"):
# When exiting a menu, Explorer fires focus on the top level menu item before it returns to the previous focus.
# Unfortunately, this focus event always occurs in a subsequent cycle, so the event limiter doesn't eliminate it.
# Therefore, if there is a pending focus event, don't bother handling this event.
return
if wClass in ("ForegroundStaging", "LauncherTipWnd", "ApplicationManager_DesktopShellWindow"):
# #5116: The Windows 10 Task View fires foreground/focus on this weird invisible window and foreground staging screen before and after it appears.
# This causes NVDA to report "unknown", so ignore it.
# We can't do this using shouldAllowIAccessibleFocusEvent because this isn't checked for foreground.
# #8137: also seen when opening quick link menu (Windows+X) on Windows 8 and later.
return
nextHandler()
def isGoodUIAWindow(self, hwnd):
# #9204: shell raises window open event for emoji panel in build 18305 and later.
if (
winVersion.getWinVer() >= winVersion.WIN10_1903
and winUser.getClassName(hwnd) == "ApplicationFrameWindow"
):
return True
return False
def event_UIA_window_windowOpen(self, obj, nextHandler):
# Send UIA window open event to input app window.
if isinstance(obj, UIA) and obj.UIAElement.cachedClassName == "ApplicationFrameWindow":
inputPanelWindow = obj.firstChild
inputPanelAppName = (
# 19H2 and earlier
"windowsinternal_composableshell_experiences_textinput_inputapp",
# 20H1 and later
"textinputhost"
)
if inputPanelWindow and inputPanelWindow.appModule.appName in inputPanelAppName:
eventHandler.executeEvent("UIA_window_windowOpen", inputPanelWindow)
return
nextHandler()
| 38.768224 | 172 | 0.733282 |
from comtypes import COMError
import time
import appModuleHandler
import controlTypes
import winUser
import winVersion
import api
import speech
import eventHandler
import mouseHandler
from NVDAObjects.window import Window
from NVDAObjects.IAccessible import IAccessible, List
from NVDAObjects.UIA import UIA
from NVDAObjects.behaviors import ToolTip
from NVDAObjects.window.edit import RichEdit50, EditTextInfo
import config
class MultitaskingViewFrameWindow(UIA):
shouldAllowUIAFocusEvent=False
class MultitaskingViewFrameListItem(UIA):
def _get_container(self):
if winUser.getAsyncKeyState(winUser.VK_MENU)&32768:
return api.getDesktopObject()
else:
return super(MultitaskingViewFrameListItem,self).container
class SuggestionListItem(UIA):
def event_UIA_elementSelected(self):
speech.cancelSpeech()
api.setNavigatorObject(self, isFocus=True)
self.reportFocus()
super(SuggestionListItem,self).event_UIA_elementSelected()
class SearchBoxClient(IAccessible):
shouldAllowIAccessibleFocusEvent=False
class SysListView32EmittingDuplicateFocusEvents(IAccessible):
stQueuedFocusObject
if type(focus)!=type(self) or (self.event_windowHandle,self.event_objectID,self.event_childID)!=(focus.event_windowHandle,focus.event_objectID,focus.event_childID):
return True
return False
class NotificationArea(IAccessible):
lastKnownLocation = None
def event_gainFocus(self):
NotificationArea.lastKnownLocation = self.location
if mouseHandler.lastMouseEventTime < time.time() - 0.2:
# sometimes, the focus can't be moved away from the object under the mouse.
if self.location:
systrayLeft, systrayTop, systrayWidth, systrayHeight = self.location
mouseLeft, mouseTop = winUser.getCursorPos()
if (
systrayLeft <= mouseLeft <= systrayLeft + systrayWidth
and systrayTop <= mouseTop <= systrayTop + systrayHeight
):
winUser.setCursorPos(0, 0)
if self.role == controlTypes.Role.TOOLBAR:
for child in self.children:
if child.hasFocus:
eventHandler.executeEvent("gainFocus", child)
return
# This was probably caused by moving the mouse out of the way in a previous focus event.
# This previous focus event is no longer useful, so cancel speech.
speech.cancelSpeech()
if eventHandler.isPendingEvents("gainFocus"):
return
super(NotificationArea, self).event_gainFocus()
class ExplorerToolTip(ToolTip):
def shouldReport(self):
# Avoid reporting systray tool-tips if their text equals the focused systray icon name (#6656)
# Don't bother checking if reporting of tool-tips is disabled
if not config.conf["presentation"]["reportTooltips"]:
return False
focus = api.getFocusObject()
if (
mouseHandler.lastMouseEventTime >= time.time() - 0.2
or not isinstance(focus, NotificationArea)
or NotificationArea.lastKnownLocation is None
):
return True
systrayLeft, systrayTop, systrayWidth, systrayHeight = NotificationArea.lastKnownLocation
mouseLeft, mouseTop = winUser.getCursorPos()
if (
systrayLeft <= mouseLeft <= systrayLeft + systrayWidth
and systrayTop <= mouseTop <= systrayTop + systrayHeight
):
return True
if focus.name != self.name:
return True
return False
def event_show(self):
if self.shouldReport():
super().event_show()
class GridTileElement(UIA):
role=controlTypes.Role.TABLECELL
def _get_description(self):
name=self.name
descriptionStrings=[]
for child in self.children:
description=child.basicText
if not description or description==name: continue
descriptionStrings.append(description)
return " ".join(descriptionStrings)
return description
class GridListTileElement(UIA):
role=controlTypes.Role.TABLECELL
description=None
class GridGroup(UIA):
presentationType=UIA.presType_content
def _get_name(self):
child=self.firstChild
if isinstance(child,UIA):
try:
automationID=child.UIAElement.currentAutomationID
except COMError:
automationID=None
if automationID=="GridListGroupHeader":
return child.name
class ImmersiveLauncher(UIA):
shouldAllowUIAFocusEvent=False
class StartButton(IAccessible):
role = controlTypes.Role.BUTTON
def _get_states(self):
ates.discard(controlTypes.State.SELECTED)
return states
CHAR_LTR_MARK = u'\u200E'
CHAR_RTL_MARK = u'\u200F'
class UIProperty(UIA):
def _get_value(self):
value = super(UIProperty, self).value
if value is None:
return value
return value.replace(CHAR_LTR_MARK,'').replace(CHAR_RTL_MARK,'')
class ReadOnlyEditBox(IAccessible):
def _get_windowText(self):
windowText = super(ReadOnlyEditBox, self).windowText
if windowText is not None:
return windowText.replace(CHAR_LTR_MARK,'').replace(CHAR_RTL_MARK,'')
return windowText
class MetadataEditField(RichEdit50):
@classmethod
def _get_TextInfo(cls):
if winVersion.getWinVer() <= winVersion.WIN7_SP1:
cls.TextInfo = EditTextInfo
else:
cls.TextInfo = super().TextInfo
return cls.TextInfo
class WorkerW(IAccessible):
def event_gainFocus(self):
us lands on it instead of the focused desktop item.
# Simply ignore the event if running on anything other than Win 7.
if winVersion.getWinVer() > winVersion.WIN7_SP1:
return
if eventHandler.isPendingEvents("gainFocus"):
return
if self.simpleFirstChild:
# If focus is not going to be moved autotically
# we need to forcefully move it to the focused desktop item.
# As we are interested in the first focusable object below the pane use simpleFirstChild.
self.simpleFirstChild.setFocus()
return
super().event_gainFocus()
class AppModule(appModuleHandler.AppModule):
def chooseNVDAObjectOverlayClasses(self, obj, clsList):
windowClass = obj.windowClassName
role = obj.role
if windowClass in ("Search Box","UniversalSearchBand") and role==controlTypes.Role.PANE and isinstance(obj,IAccessible):
clsList.insert(0,SearchBoxClient)
return # Optimization: return early to avoid comparing class names and roles that will never match.
if windowClass == "ToolbarWindow32":
if role != controlTypes.Role.POPUPMENU:
try:
# The toolbar's immediate parent is its window object, so we need to go one further.
toolbarParent = obj.parent.parent
if role != controlTypes.Role.TOOLBAR:
# Toolbar item.
toolbarParent = toolbarParent.parent
except AttributeError:
toolbarParent = None
if toolbarParent and toolbarParent.windowClassName == "SysPager":
clsList.insert(0, NotificationArea)
return
if obj.role == controlTypes.Role.TOOLTIP:
clsList.insert(0, ExplorerToolTip)
return
if windowClass == "Edit" and controlTypes.State.READONLY in obj.states:
clsList.insert(0, ReadOnlyEditBox)
return # Optimization: return early to avoid comparing class names and roles that will never match.
if windowClass == "SysListView32":
if(
role == controlTypes.Role.MENUITEM
or(
role == controlTypes.Role.LISTITEM
and obj.simpleParent
and obj.simpleParent.simpleParent
and obj.simpleParent.simpleParent == api.getDesktopObject()
)
):
clsList.insert(0, SysListView32EmittingDuplicateFocusEvents)
return # Optimization: return early to avoid comparing class names and roles that will never match.
# #5178: Start button in Windows 8.1 and 10 should not have been a list in the first place.
if windowClass == "Start" and role in (controlTypes.Role.LIST, controlTypes.Role.BUTTON):
if role == controlTypes.Role.LIST:
clsList.remove(List)
clsList.insert(0, StartButton)
return # Optimization: return early to avoid comparing class names and roles that will never match.
if windowClass == 'RICHEDIT50W' and obj.windowControlID == 256:
clsList.insert(0, MetadataEditField)
return # Optimization: return early to avoid comparing class names and roles that will never match.
if windowClass == "WorkerW" and role == controlTypes.Role.PANE and obj.name is None:
clsList.insert(0, WorkerW)
return # Optimization: return early to avoid comparing class names and roles that will never match.
if isinstance(obj, UIA):
uiaClassName = obj.UIAElement.cachedClassName
if uiaClassName == "GridTileElement":
clsList.insert(0, GridTileElement)
elif uiaClassName == "GridListTileElement":
clsList.insert(0, GridListTileElement)
elif uiaClassName == "GridGroup":
clsList.insert(0, GridGroup)
elif uiaClassName == "ImmersiveLauncher" and role == controlTypes.Role.PANE:
clsList.insert(0, ImmersiveLauncher)
elif uiaClassName == "ListViewItem" and obj.UIAElement.cachedAutomationId.startswith('Suggestion_'):
clsList.insert(0, SuggestionListItem)
# Multitasking view frame window
elif (
# Windows 10 and earlier
(uiaClassName == "MultitaskingViewFrame" and role == controlTypes.Role.WINDOW)
# Windows 11 where a pane window receives focus when switching tasks
or (uiaClassName == "Windows.UI.Input.InputSite.WindowClass" and role == controlTypes.Role.PANE)
):
clsList.insert(0, MultitaskingViewFrameWindow)
# Windows 10 task switch list
elif role == controlTypes.Role.LISTITEM and (
# RS4 and below we can match on a window class
windowClass == "MultitaskingViewFrame" or
# RS5 and above we must look for a particular UIA automationID on the list
isinstance(obj.parent,UIA) and obj.parent.UIAElement.cachedAutomationID=="SwitchItemListControl"
):
clsList.insert(0, MultitaskingViewFrameListItem)
elif uiaClassName == "UIProperty" and role == controlTypes.Role.EDITABLETEXT:
clsList.insert(0, UIProperty)
def _get_statusBar(self):
foreground = api.getForegroundObject()
if not isinstance(foreground, UIA) or not foreground.windowClassName == "CabinetWClass":
# This is not the file explorer window. Resort to standard behavior.
raise NotImplementedError
import UIAHandler
clientObject = UIAHandler.handler.clientObject
condition = clientObject.createPropertyCondition(
UIAHandler.UIA_ControlTypePropertyId,
UIAHandler.UIA_StatusBarControlTypeId
)
walker = clientObject.createTreeWalker(condition)
try:
element = walker.getFirstChildElement(foreground.UIAElement)
except COMError:
# We could not find the expected object. Resort to standard behavior.
raise NotImplementedError()
element = element.buildUpdatedCache(UIAHandler.handler.baseCacheRequest)
statusBar = UIA(UIAElement=element)
return statusBar
@staticmethod
def _getStatusBarTextWin7(obj) -> str:
if obj.firstChild and obj.firstChild.name:
return obj.firstChild.name
raise NotImplementedError
@staticmethod
def _getStatusBarTextPostWin7(obj) -> str:
# The expected status bar, as of Windows 10 20H2 at least, contains:
# - A grouping with a single static text child presenting the total number of elements
# - Optionally, a grouping with a single static text child presenting the number of
# selected elements and their total size, missing if no element is selected.
# - A grouping with two radio buttons to control the display mode.
parts = []
for index, child in enumerate(obj.children):
if (
child.role == controlTypes.Role.GROUPING
and child.childCount == 1
and child.firstChild.role == controlTypes.Role.STATICTEXT
):
parts.append(child.firstChild.name)
elif (
child.role == controlTypes.Role.GROUPING
and child.childCount > 1
and not any(
grandChild for grandChild in child.children
if grandChild.role != controlTypes.Role.RADIOBUTTON
)
):
selected = next(iter(
grandChild for grandChild in child.children
if controlTypes.State.CHECKED in grandChild.states
), None)
if selected is not None:
parts.append(" ".join(
[child.name]
+ ([selected.name] if selected is not None else [])
))
else:
# Unexpected child, try to retrieve something useful.
parts.append(" ".join(
chunk
for chunk in (child.name, child.value)
if chunk and isinstance(chunk, str) and not chunk.isspace()
))
if not parts:
# We couldn't retrieve anything. Resort to standard behavior.
raise NotImplementedError
return ", ".join(parts)
def getStatusBarText(self, obj) -> str:
if obj.windowClassName == "msctls_statusbar32": # Windows 7
return self._getStatusBarTextWin7(obj)
if (
isinstance(obj, UIA) or obj.UIAElement.cachedClassname == "StatusBarModuleInner"
): # Windows 8 or later
return self._getStatusBarTextPostWin7(obj)
else:
# This is not the file explorer status bar. Resort to standard behavior.
raise NotImplementedError
def event_NVDAObject_init(self, obj):
windowClass = obj.windowClassName
role = obj.role
if windowClass == "ToolbarWindow32" and role == controlTypes.Role.POPUPMENU:
parent = obj.parent
if parent and parent.windowClassName == "SysPager" and not (obj.windowStyle & 0x80):
# This is the menu for a group of icons on the task bar, which Windows stupidly names "Application".
obj.name = None
return
if windowClass == "#32768":
# Standard menu.
parent = obj.parent
if parent and not parent.parent:
# Context menu.
# We don't trust the names that Explorer gives to context menus, so better to have no name at all.
obj.name = None
return
if windowClass == "DV2ControlHost" and role == controlTypes.Role.PANE:
# Windows 7 start menu.
obj.presentationType=obj.presType_content
obj.isPresentableFocusAncestor = True
# In Windows 7, the description of this pane is extremely verbose help text, so nuke it.
obj.description = None
return
# The Address bar is embedded inside a progressbar, how strange.
# Lets hide that
if windowClass=="msctls_progress32" and winUser.getClassName(winUser.getAncestor(obj.windowHandle,winUser.GA_PARENT))=="Address Band Root":
obj.presentationType=obj.presType_layout
return
if windowClass == "DirectUIHWND" and role == controlTypes.Role.LIST:
# Is this a list containing search results in Windows 7 start menu?
isWin7SearchResultsList = False
try:
if obj.parent and obj.parent.parent:
parent = obj.parent.parent.parent
isWin7SearchResultsList = parent is not None and parent.windowClassName == "Desktop Search Open View"
except AttributeError:
isWin7SearchResultsList = False
if isWin7SearchResultsList:
# Namae of this list is not useful and should be discarded.
obj.name = None
return
def event_gainFocus(self, obj, nextHandler):
wClass = obj.windowClassName
if wClass == "ToolbarWindow32" and obj.role == controlTypes.Role.MENUITEM and obj.parent.role == controlTypes.Role.MENUBAR and eventHandler.isPendingEvents("gainFocus"):
# When exiting a menu, Explorer fires focus on the top level menu item before it returns to the previous focus.
# Unfortunately, this focus event always occurs in a subsequent cycle, so the event limiter doesn't eliminate it.
# Therefore, if there is a pending focus event, don't bother handling this event.
return
if wClass in ("ForegroundStaging", "LauncherTipWnd", "ApplicationManager_DesktopShellWindow"):
# #5116: The Windows 10 Task View fires foreground/focus on this weird invisible window and foreground staging screen before and after it appears.
# This causes NVDA to report "unknown", so ignore it.
# We can't do this using shouldAllowIAccessibleFocusEvent because this isn't checked for foreground.
# #8137: also seen when opening quick link menu (Windows+X) on Windows 8 and later.
return
nextHandler()
def isGoodUIAWindow(self, hwnd):
# #9204: shell raises window open event for emoji panel in build 18305 and later.
if (
winVersion.getWinVer() >= winVersion.WIN10_1903
and winUser.getClassName(hwnd) == "ApplicationFrameWindow"
):
return True
return False
def event_UIA_window_windowOpen(self, obj, nextHandler):
# Send UIA window open event to input app window.
if isinstance(obj, UIA) and obj.UIAElement.cachedClassName == "ApplicationFrameWindow":
inputPanelWindow = obj.firstChild
inputPanelAppName = (
# 19H2 and earlier
"windowsinternal_composableshell_experiences_textinput_inputapp",
# 20H1 and later
"textinputhost"
)
if inputPanelWindow and inputPanelWindow.appModule.appName in inputPanelAppName:
eventHandler.executeEvent("UIA_window_windowOpen", inputPanelWindow)
return
nextHandler()
| true | true |
f7204379766eb4e6ae9bd5b9297cae2841d80760 | 8,607 | py | Python | gpxo/track.py | liquidpizza/gpxo | 4f8eb43a4d6b879f51a7e688dfa80b4aa5558889 | [
"BSD-3-Clause"
] | null | null | null | gpxo/track.py | liquidpizza/gpxo | 4f8eb43a4d6b879f51a7e688dfa80b4aa5558889 | [
"BSD-3-Clause"
] | null | null | null | gpxo/track.py | liquidpizza/gpxo | 4f8eb43a4d6b879f51a7e688dfa80b4aa5558889 | [
"BSD-3-Clause"
] | null | null | null | """General tools for gpx data processing based on gpxpy."""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import gpxpy
from vincenty import vincenty
import mplleaflet
from .general import smooth, closest_pt
# =============================== Misc. Config ===============================
# short names for plots
shortnames = {'t': 'time',
's': 'duration (s)',
'd': 'distance (km)',
'v': 'velocity (km/h)',
'z': 'elevation (m)',
'c': 'compass (°)'}
# ========================= Misc. private functions ==========================
# Function to transform array of timedeltas to seoncds
_total_seconds = np.vectorize(lambda dt: dt.total_seconds())
# ============================ Main class (Track) ============================
class Track:
def __init__(self, filename, track=0, segment=0):
with open(filename, 'r') as gpx_file:
gpx = gpxpy.parse(gpx_file)
pts = gpx.tracks[track].segments[segment].points
self.latitude = np.array([pt.latitude for pt in pts])
self.longitude = np.array([pt.longitude for pt in pts])
self.elevation = np.array([pt.elevation for pt in pts])
self.time = np.array([pt.time for pt in pts])
# If some elevation or time data is missing, just set attribute to None
if any(self.time == None):
self.time = None
if any(self.elevation == None):
self.elevation = None
@staticmethod
def _distance(position1, position2):
"""Distance between two positions (latitude, longitude)."""
return vincenty(position1, position2)
def _resample(self, quantity, reference):
"""Resample quantities (velocity, compass) to fall back on reference
Reference is typically time or distance."""
# midpoints correponding to shifted quantity
midpts = reference[:-1] + (np.diff(reference) / 2)
# linear interpolation to fall back to initial times
qty_resampled = np.interp(reference, midpts, quantity)
return qty_resampled
@property
def seconds(self):
if self.time is not None:
return _total_seconds(self.time - self.time[0])
@property
def distance(self):
"""Travelled distance in kilometers."""
ds = [0]
x1s = self.latitude[:-1]
x2s = self.latitude[1:]
y1s = self.longitude[:-1]
y2s = self.longitude[1:]
for x1, x2, y1, y2 in zip(x1s, x2s, y1s, y2s):
dd = self._distance((x1, y1), (x2, y2))
ds.append(dd)
return np.cumsum(ds)
@property
def compass(self):
"""Compass bearing in decimal degrees (°). See gpxo.compass"""
lat1, long1 = np.radians((self.latitude[:-1], self.longitude[:-1]))
lat2, long2 = np.radians((self.latitude[1:], self.longitude[1:]))
d_long = long2 - long1
x = np.sin(d_long) * np.cos(lat2)
y = np.cos(lat1) * np.sin(lat2) - (np.sin(lat1) * np.cos(lat2) * np.cos(d_long))
# Resample before taking arctan because if not, interpolation fails
# when the signal fluctuates between 0 and 360° when compass is N
x_res = self._resample(x, self.distance)
y_res = self._resample(y, self.distance)
initial_bearing = np.arctan2(x_res, y_res)
# Now we have the initial bearing but np.arctan2 return values
# from -180° to + 180° which is not what we want for a compass bearing
# The solution is to normalize the initial bearing as shown below
initial_bearing = np.degrees(initial_bearing)
compass_bearing = (initial_bearing + 360) % 360
return compass_bearing
@property
def velocity(self):
"""Instantaneous velocity in km/h."""
if self.time is not None:
dt = np.diff(self.seconds)
dd = np.diff(self.distance)
vs = 3600 * dd / dt
return self._resample(vs, self.seconds)
else:
return None
@property
def data(self):
"""pd.DataFrame with all track data (time, position, velocity etc.)"""
names = ['latitude (°)', 'longitude (°)', 'distance (km)', 'compass (°)']
columns = [self.latitude, self.longitude, self.distance, self.compass]
if self.time is not None:
names += ['time', ' duration (s)', 'velocity (km/h)']
columns += [self.time, self.seconds, self.velocity]
if self.elevation is not None:
names.append('elevation (m)')
columns.append(self.elevation)
data = pd.DataFrame(dict(zip(names, columns)))
if self.time is not None:
data['time'] = data['time'].dt.tz_localize(None)
data.set_index('time', inplace=True)
return data
def _shortname_to_column(self, name):
"""shorname to column name in self.data."""
try:
cname = shortnames[name]
except KeyError:
raise ValueError(f'Invalid short name: {name}. ')
if cname == 'time':
column = self.data.index
else:
try:
column = self.data[cname]
except KeyError:
raise KeyError(f'{cname} Data unavailable in current track. ')
return {'name': cname, 'column': column}
def plot(self, mode, *args, **kwargs):
"""Plot columns of self.data (use pandas DataFrame plot arguments).
Parameters
----------
- mode (str): 2 letters that define short names for x and y axis
- *args: any additional argument for matplotlib ax.plot()
- **kwargs: any additional keyword argument for matplotlib ax.plot()
Output
------
- matplotlib axes
Short names
-----------
't': 'time'
's': 'duration (s)'
'd': 'distance (km)'
'v': 'velocity (km/h)'
'z': 'elevation (m)'
'c': 'compass (°)'
"""
try:
xname, yname = mode
except ValueError:
raise ValueError('Invalid plot mode (should be two letters, e.g. '
f"'tv', not {mode}")
xinfo = self._shortname_to_column(xname)
xlabel = xinfo['name']
x = xinfo['column']
yinfo = self._shortname_to_column(yname)
ylabel = yinfo['name']
y = yinfo['column']
fig, ax = plt.subplots()
ax.plot(x, y, *args, **kwargs)
if xlabel == 'time':
fig.autofmt_xdate()
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ax
def smooth(self, n=5, window='hanning'):
"""Smooth position data (and subsequently distance, velocity etc.)
Parameters
----------
- n: size of moving window for smoothing
- window: type of window (e.g. 'hanning' or 'flat', see gpxo.smooth())
"""
self.latitude = smooth(self.latitude, n=n, window=window)
self.longitude = smooth(self.longitude, n=n, window=window)
self.elevation = smooth(self.elevation, n=n, window=window)
def closest_to(self, pt):
"""Find index of point in trajectory that is closest to pt=(lat, long)."""
return closest_pt(pt, (self.latitude, self.longitude))
def map(self, map_type='osm', embed=False, ax=None, size=(10, 10),
plot='plot', **kwargs):
"""Plot trajectory on map.
Parameters
----------
- map_type can be e.g. osm, esri_aerial, esri_worldtopo, etc. see:
https://github.com/jwass/mplleaflet/blob/master/mplleaflet/maptiles.py
- embed: if True, embed plot in Jupyter. If False (default), open in
browser.
- ax: if not None, use provided matplotlib axes.
- size: when embedded, size of the figure.
- plot: 'plot' or 'scatter'
- **kwargs: any plt.plot or plt.scatter keyword arguments
"""
if ax is None:
fig, ax = plt.subplots(figsize=size)
else:
fig = ax.figure
if plot == 'plot':
ax.plot(self.longitude, self.latitude, '.-r', **kwargs)
elif plot == 'scatter':
ax.scatter(self.longitude, self.latitude, **kwargs)
else:
raise ValueError(f'Unrecognized plot type: {plot}')
parameters = {'fig': fig, 'tiles': map_type}
if embed:
leaflet = mplleaflet.display(**parameters)
else:
leaflet = mplleaflet.show(**parameters)
return leaflet
| 31.412409 | 88 | 0.562449 |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import gpxpy
from vincenty import vincenty
import mplleaflet
from .general import smooth, closest_pt
shortnames = {'t': 'time',
's': 'duration (s)',
'd': 'distance (km)',
'v': 'velocity (km/h)',
'z': 'elevation (m)',
'c': 'compass (°)'}
_total_seconds = np.vectorize(lambda dt: dt.total_seconds())
class Track:
def __init__(self, filename, track=0, segment=0):
with open(filename, 'r') as gpx_file:
gpx = gpxpy.parse(gpx_file)
pts = gpx.tracks[track].segments[segment].points
self.latitude = np.array([pt.latitude for pt in pts])
self.longitude = np.array([pt.longitude for pt in pts])
self.elevation = np.array([pt.elevation for pt in pts])
self.time = np.array([pt.time for pt in pts])
if any(self.time == None):
self.time = None
if any(self.elevation == None):
self.elevation = None
@staticmethod
def _distance(position1, position2):
return vincenty(position1, position2)
def _resample(self, quantity, reference):
midpts = reference[:-1] + (np.diff(reference) / 2)
qty_resampled = np.interp(reference, midpts, quantity)
return qty_resampled
@property
def seconds(self):
if self.time is not None:
return _total_seconds(self.time - self.time[0])
@property
def distance(self):
ds = [0]
x1s = self.latitude[:-1]
x2s = self.latitude[1:]
y1s = self.longitude[:-1]
y2s = self.longitude[1:]
for x1, x2, y1, y2 in zip(x1s, x2s, y1s, y2s):
dd = self._distance((x1, y1), (x2, y2))
ds.append(dd)
return np.cumsum(ds)
@property
def compass(self):
lat1, long1 = np.radians((self.latitude[:-1], self.longitude[:-1]))
lat2, long2 = np.radians((self.latitude[1:], self.longitude[1:]))
d_long = long2 - long1
x = np.sin(d_long) * np.cos(lat2)
y = np.cos(lat1) * np.sin(lat2) - (np.sin(lat1) * np.cos(lat2) * np.cos(d_long))
x_res = self._resample(x, self.distance)
y_res = self._resample(y, self.distance)
initial_bearing = np.arctan2(x_res, y_res)
initial_bearing = np.degrees(initial_bearing)
compass_bearing = (initial_bearing + 360) % 360
return compass_bearing
@property
def velocity(self):
if self.time is not None:
dt = np.diff(self.seconds)
dd = np.diff(self.distance)
vs = 3600 * dd / dt
return self._resample(vs, self.seconds)
else:
return None
@property
def data(self):
names = ['latitude (°)', 'longitude (°)', 'distance (km)', 'compass (°)']
columns = [self.latitude, self.longitude, self.distance, self.compass]
if self.time is not None:
names += ['time', ' duration (s)', 'velocity (km/h)']
columns += [self.time, self.seconds, self.velocity]
if self.elevation is not None:
names.append('elevation (m)')
columns.append(self.elevation)
data = pd.DataFrame(dict(zip(names, columns)))
if self.time is not None:
data['time'] = data['time'].dt.tz_localize(None)
data.set_index('time', inplace=True)
return data
def _shortname_to_column(self, name):
try:
cname = shortnames[name]
except KeyError:
raise ValueError(f'Invalid short name: {name}. ')
if cname == 'time':
column = self.data.index
else:
try:
column = self.data[cname]
except KeyError:
raise KeyError(f'{cname} Data unavailable in current track. ')
return {'name': cname, 'column': column}
def plot(self, mode, *args, **kwargs):
try:
xname, yname = mode
except ValueError:
raise ValueError('Invalid plot mode (should be two letters, e.g. '
f"'tv', not {mode}")
xinfo = self._shortname_to_column(xname)
xlabel = xinfo['name']
x = xinfo['column']
yinfo = self._shortname_to_column(yname)
ylabel = yinfo['name']
y = yinfo['column']
fig, ax = plt.subplots()
ax.plot(x, y, *args, **kwargs)
if xlabel == 'time':
fig.autofmt_xdate()
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ax
def smooth(self, n=5, window='hanning'):
self.latitude = smooth(self.latitude, n=n, window=window)
self.longitude = smooth(self.longitude, n=n, window=window)
self.elevation = smooth(self.elevation, n=n, window=window)
    def closest_to(self, pt):
        """Track point closest to *pt*; delegates to the closest_pt helper."""
        return closest_pt(pt, (self.latitude, self.longitude))
def map(self, map_type='osm', embed=False, ax=None, size=(10, 10),
plot='plot', **kwargs):
if ax is None:
fig, ax = plt.subplots(figsize=size)
else:
fig = ax.figure
if plot == 'plot':
ax.plot(self.longitude, self.latitude, '.-r', **kwargs)
elif plot == 'scatter':
ax.scatter(self.longitude, self.latitude, **kwargs)
else:
raise ValueError(f'Unrecognized plot type: {plot}')
parameters = {'fig': fig, 'tiles': map_type}
if embed:
leaflet = mplleaflet.display(**parameters)
else:
leaflet = mplleaflet.show(**parameters)
return leaflet
| true | true |
f72043e942a5c4831999c099986dd8ca73cf871a | 2,428 | py | Python | share/qt/extract_strings_qt.py | VaderCoinProject/vadercoin | b513c794b014d40e5aad281dd1f54845c46d216c | [
"MIT"
] | null | null | null | share/qt/extract_strings_qt.py | VaderCoinProject/vadercoin | b513c794b014d40e5aad281dd1f54845c46d216c | [
"MIT"
] | null | null | null | share/qt/extract_strings_qt.py | VaderCoinProject/vadercoin | b513c794b014d40e5aad281dd1f54845c46d216c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2012-2019 The Vadercoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import operator
import os
import sys
OUT_CPP="qt/vadercoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
    """
    Parse 'po' format produced by xgettext.
    Return a list of (msgid,msgstr) tuples, where each element is the list of
    raw quoted lines making up that entry.
    """
    messages = []
    current_id = []
    current_str = []
    state = None  # None, 'msgid' or 'msgstr'
    for raw in text.split('\n'):
        line = raw.rstrip('\r')
        if line.startswith('msgid '):
            if state == 'msgstr':
                # A msgstr was open, so the previous entry is complete.
                messages.append((current_id, current_str))
            state = 'msgid'
            current_id = [line[6:]]
        elif line.startswith('msgstr '):
            state = 'msgstr'
            current_str = [line[7:]]
        elif line.startswith('"'):
            # Continuation line: belongs to whichever section is open.
            if state == 'msgid':
                current_id.append(line)
            elif state == 'msgstr':
                current_str.append(line)
    if state == 'msgstr':
        # Flush the final entry.
        messages.append((current_id, current_str))
    return messages
# Files to scan are passed on the command line.
files = sys.argv[1:]

# xgettext -n --keyword=_ $FILES
# Locate the xgettext binary (overridable via the XGETTEXT env var).
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
    print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
    print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
    sys.exit(1)
# Run xgettext over the sources, capturing the PO output on stdout.
child = Popen([XGETTEXT,'--output=-','--from-code=utf-8','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
# Emit a C++ file of QT_TRANSLATE_NOOP stubs so Qt linguist can pick the
# strings up for translation.
f = open(OUT_CPP, 'w', encoding="utf8")
f.write("""

#include <QtGlobal>

// Automatically generated by extract_strings_qt.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *vadercoin_strings[] = {\n')
f.write('QT_TRANSLATE_NOOP("vadercoin-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS'),))
# Sort by msgid so the generated file is deterministic across runs.
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
    if msgid != EMPTY:
        f.write('QT_TRANSLATE_NOOP("vadercoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
| 28.232558 | 105 | 0.629736 |
from subprocess import Popen, PIPE
import operator
import os
import sys
OUT_CPP="qt/vadercoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
sys.exit(1)
child = Popen([XGETTEXT,'--output=-','--from-code=utf-8','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
f = open(OUT_CPP, 'w', encoding="utf8")
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings_qt.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *vadercoin_strings[] = {\n')
f.write('QT_TRANSLATE_NOOP("vadercoin-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS'),))
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("vadercoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
| true | true |
f7204458a6279f21973b1959938c499ec950b89f | 4,309 | py | Python | losses.py | JayanthRR/ConCURL_NCE | 5471b022a571ae61bd891783084512c3a227829b | [
"MIT"
] | 3 | 2022-01-28T06:49:26.000Z | 2022-03-06T09:25:00.000Z | losses.py | JayanthRR/ConCURL_NCE | 5471b022a571ae61bd891783084512c3a227829b | [
"MIT"
] | null | null | null | losses.py | JayanthRR/ConCURL_NCE | 5471b022a571ae61bd891783084512c3a227829b | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import time
import sys
softmax = nn.Softmax(dim=1).cuda()
def distributed_sinkhorn(Q, nmb_iters):
    """Sinkhorn-Knopp normalisation of a (prototypes x samples) score matrix.

    Alternately rescales rows and columns of Q toward uniform marginals for
    `nmb_iters` iterations, then returns the column-normalised assignment
    matrix transposed to (samples x prototypes). Runs without gradients and
    mutates Q in place; tensors are placed on the GPU.

    The commented dist.all_reduce calls are kept as markers of the original
    multi-GPU (SwAV) implementation this was adapted from.
    """
    with torch.no_grad():
        sum_Q = torch.sum(Q)
        # dist.all_reduce(sum_Q)
        Q /= sum_Q
        # Fix: the original also allocated u = torch.zeros(...) here, which was
        # dead — it is overwritten by `u = curr_sum` before any read.
        # Uniform target marginals over prototypes (r) and samples (c).
        r = torch.ones(Q.shape[0]).cuda(non_blocking=True) / Q.shape[0]
        c = torch.ones(Q.shape[1]).cuda(non_blocking=True) / ( Q.shape[1])
        curr_sum = torch.sum(Q, dim=1)
        # dist.all_reduce(curr_sum)
        for it in range(nmb_iters):
            u = curr_sum
            Q *= (r / u).unsqueeze(1)
            Q *= (c / torch.sum(Q, dim=0)).unsqueeze(0)
            curr_sum = torch.sum(Q, dim=1)
            # dist.all_reduce(curr_sum)
        # Normalise columns so each sample's assignment sums to 1.
        return (Q / torch.sum(Q, dim=0, keepdim=True)).t().float()
def getQ(out_queue, epsilon=0.05):
    """Soft cluster assignments for a batch of scores via Sinkhorn (3 iterations)."""
    scaled = torch.exp(out_queue / epsilon).t()
    return distributed_sinkhorn(scaled, 3)
def byol_loss_fn(x, y):
    """Per-sample BYOL regression loss: 2 - 2 * <x, y> along the last dim.

    Inputs are expected to be L2-normalised by the caller (see ByolLoss), so
    the dot product is a cosine similarity and the loss lies in [0, 4].
    """
    similarity = (x * y).sum(dim=-1)
    return 2 - 2 * similarity
def ByolLoss(features_one, features_two):
    """Symmetric BYOL loss between two augmented views.

    Each view's online prediction is L2-normalised and scored against the
    other view's (normalised) target projection; the two directions' mean
    losses are summed.
    """
    normalize = nn.functional.normalize
    pred_one = normalize(features_one['online_pred'], dim=1, p=2)
    pred_two = normalize(features_two['online_pred'], dim=1, p=2)
    proj_one = normalize(features_one['target_proj'], dim=1, p=2)
    proj_two = normalize(features_two['target_proj'], dim=1, p=2)
    total = byol_loss_fn(pred_one, proj_two).mean() + byol_loss_fn(pred_two, proj_one).mean()
    sys.stdout.flush()
    return total
def softSubLosses(outOne, outTwo, qOne, qTwo, param=0.1):
    """Swapped-prediction cross-entropies between the two views.

    Each view's temperature-scaled softmax prediction is scored against the
    other view's code assignment. Returns (loss_view1, loss_view2).
    """
    log_p_one = torch.log(softmax(outOne / param))
    log_p_two = torch.log(softmax(outTwo / param))
    loss_one = -torch.mean(torch.sum(qTwo * log_p_one, dim=1))
    loss_two = -torch.mean(torch.sum(qOne * log_p_two, dim=1))
    return loss_one, loss_two
def SoftLoss(outcodes_one, outcodes_two, alpha=1, temperature=0.1, overclustering=False):
    """SwAV-style swapped-assignment loss.

    Returns (loss, q_one, q_two); when alpha is not positive the term is
    disabled and (0, None, None) is returned.
    """
    if not alpha > 0:
        return torch.tensor(0), None, None
    key = 'cTz_overcluster' if overclustering else 'cTz'
    out_one = outcodes_one[key]
    out_two = outcodes_two[key]
    # ATTENTION: clone operations were deliberately removed upstream; codes
    # are instead computed under no_grad. Revisit if gradients leak through.
    with torch.no_grad():
        q_one = getQ(out_one)
        q_two = getQ(out_two)
    subloss_1, subloss_2 = softSubLosses(out_one, out_two, q_one, q_two, temperature)
    sys.stdout.flush()
    return (subloss_1 + subloss_2) / 2.0, q_one, q_two
def ConsensusLossForAGivenProjection(out_rand_one, out_rand_two, q_one, q_two, param=0.1):
    """Consensus cross-entropy for one random projection.

    Each view's projected softmax prediction (temperature `param`) is scored
    against the other view's fixed code assignment; the two directions are
    averaged.
    """
    p_rand_one = softmax(out_rand_one / param)
    p_rand_two = softmax(out_rand_two / param)
    rand_loss_1 = -torch.mean(torch.sum(q_two * torch.log(p_rand_one), dim=1))
    rand_loss_2 = -torch.mean(torch.sum(q_one * torch.log(p_rand_two), dim=1))
    # Fix: the original ignored rand_loss_1/rand_loss_2 and recomputed both
    # cross-entropies inside the return statement, doubling the work. The
    # expression below is numerically identical to the original return value.
    return (rand_loss_1 + rand_loss_2) / 2
def ConsensusLoss(gamma, outcodes_one, outcodes_two, rand_outs_one, rand_outs_two, q_one, q_two, overclustering=False, temperature=0.1):
    """Average consensus loss over all random projections.

    Reuses q_one/q_two when supplied (typically from SoftLoss); otherwise
    recomputes the codes from the raw outputs. Only accumulates projection
    losses when gamma > 0.

    NOTE(review): the final `loss/len(rand_outs_one)` divides even when
    gamma <= 0, so an empty rand_outs_one raises ZeroDivisionError — confirm
    callers always pass a non-empty list.
    """
    loss = torch.tensor(0).cuda()
    if q_one is None or q_two is None:
        # check this when gamma>0 but alpha=0
        if overclustering:
            out_one, out_two = outcodes_one['cTz_overcluster'], outcodes_two['cTz_overcluster']
        else:
            out_one, out_two = outcodes_one['cTz'], outcodes_two['cTz']
        q_one = getQ(out_one)
        q_two = getQ(out_two)
    if gamma > 0:
        # Sum the per-projection consensus losses over all random projections.
        for randind in range(len(rand_outs_one)):
            if overclustering:
                temp = ConsensusLossForAGivenProjection(rand_outs_one[randind]['cTz_overcluster'], rand_outs_two[randind]['cTz_overcluster'], q_one, q_two, temperature)
                loss = loss + temp
            else:
                temp= ConsensusLossForAGivenProjection(rand_outs_one[randind]['cTz'], rand_outs_two[randind]['cTz'], q_one, q_two, temperature)
                loss = loss + temp
    sys.stdout.flush()
    return loss/len(rand_outs_one)
| 37.469565 | 168 | 0.647018 | import torch
import torch.nn as nn
import time
import sys
softmax = nn.Softmax(dim=1).cuda()
def distributed_sinkhorn(Q, nmb_iters):
with torch.no_grad():
sum_Q = torch.sum(Q)
Q /= sum_Q
u = torch.zeros(Q.shape[0]).cuda(non_blocking=True)
r = torch.ones(Q.shape[0]).cuda(non_blocking=True) / Q.shape[0]
c = torch.ones(Q.shape[1]).cuda(non_blocking=True) / ( Q.shape[1])
curr_sum = torch.sum(Q, dim=1)
for it in range(nmb_iters):
u = curr_sum
Q *= (r / u).unsqueeze(1)
Q *= (c / torch.sum(Q, dim=0)).unsqueeze(0)
curr_sum = torch.sum(Q, dim=1)
return (Q / torch.sum(Q, dim=0, keepdim=True)).t().float()
def getQ(out_queue, epsilon=0.05):
return distributed_sinkhorn(torch.exp(out_queue / epsilon).t(), 3)
def byol_loss_fn(x, y):
return 2 - 2 * (x * y).sum(dim=-1)
def ByolLoss(features_one, features_two):
online_pred_one = nn.functional.normalize(features_one['online_pred'], dim=1, p=2)
online_pred_two = nn.functional.normalize(features_two['online_pred'], dim=1, p=2)
target_proj_one = nn.functional.normalize(features_one['target_proj'], dim=1, p=2)
target_proj_two = nn.functional.normalize(features_two['target_proj'], dim=1, p=2)
byol_loss = byol_loss_fn(online_pred_one, target_proj_two).mean() + byol_loss_fn(online_pred_two, target_proj_one).mean()
sys.stdout.flush()
return byol_loss
def softSubLosses(outOne, outTwo,qOne, qTwo, param=0.1):
pOne = softmax(outOne/param)
pTwo = softmax(outTwo/param)
subloss_1 = - torch.mean(torch.sum(qTwo * torch.log(pOne), dim=1))
subloss_2 = - torch.mean(torch.sum(qOne * torch.log(pTwo), dim=1))
return subloss_1, subloss_2
def SoftLoss(outcodes_one, outcodes_two, alpha=1, temperature=0.1, overclustering=False):
if alpha > 0:
if overclustering:
out_one, out_two = outcodes_one['cTz_overcluster'], outcodes_two['cTz_overcluster']
else:
out_one, out_two = outcodes_one['cTz'], outcodes_two['cTz']
with torch.no_grad():
q_one = getQ(out_one)
q_two = getQ(out_two)
subloss_1, subloss_2 = softSubLosses(out_one, out_two, q_one, q_two, temperature)
sys.stdout.flush()
return (subloss_1 + subloss_2)/2.0, q_one, q_two
else:
return torch.tensor(0), None, None
def ConsensusLossForAGivenProjection(out_rand_one, out_rand_two, q_one, q_two, param=0.1):
p_rand_one = softmax(out_rand_one/ param)
p_rand_two = softmax(out_rand_two/ param)
rand_loss_1 = -torch.mean(torch.sum(q_two * torch.log(p_rand_one), dim=1))
rand_loss_2 = -torch.mean(torch.sum(q_one * torch.log(p_rand_two), dim=1))
return (-torch.mean(torch.sum(q_two * torch.log(p_rand_one), dim=1)) - torch.mean(torch.sum(q_one * torch.log(p_rand_two), dim=1)))/2
def ConsensusLoss(gamma, outcodes_one, outcodes_two, rand_outs_one, rand_outs_two, q_one, q_two, overclustering=False, temperature=0.1):
loss = torch.tensor(0).cuda()
if q_one is None or q_two is None:
if overclustering:
out_one, out_two = outcodes_one['cTz_overcluster'], outcodes_two['cTz_overcluster']
else:
out_one, out_two = outcodes_one['cTz'], outcodes_two['cTz']
q_one = getQ(out_one)
q_two = getQ(out_two)
if gamma > 0:
for randind in range(len(rand_outs_one)):
if overclustering:
temp = ConsensusLossForAGivenProjection(rand_outs_one[randind]['cTz_overcluster'], rand_outs_two[randind]['cTz_overcluster'], q_one, q_two, temperature)
loss = loss + temp
else:
temp= ConsensusLossForAGivenProjection(rand_outs_one[randind]['cTz'], rand_outs_two[randind]['cTz'], q_one, q_two, temperature)
loss = loss + temp
sys.stdout.flush()
return loss/len(rand_outs_one)
| true | true |
f720449d5522724a0dea3ddf7fbe0086f2f89ea6 | 2,880 | py | Python | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/account/wizard/pos_box.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | [
"Apache-2.0"
] | 1 | 2019-12-19T01:53:13.000Z | 2019-12-19T01:53:13.000Z | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/account/wizard/pos_box.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | [
"Apache-2.0"
] | null | null | null | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/account/wizard/pos_box.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | [
"Apache-2.0"
] | null | null | null | from odoo import models, fields, api, _
from odoo.exceptions import UserError
class CashBox(models.TransientModel):
    """Abstract wizard for putting money in / taking money out of a cash
    register (subclassed by CashBoxIn / CashBoxOut below).

    _register = False keeps this base model out of the ORM registry.
    """
    _register = False

    name = fields.Char(string='Reason', required=True)
    # Attention, we don't set a domain, because there is a journal_type key
    # in the context of the action
    amount = fields.Float(string='Amount', digits=0, required=True)

    @api.multi
    def run(self):
        """Apply the wizard to the bank statements selected in the action context."""
        context = dict(self._context or {})
        active_model = context.get('active_model', False)
        active_ids = context.get('active_ids', [])
        records = self.env[active_model].browse(active_ids)
        return self._run(records)
    @api.multi
    def _run(self, records):
        # Validate each statement's configuration before touching it.
        for box in self:
            for record in records:
                if not record.journal_id:
                    raise UserError(_("Please check that the field 'Journal' is set on the Bank Statement"))
                if not record.journal_id.company_id.transfer_account_id:
                    raise UserError(_("Please check that the field 'Transfer Account' is set on the company."))
                box._create_bank_statement_line(record)
        return {}
    @api.one
    def _create_bank_statement_line(self, record):
        # @api.one is deprecated in later Odoo versions; kept here for 10.0
        # semantics (runs once per wizard record).
        if record.state == 'confirm':
            raise UserError(_("You cannot put/take money in/out for a bank statement which is closed."))
        values = self._calculate_values_for_statement_line(record)
        # (0, False, values) is the ORM one2many "create" command.
        return record.write({'line_ids': [(0, False, values)]})
class CashBoxIn(CashBox):
    """Wizard: put money into the cash register (positive statement line)."""
    _name = 'cash.box.in'

    ref = fields.Char('Reference')

    @api.multi
    def _calculate_values_for_statement_line(self, record):
        """Build the statement-line values dict for a cash-in operation."""
        journal = record.journal_id
        if not journal.company_id.transfer_account_id:
            raise UserError(_("You should have defined an 'Internal Transfer Account' in your cash register's journal!"))
        return {
            'date': record.date,
            'statement_id': record.id,
            'journal_id': journal.id,
            'amount': self.amount or 0.0,
            'account_id': journal.company_id.transfer_account_id.id,
            'ref': '%s' % (self.ref or ''),
            'name': self.name,
        }
class CashBoxOut(CashBox):
    """Wizard: take money out of the cash register (negative statement line)."""
    _name = 'cash.box.out'

    @api.multi
    def _calculate_values_for_statement_line(self, record):
        """Build the statement-line values dict for a cash-out operation."""
        journal = record.journal_id
        if not journal.company_id.transfer_account_id:
            raise UserError(_("You should have defined an 'Internal Transfer Account' in your cash register's journal!"))
        amount = self.amount or 0.0
        if amount > 0.0:
            # Cash-out is always recorded as a negative amount.
            amount = -amount
        return {
            'date': record.date,
            'statement_id': record.id,
            'journal_id': journal.id,
            'amount': amount,
            'account_id': journal.company_id.transfer_account_id.id,
            'name': self.name,
        }
| 37.402597 | 121 | 0.631944 | from odoo import models, fields, api, _
from odoo.exceptions import UserError
class CashBox(models.TransientModel):
_register = False
name = fields.Char(string='Reason', required=True)
# in the context of the action
amount = fields.Float(string='Amount', digits=0, required=True)
@api.multi
def run(self):
context = dict(self._context or {})
active_model = context.get('active_model', False)
active_ids = context.get('active_ids', [])
records = self.env[active_model].browse(active_ids)
return self._run(records)
@api.multi
def _run(self, records):
for box in self:
for record in records:
if not record.journal_id:
raise UserError(_("Please check that the field 'Journal' is set on the Bank Statement"))
if not record.journal_id.company_id.transfer_account_id:
raise UserError(_("Please check that the field 'Transfer Account' is set on the company."))
box._create_bank_statement_line(record)
return {}
@api.one
def _create_bank_statement_line(self, record):
if record.state == 'confirm':
raise UserError(_("You cannot put/take money in/out for a bank statement which is closed."))
values = self._calculate_values_for_statement_line(record)
return record.write({'line_ids': [(0, False, values)]})
class CashBoxIn(CashBox):
_name = 'cash.box.in'
ref = fields.Char('Reference')
@api.multi
def _calculate_values_for_statement_line(self, record):
if not record.journal_id.company_id.transfer_account_id:
raise UserError(_("You should have defined an 'Internal Transfer Account' in your cash register's journal!"))
return {
'date': record.date,
'statement_id': record.id,
'journal_id': record.journal_id.id,
'amount': self.amount or 0.0,
'account_id': record.journal_id.company_id.transfer_account_id.id,
'ref': '%s' % (self.ref or ''),
'name': self.name,
}
class CashBoxOut(CashBox):
_name = 'cash.box.out'
@api.multi
def _calculate_values_for_statement_line(self, record):
if not record.journal_id.company_id.transfer_account_id:
raise UserError(_("You should have defined an 'Internal Transfer Account' in your cash register's journal!"))
amount = self.amount or 0.0
return {
'date': record.date,
'statement_id': record.id,
'journal_id': record.journal_id.id,
'amount': -amount if amount > 0.0 else amount,
'account_id': record.journal_id.company_id.transfer_account_id.id,
'name': self.name,
}
| true | true |
f720451ca2d68c90374718f176888a952e6989a6 | 311 | py | Python | June21/ClassesandObjects/inheritance_101.py | pythonbykhaja/intesivepython | d3074f35bf36a04d4d1d9b4ff4631733d40b5817 | [
"Apache-2.0"
] | 2 | 2021-05-29T18:21:50.000Z | 2021-07-24T13:03:30.000Z | June21/ClassesandObjects/inheritance_101.py | pythonbykhaja/intesivepython | d3074f35bf36a04d4d1d9b4ff4631733d40b5817 | [
"Apache-2.0"
] | null | null | null | June21/ClassesandObjects/inheritance_101.py | pythonbykhaja/intesivepython | d3074f35bf36a04d4d1d9b4ff4631733d40b5817 | [
"Apache-2.0"
] | 2 | 2021-05-25T10:19:54.000Z | 2021-09-21T12:20:48.000Z | class Mobile:
    def dial(self, number):
        # Demo method: "places" a call by printing the number.
        print(f"dialing number {number}")
    def ring(self):
        # Default ring behaviour; subclasses may override this.
        print("ringing using built in tones.....")
class SmartMobile(Mobile):
    """A Mobile subclass demonstrating method overriding."""
    def ring(self):
        """Override the inherited ring() with custom-tone behaviour."""
        tone_message = "ringing using custom ring tones .... "
        print(tone_message)
def dial(self, number):
print(f"dialing number {number}")
def ring(self):
print("ringing using built in tones.....")
class SmartMobile(Mobile):
def ring(self):
print("ringing using custom ring tones .... ") | true | true |
f7204539107b908bdb3d32b7e595df242c5d27e6 | 3,162 | py | Python | eventsourcing/infrastructure/timebucketedlog_reader.py | scbabacus/eventsourcing | 8404c5b26719ed9d9d1d257ebba774879c7243c4 | [
"BSD-3-Clause"
] | 1 | 2020-02-10T08:12:31.000Z | 2020-02-10T08:12:31.000Z | eventsourcing/infrastructure/timebucketedlog_reader.py | scbabacus/eventsourcing | 8404c5b26719ed9d9d1d257ebba774879c7243c4 | [
"BSD-3-Clause"
] | null | null | null | eventsourcing/infrastructure/timebucketedlog_reader.py | scbabacus/eventsourcing | 8404c5b26719ed9d9d1d257ebba774879c7243c4 | [
"BSD-3-Clause"
] | null | null | null | from time import time
import six
from six import with_metaclass
from eventsourcing.domain.model.events import QualnameABCMeta
from eventsourcing.domain.model.timebucketedlog import MessageLogged, Timebucketedlog, make_timebucket_id, \
next_bucket_starts, previous_bucket_starts
from eventsourcing.infrastructure.eventstore import AbstractEventStore
from eventsourcing.utils.times import decimaltimestamp
def get_timebucketedlog_reader(log, event_store):
    """Factory for a reader over the given time-bucketed log.

    :rtype: TimebucketedlogReader
    """
    reader = TimebucketedlogReader(log=log, event_store=event_store)
    return reader
class TimebucketedlogReader(with_metaclass(QualnameABCMeta)):
    """Reads MessageLogged events from a time-bucketed log.

    Events are stored in per-time-bucket streams; the reader walks buckets
    forward or backward in time (descending by default), yielding events and
    remembering the timestamp of the last message seen in self.position.
    """

    def __init__(self, log, event_store, page_size=50):
        assert isinstance(log, Timebucketedlog)
        self.log = log
        assert isinstance(event_store, AbstractEventStore), event_store
        self.event_store = event_store
        assert isinstance(page_size, six.integer_types)
        # Page size passed through to the event store when fetching a bucket.
        self.page_size = page_size
        # Timestamp of the most recently yielded message (None until first read).
        self.position = None

    def get_messages(self, gt=None, gte=None, lt=None, lte=None, limit=None, is_ascending=False, page_size=None):
        """Yield message payloads, tracking self.position as a resume point."""
        events = self.get_events(gt=gt, gte=gte, lt=lt, lte=lte, limit=limit, is_ascending=is_ascending,
                                 page_size=page_size)
        for event in events:
            if isinstance(event, MessageLogged):
                self.position = event.timestamp
                yield event.message

    def get_events(self, gt=None, gte=None, lt=None, lte=None, limit=None, is_ascending=False, page_size=None):
        """Yield domain events, walking time buckets until limit or bounds are hit."""
        assert limit is None or limit > 0

        # Identify the first time bucket.
        now = decimaltimestamp()
        started_on = self.log.started_on
        # Clamp the requested window to [log start, now].
        absolute_latest = min(now, lt or now, lte or now)
        # NOTE(review): `gt or 0` treats gt=0 as "no bound" — presumably fine
        # since timestamps are always positive; confirm.
        absolute_earlyist = max(started_on, gt or 0, gte or 0)
        if is_ascending:
            position = absolute_earlyist
        else:
            position = absolute_latest

        # Start counting events.
        count_events = 0
        while True:
            bucket_id = make_timebucket_id(self.log.name, position, self.log.bucket_size)
            for message_logged_event in self.event_store.get_domain_events(
                originator_id=bucket_id,
                gt=gt,
                gte=gte,
                lt=lt,
                lte=lte,
                limit=limit,
                is_ascending=is_ascending,
                page_size=page_size,
            ):
                yield message_logged_event

                if limit is not None:
                    count_events += 1
                    if count_events >= limit:
                        return

            # See if there's another bucket.
            if is_ascending:
                next_timestamp = next_bucket_starts(position, self.log.bucket_size)
                if next_timestamp > absolute_latest:
                    return
                else:
                    position = next_timestamp
            else:
                if position < absolute_earlyist:
                    return
                else:
                    position = previous_bucket_starts(position, self.log.bucket_size)
| 37.2 | 113 | 0.619861 | from time import time
import six
from six import with_metaclass
from eventsourcing.domain.model.events import QualnameABCMeta
from eventsourcing.domain.model.timebucketedlog import MessageLogged, Timebucketedlog, make_timebucket_id, \
next_bucket_starts, previous_bucket_starts
from eventsourcing.infrastructure.eventstore import AbstractEventStore
from eventsourcing.utils.times import decimaltimestamp
def get_timebucketedlog_reader(log, event_store):
return TimebucketedlogReader(log=log, event_store=event_store)
class TimebucketedlogReader(with_metaclass(QualnameABCMeta)):
def __init__(self, log, event_store, page_size=50):
assert isinstance(log, Timebucketedlog)
self.log = log
assert isinstance(event_store, AbstractEventStore), event_store
self.event_store = event_store
assert isinstance(page_size, six.integer_types)
self.page_size = page_size
self.position = None
def get_messages(self, gt=None, gte=None, lt=None, lte=None, limit=None, is_ascending=False, page_size=None):
events = self.get_events(gt=gt, gte=gte, lt=lt, lte=lte, limit=limit, is_ascending=is_ascending,
page_size=page_size)
for event in events:
if isinstance(event, MessageLogged):
self.position = event.timestamp
yield event.message
def get_events(self, gt=None, gte=None, lt=None, lte=None, limit=None, is_ascending=False, page_size=None):
assert limit is None or limit > 0
now = decimaltimestamp()
started_on = self.log.started_on
absolute_latest = min(now, lt or now, lte or now)
absolute_earlyist = max(started_on, gt or 0, gte or 0)
if is_ascending:
position = absolute_earlyist
else:
position = absolute_latest
count_events = 0
while True:
bucket_id = make_timebucket_id(self.log.name, position, self.log.bucket_size)
for message_logged_event in self.event_store.get_domain_events(
originator_id=bucket_id,
gt=gt,
gte=gte,
lt=lt,
lte=lte,
limit=limit,
is_ascending=is_ascending,
page_size=page_size,
):
yield message_logged_event
if limit is not None:
count_events += 1
if count_events >= limit:
return
if is_ascending:
next_timestamp = next_bucket_starts(position, self.log.bucket_size)
if next_timestamp > absolute_latest:
return
else:
position = next_timestamp
else:
if position < absolute_earlyist:
return
else:
position = previous_bucket_starts(position, self.log.bucket_size)
| true | true |
f720456245ef9dd1a92e44572316f1220df27b85 | 18,816 | py | Python | testing/python/approx.py | rosemichaele/pytest | 1c0ab3c2a32f7932378a1c37106d082784cb4700 | [
"MIT"
] | 1 | 2021-08-16T07:45:51.000Z | 2021-08-16T07:45:51.000Z | testing/python/approx.py | rosemichaele/pytest | 1c0ab3c2a32f7932378a1c37106d082784cb4700 | [
"MIT"
] | null | null | null | testing/python/approx.py | rosemichaele/pytest | 1c0ab3c2a32f7932378a1c37106d082784cb4700 | [
"MIT"
] | null | null | null | import operator
from decimal import Decimal
from fractions import Fraction
from operator import eq
from operator import ne
import pytest
from pytest import approx
inf, nan = float("inf"), float("nan")
@pytest.fixture
def mocked_doctest_runner(monkeypatch):
    """DocTestRunner that raises AssertionError on any example failure.

    Also stubs out doctest's internal pdb redirection so a failing example
    cannot drop into the debugger during the test run.
    """
    import doctest

    class MockedPdb:
        # Minimal stand-in for doctest._OutputRedirectingPdb.
        def __init__(self, out):
            pass

        def set_trace(self):
            raise NotImplementedError("not used")

        def reset(self):
            pass

        def set_continue(self):
            pass

    monkeypatch.setattr("doctest._OutputRedirectingPdb", MockedPdb)

    class MyDocTestRunner(doctest.DocTestRunner):
        def report_failure(self, out, test, example, got):
            # Turn the doctest failure into a plain assertion failure.
            raise AssertionError(
                "'{}' evaluates to '{}', not '{}'".format(
                    example.source.strip(), got.strip(), example.want.strip()
                )
            )

    return MyDocTestRunner()
class TestApprox:
    def test_repr_string(self):
        """repr() shows value ± tolerance for scalars, sequences, and dicts."""
        assert repr(approx(1.0)) == "1.0 ± 1.0e-06"
        assert repr(approx([1.0, 2.0])) == "approx([1.0 ± 1.0e-06, 2.0 ± 2.0e-06])"
        assert repr(approx((1.0, 2.0))) == "approx((1.0 ± 1.0e-06, 2.0 ± 2.0e-06))"
        assert repr(approx(inf)) == "inf"
        assert repr(approx(1.0, rel=nan)) == "1.0 ± ???"
        assert repr(approx(1.0, rel=inf)) == "1.0 ± inf"
        # Dictionaries aren't ordered, so we need to check both orders.
        assert repr(approx({"a": 1.0, "b": 2.0})) in (
            "approx({'a': 1.0 ± 1.0e-06, 'b': 2.0 ± 2.0e-06})",
            "approx({'b': 2.0 ± 2.0e-06, 'a': 1.0 ± 1.0e-06})",
        )
    def test_repr_complex_numbers(self):
        """Complex approx reprs show magnitude tolerance and a phase range."""
        assert repr(approx(inf + 1j)) == "(inf+1j)"
        assert repr(approx(1.0j, rel=inf)) == "1j ± inf"
        # can't compute a sensible tolerance
        assert repr(approx(nan + 1j)) == "(nan+1j) ± ???"
        assert repr(approx(1.0j)) == "1j ± 1.0e-06 ∠ ±180°"
        # relative tolerance is scaled to |3+4j| = 5
        assert repr(approx(3 + 4 * 1j)) == "(3+4j) ± 5.0e-06 ∠ ±180°"
        # absolute tolerance is not scaled
        assert repr(approx(3.3 + 4.4 * 1j, abs=0.02)) == "(3.3+4.4j) ± 2.0e-02 ∠ ±180°"
    @pytest.mark.parametrize(
        "value, expected_repr_string",
        [
            (5.0, "approx(5.0 ± 5.0e-06)"),
            ([5.0], "approx([5.0 ± 5.0e-06])"),
            ([[5.0]], "approx([[5.0 ± 5.0e-06]])"),
            ([[5.0, 6.0]], "approx([[5.0 ± 5.0e-06, 6.0 ± 6.0e-06]])"),
            ([[5.0], [6.0]], "approx([[5.0 ± 5.0e-06], [6.0 ± 6.0e-06]])"),
        ],
    )
    def test_repr_nd_array(self, value, expected_repr_string):
        """Make sure that arrays of all different dimensions are repr'd correctly.

        Skipped when numpy is unavailable (importorskip).
        """
        np = pytest.importorskip("numpy")
        np_array = np.array(value)
        assert repr(approx(np_array)) == expected_repr_string
    def test_operator_overloading(self):
        """== and != against approx are consistent negations of each other."""
        assert 1 == approx(1, rel=1e-6, abs=1e-12)
        assert not (1 != approx(1, rel=1e-6, abs=1e-12))
        assert 10 != approx(1, rel=1e-6, abs=1e-12)
        assert not (10 == approx(1, rel=1e-6, abs=1e-12))
    def test_exactly_equal(self):
        """Exactly-equal values compare equal across float/int/Decimal/Fraction."""
        examples = [
            (2.0, 2.0),
            (0.1e200, 0.1e200),
            (1.123e-300, 1.123e-300),
            (12345, 12345.0),
            (0.0, -0.0),
            (345678, 345678),
            (Decimal("1.0001"), Decimal("1.0001")),
            (Fraction(1, 3), Fraction(-1, -3)),
        ]
        for a, x in examples:
            assert a == approx(x)
    def test_opposite_sign(self):
        """Opposite-sign values only match when both are within tolerance of zero."""
        examples = [(eq, 1e-100, -1e-100), (ne, 1e100, -1e100)]
        for op, a, x in examples:
            assert op(a, approx(x))
    def test_zero_tolerance(self):
        """With rel=abs=0 only exact equality matches; tolerances re-enable matches."""
        within_1e10 = [(1.1e-100, 1e-100), (-1.1e-100, -1e-100)]
        for a, x in within_1e10:
            assert x == approx(x, rel=0.0, abs=0.0)
            assert a != approx(x, rel=0.0, abs=0.0)
            assert a == approx(x, rel=0.0, abs=5e-101)
            assert a != approx(x, rel=0.0, abs=5e-102)
            assert a == approx(x, rel=5e-1, abs=0.0)
            assert a != approx(x, rel=5e-2, abs=0.0)
    def test_negative_tolerance(self):
        """Any negative rel/abs tolerance raises ValueError on comparison."""
        # Negative tolerances are not allowed.
        illegal_kwargs = [
            dict(rel=-1e100),
            dict(abs=-1e100),
            dict(rel=1e100, abs=-1e100),
            dict(rel=-1e100, abs=1e100),
            dict(rel=-1e100, abs=-1e100),
        ]
        for kwargs in illegal_kwargs:
            with pytest.raises(ValueError):
                1.1 == approx(1, **kwargs)
    def test_inf_tolerance(self):
        """An infinite rel or abs tolerance matches arbitrarily distant values."""
        # Everything should be equal if the tolerance is infinite.
        large_diffs = [(1, 1000), (1e-50, 1e50), (-1.0, -1e300), (0.0, 10)]
        for a, x in large_diffs:
            assert a != approx(x, rel=0.0, abs=0.0)
            assert a == approx(x, rel=inf, abs=0.0)
            assert a == approx(x, rel=0.0, abs=inf)
            assert a == approx(x, rel=inf, abs=inf)
    def test_inf_tolerance_expecting_zero(self):
        """rel=inf with an expected value of zero must raise ValueError."""
        # If the relative tolerance is infinite and the expected value is zero,
        # the computed tolerance is inf * 0 = NaN, which should be an error.
        illegal_kwargs = [dict(rel=inf, abs=0.0), dict(rel=inf, abs=inf)]
        for kwargs in illegal_kwargs:
            with pytest.raises(ValueError):
                1 == approx(0, **kwargs)
    def test_nan_tolerance(self):
        """A NaN rel or abs tolerance raises ValueError on comparison."""
        illegal_kwargs = [dict(rel=nan), dict(abs=nan), dict(rel=nan, abs=nan)]
        for kwargs in illegal_kwargs:
            with pytest.raises(ValueError):
                1.1 == approx(1, **kwargs)
    def test_reasonable_defaults(self):
        """Default tolerances absorb ordinary floating-point rounding error."""
        # Whatever the defaults are, they should work for numbers close to 1
        # that have a small amount of floating-point error.
        assert 0.1 + 0.2 == approx(0.3)
    def test_default_tolerances(self):
        """Pin the current default tolerances (rel=1e-6, abs=1e-12)."""
        # This tests the defaults as they are currently set. If you change the
        # defaults, this test will fail but you should feel free to change it.
        # None of the other tests (except the doctests) should be affected by
        # the choice of defaults.
        examples = [
            # Relative tolerance used.
            (eq, 1e100 + 1e94, 1e100),
            (ne, 1e100 + 2e94, 1e100),
            (eq, 1e0 + 1e-6, 1e0),
            (ne, 1e0 + 2e-6, 1e0),
            # Absolute tolerance used.
            (eq, 1e-100, +1e-106),
            (eq, 1e-100, +2e-106),
            (eq, 1e-100, 0),
        ]
        for op, a, x in examples:
            assert op(a, approx(x))
    def test_custom_tolerances(self):
        """A comparison passes if EITHER the rel or the abs tolerance is met."""
        assert 1e8 + 1e0 == approx(1e8, rel=5e-8, abs=5e0)
        assert 1e8 + 1e0 == approx(1e8, rel=5e-9, abs=5e0)
        assert 1e8 + 1e0 == approx(1e8, rel=5e-8, abs=5e-1)
        assert 1e8 + 1e0 != approx(1e8, rel=5e-9, abs=5e-1)
        assert 1e0 + 1e-8 == approx(1e0, rel=5e-8, abs=5e-8)
        assert 1e0 + 1e-8 == approx(1e0, rel=5e-9, abs=5e-8)
        assert 1e0 + 1e-8 == approx(1e0, rel=5e-8, abs=5e-9)
        assert 1e0 + 1e-8 != approx(1e0, rel=5e-9, abs=5e-9)
        assert 1e-8 + 1e-16 == approx(1e-8, rel=5e-8, abs=5e-16)
        assert 1e-8 + 1e-16 == approx(1e-8, rel=5e-9, abs=5e-16)
        assert 1e-8 + 1e-16 == approx(1e-8, rel=5e-8, abs=5e-17)
        assert 1e-8 + 1e-16 != approx(1e-8, rel=5e-9, abs=5e-17)
    def test_relative_tolerance(self):
        """rel tolerance scales with the expected value across magnitudes."""
        within_1e8_rel = [(1e8 + 1e0, 1e8), (1e0 + 1e-8, 1e0), (1e-8 + 1e-16, 1e-8)]
        for a, x in within_1e8_rel:
            assert a == approx(x, rel=5e-8, abs=0.0)
            assert a != approx(x, rel=5e-9, abs=0.0)
    def test_absolute_tolerance(self):
        """abs tolerance is a fixed bound, independent of the expected magnitude."""
        within_1e8_abs = [(1e8 + 9e-9, 1e8), (1e0 + 9e-9, 1e0), (1e-8 + 9e-9, 1e-8)]
        for a, x in within_1e8_abs:
            assert a == approx(x, rel=0, abs=5e-8)
            assert a != approx(x, rel=0, abs=5e-9)
    def test_expecting_zero(self):
        """Only the abs tolerance matters when the expected value is zero."""
        examples = [
            (ne, 1e-6, 0.0),
            (ne, -1e-6, 0.0),
            (eq, 1e-12, 0.0),
            (eq, -1e-12, 0.0),
            (ne, 2e-12, 0.0),
            (ne, -2e-12, 0.0),
            (ne, inf, 0.0),
            (ne, nan, 0.0),
        ]
        for op, a, x in examples:
            assert op(a, approx(x, rel=0.0, abs=1e-12))
            assert op(a, approx(x, rel=1e-6, abs=1e-12))
    def test_expecting_inf(self):
        """Infinities only compare equal to the identical infinity."""
        examples = [
            (eq, inf, inf),
            (eq, -inf, -inf),
            (ne, inf, -inf),
            (ne, 0.0, inf),
            (ne, nan, inf),
        ]
        for op, a, x in examples:
            assert op(a, approx(x))
def test_expecting_nan(self):
examples = [
(eq, nan, nan),
(eq, -nan, -nan),
(eq, nan, -nan),
(ne, 0.0, nan),
(ne, inf, nan),
]
for op, a, x in examples:
# Nothing is equal to NaN by default.
assert a != approx(x)
# If ``nan_ok=True``, then NaN is equal to NaN.
assert op(a, approx(x, nan_ok=True))
def test_int(self):
within_1e6 = [(1000001, 1000000), (-1000001, -1000000)]
for a, x in within_1e6:
assert a == approx(x, rel=5e-6, abs=0)
assert a != approx(x, rel=5e-7, abs=0)
assert approx(x, rel=5e-6, abs=0) == a
assert approx(x, rel=5e-7, abs=0) != a
def test_decimal(self):
within_1e6 = [
(Decimal("1.000001"), Decimal("1.0")),
(Decimal("-1.000001"), Decimal("-1.0")),
]
for a, x in within_1e6:
assert a == approx(x)
assert a == approx(x, rel=Decimal("5e-6"), abs=0)
assert a != approx(x, rel=Decimal("5e-7"), abs=0)
assert approx(x, rel=Decimal("5e-6"), abs=0) == a
assert approx(x, rel=Decimal("5e-7"), abs=0) != a
def test_fraction(self):
within_1e6 = [
(1 + Fraction(1, 1000000), Fraction(1)),
(-1 - Fraction(-1, 1000000), Fraction(-1)),
]
for a, x in within_1e6:
assert a == approx(x, rel=5e-6, abs=0)
assert a != approx(x, rel=5e-7, abs=0)
assert approx(x, rel=5e-6, abs=0) == a
assert approx(x, rel=5e-7, abs=0) != a
def test_complex(self):
within_1e6 = [
(1.000001 + 1.0j, 1.0 + 1.0j),
(1.0 + 1.000001j, 1.0 + 1.0j),
(-1.000001 + 1.0j, -1.0 + 1.0j),
(1.0 - 1.000001j, 1.0 - 1.0j),
]
for a, x in within_1e6:
assert a == approx(x, rel=5e-6, abs=0)
assert a != approx(x, rel=5e-7, abs=0)
assert approx(x, rel=5e-6, abs=0) == a
assert approx(x, rel=5e-7, abs=0) != a
def test_list(self):
actual = [1 + 1e-7, 2 + 1e-8]
expected = [1, 2]
# Return false if any element is outside the tolerance.
assert actual == approx(expected, rel=5e-7, abs=0)
assert actual != approx(expected, rel=5e-8, abs=0)
assert approx(expected, rel=5e-7, abs=0) == actual
assert approx(expected, rel=5e-8, abs=0) != actual
def test_list_wrong_len(self):
assert [1, 2] != approx([1])
assert [1, 2] != approx([1, 2, 3])
def test_tuple(self):
actual = (1 + 1e-7, 2 + 1e-8)
expected = (1, 2)
# Return false if any element is outside the tolerance.
assert actual == approx(expected, rel=5e-7, abs=0)
assert actual != approx(expected, rel=5e-8, abs=0)
assert approx(expected, rel=5e-7, abs=0) == actual
assert approx(expected, rel=5e-8, abs=0) != actual
def test_tuple_wrong_len(self):
assert (1, 2) != approx((1,))
assert (1, 2) != approx((1, 2, 3))
def test_dict(self):
actual = {"a": 1 + 1e-7, "b": 2 + 1e-8}
# Dictionaries became ordered in python3.6, so switch up the order here
# to make sure it doesn't matter.
expected = {"b": 2, "a": 1}
# Return false if any element is outside the tolerance.
assert actual == approx(expected, rel=5e-7, abs=0)
assert actual != approx(expected, rel=5e-8, abs=0)
assert approx(expected, rel=5e-7, abs=0) == actual
assert approx(expected, rel=5e-8, abs=0) != actual
def test_dict_wrong_len(self):
assert {"a": 1, "b": 2} != approx({"a": 1})
assert {"a": 1, "b": 2} != approx({"a": 1, "c": 2})
assert {"a": 1, "b": 2} != approx({"a": 1, "b": 2, "c": 3})
def test_numpy_array(self):
np = pytest.importorskip("numpy")
actual = np.array([1 + 1e-7, 2 + 1e-8])
expected = np.array([1, 2])
# Return false if any element is outside the tolerance.
assert actual == approx(expected, rel=5e-7, abs=0)
assert actual != approx(expected, rel=5e-8, abs=0)
assert approx(expected, rel=5e-7, abs=0) == expected
assert approx(expected, rel=5e-8, abs=0) != actual
# Should be able to compare lists with numpy arrays.
assert list(actual) == approx(expected, rel=5e-7, abs=0)
assert list(actual) != approx(expected, rel=5e-8, abs=0)
assert actual == approx(list(expected), rel=5e-7, abs=0)
assert actual != approx(list(expected), rel=5e-8, abs=0)
def test_numpy_tolerance_args(self):
"""
Check that numpy rel/abs args are handled correctly
for comparison against an np.array
Check both sides of the operator, hopefully it doesn't impact things.
Test all permutations of where the approx and np.array() can show up
"""
np = pytest.importorskip("numpy")
expected = 100.0
actual = 99.0
abs_diff = expected - actual
rel_diff = (expected - actual) / expected
tests = [
(eq, abs_diff, 0),
(eq, 0, rel_diff),
(ne, 0, rel_diff / 2.0), # rel diff fail
(ne, abs_diff / 2.0, 0), # abs diff fail
]
for op, _abs, _rel in tests:
assert op(np.array(actual), approx(expected, abs=_abs, rel=_rel)) # a, b
assert op(approx(expected, abs=_abs, rel=_rel), np.array(actual)) # b, a
assert op(actual, approx(np.array(expected), abs=_abs, rel=_rel)) # a, b
assert op(approx(np.array(expected), abs=_abs, rel=_rel), actual) # b, a
assert op(np.array(actual), approx(np.array(expected), abs=_abs, rel=_rel))
assert op(approx(np.array(expected), abs=_abs, rel=_rel), np.array(actual))
def test_numpy_expecting_nan(self):
np = pytest.importorskip("numpy")
examples = [
(eq, nan, nan),
(eq, -nan, -nan),
(eq, nan, -nan),
(ne, 0.0, nan),
(ne, inf, nan),
]
for op, a, x in examples:
# Nothing is equal to NaN by default.
assert np.array(a) != approx(x)
assert a != approx(np.array(x))
# If ``nan_ok=True``, then NaN is equal to NaN.
assert op(np.array(a), approx(x, nan_ok=True))
assert op(a, approx(np.array(x), nan_ok=True))
def test_numpy_expecting_inf(self):
np = pytest.importorskip("numpy")
examples = [
(eq, inf, inf),
(eq, -inf, -inf),
(ne, inf, -inf),
(ne, 0.0, inf),
(ne, nan, inf),
]
for op, a, x in examples:
assert op(np.array(a), approx(x))
assert op(a, approx(np.array(x)))
assert op(np.array(a), approx(np.array(x)))
def test_numpy_array_wrong_shape(self):
np = pytest.importorskip("numpy")
a12 = np.array([[1, 2]])
a21 = np.array([[1], [2]])
assert a12 != approx(a21)
assert a21 != approx(a12)
def test_doctests(self, mocked_doctest_runner):
import doctest
parser = doctest.DocTestParser()
test = parser.get_doctest(
approx.__doc__, {"approx": approx}, approx.__name__, None, None
)
mocked_doctest_runner.run(test)
def test_unicode_plus_minus(self, testdir):
"""
Comparing approx instances inside lists should not produce an error in the detailed diff.
Integration test for issue #2111.
"""
testdir.makepyfile(
"""
import pytest
def test_foo():
assert [3] == [pytest.approx(4)]
"""
)
expected = "4.0e-06"
result = testdir.runpytest()
result.stdout.fnmatch_lines(
["*At index 0 diff: 3 != 4 * {}".format(expected), "=* 1 failed in *="]
)
@pytest.mark.parametrize(
"x",
[
pytest.param(None),
pytest.param("string"),
pytest.param(["string"], id="nested-str"),
pytest.param([[1]], id="nested-list"),
pytest.param({"key": "string"}, id="dict-with-string"),
pytest.param({"key": {"key": 1}}, id="nested-dict"),
],
)
def test_expected_value_type_error(self, x):
with pytest.raises(TypeError):
approx(x)
@pytest.mark.parametrize(
"op",
[
pytest.param(operator.le, id="<="),
pytest.param(operator.lt, id="<"),
pytest.param(operator.ge, id=">="),
pytest.param(operator.gt, id=">"),
],
)
def test_comparison_operator_type_error(self, op):
"""
pytest.approx should raise TypeError for operators other than == and != (#2003).
"""
with pytest.raises(TypeError):
op(1, approx(1, rel=1e-6, abs=1e-12))
def test_numpy_array_with_scalar(self):
np = pytest.importorskip("numpy")
actual = np.array([1 + 1e-7, 1 - 1e-8])
expected = 1.0
assert actual == approx(expected, rel=5e-7, abs=0)
assert actual != approx(expected, rel=5e-8, abs=0)
assert approx(expected, rel=5e-7, abs=0) == actual
assert approx(expected, rel=5e-8, abs=0) != actual
def test_numpy_scalar_with_array(self):
np = pytest.importorskip("numpy")
actual = 1.0
expected = np.array([1 + 1e-7, 1 - 1e-8])
assert actual == approx(expected, rel=5e-7, abs=0)
assert actual != approx(expected, rel=5e-8, abs=0)
assert approx(expected, rel=5e-7, abs=0) == actual
assert approx(expected, rel=5e-8, abs=0) != actual
def test_generic_sized_iterable_object(self):
class MySizedIterable:
def __iter__(self):
return iter([1, 2, 3, 4])
def __len__(self):
return 4
expected = MySizedIterable()
assert [1, 2, 3, 4] == approx(expected)
| 36.115163 | 97 | 0.526626 | import operator
from decimal import Decimal
from fractions import Fraction
from operator import eq
from operator import ne
import pytest
from pytest import approx
inf, nan = float("inf"), float("nan")
@pytest.fixture
def mocked_doctest_runner(monkeypatch):
import doctest
class MockedPdb:
def __init__(self, out):
pass
def set_trace(self):
raise NotImplementedError("not used")
def reset(self):
pass
def set_continue(self):
pass
monkeypatch.setattr("doctest._OutputRedirectingPdb", MockedPdb)
class MyDocTestRunner(doctest.DocTestRunner):
def report_failure(self, out, test, example, got):
raise AssertionError(
"'{}' evaluates to '{}', not '{}'".format(
example.source.strip(), got.strip(), example.want.strip()
)
)
return MyDocTestRunner()
class TestApprox:
def test_repr_string(self):
assert repr(approx(1.0)) == "1.0 ± 1.0e-06"
assert repr(approx([1.0, 2.0])) == "approx([1.0 ± 1.0e-06, 2.0 ± 2.0e-06])"
assert repr(approx((1.0, 2.0))) == "approx((1.0 ± 1.0e-06, 2.0 ± 2.0e-06))"
assert repr(approx(inf)) == "inf"
assert repr(approx(1.0, rel=nan)) == "1.0 ± ???"
assert repr(approx(1.0, rel=inf)) == "1.0 ± inf"
assert repr(approx({"a": 1.0, "b": 2.0})) in (
"approx({'a': 1.0 ± 1.0e-06, 'b': 2.0 ± 2.0e-06})",
"approx({'b': 2.0 ± 2.0e-06, 'a': 1.0 ± 1.0e-06})",
)
def test_repr_complex_numbers(self):
assert repr(approx(inf + 1j)) == "(inf+1j)"
assert repr(approx(1.0j, rel=inf)) == "1j ± inf"
# can't compute a sensible tolerance
assert repr(approx(nan + 1j)) == "(nan+1j) ± ???"
assert repr(approx(1.0j)) == "1j ± 1.0e-06 ∠ ±180°"
assert repr(approx(3 + 4 * 1j)) == "(3+4j) ± 5.0e-06 ∠ ±180°"
assert repr(approx(3.3 + 4.4 * 1j, abs=0.02)) == "(3.3+4.4j) ± 2.0e-02 ∠ ±180°"
@pytest.mark.parametrize(
"value, expected_repr_string",
[
(5.0, "approx(5.0 ± 5.0e-06)"),
([5.0], "approx([5.0 ± 5.0e-06])"),
([[5.0]], "approx([[5.0 ± 5.0e-06]])"),
([[5.0, 6.0]], "approx([[5.0 ± 5.0e-06, 6.0 ± 6.0e-06]])"),
([[5.0], [6.0]], "approx([[5.0 ± 5.0e-06], [6.0 ± 6.0e-06]])"),
],
)
def test_repr_nd_array(self, value, expected_repr_string):
np = pytest.importorskip("numpy")
np_array = np.array(value)
assert repr(approx(np_array)) == expected_repr_string
def test_operator_overloading(self):
assert 1 == approx(1, rel=1e-6, abs=1e-12)
assert not (1 != approx(1, rel=1e-6, abs=1e-12))
assert 10 != approx(1, rel=1e-6, abs=1e-12)
assert not (10 == approx(1, rel=1e-6, abs=1e-12))
def test_exactly_equal(self):
examples = [
(2.0, 2.0),
(0.1e200, 0.1e200),
(1.123e-300, 1.123e-300),
(12345, 12345.0),
(0.0, -0.0),
(345678, 345678),
(Decimal("1.0001"), Decimal("1.0001")),
(Fraction(1, 3), Fraction(-1, -3)),
]
for a, x in examples:
assert a == approx(x)
def test_opposite_sign(self):
examples = [(eq, 1e-100, -1e-100), (ne, 1e100, -1e100)]
for op, a, x in examples:
assert op(a, approx(x))
def test_zero_tolerance(self):
within_1e10 = [(1.1e-100, 1e-100), (-1.1e-100, -1e-100)]
for a, x in within_1e10:
assert x == approx(x, rel=0.0, abs=0.0)
assert a != approx(x, rel=0.0, abs=0.0)
assert a == approx(x, rel=0.0, abs=5e-101)
assert a != approx(x, rel=0.0, abs=5e-102)
assert a == approx(x, rel=5e-1, abs=0.0)
assert a != approx(x, rel=5e-2, abs=0.0)
def test_negative_tolerance(self):
illegal_kwargs = [
dict(rel=-1e100),
dict(abs=-1e100),
dict(rel=1e100, abs=-1e100),
dict(rel=-1e100, abs=1e100),
dict(rel=-1e100, abs=-1e100),
]
for kwargs in illegal_kwargs:
with pytest.raises(ValueError):
1.1 == approx(1, **kwargs)
def test_inf_tolerance(self):
large_diffs = [(1, 1000), (1e-50, 1e50), (-1.0, -1e300), (0.0, 10)]
for a, x in large_diffs:
assert a != approx(x, rel=0.0, abs=0.0)
assert a == approx(x, rel=inf, abs=0.0)
assert a == approx(x, rel=0.0, abs=inf)
assert a == approx(x, rel=inf, abs=inf)
def test_inf_tolerance_expecting_zero(self):
illegal_kwargs = [dict(rel=inf, abs=0.0), dict(rel=inf, abs=inf)]
for kwargs in illegal_kwargs:
with pytest.raises(ValueError):
1 == approx(0, **kwargs)
def test_nan_tolerance(self):
illegal_kwargs = [dict(rel=nan), dict(abs=nan), dict(rel=nan, abs=nan)]
for kwargs in illegal_kwargs:
with pytest.raises(ValueError):
1.1 == approx(1, **kwargs)
def test_reasonable_defaults(self):
assert 0.1 + 0.2 == approx(0.3)
def test_default_tolerances(self):
examples = [
(eq, 1e100 + 1e94, 1e100),
(ne, 1e100 + 2e94, 1e100),
(eq, 1e0 + 1e-6, 1e0),
(ne, 1e0 + 2e-6, 1e0),
(eq, 1e-100, +1e-106),
(eq, 1e-100, +2e-106),
(eq, 1e-100, 0),
]
for op, a, x in examples:
assert op(a, approx(x))
def test_custom_tolerances(self):
assert 1e8 + 1e0 == approx(1e8, rel=5e-8, abs=5e0)
assert 1e8 + 1e0 == approx(1e8, rel=5e-9, abs=5e0)
assert 1e8 + 1e0 == approx(1e8, rel=5e-8, abs=5e-1)
assert 1e8 + 1e0 != approx(1e8, rel=5e-9, abs=5e-1)
assert 1e0 + 1e-8 == approx(1e0, rel=5e-8, abs=5e-8)
assert 1e0 + 1e-8 == approx(1e0, rel=5e-9, abs=5e-8)
assert 1e0 + 1e-8 == approx(1e0, rel=5e-8, abs=5e-9)
assert 1e0 + 1e-8 != approx(1e0, rel=5e-9, abs=5e-9)
assert 1e-8 + 1e-16 == approx(1e-8, rel=5e-8, abs=5e-16)
assert 1e-8 + 1e-16 == approx(1e-8, rel=5e-9, abs=5e-16)
assert 1e-8 + 1e-16 == approx(1e-8, rel=5e-8, abs=5e-17)
assert 1e-8 + 1e-16 != approx(1e-8, rel=5e-9, abs=5e-17)
def test_relative_tolerance(self):
within_1e8_rel = [(1e8 + 1e0, 1e8), (1e0 + 1e-8, 1e0), (1e-8 + 1e-16, 1e-8)]
for a, x in within_1e8_rel:
assert a == approx(x, rel=5e-8, abs=0.0)
assert a != approx(x, rel=5e-9, abs=0.0)
def test_absolute_tolerance(self):
within_1e8_abs = [(1e8 + 9e-9, 1e8), (1e0 + 9e-9, 1e0), (1e-8 + 9e-9, 1e-8)]
for a, x in within_1e8_abs:
assert a == approx(x, rel=0, abs=5e-8)
assert a != approx(x, rel=0, abs=5e-9)
def test_expecting_zero(self):
examples = [
(ne, 1e-6, 0.0),
(ne, -1e-6, 0.0),
(eq, 1e-12, 0.0),
(eq, -1e-12, 0.0),
(ne, 2e-12, 0.0),
(ne, -2e-12, 0.0),
(ne, inf, 0.0),
(ne, nan, 0.0),
]
for op, a, x in examples:
assert op(a, approx(x, rel=0.0, abs=1e-12))
assert op(a, approx(x, rel=1e-6, abs=1e-12))
def test_expecting_inf(self):
examples = [
(eq, inf, inf),
(eq, -inf, -inf),
(ne, inf, -inf),
(ne, 0.0, inf),
(ne, nan, inf),
]
for op, a, x in examples:
assert op(a, approx(x))
def test_expecting_nan(self):
examples = [
(eq, nan, nan),
(eq, -nan, -nan),
(eq, nan, -nan),
(ne, 0.0, nan),
(ne, inf, nan),
]
for op, a, x in examples:
assert a != approx(x)
assert op(a, approx(x, nan_ok=True))
def test_int(self):
within_1e6 = [(1000001, 1000000), (-1000001, -1000000)]
for a, x in within_1e6:
assert a == approx(x, rel=5e-6, abs=0)
assert a != approx(x, rel=5e-7, abs=0)
assert approx(x, rel=5e-6, abs=0) == a
assert approx(x, rel=5e-7, abs=0) != a
def test_decimal(self):
within_1e6 = [
(Decimal("1.000001"), Decimal("1.0")),
(Decimal("-1.000001"), Decimal("-1.0")),
]
for a, x in within_1e6:
assert a == approx(x)
assert a == approx(x, rel=Decimal("5e-6"), abs=0)
assert a != approx(x, rel=Decimal("5e-7"), abs=0)
assert approx(x, rel=Decimal("5e-6"), abs=0) == a
assert approx(x, rel=Decimal("5e-7"), abs=0) != a
def test_fraction(self):
within_1e6 = [
(1 + Fraction(1, 1000000), Fraction(1)),
(-1 - Fraction(-1, 1000000), Fraction(-1)),
]
for a, x in within_1e6:
assert a == approx(x, rel=5e-6, abs=0)
assert a != approx(x, rel=5e-7, abs=0)
assert approx(x, rel=5e-6, abs=0) == a
assert approx(x, rel=5e-7, abs=0) != a
def test_complex(self):
within_1e6 = [
(1.000001 + 1.0j, 1.0 + 1.0j),
(1.0 + 1.000001j, 1.0 + 1.0j),
(-1.000001 + 1.0j, -1.0 + 1.0j),
(1.0 - 1.000001j, 1.0 - 1.0j),
]
for a, x in within_1e6:
assert a == approx(x, rel=5e-6, abs=0)
assert a != approx(x, rel=5e-7, abs=0)
assert approx(x, rel=5e-6, abs=0) == a
assert approx(x, rel=5e-7, abs=0) != a
def test_list(self):
actual = [1 + 1e-7, 2 + 1e-8]
expected = [1, 2]
assert actual == approx(expected, rel=5e-7, abs=0)
assert actual != approx(expected, rel=5e-8, abs=0)
assert approx(expected, rel=5e-7, abs=0) == actual
assert approx(expected, rel=5e-8, abs=0) != actual
def test_list_wrong_len(self):
assert [1, 2] != approx([1])
assert [1, 2] != approx([1, 2, 3])
def test_tuple(self):
actual = (1 + 1e-7, 2 + 1e-8)
expected = (1, 2)
assert actual == approx(expected, rel=5e-7, abs=0)
assert actual != approx(expected, rel=5e-8, abs=0)
assert approx(expected, rel=5e-7, abs=0) == actual
assert approx(expected, rel=5e-8, abs=0) != actual
def test_tuple_wrong_len(self):
assert (1, 2) != approx((1,))
assert (1, 2) != approx((1, 2, 3))
def test_dict(self):
actual = {"a": 1 + 1e-7, "b": 2 + 1e-8}
expected = {"b": 2, "a": 1}
# Return false if any element is outside the tolerance.
assert actual == approx(expected, rel=5e-7, abs=0)
assert actual != approx(expected, rel=5e-8, abs=0)
assert approx(expected, rel=5e-7, abs=0) == actual
assert approx(expected, rel=5e-8, abs=0) != actual
def test_dict_wrong_len(self):
assert {"a": 1, "b": 2} != approx({"a": 1})
assert {"a": 1, "b": 2} != approx({"a": 1, "c": 2})
assert {"a": 1, "b": 2} != approx({"a": 1, "b": 2, "c": 3})
def test_numpy_array(self):
np = pytest.importorskip("numpy")
actual = np.array([1 + 1e-7, 2 + 1e-8])
expected = np.array([1, 2])
# Return false if any element is outside the tolerance.
assert actual == approx(expected, rel=5e-7, abs=0)
assert actual != approx(expected, rel=5e-8, abs=0)
assert approx(expected, rel=5e-7, abs=0) == expected
assert approx(expected, rel=5e-8, abs=0) != actual
# Should be able to compare lists with numpy arrays.
assert list(actual) == approx(expected, rel=5e-7, abs=0)
assert list(actual) != approx(expected, rel=5e-8, abs=0)
assert actual == approx(list(expected), rel=5e-7, abs=0)
assert actual != approx(list(expected), rel=5e-8, abs=0)
def test_numpy_tolerance_args(self):
np = pytest.importorskip("numpy")
expected = 100.0
actual = 99.0
abs_diff = expected - actual
rel_diff = (expected - actual) / expected
tests = [
(eq, abs_diff, 0),
(eq, 0, rel_diff),
(ne, 0, rel_diff / 2.0), # rel diff fail
(ne, abs_diff / 2.0, 0), # abs diff fail
]
for op, _abs, _rel in tests:
assert op(np.array(actual), approx(expected, abs=_abs, rel=_rel)) # a, b
assert op(approx(expected, abs=_abs, rel=_rel), np.array(actual)) # b, a
assert op(actual, approx(np.array(expected), abs=_abs, rel=_rel)) # a, b
assert op(approx(np.array(expected), abs=_abs, rel=_rel), actual) # b, a
assert op(np.array(actual), approx(np.array(expected), abs=_abs, rel=_rel))
assert op(approx(np.array(expected), abs=_abs, rel=_rel), np.array(actual))
def test_numpy_expecting_nan(self):
np = pytest.importorskip("numpy")
examples = [
(eq, nan, nan),
(eq, -nan, -nan),
(eq, nan, -nan),
(ne, 0.0, nan),
(ne, inf, nan),
]
for op, a, x in examples:
# Nothing is equal to NaN by default.
assert np.array(a) != approx(x)
assert a != approx(np.array(x))
# If ``nan_ok=True``, then NaN is equal to NaN.
assert op(np.array(a), approx(x, nan_ok=True))
assert op(a, approx(np.array(x), nan_ok=True))
def test_numpy_expecting_inf(self):
np = pytest.importorskip("numpy")
examples = [
(eq, inf, inf),
(eq, -inf, -inf),
(ne, inf, -inf),
(ne, 0.0, inf),
(ne, nan, inf),
]
for op, a, x in examples:
assert op(np.array(a), approx(x))
assert op(a, approx(np.array(x)))
assert op(np.array(a), approx(np.array(x)))
def test_numpy_array_wrong_shape(self):
np = pytest.importorskip("numpy")
a12 = np.array([[1, 2]])
a21 = np.array([[1], [2]])
assert a12 != approx(a21)
assert a21 != approx(a12)
def test_doctests(self, mocked_doctest_runner):
import doctest
parser = doctest.DocTestParser()
test = parser.get_doctest(
approx.__doc__, {"approx": approx}, approx.__name__, None, None
)
mocked_doctest_runner.run(test)
def test_unicode_plus_minus(self, testdir):
testdir.makepyfile(
"""
import pytest
def test_foo():
assert [3] == [pytest.approx(4)]
"""
)
expected = "4.0e-06"
result = testdir.runpytest()
result.stdout.fnmatch_lines(
["*At index 0 diff: 3 != 4 * {}".format(expected), "=* 1 failed in *="]
)
@pytest.mark.parametrize(
"x",
[
pytest.param(None),
pytest.param("string"),
pytest.param(["string"], id="nested-str"),
pytest.param([[1]], id="nested-list"),
pytest.param({"key": "string"}, id="dict-with-string"),
pytest.param({"key": {"key": 1}}, id="nested-dict"),
],
)
def test_expected_value_type_error(self, x):
with pytest.raises(TypeError):
approx(x)
@pytest.mark.parametrize(
"op",
[
pytest.param(operator.le, id="<="),
pytest.param(operator.lt, id="<"),
pytest.param(operator.ge, id=">="),
pytest.param(operator.gt, id=">"),
],
)
def test_comparison_operator_type_error(self, op):
with pytest.raises(TypeError):
op(1, approx(1, rel=1e-6, abs=1e-12))
def test_numpy_array_with_scalar(self):
np = pytest.importorskip("numpy")
actual = np.array([1 + 1e-7, 1 - 1e-8])
expected = 1.0
assert actual == approx(expected, rel=5e-7, abs=0)
assert actual != approx(expected, rel=5e-8, abs=0)
assert approx(expected, rel=5e-7, abs=0) == actual
assert approx(expected, rel=5e-8, abs=0) != actual
def test_numpy_scalar_with_array(self):
np = pytest.importorskip("numpy")
actual = 1.0
expected = np.array([1 + 1e-7, 1 - 1e-8])
assert actual == approx(expected, rel=5e-7, abs=0)
assert actual != approx(expected, rel=5e-8, abs=0)
assert approx(expected, rel=5e-7, abs=0) == actual
assert approx(expected, rel=5e-8, abs=0) != actual
def test_generic_sized_iterable_object(self):
class MySizedIterable:
def __iter__(self):
return iter([1, 2, 3, 4])
def __len__(self):
return 4
expected = MySizedIterable()
assert [1, 2, 3, 4] == approx(expected)
| true | true |
f72045666fb3ba1330df271ba4a9bc225bbc5cf0 | 458 | py | Python | rising_sphinx_theme/__init__.py | PhoenixDL/rising_sphinx_theme | 88c213524bdd87e2c4320f047eebbee04322da47 | [
"MIT"
] | 2 | 2020-05-03T09:22:06.000Z | 2020-05-18T11:32:51.000Z | rising_sphinx_theme/__init__.py | PhoenixDL/rising_sphinx_theme | 88c213524bdd87e2c4320f047eebbee04322da47 | [
"MIT"
] | 1 | 2021-09-02T10:40:00.000Z | 2021-09-02T10:40:00.000Z | rising_sphinx_theme/__init__.py | PhoenixDL/rising_sphinx_theme | 88c213524bdd87e2c4320f047eebbee04322da47 | [
"MIT"
] | 1 | 2020-06-19T09:26:47.000Z | 2020-06-19T09:26:47.000Z | """rising Sphinx theme.
"""
from os import path
__version__ = '0.0.25'
__version_full__ = __version__
def get_html_theme_path():
"""Return list of HTML theme paths."""
cur_dir = path.abspath(path.dirname(path.dirname(__file__)))
return cur_dir
# See http://www.sphinx-doc.org/en/stable/theming.html#distribute-your-theme-as-a-python-package
def setup(app):
app.add_html_theme('rising_sphinx_theme', path.abspath(path.dirname(__file__)))
| 25.444444 | 96 | 0.735808 | from os import path
__version__ = '0.0.25'
__version_full__ = __version__
def get_html_theme_path():
cur_dir = path.abspath(path.dirname(path.dirname(__file__)))
return cur_dir
ising_sphinx_theme', path.abspath(path.dirname(__file__)))
| true | true |
f72045bb170c2dd47739981943d2b653f09fbe60 | 54,959 | py | Python | codegen/fletcher/columnreader.py | honorpeter/fletcher | 9622293443b1ea70b1f3aa592098a64690600dd4 | [
"Apache-2.0"
] | 1 | 2021-03-05T08:24:57.000Z | 2021-03-05T08:24:57.000Z | codegen/fletcher/columnreader.py | honorpeter/fletcher | 9622293443b1ea70b1f3aa592098a64690600dd4 | [
"Apache-2.0"
] | null | null | null | codegen/fletcher/columnreader.py | honorpeter/fletcher | 9622293443b1ea70b1f3aa592098a64690600dd4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Delft University of Technology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package contains a python object representation for ColumnReaders, and
the functions needed to generate a ColumnReader from an Arrow field
(represented as the objects in fields.py)."""
from itertools import zip_longest
import random
from .configurable import *
from .fields import *
from .streams import *
from .lines import *
from .testbench import *
__all__ = ["ColumnReader", "BUS_ADDR_WIDTH", "INDEX_WIDTH", "CMD_TAG_WIDTH"]
# Define the generics used by ColumnReaders.
BUS_ADDR_WIDTH = Generic("BUS_ADDR_WIDTH")
BUS_LEN_WIDTH = Generic("BUS_LEN_WIDTH")
BUS_DATA_WIDTH = Generic("BUS_DATA_WIDTH")
INDEX_WIDTH = Generic("INDEX_WIDTH")
CMD_TAG_WIDTH = Generic("CMD_TAG_WIDTH")
class ReaderLevel(Configurable):
    """Represents an abstract ColumnReaderLevel(Level).

    Subclasses model one level of a ColumnReader hierarchy (primitive
    readers, null-bitmap readers, list readers, arbiters, ...) and must
    override bus_count() and test_vectors(). The __str__ representation is
    the cfg string consumed by the VHDL ColumnReader generics.
    """

    def __init__(self, **config):
        super().__init__(**config)

    @property
    def _cmdname(self):
        """Returns the cfg string command name for this ReaderLevel."""
        return "?"

    @property
    def _children(self):
        """Returns a list of child ReaderLevels or parameters."""
        return []

    def bus_count(self):
        """Returns the number of busses used by this ReaderLevel."""
        # Fixed: the original raised NotImplemented(), but NotImplemented is
        # a non-callable singleton, so that line raised a confusing
        # TypeError instead of the intended abstract-method error.
        raise NotImplementedError()

    @classmethod
    def _write_buffer(cls, memory, bits, data):
        """Writes an arrow buffer to the given Memory given a list of integers
        and bit width."""
        # Align to at least 512 bits (64 bytes); presumably this matches the
        # maximum bus data width -- TODO confirm against the VHDL side.
        memory.align(max(8*64, bits))
        addr = memory.byte_addr()
        for entry in data:
            memory.write(entry, bits)
        return addr

    def test_vectors(self, memory, row_count, commands):
        """Returns a set of test vectors for all the signals defined by this
        ReaderLevel (both command and response), given a row count and a list
        of commands (represented as a list of two-tuples, where the first entry
        is the inclusive start index and the second is the exclusive stop
        index). The Memory object that should be passed to memory is updated
        accordingly, with new data sets generated at the current memory
        pointer."""
        # Fixed: raise NotImplementedError instead of calling the
        # non-callable NotImplemented singleton.
        raise NotImplementedError()

    def __str__(self):
        """Returns the cfg string for this ReaderLevel."""
        children = ",".join(map(str, self._children))
        # (Removed a dead "attrs = ..." assignment that was immediately
        # overwritten by the loop below.)
        attrs = []
        for key, value in self._config.items():
            # Render ints and bools as plain integers so the cfg string
            # matches what the VHDL parser expects (e.g. True -> "1").
            if isinstance(value, int) or isinstance(value, bool):
                value = str(int(value))
            attrs.append("%s=%s" % (key, value))
        attrs = ",".join(attrs)
        if attrs:
            attrs = ";" + attrs
        return "%s(%s%s)" % (self._cmdname, children, attrs)
class PrimReaderLevel(ReaderLevel):
    """A reader for a primitive data type.

    Models the "prim" level of a ColumnReader: a single fixed-width value
    buffer read over one command and one data bus.
    """

    def __init__(
        self,
        bit_width,
        cmd_stream,
        cmd_val_base,
        out_stream,
        out_val,
        **kwargs
    ):
        super().__init__(**kwargs)

        # Check and save the bit width.
        if not bit_width or bit_width & (bit_width-1):
            raise ValueError("bit width must be a power of two")
        self.bit_width = bit_width

        # Command stream and its base-address field, and the output stream
        # and its value field (stream/signal objects from streams.py).
        self.cmd_stream = cmd_stream
        self.cmd_val_base = cmd_val_base
        self.out_stream = out_stream
        self.out_val = out_val

    @property
    def _cmdname(self):
        """Returns the cfg string command name for this ReaderLevel."""
        return "prim"

    @property
    def _children(self):
        """Returns a list of child ReaderLevels or parameters."""
        return [self.bit_width]

    @property
    def _config_defaults(self):
        return { # NOTE: the defaults here MUST correspond to VHDL defaults.
            "cmd_in_slice": False,
            "bus_req_slice": True,
            "bus_fifo_depth": 16,
            "bus_fifo_ram_config": "",
            "unlock_slice": True,
            "shr2gb_slice": False,
            "gb2fifo_slice": False,
            "fifo_size": 64,
            "fifo_ram_config": "",
            "fifo_xclk_stages": 0,
            "fifo2post_slice": False,
            "out_slice": True
        }

    def bus_count(self):
        """Returns the number of busses used by this ReaderLevel."""
        # NOTE(review): returns 2 although a prim reader reads one buffer;
        # presumably this counts both the request and response channels --
        # confirm against the VHDL bus arbiter wiring.
        return 2

    def test_vectors(self, memory, row_count, commands):
        """Returns a set of test vectors for all the signals defined by this
        ReaderLevel (both command and response), given a row count and a list
        of commands (represented as a list of two-tuples, where the first entry
        is the inclusive start index and the second is the exclusive stop
        index). The Memory object that should be passed to memory is updated
        accordingly, with new data sets generated at the current memory
        pointer."""

        # Generate memory for 4 buffers of the given row count. We randomly
        # select which buffer to use for each command.
        buffers = []
        for _ in range(4):
            # Each buffer entry is a uniformly random bit_width-bit value.
            data = [random.randrange(1 << self.bit_width) for _ in range(row_count)]
            addr = self._write_buffer(memory, self.bit_width, data)
            buffers.append((addr, data))

        # Generate test vectors for our signals.
        base_tv = TestVectors(self.cmd_val_base)
        # The second argument is a VHDL condition string: values are only
        # checked while the output stream's dvalid flag is asserted.
        val_tv = TestVectors(self.out_val, self.out_stream.name + "dvalid = '1'")
        for start, stop in commands:
            buf_idx = random.randrange(4)
            addr, data = buffers[buf_idx]
            base_tv.append(addr)
            # Expected output: the buffer slice addressed by the command.
            val_tv.extend(data[start:stop])

        return [base_tv, val_tv]
class ArbReaderLevel(ReaderLevel):
    """Bus-arbiter wrapper around a single child reader.

    Multiplexes the child's busses onto one bus and optionally inserts
    register slices on the remaining streams; test vectors pass straight
    through to the child.
    """

    def __init__(self, child, **kwargs):
        super().__init__(**kwargs)
        self.child = child

    @property
    def _cmdname(self):
        """Command name used for this level in the cfg string."""
        return "arb"

    @property
    def _children(self):
        """Child ReaderLevels/parameters serialized into the cfg string."""
        return [self.child]

    @property
    def _config_defaults(self):
        # NOTE: these defaults MUST match the VHDL defaults. Entry order is
        # significant: ReaderLevel.__str__ serializes them in this order.
        return {
            "method": "ROUND-ROBIN",
            "max_outstanding": 2,
            "ram_config": "",
            "req_in_slices": False,
            "req_out_slice": True,
            "resp_in_slice": False,
            "resp_out_slices": True,
            "cmd_stream_slice": True,
            "unlock_stream_slice": True,
            "out_stream_slice": True
        }

    def bus_count(self):
        """The arbiter collapses all child busses into exactly one."""
        return 1

    def test_vectors(self, memory, row_count, commands):
        """Delegates test-vector generation to the wrapped child; the
        arbiter itself adds no signals of its own."""
        return self.child.test_vectors(memory, row_count, commands)
class NullReaderLevel(ReaderLevel):
    """A reader for a null bitmap.

    Wraps a child reader and adds an Arrow validity ("null") bitmap buffer;
    the bitmap may also be implicit (all-valid) per command.
    """

    def __init__(
        self,
        child,
        cmd_stream,
        cmd_no_nulls,
        cmd_null_base,
        out_stream,
        out_not_null,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.child = child
        # cmd_no_nulls: command field selecting an implicit all-valid bitmap.
        # cmd_null_base: command field carrying the bitmap base address.
        # out_not_null: per-element validity flag on the output stream.
        self.cmd_stream = cmd_stream
        self.cmd_no_nulls = cmd_no_nulls
        self.cmd_null_base = cmd_null_base
        self.out_stream = out_stream
        self.out_not_null = out_not_null

    @property
    def _cmdname(self):
        """Returns the cfg string command name for this ReaderLevel."""
        return "null"

    @property
    def _children(self):
        """Returns a list of child ReaderLevels or parameters."""
        return [self.child]

    @property
    def _config_defaults(self):
        return { # NOTE: the defaults here MUST correspond to VHDL defaults.
            "cmd_in_slice": False,
            "bus_req_slice": True,
            "bus_fifo_depth": 16,
            "bus_fifo_ram_config": "",
            "unlock_slice": True,
            "shr2gb_slice": False,
            "gb2fifo_slice": False,
            "fifo_size": 64,
            "fifo_ram_config": "",
            "fifo_xclk_stages": 0,
            "fifo2post_slice": False,
            "out_slice": True
        }

    def bus_count(self):
        """Returns the number of busses used by this ReaderLevel."""
        # One extra bus on top of the child's, for the bitmap buffer reads.
        return self.child.bus_count() + 1

    def test_vectors(self, memory, row_count, commands):
        """Returns a set of test vectors for all the signals defined by this
        ReaderLevel (both command and response), given a row count and a list
        of commands (represented as a list of two-tuples, where the first entry
        is the inclusive start index and the second is the exclusive stop
        index). The Memory object that should be passed to memory is updated
        accordingly, with new data sets generated at the current memory
        pointer."""

        # Generate memory for 3 buffers of the given row count. We randomly
        # select between one of the buffers and an implicit null bitmap for
        # each command.
        buffers = []
        for _ in range(3):
            # min(1, randrange(10)) yields 0 with probability 1/10 and 1
            # otherwise, i.e. roughly 10% of elements are null.
            data = [min(1, random.randrange(10)) for _ in range(row_count)]
            addr = self._write_buffer(memory, 1, data)
            buffers.append((addr, data))

        # Generate test vectors for our signals.
        impl_tv = TestVectors(self.cmd_no_nulls)
        base_tv = TestVectors(self.cmd_null_base)
        # Validity flags are only checked while dvalid is asserted.
        val_tv = TestVectors(self.out_not_null, self.out_stream.name + "dvalid = '1'")
        for start, stop in commands:
            # Index 0..2 selects an explicit bitmap buffer; index 3 (one in
            # four commands on average) selects the implicit all-valid case.
            buf_idx = random.randrange(4)
            if buf_idx < 3:
                addr, data = buffers[buf_idx]
                impl_tv.append(0)
                base_tv.append(addr)
                val_tv.extend(data[start:stop])
            else:
                impl_tv.append(1)
                # No base address applies; None leaves the vector unchecked.
                base_tv.append(None)
                # Implicit bitmap: every element reads as valid.
                val_tv.extend([1 for _ in range(start, stop)])

        return [impl_tv, base_tv, val_tv] + self.child.test_vectors(memory, row_count, commands)
def _list_test_vectors(reader, memory, row_count, commands):
    """Test vector generation function shared by ListReaderLevel and
    ListPrimReaderLevel.

    Writes randomized index buffers into memory and returns a four-tuple:
    (child_length, child_commands, child_idxs, [base_tv, len_tv]), where
    child_commands are the element-space commands for the child reader and
    child_idxs are per-command lists of (start, stop) element ranges."""
    # Generate on average 4 items per list.
    child_length = row_count * 4
    child_commands = []
    child_idxs = []

    # Generate memory for 4 buffers of the given row count. We randomly
    # select one of the buffers for each command.
    buffers = []
    for _ in range(4):
        # An index buffer has row_count+1 monotonically increasing entries
        # from 0 to child_length.
        data = [random.randint(0, child_length) for _ in range(row_count-1)]
        data = [0] + sorted(data) + [child_length]
        addr = reader._write_buffer(memory, 32, data) # FIXME: this width is actually a generic!
        buffers.append((addr, data))

    # Generate test vectors for our signals and figure out the command
    # stream for the child.
    base_tv = TestVectors(reader.cmd_idx_base)
    len_tv = TestVectors(reader.out_length, reader.out_stream.name + "dvalid = '1'")
    for start, stop in commands:
        buf_idx = random.randrange(4)
        addr, data = buffers[buf_idx]
        child_commands.append((data[start], data[stop]))
        child_idxs.append(list(zip(data[start:stop], data[start+1:stop+1])))
        base_tv.append(addr)
        # Per-row list length = difference of adjacent index entries.
        len_tv.extend([data[i+1] - data[i] for i in range(start, stop)])
    return child_length, child_commands, child_idxs, [base_tv, len_tv]
class ListReaderLevel(ReaderLevel):
    """A reader for a list index buffer."""

    def __init__(
        self,
        child,
        cmd_stream,
        cmd_idx_base,
        out_stream,
        out_length,
        out_el_stream,
        **kwargs
    ):
        """child: ReaderLevel for the list elements. cmd_stream/cmd_idx_base:
        command stream and index buffer base address signal. out_stream/
        out_length: length output stream and its length signal. out_el_stream:
        secondary output stream carrying the element last/dvalid signals.
        **kwargs: configuration overrides, see _config_defaults."""
        super().__init__(**kwargs)
        self.child = child
        self.cmd_stream = cmd_stream
        self.cmd_idx_base = cmd_idx_base
        self.out_stream = out_stream
        self.out_length = out_length
        self.out_el_stream = out_el_stream

    @property
    def _cmdname(self):
        """Returns the cfg string command name for this ReaderLevel."""
        return "list"

    @property
    def _children(self):
        """Returns a list of child ReaderLevels or parameters."""
        return [self.child]

    @property
    def _config_defaults(self):
        """Default configuration options for a list() reader level."""
        return { # NOTE: the defaults here MUST correspond to VHDL defaults.
            "cmd_in_slice": False,
            "bus_req_slice": True,
            "bus_fifo_depth": 16,
            "bus_fifo_ram_config": "",
            "cmd_out_slice": True,
            "unlock_slice": True,
            "shr2gb_slice": False,
            "gb2fifo_slice": False,
            "fifo_size": 64,
            "fifo_ram_config": "",
            "fifo_xclk_stages": 0,
            "fifo2post_slice": False,
            "len_out_slice": True,
            "len_sync_slice": True,
            "data_in_slice": False,
            "data_out_slice": True
        }

    def bus_count(self):
        """Returns the number of busses used by this ReaderLevel: the child's
        busses plus one for the index buffer."""
        return self.child.bus_count() + 1

    def test_vectors(self, memory, row_count, commands):
        """Returns a set of test vectors for all the signals defined by this
        ReaderLevel (both command and response), given a row count and a list
        of commands (represented as a list of two-tuples, where the first entry
        is the inclusive start index and the second is the exclusive stop
        index). The Memory object that should be passed to memory is updated
        accordingly, with new data sets generated at the current memory
        pointer."""
        # Figure out the test vectors for the list.
        child_length, child_commands, child_idxs, tvs = _list_test_vectors(
            self, memory, row_count, commands)

        # Figure out the test vectors for the child.
        tvs.extend(self.child.test_vectors(memory, child_length, child_commands))

        # Figure out the last/dvalid signals for the element stream.
        last_tv = TestVectors(self.out_el_stream.signals[0])
        dvalid_tv = TestVectors(self.out_el_stream.signals[1])
        for idxs in child_idxs:
            for start, stop in idxs:
                l = stop - start
                if not l:
                    # Empty list: a single transfer with last high and
                    # dvalid low.
                    last_tv.append(1)
                    dvalid_tv.append(0)
                else:
                    # last is asserted only on the final element of the list.
                    for i in range(l):
                        last_tv.append(int(i == l - 1))
                        dvalid_tv.append(1)
        return tvs + [last_tv, dvalid_tv]
class ListPrimReaderLevel(ReaderLevel):
    """A reader for a list of non-nullable primitive data types."""

    def __init__(
        self,
        bit_width,
        cmd_stream,
        cmd_idx_base,
        cmd_val_base,
        out_stream,
        out_length,
        out_el_stream,
        out_el_values,
        out_el_count,
        **kwargs
    ):
        """bit_width: element width in bits, must be a power of two.
        cmd_idx_base/cmd_val_base: index and value buffer base address
        signals. out_length: list length signal on the primary output stream.
        out_el_stream/out_el_values/out_el_count: secondary output stream,
        its per-lane value signals and the valid-element count signal.
        **kwargs: configuration overrides, see _config_defaults."""
        super().__init__(**kwargs)

        # Check and save the bit width.
        if not bit_width or bit_width & (bit_width-1):
            raise ValueError("bit width must be a power of two")
        self.bit_width = bit_width

        self.cmd_stream = cmd_stream
        self.cmd_idx_base = cmd_idx_base
        self.cmd_val_base = cmd_val_base
        self.out_stream = out_stream
        self.out_length = out_length
        self.out_el_stream = out_el_stream
        self.out_el_values = out_el_values
        self.out_el_count = out_el_count

    @property
    def _cmdname(self):
        """Returns the cfg string command name for this ReaderLevel."""
        return "listprim"

    @property
    def _children(self):
        """Returns a list of child ReaderLevels or parameters."""
        return [self.bit_width]

    @property
    def _config_defaults(self):
        """Default configuration options for a listprim() reader level; the
        idx_* options configure the index buffer reader, the others the value
        buffer reader."""
        return { # NOTE: the defaults here MUST correspond to VHDL defaults.
            "epc": 1,
            "idx_cmd_in_slice": False,
            "idx_bus_req_slice": True,
            "idx_bus_fifo_depth": 16,
            "idx_bus_fifo_ram_config": "",
            "idx_cmd_out_slice": True,
            "idx_unlock_slice": True,
            "idx_shr2gb_slice": False,
            "idx_gb2fifo_slice": False,
            "idx_fifo_size": 64,
            "idx_fifo_ram_config": "",
            "idx_fifo_xclk_stages": 0,
            "idx_fifo2post_slice": False,
            "cmd_in_slice": False,
            "bus_req_slice": True,
            "bus_fifo_depth": 16,
            "bus_fifo_ram_config": "",
            "unlock_slice": True,
            "shr2gb_slice": False,
            "gb2fifo_slice": False,
            "fifo_size": 64,
            "fifo_ram_config": "",
            "fifo_xclk_stages": 0,
            "fifo2post_slice": False,
            "out_slice": False,
            "len_out_slice": True,
            "data_in_slice": False,
            "len_sync_slice": True,
            "data_out_slice": True
        }

    def bus_count(self):
        """Returns the number of busses used by this ReaderLevel: one for the
        index buffer and one for the value buffer."""
        return 2

    def test_vectors(self, memory, row_count, commands):
        """Returns a set of test vectors for all the signals defined by this
        ReaderLevel (both command and response), given a row count and a list
        of commands (represented as a list of two-tuples, where the first entry
        is the inclusive start index and the second is the exclusive stop
        index). The Memory object that should be passed to memory is updated
        accordingly, with new data sets generated at the current memory
        pointer."""
        # Figure out the test vectors for the list.
        child_length, child_commands, child_idxs, tvs = _list_test_vectors(
            self, memory, row_count, commands)

        # Generate memory for 4 buffers of the given child length. We randomly
        # select which buffer to use for each command.
        buffers = []
        for _ in range(4):
            data = [random.randrange(1 << self.bit_width) for _ in range(child_length)]
            addr = self._write_buffer(memory, self.bit_width, data)
            buffers.append((addr, data))

        # Generate test vectors for our signals.
        base_tv = TestVectors(self.cmd_val_base)
        val_tvs = [TestVectors(sig) for sig in self.out_el_values]
        cnt_tv = TestVectors(self.out_el_count)
        last_tv = TestVectors(self.out_el_stream.signals[0])
        dvalid_tv = TestVectors(self.out_el_stream.signals[1])
        for idxs in child_idxs:
            buf_idx = random.randrange(4)
            addr, cmd_data = buffers[buf_idx]
            base_tv.append(addr)
            for start, stop in idxs:
                data = cmd_data[start:stop]
                # Emit the list in batches of up to len(val_tvs) (= epc)
                # elements per cycle; unused lanes get don't-care values.
                while True:
                    cnt = 0
                    for val_tv in val_tvs:
                        if data:
                            val_tv.append(data.pop(0))
                            cnt += 1
                        else:
                            val_tv.append()
                    cnt_tv.append(cnt)
                    # A zero-element transfer (empty list) has dvalid low.
                    dvalid_tv.append(1 if cnt > 0 else 0)
                    if not data:
                        # Final (possibly partial or empty) batch of the list.
                        last_tv.append(1)
                        break
                    else:
                        last_tv.append(0)
        return tvs + val_tvs + [base_tv, cnt_tv, last_tv, dvalid_tv]
class StructReaderLevel(ReaderLevel):
    """A reader combining exactly TWO child readers into a struct."""

    def __init__(self, a, b, **kwargs):
        """a, b: the two child ReaderLevels to combine."""
        super().__init__(**kwargs)
        self.a = a
        self.b = b

    @property
    def _cmdname(self):
        """Returns the cfg string command name for this ReaderLevel."""
        return "struct"

    @property
    def _children(self):
        """Returns the two child ReaderLevels."""
        return [self.a, self.b]

    @property
    def _config_defaults(self):
        """A struct level carries no configuration of its own."""
        return {}

    def bus_count(self):
        """Returns the combined bus count of both children."""
        return sum(child.bus_count() for child in (self.a, self.b))

    def test_vectors(self, memory, row_count, commands):
        """Returns the test vectors of both children, a's first, then b's.
        Memory is updated by each child in turn at the current pointer."""
        vectors = list(self.a.test_vectors(memory, row_count, commands))
        vectors.extend(self.b.test_vectors(memory, row_count, commands))
        return vectors
def _new_cmd_stream(prefix, field_prefix=""):
    """Constructs a command stream carrying firstIdx, lastIdx, a ctrl
    SignalGroup and a tag. Returns (stream, ctrl SignalGroup)."""
    base = prefix + "cmd_" + field_prefix
    stream = Stream(base)
    stream.append(Signal(base + "firstIdx", INDEX_WIDTH))
    stream.append(Signal(base + "lastIdx", INDEX_WIDTH))
    ctrl_group = stream.append(SignalGroup(base + "ctrl"))
    stream.append(Signal(base + "tag", CMD_TAG_WIDTH))
    return stream, ctrl_group
def _new_out_stream(prefix, field_prefix=""):
    """Constructs an output stream carrying last, dvalid and a data
    SignalGroup. Returns (stream, data SignalGroup)."""
    base = prefix + "out_" + field_prefix
    stream = Stream(base)
    stream.append(Signal(base + "last"))
    stream.append(Signal(base + "dvalid"))
    data_group = stream.append(SignalGroup(base + "data"))
    return stream, data_group
def _maybe_wrap_in_arbiter(reader, **opts):
    """Wraps the given reader in an ArbReaderLevel when it uses more than
    three busses; otherwise returns it unchanged."""
    # TODO: make this stuff customizable using **opts.
    return ArbReaderLevel(reader) if reader.bus_count() > 3 else reader
def _scalar_reader(field, prefix, field_prefix, cmd_stream, cmd_ctrl, out_stream, out_data, **opts):
    """Internal function which converts a scalar field into a ReaderLevel.

    Appends the value buffer base address to the command ctrl group and the
    value signal to the output data group. Returns (reader, []); scalars have
    no secondary output streams."""
    # Add the signals to the streams.
    cmd_val_base = cmd_ctrl.append(Signal(prefix + "cmd_" + field_prefix + "valBase", BUS_ADDR_WIDTH))
    out_val = out_data.append(Signal(prefix + "out_" + field_prefix + "val", field.bit_width))

    # Construct the primitive reader. The mapping below translates field
    # config keys to PrimReaderLevel config keys.
    reader = PrimReaderLevel(
        field.bit_width,
        cmd_stream,
        cmd_val_base,
        out_stream,
        out_val,
        **field.get_cfg_dict({
            "cmd_in_slice": "cmd_in_slice",
            "bus_req_slice": "bus_req_slice",
            "bus_fifo_depth": "bus_fifo_depth",
            "bus_fifo_ram_config": "bus_fifo_ram_config",
            "unlock_slice": "unlock_slice",
            "shr2gb_slice": "shr2gb_slice",
            "gb2fifo_slice": "gb2fifo_slice",
            "fifo_size": "fifo_size",
            "fifo_ram_config": "fifo_ram_config",
            "fifo_xclk_stages": "fifo_xclk_stages",
            "fifo2post_slice": "fifo2post_slice",
            "out_slice": "out_slice"
        })
    )
    return reader, []
def _bytes_reader(field, prefix, field_prefix, cmd_stream, cmd_ctrl, out_stream, out_data, **opts):
    """Internal function which converts a UTF8/bytes field into a ReaderLevel.

    Returns (reader, [element output stream]): the primary output stream
    carries the byte-string length, the secondary stream carries up to
    field.bytes_per_cycle elements per transfer."""
    # Add the signals to the existing streams.
    cmd_val_base = cmd_ctrl.append(Signal(prefix + "cmd_" + field_prefix + "valBase", BUS_ADDR_WIDTH))
    cmd_idx_base = cmd_ctrl.append(Signal(prefix + "cmd_" + field_prefix + "idxBase", BUS_ADDR_WIDTH))
    out_length = out_data.append(Signal(prefix + "out_" + field_prefix + "len", INDEX_WIDTH))

    # Create a secondary output stream for the list elements.
    out_el_stream, out_el_data = _new_out_stream(prefix, field_prefix + "el_")

    # Populate the secondary output stream.
    epc = field.bytes_per_cycle
    # The count signal must be able to represent values 0..epc inclusive.
    out_el_count = out_el_data.append(Signal(prefix + "out_" + field_prefix + "el_cnt", int.bit_length(epc)))
    out_el_values = [
        Signal(prefix + "out_" + field_prefix + "el_val" + str(i), field.bit_width)
        for i in range(epc)
    ]

    # The elements are serialized MSB first!
    for sig in reversed(out_el_values):
        out_el_data.append(sig)

    # Construct the primitive reader.
    reader = ListPrimReaderLevel(
        field.bit_width,
        cmd_stream,
        cmd_idx_base,
        cmd_val_base,
        out_stream,
        out_length,
        out_el_stream,
        out_el_values,
        out_el_count,
        **field.get_cfg_dict({
            "bytes_per_cycle": "epc",
            "idx_cmd_in_slice": "idx_cmd_in_slice",
            "idx_bus_req_slice": "idx_bus_req_slice",
            "idx_bus_fifo_depth": "idx_bus_fifo_depth",
            "idx_bus_fifo_ram_config": "idx_bus_fifo_ram_config",
            "idx_cmd_out_slice": "idx_cmd_out_slice",
            "idx_unlock_slice": "idx_unlock_slice",
            "idx_shr2gb_slice": "idx_shr2gb_slice",
            "idx_gb2fifo_slice": "idx_gb2fifo_slice",
            "idx_fifo_size": "idx_fifo_size",
            "idx_fifo_ram_config": "idx_fifo_ram_config",
            "idx_fifo_xclk_stages": "idx_fifo_xclk_stages",
            "idx_fifo2post_slice": "idx_fifo2post_slice",
            "cmd_in_slice": "cmd_in_slice",
            "bus_req_slice": "bus_req_slice",
            "bus_fifo_depth": "bus_fifo_depth",
            "bus_fifo_ram_config": "bus_fifo_ram_config",
            "unlock_slice": "unlock_slice",
            "shr2gb_slice": "shr2gb_slice",
            "gb2fifo_slice": "gb2fifo_slice",
            "fifo_size": "fifo_size",
            "fifo_ram_config": "fifo_ram_config",
            "fifo_xclk_stages": "fifo_xclk_stages",
            "fifo2post_slice": "fifo2post_slice",
            "out_slice": "out_slice",
            "len_out_slice": "len_out_slice",
            "data_in_slice": "data_in_slice",
            "len_sync_slice": "len_sync_slice",
            "data_out_slice": "data_out_slice"
        })
    )
    return reader, [out_el_stream]
def _list_reader(field, prefix, field_prefix, cmd_stream, cmd_ctrl, out_stream, out_data, **opts):
    """Internal function which converts a list field into a ReaderLevel.

    Returns (reader, secondary output streams): the primary output stream
    carries the list length, a new secondary stream carries the child field's
    elements."""
    # Add the signals to the existing streams.
    out_length = out_data.append(Signal(prefix + "out_" + field_prefix + "len", INDEX_WIDTH))

    # Create a secondary output stream for the list elements.
    out_el_stream, out_el_data = _new_out_stream(prefix, field_prefix + field.child.name + "_")

    # Populate the secondary output stream with the child reader.
    reader, secondary_out_streams = _field_reader(
        field.child,
        prefix, field_prefix,
        cmd_stream, cmd_ctrl,
        out_el_stream, out_el_data,
        **opts)

    # Command stream signal must be appended after traversing into the
    # hierarchy.
    cmd_idx_base = cmd_ctrl.append(Signal(prefix + "cmd_" + field_prefix + "idxBase", BUS_ADDR_WIDTH))

    # Construct the primitive reader, wrapping the child reader.
    reader = ListReaderLevel(
        reader,
        cmd_stream,
        cmd_idx_base,
        out_stream,
        out_length,
        out_el_stream,
        **field.get_cfg_dict({
            "cmd_in_slice": "cmd_in_slice",
            "bus_req_slice": "bus_req_slice",
            "bus_fifo_depth": "bus_fifo_depth",
            "bus_fifo_ram_config": "bus_fifo_ram_config",
            "cmd_out_slice": "cmd_out_slice",
            "unlock_slice": "unlock_slice",
            "shr2gb_slice": "shr2gb_slice",
            "gb2fifo_slice": "gb2fifo_slice",
            "fifo_size": "fifo_size",
            "fifo_ram_config": "fifo_ram_config",
            "fifo_xclk_stages": "fifo_xclk_stages",
            "fifo2post_slice": "fifo2post_slice",
            "len_out_slice": "len_out_slice",
            "len_sync_slice": "len_sync_slice",
            "data_in_slice": "data_in_slice",
            "data_out_slice": "data_out_slice"
        })
    )
    return reader, [out_el_stream] + secondary_out_streams
def _struct_reader(field, prefix, field_prefix, cmd_stream, cmd_ctrl, out_stream, out_data, **opts):
    """Internal function which converts a struct field into a ReaderLevel.

    Builds a reader for each child field and reduces them into a balanced
    binary tree of two-input StructReaderLevels. Returns
    (reader, secondary output streams).

    Raises ValueError if the struct has no children."""
    # Construct the child Reader objects.
    child_readers = []
    secondary_out_streams = []
    for child in field.iter_children():
        child_reader, child_secondary_out_stream = _field_reader(
            child,
            prefix, field_prefix,
            cmd_stream, cmd_ctrl,
            out_stream, out_data,
            **opts)
        child_readers.append(child_reader)
        secondary_out_streams.extend(child_secondary_out_stream)

    # Guard against an empty struct: the reduction loop below would otherwise
    # spin forever without ever binding `reader`.
    if not child_readers:
        raise ValueError("struct field must have at least one child")

    # Create a binary tree of two-input struct readers.
    while True:
        # Stop if there's only one reader left.
        if len(child_readers) == 1:
            reader = child_readers[0]
            break

        # Add a level of structs, pairing the readers two by two.
        it = iter(child_readers)
        child_readers = []
        for a, b in zip_longest(*[it]*2, fillvalue=None):
            if b is None:
                # Odd amount of child readers at this level of the binary tree;
                # add the last reader without an additional struct level.
                child_readers.append(a)
            else:
                struct = StructReaderLevel(a, b)
                struct = _maybe_wrap_in_arbiter(struct, **opts)
                child_readers.append(struct)

    return reader, secondary_out_streams
def _field_reader(field, prefix, field_prefix, cmd_stream, cmd_ctrl, out_stream, out_data, **opts):
    """Internal function which converts a field into a ReaderLevel. This is
    appropriately called by the initializer of Reader().

    Dispatches on the concrete Field subclass, wraps nullable fields in a
    NullReaderLevel, and wraps the result in an arbiter when needed. Returns
    (reader, secondary output streams).

    Raises TypeError for non-Field arguments, ValueError for null fields and
    NotImplementedError for unsupported Field subclasses."""
    if not isinstance(field, Field):
        raise TypeError("field must be of type %s" % Field)
    if field.is_null():
        raise ValueError("cannot make a reader for a null field")

    # Update the field prefix.
    if field_prefix is None:
        field_prefix = ""
    else:
        field_prefix += field.name + "_"

    # Add the signals for the null reader if this field is nullable. This must
    # be done before going down the hierarchy.
    if field.nullable:
        out_not_null = out_data.append(Signal(prefix + "out_" + field_prefix + "notNull"))

    # Defer to the field-specific generators.
    for typ, gen in [
        (ScalarField, _scalar_reader),
        (BytesField, _bytes_reader),
        (ListField, _list_reader),
        (StructField, _struct_reader)
    ]:
        if isinstance(field, typ):
            reader, secondary_out_streams = gen(
                field,
                prefix, field_prefix,
                cmd_stream, cmd_ctrl,
                out_stream, out_data,
                **opts)
            break
    else:
        # BUGFIX: was `raise NotImplemented(...)`, which raises a TypeError
        # because the NotImplemented singleton is not callable.
        raise NotImplementedError("No code generator is implemented for Field type %s" % type(field))

    # Command stream signals must be appended after traversing into the
    # hierarchy.
    if field.nullable:
        cmd_no_nulls = cmd_ctrl.append(Signal(prefix + "cmd_" + field_prefix + "noNulls"))
        cmd_null_base = cmd_ctrl.append(Signal(prefix + "cmd_" + field_prefix + "nullBase", BUS_ADDR_WIDTH))

    # Generate the null() level if this field is nullable.
    if field.nullable:
        reader = NullReaderLevel(
            reader,
            cmd_stream,
            cmd_no_nulls,
            cmd_null_base,
            out_stream,
            out_not_null,
            **field.get_cfg_dict({
                "null_cmd_in_slice": "cmd_in_slice",
                "null_bus_req_slice": "bus_req_slice",
                "null_bus_fifo_depth": "bus_fifo_depth",
                "null_bus_fifo_ram_config": "bus_fifo_ram_config",
                "null_unlock_slice": "unlock_slice",
                "null_shr2gb_slice": "shr2gb_slice",
                "null_gb2fifo_slice": "gb2fifo_slice",
                "null_fifo_size": "fifo_size",
                "null_fifo_ram_config": "fifo_ram_config",
                "null_fifo_xclk_stages": "fifo_xclk_stages",
                "null_fifo2post_slice": "fifo2post_slice",
                "null_out_slice": "out_slice"
            })
        )

    # Wrap the field in an arbiter based on the arbiter policy.
    reader = _maybe_wrap_in_arbiter(reader, **opts)

    return reader, secondary_out_streams
wrapper_body_template = """
-- Copyright (C) Delft University of Technology - All Rights Reserved
-- (until further notice)
library ieee;
use ieee.std_logic_1164.all;
use ieee.numeric_std.all;
library work;
use work.Streams.all;
use work.Utils.all;
use work.ColumnConfig.all;
use work.ColumnConfigParse.all;
use work.Columns.all;
entity {camelprefix}ColumnReader is
generic (
---------------------------------------------------------------------------
-- Bus metrics and configuration
---------------------------------------------------------------------------
-- Bus address width.
BUS_ADDR_WIDTH : natural := 32;
-- Bus burst length width.
BUS_LEN_WIDTH : natural := 8;
-- Bus data width.
BUS_DATA_WIDTH : natural := 32;
-- Number of beats in a burst step.
BUS_BURST_STEP_LEN : natural := 4;
-- Maximum number of beats in a burst.
BUS_BURST_MAX_LEN : natural := 16;
---------------------------------------------------------------------------
-- Arrow metrics and configuration
---------------------------------------------------------------------------
-- Index field width.
INDEX_WIDTH : natural := 32;
---------------------------------------------------------------------------
-- Column metrics and configuration
---------------------------------------------------------------------------
-- Enables or disables command stream tag system. When enabled, an
-- additional output stream is created that returns tags supplied along
-- with the command stream when all BufferReaders finish making bus
-- requests for the command. This can be used to support chunking later.
CMD_TAG_ENABLE : boolean := false;
-- Command stream tag width. Must be at least 1 to avoid null vectors.
CMD_TAG_WIDTH : natural := 1
);
port (
---------------------------------------------------------------------------
-- Clock domains
---------------------------------------------------------------------------
-- Rising-edge sensitive clock and active-high synchronous reset for the
-- bus and control logic side of the BufferReader.
bus_clk : in std_logic;
bus_reset : in std_logic;
-- Rising-edge sensitive clock and active-high synchronous reset for the
-- accelerator side.
acc_clk : in std_logic;
acc_reset : in std_logic;
---------------------------------------------------------------------------
-- Command streams
---------------------------------------------------------------------------
-- Command stream input (bus clock domain). firstIdx and lastIdx represent
-- a range of elements to be fetched from memory. firstIdx is inclusive,
-- lastIdx is exclusive for normal buffers and inclusive for index buffers,
-- in all cases resulting in lastIdx - firstIdx elements. The ctrl vector
-- is a concatenation of the base address for each buffer and the null
-- bitmap present flags, dependent on CFG.
@cmd_ports
-- Unlock stream (bus clock domain). Produces the chunk tags supplied by
-- the command stream when all BufferReaders finish processing the command.
unlock_valid : out std_logic;
unlock_ready : in std_logic := '1';
unlock_tag : out std_logic_vector(CMD_TAG_WIDTH-1 downto 0);
---------------------------------------------------------------------------
-- Bus access ports
---------------------------------------------------------------------------
-- Bus access port (bus clock domain).
bus_rreq_valid : out std_logic;
bus_rreq_ready : in std_logic;
bus_rreq_addr : out std_logic_vector(BUS_ADDR_WIDTH-1 downto 0);
bus_rreq_len : out std_logic_vector(BUS_LEN_WIDTH-1 downto 0);
bus_rdat_valid : in std_logic;
bus_rdat_ready : out std_logic;
bus_rdat_data : in std_logic_vector(BUS_DATA_WIDTH-1 downto 0);
bus_rdat_last : in std_logic;
---------------------------------------------------------------------------
-- User streams
---------------------------------------------------------------------------
@out_ports
);
end {camelprefix}ColumnReader;
architecture Behavioral of {camelprefix}ColumnReader is
@defs
begin
@arch
-- Wrap an arbiter and register slices around the requested column reader.
{lowerprefix}inst: ColumnReaderLevel
generic map (
BUS_ADDR_WIDTH => BUS_ADDR_WIDTH,
BUS_LEN_WIDTH => BUS_LEN_WIDTH,
BUS_DATA_WIDTH => BUS_DATA_WIDTH,
BUS_BURST_STEP_LEN => BUS_BURST_STEP_LEN,
BUS_BURST_MAX_LEN => BUS_BURST_MAX_LEN,
INDEX_WIDTH => INDEX_WIDTH,
CFG => "{cfg}",
CMD_TAG_ENABLE => CMD_TAG_ENABLE,
CMD_TAG_WIDTH => CMD_TAG_WIDTH
)
port map (
bus_clk => bus_clk,
bus_reset => bus_reset,
acc_clk => acc_clk,
acc_reset => acc_reset,
cmd_valid => cmd_valid,
cmd_ready => cmd_ready,
cmd_firstIdx => cmd_firstIdx,
cmd_lastIdx => cmd_lastIdx,
cmd_ctrl => cmd_ctrl,
cmd_tag => cmd_tag,
unlock_valid => unlock_valid,
unlock_ready => unlock_ready,
unlock_tag => unlock_tag,
bus_rreq_valid(0) => bus_rreq_valid,
bus_rreq_ready(0) => bus_rreq_ready,
bus_rreq_addr => bus_rreq_addr,
bus_rreq_len => bus_rreq_len,
bus_rdat_valid(0) => bus_rdat_valid,
bus_rdat_ready(0) => bus_rdat_ready,
bus_rdat_data => bus_rdat_data,
bus_rdat_last(0) => bus_rdat_last,
out_valid => out_valids,
out_ready => out_readys,
out_last => out_lasts,
out_dvalid => out_dvalids,
out_data => out_datas
);
end Behavioral;
"""
wrapper_component_template = """
component {camelprefix}ColumnReader is
generic (
BUS_ADDR_WIDTH : natural := 32;
BUS_LEN_WIDTH : natural := 8;
BUS_DATA_WIDTH : natural := 32;
BUS_BURST_STEP_LEN : natural := 4;
BUS_BURST_MAX_LEN : natural := 16;
INDEX_WIDTH : natural := 32;
CMD_TAG_ENABLE : boolean := false;
CMD_TAG_WIDTH : natural := 1
);
port (
bus_clk : in std_logic;
bus_reset : in std_logic;
acc_clk : in std_logic;
acc_reset : in std_logic;
@cmd_ports
unlock_valid : out std_logic;
unlock_ready : in std_logic := '1';
unlock_tag : out std_logic_vector(CMD_TAG_WIDTH-1 downto 0);
bus_rreq_valid : out std_logic;
bus_rreq_ready : in std_logic;
bus_rreq_addr : out std_logic_vector(BUS_ADDR_WIDTH-1 downto 0);
bus_rreq_len : out std_logic_vector(BUS_LEN_WIDTH-1 downto 0);
bus_rdat_valid : in std_logic;
bus_rdat_ready : out std_logic;
bus_rdat_data : in std_logic_vector(BUS_DATA_WIDTH-1 downto 0);
bus_rdat_last : in std_logic;
@out_ports
);
end component;
"""
uut_template_with_unlock = """
uut: ColumnReaderLevel
generic map (
BUS_ADDR_WIDTH => BUS_ADDR_WIDTH,
BUS_LEN_WIDTH => BUS_LEN_WIDTH,
BUS_DATA_WIDTH => BUS_DATA_WIDTH,
BUS_BURST_STEP_LEN => BUS_BURST_STEP_LEN,
BUS_BURST_MAX_LEN => BUS_BURST_MAX_LEN,
INDEX_WIDTH => INDEX_WIDTH,
CFG => "{cfg}",
CMD_TAG_ENABLE => CMD_TAG_ENABLE,
CMD_TAG_WIDTH => CMD_TAG_WIDTH
)
port map (
bus_clk => bus_clk,
bus_reset => bus_reset,
acc_clk => {acc}_clk,
acc_reset => {acc}_reset,
cmd_valid => cmd_valid,
cmd_ready => cmd_ready,
cmd_firstIdx => cmd_firstIdx,
cmd_lastIdx => cmd_lastIdx,
cmd_ctrl => cmd_ctrl,
cmd_tag => cmd_tag,
unlock_valid => unlock_valid,
unlock_ready => unlock_ready,
unlock_tag => unlock_tag,
bus_rreq_valid(0) => bus_rreq_valid,
bus_rreq_ready(0) => bus_rreq_ready,
bus_rreq_addr => bus_rreq_addr,
bus_rreq_len => bus_rreq_len,
bus_rdat_valid(0) => bus_rdat_valid,
bus_rdat_ready(0) => bus_rdat_ready,
bus_rdat_data => bus_rdat_data,
bus_rdat_last(0) => bus_rdat_last,
out_valid => out_valids,
out_ready => out_readys,
out_last => out_lasts,
out_dvalid => out_dvalids,
out_data => out_datas
);
"""
uut_template_without_unlock = """
uut: ColumnReaderLevel
generic map (
BUS_ADDR_WIDTH => BUS_ADDR_WIDTH,
BUS_LEN_WIDTH => BUS_LEN_WIDTH,
BUS_DATA_WIDTH => BUS_DATA_WIDTH,
BUS_BURST_STEP_LEN => BUS_BURST_STEP_LEN,
BUS_BURST_MAX_LEN => BUS_BURST_MAX_LEN,
INDEX_WIDTH => INDEX_WIDTH,
CFG => "{cfg}",
CMD_TAG_ENABLE => CMD_TAG_ENABLE,
CMD_TAG_WIDTH => CMD_TAG_WIDTH
)
port map (
bus_clk => bus_clk,
bus_reset => bus_reset,
acc_clk => {acc}_clk,
acc_reset => {acc}_reset,
cmd_valid => cmd_valid,
cmd_ready => cmd_ready,
cmd_firstIdx => cmd_firstIdx,
cmd_lastIdx => cmd_lastIdx,
cmd_ctrl => cmd_ctrl,
bus_rreq_valid(0) => bus_rreq_valid,
bus_rreq_ready(0) => bus_rreq_ready,
bus_rreq_addr => bus_rreq_addr,
bus_rreq_len => bus_rreq_len,
bus_rdat_valid(0) => bus_rdat_valid,
bus_rdat_ready(0) => bus_rdat_ready,
bus_rdat_data => bus_rdat_data,
bus_rdat_last(0) => bus_rdat_last,
out_valid => out_valids,
out_ready => out_readys,
out_last => out_lasts,
out_dvalid => out_dvalids,
out_data => out_datas
);
"""
class ColumnReader(object):
    """Represents a ColumnReader: builds the ReaderLevel hierarchy and the
    command/unlock/bus/output streams for an Arrow field, and can emit the
    VHDL wrapper and a randomized testbench for it."""

    def __init__(self, field, instance_prefix=None, signal_prefix="", bus_clk_prefix="", main_clk_prefix="", **opts):
        """Generates a ColumnReader for the given Arrow field. prefix
        optionally specifies a name for the ColumnReader, which will be
        prefixed to all signals and instance names in the generated code.

        field: the Field to generate a reader for.
        instance_prefix: prefix for instance names; defaults to field.name.
        signal_prefix: prefix for signal names; field.name if passed None.
        bus_clk_prefix, main_clk_prefix: clock signal name prefixes.
        **opts: passed through to the ReaderLevel generators."""
        super().__init__()

        # Basic error checking.
        if not isinstance(field, Field):
            raise TypeError("field must be of type %s" % Field)
        self.field = field

        # Figure out the prefixes; each non-empty prefix is normalized to end
        # in a single underscore.
        if instance_prefix is None:
            instance_prefix = field.name
        if instance_prefix and not instance_prefix[-1] == "_":
            instance_prefix += "_"
        self.instance_prefix = instance_prefix
        if signal_prefix is None:
            signal_prefix = field.name
        if signal_prefix and not signal_prefix[-1] == "_":
            signal_prefix += "_"
        self.signal_prefix = signal_prefix
        if bus_clk_prefix and not bus_clk_prefix[-1] == "_":
            bus_clk_prefix += "_"
        self.bus_clk_prefix = bus_clk_prefix
        if main_clk_prefix and not main_clk_prefix[-1] == "_":
            main_clk_prefix += "_"
        self.main_clk_prefix = main_clk_prefix

        # Construct the command, unlock, bus request and bus data streams.
        self.cmd_stream, cmd_ctrl = _new_cmd_stream(self.signal_prefix)
        p = self.signal_prefix + "unlock_"
        self.unlock_stream = Stream(p)
        self.unlock_stream.append(Signal(p + "tag", CMD_TAG_WIDTH))
        p = self.signal_prefix + "bus_rreq_"
        self.bus_rreq_stream = Stream(p)
        self.bus_rreq_stream.append(Signal(p + "addr", BUS_ADDR_WIDTH))
        self.bus_rreq_stream.append(Signal(p + "len", BUS_LEN_WIDTH))
        p = self.signal_prefix + "bus_rdat_"
        self.bus_rdat_stream = Stream(p)
        self.bus_rdat_stream.append(Signal(p + "data", BUS_DATA_WIDTH))
        self.bus_rdat_stream.append(Signal(p + "last"))
        main_out_stream, out_data = _new_out_stream(self.signal_prefix)

        # Construct the field reader.
        reader, secondary_out_streams = _field_reader(
            self.field,
            self.signal_prefix, None,
            self.cmd_stream, cmd_ctrl,
            main_out_stream, out_data,
            **opts)

        # If the reader has more than one bus, wrap in an arbiter so the
        # toplevel exposes a single bus.
        if reader.bus_count() > 1:
            reader = ArbReaderLevel(reader)
        self.reader = reader

        # Construct the output stream group.
        self.out_stream = StreamGroup(main_out_stream, *secondary_out_streams)

    @property
    def _camel_prefix(self):
        """Returns the instance prefix in CamelCase."""
        return "".join([w[:1].upper() + w[1:] for w in self.instance_prefix.split("_")])

    @property
    def _lower_prefix(self):
        """Returns the instance prefix in lower_case."""
        return self.instance_prefix.lower()

    def cfg(self):
        """Returns the cfg string representation of this ColumnReader."""
        return str(self.reader)

    def wrapper_body(self):
        """Returns the VHDL entity and body for this ColumnReader's wrapper."""
        return gen_template(
            wrapper_body_template,
            camelprefix = self._camel_prefix,
            lowerprefix = self._lower_prefix,
            cfg = self.cfg(),
            cmd_ports = self.cmd_stream.def_ports(PortDir.IN, False),
            out_ports = self.out_stream.def_ports(PortDir.OUT, False).trimsep(),
            defs = self.cmd_stream.def_signals(False) + self.out_stream.def_signals(False),
            arch = self.cmd_stream.arch_serialize() + self.out_stream.arch_deserialize()
        )

    def wrapper_component(self):
        """Returns the VHDL component declaration for this ColumnReader's
        wrapper."""
        # NOTE(review): uses instance_prefix[:-1] rather than _camel_prefix as
        # wrapper_body() does — confirm the entity/component names match.
        return gen_template(
            wrapper_component_template,
            camelprefix = self.instance_prefix[:-1],
            cmd_ports = self.cmd_stream.def_ports(PortDir.IN, False),
            out_ports = self.out_stream.def_ports(PortDir.OUT, False).trimsep()
        )

    def testbench(self, **kwargs):
        """Generates a randomized testbench for this ColumnReader.

        Recognized kwargs (all optional; randomized when absent): seed,
        row_count, cmd_count, addr_width, data_width, burst_step_len,
        burst_max_len, len_width, tag_width, multi_clk,
        random_bus_rreq_timing, random_bus_rdat_timing.
        Returns the testbench VHDL as a string."""
        # Randomize any parameters not explicitly given; record them all for
        # the documentation section at the end.
        params = []
        def get_param(name, default):
            value = kwargs.get(name, default)
            params.append((name, value))
            return value
        seed = get_param("seed", random.randrange(1<<32))
        random.seed(seed)
        row_count = get_param("row_count", 100)
        cmd_count = get_param("cmd_count", 100)
        addr_width = get_param("addr_width", random.randint(32, 64))
        data_width = get_param("data_width", 1 << random.randint(5, 9))
        burst_step_len = get_param("burst_step_len", max(self.field.widest() // data_width, 1 << random.randint(0, 5)))
        burst_max_len = get_param("burst_max_len", burst_step_len * (1 << random.randint(0, 4)))
        len_width = get_param("len_width", random.randint(1, 4) * int.bit_length(burst_max_len))
        tag_width = get_param("tag_width", random.choice([0, 1, 4]))
        multi_clk = get_param("multi_clk", True)
        random_bus_rreq_timing = get_param("random_bus_rreq_timing", random.choice([True, False]))
        random_bus_rdat_timing = get_param("random_bus_rdat_timing", random.choice([True, False]))

        # Generate the testbench wrapper object. With a single clock the
        # accelerator side shares the bus clock domain.
        acc = "acc" if multi_clk else "bus"
        tb = Testbench(self._camel_prefix + "ColumnReader_tb", {"bus", acc})

        # Set constants.
        tb.set_const("BUS_ADDR_WIDTH", addr_width)
        tb.set_const("BUS_LEN_WIDTH", len_width)
        tb.set_const("BUS_DATA_WIDTH", data_width)
        tb.set_const("BUS_BURST_STEP_LEN", burst_step_len)
        tb.set_const("BUS_BURST_MAX_LEN", burst_max_len)
        tb.set_const("INDEX_WIDTH", 32)
        tb.set_const("CMD_TAG_ENABLE", tag_width > 0)
        tb.set_const("CMD_TAG_WIDTH", max(1, tag_width))

        # Add the streams.
        tb.append_input_stream(self.cmd_stream, "bus")
        if tag_width > 0:
            tb.append_output_stream(self.unlock_stream, "bus")
        tb.append_output_stream(self.bus_rreq_stream, "bus")
        tb.append_input_stream(self.bus_rdat_stream, "bus")
        tb.append_output_stream(self.out_stream, acc)

        # Generate a random set of commands (firstIdx inclusive, lastIdx
        # exclusive, always at least one row).
        commands = []
        for _ in range(cmd_count):
            a = random.randrange(row_count)
            b = random.randrange(row_count)
            commands.append((min(a, b), max(a, b) + 1))

        # Generate toplevel command stream signal test vectors.
        cmd_first_tv = tb.append_test_vector(TestVectors(self.cmd_stream.signals[0]))
        cmd_last_tv = tb.append_test_vector(TestVectors(self.cmd_stream.signals[1]))
        for start, stop in commands:
            cmd_first_tv.append(start)
            cmd_last_tv.append(stop)

        # Generate tag stream signal test vectors; each command's tag must be
        # echoed on the unlock stream.
        if tag_width > 0:
            tags = [random.randrange(1 << tag_width) for _ in commands]
            tb.append_test_vector(TestVectors(self.cmd_stream.signals[-1])).extend(tags)
            tb.append_test_vector(TestVectors(self.unlock_stream.signals[0])).extend(tags)

        # Generate output stream master last/dvalid test vectors.
        out_last_tv = tb.append_test_vector(TestVectors(self.out_stream.streams[0].signals[0]))
        out_dvalid_tv = tb.append_test_vector(TestVectors(self.out_stream.streams[0].signals[1]))
        for start, stop in commands:
            for i in range(start, stop):
                out_last_tv.append(int(i == stop - 1))
                out_dvalid_tv.append(1)

        # Generate a memory model.
        memory = Memory()
        tb.append_memory(memory, self.bus_rreq_stream, self.bus_rdat_stream, "bus",
                         random_bus_rreq_timing, random_bus_rdat_timing)

        # Generate the test vectors for the readers.
        tvs = self.reader.test_vectors(memory, row_count, commands)
        for tv in tvs:
            tb.append_test_vector(tv)

        # Append unit under test.
        template = uut_template_with_unlock if tag_width > 0 else uut_template_without_unlock
        tb.append_uut(template.format(cfg=self.cfg(), acc=acc))

        # Add documentation (memory dump, command list, parameters, schema) as
        # a comment block at the end of the testbench.
        doc = []
        doc.append("Memory dump:")
        doc.extend(["  " + x for x in memory.hexdump().split("\n")])
        doc.append("")
        doc.append("Command stream:")
        transfer = 1
        for i, (start, end) in enumerate(commands):
            doc.append("  Command %3d: %4d to %4d = out transfer %5d to %5d" % (
                i + 1, start, end - 1, transfer, transfer + (end - start - 1)))
            transfer += end - start
        doc.append("")
        doc.append("Generator parameters:")
        doc.extend(["  %s: %s" % x for x in params])
        doc.append("")
        doc.append("Schema:")
        doc.extend(["  " + x for x in self.field.pprint().split("\n")])
        tb.append_uut("\n".join(["  -- " + x for x in doc]))

        return str(tb)
| 38.785462 | 119 | 0.575138 |
from itertools import zip_longest
import random
from .configurable import *
from .fields import *
from .streams import *
from .lines import *
from .testbench import *
__all__ = ["ColumnReader", "BUS_ADDR_WIDTH", "INDEX_WIDTH", "CMD_TAG_WIDTH"]
BUS_ADDR_WIDTH = Generic("BUS_ADDR_WIDTH")
BUS_LEN_WIDTH = Generic("BUS_LEN_WIDTH")
BUS_DATA_WIDTH = Generic("BUS_DATA_WIDTH")
INDEX_WIDTH = Generic("INDEX_WIDTH")
CMD_TAG_WIDTH = Generic("CMD_TAG_WIDTH")
class ReaderLevel(Configurable):
    """Abstract base class for one level of the ColumnReader hierarchy.

    Subclasses (prim, arb, null, list, listprim, struct) model the reader
    primitives of the generated hardware. str() of a level serializes it to
    the textual CFG entry consumed by the VHDL ColumnReaderLevel.
    """

    def __init__(self, **config):
        super().__init__(**config)

    @property
    def _cmdname(self):
        """Name of this level in the CFG string; overridden by subclasses."""
        return "?"

    @property
    def _children(self):
        """Children/parameters serialized inside the CFG parentheses."""
        return []

    def bus_count(self):
        """Return the number of bus masters this level requires."""
        # Fixed from `raise NotImplemented()`: NotImplemented is a sentinel
        # value, not an exception type, so calling/raising it is a TypeError.
        raise NotImplementedError()

    @classmethod
    def _write_buffer(cls, memory, bits, data):
        """Write `data` as packed `bits`-wide entries into the memory model.

        Returns the aligned start byte address of the buffer.
        """
        # Align to at least 64 bytes (512 bits), or to the entry width when
        # a single entry is wider than that.
        memory.align(max(8*64, bits))
        addr = memory.byte_addr()
        for entry in data:
            memory.write(entry, bits)
        return addr

    def test_vectors(self, memory, row_count, commands):
        """Generate the test vectors for this level; see subclasses."""
        raise NotImplementedError()

    def __str__(self):
        """Serialize this level to its CFG string, e.g. `prim(32;epc=1)`."""
        children = ",".join(map(str, self._children))
        # Removed a dead duplicate assignment to `attrs` that was immediately
        # overwritten by the loop below.
        attrs = []
        for key, value in self._config.items():
            # Render bools and ints uniformly as plain integers so the VHDL
            # config parser sees "0"/"1" instead of "False"/"True".
            if isinstance(value, int) or isinstance(value, bool):
                value = str(int(value))
            attrs.append("%s=%s" % (key, value))
        attrs = ",".join(attrs)
        if attrs:
            attrs = ";" + attrs
        return "%s(%s%s)" % (self._cmdname, children, attrs)
class PrimReaderLevel(ReaderLevel):
    """Reader level for a fixed-width primitive (scalar) buffer."""
    def __init__(
        self,
        bit_width,
        cmd_stream,
        cmd_val_base,
        out_stream,
        out_val,
        **kwargs
    ):
        super().__init__(**kwargs)
        # The generated hardware only supports power-of-two element widths.
        if not bit_width or bit_width & (bit_width-1):
            raise ValueError("bit width must be a power of two")
        self.bit_width = bit_width
        self.cmd_stream = cmd_stream
        # Command-stream signal carrying the buffer base address.
        self.cmd_val_base = cmd_val_base
        self.out_stream = out_stream
        # Output-stream signal carrying the element value.
        self.out_val = out_val
    @property
    def _cmdname(self):
        return "prim"
    @property
    def _children(self):
        # Serialized as prim(<bit_width>) in the CFG string.
        return [self.bit_width]
    @property
    def _config_defaults(self):
        # Register-slice / FIFO tuning knobs of the underlying BufferReader.
        # NOTE: insertion order matters — it determines the CFG string.
        return {
            "cmd_in_slice": False,
            "bus_req_slice": True,
            "bus_fifo_depth": 16,
            "bus_fifo_ram_config": "",
            "unlock_slice": True,
            "shr2gb_slice": False,
            "gb2fifo_slice": False,
            "fifo_size": 64,
            "fifo_ram_config": "",
            "fifo_xclk_stages": 0,
            "fifo2post_slice": False,
            "out_slice": True
        }
    def bus_count(self):
        # NOTE(review): returns 2 even though a prim reader has one buffer;
        # presumably counts request+response channels — confirm upstream.
        return 2
    def test_vectors(self, memory, row_count, commands):
        """Write four random buffers to the memory model and produce base
        address / expected value vectors, one random buffer per command."""
        # CAUTION: the exact order of random.* calls defines the reproducible
        # output for a given seed; do not reorder.
        buffers = []
        for _ in range(4):
            data = [random.randrange(1 << self.bit_width) for _ in range(row_count)]
            addr = self._write_buffer(memory, self.bit_width, data)
            buffers.append((addr, data))
        base_tv = TestVectors(self.cmd_val_base)
        # Expected values are only checked while dvalid is asserted.
        val_tv = TestVectors(self.out_val, self.out_stream.name + "dvalid = '1'")
        for start, stop in commands:
            buf_idx = random.randrange(4)
            addr, data = buffers[buf_idx]
            base_tv.append(addr)
            val_tv.extend(data[start:stop])
        return [base_tv, val_tv]
class ArbReaderLevel(ReaderLevel):
    """Reader level that arbitrates the child's buses onto a single bus."""
    def __init__(self, child, **kwargs):
        super().__init__(**kwargs)
        self.child = child
    @property
    def _cmdname(self):
        return "arb"
    @property
    def _children(self):
        return [self.child]
    @property
    def _config_defaults(self):
        # Arbiter policy and register-slice knobs.
        # NOTE: insertion order matters — it determines the CFG string.
        return {
            "method": "ROUND-ROBIN",
            "max_outstanding": 2,
            "ram_config": "",
            "req_in_slices": False,
            "req_out_slice": True,
            "resp_in_slice": False,
            "resp_out_slices": True,
            "cmd_stream_slice": True,
            "unlock_stream_slice": True,
            "out_stream_slice": True
        }
    def bus_count(self):
        # Whatever the child needs is collapsed into exactly one bus.
        return 1
    def test_vectors(self, memory, row_count, commands):
        # Arbitration is transparent to the data; delegate to the child.
        return self.child.test_vectors(memory, row_count, commands)
class NullReaderLevel(ReaderLevel):
    """Reader level adding Arrow null-bitmap support around a child level."""
    def __init__(
        self,
        child,
        cmd_stream,
        cmd_no_nulls,
        cmd_null_base,
        out_stream,
        out_not_null,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.child = child
        self.cmd_stream = cmd_stream
        # Command flag: buffer has no null bitmap (all values implicit valid).
        self.cmd_no_nulls = cmd_no_nulls
        # Command signal: base address of the null bitmap buffer.
        self.cmd_null_base = cmd_null_base
        self.out_stream = out_stream
        # Output signal: per-element not-null flag.
        self.out_not_null = out_not_null
    @property
    def _cmdname(self):
        return "null"
    @property
    def _children(self):
        return [self.child]
    @property
    def _config_defaults(self):
        # BufferReader tuning knobs for the null bitmap buffer.
        # NOTE: insertion order matters — it determines the CFG string.
        return {
            "cmd_in_slice": False,
            "bus_req_slice": True,
            "bus_fifo_depth": 16,
            "bus_fifo_ram_config": "",
            "unlock_slice": True,
            "shr2gb_slice": False,
            "gb2fifo_slice": False,
            "fifo_size": 64,
            "fifo_ram_config": "",
            "fifo_xclk_stages": 0,
            "fifo2post_slice": False,
            "out_slice": True
        }
    def bus_count(self):
        # One extra bus for the null bitmap buffer reader.
        return self.child.bus_count() + 1
    def test_vectors(self, memory, row_count, commands):
        """Generate vectors for three random bitmap buffers plus a fourth
        virtual "no bitmap" case where every element reads as valid."""
        # CAUTION: the exact order of random.* calls defines the reproducible
        # output for a given seed; do not reorder.
        buffers = []
        for _ in range(3):
            # ~90% of the bits are 1 (not null), 10% are 0 (null).
            data = [min(1, random.randrange(10)) for _ in range(row_count)]
            addr = self._write_buffer(memory, 1, data)
            buffers.append((addr, data))
        impl_tv = TestVectors(self.cmd_no_nulls)
        base_tv = TestVectors(self.cmd_null_base)
        val_tv = TestVectors(self.out_not_null, self.out_stream.name + "dvalid = '1'")
        for start, stop in commands:
            # buf_idx == 3 selects the implicit "no nulls" case.
            buf_idx = random.randrange(4)
            if buf_idx < 3:
                addr, data = buffers[buf_idx]
                impl_tv.append(0)
                base_tv.append(addr)
                val_tv.extend(data[start:stop])
            else:
                impl_tv.append(1)
                base_tv.append(None)
                val_tv.extend([1 for _ in range(start, stop)])
        return [impl_tv, base_tv, val_tv] + self.child.test_vectors(memory, row_count, commands)
def _list_test_vectors(reader, memory, row_count, commands):
    """Shared helper generating index-buffer test vectors for list readers.

    Writes four random, sorted 32-bit index buffers to the memory model and
    derives per-command child commands/index pairs from them.

    Returns (child_length, child_commands, child_idxs, [base_tv, len_tv]).
    """
    # CAUTION: the exact order of random.* calls defines the reproducible
    # output for a given seed; do not reorder.
    # The child (element) buffer averages 4 elements per list row.
    child_length = row_count * 4
    child_commands = []
    child_idxs = []
    buffers = []
    for _ in range(4):
        # Index buffers hold row_count+1 monotonically increasing offsets
        # bracketed by 0 and child_length.
        data = [random.randint(0, child_length) for _ in range(row_count-1)]
        data = [0] + sorted(data) + [child_length]
        addr = reader._write_buffer(memory, 32, data)
        buffers.append((addr, data))
    base_tv = TestVectors(reader.cmd_idx_base)
    len_tv = TestVectors(reader.out_length, reader.out_stream.name + "dvalid = '1'")
    for start, stop in commands:
        buf_idx = random.randrange(4)
        addr, data = buffers[buf_idx]
        # The child must fetch the element range spanned by this command.
        child_commands.append((data[start], data[stop]))
        # Per-row (first, last) element index pairs for this command.
        child_idxs.append(list(zip(data[start:stop], data[start+1:stop+1])))
        base_tv.append(addr)
        # Expected per-row list lengths.
        len_tv.extend([data[i+1] - data[i] for i in range(start, stop)])
    return child_length, child_commands, child_idxs, [base_tv, len_tv]
class ListReaderLevel(ReaderLevel):
    """Reader level for a list whose elements come from a child reader."""
    def __init__(
        self,
        child,
        cmd_stream,
        cmd_idx_base,
        out_stream,
        out_length,
        out_el_stream,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.child = child
        self.cmd_stream = cmd_stream
        # Command signal: base address of the index (offsets) buffer.
        self.cmd_idx_base = cmd_idx_base
        self.out_stream = out_stream
        # Output signal: per-row list length.
        self.out_length = out_length
        # Secondary stream carrying the list elements.
        self.out_el_stream = out_el_stream
    @property
    def _cmdname(self):
        return "list"
    @property
    def _children(self):
        return [self.child]
    @property
    def _config_defaults(self):
        # Tuning knobs for the index BufferReader and length sync logic.
        # NOTE: insertion order matters — it determines the CFG string.
        return {
            "cmd_in_slice": False,
            "bus_req_slice": True,
            "bus_fifo_depth": 16,
            "bus_fifo_ram_config": "",
            "cmd_out_slice": True,
            "unlock_slice": True,
            "shr2gb_slice": False,
            "gb2fifo_slice": False,
            "fifo_size": 64,
            "fifo_ram_config": "",
            "fifo_xclk_stages": 0,
            "fifo2post_slice": False,
            "len_out_slice": True,
            "len_sync_slice": True,
            "data_in_slice": False,
            "data_out_slice": True
        }
    def bus_count(self):
        # One extra bus for the index buffer reader.
        return self.child.bus_count() + 1
    def test_vectors(self, memory, row_count, commands):
        """Generate index-buffer vectors, the child's element vectors, and
        the element stream's last/dvalid handshake vectors."""
        child_length, child_commands, child_idxs, tvs = _list_test_vectors(
            self, memory, row_count, commands)
        tvs.extend(self.child.test_vectors(memory, child_length, child_commands))
        last_tv = TestVectors(self.out_el_stream.signals[0])
        dvalid_tv = TestVectors(self.out_el_stream.signals[1])
        for idxs in child_idxs:
            for start, stop in idxs:
                l = stop - start
                if not l:
                    # Empty list: a single transfer with last=1, dvalid=0.
                    last_tv.append(1)
                    dvalid_tv.append(0)
                else:
                    for i in range(l):
                        last_tv.append(int(i == l - 1))
                        dvalid_tv.append(1)
        return tvs + [last_tv, dvalid_tv]
class ListPrimReaderLevel(ReaderLevel):
    """Reader level for a list of primitives delivering multiple elements
    per cycle (e.g. UTF8/bytes columns)."""
    def __init__(
        self,
        bit_width,
        cmd_stream,
        cmd_idx_base,
        cmd_val_base,
        out_stream,
        out_length,
        out_el_stream,
        out_el_values,
        out_el_count,
        **kwargs
    ):
        super().__init__(**kwargs)
        # The generated hardware only supports power-of-two element widths.
        if not bit_width or bit_width & (bit_width-1):
            raise ValueError("bit width must be a power of two")
        self.bit_width = bit_width
        self.cmd_stream = cmd_stream
        # Command signals: index buffer and value buffer base addresses.
        self.cmd_idx_base = cmd_idx_base
        self.cmd_val_base = cmd_val_base
        self.out_stream = out_stream
        # Output signal: per-row list length.
        self.out_length = out_length
        # Secondary stream delivering up to `epc` elements per transfer.
        self.out_el_stream = out_el_stream
        self.out_el_values = out_el_values
        self.out_el_count = out_el_count
    @property
    def _cmdname(self):
        return "listprim"
    @property
    def _children(self):
        # Serialized as listprim(<bit_width>) in the CFG string.
        return [self.bit_width]
    @property
    def _config_defaults(self):
        # epc = elements per cycle; idx_* knobs configure the index
        # BufferReader, the rest configure the value BufferReader.
        # NOTE: insertion order matters — it determines the CFG string.
        return {
            "epc": 1,
            "idx_cmd_in_slice": False,
            "idx_bus_req_slice": True,
            "idx_bus_fifo_depth": 16,
            "idx_bus_fifo_ram_config": "",
            "idx_cmd_out_slice": True,
            "idx_unlock_slice": True,
            "idx_shr2gb_slice": False,
            "idx_gb2fifo_slice": False,
            "idx_fifo_size": 64,
            "idx_fifo_ram_config": "",
            "idx_fifo_xclk_stages": 0,
            "idx_fifo2post_slice": False,
            "cmd_in_slice": False,
            "bus_req_slice": True,
            "bus_fifo_depth": 16,
            "bus_fifo_ram_config": "",
            "unlock_slice": True,
            "shr2gb_slice": False,
            "gb2fifo_slice": False,
            "fifo_size": 64,
            "fifo_ram_config": "",
            "fifo_xclk_stages": 0,
            "fifo2post_slice": False,
            "out_slice": False,
            "len_out_slice": True,
            "data_in_slice": False,
            "len_sync_slice": True,
            "data_out_slice": True
        }
    def bus_count(self):
        # One bus for the index buffer, one for the value buffer.
        return 2
    def test_vectors(self, memory, row_count, commands):
        """Generate index vectors, random element buffers, and the packed
        multi-element-per-cycle output stream vectors."""
        # CAUTION: the exact order of random.* calls defines the reproducible
        # output for a given seed; do not reorder.
        child_length, child_commands, child_idxs, tvs = _list_test_vectors(
            self, memory, row_count, commands)
        buffers = []
        for _ in range(4):
            data = [random.randrange(1 << self.bit_width) for _ in range(child_length)]
            addr = self._write_buffer(memory, self.bit_width, data)
            buffers.append((addr, data))
        base_tv = TestVectors(self.cmd_val_base)
        val_tvs = [TestVectors(sig) for sig in self.out_el_values]
        cnt_tv = TestVectors(self.out_el_count)
        last_tv = TestVectors(self.out_el_stream.signals[0])
        dvalid_tv = TestVectors(self.out_el_stream.signals[1])
        for idxs in child_idxs:
            buf_idx = random.randrange(4)
            addr, cmd_data = buffers[buf_idx]
            base_tv.append(addr)
            for start, stop in idxs:
                # Pack this row's elements into transfers of up to
                # len(val_tvs) (= epc) elements each.
                data = cmd_data[start:stop]
                while True:
                    cnt = 0
                    for val_tv in val_tvs:
                        if data:
                            val_tv.append(data.pop(0))
                            cnt += 1
                        else:
                            # Unused lanes carry don't-care values.
                            val_tv.append()
                    cnt_tv.append(cnt)
                    dvalid_tv.append(1 if cnt > 0 else 0)
                    if not data:
                        # Last transfer of this row (also handles empty rows).
                        last_tv.append(1)
                        break
                    else:
                        last_tv.append(0)
        return tvs + val_tvs + [base_tv, cnt_tv, last_tv, dvalid_tv]
class StructReaderLevel(ReaderLevel):
    """Reader level that pairs two child levels into a struct() node."""

    def __init__(self, a, b, **kwargs):
        super().__init__(**kwargs)
        self.a = a
        self.b = b

    @property
    def _cmdname(self):
        return "struct"

    @property
    def _children(self):
        # Serialized as struct(<a>,<b>) in the CFG string.
        return [self.a, self.b]

    @property
    def _config_defaults(self):
        # A struct node carries no configuration of its own.
        return {}

    def bus_count(self):
        # The struct needs the buses of both of its children.
        return sum(child.bus_count() for child in (self.a, self.b))

    def test_vectors(self, memory, row_count, commands):
        """Concatenate the test vectors of both children, `a` first."""
        vectors = []
        for child in (self.a, self.b):
            vectors.extend(child.test_vectors(memory, row_count, commands))
        return vectors
def _new_cmd_stream(prefix, field_prefix=""):
    """Construct a command stream.

    Returns a (stream, ctrl) tuple, where ctrl is the signal group that the
    per-field generators later fill with base-address/flag signals.
    """
    name = prefix + "cmd_" + field_prefix
    stream = Stream(name)
    stream.append(Signal(name + "firstIdx", INDEX_WIDTH))
    stream.append(Signal(name + "lastIdx", INDEX_WIDTH))
    ctrl_group = stream.append(SignalGroup(name + "ctrl"))
    stream.append(Signal(name + "tag", CMD_TAG_WIDTH))
    return stream, ctrl_group
def _new_out_stream(prefix, field_prefix=""):
    """Construct a user output stream.

    Returns a (stream, data) tuple, where data is the signal group that the
    per-field generators later fill with value signals.
    """
    name = prefix + "out_" + field_prefix
    stream = Stream(name)
    # Handshake metadata first, then the payload group.
    for suffix in ("last", "dvalid"):
        stream.append(Signal(name + suffix))
    data_group = stream.append(SignalGroup(name + "data"))
    return stream, data_group
def _maybe_wrap_in_arbiter(reader, **opts):
if reader.bus_count() > 3:
reader = ArbReaderLevel(reader)
return reader
def _scalar_reader(field, prefix, field_prefix, cmd_stream, cmd_ctrl, out_stream, out_data, **opts):
    """Generate the reader level for a fixed-width scalar field.

    Appends the buffer base address to the command ctrl group and the value
    signal to the output data group, then builds a PrimReaderLevel.
    Returns (reader, []) — scalars add no secondary output streams.
    """
    cmd_val_base = cmd_ctrl.append(Signal(prefix + "cmd_" + field_prefix + "valBase", BUS_ADDR_WIDTH))
    out_val = out_data.append(Signal(prefix + "out_" + field_prefix + "val", field.bit_width))
    reader = PrimReaderLevel(
        field.bit_width,
        cmd_stream,
        cmd_val_base,
        out_stream,
        out_val,
        # Map per-field configuration keys onto PrimReaderLevel knobs.
        **field.get_cfg_dict({
            "cmd_in_slice": "cmd_in_slice",
            "bus_req_slice": "bus_req_slice",
            "bus_fifo_depth": "bus_fifo_depth",
            "bus_fifo_ram_config": "bus_fifo_ram_config",
            "unlock_slice": "unlock_slice",
            "shr2gb_slice": "shr2gb_slice",
            "gb2fifo_slice": "gb2fifo_slice",
            "fifo_size": "fifo_size",
            "fifo_ram_config": "fifo_ram_config",
            "fifo_xclk_stages": "fifo_xclk_stages",
            "fifo2post_slice": "fifo2post_slice",
            "out_slice": "out_slice"
        })
    )
    return reader, []
def _bytes_reader(field, prefix, field_prefix, cmd_stream, cmd_ctrl, out_stream, out_data, **opts):
    """Generate the reader level for a bytes/UTF8 field (list of primitives
    with multiple elements per cycle).

    Returns (reader, [element stream]) — the element stream is a secondary
    output stream alongside the per-row length on the main stream.
    """
    cmd_val_base = cmd_ctrl.append(Signal(prefix + "cmd_" + field_prefix + "valBase", BUS_ADDR_WIDTH))
    cmd_idx_base = cmd_ctrl.append(Signal(prefix + "cmd_" + field_prefix + "idxBase", BUS_ADDR_WIDTH))
    out_length = out_data.append(Signal(prefix + "out_" + field_prefix + "len", INDEX_WIDTH))
    out_el_stream, out_el_data = _new_out_stream(prefix, field_prefix + "el_")
    # epc = elements delivered per cycle; the count signal must be able to
    # represent 0..epc inclusive, hence bit_length(epc) bits.
    epc = field.bytes_per_cycle
    out_el_count = out_el_data.append(Signal(prefix + "out_" + field_prefix + "el_cnt", int.bit_length(epc)))
    out_el_values = [
        Signal(prefix + "out_" + field_prefix + "el_val" + str(i), field.bit_width)
        for i in range(epc)
    ]
    # Append in reverse so element 0 ends up in the low bits of the
    # concatenated data vector.
    for sig in reversed(out_el_values):
        out_el_data.append(sig)
    reader = ListPrimReaderLevel(
        field.bit_width,
        cmd_stream,
        cmd_idx_base,
        cmd_val_base,
        out_stream,
        out_length,
        out_el_stream,
        out_el_values,
        out_el_count,
        # Map per-field configuration keys onto ListPrimReaderLevel knobs.
        **field.get_cfg_dict({
            "bytes_per_cycle": "epc",
            "idx_cmd_in_slice": "idx_cmd_in_slice",
            "idx_bus_req_slice": "idx_bus_req_slice",
            "idx_bus_fifo_depth": "idx_bus_fifo_depth",
            "idx_bus_fifo_ram_config": "idx_bus_fifo_ram_config",
            "idx_cmd_out_slice": "idx_cmd_out_slice",
            "idx_unlock_slice": "idx_unlock_slice",
            "idx_shr2gb_slice": "idx_shr2gb_slice",
            "idx_gb2fifo_slice": "idx_gb2fifo_slice",
            "idx_fifo_size": "idx_fifo_size",
            "idx_fifo_ram_config": "idx_fifo_ram_config",
            "idx_fifo_xclk_stages": "idx_fifo_xclk_stages",
            "idx_fifo2post_slice": "idx_fifo2post_slice",
            "cmd_in_slice": "cmd_in_slice",
            "bus_req_slice": "bus_req_slice",
            "bus_fifo_depth": "bus_fifo_depth",
            "bus_fifo_ram_config": "bus_fifo_ram_config",
            "unlock_slice": "unlock_slice",
            "shr2gb_slice": "shr2gb_slice",
            "gb2fifo_slice": "gb2fifo_slice",
            "fifo_size": "fifo_size",
            "fifo_ram_config": "fifo_ram_config",
            "fifo_xclk_stages": "fifo_xclk_stages",
            "fifo2post_slice": "fifo2post_slice",
            "out_slice": "out_slice",
            "len_out_slice": "len_out_slice",
            "data_in_slice": "data_in_slice",
            "len_sync_slice": "len_sync_slice",
            "data_out_slice": "data_out_slice"
        })
    )
    return reader, [out_el_stream]
def _list_reader(field, prefix, field_prefix, cmd_stream, cmd_ctrl, out_stream, out_data, **opts):
    """Generate the reader level for a list field with an arbitrary child.

    The child's signals land on a fresh element stream; the main stream
    carries the per-row length. Returns (reader, secondary streams) with the
    element stream first.
    """
    out_length = out_data.append(Signal(prefix + "out_" + field_prefix + "len", INDEX_WIDTH))
    out_el_stream, out_el_data = _new_out_stream(prefix, field_prefix + field.child.name + "_")
    # Recurse into the element type first so its command signals precede the
    # index base address appended below.
    reader, secondary_out_streams = _field_reader(
        field.child,
        prefix, field_prefix,
        cmd_stream, cmd_ctrl,
        out_el_stream, out_el_data,
        **opts)
    cmd_idx_base = cmd_ctrl.append(Signal(prefix + "cmd_" + field_prefix + "idxBase", BUS_ADDR_WIDTH))
    reader = ListReaderLevel(
        reader,
        cmd_stream,
        cmd_idx_base,
        out_stream,
        out_length,
        out_el_stream,
        # Map per-field configuration keys onto ListReaderLevel knobs.
        **field.get_cfg_dict({
            "cmd_in_slice": "cmd_in_slice",
            "bus_req_slice": "bus_req_slice",
            "bus_fifo_depth": "bus_fifo_depth",
            "bus_fifo_ram_config": "bus_fifo_ram_config",
            "cmd_out_slice": "cmd_out_slice",
            "unlock_slice": "unlock_slice",
            "shr2gb_slice": "shr2gb_slice",
            "gb2fifo_slice": "gb2fifo_slice",
            "fifo_size": "fifo_size",
            "fifo_ram_config": "fifo_ram_config",
            "fifo_xclk_stages": "fifo_xclk_stages",
            "fifo2post_slice": "fifo2post_slice",
            "len_out_slice": "len_out_slice",
            "len_sync_slice": "len_sync_slice",
            "data_in_slice": "data_in_slice",
            "data_out_slice": "data_out_slice"
        })
    )
    return reader, [out_el_stream] + secondary_out_streams
def _struct_reader(field, prefix, field_prefix, cmd_stream, cmd_ctrl, out_stream, out_data, **opts):
    """Generate the reader level tree for a struct field.

    The child readers are generated first, then combined pairwise into a
    balanced binary tree of StructReaderLevel nodes, wrapping intermediate
    levels in arbiters as they accumulate buses.

    Returns (reader, secondary out streams) like the other per-field
    generators.
    """
    child_readers = []
    secondary_out_streams = []
    for child in field.iter_children():
        child_reader, child_secondary_out_stream = _field_reader(
            child,
            prefix, field_prefix,
            cmd_stream, cmd_ctrl,
            out_stream, out_data,
            **opts)
        child_readers.append(child_reader)
        secondary_out_streams.extend(child_secondary_out_stream)
    # Guard against a childless struct: the reduction loop below would
    # otherwise spin forever, because an empty list never shrinks to one.
    if not child_readers:
        raise ValueError("struct field must have at least one child")
    while True:
        if len(child_readers) == 1:
            reader = child_readers[0]
            break
        # Add a level of structs.
        it = iter(child_readers)
        child_readers = []
        for a, b in zip_longest(*[it]*2, fillvalue=None):
            if b is None:
                # Odd amount of child readers at this level of the binary tree;
                # add the last reader without an additional struct level.
                child_readers.append(a)
            else:
                struct = StructReaderLevel(a, b)
                struct = _maybe_wrap_in_arbiter(struct, **opts)
                child_readers.append(struct)
    return reader, secondary_out_streams
def _field_reader(field, prefix, field_prefix, cmd_stream, cmd_ctrl, out_stream, out_data, **opts):
    """Generate the reader level (tree) for an arbitrary field.

    Dispatches on the field type, wraps nullable fields in a null-bitmap
    level, and wraps the result in an arbiter when it accumulates too many
    buses. Returns (reader, secondary out streams).
    """
    if not isinstance(field, Field):
        raise TypeError("field must be of type %s" % Field)
    if field.is_null():
        raise ValueError("cannot make a reader for a null field")
    # Update the field prefix.
    if field_prefix is None:
        field_prefix = ""
    else:
        field_prefix += field.name + "_"
    # Add the signals for the null reader if this field is nullable. This must
    # be done before going down the hierarchy.
    if field.nullable:
        out_not_null = out_data.append(Signal(prefix + "out_" + field_prefix + "notNull"))
    # Defer to the field-specific generators.
    for typ, gen in [
        (ScalarField, _scalar_reader),
        (BytesField, _bytes_reader),
        (ListField, _list_reader),
        (StructField, _struct_reader)
    ]:
        if isinstance(field, typ):
            reader, secondary_out_streams = gen(
                field,
                prefix, field_prefix,
                cmd_stream, cmd_ctrl,
                out_stream, out_data,
                **opts)
            break
    else:
        # Fixed from `raise NotImplemented(...)`: NotImplemented is a sentinel
        # value, not an exception type, so raising it is a TypeError.
        raise NotImplementedError("No code generator is implemented for Field type %s" % type(field))
    # Command stream signals must be appended after traversing into the
    # hierarchy.
    if field.nullable:
        cmd_no_nulls = cmd_ctrl.append(Signal(prefix + "cmd_" + field_prefix + "noNulls"))
        cmd_null_base = cmd_ctrl.append(Signal(prefix + "cmd_" + field_prefix + "nullBase", BUS_ADDR_WIDTH))
    # Generate the null() level if this field is nullable.
    if field.nullable:
        reader = NullReaderLevel(
            reader,
            cmd_stream,
            cmd_no_nulls,
            cmd_null_base,
            out_stream,
            out_not_null,
            # Map the field's null_* configuration keys onto the level knobs.
            **field.get_cfg_dict({
                "null_cmd_in_slice": "cmd_in_slice",
                "null_bus_req_slice": "bus_req_slice",
                "null_bus_fifo_depth": "bus_fifo_depth",
                "null_bus_fifo_ram_config": "bus_fifo_ram_config",
                "null_unlock_slice": "unlock_slice",
                "null_shr2gb_slice": "shr2gb_slice",
                "null_gb2fifo_slice": "gb2fifo_slice",
                "null_fifo_size": "fifo_size",
                "null_fifo_ram_config": "fifo_ram_config",
                "null_fifo_xclk_stages": "fifo_xclk_stages",
                "null_fifo2post_slice": "fifo2post_slice",
                "null_out_slice": "out_slice"
            })
        )
    # Wrap the field in an arbiter based on the arbiter policy.
    reader = _maybe_wrap_in_arbiter(reader, **opts)
    return reader, secondary_out_streams
wrapper_body_template = """
-- Copyright (C) Delft University of Technology - All Rights Reserved
-- (until further notice)
library ieee;
use ieee.std_logic_1164.all;
use ieee.numeric_std.all;
library work;
use work.Streams.all;
use work.Utils.all;
use work.ColumnConfig.all;
use work.ColumnConfigParse.all;
use work.Columns.all;
entity {camelprefix}ColumnReader is
generic (
---------------------------------------------------------------------------
-- Bus metrics and configuration
---------------------------------------------------------------------------
-- Bus address width.
BUS_ADDR_WIDTH : natural := 32;
-- Bus burst length width.
BUS_LEN_WIDTH : natural := 8;
-- Bus data width.
BUS_DATA_WIDTH : natural := 32;
-- Number of beats in a burst step.
BUS_BURST_STEP_LEN : natural := 4;
-- Maximum number of beats in a burst.
BUS_BURST_MAX_LEN : natural := 16;
---------------------------------------------------------------------------
-- Arrow metrics and configuration
---------------------------------------------------------------------------
-- Index field width.
INDEX_WIDTH : natural := 32;
---------------------------------------------------------------------------
-- Column metrics and configuration
---------------------------------------------------------------------------
-- Enables or disables command stream tag system. When enabled, an
-- additional output stream is created that returns tags supplied along
-- with the command stream when all BufferReaders finish making bus
-- requests for the command. This can be used to support chunking later.
CMD_TAG_ENABLE : boolean := false;
-- Command stream tag width. Must be at least 1 to avoid null vectors.
CMD_TAG_WIDTH : natural := 1
);
port (
---------------------------------------------------------------------------
-- Clock domains
---------------------------------------------------------------------------
-- Rising-edge sensitive clock and active-high synchronous reset for the
-- bus and control logic side of the BufferReader.
bus_clk : in std_logic;
bus_reset : in std_logic;
-- Rising-edge sensitive clock and active-high synchronous reset for the
-- accelerator side.
acc_clk : in std_logic;
acc_reset : in std_logic;
---------------------------------------------------------------------------
-- Command streams
---------------------------------------------------------------------------
-- Command stream input (bus clock domain). firstIdx and lastIdx represent
-- a range of elements to be fetched from memory. firstIdx is inclusive,
-- lastIdx is exclusive for normal buffers and inclusive for index buffers,
-- in all cases resulting in lastIdx - firstIdx elements. The ctrl vector
-- is a concatenation of the base address for each buffer and the null
-- bitmap present flags, dependent on CFG.
@cmd_ports
-- Unlock stream (bus clock domain). Produces the chunk tags supplied by
-- the command stream when all BufferReaders finish processing the command.
unlock_valid : out std_logic;
unlock_ready : in std_logic := '1';
unlock_tag : out std_logic_vector(CMD_TAG_WIDTH-1 downto 0);
---------------------------------------------------------------------------
-- Bus access ports
---------------------------------------------------------------------------
-- Bus access port (bus clock domain).
bus_rreq_valid : out std_logic;
bus_rreq_ready : in std_logic;
bus_rreq_addr : out std_logic_vector(BUS_ADDR_WIDTH-1 downto 0);
bus_rreq_len : out std_logic_vector(BUS_LEN_WIDTH-1 downto 0);
bus_rdat_valid : in std_logic;
bus_rdat_ready : out std_logic;
bus_rdat_data : in std_logic_vector(BUS_DATA_WIDTH-1 downto 0);
bus_rdat_last : in std_logic;
---------------------------------------------------------------------------
-- User streams
---------------------------------------------------------------------------
@out_ports
);
end {camelprefix}ColumnReader;
architecture Behavioral of {camelprefix}ColumnReader is
@defs
begin
@arch
-- Wrap an arbiter and register slices around the requested column reader.
{lowerprefix}inst: ColumnReaderLevel
generic map (
BUS_ADDR_WIDTH => BUS_ADDR_WIDTH,
BUS_LEN_WIDTH => BUS_LEN_WIDTH,
BUS_DATA_WIDTH => BUS_DATA_WIDTH,
BUS_BURST_STEP_LEN => BUS_BURST_STEP_LEN,
BUS_BURST_MAX_LEN => BUS_BURST_MAX_LEN,
INDEX_WIDTH => INDEX_WIDTH,
CFG => "{cfg}",
CMD_TAG_ENABLE => CMD_TAG_ENABLE,
CMD_TAG_WIDTH => CMD_TAG_WIDTH
)
port map (
bus_clk => bus_clk,
bus_reset => bus_reset,
acc_clk => acc_clk,
acc_reset => acc_reset,
cmd_valid => cmd_valid,
cmd_ready => cmd_ready,
cmd_firstIdx => cmd_firstIdx,
cmd_lastIdx => cmd_lastIdx,
cmd_ctrl => cmd_ctrl,
cmd_tag => cmd_tag,
unlock_valid => unlock_valid,
unlock_ready => unlock_ready,
unlock_tag => unlock_tag,
bus_rreq_valid(0) => bus_rreq_valid,
bus_rreq_ready(0) => bus_rreq_ready,
bus_rreq_addr => bus_rreq_addr,
bus_rreq_len => bus_rreq_len,
bus_rdat_valid(0) => bus_rdat_valid,
bus_rdat_ready(0) => bus_rdat_ready,
bus_rdat_data => bus_rdat_data,
bus_rdat_last(0) => bus_rdat_last,
out_valid => out_valids,
out_ready => out_readys,
out_last => out_lasts,
out_dvalid => out_dvalids,
out_data => out_datas
);
end Behavioral;
"""
wrapper_component_template = """
component {camelprefix}ColumnReader is
generic (
BUS_ADDR_WIDTH : natural := 32;
BUS_LEN_WIDTH : natural := 8;
BUS_DATA_WIDTH : natural := 32;
BUS_BURST_STEP_LEN : natural := 4;
BUS_BURST_MAX_LEN : natural := 16;
INDEX_WIDTH : natural := 32;
CMD_TAG_ENABLE : boolean := false;
CMD_TAG_WIDTH : natural := 1
);
port (
bus_clk : in std_logic;
bus_reset : in std_logic;
acc_clk : in std_logic;
acc_reset : in std_logic;
@cmd_ports
unlock_valid : out std_logic;
unlock_ready : in std_logic := '1';
unlock_tag : out std_logic_vector(CMD_TAG_WIDTH-1 downto 0);
bus_rreq_valid : out std_logic;
bus_rreq_ready : in std_logic;
bus_rreq_addr : out std_logic_vector(BUS_ADDR_WIDTH-1 downto 0);
bus_rreq_len : out std_logic_vector(BUS_LEN_WIDTH-1 downto 0);
bus_rdat_valid : in std_logic;
bus_rdat_ready : out std_logic;
bus_rdat_data : in std_logic_vector(BUS_DATA_WIDTH-1 downto 0);
bus_rdat_last : in std_logic;
@out_ports
);
end component;
"""
uut_template_with_unlock = """
uut: ColumnReaderLevel
generic map (
BUS_ADDR_WIDTH => BUS_ADDR_WIDTH,
BUS_LEN_WIDTH => BUS_LEN_WIDTH,
BUS_DATA_WIDTH => BUS_DATA_WIDTH,
BUS_BURST_STEP_LEN => BUS_BURST_STEP_LEN,
BUS_BURST_MAX_LEN => BUS_BURST_MAX_LEN,
INDEX_WIDTH => INDEX_WIDTH,
CFG => "{cfg}",
CMD_TAG_ENABLE => CMD_TAG_ENABLE,
CMD_TAG_WIDTH => CMD_TAG_WIDTH
)
port map (
bus_clk => bus_clk,
bus_reset => bus_reset,
acc_clk => {acc}_clk,
acc_reset => {acc}_reset,
cmd_valid => cmd_valid,
cmd_ready => cmd_ready,
cmd_firstIdx => cmd_firstIdx,
cmd_lastIdx => cmd_lastIdx,
cmd_ctrl => cmd_ctrl,
cmd_tag => cmd_tag,
unlock_valid => unlock_valid,
unlock_ready => unlock_ready,
unlock_tag => unlock_tag,
bus_rreq_valid(0) => bus_rreq_valid,
bus_rreq_ready(0) => bus_rreq_ready,
bus_rreq_addr => bus_rreq_addr,
bus_rreq_len => bus_rreq_len,
bus_rdat_valid(0) => bus_rdat_valid,
bus_rdat_ready(0) => bus_rdat_ready,
bus_rdat_data => bus_rdat_data,
bus_rdat_last(0) => bus_rdat_last,
out_valid => out_valids,
out_ready => out_readys,
out_last => out_lasts,
out_dvalid => out_dvalids,
out_data => out_datas
);
"""
uut_template_without_unlock = """
uut: ColumnReaderLevel
generic map (
BUS_ADDR_WIDTH => BUS_ADDR_WIDTH,
BUS_LEN_WIDTH => BUS_LEN_WIDTH,
BUS_DATA_WIDTH => BUS_DATA_WIDTH,
BUS_BURST_STEP_LEN => BUS_BURST_STEP_LEN,
BUS_BURST_MAX_LEN => BUS_BURST_MAX_LEN,
INDEX_WIDTH => INDEX_WIDTH,
CFG => "{cfg}",
CMD_TAG_ENABLE => CMD_TAG_ENABLE,
CMD_TAG_WIDTH => CMD_TAG_WIDTH
)
port map (
bus_clk => bus_clk,
bus_reset => bus_reset,
acc_clk => {acc}_clk,
acc_reset => {acc}_reset,
cmd_valid => cmd_valid,
cmd_ready => cmd_ready,
cmd_firstIdx => cmd_firstIdx,
cmd_lastIdx => cmd_lastIdx,
cmd_ctrl => cmd_ctrl,
bus_rreq_valid(0) => bus_rreq_valid,
bus_rreq_ready(0) => bus_rreq_ready,
bus_rreq_addr => bus_rreq_addr,
bus_rreq_len => bus_rreq_len,
bus_rdat_valid(0) => bus_rdat_valid,
bus_rdat_ready(0) => bus_rdat_ready,
bus_rdat_data => bus_rdat_data,
bus_rdat_last(0) => bus_rdat_last,
out_valid => out_valids,
out_ready => out_readys,
out_last => out_lasts,
out_dvalid => out_dvalids,
out_data => out_datas
);
"""
class ColumnReader(object):
    """Top-level generator for a ColumnReader.

    Builds the stream/signal model and reader-level tree for a Field and can
    emit the VHDL wrapper body, its component declaration, and a randomized
    testbench.
    """

    def __init__(self, field, instance_prefix=None, signal_prefix="", bus_clk_prefix="", main_clk_prefix="", **opts):
        super().__init__()
        # Basic error checking.
        if not isinstance(field, Field):
            raise TypeError("field must be of type %s" % Field)
        self.field = field
        # Figure out the prefixes.
        if instance_prefix is None:
            instance_prefix = field.name
        if instance_prefix and not instance_prefix[-1] == "_":
            instance_prefix += "_"
        self.instance_prefix = instance_prefix
        if signal_prefix is None:
            signal_prefix = field.name
        if signal_prefix and not signal_prefix[-1] == "_":
            signal_prefix += "_"
        self.signal_prefix = signal_prefix
        if bus_clk_prefix and not bus_clk_prefix[-1] == "_":
            bus_clk_prefix += "_"
        self.bus_clk_prefix = bus_clk_prefix
        if main_clk_prefix and not main_clk_prefix[-1] == "_":
            main_clk_prefix += "_"
        self.main_clk_prefix = main_clk_prefix
        # Construct the streams.
        self.cmd_stream, cmd_ctrl = _new_cmd_stream(self.signal_prefix)
        p = self.signal_prefix + "unlock_"
        self.unlock_stream = Stream(p)
        self.unlock_stream.append(Signal(p + "tag", CMD_TAG_WIDTH))
        p = self.signal_prefix + "bus_rreq_"
        self.bus_rreq_stream = Stream(p)
        self.bus_rreq_stream.append(Signal(p + "addr", BUS_ADDR_WIDTH))
        self.bus_rreq_stream.append(Signal(p + "len", BUS_LEN_WIDTH))
        p = self.signal_prefix + "bus_rdat_"
        self.bus_rdat_stream = Stream(p)
        self.bus_rdat_stream.append(Signal(p + "data", BUS_DATA_WIDTH))
        self.bus_rdat_stream.append(Signal(p + "last"))
        main_out_stream, out_data = _new_out_stream(self.signal_prefix)
        # Construct the field reader.
        reader, secondary_out_streams = _field_reader(
            self.field,
            self.signal_prefix, None,
            self.cmd_stream, cmd_ctrl,
            main_out_stream, out_data,
            **opts)
        # If the reader has more than one bus, wrap in an arbiter.
        if reader.bus_count() > 1:
            reader = ArbReaderLevel(reader)
        self.reader = reader
        # Construct the output stream group.
        self.out_stream = StreamGroup(main_out_stream, *secondary_out_streams)

    @property
    def _camel_prefix(self):
        """Instance prefix in CamelCase, used for VHDL entity names."""
        return "".join([w[:1].upper() + w[1:] for w in self.instance_prefix.split("_")])

    @property
    def _lower_prefix(self):
        """Instance prefix in lowercase, used for VHDL instance labels."""
        return self.instance_prefix.lower()

    def cfg(self):
        """Return the CFG string describing the reader-level tree."""
        return str(self.reader)

    def wrapper_body(self):
        """Generate the VHDL entity + architecture of the wrapper."""
        return gen_template(
            wrapper_body_template,
            camelprefix = self._camel_prefix,
            lowerprefix = self._lower_prefix,
            cfg = self.cfg(),
            cmd_ports = self.cmd_stream.def_ports(PortDir.IN, False),
            out_ports = self.out_stream.def_ports(PortDir.OUT, False).trimsep(),
            defs = self.cmd_stream.def_signals(False) + self.out_stream.def_signals(False),
            arch = self.cmd_stream.arch_serialize() + self.out_stream.arch_deserialize()
        )

    def wrapper_component(self):
        """Generate the VHDL component declaration for the wrapper."""
        return gen_template(
            wrapper_component_template,
            # Fixed: previously self.instance_prefix[:-1], which yields a
            # component name that differs from the entity name emitted by
            # wrapper_body() (e.g. "my_field" vs "MyField"), breaking VHDL
            # default component/entity binding for prefixes with underscores.
            camelprefix = self._camel_prefix,
            cmd_ports = self.cmd_stream.def_ports(PortDir.IN, False),
            out_ports = self.out_stream.def_ports(PortDir.OUT, False).trimsep()
        )

    def testbench(self, **kwargs):
        """Generate a randomized self-checking testbench as VHDL text.

        Any generator parameter (seed, row_count, data_width, ...) may be
        overridden via kwargs; the rest are randomized from the seed.
        NOTE: the order of random.* calls below defines the reproducible
        output for a given seed; do not reorder.
        """
        # Randomize any parameters not explicitly given.
        params = []
        def get_param(name, default):
            value = kwargs.get(name, default)
            params.append((name, value))
            return value
        seed = get_param("seed", random.randrange(1<<32))
        random.seed(seed)
        row_count = get_param("row_count", 100)
        cmd_count = get_param("cmd_count", 100)
        addr_width = get_param("addr_width", random.randint(32, 64))
        data_width = get_param("data_width", 1 << random.randint(5, 9))
        burst_step_len = get_param("burst_step_len", max(self.field.widest() // data_width, 1 << random.randint(0, 5)))
        burst_max_len = get_param("burst_max_len", burst_step_len * (1 << random.randint(0, 4)))
        len_width = get_param("len_width", random.randint(1, 4) * int.bit_length(burst_max_len))
        tag_width = get_param("tag_width", random.choice([0, 1, 4]))
        multi_clk = get_param("multi_clk", True)
        random_bus_rreq_timing = get_param("random_bus_rreq_timing", random.choice([True, False]))
        random_bus_rdat_timing = get_param("random_bus_rdat_timing", random.choice([True, False]))
        # Generate the testbench wrapper object.
        acc = "acc" if multi_clk else "bus"
        tb = Testbench(self._camel_prefix + "ColumnReader_tb", {"bus", acc})
        # Set constants.
        tb.set_const("BUS_ADDR_WIDTH", addr_width)
        tb.set_const("BUS_LEN_WIDTH", len_width)
        tb.set_const("BUS_DATA_WIDTH", data_width)
        tb.set_const("BUS_BURST_STEP_LEN", burst_step_len)
        tb.set_const("BUS_BURST_MAX_LEN", burst_max_len)
        tb.set_const("INDEX_WIDTH", 32)
        tb.set_const("CMD_TAG_ENABLE", tag_width > 0)
        tb.set_const("CMD_TAG_WIDTH", max(1, tag_width))
        # Add the streams.
        tb.append_input_stream(self.cmd_stream, "bus")
        if tag_width > 0:
            tb.append_output_stream(self.unlock_stream, "bus")
        tb.append_output_stream(self.bus_rreq_stream, "bus")
        tb.append_input_stream(self.bus_rdat_stream, "bus")
        tb.append_output_stream(self.out_stream, acc)
        # Generate a random set of commands.
        commands = []
        for _ in range(cmd_count):
            a = random.randrange(row_count)
            b = random.randrange(row_count)
            commands.append((min(a, b), max(a, b) + 1))
        # Generate toplevel command stream signal test vectors.
        cmd_first_tv = tb.append_test_vector(TestVectors(self.cmd_stream.signals[0]))
        cmd_last_tv = tb.append_test_vector(TestVectors(self.cmd_stream.signals[1]))
        for start, stop in commands:
            cmd_first_tv.append(start)
            cmd_last_tv.append(stop)
        # Generate tag stream signal test vectors.
        if tag_width > 0:
            tags = [random.randrange(1 << tag_width) for _ in commands]
            tb.append_test_vector(TestVectors(self.cmd_stream.signals[-1])).extend(tags)
            tb.append_test_vector(TestVectors(self.unlock_stream.signals[0])).extend(tags)
        # Generate output stream master last/dvalid test vectors.
        out_last_tv = tb.append_test_vector(TestVectors(self.out_stream.streams[0].signals[0]))
        out_dvalid_tv = tb.append_test_vector(TestVectors(self.out_stream.streams[0].signals[1]))
        for start, stop in commands:
            for i in range(start, stop):
                out_last_tv.append(int(i == stop - 1))
                out_dvalid_tv.append(1)
        # Generate a memory model.
        memory = Memory()
        tb.append_memory(memory, self.bus_rreq_stream, self.bus_rdat_stream, "bus",
                         random_bus_rreq_timing, random_bus_rdat_timing)
        # Generate the test vectors for the readers.
        tvs = self.reader.test_vectors(memory, row_count, commands)
        for tv in tvs:
            tb.append_test_vector(tv)
        # Append unit under test.
        template = uut_template_with_unlock if tag_width > 0 else uut_template_without_unlock
        tb.append_uut(template.format(cfg=self.cfg(), acc=acc))
        # Add documentation.
        doc = []
        doc.append("Memory dump:")
        doc.extend(["  " + x for x in memory.hexdump().split("\n")])
        doc.append("")
        doc.append("Command stream:")
        transfer = 1
        for i, (start, end) in enumerate(commands):
            doc.append("  Command %3d: %4d to %4d = out transfer %5d to %5d" % (
                i + 1, start, end - 1, transfer, transfer + (end - start - 1)))
            transfer += end - start
        doc.append("")
        doc.append("Generator parameters:")
        doc.extend(["  %s: %s" % x for x in params])
        doc.append("")
        doc.append("Schema:")
        doc.extend(["  " + x for x in self.field.pprint().split("\n")])
        tb.append_uut("\n".join(["  -- " + x for x in doc]))
        return str(tb)
f72045c3f2c81d8d986759ab3a1d7b3e2137875b | 2,936 | py | Python | Whole Protein Prediction CNN/dataset.py | LucaAngioloni/ProteineSecondaryStructure-CNN | c85571bbcdf17b4a753dce6ed0e4346111ea43a0 | [
"MIT"
] | 96 | 2018-02-02T14:11:56.000Z | 2021-12-25T21:23:55.000Z | Whole Protein Prediction CNN/dataset.py | LucaAngioloni/ProteineSecondaryStructure-CNN | c85571bbcdf17b4a753dce6ed0e4346111ea43a0 | [
"MIT"
] | 3 | 2021-05-11T12:10:04.000Z | 2022-02-10T00:05:30.000Z | Whole Protein Prediction CNN/dataset.py | LucaAngioloni/ProteineSecondaryStructure-CNN | c85571bbcdf17b4a753dce6ed0e4346111ea43a0 | [
"MIT"
] | 33 | 2018-11-20T16:10:24.000Z | 2021-12-25T21:23:59.000Z | # MIT License
#
# Copyright (c) 2017 Luca Angioloni
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
# Paths to the pre-processed CullPDB / CB513 protein datasets (.npy dumps).
dataset_path = "../dataset/cullpdb+profile_6133.npy"
# dataset_path = "../dataset/cullpdb+profile_6133_filtered.npy"
cb513_path = "../dataset/cb513+profile_split1.npy"
# Each protein sequence is padded/truncated to 700 residues, 57 raw features per residue.
sequence_len = 700
total_features = 57
# 21 amino-acid residue channels kept as input features; 8 secondary-structure classes as labels.
amino_acid_residues = 21
num_classes = 8
def get_dataset(path=dataset_path):
    """Load a raw protein .npy dump and keep only the profile features
    plus the 8-state secondary-structure labels.

    Args:
        path (str): Location of the flattened .npy dataset file.

    Returns:
        numpy.ndarray: Array of shape
        (n_proteins, sequence_len, amino_acid_residues + num_classes),
        where the first 21 channels are the raw columns 35:56 and the
        last 8 channels are the structure labels (raw columns 22:30).
    """
    raw = np.load(path)
    raw = raw.reshape(raw.shape[0], sequence_len, total_features)
    combined = np.zeros((raw.shape[0], raw.shape[1], amino_acid_residues + num_classes))
    combined[:, :, :amino_acid_residues] = raw[:, :, 35:56]
    label_start = amino_acid_residues + 1
    combined[:, :, amino_acid_residues:] = raw[:, :, label_start:label_start + num_classes]
    return combined
def get_data_labels(D):
    """Split a combined feature/label tensor into inputs and targets.

    Args:
        D (numpy.ndarray): Array whose last axis holds the 21 residue
            channels followed by the 8 label channels.

    Returns:
        tuple: (X, Y) views over ``D`` — features and one-hot labels.
    """
    X = D[..., :amino_acid_residues]
    Y = D[..., amino_acid_residues:amino_acid_residues + num_classes]
    return X, Y
def split_like_paper(Dataset):
    """Split the dataset into the fixed partition described by the
    CullPDB 6133 readme and the reference paper.

    Args:
        Dataset (numpy.ndarray): Full dataset, first axis = proteins.

    Returns:
        tuple: (Train, Test, Validation) views — rows [0, 5600),
        [5600, 5877) and [5877, end) respectively.
    """
    train_end, test_end = 5600, 5877
    Train = Dataset[:train_end]
    Test = Dataset[train_end:test_end]
    Validation = Dataset[test_end:]
    return Train, Test, Validation
def split_with_shuffle(Dataset, seed=None):
    """Shuffle the dataset in place (along the first axis) and split it
    80/10/10 into train/test/validation views.

    Args:
        Dataset (numpy.ndarray): Full dataset; NOTE it is shuffled in place.
        seed (int, optional): Seed for numpy's global RNG, for reproducibility.

    Returns:
        tuple: (Train, Test, Validation) views over the shuffled array.
    """
    np.random.seed(seed)
    np.random.shuffle(Dataset)
    n_samples = Dataset.shape[0]
    n_train = int(n_samples * 0.8)
    n_test = int(n_samples * 0.1)
    Train = Dataset[:n_train]
    Test = Dataset[n_train:n_train + n_test]
    Validation = Dataset[n_train + n_test:]
    return Train, Test, Validation
def get_cb513():
    """Load the CB513 benchmark set and return its (X, Y) arrays."""
    return get_data_labels(get_dataset(cb513_path))
if __name__ == '__main__':
    # Smoke test: load the full dataset, shuffle-split it 80/10/10 with a
    # fixed seed for reproducibility, and separate features from labels.
    dataset = get_dataset()
    D_train, D_test, D_val = split_with_shuffle(dataset, 100)
    X_train, Y_train = get_data_labels(D_train)
    X_test, Y_test = get_data_labels(D_test)
    X_val, Y_val = get_data_labels(D_val)
print("Dataset Loaded") | 34.952381 | 108 | 0.722071 |
import numpy as np
dataset_path = "../dataset/cullpdb+profile_6133.npy"
cb513_path = "../dataset/cb513+profile_split1.npy"
sequence_len = 700
total_features = 57
amino_acid_residues = 21
num_classes = 8
def get_dataset(path=dataset_path):
ds = np.load(path)
ds = np.reshape(ds, (ds.shape[0], sequence_len, total_features))
ret = np.zeros((ds.shape[0], ds.shape[1], amino_acid_residues + num_classes))
ret[:, :, 0:amino_acid_residues] = ds[:, :, 35:56]
ret[:, :, amino_acid_residues:] = ds[:, :, amino_acid_residues + 1:amino_acid_residues+ 1 + num_classes]
return ret
def get_data_labels(D):
X = D[:, :, 0:amino_acid_residues]
Y = D[:, :, amino_acid_residues:amino_acid_residues + num_classes]
return X, Y
def split_like_paper(Dataset):
Train = Dataset[0:5600, :, :]
Test = Dataset[5600:5877, :, :]
Validation = Dataset[5877:, :, :]
return Train, Test, Validation
def split_with_shuffle(Dataset, seed=None):
np.random.seed(seed)
np.random.shuffle(Dataset)
train_split = int(Dataset.shape[0]*0.8)
test_val_split = int(Dataset.shape[0]*0.1)
Train = Dataset[0:train_split, :, :]
Test = Dataset[train_split:train_split+test_val_split, :, :]
Validation = Dataset[train_split+test_val_split:, :, :]
return Train, Test, Validation
def get_cb513():
CB = get_dataset(cb513_path)
X, Y = get_data_labels(CB)
return X, Y
if __name__ == '__main__':
dataset = get_dataset()
D_train, D_test, D_val = split_with_shuffle(dataset, 100)
X_train, Y_train = get_data_labels(D_train)
X_test, Y_test = get_data_labels(D_test)
X_val, Y_val = get_data_labels(D_val)
print("Dataset Loaded") | true | true |
f720461520565da530df980a2ea008f3eb571a8d | 6,931 | py | Python | project/celebrities_births.py | Yoon-D-G/celebrity_scraper | 002fa7487408f05b896812d8fe6cde5cbb5d5edd | [
"MIT"
] | null | null | null | project/celebrities_births.py | Yoon-D-G/celebrity_scraper | 002fa7487408f05b896812d8fe6cde5cbb5d5edd | [
"MIT"
] | null | null | null | project/celebrities_births.py | Yoon-D-G/celebrity_scraper | 002fa7487408f05b896812d8fe6cde5cbb5d5edd | [
"MIT"
] | null | null | null | '''
This script contains a class for representing the date.
Additionally, the class Scraper get the HTML code of a
Wikipedia page and extracts the name of celebrities that
were born in a certain date
'''
import re
import requests
from bs4 import BeautifulSoup
from datetime import datetime
class Date:
    '''
    This class is used to represent a calendar date.

    Attributes:
        _day_of_month (tuple): The number of days in each month of a common year
        _month_str (tuple): The English names of the months
        year (int): The year of the date.
        month (int): The month of the date (1-12).
        day (int): The day of the date (1-31).
    '''
    _day_of_month = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
    _month_str = ('January', 'February', 'March',
                  'April', 'May', 'June',
                  'July', 'August', 'September',
                  'October', 'November', 'December')

    def __init__(self, day: int, month: int, year: int):
        '''
        See help(Date) for accurate signature

        Raises:
            ValueError: If day/month/year do not form a valid date.
        '''
        if not self.is_date_valid(day, month, year):
            raise ValueError('Date not valid')
        self.year = year
        self.month = month
        self.day = day

    def __str__(self):
        '''
        This function is used to return the string representation of the date.

        Returns:
            str: The string representation of the date ("d-m-y").
        '''
        return "{0}-{1}-{2}".format(self.day, self.month, self.year)

    def __repr__(self):
        '''
        This function is used to return the string representation of the date.

        Returns:
            str: The string representation of the date ("d-m-y").
        '''
        return "{0}-{1}-{2}".format(self.day, self.month, self.year)

    def __eq__(self, other):
        '''
        This function is used to compare the date with other date.

        Args:
            other (Date): The other date to be compared with.

        Returns:
            bool: True if the date is equal to the other date, False otherwise.
        '''
        return self.year == other.year and self.month == other.month and \
               self.day == other.day

    def __lt__(self, other):
        '''
        This function is used to compare the date with other date.

        Args:
            other (Date): The other date to be compared with.

        Returns:
            bool: True if the date is less than the other date,
                  False otherwise.
        '''
        if self.year < other.year:
            return True
        elif self.year == other.year:
            if self.month < other.month:
                return True
            elif self.month == other.month:
                if self.day < other.day:
                    return True
        return False

    @staticmethod
    def is_leap_year(year: int) -> bool:
        '''
        This method checks if a year is a leap year.

        Uses the full Gregorian rule: divisible by 4, except century
        years that are not divisible by 400 (1900 is not a leap year,
        2000 is). The previous ``year % 4 == 0`` check wrongly accepted
        every century year.

        Args:
            year (int): The year to check

        Returns:
            (bool): True if the year is a leap year, False otherwise
        '''
        return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)

    def is_date_valid(self, day: int, month: int, year: int) -> bool:
        '''
        This method is used to check if the date is valid.

        Args:
            day (int): The day of the date.
            month (int): The month of the date.
            year (int): The year of the date.

        Returns:
            bool: True if the date is valid, False otherwise.
        '''
        # Guard month range first so the _day_of_month lookup cannot go
        # out of bounds / silently index from the end of the tuple.
        if not (year >= 0 and 1 <= month <= 12):
            return False
        current_day = self._day_of_month[month - 1]
        if self.is_leap_year(year) and month == 2:
            current_day += 1  # February gains a day in leap years
        return 1 <= day <= current_day

    @classmethod
    def from_string(cls, date_as_string):
        '''
        This function is used to create a date from a string.

        Args:
            date_as_string (str): The string representation of the date,
                in "day-month-year" order (e.g. "27-3-1991").

        Returns:
            Date: The date created from the string.
        '''
        day, month, year = map(int, date_as_string.split('-'))
        return cls(day, month, year)

    @classmethod
    def today(cls):
        '''
        This function is used to create a Date for the current local day.

        Returns:
            Date: Today's date.
        '''
        cur_day = datetime.now()
        day, month, year = cur_day.day, cur_day.month, cur_day.year
        return cls(day, month, year)

    def to_wiki_format(self):
        '''
        Returns the date into a format legible by the Wikipedia URL

        Returns:
            (str): String that can be appended to the Wikipedia URL
                   For example 'July_31'
        '''
        return f'{self._month_str[self.month - 1]}_{self.day}'
class Scraper:
    '''
    Scrapes an English-Wikipedia "day" page (e.g. /wiki/July_31) and
    extracts the names of the people listed under its "Births" section.

    Attributes:
        ROOT (str): Base URL of the English Wikipedia article namespace.
    '''
    def __init__(self):
        self.ROOT = 'https://en.wikipedia.org/wiki/'

    def _get_soup(self, date: str) -> BeautifulSoup:
        # Download the page for `date` (wiki format, e.g. 'July_31')
        # and parse it into a BeautifulSoup tree.
        r = requests.get(self.ROOT + date)
        soup = BeautifulSoup(r.text, 'html.parser')
        return soup

    def _get_birth_header(self, date: str) -> BeautifulSoup:
        # Locate the <h2> heading of the "Births" section.
        soup = self._get_soup(date)
        span = soup.find(
            'span', {'class': 'mw-headline'}, text=re.compile("Births"))
        # If the list is empty because it didn't find anything
        if not span:
            raise ValueError('The given date has no birth data')
        h2 = span.find_parent()
        return h2

    def _get_celebrity_list(self, date: str) -> list:
        # Collect the <li> items of every <ul> between the "Births" <h2>
        # and the next <h2> (i.e. everything inside the Births section).
        next_node = self._get_birth_header(date)
        celebrities_list = []
        while True:
            next_node = next_node.find_next_sibling()
            if getattr(next_node, 'name') == 'ul':
                celebrities_list.extend(next_node.find_all('li'))
            elif getattr(next_node, 'name') == 'h2':
                break
        return celebrities_list

    def _clean_li(self, li: BeautifulSoup) -> str:
        # Extract just the person's name from one list item.
        # NOTE(review): assumes each <li> reads "YEAR – Name, description"
        # with an en dash separator; an entry without one raises
        # IndexError — TODO confirm against live page markup.
        li_complete = li.text.split('–')
        name_complete = li_complete[1].split(',')
        name = name_complete[0].strip()
        return name

    def get_celebrities(self, date: str = None) -> list:
        '''
        Returns the full list of celebrity names whose birthday matches
        the given date.

        Args:
            date (str, optional): Date in Wikipedia URL format, e.g.
                'July_31' (see Date.to_wiki_format). Defaults to
                'January_1' when omitted.

        Returns:
            list: Names (str) extracted from the page's Births section.
        '''
        if date is None:
            date = 'January_1'
        cel_list = self._get_celebrity_list(date)
        celebrities = []
        for li in cel_list:
            celebrities.append(self._clean_li(li))
        return celebrities
if __name__ == '__main__':
    # Demo: list everyone Wikipedia records as born on 27 March
    # (the year is irrelevant to the day page being scraped).
    date_object = Date(27, 3, 1991)
    scraper = Scraper()
    # to_wiki_format() turns the date into the page title, e.g. 'March_27'.
    celebrities = scraper.get_celebrities(date_object.to_wiki_format())
    print(celebrities)
| 30.134783 | 79 | 0.566873 | import re
import requests
from bs4 import BeautifulSoup
from datetime import datetime
class Date:
_day_of_month = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
_month_str = ('January', 'February', 'March',
'April', 'May', 'June',
'July', 'August', 'September',
'October', 'November', 'December')
def __init__(self, day: int, month: int, year: int):
if not self.is_date_valid(day, month, year):
raise ValueError('Date not valid')
self.year = year
self.month = month
self.day = day
def __str__(self):
return "{0}-{1}-{2}".format(self.day, self.month, self.year)
def __repr__(self):
return "{0}-{1}-{2}".format(self.day, self.month, self.year)
def __eq__(self, other):
return self.year == other.year and self.month == other.month and \
self.day == other.day
def __lt__(self, other):
if self.year < other.year:
return True
elif self.year == other.year:
if self.month < other.month:
return True
elif self.month == other.month:
if self.day < other.day:
return True
return False
@staticmethod
def is_leap_year(year: int) -> bool:
return year % 4 == 0
def is_date_valid(self, day: int, month: int, year: int) -> bool:
current_day = self._day_of_month[month - 1]
if self.is_leap_year(year) and month == 2:
current_day += 1
return year >= 0 and month >= 1 and month <= 12 and \
day >= 1 and day <= current_day
@classmethod
def from_string(cls, date_as_string):
day, month, year = map(int, date_as_string.split('-'))
return cls(day, month, year)
@classmethod
def today(cls):
cur_day = datetime.now()
day, month, year = cur_day.day, cur_day.month, cur_day.year
return cls(day, month, year)
def to_wiki_format(self):
return f'{self._month_str[self.month - 1]}_{self.day}'
class Scraper:
def __init__(self):
self.ROOT = 'https://en.wikipedia.org/wiki/'
def _get_soup(self, date: str) -> BeautifulSoup:
r = requests.get(self.ROOT + date)
soup = BeautifulSoup(r.text, 'html.parser')
return soup
def _get_birth_header(self, date: str) -> BeautifulSoup:
# Private
soup = self._get_soup(date)
span = soup.find(
'span', {'class': 'mw-headline'}, text=re.compile("Births"))
# If the list is empty because it didn't find anything
if not span:
raise ValueError('The given date has no birth data')
h2 = span.find_parent()
return h2
def _get_celebrity_list(self, date: str) -> list:
next_node = self._get_birth_header(date)
celebrities_list = []
while True:
next_node = next_node.find_next_sibling()
if getattr(next_node, 'name') == 'ul':
celebrities_list.extend(next_node.find_all('li'))
elif getattr(next_node, 'name') == 'h2':
break
return celebrities_list
def _clean_li(self, li: BeautifulSoup) -> str:
li_complete = li.text.split('–')
name_complete = li_complete[1].split(',')
name = name_complete[0].strip()
return name
def get_celebrities(self, date: str = None) -> list:
if date is None:
date = 'January_1'
cel_list = self._get_celebrity_list(date)
celebrities = []
for li in cel_list:
celebrities.append(self._clean_li(li))
return celebrities
if __name__ == '__main__':
date_object = Date(27, 3, 1991)
scraper = Scraper()
celebrities = scraper.get_celebrities(date_object.to_wiki_format())
print(celebrities)
| true | true |
f7204650310a54c5dad514d4271f2b18cd7ca4c9 | 43,074 | py | Python | pruning/prune_resnet_tools.py | 18463105800/ssd.pruning.pytorch | 39592ee00e02f28742028a97592beec18d07258c | [
"MIT"
] | 13 | 2019-11-15T16:18:55.000Z | 2022-03-23T06:04:49.000Z | pruning/prune_resnet_tools.py | XUHUAKing/ssd.pruning.pytorch | 39592ee00e02f28742028a97592beec18d07258c | [
"MIT"
] | null | null | null | pruning/prune_resnet_tools.py | XUHUAKing/ssd.pruning.pytorch | 39592ee00e02f28742028a97592beec18d07258c | [
"MIT"
] | 3 | 2019-11-27T07:27:38.000Z | 2020-10-21T08:46:21.000Z | '''
This file contains functions for pruning resnet-like model in layer level
1. prune_resconv_layer (resnet: conv layers)
2. prune_resnet_lconv_layer (resnet: lconv means identity layer)
3. prune_rbconv_by_indices (resnet: rbconv means right path's bottom layer)
4. prune_rbconv_by_number (resnet: used when you prune lconv but next block/layer cannot absorb your effect)
5. prune_ruconv1_layer (resnet: for resnet normal conv1 layers (i.e. right path's first upper layers))
6. prune_ruconv2_layer (resnet: for resnet normal conv2 layers (i.e. right path's second upper layers))
Author: xuhuahuang as intern in YouTu 07/2018
'''
import torch
from torch.autograd import Variable
from torchvision import models
import cv2
cv2.setNumThreads(0) # pytorch issue 1355: possible deadlock in DataLoader
# OpenCL may be enabled by default in OpenCV3;
# disable it because it because it's not thread safe and causes unwanted GPU memory allocations
cv2.ocl.setUseOpenCL(False)
import sys
import numpy as np
from models.resnet import BasicBlock, Bottleneck
def replace_layers(model, i, indexes, layers):
    """Return the module to place at position ``i`` when rebuilding a
    Sequential: the replacement from ``layers`` if ``i`` is listed in
    ``indexes``, otherwise the existing module ``model[i]``.

    Args:
        model: Indexable container of the current modules.
        i (int): Position being rebuilt.
        indexes (list): Positions whose modules are being swapped out.
        layers (list): Replacement modules, aligned with ``indexes``.
    """
    if i not in indexes:
        # Position untouched by this prune step: keep the old module.
        return model[i]
    return layers[indexes.index(i)]
# helper function
'''
Helper function for updating immediate following layer/block's input channels
Args:
model: model after pruning current layer/block
layer_index: current layer index. Locate the block/layer being pruned filters NOW
filters_to_prune: the output channels indices being pruned
**Note**
Not handle case described by prune_rbconv_by_number()
Not handle case inside prune_ruconv1_layer() and prune_ruconv2_layer() because they are inside same block
'''
def update_next_layers(model, layer_index, filters_to_prune):
    """Shrink the in_channels of the first conv (or block) that follows the
    layer at ``layer_index`` in ``model.base``, so it matches the output
    channels just removed by pruning.

    Args:
        model: model whose ``model.base`` Sequential was just pruned at
            ``layer_index``.
        layer_index (int): index of the layer/block pruned NOW.
        filters_to_prune: output-channel indices that were removed; the same
            indices are deleted along axis 1 (in_channels) downstream.

    Returns:
        The model with ``model.base`` rebuilt in place.

    NOTE(review): weight copies call ``.cuda()``, so a CUDA device is
    required — confirm there is no CPU-only path needed.
    """
    # only need to change in_channels for all following objects based on filters_to_prune
    next_conv = None
    next_blk = None
    next_ds = None # if next one is a block, and this block has downsample path, you need to update both residual and downsample path
    offset = 1
    # search for the next conv, based on current conv with id = (layer_index, filter_index)
    while layer_index + offset < len(model.base._modules.items()):
        res = list(model.base._modules.items())[layer_index+offset] # name, module
        if isinstance(res[1], torch.nn.modules.conv.Conv2d):
            next_name, next_conv = res
            next_is_block = False
            break
        elif isinstance(res[1], (BasicBlock, Bottleneck)):
            next_is_block = True
            next_blk = res[1]
            if res[1].downsample is None:
                next_conv = res[1].conv1
                next_ds = None
            else:
                next_conv = res[1].conv1
                next_ds = res[1].downsample
            break
        # skip non-conv, non-block modules (e.g. BN/ReLU/pooling)
        offset = offset + 1
    if next_conv is None:
        print("No filter will be prunned for this layer (last layer)")
        return model
    if len(filters_to_prune) == 0:
        print("No filter will be prunned for this layer")
        return model
    cut = len(filters_to_prune)
    # next_conv must exists
    # Rebuild next_conv with `cut` fewer input channels; weights for the
    # pruned channels are dropped along axis 1.
    next_new_conv = \
        torch.nn.Conv2d(in_channels = next_conv.in_channels - cut,\
            out_channels = next_conv.out_channels, \
            kernel_size = next_conv.kernel_size, \
            stride = next_conv.stride,
            padding = next_conv.padding,
            dilation = next_conv.dilation,
            groups = next_conv.groups,
            bias = next_conv.bias is not None)
    old_weights = next_conv.weight.data.cpu().numpy()
    new_weights = next_new_conv.weight.data.cpu().numpy()
    new_weights = np.delete(old_weights, filters_to_prune, axis = 1)
    next_new_conv.weight.data = torch.from_numpy(new_weights).cuda()
    if next_conv.bias is not None:
        # bias has one entry per OUTPUT channel, so it is unaffected
        next_new_conv.bias.data = next_conv.bias.data
    # next_ds exists or not is okay, no matter next_is_block is True or not
    # If the next block has a downsample (identity) path, its 1x1 conv also
    # consumes the pruned activations and must lose the same input channels.
    if next_ds is not None:
        old_conv_in_next_ds = next_ds[0]
        new_conv_in_next_new_ds = \
            torch.nn.Conv2d(in_channels = old_conv_in_next_ds.in_channels - cut,\
                out_channels = old_conv_in_next_ds.out_channels, \
                kernel_size = old_conv_in_next_ds.kernel_size, \
                stride = old_conv_in_next_ds.stride,
                padding = old_conv_in_next_ds.padding,
                dilation = old_conv_in_next_ds.dilation,
                groups = old_conv_in_next_ds.groups,
                bias = old_conv_in_next_ds.bias is not None)
        old_weights = old_conv_in_next_ds.weight.data.cpu().numpy()
        new_weights = new_conv_in_next_new_ds.weight.data.cpu().numpy()
        new_weights = np.delete(old_weights, filters_to_prune, axis = 1)
        new_conv_in_next_new_ds.weight.data = torch.from_numpy(new_weights).cuda()
        if old_conv_in_next_ds.bias is not None:
            new_conv_in_next_new_ds.bias.data = old_conv_in_next_ds.bias.data # bias won't change
        next_new_ds = torch.nn.Sequential(new_conv_in_next_new_ds, next_ds[1]) # BN keeps unchanged
    else:
        next_new_ds = None
    # next_new_ds and next_new_conv are ready now, create a next_new_block for replace_layers()
    if next_is_block: #same as next_blk is not None:
        if isinstance(next_blk, BasicBlock):
            # rely on conv1 of old block to get in_planes, out_planes, tride
            next_new_block = BasicBlock(next_blk.conv1.in_channels - cut, \
                next_blk.conv1.out_channels, next_blk.stride, downsample = next_new_ds)
            next_new_block.conv1 = next_new_conv # only update in_channels
            next_new_block.bn1 = next_blk.bn1
            next_new_block.relu = next_blk.relu
            next_new_block.conv2 = next_blk.conv2
            next_new_block.bn2 = next_blk.bn2
        else:
            next_new_block = Bottleneck(next_blk.conv1.in_channels - cut, \
                next_blk.conv1.out_channels, next_blk.stride, downsample = next_new_ds)
            next_new_block.conv1 = next_new_conv # only update in_channels
            next_new_block.bn1 = next_blk.bn1
            next_new_block.conv2 = next_blk.conv2
            next_new_block.bn2 = next_blk.bn2
            next_new_block.conv3 = next_blk.conv3
            next_new_block.bn3 = next_blk.bn3
            next_new_block.relu = next_blk.relu
    # Rebuild model.base, swapping in either the new conv or the new block.
    if not next_is_block:
        base = torch.nn.Sequential(
                *(replace_layers(model.base, i, [layer_index+offset], \
                    [next_new_conv]) for i, _ in enumerate(model.base)))
    else:
        base = torch.nn.Sequential(
                *(replace_layers(model.base, i, [layer_index+offset], \
                    [next_new_block]) for i, _ in enumerate(model.base)))
    del model.base # delete and replace with brand new one
    model.base = base
    print("Finished update next layers.")
    return model
'''
--------------------------------------------------------------------------------
1. Prune conv layers in resnet with/without BN (only support layers stored in model.base for now)
Args:
model: model for pruning
layer_index: index the pruned layer's location within model
cut_ratio: the ratio of filters you want to prune from this layer (e.g. 20% - cut 20% lowest weights layers)
Adapted from: https://github.com/jacobgil/pytorch-pruning
'''
def prune_resconv_layer(model, layer_index, cut_ratio=0.2, use_bn = True):
    """Prune the lowest-L1-norm output filters of the plain conv layer at
    ``model.base[layer_index]`` (optionally with its following BatchNorm),
    then fix up the next layer's in_channels via update_next_layers().

    Args:
        model: model to prune; layers live in ``model.base``.
        layer_index (int): position of the conv to prune.
        cut_ratio (float): fraction of filters to remove (default 0.2).
        use_bn (bool): if True, layer_index+1 is assumed to be the
            matching BatchNorm2d and is pruned alongside.

    Returns:
        The pruned model (or the unchanged model when pruning is skipped).

    NOTE(review): weight copies call ``.cuda()``, so a CUDA device is
    required — confirm there is no CPU-only path needed.
    """
    _, conv = list(model.base._modules.items())[layer_index]
    if use_bn:
        _, old_bn = list(model.base._modules.items())[layer_index + 1]
    next_conv = None
    offset = 1
    # search for the next conv, based on current conv with id = (layer_index, filter_index)
    while layer_index + offset < len(model.base._modules.items()):
        res = list(model.base._modules.items())[layer_index+offset] # name, module
        if isinstance(res[1], torch.nn.modules.conv.Conv2d):
            next_name, next_conv = res
            break
        elif isinstance(res[1], (BasicBlock, Bottleneck)):
            next_conv = res[1].conv1
            break
        offset = offset + 1
    if next_conv is None:
        print("No filter will be prunned for this layer (last layer)")
        return model
    num_filters = conv.weight.data.size(0) # out_channels x in_channels x 3 x 3
    # skip the layer with only one filter left
    if num_filters <= 1:
        print("No filter will be prunned for this layer (num_filters<=1)")
        return model
    cut = int(cut_ratio * num_filters)
    if cut < 1:
        print("No filter will be prunned for this layer (cut<1)")
        return model
    if (num_filters - cut) < 1:
        print("No filter will be prunned for this layer (no filter left after cutting)")
        return model
    # rank the filters within this layer and store into filter_ranks
    # Criterion: mean absolute weight of each filter (L1 norm normalised
    # by the filter's element count); smallest filters are pruned first.
    abs_wgt = torch.abs(conv.weight.data)
    values = \
        torch.sum(abs_wgt, dim = 1, keepdim = True).\
            sum(dim=2, keepdim = True).sum(dim=3, keepdim = True)[:, 0, 0, 0]# .data
    # Normalize the sum of weight by the filter dimensions in x 3 x 3
    values = values / (abs_wgt.size(1) * abs_wgt.size(2) * abs_wgt.size(3)) # (filter_number for this layer, 1)
    print("Ranking filters.. ")
    filters_to_prune = np.argsort(values.cpu().numpy())[:cut] # order from smallest to largest
    print("Filters that will be prunned", filters_to_prune)
    print("Pruning filters.. ")
    # the updated conv for current conv, with cut output channels being pruned
    new_conv = \
        torch.nn.Conv2d(in_channels = conv.in_channels, \
            out_channels = conv.out_channels - cut,
            kernel_size = conv.kernel_size, \
            stride = conv.stride,
            padding = conv.padding,
            dilation = conv.dilation,
            groups = conv.groups,
            bias = conv.bias is not None) #(out_channels)
    old_weights = conv.weight.data.cpu().numpy() # (out_channels, in_channels, kernel_size[0], kernel_size[1]
    new_weights = new_conv.weight.data.cpu().numpy()
    # skip that filter's weight inside old_weights and store others into new_weights
    new_weights = np.delete(old_weights, filters_to_prune, axis = 0)
    new_conv.weight.data = torch.from_numpy(new_weights).cuda()
    if conv.bias is not None: # no bias for conv layers
        bias_numpy = conv.bias.data.cpu().numpy()
        # change size to (out_channels - cut)
        bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)
        bias = np.delete(bias_numpy, filters_to_prune, axis = None)
        new_conv.bias.data = torch.from_numpy(bias).cuda()
    # BatchNorm modification
    # TODO: Extract this function outside as a separate func.
    if use_bn:
        # Drop the same channel indices from the BN's learnable gamma/beta.
        new_bn = torch.nn.BatchNorm2d(num_features=new_conv.out_channels, \
            eps=old_bn.eps, momentum=old_bn.momentum, affine=old_bn.affine)
        # old_bn.affine == True, need to copy learning gamma and beta to new_bn
        # gamma: size = (num_features)
        old_weights = old_bn.weight.data.cpu().numpy()
        new_weights = new_bn.weight.data.cpu().numpy()
        new_weights = np.delete(old_weights, filters_to_prune)
        new_bn.weight.data = torch.from_numpy(new_weights).cuda()
        # beta: size = (num_features)
        bias_numpy = old_bn.bias.data.cpu().numpy()
        # change size to (out_channels - cut)
        bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)
        bias = np.delete(bias_numpy, filters_to_prune)
        new_bn.bias.data = torch.from_numpy(bias).cuda()
    if use_bn:
        # BatchNorm modification
        base = torch.nn.Sequential(
                *(replace_layers(model.base, i, [layer_index, layer_index+1], \
                    [new_conv, new_bn]) for i, _ in enumerate(model.base)))
        del old_bn
    else:
        # replace current layer and next_conv with new_conv and next_new_conv respectively
        base = torch.nn.Sequential(
                *(replace_layers(model.base, i, [layer_index], \
                    [new_conv]) for i, _ in enumerate(model.base)))
    del model.base # delete and replace with brand new one
    del conv
    model.base = base # update current layer
    model = update_next_layers(model, layer_index, filters_to_prune) # update following layers
    message = str(100*float(cut) / num_filters) + "%"
    print("Filters prunned", str(message))
    return model
'''
--------------------------------------------------------------------------------
2. Prune identity conv layers without/with BN in a resnet block
(*Note: NOT used for normal layer, the 'layer' here must locate inside a block indexed by block_index)
Args:
block_index: a block also named as a 'layer' in torchvision implementation, locate lconv layer
*Note:
The index criteria based on 'one single block' unit, which means 1 index represents 1 BasicBlock/Bottleneck, instead of one layer (3-6 blocks)
Return:
cut_indices: the filters_to_prune in this layer, will be used in function 5.
'''
def prune_resnet_lconv_layer(model, block_index, cut_ratio=0.2, use_bn = True):
    """Prune the downsample ("lconv") 1x1 conv + BN on the identity path of
    the ResNet block at ``model.base[block_index]``, then fix up following
    layers via update_next_layers().

    Args:
        model: model to prune; blocks live in ``model.base``.
        block_index (int): index of the BasicBlock/Bottleneck whose
            downsample path is pruned (1 index == 1 block here).
        cut_ratio (float): fraction of filters to remove (default 0.2).
        use_bn (bool): must be True; BN-free ResNets are unsupported.

    Returns:
        tuple: (cut_indices, model) — the pruned output-channel indices
        (None when pruning was skipped; feed them to
        prune_rbconv_by_indices so the residual path matches) and the
        updated model.

    NOTE(review): weight copies call ``.cuda()``, so a CUDA device is
    required — confirm there is no CPU-only path needed.
    """
    _, blk = list(model.base._modules.items())[block_index]
    cut_indices = None
    if not use_bn:
        print("ResNet without BN is not supported for prunning")
        return cut_indices, model
    # check whether the left path has conv layer for prunning
    # NOTE(review): `== None` relies on nn.Module's default identity
    # comparison; `is None` would be the idiomatic check.
    if blk.downsample == None:
        print("No filters will be prunned because lconv doesn't exist")
        return cut_indices, model
    if not isinstance(blk, (BasicBlock, Bottleneck)):
        print("Only support for ResNet with BasicBlock or Bottleneck defined in torchvision")
        return cut_indices, model
    # get old conv and bn on the left
    lconv = blk.downsample[0] # nn.Sequential for (lconv, lbn)
    lbn = blk.downsample[1]
    next_conv = None
    offset = 1
    # search for the next conv, can be conv1 within next block, or a normal conv layer
    while block_index + offset < len(model.base._modules.items()):
        res = list(model.base._modules.items())[block_index+offset] # name, module
        if isinstance(res[1], torch.nn.modules.conv.Conv2d):
            next_name, next_conv = res
            break
        elif isinstance(res[1], (BasicBlock, Bottleneck)):
            next_conv = res[1].conv1
            break
        offset = offset + 1
    if next_conv is None:
        print("No filters will be prunned because this is the last block")
        return cut_indices, model
    num_filters = lconv.weight.data.size(0) # out_channels x in_channels x 3 x 3
    # skip the layer with only one filter left
    if num_filters <= 1:
        print("No filter will be prunned for this layer (num_filters<=1)")
        return cut_indices, model
    cut = int(cut_ratio * num_filters)
    if cut < 1:
        print("No filter will be prunned for this layer (cut<1)")
        return cut_indices, model
    if (num_filters - cut) < 1:
        print("No filter will be prunned for this layer (no filter left after cutting)")
        return cut_indices, model
    # rank the filters within this layer and store into filter_ranks
    # Criterion: mean absolute weight of each filter; smallest pruned first.
    abs_wgt = torch.abs(lconv.weight.data)
    values = \
        torch.sum(abs_wgt, dim = 1, keepdim = True).\
            sum(dim=2, keepdim = True).sum(dim=3, keepdim = True)[:, 0, 0, 0]# .data
    # Normalize the sum of weight by the filter dimensions in x 3 x 3
    values = values / (abs_wgt.size(1) * abs_wgt.size(2) * abs_wgt.size(3)) # (filter_number for this layer, 1)
    print("Ranking filters.. ")
    filters_to_prune = np.argsort(values.cpu().numpy())[:cut] # order from smallest to largest
    print("Filters that will be prunned", filters_to_prune)
    print("Pruning filters.. ")
    # the updated conv for old lconv, with cut output channels being pruned
    new_conv = \
        torch.nn.Conv2d(in_channels = lconv.in_channels, \
            out_channels = lconv.out_channels - cut,
            kernel_size = lconv.kernel_size, \
            stride = lconv.stride,
            padding = lconv.padding,
            dilation = lconv.dilation,
            groups = lconv.groups,
            bias = lconv.bias is not None) #(out_channels)
    old_weights = lconv.weight.data.cpu().numpy() # (out_channels, in_channels, kernel_size[0], kernel_size[1]
    new_weights = new_conv.weight.data.cpu().numpy()
    # skip that filter's weight inside old_weights and store others into new_weights
    new_weights = np.delete(old_weights, filters_to_prune, axis = 0)
    new_conv.weight.data = torch.from_numpy(new_weights).cuda()
    if lconv.bias is not None:
        bias_numpy = lconv.bias.data.cpu().numpy()
        # change size to (out_channels - cut)
        bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)
        bias = np.delete(bias_numpy, filters_to_prune, axis = None)
        new_conv.bias.data = torch.from_numpy(bias).cuda()
    # new BN layer after new_conv
    new_bn = torch.nn.BatchNorm2d(num_features=new_conv.out_channels, \
        eps=lbn.eps, momentum=lbn.momentum, affine=lbn.affine)
    # old_bn.affine == True, need to copy learnable gamma and beta to new_bn
    # gamma: size = (num_features)
    old_weights = lbn.weight.data.cpu().numpy()
    new_weights = new_bn.weight.data.cpu().numpy()
    new_weights = np.delete(old_weights, filters_to_prune)
    new_bn.weight.data = torch.from_numpy(new_weights).cuda()
    # beta: size = (num_features)
    bias_numpy = lbn.bias.data.cpu().numpy()
    # change size to (out_channels - cut)
    bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)
    bias = np.delete(bias_numpy, filters_to_prune)
    new_bn.bias.data = torch.from_numpy(bias).cuda()
    # replace
    # update current left conv + left BN layer, have BN by default
    new_ds = torch.nn.Sequential(
            *(replace_layers(blk.downsample, i, [0, 1], \
                [new_conv, new_bn]) for i, _ in enumerate(blk.downsample)))
    # delete current and replace with a brand new BLOCK
    if isinstance(blk, BasicBlock):
        # rely on conv1 of old block to get in_planes, out_planes, tride
        new_blk = BasicBlock(blk.conv1.in_channels, blk.conv1.out_channels, \
            blk.stride, downsample = new_ds)
        # keep all layers in residual path unchanged tempararily
        new_blk.conv1 = blk.conv1
        new_blk.bn1 = blk.bn1
        new_blk.relu = blk.relu
        new_blk.conv2 = blk.conv2
        new_blk.bn2 = blk.bn2
    else:
        new_blk = Bottleneck(blk.conv1.in_channels, blk.conv1.out_channels, \
            blk.stride, downsample = new_ds)
        # keep all layers in residual path unchanged tempararily
        new_blk.conv1 = blk.conv1
        new_blk.bn1 = blk.bn1
        new_blk.conv2 = blk.conv2
        new_blk.bn2 = blk.bn2
        new_blk.conv3 = blk.conv3
        new_blk.bn3 = blk.bn3
        new_blk.relu = blk.relu
    # now new_blk is ready, it can act as a layer and replace old blk with replace_layers()
    base = torch.nn.Sequential(
            *(replace_layers(model.base, i, [block_index], \
                [new_blk]) for i, _ in enumerate(model.base)))
    # delete and replace with brand new one
    del model.base # delete the things pointed by pointer
    del blk
    model.base = base # update current layer
    model = update_next_layers(model, block_index, filters_to_prune) # update following layers
    cut_indices = filters_to_prune
    message = str(100*float(cut) / num_filters) + "%"
    print("Filters prunned", str(message))
    return cut_indices, model
'''
--------------------------------------------------------------------------------
3. Prune residual conv layer, the one at the bottom of residual side with/without BN
(*Note: MUST call this after you prune identity path with downsample, the size won't fit because upper functions only update left path)
Args:
block_index: the BasicBlock or Bottleneck Block this layer locates
filters_to_prune: the filters' indices waiting for being pruned
use_bn: use Batch Norm or not
'''
def prune_rbconv_by_indices(model, block_index, filters_to_prune, use_bn = True):
    """Prune the given output filters of the LAST conv of a residual block.

    Targets the conv at the bottom of the residual (right) path -- ``conv2``
    for BasicBlock, ``conv3`` for Bottleneck -- and its matching BatchNorm.
    Intended to be called after the identity/downsample path was pruned with
    the same indices so both paths keep equal channel counts at the addition.
    Unlike ``prune_rbconv_by_number``, following layers are NOT updated here;
    the matching downsample pruning is expected to have handled that.

    Args:
        model: network whose ``model.base`` is an ``nn.Sequential`` of
            layers/blocks (torchvision BasicBlock/Bottleneck).
        block_index: index of the target block inside ``model.base``.
        filters_to_prune: indices of output channels to remove.
        use_bn: only ``True`` is supported (BN affine params must be pruned
            alongside the conv filters).

    Returns:
        The model with the target block replaced by a rebuilt block.
        NOTE(review): tensors are moved with ``.cuda()`` -- CUDA is assumed.
    """
    _, blk = list(model.base._modules.items())[block_index]
    if not use_bn:
        print("ResNet without BN is not supported for prunning")
        return model
    # check whether the left path has conv layer for prunning
    if blk.downsample == None:
        print("Only support pruning for rbconv after lconv was pruned")
        return model
    if not isinstance(blk, (BasicBlock, Bottleneck)):
        print("Only support for ResNet with BasicBlock or Bottleneck defined in torchvision")
        return model
    if isinstance(blk, BasicBlock):
        # when it is BasicBlock, the rbconv is conv2, and its bn is bn2
        conv = blk.conv2
        bn = blk.bn2
    else:
        # when it is Bottleneck, the rbconv is conv3, and its bn is bn3
        conv = blk.conv3
        bn = blk.bn3
    # only need to update itself, no need to care about others such as next_ds/next_conv
    new_conv = \
        torch.nn.Conv2d(in_channels = conv.in_channels, \
            out_channels = conv.out_channels - len(filters_to_prune),
            kernel_size = conv.kernel_size, \
            stride = conv.stride,
            padding = conv.padding,
            dilation = conv.dilation,
            groups = conv.groups,
            bias = conv.bias is not None) #(out_channels)
    old_weights = conv.weight.data.cpu().numpy() # (out_channels, in_channels, kernel_size[0], kernel_size[1]
    new_weights = new_conv.weight.data.cpu().numpy()
    # skip that filter's weight inside old_weights and store others into new_weights
    new_weights = np.delete(old_weights, filters_to_prune, axis = 0)
    new_conv.weight.data = torch.from_numpy(new_weights).cuda()
    if conv.bias is not None:
        bias_numpy = conv.bias.data.cpu().numpy()
        # change size to (out_channels - cut)
        bias = np.zeros(shape = (bias_numpy.shape[0] - len(filters_to_prune)), dtype = np.float32)
        bias = np.delete(bias_numpy, filters_to_prune, axis = None)
        new_conv.bias.data = torch.from_numpy(bias).cuda()
    # new BN layer after new_conv
    new_bn = torch.nn.BatchNorm2d(num_features=new_conv.out_channels, \
            eps=bn.eps, momentum=bn.momentum, affine=bn.affine)
    # old_bn.affine == True, need to copy learnable gamma and beta to new_bn
    # gamma: size = (num_features)
    old_weights = bn.weight.data.cpu().numpy()
    new_weights = new_bn.weight.data.cpu().numpy()
    new_weights = np.delete(old_weights, filters_to_prune)
    new_bn.weight.data = torch.from_numpy(new_weights).cuda()
    # beta: size = (num_features)
    bias_numpy = bn.bias.data.cpu().numpy()
    # change size to (out_channels - cut)
    bias = np.zeros(shape = (bias_numpy.shape[0] - len(filters_to_prune)), dtype = np.float32)
    bias = np.delete(bias_numpy, filters_to_prune)
    new_bn.bias.data = torch.from_numpy(bias).cuda()
    # NOTE(review): running_mean/running_var of the old BN are not copied; the
    # rebuilt BN restarts its running statistics -- confirm this is intended.
    if isinstance(blk, BasicBlock):
        # replace with new block
        new_blk = BasicBlock(blk.conv1.in_channels, blk.conv1.out_channels, \
                 blk.stride, downsample = blk.downsample)
        # keep all layers in residual path unchanged temporarily
        new_blk.conv1 = blk.conv1
        new_blk.bn1 = blk.bn1
        new_blk.relu = blk.relu
        new_blk.conv2 = new_conv # update with new conv
        new_blk.bn2 = new_bn # update with new bn
    else:
        # replace with new block
        new_blk = Bottleneck(blk.conv1.in_channels, blk.conv1.out_channels, \
                 blk.stride, downsample = blk.downsample)
        # keep all layers in residual path unchanged temporarily
        new_blk.conv1 = blk.conv1
        new_blk.bn1 = blk.bn1
        new_blk.conv2 = blk.conv2
        new_blk.bn2 = blk.bn2
        new_blk.conv3 = new_conv
        new_blk.bn3 = new_bn
        new_blk.relu = blk.relu
    # splice the rebuilt block into model.base at block_index
    base = torch.nn.Sequential(
            *(replace_layers(model.base, i, [block_index], \
                [new_blk]) for i, _ in enumerate(model.base)))
    # delete and replace
    del model.base
    model.base = base
    print("Filters prunned for rb layer:", filters_to_prune)
    return model
'''
--------------------------------------------------------------------------------
4. Prune residual conv layer, the one at the bottom of residual side with/without BN, based on its own weights
(*Note: MUST call this when you prune lconv layer,
the immediate following block/conv cannot absorb your effect due to its empty left path)
Args:
block_index: the BasicBlock or Bottleneck Block this layer locates
num_cut: the number of filters waiting for being pruned
use_bn: use Batch Norm or not
'''
def prune_rbconv_by_number(model, block_index, num_cut, use_bn = True):
    """Prune ``num_cut`` filters of the LAST conv of a residual block by its
    own L1 weight magnitude, then propagate the cut to following layers.

    Targets ``conv2``/``bn2`` of a BasicBlock or ``conv3``/``bn3`` of a
    Bottleneck. Filters are ranked by the mean absolute weight per output
    channel and the smallest ``num_cut`` are removed. ``update_next_layers``
    is then called so downstream in_channels shrink to match.

    Args:
        model: network whose ``model.base`` is an ``nn.Sequential`` of
            layers/blocks (torchvision BasicBlock/Bottleneck).
        block_index: index of the target block inside ``model.base``.
        num_cut: number of filters to remove.
        use_bn: only ``True`` is supported.

    Returns:
        The model with the rebuilt block spliced in and next layers updated.
        NOTE(review): tensors are moved with ``.cuda()`` -- CUDA is assumed.
    """
    _, blk = list(model.base._modules.items())[block_index]
    if not use_bn:
        print("ResNet without BN is not supported for prunning")
        return model
    if not isinstance(blk, (BasicBlock, Bottleneck)):
        print("Only support for ResNet with BasicBlock or Bottleneck defined in torchvision")
        return model
    if isinstance(blk, BasicBlock):
        # when it is BasicBlock, the rbconv is conv2, and its bn is bn2
        conv = blk.conv2
        bn = blk.bn2
    else:
        # when it is Bottleneck, the rbconv is conv3, and its bn is bn3
        conv = blk.conv3
        bn = blk.bn3
    num_filters = conv.weight.data.size(0) # out_channels x in_channels x 3 x 3
    # skip the layer with only one filter left
    if num_filters <= 1:
        print("No filter will be prunned for this layer (num_filters<=1)")
        return model
    if num_cut < 1:
        print("Error: No filter will be prunned for this layer (cut<1)")
        return model
    if (num_filters - num_cut) < 1:
        print("Error: No filter will be prunned for this layer (no filter left after cutting)")
        return model
    # rank the filters within this layer and store into filter_ranks
    abs_wgt = torch.abs(conv.weight.data)
    values = \
        torch.sum(abs_wgt, dim = 1, keepdim = True).\
            sum(dim=2, keepdim = True).sum(dim=3, keepdim = True)[:, 0, 0, 0]# .data
    # Normalize the sum of weight by the filter dimensions in x 3 x 3
    values = values / (abs_wgt.size(1) * abs_wgt.size(2) * abs_wgt.size(3)) # (filter_number for this layer, 1)
    print("Ranking filters.. ")
    filters_to_prune = np.argsort(values.cpu().numpy())[:num_cut] # order from smallest to largest
    print("Filters that will be prunned", filters_to_prune)
    print("Pruning filters.. ")
    # only need to update itself, no need to care about others such as next_ds/next_conv
    new_conv = \
        torch.nn.Conv2d(in_channels = conv.in_channels, \
            out_channels = conv.out_channels - num_cut,
            kernel_size = conv.kernel_size, \
            stride = conv.stride,
            padding = conv.padding,
            dilation = conv.dilation,
            groups = conv.groups,
            bias = conv.bias is not None) #(out_channels)
    old_weights = conv.weight.data.cpu().numpy() # (out_channels, in_channels, kernel_size[0], kernel_size[1]
    new_weights = new_conv.weight.data.cpu().numpy()
    # skip that filter's weight inside old_weights and store others into new_weights
    new_weights = np.delete(old_weights, filters_to_prune, axis = 0)
    new_conv.weight.data = torch.from_numpy(new_weights).cuda()
    if conv.bias is not None:
        bias_numpy = conv.bias.data.cpu().numpy()
        # change size to (out_channels - cut)
        bias = np.zeros(shape = (bias_numpy.shape[0] - num_cut), dtype = np.float32)
        bias = np.delete(bias_numpy, filters_to_prune, axis = None)
        new_conv.bias.data = torch.from_numpy(bias).cuda()
    # new BN layer after new_conv
    new_bn = torch.nn.BatchNorm2d(num_features=new_conv.out_channels, \
            eps=bn.eps, momentum=bn.momentum, affine=bn.affine)
    # old_bn.affine == True, need to copy learnable gamma and beta to new_bn
    # gamma: size = (num_features)
    old_weights = bn.weight.data.cpu().numpy()
    new_weights = new_bn.weight.data.cpu().numpy()
    new_weights = np.delete(old_weights, filters_to_prune)
    new_bn.weight.data = torch.from_numpy(new_weights).cuda()
    # beta: size = (num_features)
    bias_numpy = bn.bias.data.cpu().numpy()
    # change size to (out_channels - cut)
    bias = np.zeros(shape = (bias_numpy.shape[0] - num_cut), dtype = np.float32)
    bias = np.delete(bias_numpy, filters_to_prune)
    new_bn.bias.data = torch.from_numpy(bias).cuda()
    if isinstance(blk, BasicBlock):
        # replace with new block
        new_blk = BasicBlock(blk.conv1.in_channels, blk.conv1.out_channels, \
                 blk.stride, downsample = blk.downsample)
        # keep all layers in residual path unchanged temporarily
        new_blk.conv1 = blk.conv1
        new_blk.bn1 = blk.bn1
        new_blk.relu = blk.relu
        new_blk.conv2 = new_conv # update with new conv
        new_blk.bn2 = new_bn # update with new bn
    else:
        # replace with new block
        new_blk = Bottleneck(blk.conv1.in_channels, blk.conv1.out_channels, \
                 blk.stride, downsample = blk.downsample)
        # keep all layers in residual path unchanged temporarily
        new_blk.conv1 = blk.conv1
        new_blk.bn1 = blk.bn1
        new_blk.conv2 = blk.conv2
        new_blk.bn2 = blk.bn2
        new_blk.conv3 = new_conv
        new_blk.bn3 = new_bn
        new_blk.relu = blk.relu
    # splice the rebuilt block into model.base at block_index
    base = torch.nn.Sequential(
            *(replace_layers(model.base, i, [block_index], \
                [new_blk]) for i, _ in enumerate(model.base)))
    # delete and replace
    del model.base
    del blk
    model.base = base
    model = update_next_layers(model, block_index, filters_to_prune) # update following layers
    print("Filters prunned for rb layer:", filters_to_prune)
    return model
'''
--------------------------------------------------------------------------------
5. Prune normal residual conv layer, the FIRST one at the top of the residual side, with/without BN
Args:
block_index: the BasicBlock or Bottleneck Block this layer locates
cut_ratio: the ratio of filters pruned from conv1 (and conv2 if Bottleneck)
use_bn: use Batch Norm or not
'''
def prune_ruconv1_layer(model, block_index, cut_ratio=0.2, use_bn = True):
    """Prune filters of ``conv1`` (the first conv of the residual path) of a
    BasicBlock/Bottleneck, and shrink ``conv2``'s in_channels to match.

    Filters are ranked by mean absolute weight per output channel; the
    smallest ``cut_ratio`` fraction is removed from ``conv1``/``bn1``, and
    ``conv2`` is rebuilt with the corresponding input channels deleted. The
    block's external channel counts are unchanged, so no other layer needs
    updating.

    Args:
        model: network whose ``model.base`` is an ``nn.Sequential`` of
            layers/blocks (torchvision BasicBlock/Bottleneck).
        block_index: index of the target block inside ``model.base``.
        cut_ratio: fraction of ``conv1`` filters to remove.
        use_bn: only ``True`` is supported.

    Returns:
        The model with the rebuilt block spliced in.
        NOTE(review): tensors are moved with ``.cuda()`` -- CUDA is assumed.
    """
    _, blk = list(model.base._modules.items())[block_index]
    if not use_bn:
        print("ResNet without BN is not supported for prunning")
        return model
    if not isinstance(blk, (BasicBlock, Bottleneck)):
        print("Conv1 only for ResNet with BasicBlock or Bottleneck defined in torchvision")
        return model
    # cut conv1, and next conv is conv2
    conv = blk.conv1
    bn = blk.bn1
    next_conv = blk.conv2
    num_filters = conv.weight.data.size(0) # out_channels x in_channels x 3 x 3
    # skip the layer with only one filter left
    if num_filters <= 1:
        print("No filter will be prunned for this layer (num_filters<=1)")
        return model
    cut = int(cut_ratio * num_filters)
    if cut < 1:
        print("No filter will be prunned for this layer (cut<1)")
        return model
    if (num_filters - cut) < 1:
        print("No filter will be prunned for this layer (no filter left after cutting)")
        return model
    # rank the filters within this layer and store into filter_ranks
    abs_wgt = torch.abs(conv.weight.data)
    values = \
        torch.sum(abs_wgt, dim = 1, keepdim = True).\
            sum(dim=2, keepdim = True).sum(dim=3, keepdim = True)[:, 0, 0, 0]# .data
    # Normalize the sum of weight by the filter dimensions in x 3 x 3
    values = values / (abs_wgt.size(1) * abs_wgt.size(2) * abs_wgt.size(3)) # (filter_number for this layer, 1)
    print("Ranking filters.. ")
    filters_to_prune = np.argsort(values.cpu().numpy())[:cut] # order from smallest to largest
    print("Filters that will be prunned", filters_to_prune)
    print("Pruning filters.. ")
    # the updated conv for current conv, with cut output channels being pruned
    new_conv = \
        torch.nn.Conv2d(in_channels = conv.in_channels, \
            out_channels = conv.out_channels - cut,
            kernel_size = conv.kernel_size, \
            stride = conv.stride,
            padding = conv.padding,
            dilation = conv.dilation,
            groups = conv.groups,
            bias = conv.bias is not None) #(out_channels)
    old_weights = conv.weight.data.cpu().numpy() # (out_channels, in_channels, kernel_size[0], kernel_size[1]
    new_weights = new_conv.weight.data.cpu().numpy()
    # skip that filter's weight inside old_weights and store others into new_weights
    new_weights = np.delete(old_weights, filters_to_prune, axis = 0)
    new_conv.weight.data = torch.from_numpy(new_weights).cuda()
    if conv.bias is not None:
        bias_numpy = conv.bias.data.cpu().numpy()
        # change size to (out_channels - cut)
        bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)
        bias = np.delete(bias_numpy, filters_to_prune, axis = None)
        new_conv.bias.data = torch.from_numpy(bias).cuda() # new conv1
    # BatchNorm layer
    new_bn = torch.nn.BatchNorm2d(num_features=new_conv.out_channels, \
            eps=bn.eps, momentum=bn.momentum, affine=bn.affine)
    # gamma: size = (num_features)
    old_weights = bn.weight.data.cpu().numpy()
    # NOTE(review): this read is dead -- it is overwritten by np.delete on the
    # next line; sibling functions read new_bn.weight here instead. Harmless.
    new_weights = bn.weight.data.cpu().numpy()
    new_weights = np.delete(old_weights, filters_to_prune)
    new_bn.weight.data = torch.from_numpy(new_weights).cuda()
    # beta: size = (num_features)
    bias_numpy = bn.bias.data.cpu().numpy()
    # change size to (out_channels - cut)
    bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)
    bias = np.delete(bias_numpy, filters_to_prune)
    new_bn.bias.data = torch.from_numpy(bias).cuda() # new bn1
    # new conv for next_conv
    next_new_conv = \
        torch.nn.Conv2d(in_channels = next_conv.in_channels - cut,\
            out_channels = next_conv.out_channels, \
            kernel_size = next_conv.kernel_size, \
            stride = next_conv.stride,
            padding = next_conv.padding,
            dilation = next_conv.dilation,
            groups = next_conv.groups,
            bias = next_conv.bias is not None)
    old_weights = next_conv.weight.data.cpu().numpy()
    new_weights = next_new_conv.weight.data.cpu().numpy()
    new_weights = np.delete(old_weights, filters_to_prune, axis = 1)
    next_new_conv.weight.data = torch.from_numpy(new_weights).cuda()
    if next_conv.bias is not None:
        next_new_conv.bias.data = next_conv.bias.data # new conv2
    # replace with new block
    if isinstance(blk, BasicBlock):
        new_blk = BasicBlock(blk.conv1.in_channels, blk.conv1.out_channels, \
                 blk.stride, downsample = blk.downsample)
        # keep all layers in residual path unchanged temporarily
        new_blk.conv1 = new_conv
        new_blk.bn1 = new_bn
        new_blk.relu = blk.relu
        new_blk.conv2 = next_new_conv # update with new conv
        new_blk.bn2 = blk.bn2 # update with new bn
    else:
        new_blk = Bottleneck(blk.conv1.in_channels, blk.conv1.out_channels, \
                 blk.stride, downsample = blk.downsample)
        # keep all layers in residual path unchanged temporarily
        new_blk.conv1 = new_conv
        new_blk.bn1 = new_bn
        new_blk.conv2 = next_new_conv
        new_blk.bn2 = blk.bn2
        new_blk.conv3 = blk.conv3
        new_blk.bn3 = blk.bn3
        new_blk.relu = blk.relu
    # splice the rebuilt block into model.base at block_index
    base = torch.nn.Sequential(
            *(replace_layers(model.base, i, [block_index], \
                [new_blk]) for i, _ in enumerate(model.base)))
    # delete and replace
    del model.base
    model.base = base
    print("Filters prunned:", filters_to_prune)
    return model
'''
--------------------------------------------------------------------------------
6. Prune normal residual conv layer, the SECOND one at the top of the residual side, with/without BN
(*for Bottleneck only)
Args:
block_index: the BasicBlock or Bottleneck Block this layer locates
    cut_ratio: the ratio of filters pruned from conv2 (this function applies to Bottleneck blocks only)
use_bn: use Batch Norm or not
'''
def prune_ruconv2_layer(model, block_index, cut_ratio=0.2, use_bn = True):
    """Prune filters of ``conv2`` (the middle conv of a Bottleneck's residual
    path), and shrink ``conv3``'s in_channels to match.

    Bottleneck only. Filters are ranked by mean absolute weight per output
    channel; the smallest ``cut_ratio`` fraction is removed from
    ``conv2``/``bn2``, and ``conv3`` is rebuilt with the corresponding input
    channels deleted. The block's external channel counts are unchanged, so
    no other layer needs updating.

    Args:
        model: network whose ``model.base`` is an ``nn.Sequential`` of
            layers/blocks (torchvision Bottleneck).
        block_index: index of the target Bottleneck inside ``model.base``.
        cut_ratio: fraction of ``conv2`` filters to remove.
        use_bn: only ``True`` is supported.

    Returns:
        The model with the rebuilt block spliced in.
        NOTE(review): tensors are moved with ``.cuda()`` -- CUDA is assumed.
    """
    _, blk = list(model.base._modules.items())[block_index]
    if not use_bn:
        print("ResNet without BN is not supported for prunning")
        return model
    if not isinstance(blk, Bottleneck):
        print("Conv2 only for ResNet with Bottleneck defined in torchvision")
        return model
    # cut conv2, and next conv is conv3
    conv = blk.conv2
    bn = blk.bn2
    next_conv = blk.conv3
    num_filters = conv.weight.data.size(0) # out_channels x in_channels x 3 x 3
    # skip the layer with only one filter left
    if num_filters <= 1:
        print("No filter will be prunned for this layer (num_filters<=1)")
        return model
    cut = int(cut_ratio * num_filters)
    if cut < 1:
        print("No filter will be prunned for this layer (cut<1)")
        return model
    if (num_filters - cut) < 1:
        print("No filter will be prunned for this layer (no filter left after cutting)")
        return model
    # rank the filters within this layer and store into filter_ranks
    abs_wgt = torch.abs(conv.weight.data)
    values = \
        torch.sum(abs_wgt, dim = 1, keepdim = True).\
            sum(dim=2, keepdim = True).sum(dim=3, keepdim = True)[:, 0, 0, 0]# .data
    # Normalize the sum of weight by the filter dimensions in x 3 x 3
    values = values / (abs_wgt.size(1) * abs_wgt.size(2) * abs_wgt.size(3)) # (filter_number for this layer, 1)
    print("Ranking filters.. ")
    filters_to_prune = np.argsort(values.cpu().numpy())[:cut] # order from smallest to largest
    print("Filters that will be prunned", filters_to_prune)
    print("Pruning filters.. ")
    # the updated conv for current conv, with cut output channels being pruned
    new_conv = \
        torch.nn.Conv2d(in_channels = conv.in_channels, \
            out_channels = conv.out_channels - cut,
            kernel_size = conv.kernel_size, \
            stride = conv.stride,
            padding = conv.padding,
            dilation = conv.dilation,
            groups = conv.groups,
            bias = conv.bias is not None) #(out_channels)
    old_weights = conv.weight.data.cpu().numpy() # (out_channels, in_channels, kernel_size[0], kernel_size[1]
    new_weights = new_conv.weight.data.cpu().numpy()
    # skip that filter's weight inside old_weights and store others into new_weights
    new_weights = np.delete(old_weights, filters_to_prune, axis = 0)
    new_conv.weight.data = torch.from_numpy(new_weights).cuda()
    if conv.bias is not None:
        bias_numpy = conv.bias.data.cpu().numpy()
        # change size to (out_channels - cut)
        bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)
        bias = np.delete(bias_numpy, filters_to_prune, axis = None)
        new_conv.bias.data = torch.from_numpy(bias).cuda() # new conv2
    # BatchNorm layer
    new_bn = torch.nn.BatchNorm2d(num_features=new_conv.out_channels, \
            eps=bn.eps, momentum=bn.momentum, affine=bn.affine)
    # gamma: size = (num_features)
    old_weights = bn.weight.data.cpu().numpy()
    # NOTE(review): this read is dead -- it is overwritten by np.delete on the
    # next line; sibling functions read new_bn.weight here instead. Harmless.
    new_weights = bn.weight.data.cpu().numpy()
    new_weights = np.delete(old_weights, filters_to_prune)
    new_bn.weight.data = torch.from_numpy(new_weights).cuda()
    # beta: size = (num_features)
    bias_numpy = bn.bias.data.cpu().numpy()
    # change size to (out_channels - cut)
    bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)
    bias = np.delete(bias_numpy, filters_to_prune)
    new_bn.bias.data = torch.from_numpy(bias).cuda() # new bn2
    # new conv for next_conv
    next_new_conv = \
        torch.nn.Conv2d(in_channels = next_conv.in_channels - cut,\
            out_channels = next_conv.out_channels, \
            kernel_size = next_conv.kernel_size, \
            stride = next_conv.stride,
            padding = next_conv.padding,
            dilation = next_conv.dilation,
            groups = next_conv.groups,
            bias = next_conv.bias is not None)
    old_weights = next_conv.weight.data.cpu().numpy()
    new_weights = next_new_conv.weight.data.cpu().numpy()
    new_weights = np.delete(old_weights, filters_to_prune, axis = 1)
    next_new_conv.weight.data = torch.from_numpy(new_weights).cuda()
    if next_conv.bias is not None:
        next_new_conv.bias.data = next_conv.bias.data # new conv3
    # replace with new block
    new_blk = Bottleneck(blk.conv1.in_channels, blk.conv1.out_channels, \
             blk.stride, downsample = blk.downsample)
    # keep all layers in residual path unchanged temporarily
    new_blk.conv1 = blk.conv1
    new_blk.bn1 = blk.bn1
    new_blk.conv2 = new_conv
    new_blk.bn2 = new_bn
    new_blk.conv3 = next_new_conv
    new_blk.bn3 = blk.bn3
    new_blk.relu = blk.relu
    # splice the rebuilt block into model.base at block_index
    base = torch.nn.Sequential(
            *(replace_layers(model.base, i, [block_index], \
                [new_blk]) for i, _ in enumerate(model.base)))
    # delete and replace
    del model.base
    model.base = base
    print("Filters prunned:", filters_to_prune)
    return model
| 43.953061 | 151 | 0.631402 | import torch
from torch.autograd import Variable
from torchvision import models
import cv2
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
import sys
import numpy as np
from models.resnet import BasicBlock, Bottleneck
def replace_layers(model, i, indexes, layers):
    """Return the scheduled replacement for position *i*, if any.

    ``indexes`` and ``layers`` are parallel: when *i* appears in ``indexes``
    the layer at the matching position of ``layers`` is returned; otherwise
    the original layer ``model[i]`` is kept unchanged.
    """
    try:
        # a replacement is scheduled when i appears in indexes
        return layers[indexes.index(i)]
    except ValueError:
        # i is not scheduled for replacement: keep the existing layer
        return model[i]
# helper function
def update_next_layers(model, layer_index, filters_to_prune):
    """Shrink the in_channels of the layer(s) following a pruned conv.

    Scans ``model.base`` forward from ``layer_index`` for the next plain
    ``Conv2d`` or BasicBlock/Bottleneck. The found conv (and, for a block
    with a downsample path, the downsample conv too) is rebuilt with the
    input channels listed in ``filters_to_prune`` deleted, and spliced back
    into ``model.base``.

    Args:
        model: network whose ``model.base`` is an ``nn.Sequential``.
        layer_index: index of the just-pruned layer/block inside
            ``model.base``; the search starts at the following index.
        filters_to_prune: output-channel indices that were removed upstream;
            the same indices are deleted from downstream input channels.

    Returns:
        The model with the following layer/block updated in place; returned
        unchanged when there is no following conv or nothing to prune.
        NOTE(review): tensors are moved with ``.cuda()`` -- CUDA is assumed.
    """
    # only need to change in_channels for all following objects based on filters_to_prune
    next_conv = None
    next_blk = None
    next_ds = None # if next one is a block, and this block has downsample path, you need to update both residual and downsample path
    offset = 1
    # search for the next conv, based on current conv with id = (layer_index, filter_index)
    while layer_index + offset < len(model.base._modules.items()):
        res = list(model.base._modules.items())[layer_index+offset] # name, module
        if isinstance(res[1], torch.nn.modules.conv.Conv2d):
            next_name, next_conv = res
            next_is_block = False
            break
        elif isinstance(res[1], (BasicBlock, Bottleneck)):
            next_is_block = True
            next_blk = res[1]
            if res[1].downsample is None:
                next_conv = res[1].conv1
                next_ds = None
            else:
                next_conv = res[1].conv1
                next_ds = res[1].downsample
            break
        offset = offset + 1
    if next_conv is None:
        print("No filter will be prunned for this layer (last layer)")
        return model
    if len(filters_to_prune) == 0:
        print("No filter will be prunned for this layer")
        return model
    cut = len(filters_to_prune)
    # next_conv must exists
    next_new_conv = \
        torch.nn.Conv2d(in_channels = next_conv.in_channels - cut,\
            out_channels = next_conv.out_channels, \
            kernel_size = next_conv.kernel_size, \
            stride = next_conv.stride,
            padding = next_conv.padding,
            dilation = next_conv.dilation,
            groups = next_conv.groups,
            bias = next_conv.bias is not None)
    # delete the pruned input channels (axis 1) from the copied weights
    old_weights = next_conv.weight.data.cpu().numpy()
    new_weights = next_new_conv.weight.data.cpu().numpy()
    new_weights = np.delete(old_weights, filters_to_prune, axis = 1)
    next_new_conv.weight.data = torch.from_numpy(new_weights).cuda()
    if next_conv.bias is not None:
        next_new_conv.bias.data = next_conv.bias.data
    # next_ds exists or not is okay, no matter next_is_block is True or not
    if next_ds is not None:
        old_conv_in_next_ds = next_ds[0]
        new_conv_in_next_new_ds = \
            torch.nn.Conv2d(in_channels = old_conv_in_next_ds.in_channels - cut,\
                out_channels = old_conv_in_next_ds.out_channels, \
                kernel_size = old_conv_in_next_ds.kernel_size, \
                stride = old_conv_in_next_ds.stride,
                padding = old_conv_in_next_ds.padding,
                dilation = old_conv_in_next_ds.dilation,
                groups = old_conv_in_next_ds.groups,
                bias = old_conv_in_next_ds.bias is not None)
        old_weights = old_conv_in_next_ds.weight.data.cpu().numpy()
        new_weights = new_conv_in_next_new_ds.weight.data.cpu().numpy()
        new_weights = np.delete(old_weights, filters_to_prune, axis = 1)
        new_conv_in_next_new_ds.weight.data = torch.from_numpy(new_weights).cuda()
        if old_conv_in_next_ds.bias is not None:
            new_conv_in_next_new_ds.bias.data = old_conv_in_next_ds.bias.data # bias won't change
        # keep the original BN of the downsample path (index 1) unchanged
        next_new_ds = torch.nn.Sequential(new_conv_in_next_new_ds, next_ds[1])
    else:
        next_new_ds = None
    # rebuild the following block with the updated conv1 (and downsample)
    if next_is_block:
        if isinstance(next_blk, BasicBlock):
            next_new_block = BasicBlock(next_blk.conv1.in_channels - cut, \
                next_blk.conv1.out_channels, next_blk.stride, downsample = next_new_ds)
            next_new_block.conv1 = next_new_conv
            next_new_block.bn1 = next_blk.bn1
            next_new_block.relu = next_blk.relu
            next_new_block.conv2 = next_blk.conv2
            next_new_block.bn2 = next_blk.bn2
        else:
            next_new_block = Bottleneck(next_blk.conv1.in_channels - cut, \
                next_blk.conv1.out_channels, next_blk.stride, downsample = next_new_ds)
            next_new_block.conv1 = next_new_conv
            next_new_block.bn1 = next_blk.bn1
            next_new_block.conv2 = next_blk.conv2
            next_new_block.bn2 = next_blk.bn2
            next_new_block.conv3 = next_blk.conv3
            next_new_block.bn3 = next_blk.bn3
            next_new_block.relu = next_blk.relu
    if not next_is_block:
        base = torch.nn.Sequential(
                *(replace_layers(model.base, i, [layer_index+offset], \
                    [next_new_conv]) for i, _ in enumerate(model.base)))
    else:
        base = torch.nn.Sequential(
                *(replace_layers(model.base, i, [layer_index+offset], \
                    [next_new_block]) for i, _ in enumerate(model.base)))
    del model.base
    model.base = base
    print("Finished update next layers.")
    return model
def prune_resconv_layer(model, layer_index, cut_ratio=0.2, use_bn = True):
    """Prune a plain (non-block) conv layer of ``model.base`` by L1 ranking.

    Filters of the conv at ``layer_index`` are ranked by mean absolute
    weight; the smallest ``cut_ratio`` fraction of them is removed from the
    conv (and, when ``use_bn``, from the BatchNorm assumed to sit at
    ``layer_index + 1``), then ``update_next_layers`` shrinks the following
    layer's input channels to match.

    Args:
        model: network whose ``model.base`` is an ``nn.Sequential``.
        layer_index: index of the target Conv2d inside ``model.base``.
        cut_ratio: fraction of filters to remove.
        use_bn: whether a BatchNorm directly follows the conv.

    Returns:
        The model with conv (+BN) replaced and next layers updated; returned
        unchanged when there is no following conv or nothing can be cut.
        NOTE(review): tensors are moved with ``.cuda()`` -- CUDA is assumed.
    """
    _, conv = list(model.base._modules.items())[layer_index]
    if use_bn:
        # BN is assumed to be the module right after the conv
        _, old_bn = list(model.base._modules.items())[layer_index + 1]
    next_conv = None
    offset = 1
    # find the next conv (or the conv1 of the next block) to size-check against
    while layer_index + offset < len(model.base._modules.items()):
        res = list(model.base._modules.items())[layer_index+offset]
        if isinstance(res[1], torch.nn.modules.conv.Conv2d):
            next_name, next_conv = res
            break
        elif isinstance(res[1], (BasicBlock, Bottleneck)):
            next_conv = res[1].conv1
            break
        offset = offset + 1
    if next_conv is None:
        print("No filter will be prunned for this layer (last layer)")
        return model
    num_filters = conv.weight.data.size(0)
    if num_filters <= 1:
        print("No filter will be prunned for this layer (num_filters<=1)")
        return model
    cut = int(cut_ratio * num_filters)
    if cut < 1:
        print("No filter will be prunned for this layer (cut<1)")
        return model
    if (num_filters - cut) < 1:
        print("No filter will be prunned for this layer (no filter left after cutting)")
        return model
    # rank filters by the normalized L1 norm of their weights
    abs_wgt = torch.abs(conv.weight.data)
    values = \
        torch.sum(abs_wgt, dim = 1, keepdim = True).\
            sum(dim=2, keepdim = True).sum(dim=3, keepdim = True)[:, 0, 0, 0]
    values = values / (abs_wgt.size(1) * abs_wgt.size(2) * abs_wgt.size(3))
    print("Ranking filters.. ")
    filters_to_prune = np.argsort(values.cpu().numpy())[:cut]
    print("Filters that will be prunned", filters_to_prune)
    print("Pruning filters.. ")
    new_conv = \
        torch.nn.Conv2d(in_channels = conv.in_channels, \
            out_channels = conv.out_channels - cut,
            kernel_size = conv.kernel_size, \
            stride = conv.stride,
            padding = conv.padding,
            dilation = conv.dilation,
            groups = conv.groups,
            bias = conv.bias is not None)
    old_weights = conv.weight.data.cpu().numpy()
    new_weights = new_conv.weight.data.cpu().numpy()
    # drop the pruned output channels (axis 0)
    new_weights = np.delete(old_weights, filters_to_prune, axis = 0)
    new_conv.weight.data = torch.from_numpy(new_weights).cuda()
    if conv.bias is not None: # no bias for conv layers
        bias_numpy = conv.bias.data.cpu().numpy()
        # change size to (out_channels - cut)
        bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)
        bias = np.delete(bias_numpy, filters_to_prune, axis = None)
        new_conv.bias.data = torch.from_numpy(bias).cuda()
    # BatchNorm modification
    # TODO: Extract this function outside as a separate func.
    if use_bn:
        new_bn = torch.nn.BatchNorm2d(num_features=new_conv.out_channels, \
                eps=old_bn.eps, momentum=old_bn.momentum, affine=old_bn.affine)
        # old_bn.affine == True, need to copy learning gamma and beta to new_bn
        # gamma: size = (num_features)
        old_weights = old_bn.weight.data.cpu().numpy()
        new_weights = new_bn.weight.data.cpu().numpy()
        new_weights = np.delete(old_weights, filters_to_prune)
        new_bn.weight.data = torch.from_numpy(new_weights).cuda()
        # beta: size = (num_features)
        bias_numpy = old_bn.bias.data.cpu().numpy()
        # change size to (out_channels - cut)
        bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)
        bias = np.delete(bias_numpy, filters_to_prune)
        new_bn.bias.data = torch.from_numpy(bias).cuda()
    if use_bn:
        # BatchNorm modification
        base = torch.nn.Sequential(
                *(replace_layers(model.base, i, [layer_index, layer_index+1], \
                    [new_conv, new_bn]) for i, _ in enumerate(model.base)))
        del old_bn
    else:
        # replace current layer and next_conv with new_conv and next_new_conv respectively
        base = torch.nn.Sequential(
                *(replace_layers(model.base, i, [layer_index], \
                    [new_conv]) for i, _ in enumerate(model.base)))
    del model.base # delete and replace with brand new one
    del conv
    model.base = base # update current layer
    model = update_next_layers(model, layer_index, filters_to_prune) # update following layers
    message = str(100*float(cut) / num_filters) + "%"
    print("Filters prunned", str(message))
    return model
def prune_resnet_lconv_layer(model, block_index, cut_ratio=0.2, use_bn = True):
    """Prune the downsample ("left") conv of a residual block by L1 ranking.

    Filters of ``blk.downsample[0]`` (and its BN ``blk.downsample[1]``) are
    ranked by mean absolute weight and the smallest ``cut_ratio`` fraction is
    removed; the block is rebuilt with the pruned downsample path and
    ``update_next_layers`` shrinks the following layer's input channels.
    The residual-path convs of this block are left unchanged here -- the
    caller is expected to prune them with the returned indices (see
    ``prune_rbconv_by_indices``) so the two paths match again.

    Args:
        model: network whose ``model.base`` is an ``nn.Sequential`` of
            layers/blocks (torchvision BasicBlock/Bottleneck).
        block_index: index of the target block inside ``model.base``.
        cut_ratio: fraction of downsample filters to remove.
        use_bn: only ``True`` is supported.

    Returns:
        Tuple ``(cut_indices, model)`` -- the pruned filter indices (``None``
        when nothing was pruned) and the updated model.
        NOTE(review): tensors are moved with ``.cuda()`` -- CUDA is assumed.
    """
    _, blk = list(model.base._modules.items())[block_index]
    cut_indices = None
    if not use_bn:
        print("ResNet without BN is not supported for prunning")
        return cut_indices, model
    # check whether the left path has conv layer for prunning
    if blk.downsample == None:
        print("No filters will be prunned because lconv doesn't exist")
        return cut_indices, model
    if not isinstance(blk, (BasicBlock, Bottleneck)):
        print("Only support for ResNet with BasicBlock or Bottleneck defined in torchvision")
        return cut_indices, model
    # downsample path is Sequential(conv, bn)
    lconv = blk.downsample[0]
    lbn = blk.downsample[1]
    next_conv = None
    offset = 1
    # find the next conv (or conv1 of the next block) that must absorb the cut
    while block_index + offset < len(model.base._modules.items()):
        res = list(model.base._modules.items())[block_index+offset]
        if isinstance(res[1], torch.nn.modules.conv.Conv2d):
            next_name, next_conv = res
            break
        elif isinstance(res[1], (BasicBlock, Bottleneck)):
            next_conv = res[1].conv1
            break
        offset = offset + 1
    if next_conv is None:
        print("No filters will be prunned because this is the last block")
        return cut_indices, model
    num_filters = lconv.weight.data.size(0)
    if num_filters <= 1:
        print("No filter will be prunned for this layer (num_filters<=1)")
        return cut_indices, model
    cut = int(cut_ratio * num_filters)
    if cut < 1:
        print("No filter will be prunned for this layer (cut<1)")
        return cut_indices, model
    if (num_filters - cut) < 1:
        print("No filter will be prunned for this layer (no filter left after cutting)")
        return cut_indices, model
    # rank filters by the normalized L1 norm of their weights
    abs_wgt = torch.abs(lconv.weight.data)
    values = \
        torch.sum(abs_wgt, dim = 1, keepdim = True).\
            sum(dim=2, keepdim = True).sum(dim=3, keepdim = True)[:, 0, 0, 0]
    values = values / (abs_wgt.size(1) * abs_wgt.size(2) * abs_wgt.size(3))
    print("Ranking filters.. ")
    filters_to_prune = np.argsort(values.cpu().numpy())[:cut]
    print("Filters that will be prunned", filters_to_prune)
    print("Pruning filters.. ")
    new_conv = \
        torch.nn.Conv2d(in_channels = lconv.in_channels, \
            out_channels = lconv.out_channels - cut,
            kernel_size = lconv.kernel_size, \
            stride = lconv.stride,
            padding = lconv.padding,
            dilation = lconv.dilation,
            groups = lconv.groups,
            bias = lconv.bias is not None)
    old_weights = lconv.weight.data.cpu().numpy()
    new_weights = new_conv.weight.data.cpu().numpy()
    # drop the pruned output channels (axis 0)
    new_weights = np.delete(old_weights, filters_to_prune, axis = 0)
    new_conv.weight.data = torch.from_numpy(new_weights).cuda()
    if lconv.bias is not None:
        bias_numpy = lconv.bias.data.cpu().numpy()
        # change size to (out_channels - cut)
        bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)
        bias = np.delete(bias_numpy, filters_to_prune, axis = None)
        new_conv.bias.data = torch.from_numpy(bias).cuda()
    # new BN layer after new_conv
    new_bn = torch.nn.BatchNorm2d(num_features=new_conv.out_channels, \
            eps=lbn.eps, momentum=lbn.momentum, affine=lbn.affine)
    # old_bn.affine == True, need to copy learnable gamma and beta to new_bn
    # gamma: size = (num_features)
    old_weights = lbn.weight.data.cpu().numpy()
    new_weights = new_bn.weight.data.cpu().numpy()
    new_weights = np.delete(old_weights, filters_to_prune)
    new_bn.weight.data = torch.from_numpy(new_weights).cuda()
    # beta: size = (num_features)
    bias_numpy = lbn.bias.data.cpu().numpy()
    # change size to (out_channels - cut)
    bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)
    bias = np.delete(bias_numpy, filters_to_prune)
    new_bn.bias.data = torch.from_numpy(bias).cuda()
    # replace
    # update current left conv + left BN layer, have BN by default
    new_ds = torch.nn.Sequential(
            *(replace_layers(blk.downsample, i, [0, 1], \
                [new_conv, new_bn]) for i, _ in enumerate(blk.downsample)))
    # delete current and replace with a brand new BLOCK
    if isinstance(blk, BasicBlock):
        # rely on conv1 of old block to get in_planes, out_planes, stride
        new_blk = BasicBlock(blk.conv1.in_channels, blk.conv1.out_channels, \
                blk.stride, downsample = new_ds)
        # keep all layers in residual path unchanged temporarily
        new_blk.conv1 = blk.conv1
        new_blk.bn1 = blk.bn1
        new_blk.relu = blk.relu
        new_blk.conv2 = blk.conv2
        new_blk.bn2 = blk.bn2
    else:
        new_blk = Bottleneck(blk.conv1.in_channels, blk.conv1.out_channels, \
                blk.stride, downsample = new_ds)
        # keep all layers in residual path unchanged temporarily
        new_blk.conv1 = blk.conv1
        new_blk.bn1 = blk.bn1
        new_blk.conv2 = blk.conv2
        new_blk.bn2 = blk.bn2
        new_blk.conv3 = blk.conv3
        new_blk.bn3 = blk.bn3
        new_blk.relu = blk.relu
    # now new_blk is ready, it can act as a layer and replace old blk with replace_layers()
    base = torch.nn.Sequential(
            *(replace_layers(model.base, i, [block_index], \
                [new_blk]) for i, _ in enumerate(model.base)))
    # delete and replace with brand new one
    del model.base # delete the things pointed by pointer
    del blk
    model.base = base # update current layer
    model = update_next_layers(model, block_index, filters_to_prune) # update following layers
    cut_indices = filters_to_prune
    message = str(100*float(cut) / num_filters) + "%"
    print("Filters prunned", str(message))
    return cut_indices, model
def prune_rbconv_by_indices(model, block_index, filters_to_prune, use_bn = True):
    """Prune specific output channels from the residual-branch conv of a ResNet
    block (conv2 for BasicBlock, conv3 for Bottleneck) plus its BatchNorm.

    Unlike the *_by_number variants, this does NOT touch any following layers:
    it assumes the left-branch (downsample) conv was already pruned with the
    same indices, so the block's output channel count stays consistent.

    Args:
        model: network whose ``base`` is a torch.nn.Sequential of ResNet blocks.
        block_index: position of the target block inside ``model.base``.
        filters_to_prune: indices of the output channels (filters) to remove.
        use_bn: must be True; BN-free ResNets are not supported here.

    Returns:
        The model with the rebuilt block (unchanged if a precondition fails).
    """
    _, blk = list(model.base._modules.items())[block_index]
    if not use_bn:
        print("ResNet without BN is not supported for prunning")
        return model
    # check whether the left path has conv layer for prunning
    if blk.downsample is None:
        print("Only support pruning for rbconv after lconv was pruned")
        return model
    if not isinstance(blk, (BasicBlock, Bottleneck)):
        print("Only support for ResNet with BasicBlock or Bottleneck defined in torchvision")
        return model
    if isinstance(blk, BasicBlock):
        # when it is BasicBlock, the rbconv is conv2, and its bn is bn2
        conv = blk.conv2
        bn = blk.bn2
    else:
        # when it is Bottleneck, the rbconv is conv3, and its bn is bn3
        conv = blk.conv3
        bn = blk.bn3
    # only need to update itself, no need to care about others such as next_ds/next_conv
    new_conv = \
        torch.nn.Conv2d(in_channels = conv.in_channels, \
            out_channels = conv.out_channels - len(filters_to_prune),
            kernel_size = conv.kernel_size, \
            stride = conv.stride,
            padding = conv.padding,
            dilation = conv.dilation,
            groups = conv.groups,
            bias = conv.bias is not None) #(out_channels)
    # Copy surviving filters; axis 0 of the weight tensor is out_channels.
    old_weights = conv.weight.data.cpu().numpy() # (out_channels, in_channels, kernel_size[0], kernel_size[1])
    new_weights = np.delete(old_weights, filters_to_prune, axis = 0)
    new_conv.weight.data = torch.from_numpy(new_weights).cuda()
    if conv.bias is not None:
        bias_numpy = conv.bias.data.cpu().numpy()
        bias = np.delete(bias_numpy, filters_to_prune, axis = None)
        new_conv.bias.data = torch.from_numpy(bias).cuda()
    # Matching BatchNorm with the reduced channel count; bn.affine is assumed
    # True, so copy the surviving learnable gamma/beta entries.
    new_bn = torch.nn.BatchNorm2d(num_features=new_conv.out_channels, \
            eps=bn.eps, momentum=bn.momentum, affine=bn.affine)
    # gamma: size = (num_features)
    old_weights = bn.weight.data.cpu().numpy()
    new_weights = np.delete(old_weights, filters_to_prune)
    new_bn.weight.data = torch.from_numpy(new_weights).cuda()
    # beta: size = (num_features)
    bias_numpy = bn.bias.data.cpu().numpy()
    bias = np.delete(bias_numpy, filters_to_prune)
    new_bn.bias.data = torch.from_numpy(bias).cuda()
    if isinstance(blk, BasicBlock):
        new_blk = BasicBlock(blk.conv1.in_channels, blk.conv1.out_channels, \
                    blk.stride, downsample = blk.downsample)
        # keep all other layers in the residual path unchanged
        new_blk.conv1 = blk.conv1
        new_blk.bn1 = blk.bn1
        new_blk.relu = blk.relu
        new_blk.conv2 = new_conv
        new_blk.bn2 = new_bn
    else:
        new_blk = Bottleneck(blk.conv1.in_channels, blk.conv1.out_channels, \
                    blk.stride, downsample = blk.downsample)
        # keep all other layers in the residual path unchanged
        new_blk.conv1 = blk.conv1
        new_blk.bn1 = blk.bn1
        new_blk.conv2 = blk.conv2
        new_blk.bn2 = blk.bn2
        new_blk.conv3 = new_conv
        new_blk.bn3 = new_bn
        new_blk.relu = blk.relu
    # now new_blk is ready; splice it into model.base in place of the old block
    base = torch.nn.Sequential(
            *(replace_layers(model.base, i, [block_index], \
                [new_blk]) for i, _ in enumerate(model.base)))
    del model.base
    model.base = base
    print("Filters prunned for rb layer:", filters_to_prune)
    return model
def prune_rbconv_by_number(model, block_index, num_cut, use_bn = True):
    """Rank and prune ``num_cut`` filters from the residual-branch conv of a
    ResNet block (conv2 for BasicBlock, conv3 for Bottleneck), rebuild the
    block, and propagate the channel change to the following layers via
    ``update_next_layers``.

    Filters are ranked by mean absolute weight; the smallest are pruned first.

    Args:
        model: network whose ``base`` is a torch.nn.Sequential of ResNet blocks.
        block_index: position of the target block inside ``model.base``.
        num_cut: number of filters (output channels) to remove.
        use_bn: must be True; BN-free ResNets are not supported here.

    Returns:
        The updated model (returned unchanged if a precondition fails).
    """
    _, blk = list(model.base._modules.items())[block_index]
    if not use_bn:
        print("ResNet without BN is not supported for prunning")
        return model
    if not isinstance(blk, (BasicBlock, Bottleneck)):
        print("Only support for ResNet with BasicBlock or Bottleneck defined in torchvision")
        return model
    if isinstance(blk, BasicBlock):
        conv = blk.conv2
        bn = blk.bn2
    else:
        conv = blk.conv3
        bn = blk.bn3
    num_filters = conv.weight.data.size(0)
    if num_filters <= 1:
        print("No filter will be prunned for this layer (num_filters<=1)")
        return model
    if num_cut < 1:
        print("Error: No filter will be prunned for this layer (cut<1)")
        return model
    if (num_filters - num_cut) < 1:
        print("Error: No filter will be prunned for this layer (no filter left after cutting)")
        return model
    # Rank filters by mean |weight|: sum over in_channels and both kernel
    # dimensions, then normalize by the per-filter weight count.
    abs_wgt = torch.abs(conv.weight.data)
    values = \
        torch.sum(abs_wgt, dim = 1, keepdim = True).\
            sum(dim=2, keepdim = True).sum(dim=3, keepdim = True)[:, 0, 0, 0]
    values = values / (abs_wgt.size(1) * abs_wgt.size(2) * abs_wgt.size(3))
    print("Ranking filters.. ")
    filters_to_prune = np.argsort(values.cpu().numpy())[:num_cut]
    print("Filters that will be prunned", filters_to_prune)
    print("Pruning filters.. ")
    # Replacement conv with num_cut fewer output channels.
    new_conv = \
        torch.nn.Conv2d(in_channels = conv.in_channels, \
            out_channels = conv.out_channels - num_cut,
            kernel_size = conv.kernel_size, \
            stride = conv.stride,
            padding = conv.padding,
            dilation = conv.dilation,
            groups = conv.groups,
            bias = conv.bias is not None)
    # Copy surviving filters; axis 0 of the weight tensor is out_channels.
    old_weights = conv.weight.data.cpu().numpy()
    new_weights = np.delete(old_weights, filters_to_prune, axis = 0)
    new_conv.weight.data = torch.from_numpy(new_weights).cuda()
    if conv.bias is not None:
        bias_numpy = conv.bias.data.cpu().numpy()
        bias = np.delete(bias_numpy, filters_to_prune, axis = None)
        new_conv.bias.data = torch.from_numpy(bias).cuda()
    # new BN layer after new_conv; bn.affine is assumed True, so copy the
    # surviving learnable gamma/beta entries.
    new_bn = torch.nn.BatchNorm2d(num_features=new_conv.out_channels, \
            eps=bn.eps, momentum=bn.momentum, affine=bn.affine)
    # gamma: size = (num_features)
    old_weights = bn.weight.data.cpu().numpy()
    new_weights = np.delete(old_weights, filters_to_prune)
    new_bn.weight.data = torch.from_numpy(new_weights).cuda()
    # beta: size = (num_features)
    bias_numpy = bn.bias.data.cpu().numpy()
    bias = np.delete(bias_numpy, filters_to_prune)
    new_bn.bias.data = torch.from_numpy(bias).cuda()
    if isinstance(blk, BasicBlock):
        # replace with new block
        new_blk = BasicBlock(blk.conv1.in_channels, blk.conv1.out_channels, \
                    blk.stride, downsample = blk.downsample)
        # keep all other layers in the residual path unchanged temporarily
        new_blk.conv1 = blk.conv1
        new_blk.bn1 = blk.bn1
        new_blk.relu = blk.relu
        new_blk.conv2 = new_conv # update with new conv
        new_blk.bn2 = new_bn # update with new bn
    else:
        # replace with new block
        new_blk = Bottleneck(blk.conv1.in_channels, blk.conv1.out_channels, \
                    blk.stride, downsample = blk.downsample)
        # keep all other layers in the residual path unchanged temporarily
        new_blk.conv1 = blk.conv1
        new_blk.bn1 = blk.bn1
        new_blk.conv2 = blk.conv2
        new_blk.bn2 = blk.bn2
        new_blk.conv3 = new_conv
        new_blk.bn3 = new_bn
        new_blk.relu = blk.relu
    base = torch.nn.Sequential(
            *(replace_layers(model.base, i, [block_index], \
                [new_blk]) for i, _ in enumerate(model.base)))
    # delete and replace, then fix the input channels of downstream layers
    del model.base
    del blk
    model.base = base
    model = update_next_layers(model, block_index, filters_to_prune)
    print("Filters prunned for rb layer:", filters_to_prune)
    return model
def prune_ruconv1_layer(model, block_index, cut_ratio=0.2, use_bn = True):
    """Prune ``cut_ratio`` of the filters from conv1 (the first residual-path
    conv) of a ResNet block, and shrink the input channels of conv2 to match.

    Only this block changes: conv1 feeds conv2 directly, so the block's output
    shape (and hence the rest of the network) is unaffected.

    Args:
        model: network whose ``base`` is a torch.nn.Sequential of ResNet blocks.
        block_index: position of the target block inside ``model.base``.
        cut_ratio: fraction of conv1's filters to remove.
        use_bn: must be True; BN-free ResNets are not supported here.

    Returns:
        The updated model (returned unchanged if a precondition fails).
    """
    _, blk = list(model.base._modules.items())[block_index]
    if not use_bn:
        print("ResNet without BN is not supported for prunning")
        return model
    if not isinstance(blk, (BasicBlock, Bottleneck)):
        print("Conv1 only for ResNet with BasicBlock or Bottleneck defined in torchvision")
        return model
    # cut conv1, and next conv is conv2
    conv = blk.conv1
    bn = blk.bn1
    next_conv = blk.conv2
    num_filters = conv.weight.data.size(0) # out_channels x in_channels x 3 x 3
    # skip the layer with only one filter left
    if num_filters <= 1:
        print("No filter will be prunned for this layer (num_filters<=1)")
        return model
    cut = int(cut_ratio * num_filters)
    if cut < 1:
        print("No filter will be prunned for this layer (cut<1)")
        return model
    if (num_filters - cut) < 1:
        print("No filter will be prunned for this layer (no filter left after cutting)")
        return model
    # Rank filters by mean absolute weight (smallest pruned first).
    abs_wgt = torch.abs(conv.weight.data)
    values = \
        torch.sum(abs_wgt, dim = 1, keepdim = True).\
            sum(dim=2, keepdim = True).sum(dim=3, keepdim = True)[:, 0, 0, 0]
    # Normalize the sum of weight by the filter dimensions
    values = values / (abs_wgt.size(1) * abs_wgt.size(2) * abs_wgt.size(3))
    print("Ranking filters.. ")
    filters_to_prune = np.argsort(values.cpu().numpy())[:cut] # order from smallest to largest
    print("Filters that will be prunned", filters_to_prune)
    print("Pruning filters.. ")
    # the updated conv for current conv, with cut output channels being pruned
    new_conv = \
        torch.nn.Conv2d(in_channels = conv.in_channels, \
            out_channels = conv.out_channels - cut,
            kernel_size = conv.kernel_size, \
            stride = conv.stride,
            padding = conv.padding,
            dilation = conv.dilation,
            groups = conv.groups,
            bias = conv.bias is not None) #(out_channels)
    # Copy surviving filters; axis 0 of the weight tensor is out_channels.
    old_weights = conv.weight.data.cpu().numpy() # (out_channels, in_channels, kernel_size[0], kernel_size[1])
    new_weights = np.delete(old_weights, filters_to_prune, axis = 0)
    new_conv.weight.data = torch.from_numpy(new_weights).cuda()
    if conv.bias is not None:
        bias_numpy = conv.bias.data.cpu().numpy()
        bias = np.delete(bias_numpy, filters_to_prune, axis = None)
        new_conv.bias.data = torch.from_numpy(bias).cuda()
    # Matching BN: copy the surviving learnable gamma/beta entries.
    new_bn = torch.nn.BatchNorm2d(num_features=new_conv.out_channels, \
            eps=bn.eps, momentum=bn.momentum, affine=bn.affine)
    old_weights = bn.weight.data.cpu().numpy()
    new_weights = np.delete(old_weights, filters_to_prune)
    new_bn.weight.data = torch.from_numpy(new_weights).cuda()
    bias_numpy = bn.bias.data.cpu().numpy()
    bias = np.delete(bias_numpy, filters_to_prune)
    new_bn.bias.data = torch.from_numpy(bias).cuda()
    # conv2 loses the same channels on its *input* side (axis 1); its bias
    # (per output channel) is unaffected.
    next_new_conv = \
        torch.nn.Conv2d(in_channels = next_conv.in_channels - cut,\
            out_channels = next_conv.out_channels, \
            kernel_size = next_conv.kernel_size, \
            stride = next_conv.stride,
            padding = next_conv.padding,
            dilation = next_conv.dilation,
            groups = next_conv.groups,
            bias = next_conv.bias is not None)
    old_weights = next_conv.weight.data.cpu().numpy()
    new_weights = np.delete(old_weights, filters_to_prune, axis = 1)
    next_new_conv.weight.data = torch.from_numpy(new_weights).cuda()
    if next_conv.bias is not None:
        next_new_conv.bias.data = next_conv.bias.data
    if isinstance(blk, BasicBlock):
        new_blk = BasicBlock(blk.conv1.in_channels, blk.conv1.out_channels, \
                    blk.stride, downsample = blk.downsample)
        new_blk.conv1 = new_conv
        new_blk.bn1 = new_bn
        new_blk.relu = blk.relu
        new_blk.conv2 = next_new_conv
        new_blk.bn2 = blk.bn2
    else:
        new_blk = Bottleneck(blk.conv1.in_channels, blk.conv1.out_channels, \
                    blk.stride, downsample = blk.downsample)
        new_blk.conv1 = new_conv
        new_blk.bn1 = new_bn
        new_blk.conv2 = next_new_conv
        new_blk.bn2 = blk.bn2
        new_blk.conv3 = blk.conv3
        new_blk.bn3 = blk.bn3
        new_blk.relu = blk.relu
    base = torch.nn.Sequential(
            *(replace_layers(model.base, i, [block_index], \
                [new_blk]) for i, _ in enumerate(model.base)))
    del model.base
    model.base = base
    print("Filters prunned:", filters_to_prune)
    return model
def prune_ruconv2_layer(model, block_index, cut_ratio=0.2, use_bn = True):
    """Prune ``cut_ratio`` of the filters from conv2 of a Bottleneck block and
    shrink the input channels of conv3 to match.

    Only this block changes: conv2 feeds conv3 directly, so the block's output
    shape (and hence the rest of the network) is unaffected.  BasicBlock is
    rejected here because its conv2 is the block output (see
    prune_rbconv_by_number for that case).

    Args:
        model: network whose ``base`` is a torch.nn.Sequential of ResNet blocks.
        block_index: position of the target block inside ``model.base``.
        cut_ratio: fraction of conv2's filters to remove.
        use_bn: must be True; BN-free ResNets are not supported here.

    Returns:
        The updated model (returned unchanged if a precondition fails).
    """
    _, blk = list(model.base._modules.items())[block_index]
    if not use_bn:
        print("ResNet without BN is not supported for prunning")
        return model
    if not isinstance(blk, Bottleneck):
        print("Conv2 only for ResNet with Bottleneck defined in torchvision")
        return model
    conv = blk.conv2
    bn = blk.bn2
    next_conv = blk.conv3
    num_filters = conv.weight.data.size(0)
    if num_filters <= 1:
        print("No filter will be prunned for this layer (num_filters<=1)")
        return model
    cut = int(cut_ratio * num_filters)
    if cut < 1:
        print("No filter will be prunned for this layer (cut<1)")
        return model
    if (num_filters - cut) < 1:
        print("No filter will be prunned for this layer (no filter left after cutting)")
        return model
    # Rank filters by mean absolute weight (smallest pruned first).
    abs_wgt = torch.abs(conv.weight.data)
    values = \
        torch.sum(abs_wgt, dim = 1, keepdim = True).\
            sum(dim=2, keepdim = True).sum(dim=3, keepdim = True)[:, 0, 0, 0]
    values = values / (abs_wgt.size(1) * abs_wgt.size(2) * abs_wgt.size(3))
    print("Ranking filters.. ")
    filters_to_prune = np.argsort(values.cpu().numpy())[:cut]
    print("Filters that will be prunned", filters_to_prune)
    print("Pruning filters.. ")
    # Replacement conv2 with cut fewer output channels.
    new_conv = \
        torch.nn.Conv2d(in_channels = conv.in_channels, \
            out_channels = conv.out_channels - cut,
            kernel_size = conv.kernel_size, \
            stride = conv.stride,
            padding = conv.padding,
            dilation = conv.dilation,
            groups = conv.groups,
            bias = conv.bias is not None)
    # Copy surviving filters; axis 0 of the weight tensor is out_channels.
    old_weights = conv.weight.data.cpu().numpy()
    new_weights = np.delete(old_weights, filters_to_prune, axis = 0)
    new_conv.weight.data = torch.from_numpy(new_weights).cuda()
    if conv.bias is not None:
        bias_numpy = conv.bias.data.cpu().numpy()
        bias = np.delete(bias_numpy, filters_to_prune, axis = None)
        new_conv.bias.data = torch.from_numpy(bias).cuda() # new conv2
    # BatchNorm layer: keep only the surviving gamma/beta entries.
    new_bn = torch.nn.BatchNorm2d(num_features=new_conv.out_channels, \
            eps=bn.eps, momentum=bn.momentum, affine=bn.affine)
    # gamma: size = (num_features)
    old_weights = bn.weight.data.cpu().numpy()
    new_weights = np.delete(old_weights, filters_to_prune)
    new_bn.weight.data = torch.from_numpy(new_weights).cuda()
    # beta: size = (num_features)
    bias_numpy = bn.bias.data.cpu().numpy()
    bias = np.delete(bias_numpy, filters_to_prune)
    new_bn.bias.data = torch.from_numpy(bias).cuda() # new bn2
    # conv3 loses the same channels on its *input* side (axis 1).
    next_new_conv = \
        torch.nn.Conv2d(in_channels = next_conv.in_channels - cut,\
            out_channels = next_conv.out_channels, \
            kernel_size = next_conv.kernel_size, \
            stride = next_conv.stride,
            padding = next_conv.padding,
            dilation = next_conv.dilation,
            groups = next_conv.groups,
            bias = next_conv.bias is not None)
    old_weights = next_conv.weight.data.cpu().numpy()
    new_weights = np.delete(old_weights, filters_to_prune, axis = 1)
    next_new_conv.weight.data = torch.from_numpy(new_weights).cuda()
    if next_conv.bias is not None:
        next_new_conv.bias.data = next_conv.bias.data # new conv3
    # replace with new block
    new_blk = Bottleneck(blk.conv1.in_channels, blk.conv1.out_channels, \
                blk.stride, downsample = blk.downsample)
    # keep all other layers in the residual path unchanged
    new_blk.conv1 = blk.conv1
    new_blk.bn1 = blk.bn1
    new_blk.conv2 = new_conv
    new_blk.bn2 = new_bn
    new_blk.conv3 = next_new_conv
    new_blk.bn3 = blk.bn3
    new_blk.relu = blk.relu
    base = torch.nn.Sequential(
            *(replace_layers(model.base, i, [block_index], \
                [new_blk]) for i, _ in enumerate(model.base)))
    # delete and replace
    del model.base
    model.base = base
    print("Filters prunned:", filters_to_prune)
    return model
| true | true |
f7204660bc2ad2aac0e43e529512a8b9923d1bed | 122,038 | py | Python | zerver/models.py | osamasarwar38/zulip | 58b93c3e830249b1202b8f396b36a2660e1cb8f9 | [
"Apache-2.0"
] | 1 | 2020-06-22T18:00:20.000Z | 2020-06-22T18:00:20.000Z | zerver/models.py | osamasarwar38/zulip | 58b93c3e830249b1202b8f396b36a2660e1cb8f9 | [
"Apache-2.0"
] | null | null | null | zerver/models.py | osamasarwar38/zulip | 58b93c3e830249b1202b8f396b36a2660e1cb8f9 | [
"Apache-2.0"
] | null | null | null | import datetime
import re
import sre_constants
import time
from collections import defaultdict
from datetime import timedelta
from typing import (
AbstractSet,
Any,
Callable,
DefaultDict,
Dict,
Iterable,
List,
Optional,
Sequence,
Set,
Tuple,
TypeVar,
Union,
)
import django.contrib.auth
from bitfield import BitField
from bitfield.types import BitHandler
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, UserManager
from django.contrib.postgres.fields import JSONField
from django.core.exceptions import ValidationError
from django.core.validators import MinLengthValidator, RegexValidator, URLValidator, validate_email
from django.db import models, transaction
from django.db.models import CASCADE, Manager, Q, Sum
from django.db.models.query import QuerySet
from django.db.models.signals import post_delete, post_save
from django.utils.timezone import now as timezone_now
from django.utils.translation import ugettext_lazy as _
from confirmation import settings as confirmation_settings
from zerver.lib import cache
from zerver.lib.cache import (
active_non_guest_user_ids_cache_key,
active_user_ids_cache_key,
bot_dict_fields,
bot_dicts_in_realm_cache_key,
bot_profile_cache_key,
cache_delete,
cache_set,
cache_with_key,
flush_message,
flush_realm,
flush_stream,
flush_submessage,
flush_used_upload_space_cache,
flush_user_profile,
generic_bulk_cached_fetch,
get_realm_used_upload_space_cache_key,
get_stream_cache_key,
realm_alert_words_automaton_cache_key,
realm_alert_words_cache_key,
realm_user_dict_fields,
realm_user_dicts_cache_key,
user_profile_by_api_key_cache_key,
user_profile_by_email_cache_key,
user_profile_by_id_cache_key,
user_profile_cache_key,
)
from zerver.lib.exceptions import JsonableError
from zerver.lib.pysa import mark_sanitized
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.lib.types import (
DisplayRecipientT,
ExtendedFieldElement,
ExtendedValidator,
FieldElement,
ProfileData,
ProfileDataElementBase,
RealmUserValidator,
UserFieldElement,
Validator,
)
from zerver.lib.utils import generate_random_token, make_safe_digest
from zerver.lib.validator import (
check_date,
check_int,
check_list,
check_long_string,
check_short_string,
check_url,
validate_choice_field,
)
# Maximum length (in characters) of a message topic name.
MAX_TOPIC_NAME_LENGTH = 60
# Maximum length (in characters) of a message body.
MAX_MESSAGE_LENGTH = 10000
# Maximum length of a language identifier (e.g. "en").
MAX_LANGUAGE_ID_LENGTH: int = 50
# Constrained TypeVar: APIs taking stream names accept either a sequence
# or a set of strings.
STREAM_NAMES = TypeVar('STREAM_NAMES', Sequence[str], AbstractSet[str])
def query_for_ids(query: QuerySet, user_ids: List[int], field: str) -> QuerySet:
    '''
    Optimize searches of the form `user_profile_id in (1, 2, 3, 4)`
    by building the WHERE clause directly; profiling shows significant
    speedups over the normal Django-based approach.

    Use this very carefully!  The caller must guard against empty
    lists of user_ids.
    '''
    assert user_ids
    return query.extra(
        where=[f'{field} IN %s'],
        params=(tuple(user_ids),),
    )
# Doing 1000 remote cache requests to get_display_recipient is quite slow,
# so add a local cache as well as the remote cache cache.
#
# This local cache has a lifetime of just a single request; it is
# cleared inside `flush_per_request_caches` in our middleware.  It
# could be replaced with smarter bulk-fetching logic that deduplicates
# queries for the same recipient; this is just a convenient way to
# write that code.
#
# Maps recipient_id -> display recipient (a stream name string, or a
# list of recipient dicts; see get_display_recipient_by_id below).
per_request_display_recipient_cache: Dict[int, DisplayRecipientT] = {}
def get_display_recipient_by_id(recipient_id: int, recipient_type: int,
                                recipient_type_id: Optional[int]) -> DisplayRecipientT:
    """
    returns: an object describing the recipient (using a cache).
    If the type is a stream, the type_id must be an int; a string is returned.
    Otherwise, type_id may be None; an array of recipient dicts is returned.
    """
    # Imported here to avoid a circular dependency.
    from zerver.lib.display_recipient import get_display_recipient_remote_cache

    # Serve from the per-request cache when possible (EAFP).
    try:
        return per_request_display_recipient_cache[recipient_id]
    except KeyError:
        result = get_display_recipient_remote_cache(
            recipient_id, recipient_type, recipient_type_id)
        per_request_display_recipient_cache[recipient_id] = result
        return result
def get_display_recipient(recipient: 'Recipient') -> DisplayRecipientT:
    # Convenience wrapper: unpack the Recipient row and delegate to the
    # cached-by-id variant.
    return get_display_recipient_by_id(recipient.id, recipient.type, recipient.type_id)
def get_realm_emoji_cache_key(realm: 'Realm') -> str:
    # Cache key used by Realm.get_emoji.
    return 'realm_emoji:' + str(realm.id)
def get_active_realm_emoji_cache_key(realm: 'Realm') -> str:
    # Cache key used by Realm.get_active_emoji.
    return 'active_realm_emoji:{}'.format(realm.id)
# This simple call-once caching saves ~500us in auth_enabled_helper,
# which is a significant optimization for common_context. Note that
# these values cannot change in a running production system, but do
# regularly change within unit tests; we address the latter by calling
# clear_supported_auth_backends_cache in our standard tearDown code.
supported_backends: Optional[Set[type]] = None

def supported_auth_backends() -> Set[type]:
    """Return Django's enabled authentication backends, computed at most
    once per process (the call-once caching described above).

    Unit tests invalidate this cache via
    clear_supported_auth_backends_cache() in their standard tearDown.
    """
    global supported_backends
    # Restore the cache: a previous "temporarily disabled for debugging"
    # change recomputed the backends on every call, defeating the
    # optimization this module documents and provides invalidation for.
    if supported_backends is None:
        supported_backends = django.contrib.auth.get_backends()
    assert supported_backends is not None
    return supported_backends
def clear_supported_auth_backends_cache() -> None:
    """Reset the module-level auth-backend cache (used by test teardown)."""
    global supported_backends
    supported_backends = None
class Realm(models.Model):
    """An organization (realm): one row per organization on the server,
    holding its settings, policies, and limits."""
    MAX_REALM_NAME_LENGTH = 40
    MAX_REALM_SUBDOMAIN_LENGTH = 40
    INVITES_STANDARD_REALM_DAILY_MAX = 3000
    MESSAGE_VISIBILITY_LIMITED = 10000
    # Order matters: these names index the `authentication_methods` bitfield.
    AUTHENTICATION_FLAGS = ['Google', 'Email', 'GitHub', 'LDAP', 'Dev',
                            'RemoteUser', 'AzureAD', 'SAML', 'GitLab', 'Apple']
    SUBDOMAIN_FOR_ROOT_DOMAIN = ''
    # User-visible display name and description used on e.g. the organization homepage
    name: Optional[str] = models.CharField(max_length=MAX_REALM_NAME_LENGTH, null=True)
    description: str = models.TextField(default="")
    # A short, identifier-like name for the organization. Used in subdomains;
    # e.g. on a server at example.com, an org with string_id `foo` is reached
    # at `foo.example.com`.
    string_id: str = models.CharField(max_length=MAX_REALM_SUBDOMAIN_LENGTH, unique=True)
    date_created: datetime.datetime = models.DateTimeField(default=timezone_now)
    deactivated: bool = models.BooleanField(default=False)
    # See RealmDomain for the domains that apply for a given organization.
    emails_restricted_to_domains: bool = models.BooleanField(default=False)
    invite_required: bool = models.BooleanField(default=True)
    invite_by_admins_only: bool = models.BooleanField(default=False)
    # None means "use the server-wide default"; see the max_invites property.
    _max_invites: Optional[int] = models.IntegerField(null=True, db_column='max_invites')
    disallow_disposable_email_addresses: bool = models.BooleanField(default=True)
    # Bitfield over AUTHENTICATION_FLAGS; default enables all 31 flag bits.
    authentication_methods: BitHandler = BitField(
        flags=AUTHENTICATION_FLAGS, default=2**31 - 1,
    )
    # Whether the organization has enabled inline image and URL previews.
    inline_image_preview: bool = models.BooleanField(default=True)
    inline_url_embed_preview: bool = models.BooleanField(default=False)
    # Whether digest emails are enabled for the organization.
    digest_emails_enabled: bool = models.BooleanField(default=False)
    # Day of the week on which the digest is sent (default: Tuesday).
    digest_weekday: int = models.SmallIntegerField(default=1)
    send_welcome_emails: bool = models.BooleanField(default=True)
    message_content_allowed_in_email_notifications: bool = models.BooleanField(default=True)
    mandatory_topics: bool = models.BooleanField(default=False)
    add_emoji_by_admins_only: bool = models.BooleanField(default=False)
    name_changes_disabled: bool = models.BooleanField(default=False)
    email_changes_disabled: bool = models.BooleanField(default=False)
    avatar_changes_disabled: bool = models.BooleanField(default=False)
    # Shared policy values used by the *_policy fields below.
    POLICY_MEMBERS_ONLY = 1
    POLICY_ADMINS_ONLY = 2
    POLICY_FULL_MEMBERS_ONLY = 3
    COMMON_POLICY_TYPES = [
        POLICY_MEMBERS_ONLY,
        POLICY_ADMINS_ONLY,
        POLICY_FULL_MEMBERS_ONLY,
    ]
    # Who in the organization is allowed to create streams.
    create_stream_policy: int = models.PositiveSmallIntegerField(
        default=POLICY_MEMBERS_ONLY)
    # Who in the organization is allowed to invite other users to streams.
    invite_to_stream_policy: int = models.PositiveSmallIntegerField(
        default=POLICY_MEMBERS_ONLY)
    USER_GROUP_EDIT_POLICY_MEMBERS = 1
    USER_GROUP_EDIT_POLICY_ADMINS = 2
    user_group_edit_policy: int = models.PositiveSmallIntegerField(
        default=USER_GROUP_EDIT_POLICY_MEMBERS)
    USER_GROUP_EDIT_POLICY_TYPES = [
        USER_GROUP_EDIT_POLICY_MEMBERS,
        USER_GROUP_EDIT_POLICY_ADMINS,
    ]
    PRIVATE_MESSAGE_POLICY_UNLIMITED = 1
    PRIVATE_MESSAGE_POLICY_DISABLED = 2
    private_message_policy: int = models.PositiveSmallIntegerField(
        default=PRIVATE_MESSAGE_POLICY_UNLIMITED)
    PRIVATE_MESSAGE_POLICY_TYPES = [
        PRIVATE_MESSAGE_POLICY_UNLIMITED,
        PRIVATE_MESSAGE_POLICY_DISABLED,
    ]
    # Who in the organization has access to users' actual email
    # addresses. Controls whether the UserProfile.email field is the
    # same as UserProfile.delivery_email, or is instead garbage.
    EMAIL_ADDRESS_VISIBILITY_EVERYONE = 1
    EMAIL_ADDRESS_VISIBILITY_MEMBERS = 2
    EMAIL_ADDRESS_VISIBILITY_ADMINS = 3
    EMAIL_ADDRESS_VISIBILITY_NOBODY = 4
    email_address_visibility: int = models.PositiveSmallIntegerField(
        default=EMAIL_ADDRESS_VISIBILITY_EVERYONE,
    )
    EMAIL_ADDRESS_VISIBILITY_TYPES = [
        EMAIL_ADDRESS_VISIBILITY_EVERYONE,
        # The MEMBERS level is not yet implemented on the backend.
        ## EMAIL_ADDRESS_VISIBILITY_MEMBERS,
        EMAIL_ADDRESS_VISIBILITY_ADMINS,
        EMAIL_ADDRESS_VISIBILITY_NOBODY,
    ]
    # Threshold in days for new users to create streams, and potentially take
    # some other actions.
    waiting_period_threshold: int = models.PositiveIntegerField(default=0)
    allow_message_deleting: bool = models.BooleanField(default=False)
    DEFAULT_MESSAGE_CONTENT_DELETE_LIMIT_SECONDS = 600 # if changed, also change in admin.js, setting_org.js
    message_content_delete_limit_seconds: int = models.IntegerField(
        default=DEFAULT_MESSAGE_CONTENT_DELETE_LIMIT_SECONDS,
    )
    allow_message_editing: bool = models.BooleanField(default=True)
    DEFAULT_MESSAGE_CONTENT_EDIT_LIMIT_SECONDS = 600 # if changed, also change in admin.js, setting_org.js
    message_content_edit_limit_seconds: int = models.IntegerField(
        default=DEFAULT_MESSAGE_CONTENT_EDIT_LIMIT_SECONDS,
    )
    # Whether users have access to message edit history
    allow_edit_history: bool = models.BooleanField(default=True)
    DEFAULT_COMMUNITY_TOPIC_EDITING_LIMIT_SECONDS = 86400
    allow_community_topic_editing: bool = models.BooleanField(default=True)
    # Defaults for new users
    default_twenty_four_hour_time: bool = models.BooleanField(default=False)
    default_language: str = models.CharField(default='en', max_length=MAX_LANGUAGE_ID_LENGTH)
    DEFAULT_NOTIFICATION_STREAM_NAME = 'general'
    INITIAL_PRIVATE_STREAM_NAME = 'core team'
    STREAM_EVENTS_NOTIFICATION_TOPIC = _('stream events')
    notifications_stream: Optional["Stream"] = models.ForeignKey(
        "Stream", related_name="+", null=True, blank=True, on_delete=CASCADE,
    )
    signup_notifications_stream: Optional["Stream"] = models.ForeignKey(
        "Stream", related_name="+", null=True, blank=True, on_delete=CASCADE,
    )
    RETAIN_MESSAGE_FOREVER = -1
    # For old messages being automatically deleted
    message_retention_days: Optional[int] = models.IntegerField(null=True)
    # When non-null, all but the latest this many messages in the organization
    # are inaccessible to users (but not deleted).
    message_visibility_limit: Optional[int] = models.IntegerField(null=True)
    # Messages older than this message ID in the organization are inaccessible.
    first_visible_message_id: int = models.IntegerField(default=0)
    # Valid org_types are {CORPORATE, COMMUNITY}
    CORPORATE = 1
    COMMUNITY = 2
    org_type: int = models.PositiveSmallIntegerField(default=CORPORATE)
    UPGRADE_TEXT_STANDARD = _("Available on Zulip Standard. Upgrade to access.")
    # plan_type controls various features around resource/feature
    # limitations for a Zulip organization on multi-tenant installations
    # like Zulip Cloud.
    SELF_HOSTED = 1
    LIMITED = 2
    STANDARD = 3
    STANDARD_FREE = 4
    plan_type: int = models.PositiveSmallIntegerField(default=SELF_HOSTED)
    # This value is also being used in static/js/settings_bots.bot_creation_policy_values.
    # On updating it here, update it there as well.
    BOT_CREATION_EVERYONE = 1
    BOT_CREATION_LIMIT_GENERIC_BOTS = 2
    BOT_CREATION_ADMINS_ONLY = 3
    bot_creation_policy: int = models.PositiveSmallIntegerField(default=BOT_CREATION_EVERYONE)
    BOT_CREATION_POLICY_TYPES = [
        BOT_CREATION_EVERYONE,
        BOT_CREATION_LIMIT_GENERIC_BOTS,
        BOT_CREATION_ADMINS_ONLY,
    ]
    # See upload_quota_bytes; don't interpret upload_quota_gb directly.
    UPLOAD_QUOTA_LIMITED = 5
    UPLOAD_QUOTA_STANDARD = 50
    upload_quota_gb: Optional[int] = models.IntegerField(null=True)
    VIDEO_CHAT_PROVIDERS = {
        'disabled': {
            'name': "None",
            'id': 0,
        },
        'jitsi_meet': {
            'name': "Jitsi Meet",
            'id': 1,
        },
        # ID 2 was used for the now-deleted Google Hangouts.
        # ID 3 reserved for optional Zoom, see below.
    }
    # Zoom is only offered when the server has Zoom OAuth credentials configured.
    if settings.VIDEO_ZOOM_CLIENT_ID is not None and settings.VIDEO_ZOOM_CLIENT_SECRET is not None:
        VIDEO_CHAT_PROVIDERS['zoom'] = {
            'name': "Zoom",
            'id': 3,
        }
    video_chat_provider = models.PositiveSmallIntegerField(default=VIDEO_CHAT_PROVIDERS['jitsi_meet']['id'])
    default_code_block_language: Optional[str] = models.TextField(null=True, default=None)
    # Define the types of the various automatically managed properties
    property_types: Dict[str, Union[type, Tuple[type, ...]]] = dict(
        add_emoji_by_admins_only=bool,
        allow_edit_history=bool,
        allow_message_deleting=bool,
        bot_creation_policy=int,
        create_stream_policy=int,
        invite_to_stream_policy=int,
        default_language=str,
        default_twenty_four_hour_time = bool,
        description=str,
        digest_emails_enabled=bool,
        disallow_disposable_email_addresses=bool,
        email_address_visibility=int,
        email_changes_disabled=bool,
        invite_required=bool,
        invite_by_admins_only=bool,
        inline_image_preview=bool,
        inline_url_embed_preview=bool,
        mandatory_topics=bool,
        message_retention_days=(int, type(None)),
        name=str,
        name_changes_disabled=bool,
        avatar_changes_disabled=bool,
        emails_restricted_to_domains=bool,
        send_welcome_emails=bool,
        message_content_allowed_in_email_notifications=bool,
        video_chat_provider=int,
        waiting_period_threshold=int,
        digest_weekday=int,
        private_message_policy=int,
        user_group_edit_policy=int,
        default_code_block_language=(str, type(None)),
    )
    # Valid values for digest_weekday (Monday=0 .. Sunday=6; default above
    # is 1, i.e. Tuesday).
    DIGEST_WEEKDAY_VALUES = [0, 1, 2, 3, 4, 5, 6]
    # Icon is the square mobile icon.
    ICON_FROM_GRAVATAR = 'G'
    ICON_UPLOADED = 'U'
    ICON_SOURCES = (
        (ICON_FROM_GRAVATAR, 'Hosted by Gravatar'),
        (ICON_UPLOADED, 'Uploaded by administrator'),
    )
    icon_source: str = models.CharField(
        default=ICON_FROM_GRAVATAR, choices=ICON_SOURCES, max_length=1,
    )
    icon_version: int = models.PositiveSmallIntegerField(default=1)
    # Logo is the horizontal logo we show in top-left of webapp navbar UI.
    LOGO_DEFAULT = 'D'
    LOGO_UPLOADED = 'U'
    LOGO_SOURCES = (
        (LOGO_DEFAULT, 'Default to Zulip'),
        (LOGO_UPLOADED, 'Uploaded by administrator'),
    )
    logo_source: str = models.CharField(
        default=LOGO_DEFAULT, choices=LOGO_SOURCES, max_length=1,
    )
    logo_version: int = models.PositiveSmallIntegerField(default=1)
    # Night-theme counterpart of logo_source/logo_version.
    night_logo_source: str = models.CharField(
        default=LOGO_DEFAULT, choices=LOGO_SOURCES, max_length=1,
    )
    night_logo_version: int = models.PositiveSmallIntegerField(default=1)
def authentication_methods_dict(self) -> Dict[str, bool]:
"""Returns the a mapping from authentication flags to their status,
showing only those authentication flags that are supported on
the current server (i.e. if EmailAuthBackend is not configured
on the server, this will not return an entry for "Email")."""
# This mapping needs to be imported from here due to the cyclic
# dependency.
from zproject.backends import AUTH_BACKEND_NAME_MAP
ret: Dict[str, bool] = {}
supported_backends = [backend.__class__ for backend in supported_auth_backends()]
# `authentication_methods` is a bitfield.types.BitHandler, not
# a true dict; since it is still python2- and python3-compat,
# `iteritems` is its method to iterate over its contents.
for k, v in self.authentication_methods.iteritems():
backend = AUTH_BACKEND_NAME_MAP[k]
if backend in supported_backends:
ret[k] = v
return ret
def __str__(self) -> str:
return f"<Realm: {self.string_id} {self.id}>"
    @cache_with_key(get_realm_emoji_cache_key, timeout=3600*24*7)
    def get_emoji(self) -> Dict[str, Dict[str, Iterable[str]]]:
        # Realm emoji data, cached for one week under get_realm_emoji_cache_key.
        # Exact semantics live in get_realm_emoji_uncached (defined elsewhere).
        return get_realm_emoji_uncached(self)
    @cache_with_key(get_active_realm_emoji_cache_key, timeout=3600*24*7)
    def get_active_emoji(self) -> Dict[str, Dict[str, Iterable[str]]]:
        # "Active" realm emoji (per the name, presumably excluding deactivated
        # ones — see get_active_realm_emoji_uncached); cached for one week.
        return get_active_realm_emoji_uncached(self)
def get_admin_users_and_bots(self) -> Sequence['UserProfile']:
"""Use this in contexts where we want administrative users as well as
bots with administrator privileges, like send_event calls for
notifications to all administrator users.
"""
# TODO: Change return type to QuerySet[UserProfile]
return UserProfile.objects.filter(realm=self, is_active=True,
role__in=[UserProfile.ROLE_REALM_ADMINISTRATOR,
UserProfile.ROLE_REALM_OWNER])
def get_human_admin_users(self) -> QuerySet:
"""Use this in contexts where we want only human users with
administrative privileges, like sending an email to all of a
realm's administrators (bots don't have real email addresses).
"""
# TODO: Change return type to QuerySet[UserProfile]
return UserProfile.objects.filter(realm=self, is_bot=False, is_active=True,
role__in=[UserProfile.ROLE_REALM_ADMINISTRATOR,
UserProfile.ROLE_REALM_OWNER])
    def get_active_users(self) -> Sequence['UserProfile']:
        # All active users (humans and bots) in the realm, with related
        # objects prefetched via select_related.
        # TODO: Change return type to QuerySet[UserProfile]
        return UserProfile.objects.filter(realm=self, is_active=True).select_related()
def get_human_owner_users(self) -> QuerySet:
return UserProfile.objects.filter(realm=self, is_bot=False,
role=UserProfile.ROLE_REALM_OWNER,
is_active=True)
    def get_bot_domain(self) -> str:
        # Presumably the domain used when synthesizing bot email
        # addresses — confirm at call sites.
        return get_fake_email_domain()
def get_notifications_stream(self) -> Optional['Stream']:
if self.notifications_stream is not None and not self.notifications_stream.deactivated:
return self.notifications_stream
return None
def get_signup_notifications_stream(self) -> Optional['Stream']:
if self.signup_notifications_stream is not None and not self.signup_notifications_stream.deactivated:
return self.signup_notifications_stream
return None
    @property
    def max_invites(self) -> int:
        # Daily invitation cap; falls back to the server-wide default
        # when no per-realm override (_max_invites) has been set.
        if self._max_invites is None:
            return settings.INVITES_DEFAULT_REALM_DAILY_MAX
        return self._max_invites

    @max_invites.setter
    def max_invites(self, value: Optional[int]) -> None:
        # Setting None restores the server-wide default.
        self._max_invites = value
def upload_quota_bytes(self) -> Optional[int]:
if self.upload_quota_gb is None:
return None
# We describe the quota to users in "GB" or "gigabytes", but actually apply
# it as gibibytes (GiB) to be a bit more generous in case of confusion.
return self.upload_quota_gb << 30
@cache_with_key(get_realm_used_upload_space_cache_key, timeout=3600*24*7)
def currently_used_upload_space_bytes(self) -> int:
used_space = Attachment.objects.filter(realm=self).aggregate(Sum('size'))['size__sum']
if used_space is None:
return 0
return used_space
    def ensure_not_on_limited_plan(self) -> None:
        # Raise a user-facing JSON error for realms on the limited plan;
        # used to gate features requiring the standard plan.
        if self.plan_type == Realm.LIMITED:
            raise JsonableError(self.UPGRADE_TEXT_STANDARD)
    @property
    def subdomain(self) -> str:
        # Alias: the realm's subdomain is its string_id.
        return self.string_id
@property
def display_subdomain(self) -> str:
"""Likely to be temporary function to avoid signup messages being sent
to an empty topic"""
if self.string_id == "":
return "."
return self.string_id
    @property
    def uri(self) -> str:
        # Full base URL for the realm, e.g. "https://" + host.
        return settings.EXTERNAL_URI_SCHEME + self.host
    @property
    def host(self) -> str:
        # The HTTP Host for this realm, derived from its subdomain.
        # Use mark sanitized to prevent false positives from Pysa thinking that
        # the host is user controlled.
        return mark_sanitized(self.host_for_subdomain(self.subdomain))
@staticmethod
def host_for_subdomain(subdomain: str) -> str:
if subdomain == Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:
return settings.EXTERNAL_HOST
default_host = f"{subdomain}.{settings.EXTERNAL_HOST}"
return settings.REALM_HOSTS.get(subdomain, default_host)
    @property
    def is_zephyr_mirror_realm(self) -> bool:
        # Hardcoded: the MIT Zephyr mirror realm has string_id "zephyr".
        return self.string_id == "zephyr"
    @property
    def webathena_enabled(self) -> bool:
        # Webathena is only enabled for the Zephyr mirror realm.
        return self.is_zephyr_mirror_realm
    @property
    def presence_disabled(self) -> bool:
        # Presence tracking is disabled in the Zephyr mirror realm.
        return self.is_zephyr_mirror_realm
    class Meta:
        # Custom Django permissions attached to the Realm model.
        permissions = (
            ('administer', "Administer a realm"),
            ('api_super_user', "Can send messages as other users for mirroring"),
        )
post_save.connect(flush_realm, sender=Realm)
def get_realm(string_id: str) -> Realm:
    # Raises Realm.DoesNotExist if no realm with this subdomain exists.
    return Realm.objects.get(string_id=string_id)
def name_changes_disabled(realm: Optional[Realm]) -> bool:
    """Whether users are blocked from changing their name, either
    server-wide or (if a realm is given) by realm policy."""
    if realm is not None and realm.name_changes_disabled:
        return True
    return settings.NAME_CHANGES_DISABLED
def avatar_changes_disabled(realm: Realm) -> bool:
    """Whether avatar changes are blocked server-wide or by realm policy."""
    if settings.AVATAR_CHANGES_DISABLED:
        return True
    return realm.avatar_changes_disabled
class RealmDomain(models.Model):
    """For an organization with emails_restricted_to_domains enabled, the list of
    allowed domains"""
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    # should always be stored lowercase
    domain: str = models.CharField(max_length=80, db_index=True)
    # If True, emails on subdomains of `domain` are also allowed.
    allow_subdomains = models.BooleanField(default=False)

    class Meta:
        # Each domain may appear at most once per realm.
        unique_together = ("realm", "domain")
# These functions should only be used on email addresses that have
# been validated via django.core.validators.validate_email
#
# Note that we need to use some care, since you can have multiple @-signs; e.g.
#     "tabbott@test"@zulip.com
# is a valid email address.
def email_to_username(email: str) -> str:
    """Return the local part of the address (everything before the
    final @-sign), lowercased."""
    username, _sep, _domain = email.rpartition("@")
    return username.lower()
# Returns the raw domain portion of the desired email address
def email_to_domain(email: str) -> str:
    """Return everything after the final @-sign, lowercased."""
    return email.rpartition("@")[2].lower()
class DomainNotAllowedForRealmError(Exception):
    """The email's domain is not permitted by the realm's domain restrictions."""
    pass
class DisposableEmailError(Exception):
    """The email address belongs to a disposable-email provider."""
    pass
class EmailContainsPlusError(Exception):
    """The email address contains a '+', which is not allowed here."""
    pass
def get_realm_domains(realm: Realm) -> List[Dict[str, str]]:
    # List of {"domain": ..., "allow_subdomains": ...} dicts for the realm.
    return list(realm.realmdomain_set.values('domain', 'allow_subdomains'))
class RealmEmoji(models.Model):
    """A custom emoji uploaded for a realm."""
    # The user who uploaded the emoji; kept nullable so emoji survive
    # the author's account being deleted.
    author: Optional["UserProfile"] = models.ForeignKey(
        "UserProfile", blank=True, null=True, on_delete=CASCADE,
    )
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    name: str = models.TextField(validators=[
        MinLengthValidator(1),
        # The second part of the regex (negative lookbehind) disallows names
        # ending with one of the punctuation characters.
        RegexValidator(regex=r'^[0-9a-z.\-_]+(?<![.\-_])$',
                       message=_("Invalid characters in emoji name"))])

    # The basename of the custom emoji's filename; see PATH_ID_TEMPLATE for the full path.
    file_name: Optional[str] = models.TextField(db_index=True, null=True, blank=True)

    # Deactivated emoji remain in the table so old messages still render.
    deactivated: bool = models.BooleanField(default=False)

    PATH_ID_TEMPLATE = "{realm_id}/emoji/images/{emoji_file_name}"

    def __str__(self) -> str:
        return f"<RealmEmoji({self.realm.string_id}): {self.id} {self.name} {self.deactivated} {self.file_name}>"
def get_realm_emoji_dicts(realm: Realm,
                          only_active_emojis: bool=False) -> Dict[str, Dict[str, Any]]:
    """Return a mapping from emoji id (stringified) to the API dict for
    each of the realm's custom emoji, optionally restricted to active ones."""
    # Imported here to avoid a circular import with zerver.lib.emoji.
    from zerver.lib.emoji import get_emoji_url
    query = RealmEmoji.objects.filter(realm=realm).select_related('author')
    if only_active_emojis:
        query = query.filter(deactivated=False)
    result: Dict[str, Dict[str, Any]] = {}
    for emoji in query.all():
        emoji_url = get_emoji_url(emoji.file_name, emoji.realm_id)
        result[str(emoji.id)] = dict(
            id=str(emoji.id),
            name=emoji.name,
            source_url=emoji_url,
            deactivated=emoji.deactivated,
            # author is prefetched via select_related above.
            author_id=emoji.author_id if emoji.author else None,
        )
    return result
def get_realm_emoji_uncached(realm: Realm) -> Dict[str, Dict[str, Any]]:
    # Uncached variant backing Realm.get_emoji's cache_with_key wrapper.
    return get_realm_emoji_dicts(realm)
def get_active_realm_emoji_uncached(realm: Realm) -> Dict[str, Dict[str, Any]]:
    """Return the realm's active (non-deactivated) custom emoji, keyed by
    emoji name rather than by id.

    Uncached variant backing Realm.get_active_emoji's cache wrapper.
    """
    realm_emojis = get_realm_emoji_dicts(realm, only_active_emojis=True)
    # The source dict is keyed by id; re-key by name. The old loop
    # iterated .items() while ignoring the keys — iterate the values
    # directly and build the result with a dict comprehension.
    return {emoji_dict['name']: emoji_dict for emoji_dict in realm_emojis.values()}
def flush_realm_emoji(sender: Any, **kwargs: Any) -> None:
    """Signal handler: recompute both realm-emoji cache entries after a
    RealmEmoji row is saved or deleted."""
    realm = kwargs['instance'].realm
    one_week = 3600 * 24 * 7
    cache_set(get_realm_emoji_cache_key(realm),
              get_realm_emoji_uncached(realm), timeout=one_week)
    cache_set(get_active_realm_emoji_cache_key(realm),
              get_active_realm_emoji_uncached(realm), timeout=one_week)
# Keep the realm-emoji caches in sync with the RealmEmoji table.
post_save.connect(flush_realm_emoji, sender=RealmEmoji)
post_delete.connect(flush_realm_emoji, sender=RealmEmoji)
def filter_pattern_validator(value: str) -> None:
    """Validate a realm filter (linkifier) pattern.

    The pattern must fit our restricted grammar — runs of characters
    from a safe set interleaved with named capture groups — and must
    also compile as a regular expression.
    """
    allowed_shape = re.compile(r'^(?:(?:[\w\-#_= /:]*|[+]|[!])(\(\?P<\w+>.+\)))+$')
    error_msg = _('Invalid filter pattern. Valid characters are %s.') % (
        '[ a-zA-Z_#=/:+!-]',)

    if not allowed_shape.match(str(value)):
        raise ValidationError(error_msg)

    try:
        re.compile(value)
    except sre_constants.error:
        # Passed the shape check, but is not itself a valid regex.
        raise ValidationError(error_msg)
def filter_format_validator(value: str) -> None:
    """Validate a realm filter URL format string: URL-safe literal text
    interleaved with at least one %(name)s substitution."""
    pattern = re.compile(r'^([\.\/:a-zA-Z0-9#_?=&;-]+%\(([a-zA-Z0-9_-]+)\)s)+[/a-zA-Z0-9#_?=&;-]*$')
    if pattern.match(value) is None:
        raise ValidationError(_('Invalid URL format string.'))
class RealmFilter(models.Model):
    """Realm-specific regular expressions to automatically linkify certain
    strings inside the markdown processor. See "Custom filters" in the settings UI.
    """
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    # Regex with named groups, validated by filter_pattern_validator.
    pattern: str = models.TextField(validators=[filter_pattern_validator])
    # URL template with %(name)s substitutions matching the pattern's groups.
    url_format_string: str = models.TextField(validators=[URLValidator(), filter_format_validator])

    class Meta:
        unique_together = ("realm", "pattern")

    def __str__(self) -> str:
        return f"<RealmFilter({self.realm.string_id}): {self.pattern} {self.url_format_string}>"
def get_realm_filters_cache_key(realm_id: int) -> str:
    """Remote-cache key under which a realm's filters are stored."""
    return '{}:all_realm_filters:{}'.format(cache.KEY_PREFIX, realm_id)
# We have a per-process cache to avoid doing 1000 remote cache queries during page load
# (it is reset between requests by flush_per_request_caches).
per_request_realm_filters_cache: Dict[int, List[Tuple[str, str, int]]] = {}
def realm_in_local_realm_filters_cache(realm_id: int) -> bool:
    # Whether this realm's filters are already in the per-process cache.
    return realm_id in per_request_realm_filters_cache
def realm_filters_for_realm(realm_id: int) -> List[Tuple[str, str, int]]:
    """Return the realm's (pattern, url_format_string, id) filter tuples,
    populating the per-process cache from the remote cache on a miss."""
    cached = per_request_realm_filters_cache.get(realm_id)
    if cached is None:
        cached = realm_filters_for_realm_remote_cache(realm_id)
        per_request_realm_filters_cache[realm_id] = cached
    return cached
@cache_with_key(get_realm_filters_cache_key, timeout=3600*24*7)
def realm_filters_for_realm_remote_cache(realm_id: int) -> List[Tuple[str, str, int]]:
    """Fetch a realm's filters from the database as
    (pattern, url_format_string, id) tuples; cached remotely for a week.
    """
    # List comprehension instead of the manual append loop (PERF401).
    return [(realm_filter.pattern, realm_filter.url_format_string, realm_filter.id)
            for realm_filter in RealmFilter.objects.filter(realm_id=realm_id)]
def all_realm_filters() -> Dict[int, List[Tuple[str, str, int]]]:
    """Map every realm id to its list of (pattern, url_format_string, id)
    filter tuples, reading the whole RealmFilter table."""
    by_realm: DefaultDict[int, List[Tuple[str, str, int]]] = defaultdict(list)
    for realm_filter in RealmFilter.objects.all():
        entry = (realm_filter.pattern, realm_filter.url_format_string, realm_filter.id)
        by_realm[realm_filter.realm_id].append(entry)
    return by_realm
def flush_realm_filter(sender: Any, **kwargs: Any) -> None:
    """Signal handler: invalidate both the remote and per-process filter
    caches when a RealmFilter row is saved or deleted."""
    realm_id = kwargs['instance'].realm_id
    cache_delete(get_realm_filters_cache_key(realm_id))
    # dict.pop with a default replaces the try/except KeyError dance.
    per_request_realm_filters_cache.pop(realm_id, None)
# Keep the filter caches in sync with the RealmFilter table.
post_save.connect(flush_realm_filter, sender=RealmFilter)
post_delete.connect(flush_realm_filter, sender=RealmFilter)
def flush_per_request_caches() -> None:
    # Reset the per-process caches; presumably invoked between requests —
    # confirm in middleware. Rebinds fresh dicts (rather than .clear())
    # so any outstanding aliases of the old dicts are unaffected.
    global per_request_display_recipient_cache
    per_request_display_recipient_cache = {}
    global per_request_realm_filters_cache
    per_request_realm_filters_cache = {}
# The Recipient table is used to map Messages to the set of users who
# received the message.  It is implemented as a set of triples (id,
# type_id, type). We have 3 types of recipients: Huddles (for group
# private messages), UserProfiles (for 1:1 private messages), and
# Streams. The recipient table maps a globally unique recipient id
# (used by the Message table) to the type-specific unique id (the
# stream id, user_profile id, or huddle id).
class Recipient(models.Model):
    # The type-specific id (stream id, user_profile id, or huddle id).
    type_id: int = models.IntegerField(db_index=True)
    # One of PERSONAL/STREAM/HUDDLE below.
    type: int = models.PositiveSmallIntegerField(db_index=True)
    # Valid types are {personal, stream, huddle}
    PERSONAL = 1
    STREAM = 2
    HUDDLE = 3

    class Meta:
        unique_together = ("type", "type_id")

    # N.B. If we used Django's choice=... we would get this for free (kinda)
    _type_names = {
        PERSONAL: 'personal',
        STREAM: 'stream',
        HUDDLE: 'huddle'}

    def type_name(self) -> str:
        # Human-readable name for self.type.
        # Raises KeyError if invalid
        return self._type_names[self.type]

    def __str__(self) -> str:
        display_recipient = get_display_recipient(self)
        return f"<Recipient: {display_recipient} ({self.type_id}, {self.type})>"
class UserProfile(AbstractBaseUser, PermissionsMixin):
    """A Zulip user account (human or bot) within a single Realm.

    (realm, email) is unique, so the same person on two realms has two
    UserProfile rows.
    """
    USERNAME_FIELD = 'email'
    MAX_NAME_LENGTH = 100
    MIN_NAME_LENGTH = 2
    API_KEY_LENGTH = 32
    NAME_INVALID_CHARS = ['*', '`', "\\", '>', '"', '@']

    DEFAULT_BOT = 1
    """
    Incoming webhook bots are limited to only sending messages via webhooks.
    Thus, it is less of a security risk to expose their API keys to third-party services,
    since they can't be used to read messages.
    """
    INCOMING_WEBHOOK_BOT = 2
    # This value is also being used in static/js/settings_bots.js.
    # On updating it here, update it there as well.
    OUTGOING_WEBHOOK_BOT = 3
    """
    Embedded bots run within the Zulip server itself; events are added to the
    embedded_bots queue and then handled by a QueueProcessingWorker.
    """
    EMBEDDED_BOT = 4

    BOT_TYPES = {
        DEFAULT_BOT: 'Generic bot',
        INCOMING_WEBHOOK_BOT: 'Incoming webhook',
        OUTGOING_WEBHOOK_BOT: 'Outgoing webhook',
        EMBEDDED_BOT: 'Embedded bot',
    }

    # Bot types implemented via the Service machinery (see SERVICE_BOT_TYPES usage).
    SERVICE_BOT_TYPES = [
        OUTGOING_WEBHOOK_BOT,
        EMBEDDED_BOT,
    ]

    # For historical reasons, Zulip has two email fields.  The
    # `delivery_email` field is the user's email address, where all
    # email notifications will be sent, and is used for all
    # authentication use cases.
    #
    # The `email` field is the same as delivery_email in organizations
    # with EMAIL_ADDRESS_VISIBILITY_EVERYONE.  For other
    # organizations, it will be a unique value of the form
    # user1234@example.com.  This field exists for backwards
    # compatibility in Zulip APIs where users are referred to by their
    # email address, not their ID; it should be used in all API use cases.
    #
    # Both fields are unique within a realm (in a case-insensitive fashion).
    delivery_email: str = models.EmailField(blank=False, db_index=True)
    email: str = models.EmailField(blank=False, db_index=True)

    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    # Foreign key to the Recipient object for PERSONAL type messages to this user.
    recipient = models.ForeignKey(Recipient, null=True, on_delete=models.SET_NULL)

    # The user's name.  We prefer the model of a full_name and
    # short_name over first+last because cultures vary on how many
    # names one has, whether the family name is first or last, etc.
    # It also allows organizations to encode a bit of non-name data in
    # the "name" attribute if desired, like gender pronouns,
    # graduation year, etc.  The short_name attribute is currently not
    # used anywhere, but the intent is that it would be used as the
    # shorter familiar name for addressing the user in the UI.
    full_name: str = models.CharField(max_length=MAX_NAME_LENGTH)
    short_name: str = models.CharField(max_length=MAX_NAME_LENGTH)

    date_joined: datetime.datetime = models.DateTimeField(default=timezone_now)
    tos_version: Optional[str] = models.CharField(null=True, max_length=10)
    api_key: str = models.CharField(max_length=API_KEY_LENGTH)

    # pointer points to Message.id, NOT UserMessage.id.
    pointer: int = models.IntegerField()

    # Whether the user has access to server-level administrator pages, like /activity
    is_staff: bool = models.BooleanField(default=False)

    # For a normal user, this is True unless the user or an admin has
    # deactivated their account.  The name comes from Django; this field
    # isn't related to presence or to whether the user has recently used Zulip.
    #
    # See also `long_term_idle`.
    is_active: bool = models.BooleanField(default=True, db_index=True)

    is_billing_admin: bool = models.BooleanField(default=False, db_index=True)

    is_bot: bool = models.BooleanField(default=False, db_index=True)
    bot_type: Optional[int] = models.PositiveSmallIntegerField(null=True, db_index=True)
    bot_owner: Optional["UserProfile"] = models.ForeignKey('self', null=True, on_delete=models.SET_NULL)

    # Each role has a superset of the permissions of the next higher
    # numbered role.  When adding new roles, leave enough space for
    # future roles to be inserted between currently adjacent
    # roles.  These constants appear in RealmAuditLog.extra_data, so
    # changes to them will require a migration of RealmAuditLog.
    ROLE_REALM_OWNER = 100
    ROLE_REALM_ADMINISTRATOR = 200
    # ROLE_MODERATOR = 300
    ROLE_MEMBER = 400
    ROLE_GUEST = 600
    role: int = models.PositiveSmallIntegerField(default=ROLE_MEMBER, db_index=True)

    ROLE_TYPES = [
        ROLE_REALM_OWNER,
        ROLE_REALM_ADMINISTRATOR,
        ROLE_MEMBER,
        ROLE_GUEST,
    ]

    # Whether the user has been "soft-deactivated" due to weeks of inactivity.
    # For these users we avoid doing UserMessage table work, as an optimization
    # for large Zulip organizations with lots of single-visit users.
    long_term_idle: bool = models.BooleanField(default=False, db_index=True)

    # When we last added basic UserMessage rows for a long_term_idle user.
    last_active_message_id: Optional[int] = models.IntegerField(null=True)

    # Mirror dummies are fake (!is_active) users used to provide
    # message senders in our cross-protocol Zephyr<->Zulip content
    # mirroring integration, so that we can display mirrored content
    # like native Zulip messages (with a name + avatar, etc.).
    is_mirror_dummy: bool = models.BooleanField(default=False)

    # API super users are allowed to forge messages as sent by another
    # user and to send to private streams; also used for Zephyr/Jabber mirroring.
    is_api_super_user: bool = models.BooleanField(default=False, db_index=True)

    ### Notifications settings. ###

    # Stream notifications.
    enable_stream_desktop_notifications: bool = models.BooleanField(default=False)
    enable_stream_email_notifications: bool = models.BooleanField(default=False)
    enable_stream_push_notifications: bool = models.BooleanField(default=False)
    enable_stream_audible_notifications: bool = models.BooleanField(default=False)
    notification_sound: str = models.CharField(max_length=20, default='zulip')
    wildcard_mentions_notify: bool = models.BooleanField(default=True)

    # PM + @-mention notifications.
    enable_desktop_notifications: bool = models.BooleanField(default=True)
    pm_content_in_desktop_notifications: bool = models.BooleanField(default=True)
    enable_sounds: bool = models.BooleanField(default=True)
    enable_offline_email_notifications: bool = models.BooleanField(default=True)
    message_content_in_email_notifications: bool = models.BooleanField(default=True)
    enable_offline_push_notifications: bool = models.BooleanField(default=True)
    enable_online_push_notifications: bool = models.BooleanField(default=True)

    # Options for the unread-count badge on the desktop app's icon.
    DESKTOP_ICON_COUNT_DISPLAY_MESSAGES = 1
    DESKTOP_ICON_COUNT_DISPLAY_NOTIFIABLE = 2
    DESKTOP_ICON_COUNT_DISPLAY_NONE = 3
    desktop_icon_count_display: int = models.PositiveSmallIntegerField(
        default=DESKTOP_ICON_COUNT_DISPLAY_MESSAGES)

    enable_digest_emails: bool = models.BooleanField(default=True)
    enable_login_emails: bool = models.BooleanField(default=True)
    realm_name_in_notifications: bool = models.BooleanField(default=False)
    presence_enabled: bool = models.BooleanField(default=True)

    # Used for rate-limiting certain automated messages generated by bots
    last_reminder: Optional[datetime.datetime] = models.DateTimeField(default=None, null=True)

    # Minutes to wait before warning a bot owner that their bot sent a message
    # to a nonexistent stream
    BOT_OWNER_STREAM_ALERT_WAITPERIOD = 1

    # API rate limits, formatted as a comma-separated list of range:max pairs
    rate_limits: str = models.CharField(default="", max_length=100)

    # Hours to wait before sending another email to a user
    EMAIL_REMINDER_WAITPERIOD = 24

    # Default streams for some deprecated/legacy classes of bot users.
    default_sending_stream: Optional["Stream"] = models.ForeignKey(
        "zerver.Stream", null=True, related_name="+", on_delete=CASCADE,
    )
    default_events_register_stream: Optional["Stream"] = models.ForeignKey(
        "zerver.Stream", null=True, related_name="+", on_delete=CASCADE,
    )
    default_all_public_streams: bool = models.BooleanField(default=False)

    # UI vars
    enter_sends: Optional[bool] = models.BooleanField(null=True, default=False)
    left_side_userlist: bool = models.BooleanField(default=False)

    # display settings
    default_language: str = models.CharField(default='en', max_length=MAX_LANGUAGE_ID_LENGTH)
    dense_mode: bool = models.BooleanField(default=True)
    fluid_layout_width: bool = models.BooleanField(default=False)
    high_contrast_mode: bool = models.BooleanField(default=False)
    night_mode: bool = models.BooleanField(default=False)
    translate_emoticons: bool = models.BooleanField(default=False)
    twenty_four_hour_time: bool = models.BooleanField(default=False)
    starred_message_counts: bool = models.BooleanField(default=False)

    # UI setting controlling Zulip's behavior of demoting in the sort
    # order and graying out streams with no recent traffic.  The
    # default behavior, automatic, enables this behavior once a user
    # is subscribed to 30+ streams in the webapp.
    DEMOTE_STREAMS_AUTOMATIC = 1
    DEMOTE_STREAMS_ALWAYS = 2
    DEMOTE_STREAMS_NEVER = 3
    DEMOTE_STREAMS_CHOICES = [
        DEMOTE_STREAMS_AUTOMATIC,
        DEMOTE_STREAMS_ALWAYS,
        DEMOTE_STREAMS_NEVER,
    ]
    demote_inactive_streams = models.PositiveSmallIntegerField(default=DEMOTE_STREAMS_AUTOMATIC)

    # A timezone name from the `tzdata` database, as found in pytz.all_timezones.
    #
    # The longest existing name is 32 characters long, so max_length=40 seems
    # like a safe choice.
    #
    # In Django, the convention is to use an empty string instead of NULL/None
    # for text-based fields. For more information, see
    # https://docs.djangoproject.com/en/1.10/ref/models/fields/#django.db.models.Field.null.
    timezone: str = models.CharField(max_length=40, default='')

    # Emojisets
    GOOGLE_EMOJISET = 'google'
    GOOGLE_BLOB_EMOJISET = 'google-blob'
    TEXT_EMOJISET = 'text'
    TWITTER_EMOJISET = 'twitter'
    EMOJISET_CHOICES = ((GOOGLE_EMOJISET, "Google modern"),
                        (GOOGLE_BLOB_EMOJISET, "Google classic"),
                        (TWITTER_EMOJISET, "Twitter"),
                        (TEXT_EMOJISET, "Plain text"))
    emojiset: str = models.CharField(default=GOOGLE_BLOB_EMOJISET, choices=EMOJISET_CHOICES, max_length=20)

    # Avatar source, mirroring Realm's icon_source pattern.
    AVATAR_FROM_GRAVATAR = 'G'
    AVATAR_FROM_USER = 'U'
    AVATAR_SOURCES = (
        (AVATAR_FROM_GRAVATAR, 'Hosted by Gravatar'),
        (AVATAR_FROM_USER, 'Uploaded by user'),
    )
    avatar_source: str = models.CharField(default=AVATAR_FROM_GRAVATAR, choices=AVATAR_SOURCES, max_length=1)
    avatar_version: int = models.PositiveSmallIntegerField(default=1)
    avatar_hash: Optional[str] = models.CharField(null=True, max_length=64)

    # Progress of the new-user tutorial.
    TUTORIAL_WAITING = 'W'
    TUTORIAL_STARTED = 'S'
    TUTORIAL_FINISHED = 'F'
    TUTORIAL_STATES = ((TUTORIAL_WAITING, "Waiting"),
                       (TUTORIAL_STARTED, "Started"),
                       (TUTORIAL_FINISHED, "Finished"))
    tutorial_status: str = models.CharField(default=TUTORIAL_WAITING, choices=TUTORIAL_STATES, max_length=1)

    # Contains serialized JSON of the form:
    #    [("step 1", true), ("step 2", false)]
    # where the second element of each tuple is if the step has been
    # completed.
    onboarding_steps: str = models.TextField(default='[]')

    zoom_token: Optional[object] = JSONField(default=None, null=True)

    objects: UserManager = UserManager()

    # Define the types of the various automatically managed properties
    property_types = dict(
        default_language=str,
        demote_inactive_streams=int,
        dense_mode=bool,
        emojiset=str,
        fluid_layout_width=bool,
        high_contrast_mode=bool,
        left_side_userlist=bool,
        night_mode=bool,
        starred_message_counts=bool,
        timezone=str,
        translate_emoticons=bool,
        twenty_four_hour_time=bool,
    )

    notification_setting_types = dict(
        enable_desktop_notifications=bool,
        enable_digest_emails=bool,
        enable_login_emails=bool,
        enable_offline_email_notifications=bool,
        enable_offline_push_notifications=bool,
        enable_online_push_notifications=bool,
        enable_sounds=bool,
        enable_stream_desktop_notifications=bool,
        enable_stream_email_notifications=bool,
        enable_stream_push_notifications=bool,
        enable_stream_audible_notifications=bool,
        wildcard_mentions_notify=bool,
        message_content_in_email_notifications=bool,
        notification_sound=str,
        pm_content_in_desktop_notifications=bool,
        desktop_icon_count_display=int,
        realm_name_in_notifications=bool,
        presence_enabled=bool,
    )

    class Meta:
        unique_together = (('realm', 'email'),)

    @property
    def profile_data(self) -> ProfileData:
        """This user's custom profile field values, one entry per field
        defined for the realm (value/rendered_value are None if unset)."""
        values = CustomProfileFieldValue.objects.filter(user_profile=self)
        user_data = {v.field_id: {"value": v.value, "rendered_value": v.rendered_value} for v in values}
        data: ProfileData = []
        for field in custom_profile_fields_for_realm(self.realm_id):
            field_values = user_data.get(field.id, None)
            if field_values:
                value, rendered_value = field_values.get("value"), field_values.get("rendered_value")
            else:
                value, rendered_value = None, None
            field_type = field.field_type
            if value is not None:
                # Convert the stored string into the field's native type.
                converter = field.FIELD_CONVERTERS[field_type]
                value = converter(value)

            field_data = field.as_dict()
            data.append({
                'id': field_data['id'],
                'name': field_data['name'],
                'type': field_data['type'],
                'hint': field_data['hint'],
                'field_data': field_data['field_data'],
                'order': field_data['order'],
                'value': value,
                'rendered_value': rendered_value,
            })

        return data

    def can_admin_user(self, target_user: 'UserProfile') -> bool:
        """Returns whether this user has permission to modify target_user"""
        if target_user.bot_owner == self:
            return True
        elif self.is_realm_admin and self.realm == target_user.realm:
            return True
        else:
            return False

    def __str__(self) -> str:
        return f"<UserProfile: {self.email} {self.realm}>"

    @property
    def is_new_member(self) -> bool:
        # True while the user is within the realm's waiting-period
        # threshold (in days) since joining.
        diff = (timezone_now() - self.date_joined).days
        if diff < self.realm.waiting_period_threshold:
            return True
        return False

    @property
    def is_realm_admin(self) -> bool:
        # Owners are a superset of administrators.
        return self.role == UserProfile.ROLE_REALM_ADMINISTRATOR or \
            self.role == UserProfile.ROLE_REALM_OWNER

    @is_realm_admin.setter
    def is_realm_admin(self, value: bool) -> None:
        if value:
            self.role = UserProfile.ROLE_REALM_ADMINISTRATOR
        elif self.role == UserProfile.ROLE_REALM_ADMINISTRATOR:
            # We need to be careful to not accidentally change
            # ROLE_GUEST to ROLE_MEMBER here.
            self.role = UserProfile.ROLE_MEMBER

    @property
    def is_realm_owner(self) -> bool:
        return self.role == UserProfile.ROLE_REALM_OWNER

    @property
    def is_guest(self) -> bool:
        return self.role == UserProfile.ROLE_GUEST

    @is_guest.setter
    def is_guest(self, value: bool) -> None:
        if value:
            self.role = UserProfile.ROLE_GUEST
        elif self.role == UserProfile.ROLE_GUEST:
            # We need to be careful to not accidentally change
            # ROLE_REALM_ADMINISTRATOR to ROLE_MEMBER here.
            self.role = UserProfile.ROLE_MEMBER

    @property
    def is_incoming_webhook(self) -> bool:
        return self.bot_type == UserProfile.INCOMING_WEBHOOK_BOT

    @property
    def allowed_bot_types(self) -> List[int]:
        """Bot types this user may create, given the realm's bot-creation
        policy and the server's embedded-bots setting."""
        allowed_bot_types = []
        if self.is_realm_admin or \
                not self.realm.bot_creation_policy == Realm.BOT_CREATION_LIMIT_GENERIC_BOTS:
            allowed_bot_types.append(UserProfile.DEFAULT_BOT)
        allowed_bot_types += [
            UserProfile.INCOMING_WEBHOOK_BOT,
            UserProfile.OUTGOING_WEBHOOK_BOT,
        ]
        if settings.EMBEDDED_BOTS_ENABLED:
            allowed_bot_types.append(UserProfile.EMBEDDED_BOT)
        return allowed_bot_types

    @staticmethod
    def emojiset_choices() -> List[Dict[str, str]]:
        # EMOJISET_CHOICES reshaped for the API: [{'key': ..., 'text': ...}].
        return [dict(key=emojiset[0], text=emojiset[1]) for emojiset in UserProfile.EMOJISET_CHOICES]

    @staticmethod
    def emails_from_ids(user_ids: Sequence[int]) -> Dict[int, str]:
        # Bulk map user ids to their (API) email addresses.
        rows = UserProfile.objects.filter(id__in=user_ids).values('id', 'email')
        return {row['id']: row['email'] for row in rows}

    def email_address_is_realm_public(self) -> bool:
        # Bots' email addresses are always public; humans' depend on the
        # realm's email_address_visibility policy.
        if self.realm.email_address_visibility == Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE:
            return True
        if self.is_bot:
            return True
        return False

    def has_permission(self, policy_name: str) -> bool:
        """Evaluate a realm permission policy (create_stream_policy or
        invite_to_stream_policy) for this user."""
        if policy_name not in ['create_stream_policy', 'invite_to_stream_policy']:
            raise AssertionError("Invalid policy")

        if self.is_realm_admin:
            return True

        policy_value = getattr(self.realm, policy_name)
        if policy_value == Realm.POLICY_ADMINS_ONLY:
            return False

        if self.is_guest:
            return False

        if policy_value == Realm.POLICY_MEMBERS_ONLY:
            return True

        # Remaining policy requires full-member status (past the waiting period).
        return not self.is_new_member

    def can_create_streams(self) -> bool:
        return self.has_permission('create_stream_policy')

    def can_subscribe_other_users(self) -> bool:
        return self.has_permission('invite_to_stream_policy')

    def can_access_public_streams(self) -> bool:
        return not (self.is_guest or self.realm.is_zephyr_mirror_realm)

    def can_access_all_realm_members(self) -> bool:
        return not (self.realm.is_zephyr_mirror_realm or self.is_guest)

    def major_tos_version(self) -> int:
        # Major version of the Terms of Service the user agreed to,
        # or -1 if they have not agreed to any version.
        if self.tos_version is not None:
            return int(self.tos_version.split('.')[0])
        else:
            return -1

    def format_requestor_for_logs(self) -> str:
        # "<user_id>@<subdomain>" tag used in request logs.
        return "{}@{}".format(self.id, self.realm.string_id or 'root')

    def set_password(self, password: Optional[str]) -> None:
        """Set the user's password after checking its strength; a None
        password marks the account as having no usable password."""
        if password is None:
            self.set_unusable_password()
            return

        from zproject.backends import check_password_strength
        if not check_password_strength(password):
            raise PasswordTooWeakError

        super().set_password(password)
class PasswordTooWeakError(Exception):
    """Raised by UserProfile.set_password when the password fails the
    server's strength check."""
    pass
class UserGroup(models.Model):
    """A named group of users within a realm (names unique per realm)."""
    name = models.CharField(max_length=100)
    members = models.ManyToManyField(UserProfile, through='UserGroupMembership')
    realm = models.ForeignKey(Realm, on_delete=CASCADE)
    description: str = models.TextField(default='')

    class Meta:
        unique_together = (('realm', 'name'),)
class UserGroupMembership(models.Model):
    """Through table for UserGroup.members; each user at most once per group."""
    user_group = models.ForeignKey(UserGroup, on_delete=CASCADE)
    user_profile = models.ForeignKey(UserProfile, on_delete=CASCADE)

    class Meta:
        unique_together = (('user_group', 'user_profile'),)
def receives_offline_push_notifications(user_profile: UserProfile) -> bool:
    """Whether this (human) user should get push notifications while offline."""
    if user_profile.is_bot:
        return False
    return user_profile.enable_offline_push_notifications
def receives_offline_email_notifications(user_profile: UserProfile) -> bool:
    """Whether this (human) user should get email notifications while offline."""
    if user_profile.is_bot:
        return False
    return user_profile.enable_offline_email_notifications
def receives_online_notifications(user_profile: UserProfile) -> bool:
    """Whether this (human) user should get push notifications while online."""
    if user_profile.is_bot:
        return False
    return user_profile.enable_online_push_notifications
def receives_stream_notifications(user_profile: UserProfile) -> bool:
    """Whether this (human) user has stream push notifications enabled."""
    if user_profile.is_bot:
        return False
    return user_profile.enable_stream_push_notifications
def remote_user_to_email(remote_user: str) -> str:
    """Convert a REMOTE_USER value into an email address, appending
    SSO_APPEND_DOMAIN when the server configures one."""
    if settings.SSO_APPEND_DOMAIN is None:
        return remote_user
    return remote_user + "@" + settings.SSO_APPEND_DOMAIN
# Make sure we flush the UserProfile object from our remote cache
# whenever we save it.
post_save.connect(flush_user_profile, sender=UserProfile)
class PreregistrationUser(models.Model):
    # Data on a partially created user, before the completion of
    # registration.  This is used in at least three major code paths:
    # * Realm creation, in which case realm is None.
    #
    # * Invitations, in which case referred_by will always be set.
    #
    # * Social authentication signup, where it's used to store data
    #   from the authentication step and pass it to the registration
    #   form.

    email: str = models.EmailField()

    # If the pre-registration process provides a suggested full name for this user,
    # store it here to use it to prepopulate the Full Name field in the registration form:
    full_name: Optional[str] = models.CharField(max_length=UserProfile.MAX_NAME_LENGTH, null=True)
    full_name_validated = models.BooleanField(default=False)
    # The user who sent the invitation, if any (see code-path comment above).
    referred_by: Optional[UserProfile] = models.ForeignKey(UserProfile, null=True, on_delete=CASCADE)
    # Streams the user will be subscribed to upon completing signup.
    streams: Manager = models.ManyToManyField('Stream')
    invited_at: datetime.datetime = models.DateTimeField(auto_now=True)
    realm_creation = models.BooleanField(default=False)
    # Indicates whether the user needs a password.  Users who were
    # created via SSO style auth (e.g. GitHub/Google) generally do not.
    password_required = models.BooleanField(default=True)

    # status: whether an object has been confirmed.
    #   if confirmed, set to confirmation.settings.STATUS_ACTIVE
    status: int = models.IntegerField(default=0)

    # The realm should only ever be None for PreregistrationUser
    # objects created as part of realm creation.
    realm: Optional[Realm] = models.ForeignKey(Realm, null=True, on_delete=CASCADE)

    # Changes to INVITED_AS should also be reflected in
    # settings_invites.invited_as_values in
    # static/js/settings_invites.js
    INVITE_AS = dict(
        MEMBER = 1,
        REALM_ADMIN = 2,
        GUEST_USER = 3,
    )
    invited_as: int = models.PositiveSmallIntegerField(default=INVITE_AS['MEMBER'])
def filter_to_valid_prereg_users(query: QuerySet) -> QuerySet:
    """Narrow a PreregistrationUser queryset to invitations that are still
    usable: not yet confirmed, not revoked, and not expired."""
    used_statuses = [confirmation_settings.STATUS_ACTIVE,
                     confirmation_settings.STATUS_REVOKED]
    cutoff = timezone_now() - datetime.timedelta(
        days=settings.INVITATION_LINK_VALIDITY_DAYS)
    return query.exclude(status__in=used_statuses).filter(invited_at__gte=cutoff)
class MultiuseInvite(models.Model):
    """An invitation not tied to a single email address (per the name,
    presumably redeemable by multiple signups -- confirm with callers);
    carries the streams and role to apply to users who register via it."""
    # The user who created this invitation link (always set, unlike
    # PreregistrationUser.referred_by).
    referred_by: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    # Streams associated with the invitation.
    streams: Manager = models.ManyToManyField('Stream')
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    # Role granted on signup; shares PreregistrationUser's INVITE_AS codes.
    invited_as: int = models.PositiveSmallIntegerField(default=PreregistrationUser.INVITE_AS['MEMBER'])
class EmailChangeStatus(models.Model):
    """Tracks a user's pending request to change their email address,
    awaiting confirmation."""
    new_email: str = models.EmailField()
    old_email: str = models.EmailField()
    # auto_now=True: refreshed on every save.
    updated_at: datetime.datetime = models.DateTimeField(auto_now=True)
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)

    # status: whether an object has been confirmed.
    # if confirmed, set to confirmation.settings.STATUS_ACTIVE
    status: int = models.IntegerField(default=0)

    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
class AbstractPushDeviceToken(models.Model):
    """Shared fields for registrations of mobile devices that should
    receive push notifications (APNS for iOS, GCM for Android)."""
    APNS = 1
    GCM = 2

    KINDS = (
        (APNS, 'apns'),
        (GCM, 'gcm'),
    )

    # Which push service this token belongs to (APNS or GCM).
    kind: int = models.PositiveSmallIntegerField(choices=KINDS)

    # The token is a unique device-specific token that is
    # sent to us from each device:
    #   - APNS token if kind == APNS
    #   - GCM registration id if kind == GCM
    token: str = models.CharField(max_length=4096, db_index=True)

    # TODO: last_updated should be renamed date_created, since it is
    # no longer maintained as a last_updated value.
    last_updated: datetime.datetime = models.DateTimeField(auto_now=True)

    # [optional] Contains the app id of the device if it is an iOS device
    ios_app_id: Optional[str] = models.TextField(null=True)

    class Meta:
        abstract = True
class PushDeviceToken(AbstractPushDeviceToken):
    # The user whose device this is.
    user: UserProfile = models.ForeignKey(UserProfile, db_index=True, on_delete=CASCADE)

    class Meta:
        # A given device token is registered at most once per user/service.
        unique_together = ("user", "kind", "token")
def generate_email_token_for_stream() -> str:
    """Produce a fresh 32-character random token for a stream's
    incoming-email address (used as the default for Stream.email_token)."""
    token = generate_random_token(32)
    return token
class Stream(models.Model):
    """A channel of conversation within a realm; messages sent to a
    stream are delivered to all its subscribers."""
    MAX_NAME_LENGTH = 60
    MAX_DESCRIPTION_LENGTH = 1024

    # Stream names are unique per realm (see Meta.unique_together) and
    # looked up case-insensitively (see get_realm_stream).
    name: str = models.CharField(max_length=MAX_NAME_LENGTH, db_index=True)
    realm: Realm = models.ForeignKey(Realm, db_index=True, on_delete=CASCADE)
    date_created: datetime.datetime = models.DateTimeField(default=timezone_now)
    # Deactivated streams are hidden from the API (see API_FIELDS note below).
    deactivated: bool = models.BooleanField(default=False)
    description: str = models.CharField(max_length=MAX_DESCRIPTION_LENGTH, default='')
    # HTML-rendered version of description.
    rendered_description: str = models.TextField(default='')

    # Foreign key to the Recipient object for STREAM type messages to this stream.
    recipient = models.ForeignKey(Recipient, null=True, on_delete=models.SET_NULL)

    # Whether joining requires an invitation. Nullable, with NULL
    # apparently treated like the False default (is_public() only
    # checks truthiness) -- TODO confirm why this column allows NULL.
    invite_only: Optional[bool] = models.BooleanField(null=True, default=False)
    # Whether message history is visible to newly-joined subscribers.
    history_public_to_subscribers: bool = models.BooleanField(default=False)

    # Whether this stream's content should be published by the web-public archive features
    is_web_public: bool = models.BooleanField(default=False)

    STREAM_POST_POLICY_EVERYONE = 1
    STREAM_POST_POLICY_ADMINS = 2
    STREAM_POST_POLICY_RESTRICT_NEW_MEMBERS = 3
    # TODO: Implement policy to restrict posting to a user group or admins.

    # Who in the organization has permission to send messages to this stream.
    stream_post_policy: int = models.PositiveSmallIntegerField(default=STREAM_POST_POLICY_EVERYONE)
    STREAM_POST_POLICY_TYPES = [
        STREAM_POST_POLICY_EVERYONE,
        STREAM_POST_POLICY_ADMINS,
        STREAM_POST_POLICY_RESTRICT_NEW_MEMBERS,
    ]

    # The unique thing about Zephyr public streams is that we never list their
    # users.  We may try to generalize this concept later, but for now
    # we just use a concrete field.  (Zephyr public streams aren't exactly like
    # invite-only streams--while both are private in terms of listing users,
    # for Zephyr we don't even list users to stream members, yet membership
    # is more public in the sense that you don't need a Zulip invite to join.
    # This field is populated directly from UserProfile.is_zephyr_mirror_realm,
    # and the reason for denormalizing field is performance.
    is_in_zephyr_realm: bool = models.BooleanField(default=False)

    # Used by the e-mail forwarder. The e-mail RFC specifies a maximum
    # e-mail length of 254, and our max stream length is 30, so we
    # have plenty of room for the token.
    email_token: str = models.CharField(
        max_length=32, default=generate_email_token_for_stream, unique=True,
    )

    # For old messages being automatically deleted.
    # Value NULL means "use retention policy of the realm".
    # Value -1 means "disable retention policy for this stream unconditionally".
    # Non-negative values have the natural meaning of "archive messages older than <value> days".
    message_retention_days: Optional[int] = models.IntegerField(null=True, default=None)

    # The very first message ID in the stream.  Used to help clients
    # determine whether they might need to display "more topics" for a
    # stream based on what messages they have cached.
    first_message_id: Optional[int] = models.IntegerField(null=True, db_index=True)

    def __str__(self) -> str:
        return f"<Stream: {self.name}>"

    def is_public(self) -> bool:
        # All streams are private in Zephyr mirroring realms.
        return not self.invite_only and not self.is_in_zephyr_realm

    def is_history_realm_public(self) -> bool:
        # Currently identical to is_public().
        return self.is_public()

    def is_history_public_to_subscribers(self) -> bool:
        return self.history_public_to_subscribers

    class Meta:
        unique_together = ("name", "realm")

    # Stream fields included whenever a Stream object is provided to
    # Zulip clients via the API.  A few details worth noting:
    # * "id" is represented as "stream_id" in most API interfaces.
    # * "email_token" is not realm-public and thus is not included here.
    # * is_in_zephyr_realm is a backend-only optimization.
    # * "deactivated" streams are filtered from the API entirely.
    # * "realm" and "recipient" are not exposed to clients via the API.
    # * "date_created" should probably be added here, as it's useful information
    #   to subscribers.
    API_FIELDS = [
        "name",
        "id",
        "description",
        "rendered_description",
        "invite_only",
        "is_web_public",
        "stream_post_policy",
        "history_public_to_subscribers",
        "first_message_id",
        "message_retention_days"
    ]

    @staticmethod
    def get_client_data(query: QuerySet) -> List[Dict[str, Any]]:
        # Restrict the SELECT to the API fields, then serialize each row.
        query = query.only(*Stream.API_FIELDS)
        return [row.to_dict() for row in query]

    def to_dict(self) -> Dict[str, Any]:
        # Serialize this stream for clients; "id" is exposed as "stream_id".
        result = {}
        for field_name in self.API_FIELDS:
            if field_name == "id":
                result['stream_id'] = self.id
                continue
            result[field_name] = getattr(self, field_name)
        # Alias derived from stream_post_policy (presumably kept for
        # backwards compatibility with older clients -- TODO confirm).
        result['is_announcement_only'] = self.stream_post_policy == Stream.STREAM_POST_POLICY_ADMINS
        return result
# Run flush_stream on every Stream save/delete (presumably a cache
# invalidation hook -- see its definition for details).
post_save.connect(flush_stream, sender=Stream)
post_delete.connect(flush_stream, sender=Stream)
class MutedTopic(models.Model):
    """Records that a user has muted a specific topic within a stream."""
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    stream: Stream = models.ForeignKey(Stream, on_delete=CASCADE)
    # Denormalized copy of the stream's Recipient, alongside the Stream FK.
    recipient: Recipient = models.ForeignKey(Recipient, on_delete=CASCADE)
    topic_name: str = models.CharField(max_length=MAX_TOPIC_NAME_LENGTH)
    # The default value for date_muted is a few weeks before tracking
    # of when topics were muted was first introduced.  It's designed
    # to be obviously incorrect so that users can tell it's backfilled data.
    date_muted: datetime.datetime = models.DateTimeField(default=datetime.datetime(2020, 1, 1, 0, 0, tzinfo=datetime.timezone.utc))

    class Meta:
        unique_together = ('user_profile', 'stream', 'topic_name')

    def __str__(self) -> str:
        return (f"<MutedTopic: ({self.user_profile.email}, {self.stream.name}, {self.topic_name}, {self.date_muted})>")
class Client(models.Model):
    """A kind of API client, identified by name; messages record which
    Client sent them (see AbstractMessage.sending_client)."""
    name: str = models.CharField(max_length=30, db_index=True, unique=True)

    def __str__(self) -> str:
        return f"<Client: {self.name}>"
# In-process memoization layer for get_client, keyed by KEY_PREFIX + name.
get_client_cache: Dict[str, Client] = {}

def get_client(name: str) -> Client:
    """Return the Client with the given name, memoizing in-process on top
    of the remote cache used by get_client_remote_cache."""
    # Accessing KEY_PREFIX through the module is necessary
    # because we need the updated value of the variable.
    cache_key = cache.KEY_PREFIX + name
    try:
        return get_client_cache[cache_key]
    except KeyError:
        client = get_client_remote_cache(name)
        get_client_cache[cache_key] = client
        return client
def get_client_cache_key(name: str) -> str:
    """Remote-cache key under which the Client row for `name` is stored."""
    digest = make_safe_digest(name)
    return 'get_client:' + digest
@cache_with_key(get_client_cache_key, timeout=3600*24*7)
def get_client_remote_cache(name: str) -> Client:
    """Fetch-or-create the Client row for `name`, caching it for a week."""
    client, _created = Client.objects.get_or_create(name=name)
    return client
@cache_with_key(get_stream_cache_key, timeout=3600*24*7)
def get_realm_stream(stream_name: str, realm_id: int) -> Stream:
    """Case-insensitive lookup of a stream by name within a realm,
    cached for a week. Raises Stream.DoesNotExist on no match."""
    normalized_name = stream_name.strip()
    return Stream.objects.select_related().get(
        realm_id=realm_id, name__iexact=normalized_name)
def stream_name_in_use(stream_name: str, realm_id: int) -> bool:
    """Whether a stream with this name (case-insensitive, whitespace
    stripped) already exists in the realm."""
    matches = Stream.objects.filter(
        realm_id=realm_id,
        name__iexact=stream_name.strip(),
    )
    return matches.exists()
def get_active_streams(realm: Optional[Realm]) -> QuerySet:
    # TODO: Change return type to QuerySet[Stream]
    # NOTE: Return value is used as a QuerySet, so cannot currently be Sequence[QuerySet]
    """
    Return all streams (including invite-only streams) that have not been deactivated.
    """
    realm_streams = Stream.objects.filter(realm=realm)
    return realm_streams.filter(deactivated=False)
def get_stream(stream_name: str, realm: Realm) -> Stream:
    """Convenience wrapper around get_realm_stream for callers that
    already hold a Realm object. Callers without one should call
    get_realm_stream directly to avoid fetching the Realm needlessly."""
    realm_id = realm.id
    return get_realm_stream(stream_name, realm_id)
def get_stream_by_id_in_realm(stream_id: int, realm: Realm) -> Stream:
    """Fetch a stream by primary key, scoped to the given realm."""
    candidates = Stream.objects.select_related()
    return candidates.get(realm=realm, id=stream_id)
def bulk_get_streams(realm: Realm, stream_names: STREAM_NAMES) -> Dict[str, Any]:
    """Look up several streams by name in `realm`, going through the
    per-stream cache where possible.

    Returns a dict mapping each lower-cased stream name to its Stream;
    names with no matching active stream are simply absent.
    """
    def fetch_streams_by_name(stream_names: List[str]) -> Sequence[Stream]:
        #
        # This should be just
        #
        # Stream.objects.select_related().filter(name__iexact__in=stream_names,
        #                                        realm_id=realm_id)
        #
        # But chaining __in and __iexact doesn't work with Django's
        # ORM, so we have the following hack to construct the relevant where clause
        where_clause = "upper(zerver_stream.name::text) IN (SELECT upper(name) FROM unnest(%s) AS name)"
        # Pass the Realm object itself: get_active_streams is annotated
        # to take Optional[Realm].  (The previous code passed realm.id,
        # which only worked because Django coerces pk values in filters.)
        return get_active_streams(realm).select_related().extra(
            where=[where_clause],
            params=(list(stream_names),))

    def stream_name_to_cache_key(stream_name: str) -> str:
        return get_stream_cache_key(stream_name, realm.id)

    def stream_to_lower_name(stream: Stream) -> str:
        return stream.name.lower()

    return generic_bulk_cached_fetch(stream_name_to_cache_key,
                                     fetch_streams_by_name,
                                     [stream_name.lower() for stream_name in stream_names],
                                     id_fetcher=stream_to_lower_name)
def get_huddle_recipient(user_profile_ids: Set[int]) -> Recipient:
    """Recipient object for the group-PM conversation among these users.

    The caller should ensure user_profile_ids includes the sender.
    Note that get_huddle hits the cache, and then we hit another cache
    to fetch the recipient; we may want to unify the caching strategy.
    """
    huddle = get_huddle(list(user_profile_ids))
    return huddle.recipient
def get_huddle_user_ids(recipient: Recipient) -> List[int]:
    """IDs of every member of the huddle, ascending by user_profile_id."""
    assert recipient.type == Recipient.HUDDLE
    members = Subscription.objects.filter(recipient=recipient)
    return members.order_by('user_profile_id').values_list('user_profile_id', flat=True)
def bulk_get_huddle_user_ids(recipients: List[Recipient]) -> Dict[int, List[int]]:
    """
    Takes a list of huddle-type recipients, returns a dict
    mapping recipient id to list of user ids in the huddle.

    Every recipient id appears in the result (with an empty list if no
    subscriptions exist); user ids are sorted ascending.
    """
    assert all(recipient.type == Recipient.HUDDLE for recipient in recipients)
    if not recipients:
        return {}

    subscriptions = Subscription.objects.filter(
        recipient__in=recipients,
    ).order_by('user_profile_id')

    # Group subscriptions by recipient in one pass.  The previous code
    # re-scanned the full subscription list once per recipient, which
    # was accidentally O(len(recipients) * len(subscriptions)).
    result_dict: Dict[int, List[int]] = {recipient.id: [] for recipient in recipients}
    for subscription in subscriptions:
        result_dict[subscription.recipient_id].append(subscription.user_profile_id)
    return result_dict
class AbstractMessage(models.Model):
    """Fields shared by Message and ArchivedMessage."""
    sender: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    # Target of the message: a stream, a 1:1 PM, or a huddle.
    recipient: Recipient = models.ForeignKey(Recipient, on_delete=CASCADE)

    # The message's topic.
    #
    # Early versions of Zulip called this concept a "subject", as in an email
    # "subject line", before changing to "topic" in 2013 (commit dac5a46fa).
    # UI and user documentation now consistently say "topic".  New APIs and
    # new code should generally also say "topic".
    #
    # See also the `topic_name` method on `Message`.
    subject: str = models.CharField(max_length=MAX_TOPIC_NAME_LENGTH, db_index=True)

    # Raw (markdown) content, plus its cached HTML rendering and the
    # renderer version used to produce it (see need_to_render_content).
    content: str = models.TextField()
    rendered_content: Optional[str] = models.TextField(null=True)
    rendered_content_version: Optional[int] = models.IntegerField(null=True)

    date_sent: datetime.datetime = models.DateTimeField('date sent', db_index=True)
    sending_client: Client = models.ForeignKey(Client, on_delete=CASCADE)

    last_edit_time: Optional[datetime.datetime] = models.DateTimeField(null=True)
    # A JSON-encoded list of objects describing any past edits to this
    # message, oldest first.
    edit_history: Optional[str] = models.TextField(null=True)

    # Denormalized content properties, all indexed.
    has_attachment: bool = models.BooleanField(default=False, db_index=True)
    has_image: bool = models.BooleanField(default=False, db_index=True)
    has_link: bool = models.BooleanField(default=False, db_index=True)

    class Meta:
        abstract = True

    def __str__(self) -> str:
        display_recipient = get_display_recipient(self.recipient)
        return f"<{self.__class__.__name__}: {display_recipient} / {self.subject} / {self.sender}>"
class ArchiveTransaction(models.Model):
    """Groups together all the rows archived in one archiving operation,
    so the operation can be audited and (via `restored`) undone."""
    timestamp: datetime.datetime = models.DateTimeField(default=timezone_now, db_index=True)
    # Marks if the data archived in this transaction has been restored:
    restored: bool = models.BooleanField(default=False, db_index=True)

    type: int = models.PositiveSmallIntegerField(db_index=True)
    # Valid types:
    RETENTION_POLICY_BASED = 1  # Archiving was executed due to automated retention policies
    MANUAL = 2  # Archiving was run manually, via move_messages_to_archive function

    # ForeignKey to the realm with which objects archived in this transaction are associated.
    # If type is set to MANUAL, this should be null.
    realm: Optional[Realm] = models.ForeignKey(Realm, null=True, on_delete=CASCADE)

    def __str__(self) -> str:
        return "ArchiveTransaction id: {id}, type: {type}, realm: {realm}, timestamp: {timestamp}".format(
            id=self.id,
            type="MANUAL" if self.type == self.MANUAL else "RETENTION_POLICY_BASED",
            realm=self.realm.string_id if self.realm else None,
            timestamp=self.timestamp,
        )
class ArchivedMessage(AbstractMessage):
    """Used as a temporary holding place for deleted messages before they
    are permanently deleted.  This is an important part of a robust
    'message retention' feature.
    """
    # The archiving operation that moved this message here.
    archive_transaction: ArchiveTransaction = models.ForeignKey(ArchiveTransaction, on_delete=CASCADE)
class Message(AbstractMessage):
    def topic_name(self) -> str:
        """Accessor for the message's topic.

        Prefer this helper over reading .subject directly; it eases an
        eventual migration of topics into a separate table.
        """
        return self.subject

    def set_topic_name(self, topic_name: str) -> None:
        """Mutator counterpart of topic_name()."""
        self.subject = topic_name

    def is_stream_message(self) -> bool:
        """Whether this message was sent to a stream, determined by the
        recipient's type.

        TODO: denormalize the message type onto Message, either
        explicitly (message.type) or implicitly (a non-NULL stream_id),
        to make this check cheaper.
        """
        return Recipient.STREAM == self.recipient.type

    def get_realm(self) -> Realm:
        """The realm this message belongs to, via its sender."""
        return self.sender.realm

    def save_rendered_content(self) -> None:
        """Persist only the rendered-content columns."""
        self.save(update_fields=["rendered_content", "rendered_content_version"])

    @staticmethod
    def need_to_render_content(rendered_content: Optional[str],
                               rendered_content_version: Optional[int],
                               bugdown_version: int) -> bool:
        """True when the cached rendering is missing or was produced by an
        older bugdown (markdown renderer) version."""
        if rendered_content is None:
            return True
        if rendered_content_version is None:
            return True
        return rendered_content_version < bugdown_version

    def to_log_dict(self) -> Dict[str, Any]:
        """Flatten this message into a plain dict for the message log."""
        return {
            'id': self.id,
            'sender_id': self.sender.id,
            'sender_email': self.sender.email,
            'sender_realm_str': self.sender.realm.string_id,
            'sender_full_name': self.sender.full_name,
            'sender_short_name': self.sender.short_name,
            'sending_client': self.sending_client.name,
            'type': self.recipient.type_name(),
            'recipient': get_display_recipient(self.recipient),
            'subject': self.topic_name(),
            'content': self.content,
            'timestamp': datetime_to_timestamp(self.date_sent),
        }

    def sent_by_human(self) -> bool:
        """Used to determine whether a message was sent by a full Zulip UI
        style client (and thus whether the message should be treated
        as sent by a human and automatically marked as read for the
        sender).  The purpose of this distinction is to ensure that
        message sent to the user by e.g. a Google Calendar integration
        using the user's own API key don't get marked as read
        automatically.
        """
        client_name = self.sending_client.name.lower()
        ui_clients = ('zulipandroid', 'zulipios', 'zulipdesktop',
                      'zulipmobile', 'zulipelectron', 'zulipterminal', 'snipe',
                      'website', 'ios', 'android')
        if client_name in ui_clients:
            return True
        return 'desktop app' in client_name

    @staticmethod
    def is_status_message(content: str, rendered_content: str) -> bool:
        """
        "status messages" start with /me and have special rendering:
            /me loves chocolate -> Full Name loves chocolate
        """
        return content.startswith('/me ')
def get_context_for_message(message: Message) -> Sequence[Message]:
    # TODO: Change return type to QuerySet[Message]
    """Up to 10 messages immediately preceding `message` in the same
    conversation (same recipient and topic), newest first, limited to
    the 15 minutes before it was sent."""
    window_start = message.date_sent - timedelta(minutes=15)
    prior = Message.objects.filter(
        recipient_id=message.recipient_id,
        subject=message.subject,
        id__lt=message.id,
        date_sent__gt=window_start,
    )
    return prior.order_by('-id')[:10]
# Run flush_message on every Message save (presumably a cache
# invalidation hook -- see its definition for details).
post_save.connect(flush_message, sender=Message)
class AbstractSubMessage(models.Model):
    # We can send little text messages that are associated with a regular
    # Zulip message.  These can be used for experimental widgets like embedded
    # games, surveys, mini threads, etc.  These are designed to be pretty
    # generic in purpose.

    sender: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    # Free-form type tag and payload; schema is up to the widget.
    msg_type: str = models.TextField()
    content: str = models.TextField()

    class Meta:
        abstract = True
class SubMessage(AbstractSubMessage):
    message: Message = models.ForeignKey(Message, on_delete=CASCADE)

    @staticmethod
    def get_raw_db_rows(needed_ids: List[int]) -> List[Dict[str, Any]]:
        """Submessage rows for the given message ids, as plain dicts,
        ordered by (message_id, id)."""
        fields = ['id', 'message_id', 'sender_id', 'msg_type', 'content']
        rows = SubMessage.objects.filter(message_id__in=needed_ids).values(*fields)
        ordered = rows.order_by('message_id', 'id')
        return list(ordered)
class ArchivedSubMessage(AbstractSubMessage):
    """Archived counterpart of SubMessage; points at ArchivedMessage."""
    message: ArchivedMessage = models.ForeignKey(ArchivedMessage, on_delete=CASCADE)
# Run flush_submessage on every SubMessage save (presumably a cache
# invalidation hook -- see its definition for details).
post_save.connect(flush_submessage, sender=SubMessage)
class AbstractReaction(models.Model):
    """For emoji reactions to messages (and potentially future reaction types).

    Emoji are surprisingly complicated to implement correctly.  For details
    on how this subsystem works, see:
      https://zulip.readthedocs.io/en/latest/subsystems/emoji.html
    """
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)

    # The user-facing name for an emoji reaction.  With emoji aliases,
    # there may be multiple accepted names for a given emoji; this
    # field encodes which one the user selected.
    emoji_name: str = models.TextField()

    UNICODE_EMOJI = 'unicode_emoji'
    REALM_EMOJI = 'realm_emoji'
    ZULIP_EXTRA_EMOJI = 'zulip_extra_emoji'
    REACTION_TYPES = ((UNICODE_EMOJI, _("Unicode emoji")),
                      (REALM_EMOJI, _("Custom emoji")),
                      (ZULIP_EXTRA_EMOJI, _("Zulip extra emoji")))
    reaction_type: str = models.CharField(default=UNICODE_EMOJI, choices=REACTION_TYPES, max_length=30)

    # A string that uniquely identifies a particular emoji.  The format varies
    # by type:
    #
    # * For Unicode emoji, a dash-separated hex encoding of the sequence of
    #   Unicode codepoints that define this emoji in the Unicode
    #   specification.  For examples, see "non_qualified" or "unified" in the
    #   following data, with "non_qualified" taking precedence when both present:
    #     https://raw.githubusercontent.com/iamcal/emoji-data/master/emoji_pretty.json
    #
    # * For realm emoji (aka user uploaded custom emoji), the ID
    #   (in ASCII decimal) of the RealmEmoji object.
    #
    # * For "Zulip extra emoji" (like :zulip:), the filename of the emoji.
    emoji_code: str = models.TextField()

    class Meta:
        abstract = True
        # A user may react at most once per emoji name, and at most once
        # per concrete (reaction_type, emoji_code) pair.
        unique_together = (("user_profile", "message", "emoji_name"),
                           ("user_profile", "message", "reaction_type", "emoji_code"))
class Reaction(AbstractReaction):
    message: Message = models.ForeignKey(Message, on_delete=CASCADE)

    @staticmethod
    def get_raw_db_rows(needed_ids: List[int]) -> List[Dict[str, Any]]:
        # Raw reaction rows (as dicts) for the given message ids.
        # NOTE(review): despite the List annotation, .values(...) returns
        # a (list-like, lazily evaluated) QuerySet, not a list.
        fields = ['message_id', 'emoji_name', 'emoji_code', 'reaction_type',
                  'user_profile__email', 'user_profile__id', 'user_profile__full_name']
        return Reaction.objects.filter(message_id__in=needed_ids).values(*fields)

    def __str__(self) -> str:
        return f"{self.user_profile.email} / {self.message.id} / {self.emoji_name}"
class ArchivedReaction(AbstractReaction):
    """Archived counterpart of Reaction; points at ArchivedMessage."""
    message: ArchivedMessage = models.ForeignKey(ArchivedMessage, on_delete=CASCADE)
# Whenever a message is sent, for each user subscribed to the
# corresponding Recipient object, we add a row to the UserMessage
# table indicating that that user received that message. This table
# allows us to quickly query any user's last 1000 messages to generate
# the home view.
#
# Additionally, the flags field stores metadata like whether the user
# has read the message, starred or collapsed the message, was
# mentioned in the message, etc.
#
# UserMessage is the largest table in a Zulip installation, even
# though each row is only 4 integers.
class AbstractUserMessage(models.Model):
    """One row per (user, message) delivery, carrying the per-user flags
    bitfield (read/starred/mentioned/...).  See the comment block above
    this class for the full rationale."""
    id: int = models.BigAutoField(primary_key=True)

    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    # The order here is important!  It's the order of fields in the bitfield.
    ALL_FLAGS = [
        'read',
        'starred',
        'collapsed',
        'mentioned',
        'wildcard_mentioned',
        # These next 4 flags are from features that have since been removed.
        'summarize_in_home',
        'summarize_in_stream',
        'force_expand',
        'force_collapse',
        # Whether the message contains any of the user's alert words.
        'has_alert_word',
        # The historical flag is used to mark messages which the user
        # did not receive when they were sent, but later added to
        # their history via e.g. starring the message.  This is
        # important accounting for the "Subscribed to stream" dividers.
        'historical',
        # Whether the message is a private message; this flag is a
        # denormalization of message.recipient.type to support an
        # efficient index on UserMessage for a user's private messages.
        'is_private',
        # Whether we've sent a push notification to the user's mobile
        # devices for this message that has not been revoked.
        'active_mobile_push_notification',
    ]
    # Certain flags are used only for internal accounting within the
    # Zulip backend, and don't make sense to expose to the API.
    NON_API_FLAGS = {"is_private", "active_mobile_push_notification"}
    # Certain additional flags are just set once when the UserMessage
    # row is created.
    NON_EDITABLE_FLAGS = {
        # These flags are bookkeeping and don't make sense to edit.
        "has_alert_word",
        "mentioned",
        "wildcard_mentioned",
        "historical",
        # Unused flags can't be edited.
        "force_expand",
        "force_collapse",
        "summarize_in_home",
        "summarize_in_stream",
    }
    flags: BitHandler = BitField(flags=ALL_FLAGS, default=0)

    class Meta:
        abstract = True
        unique_together = ("user_profile", "message")

    @staticmethod
    def where_unread() -> str:
        # Use this for Django ORM queries to access unread message.
        # This custom SQL plays nice with our partial indexes.  Grep
        # the code for example usage.
        # (1 == bit 0 == 'read', per ALL_FLAGS order.)
        return 'flags & 1 = 0'

    @staticmethod
    def where_starred() -> str:
        # Use this for Django ORM queries to access starred messages.
        # This custom SQL plays nice with our partial indexes.  Grep
        # the code for example usage.
        #
        # The key detail is that e.g.
        #   UserMessage.objects.filter(user_profile=user_profile, flags=UserMessage.flags.starred)
        # will generate a query involving `flags & 2 = 2`, which doesn't match our index.
        # (2 == bit 1 == 'starred', per ALL_FLAGS order.)
        return 'flags & 2 <> 0'

    @staticmethod
    def where_active_push_notification() -> str:
        # See where_starred for documentation.
        # (4096 == bit 12 == 'active_mobile_push_notification'.)
        return 'flags & 4096 <> 0'

    def flags_list(self) -> List[str]:
        """Names of the flags currently set on this row."""
        flags = int(self.flags)
        return self.flags_list_for_flags(flags)

    @staticmethod
    def flags_list_for_flags(val: int) -> List[str]:
        '''
        This function is highly optimized, because it actually slows down
        sending messages in a naive implementation.

        Decodes a raw bitfield value into flag names, skipping
        internal-only NON_API_FLAGS.
        '''
        flags = []
        mask = 1
        for flag in UserMessage.ALL_FLAGS:
            if (val & mask) and flag not in AbstractUserMessage.NON_API_FLAGS:
                flags.append(flag)
            mask <<= 1
        return flags

    def __str__(self) -> str:
        display_recipient = get_display_recipient(self.message.recipient)
        return f"<{self.__class__.__name__}: {display_recipient} / {self.user_profile.email} ({self.flags_list()})>"
class UserMessage(AbstractUserMessage):
    # Concrete delivery table; see AbstractUserMessage for the flags model.
    message: Message = models.ForeignKey(Message, on_delete=CASCADE)
def get_usermessage_by_message_id(user_profile: UserProfile, message_id: int) -> Optional[UserMessage]:
    """The UserMessage row tying `user_profile` to message `message_id`,
    or None if the user never received that message."""
    rows = UserMessage.objects.select_related()
    try:
        return rows.get(user_profile=user_profile, message__id=message_id)
    except UserMessage.DoesNotExist:
        return None
class ArchivedUserMessage(AbstractUserMessage):
    """Used as a temporary holding place for deleted UserMessages objects
    before they are permanently deleted.  This is an important part of
    a robust 'message retention' feature.
    """
    # Annotation corrected: the FK actually targets ArchivedMessage.
    message: ArchivedMessage = models.ForeignKey(ArchivedMessage, on_delete=CASCADE)
class AbstractAttachment(models.Model):
    """Metadata for a file uploaded to Zulip, shared by Attachment and
    ArchivedAttachment."""
    file_name: str = models.TextField(db_index=True)

    # path_id is a storage location agnostic representation of the path of the file.
    # If the path of a file is http://localhost:9991/user_uploads/a/b/abc/temp_file.py
    # then its path_id will be a/b/abc/temp_file.py.
    path_id: str = models.TextField(db_index=True, unique=True)
    owner: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    realm: Optional[Realm] = models.ForeignKey(Realm, blank=True, null=True, on_delete=CASCADE)
    create_time: datetime.datetime = models.DateTimeField(
        default=timezone_now, db_index=True,
    )
    # Size in bytes (assumption based on the name -- TODO confirm units).
    size: Optional[int] = models.IntegerField(null=True)

    # Whether this attachment has been posted to a public stream, and
    # thus should be available to all non-guest users in the
    # organization (even if they weren't a recipient of a message
    # linking to it).  This lets us avoid looking up the corresponding
    # messages/streams to check permissions before serving these files.
    is_realm_public: bool = models.BooleanField(default=False)

    class Meta:
        abstract = True

    def __str__(self) -> str:
        return f"<{self.__class__.__name__}: {self.file_name}>"
class ArchivedAttachment(AbstractAttachment):
    """Used as a temporary holding place for deleted Attachment objects
    before they are permanently deleted.  This is an important part of
    a robust 'message retention' feature.
    """
    messages: Manager = models.ManyToManyField(ArchivedMessage)
class Attachment(AbstractAttachment):
    # All messages whose content links to this upload.
    messages: Manager = models.ManyToManyField(Message)

    def is_claimed(self) -> bool:
        # An attachment is "claimed" once at least one message links to it.
        return self.messages.count() > 0

    def to_dict(self) -> Dict[str, Any]:
        """API serialization of this attachment and the messages using it."""
        return {
            'id': self.id,
            'name': self.file_name,
            'path_id': self.path_id,
            'size': self.size,
            # convert to JavaScript-style UNIX timestamp so we can take
            # advantage of client timezones.
            'create_time': time.mktime(self.create_time.timetuple()) * 1000,
            'messages': [{
                'id': m.id,
                # NOTE: despite the 'name' key, this is the message's
                # date_sent as a JavaScript-style timestamp.
                'name': time.mktime(m.date_sent.timetuple()) * 1000,
            } for m in self.messages.all()],
        }
# Run flush_used_upload_space_cache whenever an Attachment is saved or
# deleted (presumably invalidating the realm's used-storage figure).
post_save.connect(flush_used_upload_space_cache, sender=Attachment)
post_delete.connect(flush_used_upload_space_cache, sender=Attachment)
def validate_attachment_request(user_profile: UserProfile, path_id: str) -> Optional[bool]:
    """Tri-state permission check for serving an uploaded file.

    Returns None when no Attachment with `path_id` exists, True when
    `user_profile` may access it, and False otherwise.
    """
    try:
        attachment = Attachment.objects.get(path_id=path_id)
    except Attachment.DoesNotExist:
        return None

    # Owners can always access their own uploads.
    if attachment.owner == user_profile:
        return True

    # Realm-public files are accessible to any member of the same realm
    # who can see public streams (i.e. non-guests).
    same_realm = attachment.realm == user_profile.realm
    if attachment.is_realm_public and same_realm and user_profile.can_access_public_streams():
        return True

    # Anyone who received a message carrying the attachment can access it
    # (covers private messages and private-stream messages).
    messages = attachment.messages.all()
    received = UserMessage.objects.filter(user_profile=user_profile, message__in=messages)
    if received.exists():
        return True

    # The user didn't receive any of the messages that included this
    # attachment.  They may still have access if it was sent to a stream
    # they are subscribed to whose history is public to subscribers.
    relevant_stream_ids = Subscription.objects.filter(
        user_profile=user_profile,
        active=True,
        recipient__type=Recipient.STREAM,
        recipient__in=[m.recipient_id for m in messages]).values_list("recipient__type_id", flat=True)
    if len(relevant_stream_ids) == 0:
        return False

    return Stream.objects.filter(id__in=relevant_stream_ids,
                                 history_public_to_subscribers=True).exists()
def get_old_unclaimed_attachments(weeks_ago: int) -> Sequence[Attachment]:
    # TODO: Change return type to QuerySet[Attachment]
    """Attachments referenced by no message and created more than
    `weeks_ago` weeks ago (candidates for cleanup)."""
    cutoff = timezone_now() - datetime.timedelta(weeks=weeks_ago)
    return Attachment.objects.filter(messages=None, create_time__lt=cutoff)
class Subscription(models.Model):
    """Binds a user to a Recipient (stream, PM pair, or huddle) together
    with the user's per-subscription settings."""
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    recipient: Recipient = models.ForeignKey(Recipient, on_delete=CASCADE)

    # Whether the user has since unsubscribed.  We mark Subscription
    # objects as inactive, rather than deleting them, when a user
    # unsubscribes, so we can preserve user customizations like
    # notification settings, stream color, etc., if the user later
    # resubscribes.
    active: bool = models.BooleanField(default=True)
    # Whether this user had muted this stream.
    is_muted: Optional[bool] = models.BooleanField(null=True, default=False)

    DEFAULT_STREAM_COLOR = "#c2c2c2"
    color: str = models.CharField(max_length=10, default=DEFAULT_STREAM_COLOR)
    pin_to_top: bool = models.BooleanField(default=False)

    # These fields are stream-level overrides for the user's default
    # configuration for notification, configured in UserProfile.  The
    # default, None, means we just inherit the user-level default.
    desktop_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
    audible_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
    push_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
    email_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
    wildcard_mentions_notify: Optional[bool] = models.BooleanField(null=True, default=None)

    class Meta:
        unique_together = ("user_profile", "recipient")

    def __str__(self) -> str:
        return f"<Subscription: {self.user_profile} -> {self.recipient}>"

    # Subscription fields included whenever a Subscription object is provided to
    # Zulip clients via the API.  A few details worth noting:
    # * These fields will generally be merged with Stream.API_FIELDS
    #   data about the stream.
    # * "user_profile" is usually implied as full API access to Subscription
    #   is primarily done for the current user; API access to other users'
    #   subscriptions is generally limited to boolean yes/no.
    # * "id" and "recipient_id" are not included as they are not used
    #   in the Zulip API; it's an internal implementation detail.
    #   Subscription objects are always looked up in the API via
    #   (user_profile, stream) pairs.
    # * "active" is often excluded in API use cases where it is implied.
    # * "is_muted" often needs to be copied to not "in_home_view" for
    #   backwards-compatibility.
    API_FIELDS = [
        "active",
        "color",
        "is_muted",
        "pin_to_top",
        "audible_notifications",
        "desktop_notifications",
        "email_notifications",
        "push_notifications",
        "wildcard_mentions_notify",
    ]
@cache_with_key(user_profile_by_id_cache_key, timeout=3600*24*7)
def get_user_profile_by_id(uid: int) -> UserProfile:
    """Cached (one week) lookup of a UserProfile by primary key."""
    candidates = UserProfile.objects.select_related()
    return candidates.get(id=uid)
@cache_with_key(user_profile_by_email_cache_key, timeout=3600*24*7)
def get_user_profile_by_email(email: str) -> UserProfile:
    """This function is intended to be used by our unit tests and for
    manual manage.py shell work; robust code must use get_user or
    get_user_by_delivery_email instead, because Zulip supports
    multiple users with a given (delivery) email address existing on a
    single server (in different realms).
    """
    normalized = email.strip()
    return UserProfile.objects.select_related().get(delivery_email__iexact=normalized)
@cache_with_key(user_profile_by_api_key_cache_key, timeout=3600*24*7)
def maybe_get_user_profile_by_api_key(api_key: str) -> Optional[UserProfile]:
    """Cached lookup of a user by API key; returns None when no user
    matches.

    Failed lookups are cached as None on purpose: broken API clients
    may continually retry the same wrong key, and we want to answer
    those requests as cheaply as possible.
    """
    candidates = UserProfile.objects.select_related()
    try:
        return candidates.get(api_key=api_key)
    except UserProfile.DoesNotExist:
        return None
def get_user_profile_by_api_key(api_key: str) -> UserProfile:
    """Strict variant of maybe_get_user_profile_by_api_key: raises
    UserProfile.DoesNotExist instead of returning None."""
    profile = maybe_get_user_profile_by_api_key(api_key)
    if profile is not None:
        return profile
    raise UserProfile.DoesNotExist()
def get_user_by_delivery_email(email: str, realm: Realm) -> UserProfile:
    """Fetch a user given their delivery email. For use in
    authentication/registration contexts. Do not use for user-facing
    views (e.g. Zulip API endpoints) as doing so would violate the
    EMAIL_ADDRESS_VISIBILITY_ADMINS security model. Use get_user in
    those code paths.
    """
    normalized = email.strip()
    queryset = UserProfile.objects.select_related()
    return queryset.get(delivery_email__iexact=normalized, realm=realm)
def get_users_by_delivery_email(emails: Set[str], realm: Realm) -> QuerySet:
    """This is similar to get_user_by_delivery_email, and
    it has the same security caveats.  It gets multiple
    users and returns a QuerySet, since most callers
    will only need two or three fields.

    If you are using this to get large UserProfile objects, you are
    probably making a mistake, but if you must,
    then use `select_related`.
    """

    '''
    Django doesn't support delivery_email__iexact__in, so
    we simply OR all the filters that we'd do for the
    one-email case.
    '''
    email_filter = Q()
    for email in emails:
        email_filter |= Q(delivery_email__iexact=email.strip())

    return UserProfile.objects.filter(realm=realm).filter(email_filter)
@cache_with_key(user_profile_cache_key, timeout=3600*24*7)
def get_user(email: str, realm: Realm) -> UserProfile:
    """Fetch the user by its visible-to-other-users username (the
    `email` field). For use in API contexts; do not use in
    authentication/registration contexts as doing so will break
    authentication in organizations using
    EMAIL_ADDRESS_VISIBILITY_ADMINS. In those code paths, use
    get_user_by_delivery_email.
    """
    normalized = email.strip()
    queryset = UserProfile.objects.select_related()
    return queryset.get(email__iexact=normalized, realm=realm)
def get_active_user(email: str, realm: Realm) -> UserProfile:
    """Variant of get_user that excludes deactivated users.
    See the get_user docstring for important usage notes."""
    profile = get_user(email, realm)
    if profile.is_active:
        return profile
    raise UserProfile.DoesNotExist()
def get_user_profile_by_id_in_realm(uid: int, realm: Realm) -> UserProfile:
    """Fetch the user with the given ID, restricted to the given realm."""
    queryset = UserProfile.objects.select_related()
    return queryset.get(id=uid, realm=realm)
def get_active_user_profile_by_id_in_realm(uid: int, realm: Realm) -> UserProfile:
    """Variant of get_user_profile_by_id_in_realm that excludes
    deactivated users."""
    profile = get_user_profile_by_id_in_realm(uid, realm)
    if profile.is_active:
        return profile
    raise UserProfile.DoesNotExist()
def get_user_including_cross_realm(email: str, realm: Optional[Realm]=None) -> UserProfile:
    """Like get_user, but resolves cross-realm system bot emails without
    requiring a realm (realm is only required for normal users)."""
    if not is_cross_realm_bot_email(email):
        assert realm is not None
        return get_user(email, realm)
    return get_system_bot(email)
@cache_with_key(bot_profile_cache_key, timeout=3600*24*7)
def get_system_bot(email: str) -> UserProfile:
    """Look up a system bot by email, cached for a week."""
    normalized = email.strip()
    return UserProfile.objects.select_related().get(email__iexact=normalized)
def get_user_by_id_in_realm_including_cross_realm(
        uid: int,
        realm: Optional[Realm],
) -> UserProfile:
    """Fetch a user by ID, accepting either members of `realm` or
    cross-realm system bots; raises UserProfile.DoesNotExist otherwise."""
    user_profile = get_user_profile_by_id(uid)
    if user_profile.realm == realm:
        return user_profile

    # Note: This doesn't validate whether the `realm` passed in is
    # None/invalid for the CROSS_REALM_BOT_EMAILS case.
    if user_profile.delivery_email not in settings.CROSS_REALM_BOT_EMAILS:
        raise UserProfile.DoesNotExist()
    return user_profile
@cache_with_key(realm_user_dicts_cache_key, timeout=3600*24*7)
def get_realm_user_dicts(realm_id: int) -> List[Dict[str, Any]]:
    """Fetch lightweight user dicts (realm_user_dict_fields) for a realm."""
    queryset = UserProfile.objects.filter(realm_id=realm_id)
    return queryset.values(*realm_user_dict_fields)
@cache_with_key(active_user_ids_cache_key, timeout=3600*24*7)
def active_user_ids(realm_id: int) -> List[int]:
    """IDs of all active users in the realm, cached for a week."""
    id_values = UserProfile.objects.filter(
        realm_id=realm_id,
        is_active=True,
    ).values_list('id', flat=True)
    return list(id_values)
@cache_with_key(active_non_guest_user_ids_cache_key, timeout=3600*24*7)
def active_non_guest_user_ids(realm_id: int) -> List[int]:
    """IDs of active, non-guest users in the realm, cached for a week."""
    id_values = UserProfile.objects.filter(
        realm_id=realm_id,
        is_active=True,
    ).exclude(
        role=UserProfile.ROLE_GUEST,
    ).values_list('id', flat=True)
    return list(id_values)
def get_source_profile(email: str, string_id: str) -> Optional[UserProfile]:
    """Look up a user by delivery email in the realm with the given
    subdomain; returns None if either the realm or user is missing."""
    try:
        realm = get_realm(string_id)
        return get_user_by_delivery_email(email, realm)
    except (Realm.DoesNotExist, UserProfile.DoesNotExist):
        return None
@cache_with_key(bot_dicts_in_realm_cache_key, timeout=3600*24*7)
def get_bot_dicts_in_realm(realm: Realm) -> List[Dict[str, Any]]:
    """Fetch lightweight dicts (bot_dict_fields) for all bots in a realm."""
    bots = UserProfile.objects.filter(realm=realm, is_bot=True)
    return bots.values(*bot_dict_fields)
def is_cross_realm_bot_email(email: str) -> bool:
    """True if email belongs to one of the server's cross-realm system bots."""
    lowered = email.lower()
    return lowered in settings.CROSS_REALM_BOT_EMAILS
# The Huddle class represents a group of individuals who have had a
# Group Private Message conversation together. The actual membership
# of the Huddle is stored in the Subscription table just like with
# Streams, and a hash of that list is stored in the huddle_hash field
# below, to support efficiently mapping from a set of users to the
# corresponding Huddle object.
class Huddle(models.Model):
    # TODO: We should consider whether using
    # CommaSeparatedIntegerField would be better.
    # Digest (via make_safe_digest) of the sorted, comma-joined member
    # user IDs; see get_huddle_hash.  max_length=40 suggests a SHA-1 hex
    # digest -- confirm against make_safe_digest's implementation.
    huddle_hash: str = models.CharField(max_length=40, db_index=True, unique=True)
    # Foreign key to the Recipient object for this Huddle.
    recipient = models.ForeignKey(Recipient, null=True, on_delete=models.SET_NULL)
def get_huddle_hash(id_list: List[int]) -> str:
    """Compute the canonical hash for a huddle's (deduplicated, sorted)
    member user IDs."""
    unique_ids = sorted(set(id_list))
    hash_key = ",".join(map(str, unique_ids))
    return make_safe_digest(hash_key)
def huddle_hash_cache_key(huddle_hash: str) -> str:
    """Cache key under which the Huddle with this hash is stored."""
    cache_key = f"huddle_by_hash:{huddle_hash}"
    return cache_key
def get_huddle(id_list: List[int]) -> Huddle:
    """Get (or create) the Huddle for this set of member user IDs."""
    return get_huddle_backend(get_huddle_hash(id_list), id_list)
@cache_with_key(lambda huddle_hash, id_list: huddle_hash_cache_key(huddle_hash), timeout=3600*24*7)
def get_huddle_backend(huddle_hash: str, id_list: List[int]) -> Huddle:
    """Get or atomically create the Huddle for the precomputed
    huddle_hash; on creation, also create its Recipient row and a
    Subscription for each member, all inside one transaction."""
    with transaction.atomic():
        (huddle, created) = Huddle.objects.get_or_create(huddle_hash=huddle_hash)
        if created:
            # Newly created: wire up the Recipient and subscribe every
            # member before the transaction commits.
            recipient = Recipient.objects.create(type_id=huddle.id,
                                                 type=Recipient.HUDDLE)
            huddle.recipient = recipient
            huddle.save(update_fields=["recipient"])
            subs_to_create = [Subscription(recipient=recipient,
                                           user_profile_id=user_profile_id)
                              for user_profile_id in id_list]
            Subscription.objects.bulk_create(subs_to_create)
        return huddle
class UserActivity(models.Model):
    """Tracks, per (user, client, query), how many times the user has run
    that API query with that client and when they last did so."""
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    client: Client = models.ForeignKey(Client, on_delete=CASCADE)
    # Name of the API query/endpoint being counted.
    query: str = models.CharField(max_length=50, db_index=True)

    count: int = models.IntegerField()
    last_visit: datetime.datetime = models.DateTimeField('last visit')

    class Meta:
        unique_together = ("user_profile", "client", "query")
class UserActivityInterval(models.Model):
    """A contiguous [start, end] interval during which a user was active."""
    # Minimum interval length; NOTE(review): appears to be enforced by the
    # code that creates/extends intervals, not by this model -- confirm.
    MIN_INTERVAL_LENGTH = datetime.timedelta(minutes=15)

    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    start: datetime.datetime = models.DateTimeField('start time', db_index=True)
    end: datetime.datetime = models.DateTimeField('end time', db_index=True)
class UserPresence(models.Model):
    """A record from the last time we heard from a given user on a given client.

    This is a tricky subsystem, because it is highly optimized.  See the docs:
      https://zulip.readthedocs.io/en/latest/subsystems/presence.html
    """
    class Meta:
        unique_together = ("user_profile", "client")
        index_together = [
            ("realm", "timestamp"),
        ]

    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    # NOTE(review): realm appears to be denormalized here to support the
    # (realm, timestamp) index above -- confirm against callers.
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    client: Client = models.ForeignKey(Client, on_delete=CASCADE)

    # The time we heard this update from the client.
    timestamp: datetime.datetime = models.DateTimeField('presence changed')

    # The user was actively using this Zulip client as of `timestamp` (i.e.,
    # they had interacted with the client recently).  When the timestamp is
    # itself recent, this is the green "active" status in the webapp.
    ACTIVE = 1

    # There had been no user activity (keyboard/mouse/etc.) on this client
    # recently.  So the client was online at the specified time, but it
    # could be the user's desktop which they were away from.  Displayed as
    # orange/idle if the timestamp is current.
    IDLE = 2

    # Information from the client about the user's recent interaction with
    # that client, as of `timestamp`.  Possible values above.
    #
    # There is no "inactive" status, because that is encoded by the
    # timestamp being old.
    status: int = models.PositiveSmallIntegerField(default=ACTIVE)

    @staticmethod
    def status_to_string(status: int) -> str:
        """Map an ACTIVE/IDLE constant to its API wire-format string."""
        if status == UserPresence.ACTIVE:
            return 'active'
        elif status == UserPresence.IDLE:
            return 'idle'
        else:  # nocoverage # TODO: Add a presence test to cover this.
            raise ValueError(f'Unknown status: {status}')

    @staticmethod
    def to_presence_dict(client_name: str, status: int, dt: datetime.datetime, push_enabled: bool=False,
                         has_push_devices: bool=False) -> Dict[str, Any]:
        """Serialize one presence record into the API wire format."""
        presence_val = UserPresence.status_to_string(status)
        timestamp = datetime_to_timestamp(dt)
        return dict(
            client=client_name,
            status=presence_val,
            timestamp=timestamp,
            # Pushable only if push notifications are enabled AND the user
            # actually has registered push devices.
            pushable=(push_enabled and has_push_devices),
        )

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this row via to_presence_dict (without push data)."""
        return UserPresence.to_presence_dict(
            self.client.name,
            self.status,
            self.timestamp,
        )

    @staticmethod
    def status_from_string(status: str) -> Optional[int]:
        """Inverse of status_to_string; returns None for unknown strings."""
        if status == 'active':
            status_val: Optional[int] = UserPresence.ACTIVE  # See https://github.com/python/mypy/issues/2611
        elif status == 'idle':
            status_val = UserPresence.IDLE
        else:
            status_val = None

        return status_val
class UserStatus(models.Model):
    """A user's away status (NORMAL/AWAY) plus optional status text."""
    user_profile: UserProfile = models.OneToOneField(UserProfile, on_delete=CASCADE)

    # When the status was last set, and by which client.
    timestamp: datetime.datetime = models.DateTimeField()
    client: Client = models.ForeignKey(Client, on_delete=CASCADE)

    NORMAL = 0
    AWAY = 1
    status: int = models.PositiveSmallIntegerField(default=NORMAL)
    status_text: str = models.CharField(max_length=255, default='')
class DefaultStream(models.Model):
    """Associates a realm with one of its default streams."""
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    stream: Stream = models.ForeignKey(Stream, on_delete=CASCADE)

    class Meta:
        unique_together = ("realm", "stream")
class DefaultStreamGroup(models.Model):
    """A named, realm-scoped group of streams, with a description."""
    MAX_NAME_LENGTH = 60

    name: str = models.CharField(max_length=MAX_NAME_LENGTH, db_index=True)
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    streams: Manager = models.ManyToManyField('Stream')
    description: str = models.CharField(max_length=1024, default='')

    class Meta:
        unique_together = ("realm", "name")

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the group (including its streams) for the API."""
        return dict(name=self.name,
                    id=self.id,
                    description=self.description,
                    streams=[stream.to_dict() for stream in self.streams.all()])
def get_default_stream_groups(realm: Realm) -> List[DefaultStreamGroup]:
    # NOTE(review): annotated as List but actually returns a lazy
    # QuerySet; callers presumably only iterate it -- confirm.
    return DefaultStreamGroup.objects.filter(realm=realm)
class AbstractScheduledJob(models.Model):
    """Common base for jobs scheduled to run at a future time."""
    # When the job should be executed.
    scheduled_timestamp: datetime.datetime = models.DateTimeField(db_index=True)
    # JSON representation of arguments to consumer
    data: str = models.TextField()
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)

    class Meta:
        abstract = True
class ScheduledEmail(AbstractScheduledJob):
    """An email scheduled for future delivery."""
    # Exactly one of users or address should be set. These are
    # duplicate values, used to efficiently filter the set of
    # ScheduledEmails for use in clear_scheduled_emails; the
    # recipients used for actually sending messages are stored in the
    # data field of AbstractScheduledJob.
    users: Manager = models.ManyToManyField(UserProfile)
    # Just the address part of a full "name <address>" email address
    address: Optional[str] = models.EmailField(null=True, db_index=True)

    # Valid types are below
    WELCOME = 1
    DIGEST = 2
    INVITATION_REMINDER = 3
    type: int = models.PositiveSmallIntegerField()

    def __str__(self) -> str:
        return f"<ScheduledEmail: {self.type} {self.address or list(self.users.all())} {self.scheduled_timestamp}>"
class MissedMessageEmailAddress(models.Model):
    """A single-use, expiring email address token tied to a (message,
    user) pair; the address itself is built from email_token via
    settings.EMAIL_GATEWAY_PATTERN (see __str__)."""
    # How long (in seconds) the address stays valid after `timestamp`.
    EXPIRY_SECONDS = 60 * 60 * 24 * 5
    # How many times the address may be used before becoming unusable.
    ALLOWED_USES = 1

    message: Message = models.ForeignKey(Message, on_delete=CASCADE)
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    email_token: str = models.CharField(max_length=34, unique=True, db_index=True)

    # Timestamp of when the missed message address generated.
    # The address is valid until timestamp + EXPIRY_SECONDS.
    timestamp: datetime.datetime = models.DateTimeField(db_index=True, default=timezone_now)
    times_used: int = models.PositiveIntegerField(default=0, db_index=True)

    def __str__(self) -> str:
        return settings.EMAIL_GATEWAY_PATTERN % (self.email_token,)

    def is_usable(self) -> bool:
        """True if the address is neither expired nor used up."""
        not_expired = timezone_now() <= self.timestamp + timedelta(seconds=self.EXPIRY_SECONDS)
        has_uses_left = self.times_used < self.ALLOWED_USES
        return has_uses_left and not_expired

    def increment_times_used(self) -> None:
        """Record one use of this address (persisted immediately)."""
        self.times_used += 1
        self.save(update_fields=["times_used"])
class ScheduledMessage(models.Model):
    """A message to be sent at a future time, either scheduled by the
    sender (SEND_LATER) or as a reminder (REMIND)."""
    sender: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    recipient: Recipient = models.ForeignKey(Recipient, on_delete=CASCADE)
    # The topic, accessed via topic_name()/set_topic_name().
    subject: str = models.CharField(max_length=MAX_TOPIC_NAME_LENGTH)
    content: str = models.TextField()
    sending_client: Client = models.ForeignKey(Client, on_delete=CASCADE)
    stream: Optional[Stream] = models.ForeignKey(Stream, null=True, on_delete=CASCADE)
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    scheduled_timestamp: datetime.datetime = models.DateTimeField(db_index=True)
    delivered: bool = models.BooleanField(default=False)

    SEND_LATER = 1
    REMIND = 2

    DELIVERY_TYPES = (
        (SEND_LATER, 'send_later'),
        (REMIND, 'remind'),
    )

    delivery_type: int = models.PositiveSmallIntegerField(
        choices=DELIVERY_TYPES, default=SEND_LATER,
    )

    def topic_name(self) -> str:
        return self.subject

    def set_topic_name(self, topic_name: str) -> None:
        self.subject = topic_name

    def __str__(self) -> str:
        display_recipient = get_display_recipient(self.recipient)
        return f"<ScheduledMessage: {display_recipient} {self.subject} {self.sender} {self.scheduled_timestamp}>"
# Mapping from email-template name to the ScheduledEmail.type constant
# used when scheduling/clearing that kind of email.
EMAIL_TYPES = {
    'followup_day1': ScheduledEmail.WELCOME,
    'followup_day2': ScheduledEmail.WELCOME,
    'digest': ScheduledEmail.DIGEST,
    'invitation_reminder': ScheduledEmail.INVITATION_REMINDER,
}
class AbstractRealmAuditLog(models.Model):
    """Defines fields common to RealmAuditLog and RemoteRealmAuditLog."""
    event_time: datetime.datetime = models.DateTimeField(db_index=True)
    # If True, event_time is an overestimate of the true time. Can be used
    # by migrations when introducing a new event_type.
    backfilled: bool = models.BooleanField(default=False)

    # Keys within extra_data, when extra_data is a json dict. Keys are strings because
    # json keys must always be strings.
    OLD_VALUE = '1'
    NEW_VALUE = '2'
    ROLE_COUNT = '10'
    ROLE_COUNT_HUMANS = '11'
    ROLE_COUNT_BOTS = '12'

    extra_data: Optional[str] = models.TextField(null=True)

    # Event types, grouped by numeric range:
    # 1xx user events, 2xx realm events, 3xx subscription events,
    # 4xx Stripe events, 5xx Customer events.
    USER_CREATED = 101
    USER_ACTIVATED = 102
    USER_DEACTIVATED = 103
    USER_REACTIVATED = 104
    USER_ROLE_CHANGED = 105

    USER_SOFT_ACTIVATED = 120
    USER_SOFT_DEACTIVATED = 121
    USER_PASSWORD_CHANGED = 122
    USER_AVATAR_SOURCE_CHANGED = 123
    USER_FULL_NAME_CHANGED = 124
    USER_EMAIL_CHANGED = 125
    USER_TOS_VERSION_CHANGED = 126
    USER_API_KEY_CHANGED = 127
    USER_BOT_OWNER_CHANGED = 128

    REALM_DEACTIVATED = 201
    REALM_REACTIVATED = 202
    REALM_SCRUBBED = 203
    REALM_PLAN_TYPE_CHANGED = 204
    REALM_LOGO_CHANGED = 205
    REALM_EXPORTED = 206

    SUBSCRIPTION_CREATED = 301
    SUBSCRIPTION_ACTIVATED = 302
    SUBSCRIPTION_DEACTIVATED = 303

    STRIPE_CUSTOMER_CREATED = 401
    STRIPE_CARD_CHANGED = 402
    STRIPE_PLAN_CHANGED = 403
    STRIPE_PLAN_QUANTITY_RESET = 404

    CUSTOMER_CREATED = 501
    CUSTOMER_PLAN_CREATED = 502
    CUSTOMER_SWITCHED_FROM_MONTHLY_TO_ANNUAL_PLAN = 503

    event_type: int = models.PositiveSmallIntegerField()

    # event_types synced from on-prem installations to Zulip Cloud when
    # billing for mobile push notifications is enabled.  Every billing
    # event_type should have ROLE_COUNT populated in extra_data.
    SYNCED_BILLING_EVENTS = [
        USER_CREATED, USER_ACTIVATED, USER_DEACTIVATED, USER_REACTIVATED, USER_ROLE_CHANGED,
        REALM_DEACTIVATED, REALM_REACTIVATED]

    class Meta:
        abstract = True
class RealmAuditLog(AbstractRealmAuditLog):
    """
    RealmAuditLog tracks important changes to users, streams, and
    realms in Zulip.  It is intended to support both
    debugging/introspection (e.g. determining when a user's left a
    given stream?) as well as help with some database migrations where
    we might be able to do a better data backfill with it.  Here are a
    few key details about how this works:

    * acting_user is the user who initiated the state change
    * modified_user (if present) is the user being modified
    * modified_stream (if present) is the stream being modified

    For example:
    * When a user subscribes another user to a stream, modified_user,
      acting_user, and modified_stream will all be present and different.
    * When an administrator changes an organization's realm icon,
      acting_user is that administrator and both modified_user and
      modified_stream will be None.
    """
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    acting_user: Optional[UserProfile] = models.ForeignKey(
        UserProfile, null=True, related_name="+", on_delete=CASCADE,
    )
    modified_user: Optional[UserProfile] = models.ForeignKey(
        UserProfile, null=True, related_name="+", on_delete=CASCADE,
    )
    modified_stream: Optional[Stream] = models.ForeignKey(
        Stream, null=True, on_delete=CASCADE,
    )
    # NOTE(review): appears to record the latest message ID as of the
    # event, presumably for backfill/soft-deactivation logic -- confirm.
    event_last_message_id: Optional[int] = models.IntegerField(null=True)

    def __str__(self) -> str:
        if self.modified_user is not None:
            return f"<RealmAuditLog: {self.modified_user} {self.event_type} {self.event_time} {self.id}>"
        if self.modified_stream is not None:
            return f"<RealmAuditLog: {self.modified_stream} {self.event_type} {self.event_time} {self.id}>"
        return f"<RealmAuditLog: {self.realm} {self.event_type} {self.event_time} {self.id}>"
class UserHotspot(models.Model):
    """Records that a user has seen the named onboarding hotspot."""
    user: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    hotspot: str = models.CharField(max_length=30)
    timestamp: datetime.datetime = models.DateTimeField(default=timezone_now)

    class Meta:
        unique_together = ("user", "hotspot")
def check_valid_user_ids(realm_id: int, user_ids: List[int],
                         allow_deactivated: bool=False) -> Optional[str]:
    """Validate that every ID in user_ids is a human user in the given
    realm (active, unless allow_deactivated is True).

    Returns a translated error string describing the first problem
    found, or None if all IDs are valid.
    """
    error = check_list(check_int)("User IDs", user_ids)
    if error:
        return error

    realm = Realm.objects.get(id=realm_id)
    for user_id in user_ids:
        # TODO: Structurally, we should be doing a bulk fetch query to
        # get the users here, not doing these in a loop. But because
        # this is a rarely used feature and likely to never have more
        # than a handful of users, it's probably mostly OK.
        try:
            user_profile = get_user_profile_by_id_in_realm(user_id, realm)
        except UserProfile.DoesNotExist:
            # Fix: interpolation arguments are proper one-tuples;
            # `% (user_id)` was not a tuple and only worked by accident.
            return _('Invalid user ID: %d') % (user_id,)

        if not allow_deactivated and not user_profile.is_active:
            return _('User with ID %d is deactivated') % (user_id,)

        if user_profile.is_bot:
            return _('User with ID %d is a bot') % (user_id,)

    return None
class CustomProfileField(models.Model):
    """Defines a form field for the per-realm custom profile fields feature.

    See CustomProfileFieldValue for an individual user's values for one of
    these fields.
    """
    HINT_MAX_LENGTH = 80
    NAME_MAX_LENGTH = 40

    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    name: str = models.CharField(max_length=NAME_MAX_LENGTH)
    hint: Optional[str] = models.CharField(max_length=HINT_MAX_LENGTH, default='', null=True)
    # Position of the field in the realm's profile-field ordering.
    order: int = models.IntegerField(default=0)

    SHORT_TEXT = 1
    LONG_TEXT = 2
    CHOICE = 3
    DATE = 4
    URL = 5
    USER = 6
    EXTERNAL_ACCOUNT = 7

    # These are the fields whose validators require more than var_name
    # and value argument. i.e. CHOICE require field_data, USER require
    # realm as argument.
    CHOICE_FIELD_TYPE_DATA: List[ExtendedFieldElement] = [
        (CHOICE, str(_('List of options')), validate_choice_field, str, "CHOICE"),
    ]
    # Security fix: the converter here was previously `eval`, which would
    # execute arbitrary Python contained in the stored value.
    # ast.literal_eval parses only Python literals (e.g. "[1, 2]") and is
    # safe on untrusted data, while accepting the same valid inputs.
    USER_FIELD_TYPE_DATA: List[UserFieldElement] = [
        (USER, str(_('Person picker')), check_valid_user_ids, ast.literal_eval, "USER"),
    ]

    CHOICE_FIELD_VALIDATORS: Dict[int, ExtendedValidator] = {
        item[0]: item[2] for item in CHOICE_FIELD_TYPE_DATA
    }
    USER_FIELD_VALIDATORS: Dict[int, RealmUserValidator] = {
        item[0]: item[2] for item in USER_FIELD_TYPE_DATA
    }

    FIELD_TYPE_DATA: List[FieldElement] = [
        # Type, Display Name, Validator, Converter, Keyword
        (SHORT_TEXT, str(_('Short text')), check_short_string, str, "SHORT_TEXT"),
        (LONG_TEXT, str(_('Long text')), check_long_string, str, "LONG_TEXT"),
        (DATE, str(_('Date picker')), check_date, str, "DATE"),
        (URL, str(_('Link')), check_url, str, "URL"),
        (EXTERNAL_ACCOUNT, str(_('External account')), check_short_string, str, "EXTERNAL_ACCOUNT"),
    ]

    ALL_FIELD_TYPES = [*FIELD_TYPE_DATA, *CHOICE_FIELD_TYPE_DATA, *USER_FIELD_TYPE_DATA]

    # Lookup tables derived from the *_TYPE_DATA tuples above.
    FIELD_VALIDATORS: Dict[int, Validator] = {item[0]: item[2] for item in FIELD_TYPE_DATA}
    FIELD_CONVERTERS: Dict[int, Callable[[Any], Any]] = {item[0]: item[3] for item in ALL_FIELD_TYPES}
    FIELD_TYPE_CHOICES: List[Tuple[int, str]] = [(item[0], item[1]) for item in ALL_FIELD_TYPES]
    FIELD_TYPE_CHOICES_DICT: Dict[str, Dict[str, Union[str, int]]] = {
        item[4]: {"id": item[0], "name": item[1]} for item in ALL_FIELD_TYPES
    }

    field_type: int = models.PositiveSmallIntegerField(
        choices=FIELD_TYPE_CHOICES, default=SHORT_TEXT,
    )

    # A JSON blob of any additional data needed to define the field beyond
    # type/name/hint.
    #
    # The format depends on the type.  Field types SHORT_TEXT, LONG_TEXT,
    # DATE, URL, and USER leave this null.  Fields of type CHOICE store the
    # choices' descriptions.
    #
    # Note: There is no performance overhead of using TextField in PostgreSQL.
    # See https://www.postgresql.org/docs/9.0/static/datatype-character.html
    field_data: Optional[str] = models.TextField(default='', null=True)

    class Meta:
        unique_together = ('realm', 'name')

    def as_dict(self) -> ProfileDataElementBase:
        """Serialize the field definition for the API."""
        return {
            'id': self.id,
            'name': self.name,
            'type': self.field_type,
            'hint': self.hint,
            'field_data': self.field_data,
            'order': self.order,
        }

    def is_renderable(self) -> bool:
        """True for text field types whose values get markdown-rendered."""
        if self.field_type in [CustomProfileField.SHORT_TEXT, CustomProfileField.LONG_TEXT]:
            return True
        return False

    def __str__(self) -> str:
        return f"<CustomProfileField: {self.realm} {self.name} {self.field_type} {self.order}>"
def custom_profile_fields_for_realm(realm_id: int) -> List[CustomProfileField]:
    """All custom profile fields in the realm, in display order."""
    fields = CustomProfileField.objects.filter(realm=realm_id)
    return fields.order_by('order')
class CustomProfileFieldValue(models.Model):
    """One user's value for one CustomProfileField."""
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    field: CustomProfileField = models.ForeignKey(CustomProfileField, on_delete=CASCADE)
    value: str = models.TextField()
    # Rendered (markdown) form of value, for renderable field types only.
    rendered_value: Optional[str] = models.TextField(null=True, default=None)

    class Meta:
        unique_together = ('user_profile', 'field')

    def __str__(self) -> str:
        return f"<CustomProfileFieldValue: {self.user_profile} {self.field} {self.value}>"
# Interfaces for services
# They provide additional functionality like parsing message to obtain query url, data to be sent to url,
# and parsing the response.
# Identifiers for the supported Service interfaces (see Service below).
GENERIC_INTERFACE = 'GenericService'
SLACK_INTERFACE = 'SlackOutgoingWebhookService'
# A Service corresponds to either an outgoing webhook bot or an embedded bot.
# The type of Service is determined by the bot_type field of the referenced
# UserProfile.
#
# If the Service is an outgoing webhook bot:
# - name is any human-readable identifier for the Service
# - base_url is the address of the third-party site
# - token is used for authentication with the third-party site
#
# If the Service is an embedded bot:
# - name is the canonical name for the type of bot (e.g. 'xkcd' for an instance
# of the xkcd bot); multiple embedded bots can have the same name, but all
# embedded bots with the same name will run the same code
# - base_url and token are currently unused
class Service(models.Model):
    """Configuration for an outgoing-webhook or embedded bot; see the
    block comment above for the per-bot-type field semantics."""
    name: str = models.CharField(max_length=UserProfile.MAX_NAME_LENGTH)
    # Bot user corresponding to the Service.  The bot_type of this user
    # deterines the type of service.  If non-bot services are added later,
    # user_profile can also represent the owner of the Service.
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    base_url: str = models.TextField()
    token: str = models.TextField()
    # Interface / API version of the service.
    interface: int = models.PositiveSmallIntegerField(default=1)

    # Valid interfaces are {generic, zulip_bot_service, slack}
    GENERIC = 1
    SLACK = 2

    ALLOWED_INTERFACE_TYPES = [
        GENERIC,
        SLACK,
    ]
    # N.B. If we used Django's choice=... we would get this for free (kinda)
    _interfaces: Dict[int, str] = {
        GENERIC: GENERIC_INTERFACE,
        SLACK: SLACK_INTERFACE,
    }

    def interface_name(self) -> str:
        """Map the stored interface constant to its string identifier."""
        # Raises KeyError if invalid
        return self._interfaces[self.interface]
def get_bot_services(user_profile_id: str) -> List[Service]:
    """All Service rows attached to the given bot user."""
    services = Service.objects.filter(user_profile__id=user_profile_id)
    return list(services)
def get_service_profile(user_profile_id: str, service_name: str) -> Service:
    """The named Service attached to the given bot user."""
    return Service.objects.get(user_profile__id=user_profile_id,
                               name=service_name)
class BotStorageData(models.Model):
    """Per-bot key/value storage."""
    bot_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    key: str = models.TextField(db_index=True)
    value: str = models.TextField()

    class Meta:
        unique_together = ("bot_profile", "key")
class BotConfigData(models.Model):
    """Per-bot key/value configuration."""
    bot_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    key: str = models.TextField(db_index=True)
    value: str = models.TextField()

    class Meta:
        unique_together = ("bot_profile", "key")
class InvalidFakeEmailDomain(Exception):
    """Raised when settings.FAKE_EMAIL_DOMAIN cannot be used to form
    valid email addresses (see get_fake_email_domain)."""
    pass
def get_fake_email_domain() -> str:
    """Return settings.FAKE_EMAIL_DOMAIN, validating it first.

    Raises InvalidFakeEmailDomain if the configured domain cannot be
    used to form valid email addresses.
    """
    try:
        # Check that the fake email domain can be used to form valid email addresses.
        validate_email("bot@" + settings.FAKE_EMAIL_DOMAIN)
    except ValidationError as e:
        # Chain the underlying validation error for easier debugging.
        raise InvalidFakeEmailDomain(settings.FAKE_EMAIL_DOMAIN + ' is not a valid domain.') from e

    return settings.FAKE_EMAIL_DOMAIN
class AlertWord(models.Model):
    """A single alert word configured by a user."""
    # Realm isn't necessary, but it's a nice denormalization.  Users
    # never move to another realm, so it's static, and having Realm
    # here optimizes the main query on this table, which is fetching
    # all the alert words in a realm.
    realm: Realm = models.ForeignKey(Realm, db_index=True, on_delete=CASCADE)
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    # Case-insensitive name for the alert word.
    word: str = models.TextField()

    class Meta:
        unique_together = ("user_profile", "word")
def flush_realm_alert_words(realm: Realm) -> None:
    """Invalidate both realm-level alert-word caches."""
    for key in (realm_alert_words_cache_key(realm),
                realm_alert_words_automaton_cache_key(realm)):
        cache_delete(key)
def flush_alert_word(sender: Any, **kwargs: Any) -> None:
    """Signal handler: flush realm alert-word caches when an AlertWord
    row is saved or deleted."""
    instance = kwargs['instance']
    flush_realm_alert_words(instance.realm)
# Keep the realm-level alert-word caches in sync with AlertWord rows.
post_save.connect(flush_alert_word, sender=AlertWord)
post_delete.connect(flush_alert_word, sender=AlertWord)
import datetime
import ast
import re
import sre_constants
import time
from collections import defaultdict
from datetime import timedelta
from typing import (
AbstractSet,
Any,
Callable,
DefaultDict,
Dict,
Iterable,
List,
Optional,
Sequence,
Set,
Tuple,
TypeVar,
Union,
)
import django.contrib.auth
from bitfield import BitField
from bitfield.types import BitHandler
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, UserManager
from django.contrib.postgres.fields import JSONField
from django.core.exceptions import ValidationError
from django.core.validators import MinLengthValidator, RegexValidator, URLValidator, validate_email
from django.db import models, transaction
from django.db.models import CASCADE, Manager, Q, Sum
from django.db.models.query import QuerySet
from django.db.models.signals import post_delete, post_save
from django.utils.timezone import now as timezone_now
from django.utils.translation import ugettext_lazy as _
from confirmation import settings as confirmation_settings
from zerver.lib import cache
from zerver.lib.cache import (
active_non_guest_user_ids_cache_key,
active_user_ids_cache_key,
bot_dict_fields,
bot_dicts_in_realm_cache_key,
bot_profile_cache_key,
cache_delete,
cache_set,
cache_with_key,
flush_message,
flush_realm,
flush_stream,
flush_submessage,
flush_used_upload_space_cache,
flush_user_profile,
generic_bulk_cached_fetch,
get_realm_used_upload_space_cache_key,
get_stream_cache_key,
realm_alert_words_automaton_cache_key,
realm_alert_words_cache_key,
realm_user_dict_fields,
realm_user_dicts_cache_key,
user_profile_by_api_key_cache_key,
user_profile_by_email_cache_key,
user_profile_by_id_cache_key,
user_profile_cache_key,
)
from zerver.lib.exceptions import JsonableError
from zerver.lib.pysa import mark_sanitized
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.lib.types import (
DisplayRecipientT,
ExtendedFieldElement,
ExtendedValidator,
FieldElement,
ProfileData,
ProfileDataElementBase,
RealmUserValidator,
UserFieldElement,
Validator,
)
from zerver.lib.utils import generate_random_token, make_safe_digest
from zerver.lib.validator import (
check_date,
check_int,
check_list,
check_long_string,
check_short_string,
check_url,
validate_choice_field,
)
# Hard limits on message topic and body lengths.
MAX_TOPIC_NAME_LENGTH = 60
MAX_MESSAGE_LENGTH = 10000
MAX_LANGUAGE_ID_LENGTH: int = 50

# Constrained TypeVar: stream names may arrive as an ordered sequence or a set.
STREAM_NAMES = TypeVar('STREAM_NAMES', Sequence[str], AbstractSet[str])
def query_for_ids(query: QuerySet, user_ids: List[int], field: str) -> QuerySet:
    """Restrict `query` to rows whose `field` is one of `user_ids`.

    `field` is interpolated directly into SQL, so it must be a trusted,
    hard-coded column name -- never user input.  Asserts that user_ids
    is non-empty, since an empty `IN ()` clause is invalid SQL.
    """
    # Idiom fix: `assert(expr)` is just `assert expr` with confusing
    # function-call-style parentheses.
    assert user_ids
    clause = f'{field} IN %s'
    query = query.extra(
        where=[clause], params=(tuple(user_ids),),
    )
    return query
# In-process memoization of display_recipient lookups; NOTE(review):
# presumably cleared between requests by code elsewhere -- confirm.
per_request_display_recipient_cache: Dict[int, DisplayRecipientT] = {}
def get_display_recipient_by_id(recipient_id: int, recipient_type: int,
                                recipient_type_id: Optional[int]) -> DisplayRecipientT:
    """Return the display recipient for a Recipient row, memoizing the
    result in the per-request cache."""
    from zerver.lib.display_recipient import get_display_recipient_remote_cache

    cache = per_request_display_recipient_cache
    if recipient_id not in cache:
        cache[recipient_id] = get_display_recipient_remote_cache(
            recipient_id, recipient_type, recipient_type_id)
    return cache[recipient_id]
def get_display_recipient(recipient: 'Recipient') -> DisplayRecipientT:
    """Convenience wrapper over get_display_recipient_by_id."""
    return get_display_recipient_by_id(recipient.id,
                                       recipient.type,
                                       recipient.type_id)
def get_realm_emoji_cache_key(realm: 'Realm') -> str:
    """Cache key for all RealmEmoji in the realm."""
    key = f'realm_emoji:{realm.id}'
    return key
def get_active_realm_emoji_cache_key(realm: 'Realm') -> str:
    """Cache key for only the active (non-deactivated) RealmEmoji."""
    key = f'active_realm_emoji:{realm.id}'
    return key
# Module-level memo of the enabled Django auth backends.
supported_backends: Optional[Set[type]] = None

def supported_auth_backends() -> Set[type]:
    """Return the set of enabled Django authentication backends.

    NOTE(review): the module-level memo is unconditionally overwritten
    here (there is no `if supported_backends is None` guard), so every
    call re-runs django.contrib.auth.get_backends(); confirm whether the
    caching was deliberately disabled.
    """
    global supported_backends
    supported_backends = django.contrib.auth.get_backends()
    assert supported_backends is not None
    return supported_backends
def clear_supported_auth_backends_cache() -> None:
    """Reset the memoized auth-backend set so the next call to
    supported_auth_backends() recomputes it."""
    global supported_backends
    supported_backends = None
class Realm(models.Model):
    """A Zulip organization (realm): its settings, policies, and limits.

    The `property_types` dict below declares the settings managed by the
    generic realm-settings machinery, mapping attribute name to its type.
    """
    MAX_REALM_NAME_LENGTH = 40
    MAX_REALM_SUBDOMAIN_LENGTH = 40
    INVITES_STANDARD_REALM_DAILY_MAX = 3000
    MESSAGE_VISIBILITY_LIMITED = 10000
    # Flag names for the authentication_methods BitField below.
    AUTHENTICATION_FLAGS = ['Google', 'Email', 'GitHub', 'LDAP', 'Dev',
                            'RemoteUser', 'AzureAD', 'SAML', 'GitLab', 'Apple']
    SUBDOMAIN_FOR_ROOT_DOMAIN = ''
    # User-visible display name and description of the organization.
    name: Optional[str] = models.CharField(max_length=MAX_REALM_NAME_LENGTH, null=True)
    description: str = models.TextField(default="")
    # The subdomain; unique across the server and used for routing.
    string_id: str = models.CharField(max_length=MAX_REALM_SUBDOMAIN_LENGTH, unique=True)
    date_created: datetime.datetime = models.DateTimeField(default=timezone_now)
    deactivated: bool = models.BooleanField(default=False)
    # Restricting sign-up: domain allow-list, invitation requirements.
    emails_restricted_to_domains: bool = models.BooleanField(default=False)
    invite_required: bool = models.BooleanField(default=True)
    invite_by_admins_only: bool = models.BooleanField(default=False)
    # Daily invitation cap; None means use the server default (see max_invites).
    _max_invites: Optional[int] = models.IntegerField(null=True, db_column='max_invites')
    disallow_disposable_email_addresses: bool = models.BooleanField(default=True)
    # Bitmask of enabled login methods; all flags enabled by default.
    authentication_methods: BitHandler = BitField(
        flags=AUTHENTICATION_FLAGS, default=2**31 - 1,
    )
    inline_image_preview: bool = models.BooleanField(default=True)
    inline_url_embed_preview: bool = models.BooleanField(default=False)
    digest_emails_enabled: bool = models.BooleanField(default=False)
    digest_weekday: int = models.SmallIntegerField(default=1)
    send_welcome_emails: bool = models.BooleanField(default=True)
    message_content_allowed_in_email_notifications: bool = models.BooleanField(default=True)
    mandatory_topics: bool = models.BooleanField(default=False)
    add_emoji_by_admins_only: bool = models.BooleanField(default=False)
    name_changes_disabled: bool = models.BooleanField(default=False)
    email_changes_disabled: bool = models.BooleanField(default=False)
    avatar_changes_disabled: bool = models.BooleanField(default=False)
    # Shared policy values used by several *_policy fields below.
    POLICY_MEMBERS_ONLY = 1
    POLICY_ADMINS_ONLY = 2
    POLICY_FULL_MEMBERS_ONLY = 3
    COMMON_POLICY_TYPES = [
        POLICY_MEMBERS_ONLY,
        POLICY_ADMINS_ONLY,
        POLICY_FULL_MEMBERS_ONLY,
    ]
    create_stream_policy: int = models.PositiveSmallIntegerField(
        default=POLICY_MEMBERS_ONLY)
    invite_to_stream_policy: int = models.PositiveSmallIntegerField(
        default=POLICY_MEMBERS_ONLY)
    USER_GROUP_EDIT_POLICY_MEMBERS = 1
    USER_GROUP_EDIT_POLICY_ADMINS = 2
    user_group_edit_policy: int = models.PositiveSmallIntegerField(
        default=USER_GROUP_EDIT_POLICY_MEMBERS)
    USER_GROUP_EDIT_POLICY_TYPES = [
        USER_GROUP_EDIT_POLICY_MEMBERS,
        USER_GROUP_EDIT_POLICY_ADMINS,
    ]
    PRIVATE_MESSAGE_POLICY_UNLIMITED = 1
    PRIVATE_MESSAGE_POLICY_DISABLED = 2
    private_message_policy: int = models.PositiveSmallIntegerField(
        default=PRIVATE_MESSAGE_POLICY_UNLIMITED)
    PRIVATE_MESSAGE_POLICY_TYPES = [
        PRIVATE_MESSAGE_POLICY_UNLIMITED,
        PRIVATE_MESSAGE_POLICY_DISABLED,
    ]
    # Who in the organization may see users' real email
    # addresses. Controls whether the UserProfile.email field is the
    # same as UserProfile.delivery_email, or is instead garbage.
    EMAIL_ADDRESS_VISIBILITY_EVERYONE = 1
    EMAIL_ADDRESS_VISIBILITY_MEMBERS = 2
    EMAIL_ADDRESS_VISIBILITY_ADMINS = 3
    EMAIL_ADDRESS_VISIBILITY_NOBODY = 4
    email_address_visibility: int = models.PositiveSmallIntegerField(
        default=EMAIL_ADDRESS_VISIBILITY_EVERYONE,
    )
    EMAIL_ADDRESS_VISIBILITY_TYPES = [
        EMAIL_ADDRESS_VISIBILITY_EVERYONE,
        # The MEMBERS level is not yet implemented on the backend.
        ## EMAIL_ADDRESS_VISIBILITY_MEMBERS,
        EMAIL_ADDRESS_VISIBILITY_ADMINS,
        EMAIL_ADDRESS_VISIBILITY_NOBODY,
    ]
    # Threshold in days for new users to create streams, and potentially take
    # some other actions.
    waiting_period_threshold: int = models.PositiveIntegerField(default=0)
    allow_message_deleting: bool = models.BooleanField(default=False)
    DEFAULT_MESSAGE_CONTENT_DELETE_LIMIT_SECONDS = 600 # if changed, also change in admin.js, setting_org.js
    message_content_delete_limit_seconds: int = models.IntegerField(
        default=DEFAULT_MESSAGE_CONTENT_DELETE_LIMIT_SECONDS,
    )
    allow_message_editing: bool = models.BooleanField(default=True)
    DEFAULT_MESSAGE_CONTENT_EDIT_LIMIT_SECONDS = 600 # if changed, also change in admin.js, setting_org.js
    message_content_edit_limit_seconds: int = models.IntegerField(
        default=DEFAULT_MESSAGE_CONTENT_EDIT_LIMIT_SECONDS,
    )
    # Whether users have access to message edit history
    allow_edit_history: bool = models.BooleanField(default=True)
    DEFAULT_COMMUNITY_TOPIC_EDITING_LIMIT_SECONDS = 86400
    allow_community_topic_editing: bool = models.BooleanField(default=True)
    # Defaults for new users
    default_twenty_four_hour_time: bool = models.BooleanField(default=False)
    default_language: str = models.CharField(default='en', max_length=MAX_LANGUAGE_ID_LENGTH)
    DEFAULT_NOTIFICATION_STREAM_NAME = 'general'
    INITIAL_PRIVATE_STREAM_NAME = 'core team'
    STREAM_EVENTS_NOTIFICATION_TOPIC = _('stream events')
    # Streams used for automated notifications (new-stream and signup events).
    notifications_stream: Optional["Stream"] = models.ForeignKey(
        "Stream", related_name="+", null=True, blank=True, on_delete=CASCADE,
    )
    signup_notifications_stream: Optional["Stream"] = models.ForeignKey(
        "Stream", related_name="+", null=True, blank=True, on_delete=CASCADE,
    )
    RETAIN_MESSAGE_FOREVER = -1
    # For old messages being automatically deleted
    message_retention_days: Optional[int] = models.IntegerField(null=True)
    # When non-null, all but the latest this many messages in the organization
    # are inaccessible to users (but not deleted).
    message_visibility_limit: Optional[int] = models.IntegerField(null=True)
    # Messages older than this message ID in the organization are inaccessible.
    first_visible_message_id: int = models.IntegerField(default=0)
    # Valid org_types are {CORPORATE, COMMUNITY}
    CORPORATE = 1
    COMMUNITY = 2
    org_type: int = models.PositiveSmallIntegerField(default=CORPORATE)
    UPGRADE_TEXT_STANDARD = _("Available on Zulip Standard. Upgrade to access.")
    # plan_type controls various features around resource/feature
    # limitations for a Zulip organization on multi-tenant installations
    # like Zulip Cloud.
    SELF_HOSTED = 1
    LIMITED = 2
    STANDARD = 3
    STANDARD_FREE = 4
    plan_type: int = models.PositiveSmallIntegerField(default=SELF_HOSTED)
    # This value is also being used in static/js/settings_bots.bot_creation_policy_values.
    # On updating it here, update it there as well.
    BOT_CREATION_EVERYONE = 1
    BOT_CREATION_LIMIT_GENERIC_BOTS = 2
    BOT_CREATION_ADMINS_ONLY = 3
    bot_creation_policy: int = models.PositiveSmallIntegerField(default=BOT_CREATION_EVERYONE)
    BOT_CREATION_POLICY_TYPES = [
        BOT_CREATION_EVERYONE,
        BOT_CREATION_LIMIT_GENERIC_BOTS,
        BOT_CREATION_ADMINS_ONLY,
    ]
    # See upload_quota_bytes; don't interpret upload_quota_gb directly.
    UPLOAD_QUOTA_LIMITED = 5
    UPLOAD_QUOTA_STANDARD = 50
    upload_quota_gb: Optional[int] = models.IntegerField(null=True)
    VIDEO_CHAT_PROVIDERS = {
        'disabled': {
            'name': "None",
            'id': 0,
        },
        'jitsi_meet': {
            'name': "Jitsi Meet",
            'id': 1,
        },
    }
    # Zoom is only offered as a provider when the server is configured for it.
    if settings.VIDEO_ZOOM_CLIENT_ID is not None and settings.VIDEO_ZOOM_CLIENT_SECRET is not None:
        VIDEO_CHAT_PROVIDERS['zoom'] = {
            'name': "Zoom",
            'id': 3,
        }
    video_chat_provider = models.PositiveSmallIntegerField(default=VIDEO_CHAT_PROVIDERS['jitsi_meet']['id'])
    default_code_block_language: Optional[str] = models.TextField(null=True, default=None)
    # Settings managed by the generic realm-settings code: name -> type(s).
    property_types: Dict[str, Union[type, Tuple[type, ...]]] = dict(
        add_emoji_by_admins_only=bool,
        allow_edit_history=bool,
        allow_message_deleting=bool,
        bot_creation_policy=int,
        create_stream_policy=int,
        invite_to_stream_policy=int,
        default_language=str,
        default_twenty_four_hour_time = bool,
        description=str,
        digest_emails_enabled=bool,
        disallow_disposable_email_addresses=bool,
        email_address_visibility=int,
        email_changes_disabled=bool,
        invite_required=bool,
        invite_by_admins_only=bool,
        inline_image_preview=bool,
        inline_url_embed_preview=bool,
        mandatory_topics=bool,
        message_retention_days=(int, type(None)),
        name=str,
        name_changes_disabled=bool,
        avatar_changes_disabled=bool,
        emails_restricted_to_domains=bool,
        send_welcome_emails=bool,
        message_content_allowed_in_email_notifications=bool,
        video_chat_provider=int,
        waiting_period_threshold=int,
        digest_weekday=int,
        private_message_policy=int,
        user_group_edit_policy=int,
        default_code_block_language=(str, type(None)),
    )
    DIGEST_WEEKDAY_VALUES = [0, 1, 2, 3, 4, 5, 6]
    # Realm icon (shown in the organization's UI chrome).
    ICON_FROM_GRAVATAR = 'G'
    ICON_UPLOADED = 'U'
    ICON_SOURCES = (
        (ICON_FROM_GRAVATAR, 'Hosted by Gravatar'),
        (ICON_UPLOADED, 'Uploaded by administrator'),
    )
    icon_source: str = models.CharField(
        default=ICON_FROM_GRAVATAR, choices=ICON_SOURCES, max_length=1,
    )
    icon_version: int = models.PositiveSmallIntegerField(default=1)
    # Day-mode and night-mode logos; version fields bust client caches.
    LOGO_DEFAULT = 'D'
    LOGO_UPLOADED = 'U'
    LOGO_SOURCES = (
        (LOGO_DEFAULT, 'Default to Zulip'),
        (LOGO_UPLOADED, 'Uploaded by administrator'),
    )
    logo_source: str = models.CharField(
        default=LOGO_DEFAULT, choices=LOGO_SOURCES, max_length=1,
    )
    logo_version: int = models.PositiveSmallIntegerField(default=1)
    night_logo_source: str = models.CharField(
        default=LOGO_DEFAULT, choices=LOGO_SOURCES, max_length=1,
    )
    night_logo_version: int = models.PositiveSmallIntegerField(default=1)
    def authentication_methods_dict(self) -> Dict[str, bool]:
        """Map backend name -> enabled?, restricted to backends the server supports."""
        from zproject.backends import AUTH_BACKEND_NAME_MAP
        ret: Dict[str, bool] = {}
        supported_backends = [backend.__class__ for backend in supported_auth_backends()]
        # NOTE: iteritems() here is the BitHandler API (django-bitfield),
        # not a Python 2 leftover.
        for k, v in self.authentication_methods.iteritems():
            backend = AUTH_BACKEND_NAME_MAP[k]
            if backend in supported_backends:
                ret[k] = v
        return ret
    def __str__(self) -> str:
        return f"<Realm: {self.string_id} {self.id}>"
    @cache_with_key(get_realm_emoji_cache_key, timeout=3600*24*7)
    def get_emoji(self) -> Dict[str, Dict[str, Iterable[str]]]:
        """All custom emoji for this realm (cached for a week)."""
        return get_realm_emoji_uncached(self)
    @cache_with_key(get_active_realm_emoji_cache_key, timeout=3600*24*7)
    def get_active_emoji(self) -> Dict[str, Dict[str, Iterable[str]]]:
        """Only non-deactivated custom emoji for this realm (cached for a week)."""
        return get_active_realm_emoji_uncached(self)
    def get_admin_users_and_bots(self) -> Sequence['UserProfile']:
        """Active admins and owners, including bot accounts."""
        return UserProfile.objects.filter(realm=self, is_active=True,
                                          role__in=[UserProfile.ROLE_REALM_ADMINISTRATOR,
                                                    UserProfile.ROLE_REALM_OWNER])
    def get_human_admin_users(self) -> QuerySet:
        """Active human (non-bot) admins and owners."""
        return UserProfile.objects.filter(realm=self, is_bot=False, is_active=True,
                                          role__in=[UserProfile.ROLE_REALM_ADMINISTRATOR,
                                                    UserProfile.ROLE_REALM_OWNER])
    def get_active_users(self) -> Sequence['UserProfile']:
        return UserProfile.objects.filter(realm=self, is_active=True).select_related()
    def get_human_owner_users(self) -> QuerySet:
        return UserProfile.objects.filter(realm=self, is_bot=False,
                                          role=UserProfile.ROLE_REALM_OWNER,
                                          is_active=True)
    def get_bot_domain(self) -> str:
        return get_fake_email_domain()
    def get_notifications_stream(self) -> Optional['Stream']:
        """The notifications stream, or None if unset or deactivated."""
        if self.notifications_stream is not None and not self.notifications_stream.deactivated:
            return self.notifications_stream
        return None
    def get_signup_notifications_stream(self) -> Optional['Stream']:
        """The signup-notifications stream, or None if unset or deactivated."""
        if self.signup_notifications_stream is not None and not self.signup_notifications_stream.deactivated:
            return self.signup_notifications_stream
        return None
    @property
    def max_invites(self) -> int:
        # Falls back to the server-wide default when no per-realm cap is set.
        if self._max_invites is None:
            return settings.INVITES_DEFAULT_REALM_DAILY_MAX
        return self._max_invites
    @max_invites.setter
    def max_invites(self, value: Optional[int]) -> None:
        self._max_invites = value
    def upload_quota_bytes(self) -> Optional[int]:
        """Upload quota in bytes (GB << 30), or None for unlimited."""
        if self.upload_quota_gb is None:
            return None
        return self.upload_quota_gb << 30
    @cache_with_key(get_realm_used_upload_space_cache_key, timeout=3600*24*7)
    def currently_used_upload_space_bytes(self) -> int:
        """Total bytes of attachments stored for this realm (cached)."""
        used_space = Attachment.objects.filter(realm=self).aggregate(Sum('size'))['size__sum']
        if used_space is None:
            return 0
        return used_space
    def ensure_not_on_limited_plan(self) -> None:
        """Raise a JsonableError if this realm is on the LIMITED plan."""
        if self.plan_type == Realm.LIMITED:
            raise JsonableError(self.UPGRADE_TEXT_STANDARD)
    @property
    def subdomain(self) -> str:
        return self.string_id
    @property
    def display_subdomain(self) -> str:
        # The root domain is displayed as "." rather than an empty string.
        if self.string_id == "":
            return "."
        return self.string_id
    @property
    def uri(self) -> str:
        return settings.EXTERNAL_URI_SCHEME + self.host
    @property
    def host(self) -> str:
        return mark_sanitized(self.host_for_subdomain(self.subdomain))
    @staticmethod
    def host_for_subdomain(subdomain: str) -> str:
        """Hostname for a subdomain, honoring REALM_HOSTS overrides."""
        if subdomain == Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:
            return settings.EXTERNAL_HOST
        default_host = f"{subdomain}.{settings.EXTERNAL_HOST}"
        return settings.REALM_HOSTS.get(subdomain, default_host)
    @property
    def is_zephyr_mirror_realm(self) -> bool:
        return self.string_id == "zephyr"
    @property
    def webathena_enabled(self) -> bool:
        return self.is_zephyr_mirror_realm
    @property
    def presence_disabled(self) -> bool:
        return self.is_zephyr_mirror_realm
    class Meta:
        permissions = (
            ('administer', "Administer a realm"),
            ('api_super_user', "Can send messages as other users for mirroring"),
        )
# Invalidate realm-related caches whenever a Realm row is saved.
post_save.connect(flush_realm, sender=Realm)
def get_realm(string_id: str) -> Realm:
    """Fetch the realm with the given subdomain.

    Raises Realm.DoesNotExist if no such realm exists.
    """
    return Realm.objects.get(string_id=string_id)
def name_changes_disabled(realm: Optional[Realm]) -> bool:
    """True if users may not change their own names (server- or realm-level)."""
    if settings.NAME_CHANGES_DISABLED:
        return True
    return realm is not None and realm.name_changes_disabled
def avatar_changes_disabled(realm: Realm) -> bool:
    """True if users may not change their own avatars (server- or realm-level)."""
    if settings.AVATAR_CHANGES_DISABLED:
        return True
    return realm.avatar_changes_disabled
class RealmDomain(models.Model):
    """An email domain allowed to sign up for a realm (when
    emails_restricted_to_domains is enabled)."""
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    # should always be stored lowercase
    domain: str = models.CharField(max_length=80, db_index=True)
    # Whether addresses on subdomains of `domain` are also allowed.
    allow_subdomains = models.BooleanField(default=False)
    class Meta:
        unique_together = ("realm", "domain")
def email_to_username(email: str) -> str:
    """Return the lowercased local part of `email` (everything before the
    final '@'); an address without '@' yields the empty string."""
    local_part = email.rpartition("@")[0]
    return local_part.lower()
def email_to_domain(email: str) -> str:
    """Return the lowercased domain part of `email` (everything after the
    final '@')."""
    return email.rpartition("@")[-1].lower()
class DomainNotAllowedForRealmError(Exception):
    """The email's domain is not in the realm's allowed-domains list."""
    pass
class DisposableEmailError(Exception):
    """The email address belongs to a known disposable-email provider."""
    pass
class EmailContainsPlusError(Exception):
    """The email address contains a '+', which is not permitted here."""
    pass
def get_realm_domains(realm: Realm) -> List[Dict[str, str]]:
    """All allowed-domain rows for `realm` as dicts with 'domain' and
    'allow_subdomains' keys."""
    rows = realm.realmdomain_set.values('domain', 'allow_subdomains')
    return list(rows)
class RealmEmoji(models.Model):
    """A custom emoji uploaded to a realm."""
    # The uploader; kept nullable so emoji survive account deletion.
    author: Optional["UserProfile"] = models.ForeignKey(
        "UserProfile", blank=True, null=True, on_delete=CASCADE,
    )
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    # Emoji name as typed between colons; restricted character set.
    name: str = models.TextField(validators=[
        MinLengthValidator(1),
        RegexValidator(regex=r'^[0-9a-z.\-_]+(?<![.\-_])$',
                       message=_("Invalid characters in emoji name"))])
    # File name of the uploaded image within the realm's emoji directory.
    file_name: Optional[str] = models.TextField(db_index=True, null=True, blank=True)
    deactivated: bool = models.BooleanField(default=False)
    # Storage path template for the emoji image.
    PATH_ID_TEMPLATE = "{realm_id}/emoji/images/{emoji_file_name}"
    def __str__(self) -> str:
        return f"<RealmEmoji({self.realm.string_id}): {self.id} {self.name} {self.deactivated} {self.file_name}>"
def get_realm_emoji_dicts(realm: Realm,
                          only_active_emojis: bool=False) -> Dict[str, Dict[str, Any]]:
    """Return the realm's custom emoji as {emoji_id (str): emoji info dict}.

    With only_active_emojis=True, deactivated emoji are excluded.
    """
    from zerver.lib.emoji import get_emoji_url
    emoji_query = RealmEmoji.objects.filter(realm=realm).select_related('author')
    if only_active_emojis:
        emoji_query = emoji_query.filter(deactivated=False)
    result: Dict[str, Dict[str, Any]] = {}
    for emoji in emoji_query.all():
        url = get_emoji_url(emoji.file_name, emoji.realm_id)
        result[str(emoji.id)] = dict(
            id=str(emoji.id),
            name=emoji.name,
            source_url=url,
            deactivated=emoji.deactivated,
            author_id=emoji.author_id if emoji.author else None,
        )
    return result
def get_realm_emoji_uncached(realm: Realm) -> Dict[str, Dict[str, Any]]:
    """All custom emoji for `realm`, bypassing the remote cache."""
    return get_realm_emoji_dicts(realm)
def get_active_realm_emoji_uncached(realm: Realm) -> Dict[str, Dict[str, Any]]:
    """Active custom emoji for `realm`, keyed by emoji name (not ID)."""
    active = get_realm_emoji_dicts(realm, only_active_emojis=True)
    return {info['name']: info for info in active.values()}
def flush_realm_emoji(sender: Any, **kwargs: Any) -> None:
    """Signal handler: recompute both realm-emoji cache entries after a
    RealmEmoji row is saved or deleted."""
    realm = kwargs['instance'].realm
    one_week = 3600*24*7
    cache_set(get_realm_emoji_cache_key(realm),
              get_realm_emoji_uncached(realm),
              timeout=one_week)
    cache_set(get_active_realm_emoji_cache_key(realm),
              get_active_realm_emoji_uncached(realm),
              timeout=one_week)
# Keep the emoji caches fresh on any RealmEmoji change.
post_save.connect(flush_realm_emoji, sender=RealmEmoji)
post_delete.connect(flush_realm_emoji, sender=RealmEmoji)
def filter_pattern_validator(value: str) -> None:
    """Validate a RealmFilter (linkifier) pattern.

    The pattern must consist of runs of allowed literal characters plus
    one or more named capture groups `(?P<name>...)`, and must itself
    compile as a regular expression.

    Raises ValidationError otherwise.

    NOTE: the two regex/charset string literals on the next lines were
    restored; the originals were truncated because they contain '#'.
    """
    regex = re.compile(r'^(?:(?:[\w\-#_= /:]*|[+]|[!])(\(\?P<\w+>.+?\)))+$')
    error_msg = _('Invalid filter pattern. Valid characters are %s.') % (
        '[ a-zA-Z_#=/:+!-]',)
    if not regex.match(str(value)):
        raise ValidationError(error_msg)
    try:
        re.compile(value)
    except sre_constants.error:
        # Regex is invalid
        raise ValidationError(error_msg)
def filter_format_validator(value: str) -> None:
    """Validate a RealmFilter URL format string: runs of allowed URL
    characters interleaved with `%(group_name)s` placeholders.

    Raises ValidationError on mismatch.

    NOTE: the regex literal was restored; the original line was truncated
    because the character class contains '#'.
    """
    regex = re.compile(r'^([\.\/:a-zA-Z0-9#_?=&;~-]+%\(([a-zA-Z0-9_-]+)\)s)+$')
    if not regex.match(value):
        raise ValidationError(_('Invalid URL format string.'))
class RealmFilter(models.Model):
    """A realm-specific linkifier: a regex `pattern` that, when matched in
    a message, is turned into a link using `url_format_string`."""
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    pattern: str = models.TextField(validators=[filter_pattern_validator])
    url_format_string: str = models.TextField(validators=[URLValidator(), filter_format_validator])
    class Meta:
        unique_together = ("realm", "pattern")
    def __str__(self) -> str:
        return f"<RealmFilter({self.realm.string_id}): {self.pattern} {self.url_format_string}>"
def get_realm_filters_cache_key(realm_id: int) -> str:
    """Remote-cache key for a realm's linkifiers."""
    return '%s:all_realm_filters:%s' % (cache.KEY_PREFIX, realm_id)
# We have a per-process cache to avoid doing 1000 remote cache queries during page load.
# Maps realm_id -> list of (pattern, url_format_string, filter_id) tuples;
# emptied between requests by flush_per_request_caches().
per_request_realm_filters_cache: Dict[int, List[Tuple[str, str, int]]] = {}
def realm_in_local_realm_filters_cache(realm_id: int) -> bool:
    """Whether the per-process filters cache already has this realm's entry."""
    return realm_id in per_request_realm_filters_cache
def realm_filters_for_realm(realm_id: int) -> List[Tuple[str, str, int]]:
    """Linkifiers for a realm, served from the per-process cache and
    populated from the remote cache on a miss."""
    if not realm_in_local_realm_filters_cache(realm_id):
        filters = realm_filters_for_realm_remote_cache(realm_id)
        per_request_realm_filters_cache[realm_id] = filters
    return per_request_realm_filters_cache[realm_id]
@cache_with_key(get_realm_filters_cache_key, timeout=3600*24*7)
def realm_filters_for_realm_remote_cache(realm_id: int) -> List[Tuple[str, str, int]]:
    """Fetch a realm's linkifiers from the database (result cached for a week)."""
    return [
        (row.pattern, row.url_format_string, row.id)
        for row in RealmFilter.objects.filter(realm_id=realm_id)
    ]
def all_realm_filters() -> Dict[int, List[Tuple[str, str, int]]]:
    """All linkifiers on the server, grouped by realm_id."""
    by_realm: DefaultDict[int, List[Tuple[str, str, int]]] = defaultdict(list)
    for row in RealmFilter.objects.all():
        by_realm[row.realm_id].append((row.pattern,
                                       row.url_format_string,
                                       row.id))
    return by_realm
def flush_realm_filter(sender: Any, **kwargs: Any) -> None:
    """Signal handler: drop both the remote and per-process linkifier cache
    entries for the affected realm."""
    realm_id = kwargs['instance'].realm_id
    cache_delete(get_realm_filters_cache_key(realm_id))
    # A missing per-process entry is fine; pop with a default ignores it.
    per_request_realm_filters_cache.pop(realm_id, None)
# Keep the linkifier caches fresh on any RealmFilter change.
post_save.connect(flush_realm_filter, sender=RealmFilter)
post_delete.connect(flush_realm_filter, sender=RealmFilter)
def flush_per_request_caches() -> None:
    """Reset all per-process request-scoped caches (called between requests)."""
    global per_request_display_recipient_cache, per_request_realm_filters_cache
    per_request_display_recipient_cache = {}
    per_request_realm_filters_cache = {}
# The Recipient table is used to map Messages to the set of users who
# received the message. It is implemented as a set of triples (id,
# type_id, type). We have 3 types of recipients: Huddles (for group
# private messages), UserProfiles (for 1:1 private messages), and
# Streams. The recipient table maps a globally unique recipient id
# (used by the Message table) to the type-specific unique id (the
# stream id, user_profile id, or huddle id).
class Recipient(models.Model):
    """Maps a globally unique recipient id to a type-specific id (stream id,
    user_profile id, or huddle id); see the comment block above."""
    type_id: int = models.IntegerField(db_index=True)
    type: int = models.PositiveSmallIntegerField(db_index=True)
    # Valid types are {personal, stream, huddle}
    PERSONAL = 1
    STREAM = 2
    HUDDLE = 3
    class Meta:
        unique_together = ("type", "type_id")
    # N.B. If we used Django's choice=... we would get this for free (kinda)
    _type_names = {
        PERSONAL: 'personal',
        STREAM: 'stream',
        HUDDLE: 'huddle'}
    def type_name(self) -> str:
        # Raises KeyError if a Recipient has an invalid type
        return self._type_names[self.type]
    def __str__(self) -> str:
        display_recipient = get_display_recipient(self)
        return f"<Recipient: {display_recipient} ({self.type_id}, {self.type})>"
class UserProfile(AbstractBaseUser, PermissionsMixin):
    """A Zulip user account (human or bot), scoped to a single Realm.

    NOTE: this class continues beyond this file chunk; only the field
    declarations and some methods are documented here.
    """
    USERNAME_FIELD = 'email'
    MAX_NAME_LENGTH = 100
    MIN_NAME_LENGTH = 2
    API_KEY_LENGTH = 32
    NAME_INVALID_CHARS = ['*', '`', "\\", '>', '"', '@']
    DEFAULT_BOT = 1
    INCOMING_WEBHOOK_BOT = 2
    # This value is also being used in static/js/settings_bots.js.
    # On updating it here, update it there as well.
    OUTGOING_WEBHOOK_BOT = 3
    EMBEDDED_BOT = 4
    BOT_TYPES = {
        DEFAULT_BOT: 'Generic bot',
        INCOMING_WEBHOOK_BOT: 'Incoming webhook',
        OUTGOING_WEBHOOK_BOT: 'Outgoing webhook',
        EMBEDDED_BOT: 'Embedded bot',
    }
    SERVICE_BOT_TYPES = [
        OUTGOING_WEBHOOK_BOT,
        EMBEDDED_BOT,
    ]
    # For historical reasons, Zulip has two email fields. The
    # `delivery_email` field is the user's email address, where all
    # email notifications will be sent, and is used for all
    # authentication use cases.
    #
    # The `email` field is the same as delivery_email in organizations
    # with EMAIL_ADDRESS_VISIBILITY_EVERYONE. For other
    # organizations, it will be a unique value of the form
    # user1234@example.com. This field exists for backwards
    # compatibility in Zulip APIs where users are referred to by their
    # email address, not their ID; it should be used in all API use cases.
    #
    # Both fields are unique within a realm (in a case-insensitive fashion).
    delivery_email: str = models.EmailField(blank=False, db_index=True)
    email: str = models.EmailField(blank=False, db_index=True)
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    # Foreign key to the Recipient object for PERSONAL type messages to this user.
    recipient = models.ForeignKey(Recipient, null=True, on_delete=models.SET_NULL)
    # The user's name. We prefer the model of a full_name and
    # short_name over first+last because cultures vary on how many
    # names one has, whether the family name is first or last, etc.
    # It also allows organizations to encode a bit of non-name data in
    # the "name" attribute if desired, like gender pronouns,
    # graduation year, etc. The short_name attribute is currently not
    # used anywhere, but the intent is that it would be used as the
    # shorter familiar name for addressing the user in the UI.
    full_name: str = models.CharField(max_length=MAX_NAME_LENGTH)
    short_name: str = models.CharField(max_length=MAX_NAME_LENGTH)
    date_joined: datetime.datetime = models.DateTimeField(default=timezone_now)
    # Which version of the terms of service the user has accepted, if any.
    tos_version: Optional[str] = models.CharField(null=True, max_length=10)
    api_key: str = models.CharField(max_length=API_KEY_LENGTH)
    # pointer points to Message.id, NOT UserMessage.id.
    pointer: int = models.IntegerField()
    # Whether the user has access to server-level administrator pages, like /activity
    is_staff: bool = models.BooleanField(default=False)
    # For a normal user, this is True unless the user or an admin has
    # deactivated their account. The name comes from Django; this field
    # isn't related to presence or to whether the user has recently used Zulip.
    #
    # See also `long_term_idle`.
    is_active: bool = models.BooleanField(default=True, db_index=True)
    is_billing_admin: bool = models.BooleanField(default=False, db_index=True)
    is_bot: bool = models.BooleanField(default=False, db_index=True)
    # One of BOT_TYPES above, or None for a human user.
    bot_type: Optional[int] = models.PositiveSmallIntegerField(null=True, db_index=True)
    bot_owner: Optional["UserProfile"] = models.ForeignKey('self', null=True, on_delete=models.SET_NULL)
    # Each role has a superset of the permissions of the next higher
    # numbered role. When adding new roles, leave enough space for
    # future roles to be inserted between currently adjacent
    # roles. These constants appear in RealmAuditLog.extra_data, so
    # changes to them will require a migration of RealmAuditLog.
    ROLE_REALM_OWNER = 100
    ROLE_REALM_ADMINISTRATOR = 200
    # ROLE_MODERATOR = 300
    ROLE_MEMBER = 400
    ROLE_GUEST = 600
    role: int = models.PositiveSmallIntegerField(default=ROLE_MEMBER, db_index=True)
    ROLE_TYPES = [
        ROLE_REALM_OWNER,
        ROLE_REALM_ADMINISTRATOR,
        ROLE_MEMBER,
        ROLE_GUEST,
    ]
    # Whether the user has been "soft-deactivated" due to weeks of inactivity.
    # For these users we avoid doing UserMessage table work, as an optimization
    # for large Zulip organizations with lots of single-visit users.
    long_term_idle: bool = models.BooleanField(default=False, db_index=True)
    # When we last added basic UserMessage rows for a long_term_idle user.
    last_active_message_id: Optional[int] = models.IntegerField(null=True)
    # Mirror dummies are fake (!is_active) users used to provide
    # message senders in our cross-protocol Zephyr<->Zulip content
    # mirroring integration, so that we can display mirrored content
    # like native Zulip messages (with a name + avatar, etc.).
    is_mirror_dummy: bool = models.BooleanField(default=False)
    # API super users are allowed to forge messages as sent by another
    # user and to send to private streams; also used for Zephyr/Jabber mirroring.
    is_api_super_user: bool = models.BooleanField(default=False, db_index=True)
    ### Notifications settings. ###
    # Stream notifications.
    enable_stream_desktop_notifications: bool = models.BooleanField(default=False)
    enable_stream_email_notifications: bool = models.BooleanField(default=False)
    enable_stream_push_notifications: bool = models.BooleanField(default=False)
    enable_stream_audible_notifications: bool = models.BooleanField(default=False)
    notification_sound: str = models.CharField(max_length=20, default='zulip')
    wildcard_mentions_notify: bool = models.BooleanField(default=True)
    # PM + @-mention notifications.
    enable_desktop_notifications: bool = models.BooleanField(default=True)
    pm_content_in_desktop_notifications: bool = models.BooleanField(default=True)
    enable_sounds: bool = models.BooleanField(default=True)
    enable_offline_email_notifications: bool = models.BooleanField(default=True)
    message_content_in_email_notifications: bool = models.BooleanField(default=True)
    enable_offline_push_notifications: bool = models.BooleanField(default=True)
    enable_online_push_notifications: bool = models.BooleanField(default=True)
    DESKTOP_ICON_COUNT_DISPLAY_MESSAGES = 1
    DESKTOP_ICON_COUNT_DISPLAY_NOTIFIABLE = 2
    DESKTOP_ICON_COUNT_DISPLAY_NONE = 3
    desktop_icon_count_display: int = models.PositiveSmallIntegerField(
        default=DESKTOP_ICON_COUNT_DISPLAY_MESSAGES)
    enable_digest_emails: bool = models.BooleanField(default=True)
    enable_login_emails: bool = models.BooleanField(default=True)
    realm_name_in_notifications: bool = models.BooleanField(default=False)
    presence_enabled: bool = models.BooleanField(default=True)
    # Used for rate-limiting certain automated messages generated by bots
    last_reminder: Optional[datetime.datetime] = models.DateTimeField(default=None, null=True)
    # Minutes to wait before warning a bot owner that their bot sent a message
    # to a nonexistent stream
    BOT_OWNER_STREAM_ALERT_WAITPERIOD = 1
    # API rate limits, formatted as a comma-separated list of range:max pairs
    rate_limits: str = models.CharField(default="", max_length=100)
    # Hours to wait before sending another email to a user
    EMAIL_REMINDER_WAITPERIOD = 24
    # Default streams for some deprecated/legacy classes of bot users.
    default_sending_stream: Optional["Stream"] = models.ForeignKey(
        "zerver.Stream", null=True, related_name="+", on_delete=CASCADE,
    )
    default_events_register_stream: Optional["Stream"] = models.ForeignKey(
        "zerver.Stream", null=True, related_name="+", on_delete=CASCADE,
    )
    default_all_public_streams: bool = models.BooleanField(default=False)
    # UI vars
    enter_sends: Optional[bool] = models.BooleanField(null=True, default=False)
    left_side_userlist: bool = models.BooleanField(default=False)
    # display settings
    default_language: str = models.CharField(default='en', max_length=MAX_LANGUAGE_ID_LENGTH)
    dense_mode: bool = models.BooleanField(default=True)
    fluid_layout_width: bool = models.BooleanField(default=False)
    high_contrast_mode: bool = models.BooleanField(default=False)
    night_mode: bool = models.BooleanField(default=False)
    translate_emoticons: bool = models.BooleanField(default=False)
    twenty_four_hour_time: bool = models.BooleanField(default=False)
    starred_message_counts: bool = models.BooleanField(default=False)
    # UI setting controlling Zulip's behavior of demoting in the sort
    # order and graying out streams with no recent traffic. The
    # default behavior, automatic, enables this behavior once a user
    # is subscribed to 30+ streams in the webapp.
    DEMOTE_STREAMS_AUTOMATIC = 1
    DEMOTE_STREAMS_ALWAYS = 2
    DEMOTE_STREAMS_NEVER = 3
    DEMOTE_STREAMS_CHOICES = [
        DEMOTE_STREAMS_AUTOMATIC,
        DEMOTE_STREAMS_ALWAYS,
        DEMOTE_STREAMS_NEVER,
    ]
    demote_inactive_streams = models.PositiveSmallIntegerField(default=DEMOTE_STREAMS_AUTOMATIC)
    # A timezone name from the `tzdata` database, as found in pytz.all_timezones.
    #
    # The longest existing name is 32 characters long, so max_length=40 seems
    # like a safe choice.
    #
    # In Django, the convention is to use an empty string instead of NULL/None
    # for text-based fields. For more information, see
    # https://docs.djangoproject.com/en/1.10/ref/models/fields/#django.db.models.Field.null.
    timezone: str = models.CharField(max_length=40, default='')
    # Emojisets
    GOOGLE_EMOJISET = 'google'
    GOOGLE_BLOB_EMOJISET = 'google-blob'
    TEXT_EMOJISET = 'text'
    TWITTER_EMOJISET = 'twitter'
    EMOJISET_CHOICES = ((GOOGLE_EMOJISET, "Google modern"),
                        (GOOGLE_BLOB_EMOJISET, "Google classic"),
                        (TWITTER_EMOJISET, "Twitter"),
                        (TEXT_EMOJISET, "Plain text"))
    emojiset: str = models.CharField(default=GOOGLE_BLOB_EMOJISET, choices=EMOJISET_CHOICES, max_length=20)
    # Avatar; version field busts client-side caches on change.
    AVATAR_FROM_GRAVATAR = 'G'
    AVATAR_FROM_USER = 'U'
    AVATAR_SOURCES = (
        (AVATAR_FROM_GRAVATAR, 'Hosted by Gravatar'),
        (AVATAR_FROM_USER, 'Uploaded by user'),
    )
    avatar_source: str = models.CharField(default=AVATAR_FROM_GRAVATAR, choices=AVATAR_SOURCES, max_length=1)
    avatar_version: int = models.PositiveSmallIntegerField(default=1)
    avatar_hash: Optional[str] = models.CharField(null=True, max_length=64)
    TUTORIAL_WAITING = 'W'
    TUTORIAL_STARTED = 'S'
    TUTORIAL_FINISHED = 'F'
    TUTORIAL_STATES = ((TUTORIAL_WAITING, "Waiting"),
                       (TUTORIAL_STARTED, "Started"),
                       (TUTORIAL_FINISHED, "Finished"))
    tutorial_status: str = models.CharField(default=TUTORIAL_WAITING, choices=TUTORIAL_STATES, max_length=1)
    # Contains serialized JSON of the form:
    # [("step 1", true), ("step 2", false)]
    # where the second element of each tuple is if the step has been
    # completed.
    onboarding_steps: str = models.TextField(default='[]')
    zoom_token: Optional[object] = JSONField(default=None, null=True)
    objects: UserManager = UserManager()
    # Define the types of the various automatically managed properties
    property_types = dict(
        default_language=str,
        demote_inactive_streams=int,
        dense_mode=bool,
        emojiset=str,
        fluid_layout_width=bool,
        high_contrast_mode=bool,
        left_side_userlist=bool,
        night_mode=bool,
        starred_message_counts=bool,
        timezone=str,
        translate_emoticons=bool,
        twenty_four_hour_time=bool,
    )
    # Notification settings managed by the generic settings code: name -> type.
    notification_setting_types = dict(
        enable_desktop_notifications=bool,
        enable_digest_emails=bool,
        enable_login_emails=bool,
        enable_offline_email_notifications=bool,
        enable_offline_push_notifications=bool,
        enable_online_push_notifications=bool,
        enable_sounds=bool,
        enable_stream_desktop_notifications=bool,
        enable_stream_email_notifications=bool,
        enable_stream_push_notifications=bool,
        enable_stream_audible_notifications=bool,
        wildcard_mentions_notify=bool,
        message_content_in_email_notifications=bool,
        notification_sound=str,
        pm_content_in_desktop_notifications=bool,
        desktop_icon_count_display=int,
        realm_name_in_notifications=bool,
        presence_enabled=bool,
    )
    class Meta:
        unique_together = (('realm', 'email'),)
@property
def profile_data(self) -> ProfileData:
values = CustomProfileFieldValue.objects.filter(user_profile=self)
user_data = {v.field_id: {"value": v.value, "rendered_value": v.rendered_value} for v in values}
data: ProfileData = []
for field in custom_profile_fields_for_realm(self.realm_id):
field_values = user_data.get(field.id, None)
if field_values:
value, rendered_value = field_values.get("value"), field_values.get("rendered_value")
else:
value, rendered_value = None, None
field_type = field.field_type
if value is not None:
converter = field.FIELD_CONVERTERS[field_type]
value = converter(value)
field_data = field.as_dict()
data.append({
'id': field_data['id'],
'name': field_data['name'],
'type': field_data['type'],
'hint': field_data['hint'],
'field_data': field_data['field_data'],
'order': field_data['order'],
'value': value,
'rendered_value': rendered_value,
})
return data
def can_admin_user(self, target_user: 'UserProfile') -> bool:
    """Whether this user may administer target_user's account:
    either they own the bot, or they administer the same realm."""
    if target_user.bot_owner == self:
        return True
    return self.is_realm_admin and self.realm == target_user.realm
def __str__(self) -> str:
    return "<UserProfile: {} {}>".format(self.email, self.realm)

@property
def is_new_member(self) -> bool:
    """True while this account is younger (in days) than the realm's
    waiting_period_threshold."""
    account_age = timezone_now() - self.date_joined
    return account_age.days < self.realm.waiting_period_threshold
@property
def is_realm_admin(self) -> bool:
    """True for both realm administrators and realm owners."""
    return self.role in (UserProfile.ROLE_REALM_ADMINISTRATOR,
                         UserProfile.ROLE_REALM_OWNER)

@is_realm_admin.setter
def is_realm_admin(self, value: bool) -> None:
    if value:
        self.role = UserProfile.ROLE_REALM_ADMINISTRATOR
        return
    if self.role == UserProfile.ROLE_REALM_ADMINISTRATOR:
        # Only demote an actual administrator to member, so we never
        # accidentally change ROLE_GUEST to ROLE_MEMBER.
        self.role = UserProfile.ROLE_MEMBER
@property
def is_realm_owner(self) -> bool:
    """True only for the organization-owner role."""
    return self.role == UserProfile.ROLE_REALM_OWNER

@property
def is_guest(self) -> bool:
    """True only for the restricted guest role."""
    return self.role == UserProfile.ROLE_GUEST

@is_guest.setter
def is_guest(self, value: bool) -> None:
    if value:
        self.role = UserProfile.ROLE_GUEST
        return
    if self.role == UserProfile.ROLE_GUEST:
        # Only demote an actual guest; any other role (e.g. realm
        # administrator) must be left untouched.
        self.role = UserProfile.ROLE_MEMBER

@property
def is_incoming_webhook(self) -> bool:
    """True when this is an incoming-webhook bot account."""
    return self.bot_type == UserProfile.INCOMING_WEBHOOK_BOT
@property
def allowed_bot_types(self) -> List[int]:
    """Bot types this user is permitted to create, respecting the
    realm's bot_creation_policy and the EMBEDDED_BOTS_ENABLED setting."""
    bot_types: List[int] = []
    restricted_to_webhooks = (
        self.realm.bot_creation_policy == Realm.BOT_CREATION_LIMIT_GENERIC_BOTS
    )
    if self.is_realm_admin or not restricted_to_webhooks:
        bot_types.append(UserProfile.DEFAULT_BOT)
    bot_types.extend([
        UserProfile.INCOMING_WEBHOOK_BOT,
        UserProfile.OUTGOING_WEBHOOK_BOT,
    ])
    if settings.EMBEDDED_BOTS_ENABLED:
        bot_types.append(UserProfile.EMBEDDED_BOT)
    return bot_types
@staticmethod
def emojiset_choices() -> List[Dict[str, str]]:
    """EMOJISET_CHOICES reshaped as a list of {key, text} dicts."""
    return [{"key": choice[0], "text": choice[1]}
            for choice in UserProfile.EMOJISET_CHOICES]

@staticmethod
def emails_from_ids(user_ids: Sequence[int]) -> Dict[int, str]:
    """Map each of the given user IDs to that user's email address."""
    id_email_rows = UserProfile.objects.filter(id__in=user_ids).values('id', 'email')
    return {row['id']: row['email'] for row in id_email_rows}
def email_address_is_realm_public(self) -> bool:
    """Whether this user's real email address is visible realm-wide
    (always true for bots, or when the realm shows emails to everyone)."""
    if self.is_bot:
        return True
    return (self.realm.email_address_visibility ==
            Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE)

def has_permission(self, policy_name: str) -> bool:
    """Evaluate one of the realm's tiered permission policies for this
    user.  Only create_stream_policy and invite_to_stream_policy are
    supported; anything else is a programming error."""
    if policy_name not in ('create_stream_policy', 'invite_to_stream_policy'):
        raise AssertionError("Invalid policy")
    # Realm administrators pass every policy.
    if self.is_realm_admin:
        return True
    policy_value = getattr(self.realm, policy_name)
    # Guests never qualify; nor does anyone else once the policy is
    # admins-only (the admin case was handled above).
    if policy_value == Realm.POLICY_ADMINS_ONLY or self.is_guest:
        return False
    if policy_value == Realm.POLICY_MEMBERS_ONLY:
        return True
    # Any other policy value additionally requires being past the
    # realm's new-member waiting period.
    return not self.is_new_member
def can_create_streams(self) -> bool:
    """Whether realm policy allows this user to create new streams."""
    return self.has_permission('create_stream_policy')

def can_subscribe_other_users(self) -> bool:
    """Whether realm policy allows this user to subscribe others to streams."""
    return self.has_permission('invite_to_stream_policy')

def can_access_public_streams(self) -> bool:
    """Guests and users in Zephyr-mirror realms cannot browse public streams."""
    return not self.is_guest and not self.realm.is_zephyr_mirror_realm

def can_access_all_realm_members(self) -> bool:
    """Guests and users in Zephyr-mirror realms cannot list all realm users."""
    return not self.realm.is_zephyr_mirror_realm and not self.is_guest
def major_tos_version(self) -> int:
    """Major component of the agreed terms-of-service version, or -1
    if the user has not agreed to any version."""
    if self.tos_version is None:
        return -1
    return int(self.tos_version.split('.')[0])

def format_requestor_for_logs(self) -> str:
    """Compact "<user id>@<realm>" tag for request logging."""
    return f"{self.id}@{self.realm.string_id or 'root'}"
def set_password(self, password: Optional[str]) -> None:
    """Set the user's password, enforcing the password-strength policy.

    Passing None marks the password unusable (e.g. SSO-only accounts).
    Raises PasswordTooWeakError if the password fails the strength check.
    """
    if password is None:
        self.set_unusable_password()
        return

    # Local import, presumably to avoid a circular import with
    # zproject.backends — verify before moving to module level.
    from zproject.backends import check_password_strength
    if not check_password_strength(password):
        raise PasswordTooWeakError

    super().set_password(password)
class PasswordTooWeakError(Exception):
    """Raised by UserProfile.set_password when the candidate password
    fails the strength check."""
    pass
class UserGroup(models.Model):
    """A named group of users within a realm (membership is stored in
    UserGroupMembership)."""
    name = models.CharField(max_length=100)
    members = models.ManyToManyField(UserProfile, through='UserGroupMembership')
    realm = models.ForeignKey(Realm, on_delete=CASCADE)
    description: str = models.TextField(default='')

    class Meta:
        # Group names are unique within a realm.
        unique_together = (('realm', 'name'),)

class UserGroupMembership(models.Model):
    """Through table for UserGroup.members."""
    user_group = models.ForeignKey(UserGroup, on_delete=CASCADE)
    user_profile = models.ForeignKey(UserProfile, on_delete=CASCADE)

    class Meta:
        # A user can belong to a given group at most once.
        unique_together = (('user_group', 'user_profile'),)
def receives_offline_push_notifications(user_profile: UserProfile) -> bool:
    """True when the user has offline push notifications enabled and is
    not a bot (bots never receive notifications)."""
    if user_profile.is_bot:
        return False
    return user_profile.enable_offline_push_notifications

def receives_offline_email_notifications(user_profile: UserProfile) -> bool:
    """True when the user has offline email notifications enabled and
    is not a bot."""
    if user_profile.is_bot:
        return False
    return user_profile.enable_offline_email_notifications

def receives_online_notifications(user_profile: UserProfile) -> bool:
    """True when the user has online push notifications enabled and is
    not a bot."""
    if user_profile.is_bot:
        return False
    return user_profile.enable_online_push_notifications

def receives_stream_notifications(user_profile: UserProfile) -> bool:
    """True when the user has stream push notifications enabled and is
    not a bot."""
    if user_profile.is_bot:
        return False
    return user_profile.enable_stream_push_notifications
def remote_user_to_email(remote_user: str) -> str:
    """Convert an SSO remote username into an email address by appending
    settings.SSO_APPEND_DOMAIN when configured."""
    if settings.SSO_APPEND_DOMAIN is None:
        return remote_user
    return f"{remote_user}@{settings.SSO_APPEND_DOMAIN}"
# Make sure we flush the UserProfile object from our remote cache
# whenever we save it.
post_save.connect(flush_user_profile, sender=UserProfile)
class PreregistrationUser(models.Model):
    """Data on a partially created user, before the completion of
    registration.

    This is used in at least three major code paths:
    * Realm creation, in which case realm is None.
    * Invitations, in which case referred_by will always be set.
    * Social authentication signup, where it's used to store data
      from the authentication step and pass it to the registration
      form.
    """
    email: str = models.EmailField()

    # If the pre-registration process provides a suggested full name for this user,
    # store it here to use it to prepopulate the Full Name field in the registration form:
    full_name: Optional[str] = models.CharField(max_length=UserProfile.MAX_NAME_LENGTH, null=True)
    full_name_validated = models.BooleanField(default=False)
    # The inviting user, when this row was created by an invitation.
    referred_by: Optional[UserProfile] = models.ForeignKey(UserProfile, null=True, on_delete=CASCADE)
    # Streams the user should be subscribed to upon completing signup.
    streams: Manager = models.ManyToManyField('Stream')
    invited_at: datetime.datetime = models.DateTimeField(auto_now=True)
    realm_creation = models.BooleanField(default=False)
    # Indicates whether the user needs a password.  Users who were
    # created via SSO style auth (e.g. GitHub/Google) generally do not.
    password_required = models.BooleanField(default=True)

    # status: whether an object has been confirmed.
    #   if confirmed, set to confirmation.settings.STATUS_ACTIVE
    status: int = models.IntegerField(default=0)

    # The realm should only ever be None for PreregistrationUser
    # objects created as part of realm creation.
    realm: Optional[Realm] = models.ForeignKey(Realm, null=True, on_delete=CASCADE)

    # Role the new user will be created with.  Changes to INVITE_AS
    # should also be reflected in settings_invites.invited_as_values in
    # static/js/settings_invites.js
    INVITE_AS = dict(
        MEMBER = 1,
        REALM_ADMIN = 2,
        GUEST_USER = 3,
    )
    invited_as: int = models.PositiveSmallIntegerField(default=INVITE_AS['MEMBER'])
def filter_to_valid_prereg_users(query: QuerySet) -> QuerySet:
    """Restrict a PreregistrationUser queryset to invitations that are
    still pending (neither used nor revoked) and not yet expired."""
    cutoff = timezone_now() - datetime.timedelta(
        days=settings.INVITATION_LINK_VALIDITY_DAYS)
    finished_statuses = [
        confirmation_settings.STATUS_ACTIVE,
        confirmation_settings.STATUS_REVOKED,
    ]
    return query.exclude(status__in=finished_statuses).filter(invited_at__gte=cutoff)
class MultiuseInvite(models.Model):
    """An invitation link that can be used by multiple people to join
    the same realm with the same role/streams."""
    referred_by = models.ForeignKey(UserProfile, on_delete=CASCADE)  # Optional[UserProfile]
    streams: Manager = models.ManyToManyField('Stream')
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    invited_as: int = models.PositiveSmallIntegerField(default=PreregistrationUser.INVITE_AS['MEMBER'])

class EmailChangeStatus(models.Model):
    """Tracks a pending email-address change for a user."""
    new_email: str = models.EmailField()
    old_email: str = models.EmailField()
    updated_at: datetime.datetime = models.DateTimeField(auto_now=True)
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)

    # status: whether an object has been confirmed.
    #   if confirmed, set to confirmation.settings.STATUS_ACTIVE
    status: int = models.IntegerField(default=0)

    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
class AbstractPushDeviceToken(models.Model):
    """Base model for a mobile device registered for push notifications
    (either APNs or GCM)."""
    APNS = 1
    GCM = 2

    KINDS = (
        (APNS, 'apns'),
        (GCM, 'gcm'),
    )

    kind: int = models.PositiveSmallIntegerField(choices=KINDS)

    # The token is a unique device-specific token that is
    # sent to us from each device:
    #   - APNS token if kind == APNS
    #   - GCM registration id if kind == GCM
    token: str = models.CharField(max_length=4096, db_index=True)

    # TODO: last_updated should be renamed date_created, since it is
    # no longer maintained as a last_updated value.
    last_updated: datetime.datetime = models.DateTimeField(auto_now=True)

    # [optional] Contains the app id of the device if it is an iOS device
    ios_app_id: Optional[str] = models.TextField(null=True)

    class Meta:
        abstract = True
class PushDeviceToken(AbstractPushDeviceToken):
    """Push token registered by a user of this server."""
    # The user whose device this is.
    user: UserProfile = models.ForeignKey(UserProfile, db_index=True, on_delete=CASCADE)

    class Meta:
        unique_together = ("user", "kind", "token")
def generate_email_token_for_stream() -> str:
    # Default generator for Stream.email_token: a random 32-character
    # token used by the email forwarder (see Stream.email_token below).
    return generate_random_token(32)
class Stream(models.Model):
    """A chat channel ("stream") within a realm."""
    MAX_NAME_LENGTH = 60
    MAX_DESCRIPTION_LENGTH = 1024

    name: str = models.CharField(max_length=MAX_NAME_LENGTH, db_index=True)
    realm: Realm = models.ForeignKey(Realm, db_index=True, on_delete=CASCADE)
    date_created: datetime.datetime = models.DateTimeField(default=timezone_now)
    deactivated: bool = models.BooleanField(default=False)
    description: str = models.CharField(max_length=MAX_DESCRIPTION_LENGTH, default='')
    rendered_description: str = models.TextField(default='')

    # Foreign key to the Recipient object for STREAM type messages to this stream.
    recipient = models.ForeignKey(Recipient, null=True, on_delete=models.SET_NULL)

    invite_only: Optional[bool] = models.BooleanField(null=True, default=False)
    history_public_to_subscribers: bool = models.BooleanField(default=False)

    # Whether this stream's content should be published by the web-public archive features
    is_web_public: bool = models.BooleanField(default=False)

    STREAM_POST_POLICY_EVERYONE = 1
    STREAM_POST_POLICY_ADMINS = 2
    STREAM_POST_POLICY_RESTRICT_NEW_MEMBERS = 3
    # TODO: Implement policy to restrict posting to a user group or admins.

    # Who in the organization has permission to send messages to this stream.
    stream_post_policy: int = models.PositiveSmallIntegerField(default=STREAM_POST_POLICY_EVERYONE)
    STREAM_POST_POLICY_TYPES = [
        STREAM_POST_POLICY_EVERYONE,
        STREAM_POST_POLICY_ADMINS,
        STREAM_POST_POLICY_RESTRICT_NEW_MEMBERS,
    ]

    # The unique thing about Zephyr public streams is that we never list their
    # users.  We may try to generalize this concept later, but for now
    # we just use a concrete field.  (Zephyr public streams aren't exactly like
    # invite-only streams--while both are private in terms of listing users,
    # for Zephyr we don't even list users to stream members, yet membership
    # is more public in the sense that you don't need a Zulip invite to join.
    # This field is populated directly from UserProfile.is_zephyr_mirror_realm,
    # and the reason for denormalizing field is performance.
    is_in_zephyr_realm: bool = models.BooleanField(default=False)

    # Used by the e-mail forwarder.  The e-mail RFC specifies a maximum
    # e-mail length of 254, and our max stream length is 30, so we
    # have plenty of room for the token.
    email_token: str = models.CharField(
        max_length=32, default=generate_email_token_for_stream, unique=True,
    )

    # For old messages being automatically deleted.
    # Value NULL means "use retention policy of the realm".
    # Value -1 means "disable retention policy for this stream unconditionally".
    # Non-negative values have the natural meaning of "archive messages older than <value> days".
    message_retention_days: Optional[int] = models.IntegerField(null=True, default=None)

    # The very first message ID in the stream.  Used to help clients
    # determine whether they might need to display "more topics" for a
    # stream based on what messages they have cached.
    first_message_id: Optional[int] = models.IntegerField(null=True, db_index=True)

    def __str__(self) -> str:
        return f"<Stream: {self.name}>"

    def is_public(self) -> bool:
        # All streams are private in Zephyr mirroring realms.
        return not self.invite_only and not self.is_in_zephyr_realm

    def is_history_realm_public(self) -> bool:
        return self.is_public()

    def is_history_public_to_subscribers(self) -> bool:
        return self.history_public_to_subscribers

    class Meta:
        unique_together = ("name", "realm")

    # Stream fields included whenever a Stream object is provided to
    # Zulip clients via the API.  A few details worth noting:
    # * "id" is represented as "stream_id" in most API interfaces.
    # * "email_token" is not realm-public and thus is not included here.
    # * is_in_zephyr_realm is a backend-only optimization.
    # * "deactivated" streams are filtered from the API entirely.
    # * "realm" and "recipient" are not exposed to clients via the API.
    # * "date_created" should probably be added here, as it's useful information
    #   to subscribers.
    API_FIELDS = [
        "name",
        "id",
        "description",
        "rendered_description",
        "invite_only",
        "is_web_public",
        "stream_post_policy",
        "history_public_to_subscribers",
        "first_message_id",
        "message_retention_days"
    ]

    @staticmethod
    def get_client_data(query: QuerySet) -> List[Dict[str, Any]]:
        # Restrict the query to API fields and serialize each row.
        query = query.only(*Stream.API_FIELDS)
        return [row.to_dict() for row in query]

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this stream for the API: "id" becomes "stream_id",
        plus a derived is_announcement_only boolean (computed from
        stream_post_policy)."""
        result = {}
        for field_name in self.API_FIELDS:
            if field_name == "id":
                result['stream_id'] = self.id
                continue
            result[field_name] = getattr(self, field_name)
        result['is_announcement_only'] = self.stream_post_policy == Stream.STREAM_POST_POLICY_ADMINS
        return result
# Keep the remote cache in sync whenever a Stream changes.
post_save.connect(flush_stream, sender=Stream)
post_delete.connect(flush_stream, sender=Stream)
class MutedTopic(models.Model):
    """Records that a user has muted a particular topic in a stream."""
    user_profile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    stream = models.ForeignKey(Stream, on_delete=CASCADE)
    # Denormalized copy of the stream's recipient for efficient joins.
    # NOTE(review): presumably always stream.recipient — confirm with callers.
    recipient = models.ForeignKey(Recipient, on_delete=CASCADE)
    topic_name = models.CharField(max_length=MAX_TOPIC_NAME_LENGTH)
    # The default value for date_muted is a few weeks before tracking
    # of when topics were muted was first introduced.  It's designed
    # to be obviously incorrect so that users can tell it's backfilled data.
    date_muted = models.DateTimeField(default=datetime.datetime(2020, 1, 1, 0, 0, tzinfo=datetime.timezone.utc))

    class Meta:
        unique_together = ('user_profile', 'stream', 'topic_name')

    def __str__(self) -> str:
        return (f"<MutedTopic: ({self.user_profile.email}, {self.stream.name}, {self.topic_name}, {self.date_muted})>")
class Client(models.Model):
    """A client application (e.g. website, mobile app, API script)
    through which messages/requests are sent."""
    name: str = models.CharField(max_length=30, db_index=True, unique=True)

    def __str__(self) -> str:
        return f"<Client: {self.name}>"
# Process-local cache of Client objects, keyed by KEY_PREFIX + name.
get_client_cache: Dict[str, Client] = {}

def get_client(name: str) -> Client:
    """Return the Client with the given name, creating it if needed.

    Checks an in-process dict cache before falling through to the
    remote-cache-backed get_client_remote_cache.
    """
    # Accessing KEY_PREFIX through the module is necessary
    # because we need the updated value of the variable.
    cache_name = cache.KEY_PREFIX + name
    if cache_name not in get_client_cache:
        result = get_client_remote_cache(name)
        get_client_cache[cache_name] = result
    return get_client_cache[cache_name]
def get_client_cache_key(name: str) -> str:
    """Remote-cache key for get_client_remote_cache."""
    return 'get_client:' + make_safe_digest(name)

@cache_with_key(get_client_cache_key, timeout=3600*24*7)
def get_client_remote_cache(name: str) -> Client:
    """Fetch (or create) the Client row, caching the result for a week."""
    client, _created = Client.objects.get_or_create(name=name)
    return client
@cache_with_key(get_stream_cache_key, timeout=3600*24*7)
def get_realm_stream(stream_name: str, realm_id: int) -> Stream:
    """Case-insensitive lookup of a stream by name within a realm;
    raises Stream.DoesNotExist when absent.  Cached for a week."""
    return Stream.objects.select_related().get(
        realm_id=realm_id,
        name__iexact=stream_name.strip(),
    )

def stream_name_in_use(stream_name: str, realm_id: int) -> bool:
    """Whether a stream with this name (case-insensitively) already
    exists in the realm."""
    candidates = Stream.objects.filter(
        realm_id=realm_id,
        name__iexact=stream_name.strip(),
    )
    return candidates.exists()
def get_active_streams(realm: Optional[Realm]) -> QuerySet:
    """All non-deactivated streams in the realm."""
    # TODO: Change return type to QuerySet[Stream]
    # NOTE: Return value is used as a QuerySet, so cannot currently be Sequence[QuerySet]
    return Stream.objects.filter(realm=realm, deactivated=False)

def get_stream(stream_name: str, realm: Realm) -> Stream:
    """Convenience wrapper around the cached get_realm_stream lookup."""
    return get_realm_stream(stream_name, realm.id)

def get_stream_by_id_in_realm(stream_id: int, realm: Realm) -> Stream:
    """Fetch a stream by ID, scoped to the realm; raises
    Stream.DoesNotExist when absent."""
    return Stream.objects.select_related().get(id=stream_id, realm=realm)
def bulk_get_streams(realm: Realm, stream_names: STREAM_NAMES) -> Dict[str, Any]:
    """Fetch multiple streams in a realm by name (case-insensitively),
    using the per-stream cache where possible.

    Returns a dict mapping each lowercased stream name to its Stream;
    names with no matching active stream are simply absent.
    """
    def fetch_streams_by_name(stream_names: List[str]) -> Sequence[Stream]:
        #
        # This should be just
        #
        # Stream.objects.select_related().filter(name__iexact__in=stream_names,
        #                                        realm_id=realm_id)
        #
        # But chaining __in and __iexact doesn't work with Django's
        # ORM, so we have the following hack to construct the relevant where clause
        where_clause = "upper(zerver_stream.name::text) IN (SELECT upper(name) FROM unnest(%s) AS name)"
        # Pass the Realm object itself: get_active_streams is annotated
        # to take Optional[Realm].  (Passing realm.id only worked
        # because Django coerces pk values in filters.)
        return get_active_streams(realm).select_related().extra(
            where=[where_clause],
            params=(list(stream_names),))

    def stream_name_to_cache_key(stream_name: str) -> str:
        return get_stream_cache_key(stream_name, realm.id)

    def stream_to_lower_name(stream: Stream) -> str:
        return stream.name.lower()

    return generic_bulk_cached_fetch(stream_name_to_cache_key,
                                     fetch_streams_by_name,
                                     [stream_name.lower() for stream_name in stream_names],
                                     id_fetcher=stream_to_lower_name)
def get_huddle_recipient(user_profile_ids: Set[int]) -> Recipient:
    """Return the Recipient for the huddle containing exactly these users.

    The caller should ensure that user_profile_ids includes
    the sender.  Note that get_huddle hits the cache, and then
    we hit another cache to get the recipient.  We may want to
    unify our caching strategy here.
    """
    huddle = get_huddle(list(user_profile_ids))
    return huddle.recipient
def get_huddle_user_ids(recipient: Recipient) -> List[int]:
    """IDs of all users in the huddle, in ascending user-ID order."""
    assert recipient.type == Recipient.HUDDLE

    huddle_subs = Subscription.objects.filter(recipient=recipient)
    return huddle_subs.order_by('user_profile_id').values_list(
        'user_profile_id', flat=True)
def bulk_get_huddle_user_ids(recipients: List[Recipient]) -> Dict[int, List[int]]:
    """Map each huddle recipient's ID to the IDs of its member users,
    in ascending user-ID order.

    All recipients must be of type HUDDLE.
    """
    assert all(recipient.type == Recipient.HUDDLE for recipient in recipients)
    if not recipients:
        return {}

    subscriptions = Subscription.objects.filter(
        recipient__in=recipients,
    ).order_by('user_profile_id')

    # Group the subscriptions in a single O(n + m) pass, rather than
    # rescanning the full subscription list once per recipient (the
    # previous implementation was accidentally O(n * m)).
    result_dict: Dict[int, List[int]] = {recipient.id: [] for recipient in recipients}
    for subscription in subscriptions:
        result_dict[subscription.recipient_id].append(subscription.user_profile_id)

    return result_dict
class AbstractMessage(models.Model):
    """Shared schema for Message and its archived counterpart."""
    sender: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    recipient: Recipient = models.ForeignKey(Recipient, on_delete=CASCADE)

    # The message's topic.
    #
    # Early versions of Zulip called this concept a "subject", as in an email
    # "subject line", before changing to "topic" in 2013 (commit dac5a46fa).
    # UI and user documentation now consistently say "topic".  New APIs and
    # new code should generally also say "topic".
    #
    # See also the `topic_name` method on `Message`.
    subject: str = models.CharField(max_length=MAX_TOPIC_NAME_LENGTH, db_index=True)

    content: str = models.TextField()
    rendered_content: Optional[str] = models.TextField(null=True)
    # Version of the markdown renderer used to produce rendered_content.
    rendered_content_version: Optional[int] = models.IntegerField(null=True)

    date_sent: datetime.datetime = models.DateTimeField('date sent', db_index=True)
    sending_client: Client = models.ForeignKey(Client, on_delete=CASCADE)

    last_edit_time: Optional[datetime.datetime] = models.DateTimeField(null=True)
    # A JSON-encoded list of objects describing any past edits to this
    # message, oldest first.
    edit_history: Optional[str] = models.TextField(null=True)

    has_attachment: bool = models.BooleanField(default=False, db_index=True)
    has_image: bool = models.BooleanField(default=False, db_index=True)
    has_link: bool = models.BooleanField(default=False, db_index=True)

    class Meta:
        abstract = True

    def __str__(self) -> str:
        display_recipient = get_display_recipient(self.recipient)
        return f"<{self.__class__.__name__}: {display_recipient} / {self.subject} / {self.sender}>"
class ArchiveTransaction(models.Model):
    """Groups messages that were archived together in a single
    retention/archival operation, so they can be restored as a unit."""
    timestamp: datetime.datetime = models.DateTimeField(default=timezone_now, db_index=True)
    # Marks if the data archived in this transaction has been restored:
    restored: bool = models.BooleanField(default=False, db_index=True)

    type: int = models.PositiveSmallIntegerField(db_index=True)
    # Valid types:
    RETENTION_POLICY_BASED = 1  # Archiving was executed due to automated retention policies
    MANUAL = 2  # Archiving was run manually, via move_messages_to_archive function

    # ForeignKey to the realm with which objects archived in this transaction are associated.
    # If type is set to MANUAL, this should be null.
    realm: Optional[Realm] = models.ForeignKey(Realm, null=True, on_delete=CASCADE)

    def __str__(self) -> str:
        return "ArchiveTransaction id: {id}, type: {type}, realm: {realm}, timestamp: {timestamp}".format(
            id=self.id,
            type="MANUAL" if self.type == self.MANUAL else "RETENTION_POLICY_BASED",
            realm=self.realm.string_id if self.realm else None,
            timestamp=self.timestamp,
        )
class ArchivedMessage(AbstractMessage):
    """A message moved out of the live Message table by archiving;
    linked to the transaction that archived it."""
    archive_transaction: ArchiveTransaction = models.ForeignKey(ArchiveTransaction, on_delete=CASCADE)
class Message(AbstractMessage):
    def topic_name(self) -> str:
        # Preferred accessor for the legacy `subject` column (see the
        # comment on AbstractMessage.subject).
        return self.subject

    def set_topic_name(self, topic_name: str) -> None:
        self.subject = topic_name

    def is_stream_message(self) -> bool:
        """Whether the message was sent to a stream (vs. a private
        message or huddle)."""
        return self.recipient.type == Recipient.STREAM

    def get_realm(self) -> Realm:
        # A message's realm is its sender's realm.
        return self.sender.realm

    def save_rendered_content(self) -> None:
        # Persist only the rendering-related columns.
        self.save(update_fields=["rendered_content", "rendered_content_version"])

    @staticmethod
    def need_to_render_content(rendered_content: Optional[str],
                               rendered_content_version: Optional[int],
                               bugdown_version: int) -> bool:
        """Whether the cached rendering is missing or was produced by an
        older markdown (bugdown) version and must be re-rendered."""
        return (rendered_content is None or
                rendered_content_version is None or
                rendered_content_version < bugdown_version)

    def to_log_dict(self) -> Dict[str, Any]:
        """Flat dict representation used for message logging."""
        return dict(
            id                = self.id,
            sender_id         = self.sender.id,
            sender_email      = self.sender.email,
            sender_realm_str  = self.sender.realm.string_id,
            sender_full_name  = self.sender.full_name,
            sender_short_name = self.sender.short_name,
            sending_client    = self.sending_client.name,
            type              = self.recipient.type_name(),
            recipient         = get_display_recipient(self.recipient),
            subject           = self.topic_name(),
            content           = self.content,
            timestamp         = datetime_to_timestamp(self.date_sent))

    def sent_by_human(self) -> bool:
        """Heuristic: whether the sending client is one of the official
        interactive Zulip clients (as opposed to a bot/integration)."""
        sending_client = self.sending_client.name.lower()

        return (sending_client in ('zulipandroid', 'zulipios', 'zulipdesktop',
                                   'zulipmobile', 'zulipelectron', 'zulipterminal', 'snipe',
                                   'website', 'ios', 'android')) or (
                                       'desktop app' in sending_client)

    @staticmethod
    def is_status_message(content: str, rendered_content: str) -> bool:
        """Whether the message is a "/me" status message.

        Note: rendered_content is accepted but currently unused here.
        """
        if content.startswith('/me '):
            return True
        return False
def get_context_for_message(message: Message) -> Sequence[Message]:
    """Up to 10 earlier messages in the same topic/recipient, newest
    first, limited to the 15 minutes before *message* was sent."""
    # TODO: Change return type to QuerySet[Message]
    window_start = message.date_sent - timedelta(minutes=15)
    return Message.objects.filter(
        recipient_id=message.recipient_id,
        subject=message.subject,
        id__lt=message.id,
        date_sent__gt=window_start,
    ).order_by('-id')[:10]
post_save.connect(flush_message, sender=Message)
class AbstractSubMessage(models.Model):
    """We can send little text messages that are associated with a regular
    Zulip message.  These can be used for experimental widgets like embedded
    games, surveys, mini threads, etc.  These are designed to be pretty
    generic in purpose."""
    sender: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    msg_type = models.TextField()
    content = models.TextField()

    class Meta:
        abstract = True
class SubMessage(AbstractSubMessage):
    """Sub-message attached to a live Message."""
    message: Message = models.ForeignKey(Message, on_delete=CASCADE)

    @staticmethod
    def get_raw_db_rows(needed_ids: List[int]) -> List[Dict[str, Any]]:
        """Plain dict rows for all sub-messages of the given message IDs,
        ordered by (message_id, id)."""
        fields = ['id', 'message_id', 'sender_id', 'msg_type', 'content']
        query = SubMessage.objects.filter(message_id__in=needed_ids).values(*fields)
        query = query.order_by('message_id', 'id')
        return list(query)

class ArchivedSubMessage(AbstractSubMessage):
    """Sub-message attached to an archived message."""
    message: ArchivedMessage = models.ForeignKey(ArchivedMessage, on_delete=CASCADE)

# Flush the cache whenever a SubMessage is saved.
post_save.connect(flush_submessage, sender=SubMessage)
class AbstractReaction(models.Model):
    """An emoji reaction by a user on a message (shared schema for live
    and archived reactions)."""
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)

    # The user-facing name for an emoji reaction.  With emoji aliases,
    # there may be multiple accepted names for a given emoji; this
    # field encodes which one the user selected.
    emoji_name: str = models.TextField()

    UNICODE_EMOJI = 'unicode_emoji'
    REALM_EMOJI = 'realm_emoji'
    ZULIP_EXTRA_EMOJI = 'zulip_extra_emoji'
    REACTION_TYPES = ((UNICODE_EMOJI, _("Unicode emoji")),
                      (REALM_EMOJI, _("Custom emoji")),
                      (ZULIP_EXTRA_EMOJI, _("Zulip extra emoji")))
    reaction_type: str = models.CharField(default=UNICODE_EMOJI, choices=REACTION_TYPES, max_length=30)

    # A string that uniquely identifies a particular emoji.  The format varies
    # by type:
    #
    # * For Unicode emoji, a dash-separated hex encoding of the sequence of
    #   Unicode codepoints that define this emoji in the Unicode
    #   specification.  For examples, see "non_qualified" or "unified" in the
    #   following data, with "non_qualified" taking precedence when both present:
    #     https://raw.githubusercontent.com/iamcal/emoji-data/master/emoji_pretty.json
    #
    # * For realm emoji (aka user uploaded custom emoji), the ID
    #   (in ASCII decimal) of the RealmEmoji object.
    #
    # * For "Zulip extra emoji" (like :zulip:), the filename of the emoji.
    emoji_code: str = models.TextField()

    class Meta:
        abstract = True
        # A user may react to a message at most once per emoji name, and
        # at most once per (type, code) pair.
        unique_together = (("user_profile", "message", "emoji_name"),
                           ("user_profile", "message", "reaction_type", "emoji_code"))
class Reaction(AbstractReaction):
    """Emoji reaction on a live message."""
    message: Message = models.ForeignKey(Message, on_delete=CASCADE)

    @staticmethod
    def get_raw_db_rows(needed_ids: List[int]) -> List[Dict[str, Any]]:
        """Plain dict rows for all reactions on the given message IDs,
        including the reacting user's id/email/full name."""
        fields = ['message_id', 'emoji_name', 'emoji_code', 'reaction_type',
                  'user_profile__email', 'user_profile__id', 'user_profile__full_name']
        return Reaction.objects.filter(message_id__in=needed_ids).values(*fields)

    def __str__(self) -> str:
        return f"{self.user_profile.email} / {self.message.id} / {self.emoji_name}"

class ArchivedReaction(AbstractReaction):
    """Emoji reaction on an archived message."""
    message: ArchivedMessage = models.ForeignKey(ArchivedMessage, on_delete=CASCADE)
# Whenever a message is sent, for each user subscribed to the
# corresponding Recipient object, we add a row to the UserMessage
# table indicating that that user received that message. This table
# allows us to quickly query any user's last 1000 messages to generate
# the home view.
#
# Additionally, the flags field stores metadata like whether the user
# has read the message, starred or collapsed the message, was
# mentioned in the message, etc.
#
# UserMessage is the largest table in a Zulip installation, even
# though each row is only 4 integers.
class AbstractUserMessage(models.Model):
    """Per-(user, message) delivery record; the flags bitfield carries
    per-user message state such as read/starred/mentioned (see the
    explanatory comment above this class)."""
    id: int = models.BigAutoField(primary_key=True)

    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    # The order here is important!  It's the order of fields in the bitfield.
    ALL_FLAGS = [
        'read',
        'starred',
        'collapsed',
        'mentioned',
        'wildcard_mentioned',
        # These next 4 flags are from features that have since been removed.
        'summarize_in_home',
        'summarize_in_stream',
        'force_expand',
        'force_collapse',
        # Whether the message contains any of the user's alert words.
        'has_alert_word',
        # The historical flag is used to mark messages which the user
        # did not receive when they were sent, but later added to
        # their history via e.g. starring the message.  This is
        # important accounting for the "Subscribed to stream" dividers.
        'historical',
        # Whether the message is a private message; this flag is a
        # denormalization of message.recipient.type to support an
        # efficient index on UserMessage for a user's private messages.
        'is_private',
        # Whether we've sent a push notification to the user's mobile
        # devices for this message that has not been revoked.
        'active_mobile_push_notification',
    ]
    # Certain flags are used only for internal accounting within the
    # Zulip backend, and don't make sense to expose to the API.
    NON_API_FLAGS = {"is_private", "active_mobile_push_notification"}
    # Certain additional flags are just set once when the UserMessage
    # row is created.
    NON_EDITABLE_FLAGS = {
        # These flags are bookkeeping and don't make sense to edit.
        "has_alert_word",
        "mentioned",
        "wildcard_mentioned",
        "historical",
        # Unused flags can't be edited.
        "force_expand",
        "force_collapse",
        "summarize_in_home",
        "summarize_in_stream",
    }
    flags: BitHandler = BitField(flags=ALL_FLAGS, default=0)

    class Meta:
        abstract = True
        unique_together = ("user_profile", "message")

    @staticmethod
    def where_unread() -> str:
        # Use this for Django ORM queries to access unread message.
        # This custom SQL plays nice with our partial indexes.  Grep
        # the code for example usage.
        return 'flags & 1 = 0'

    @staticmethod
    def where_starred() -> str:
        # Use this for Django ORM queries to access starred messages.
        # This custom SQL plays nice with our partial indexes.  Grep
        # the code for example usage.
        #
        # The key detail is that e.g.
        #   UserMessage.objects.filter(user_profile=user_profile, flags=UserMessage.flags.starred)
        # will generate a query involving `flags & 2 = 2`, which doesn't match our index.
        return 'flags & 2 <> 0'

    @staticmethod
    def where_active_push_notification() -> str:
        # See where_starred for documentation.
        return 'flags & 4096 <> 0'

    def flags_list(self) -> List[str]:
        """Names of all flags set on this row, excluding NON_API_FLAGS."""
        flags = int(self.flags)
        return self.flags_list_for_flags(flags)

    @staticmethod
    def flags_list_for_flags(val: int) -> List[str]:
        """Decode a raw bitmask into API-visible flag names.

        Note: iterates UserMessage.ALL_FLAGS, which is the same list as
        AbstractUserMessage.ALL_FLAGS (inherited, not overridden).
        """
        flags = []
        mask = 1
        for flag in UserMessage.ALL_FLAGS:
            if (val & mask) and flag not in AbstractUserMessage.NON_API_FLAGS:
                flags.append(flag)
            mask <<= 1
        return flags

    def __str__(self) -> str:
        display_recipient = get_display_recipient(self.message.recipient)
        return f"<{self.__class__.__name__}: {display_recipient} / {self.user_profile.email} ({self.flags_list()})>"
class UserMessage(AbstractUserMessage):
    """Delivery/state record for a live message (see AbstractUserMessage)."""
    message: Message = models.ForeignKey(Message, on_delete=CASCADE)
def get_usermessage_by_message_id(user_profile: UserProfile,
                                  message_id: int) -> Optional[UserMessage]:
    """The UserMessage row for (user, message), or None when the user
    never received the message."""
    try:
        return UserMessage.objects.select_related().get(
            user_profile=user_profile,
            message__id=message_id,
        )
    except UserMessage.DoesNotExist:
        return None
class ArchivedUserMessage(AbstractUserMessage):
    """Delivery/state record for an archived message."""
    # Annotation corrected: the ForeignKey targets ArchivedMessage.
    message: ArchivedMessage = models.ForeignKey(ArchivedMessage, on_delete=CASCADE)
class AbstractAttachment(models.Model):
    """A file uploaded by a user (shared schema for live and archived
    attachments)."""
    file_name: str = models.TextField(db_index=True)

    # path_id is a storage location agnostic representation of the path of the file.
    # If the path of a file is http://localhost:9991/user_uploads/a/b/abc/temp_file.py
    # then its path_id will be a/b/abc/temp_file.py.
    path_id: str = models.TextField(db_index=True, unique=True)
    owner: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    realm: Optional[Realm] = models.ForeignKey(Realm, blank=True, null=True, on_delete=CASCADE)

    create_time: datetime.datetime = models.DateTimeField(
        default=timezone_now, db_index=True,
    )
    # File size in bytes.  NOTE(review): presumably bytes — confirm with
    # the upload code that populates this field.
    size: Optional[int] = models.IntegerField(null=True)

    # Whether this attachment has been posted to a public stream, and
    # thus should be available to all non-guest users in the
    # organization (even if they weren't a recipient of a message
    # linking to it).  This lets us avoid looking up the corresponding
    # messages/streams to check permissions before serving these files.
    is_realm_public: bool = models.BooleanField(default=False)

    class Meta:
        abstract = True

    def __str__(self) -> str:
        return f"<{self.__class__.__name__}: {self.file_name}>"
class ArchivedAttachment(AbstractAttachment):
    """Attachment whose referencing messages have been archived."""
    messages: Manager = models.ManyToManyField(ArchivedMessage)
class Attachment(AbstractAttachment):
    """A live uploaded file, linked to the messages that reference it."""
    messages: Manager = models.ManyToManyField(Message)

    def is_claimed(self) -> bool:
        """Whether at least one message references this attachment."""
        return self.messages.count() > 0

    def to_dict(self) -> Dict[str, Any]:
        """API serialization of this attachment and its messages."""
        return {
            'id': self.id,
            'name': self.file_name,
            'path_id': self.path_id,
            'size': self.size,
            # convert to JavaScript-style UNIX timestamp so we can take
            # advantage of client timezones.
            'create_time': time.mktime(self.create_time.timetuple()) * 1000,
            'messages': [{
                'id': m.id,
                # NOTE(review): 'name' here holds the message's send
                # timestamp (JS-style, in ms), not a name — confirm the
                # API consumers expect this key.
                'name': time.mktime(m.date_sent.timetuple()) * 1000,
            } for m in self.messages.all()],
        }
# Keep the cached "used upload space" total in sync whenever attachments
# are created or deleted.
post_save.connect(flush_used_upload_space_cache, sender=Attachment)
post_delete.connect(flush_used_upload_space_cache, sender=Attachment)
def validate_attachment_request(user_profile: UserProfile, path_id: str) -> Optional[bool]:
    """Decide whether `user_profile` may access the attachment at `path_id`.

    Returns True if access is allowed, False if denied, and None if no
    attachment with that path_id exists.  This is security-sensitive
    permission logic; be careful when modifying it.
    """
    try:
        attachment = Attachment.objects.get(path_id=path_id)
    except Attachment.DoesNotExist:
        return None
    if user_profile == attachment.owner:
        # If you own the file, you can access it.
        return True
    if (attachment.is_realm_public and attachment.realm == user_profile.realm and
            user_profile.can_access_public_streams()):
        # Any user in the realm can access realm-public files
        return True
    messages = attachment.messages.all()
    if UserMessage.objects.filter(user_profile=user_profile, message__in=messages).exists():
        # If it was sent in a private message or private stream
        # message, then anyone who received that message can access it.
        return True
    # The user didn't receive any of the messages that included this
    # attachment.  But they might still have access to it, if it was
    # sent to a stream they are on where history is public to
    # subscribers.
    # These are subscriptions to a stream one of the messages was sent to
    relevant_stream_ids = Subscription.objects.filter(
        user_profile=user_profile,
        active=True,
        recipient__type=Recipient.STREAM,
        recipient__in=[m.recipient_id for m in messages]).values_list("recipient__type_id", flat=True)
    if len(relevant_stream_ids) == 0:
        return False
    return Stream.objects.filter(id__in=relevant_stream_ids,
                                 history_public_to_subscribers=True).exists()
def get_old_unclaimed_attachments(weeks_ago: int) -> Sequence[Attachment]:
    """Attachments referenced by no message and older than `weeks_ago` weeks."""
    # TODO: Change return type to QuerySet[Attachment]
    cutoff = timezone_now() - datetime.timedelta(weeks=weeks_ago)
    return Attachment.objects.filter(messages=None, create_time__lt=cutoff)
class Subscription(models.Model):
    """A user's membership in (and settings for) a stream or huddle recipient."""
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    recipient: Recipient = models.ForeignKey(Recipient, on_delete=CASCADE)
    # Whether the user has since unsubscribed.  We mark Subscription
    # objects as inactive, rather than deleting them, when a user
    # unsubscribes, so we can preserve user customizations like
    # notification settings, stream color, etc., if the user later
    # resubscribes.
    active: bool = models.BooleanField(default=True)
    # Whether this user had muted this stream.
    is_muted: Optional[bool] = models.BooleanField(null=True, default=False)
    # Fix: the default color literal was truncated/unterminated; restore the
    # standard light-gray default stream color.
    DEFAULT_STREAM_COLOR = "#c2c2c2"
    color: str = models.CharField(max_length=10, default=DEFAULT_STREAM_COLOR)
    pin_to_top: bool = models.BooleanField(default=False)
    # These fields are stream-level overrides for the user's default
    # configuration for notification, configured in UserProfile.  The
    # default, None, means we just inherit the user-level default.
    desktop_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
    audible_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
    push_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
    email_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
    wildcard_mentions_notify: Optional[bool] = models.BooleanField(null=True, default=None)
    class Meta:
        unique_together = ("user_profile", "recipient")
    def __str__(self) -> str:
        return f"<Subscription: {self.user_profile} -> {self.recipient}>"
    # Subscription fields included whenever a Subscription object is provided to
    # Zulip clients via the API.  A few details worth noting:
    # * These fields will generally be merged with Stream.API_FIELDS
    #   data about the stream.
    # * "user_profile" is usually implied as full API access to Subscription
    #   is primarily done for the current user; API access to other users'
    #   subscriptions is generally limited to boolean yes/no.
    # * "id" and "recipient_id" are not included as they are not used
    #   in the Zulip API; it's an internal implementation detail.
    #   Subscription objects are always looked up in the API via
    #   (user_profile, stream) pairs.
    # * "active" is often excluded in API use cases where it is implied.
    # * "is_muted" often needs to be copied to not "in_home_view" for
    #   backwards-compatibility.
    API_FIELDS = [
        "active",
        "color",
        "is_muted",
        "pin_to_top",
        "audible_notifications",
        "desktop_notifications",
        "email_notifications",
        "push_notifications",
        "wildcard_mentions_notify",
    ]
@cache_with_key(user_profile_by_id_cache_key, timeout=3600*24*7)
def get_user_profile_by_id(uid: int) -> UserProfile:
    """Fetch a UserProfile by primary key; cached for a week."""
    return UserProfile.objects.select_related().get(id=uid)
@cache_with_key(user_profile_by_email_cache_key, timeout=3600*24*7)
def get_user_profile_by_email(email: str) -> UserProfile:
    """Fetch a UserProfile by delivery email (case-insensitive); cached."""
    return UserProfile.objects.select_related().get(delivery_email__iexact=email.strip())
@cache_with_key(user_profile_by_api_key_cache_key, timeout=3600*24*7)
def maybe_get_user_profile_by_api_key(api_key: str) -> Optional[UserProfile]:
    """Look up a user by API key, returning (and caching) None on miss."""
    try:
        return UserProfile.objects.select_related().get(api_key=api_key)
    except UserProfile.DoesNotExist:
        # We will cache failed lookups with None.  The
        # use case here is that broken API clients may
        # continually ask for the same wrong API key, and
        # we want to handle that as quickly as possible.
        return None
def get_user_profile_by_api_key(api_key: str) -> UserProfile:
    """Like maybe_get_user_profile_by_api_key, but raises on a bad key."""
    profile = maybe_get_user_profile_by_api_key(api_key)
    if profile is None:
        raise UserProfile.DoesNotExist()
    return profile
def get_user_by_delivery_email(email: str, realm: Realm) -> UserProfile:
    """Fetch a user in `realm` by delivery email (case-insensitive, uncached)."""
    return UserProfile.objects.select_related().get(
        delivery_email__iexact=email.strip(), realm=realm)
def get_users_by_delivery_email(emails: Set[str], realm: Realm) -> QuerySet:
    """QuerySet of users in `realm` whose delivery email matches any of `emails`."""
    # OR together one case-insensitive match per email address.
    combined = Q()
    for address in emails:
        combined |= Q(delivery_email__iexact=address.strip())
    return UserProfile.objects.filter(realm=realm).filter(combined)
@cache_with_key(user_profile_cache_key, timeout=3600*24*7)
def get_user(email: str, realm: Realm) -> UserProfile:
    """Fetch a user by Zulip-facing email within `realm`; cached for a week."""
    return UserProfile.objects.select_related().get(email__iexact=email.strip(), realm=realm)
def get_active_user(email: str, realm: Realm) -> UserProfile:
    """Fetch a user by email in `realm`, treating deactivated users as missing."""
    user_profile = get_user(email, realm)
    if user_profile.is_active:
        return user_profile
    raise UserProfile.DoesNotExist()
def get_user_profile_by_id_in_realm(uid: int, realm: Realm) -> UserProfile:
    """Fetch a user by id, scoped to `realm` (raises DoesNotExist otherwise)."""
    return UserProfile.objects.select_related().get(id=uid, realm=realm)
def get_active_user_profile_by_id_in_realm(uid: int, realm: Realm) -> UserProfile:
    """Fetch a user by id in `realm`, treating deactivated users as missing."""
    user_profile = get_user_profile_by_id_in_realm(uid, realm)
    if user_profile.is_active:
        return user_profile
    raise UserProfile.DoesNotExist()
def get_user_including_cross_realm(email: str, realm: Optional[Realm]=None) -> UserProfile:
    """Fetch a user by email, resolving cross-realm system bots specially.

    `realm` may only be omitted for cross-realm bot emails.
    """
    if is_cross_realm_bot_email(email):
        return get_system_bot(email)
    assert realm is not None
    return get_user(email, realm)
@cache_with_key(bot_profile_cache_key, timeout=3600*24*7)
def get_system_bot(email: str) -> UserProfile:
    """Fetch a cross-realm system bot by email; cached for a week."""
    return UserProfile.objects.select_related().get(email__iexact=email.strip())
def get_user_by_id_in_realm_including_cross_realm(
        uid: int,
        realm: Optional[Realm],
) -> UserProfile:
    """Fetch a user by id in `realm`, also accepting cross-realm system bots."""
    user_profile = get_user_profile_by_id(uid)
    if user_profile.realm == realm:
        return user_profile
    # Note: This doesn't validate whether the `realm` passed in is
    # None/invalid for the CROSS_REALM_BOT_EMAILS case.
    if user_profile.delivery_email in settings.CROSS_REALM_BOT_EMAILS:
        return user_profile
    raise UserProfile.DoesNotExist()
@cache_with_key(realm_user_dicts_cache_key, timeout=3600*24*7)
def get_realm_user_dicts(realm_id: int) -> List[Dict[str, Any]]:
    """Lightweight dicts (realm_user_dict_fields) for all users in a realm; cached."""
    return UserProfile.objects.filter(
        realm_id=realm_id,
    ).values(*realm_user_dict_fields)
@cache_with_key(active_user_ids_cache_key, timeout=3600*24*7)
def active_user_ids(realm_id: int) -> List[int]:
    """IDs of every active user in the realm; cached for a week."""
    ids = UserProfile.objects.filter(
        realm_id=realm_id,
        is_active=True,
    ).values_list('id', flat=True)
    return list(ids)
@cache_with_key(active_non_guest_user_ids_cache_key, timeout=3600*24*7)
def active_non_guest_user_ids(realm_id: int) -> List[int]:
    """IDs of active users in the realm, excluding guests; cached for a week."""
    ids = UserProfile.objects.filter(
        realm_id=realm_id,
        is_active=True,
    ).exclude(
        role=UserProfile.ROLE_GUEST,
    ).values_list('id', flat=True)
    return list(ids)
def get_source_profile(email: str, string_id: str) -> Optional[UserProfile]:
    """Fetch a user by email in the realm with subdomain `string_id`, or None."""
    try:
        return get_user_by_delivery_email(email, get_realm(string_id))
    except (Realm.DoesNotExist, UserProfile.DoesNotExist):
        return None
@cache_with_key(bot_dicts_in_realm_cache_key, timeout=3600*24*7)
def get_bot_dicts_in_realm(realm: Realm) -> List[Dict[str, Any]]:
    """Lightweight dicts (bot_dict_fields) for all bots in a realm; cached."""
    return UserProfile.objects.filter(realm=realm, is_bot=True).values(*bot_dict_fields)
def is_cross_realm_bot_email(email: str) -> bool:
    """Whether `email` belongs to one of the configured cross-realm system bots."""
    return email.lower() in settings.CROSS_REALM_BOT_EMAILS
# The Huddle class represents a group of individuals who have had a
# Group Private Message conversation together. The actual membership
# of the Huddle is stored in the Subscription table just like with
# Streams, and a hash of that list is stored in the huddle_hash field
# below, to support efficiently mapping from a set of users to the
# corresponding Huddle object.
class Huddle(models.Model):
    """A group-PM conversation, keyed by a hash of its member user ids."""
    # TODO: We should consider whether using
    # CommaSeparatedIntegerField would be better.
    huddle_hash: str = models.CharField(max_length=40, db_index=True, unique=True)
    # Foreign key to the Recipient object for this Huddle.
    recipient = models.ForeignKey(Recipient, null=True, on_delete=models.SET_NULL)
def get_huddle_hash(id_list: List[int]) -> str:
    """Deterministic digest identifying the *set* of user ids in a huddle."""
    # Deduplicate and sort so that any ordering of the same members
    # hashes to the same value.
    unique_ids = sorted(set(id_list))
    return make_safe_digest(",".join(map(str, unique_ids)))
def huddle_hash_cache_key(huddle_hash: str) -> str:
    """Cache key under which the Huddle for `huddle_hash` is stored."""
    return "huddle_by_hash:" + huddle_hash
def get_huddle(id_list: List[int]) -> Huddle:
    """Fetch (or lazily create) the Huddle for this set of user ids."""
    huddle_hash = get_huddle_hash(id_list)
    return get_huddle_backend(huddle_hash, id_list)
@cache_with_key(lambda huddle_hash, id_list: huddle_hash_cache_key(huddle_hash), timeout=3600*24*7)
def get_huddle_backend(huddle_hash: str, id_list: List[int]) -> Huddle:
    """Get-or-create the Huddle row plus its Recipient and Subscriptions.

    Runs in a transaction so that a newly created Huddle always gets its
    Recipient and member Subscription rows atomically.
    """
    with transaction.atomic():
        (huddle, created) = Huddle.objects.get_or_create(huddle_hash=huddle_hash)
        if created:
            recipient = Recipient.objects.create(type_id=huddle.id,
                                                 type=Recipient.HUDDLE)
            huddle.recipient = recipient
            huddle.save(update_fields=["recipient"])
            subs_to_create = [Subscription(recipient=recipient,
                                           user_profile_id=user_profile_id)
                              for user_profile_id in id_list]
            Subscription.objects.bulk_create(subs_to_create)
        return huddle
class UserActivity(models.Model):
    """Per-(user, client, endpoint) counters of API usage and last-seen time."""
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    client: Client = models.ForeignKey(Client, on_delete=CASCADE)
    # The API endpoint / query name being counted.
    query: str = models.CharField(max_length=50, db_index=True)
    count: int = models.IntegerField()
    last_visit: datetime.datetime = models.DateTimeField('last visit')
    class Meta:
        unique_together = ("user_profile", "client", "query")
class UserActivityInterval(models.Model):
    """A contiguous interval during which a user was active."""
    # Intervals shorter than this are merged/extended rather than created.
    MIN_INTERVAL_LENGTH = datetime.timedelta(minutes=15)
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    start: datetime.datetime = models.DateTimeField('start time', db_index=True)
    end: datetime.datetime = models.DateTimeField('end time', db_index=True)
class UserPresence(models.Model):
    """Latest presence report per (user, client), powering the buddy list."""
    class Meta:
        unique_together = ("user_profile", "client")
        index_together = [
            ("realm", "timestamp"),
        ]
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    client: Client = models.ForeignKey(Client, on_delete=CASCADE)
    # The time we heard this update from the client.
    timestamp: datetime.datetime = models.DateTimeField('presence changed')
    # The user was actively using this Zulip client as of `timestamp` (i.e.,
    # they had interacted with the client recently).  When the timestamp is
    # itself recent, this is the green "active" status in the webapp.
    ACTIVE = 1
    # There had been no user activity (keyboard/mouse/etc.) on this client
    # recently.  So the client was online at the specified time, but it
    # could be the user's desktop which they were away from.  Displayed as
    # orange/idle if the timestamp is current.
    IDLE = 2
    # Information from the client about the user's recent interaction with
    # that client, as of `timestamp`.  Possible values above.
    #
    # There is no "inactive" status, because that is encoded by the
    # timestamp being old.
    status: int = models.PositiveSmallIntegerField(default=ACTIVE)
    @staticmethod
    def status_to_string(status: int) -> str:
        """Map an integer status constant to its API string form."""
        if status == UserPresence.ACTIVE:
            return 'active'
        elif status == UserPresence.IDLE:
            return 'idle'
        else:  # nocoverage # TODO: Add a presence test to cover this.
            raise ValueError(f'Unknown status: {status}')
    @staticmethod
    def to_presence_dict(client_name: str, status: int, dt: datetime.datetime, push_enabled: bool=False,
                         has_push_devices: bool=False) -> Dict[str, Any]:
        """Build the API presence dict for one (client, status, time) report."""
        presence_val = UserPresence.status_to_string(status)
        timestamp = datetime_to_timestamp(dt)
        return dict(
            client=client_name,
            status=presence_val,
            timestamp=timestamp,
            pushable=(push_enabled and has_push_devices),
        )
    def to_dict(self) -> Dict[str, Any]:
        """API dict for this row (without push-notification details)."""
        return UserPresence.to_presence_dict(
            self.client.name,
            self.status,
            self.timestamp,
        )
    @staticmethod
    def status_from_string(status: str) -> Optional[int]:
        """Inverse of status_to_string; None for unrecognized strings."""
        if status == 'active':
            status_val: Optional[int] = UserPresence.ACTIVE  # See https://github.com/python/mypy/issues/2611
        elif status == 'idle':
            status_val = UserPresence.IDLE
        else:
            status_val = None
        return status_val
class UserStatus(models.Model):
    """A user's away flag and custom status text (one row per user)."""
    user_profile: UserProfile = models.OneToOneField(UserProfile, on_delete=CASCADE)
    timestamp: datetime.datetime = models.DateTimeField()
    client: Client = models.ForeignKey(Client, on_delete=CASCADE)
    NORMAL = 0
    AWAY = 1
    status: int = models.PositiveSmallIntegerField(default=NORMAL)
    status_text: str = models.CharField(max_length=255, default='')
class DefaultStream(models.Model):
    """Marks a stream as one that new users of the realm are auto-subscribed to."""
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    stream: Stream = models.ForeignKey(Stream, on_delete=CASCADE)
    class Meta:
        unique_together = ("realm", "stream")
class DefaultStreamGroup(models.Model):
    """A named, described bundle of streams offered to new users at signup."""
    MAX_NAME_LENGTH = 60
    name: str = models.CharField(max_length=MAX_NAME_LENGTH, db_index=True)
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    streams: Manager = models.ManyToManyField('Stream')
    description: str = models.CharField(max_length=1024, default='')
    class Meta:
        unique_together = ("realm", "name")
    def to_dict(self) -> Dict[str, Any]:
        """API representation, including the member streams' dicts."""
        return dict(name=self.name,
                    id=self.id,
                    description=self.description,
                    streams=[stream.to_dict() for stream in self.streams.all()])
def get_default_stream_groups(realm: Realm) -> List[DefaultStreamGroup]:
    # NOTE(review): this actually returns a QuerySet, not a List; the
    # annotation is kept for compatibility with existing type-checking.
    return DefaultStreamGroup.objects.filter(realm=realm)
class AbstractScheduledJob(models.Model):
    """Base schema for work scheduled to run at a future time."""
    scheduled_timestamp: datetime.datetime = models.DateTimeField(db_index=True)
    # JSON representation of arguments to consumer
    data: str = models.TextField()
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    class Meta:
        abstract = True
class ScheduledEmail(AbstractScheduledJob):
    """An email queued for future delivery (welcome, digest, reminders)."""
    # Exactly one of users or address should be set.  These are
    # duplicate values, used to efficiently filter the set of
    # ScheduledEmails for use in clear_scheduled_emails; the
    # recipients used for actually sending messages are stored in the
    # data field of AbstractScheduledJob.
    users: Manager = models.ManyToManyField(UserProfile)
    # Just the address part of a full "name <address>" email address
    address: Optional[str] = models.EmailField(null=True, db_index=True)
    # Valid types are below
    WELCOME = 1
    DIGEST = 2
    INVITATION_REMINDER = 3
    type: int = models.PositiveSmallIntegerField()
    def __str__(self) -> str:
        return f"<ScheduledEmail: {self.type} {self.address or list(self.users.all())} {self.scheduled_timestamp}>"
class MissedMessageEmailAddress(models.Model):
    """A one-time reply-to address enabling email replies to a missed message."""
    # Address expires EXPIRY_SECONDS (5 days) after `timestamp`.
    EXPIRY_SECONDS = 60 * 60 * 24 * 5
    # Each address may be used at most this many times.
    ALLOWED_USES = 1
    message: Message = models.ForeignKey(Message, on_delete=CASCADE)
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    email_token: str = models.CharField(max_length=34, unique=True, db_index=True)
    # Timestamp of when the missed message address generated.
    # The address is valid until timestamp + EXPIRY_SECONDS.
    timestamp: datetime.datetime = models.DateTimeField(db_index=True, default=timezone_now)
    times_used: int = models.PositiveIntegerField(default=0, db_index=True)
    def __str__(self) -> str:
        return settings.EMAIL_GATEWAY_PATTERN % (self.email_token,)
    def is_usable(self) -> bool:
        """Whether the address is both unexpired and under its use limit."""
        not_expired = timezone_now() <= self.timestamp + timedelta(seconds=self.EXPIRY_SECONDS)
        has_uses_left = self.times_used < self.ALLOWED_USES
        return has_uses_left and not_expired
    def increment_times_used(self) -> None:
        """Record one use of the address."""
        self.times_used += 1
        self.save(update_fields=["times_used"])
class ScheduledMessage(models.Model):
    """A message composed now but scheduled to be sent (or re-sent) later."""
    sender: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    recipient: Recipient = models.ForeignKey(Recipient, on_delete=CASCADE)
    # Legacy column name; exposed via topic_name()/set_topic_name().
    subject: str = models.CharField(max_length=MAX_TOPIC_NAME_LENGTH)
    content: str = models.TextField()
    sending_client: Client = models.ForeignKey(Client, on_delete=CASCADE)
    stream: Optional[Stream] = models.ForeignKey(Stream, null=True, on_delete=CASCADE)
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    scheduled_timestamp: datetime.datetime = models.DateTimeField(db_index=True)
    delivered: bool = models.BooleanField(default=False)
    SEND_LATER = 1
    REMIND = 2
    DELIVERY_TYPES = (
        (SEND_LATER, 'send_later'),
        (REMIND, 'remind'),
    )
    delivery_type: int = models.PositiveSmallIntegerField(
        choices=DELIVERY_TYPES, default=SEND_LATER,
    )
    def topic_name(self) -> str:
        return self.subject
    def set_topic_name(self, topic_name: str) -> None:
        self.subject = topic_name
    def __str__(self) -> str:
        display_recipient = get_display_recipient(self.recipient)
        return f"<ScheduledMessage: {display_recipient} {self.subject} {self.sender} {self.scheduled_timestamp}>"
# Maps email-template identifiers used in the codebase to the
# ScheduledEmail.type values stored in the database.
EMAIL_TYPES = {
    'followup_day1': ScheduledEmail.WELCOME,
    'followup_day2': ScheduledEmail.WELCOME,
    'digest': ScheduledEmail.DIGEST,
    'invitation_reminder': ScheduledEmail.INVITATION_REMINDER,
}
class AbstractRealmAuditLog(models.Model):
    """Base schema for the append-only audit log of realm/user/billing events."""
    event_time: datetime.datetime = models.DateTimeField(db_index=True)
    # If True, event_time is an overestimate of the true time.  Can be used
    # by migrations when introducing a new event_type.
    backfilled: bool = models.BooleanField(default=False)
    # Keys within extra_data, when extra_data is a json dict.  Keys are strings because
    # json keys must always be strings.
    OLD_VALUE = '1'
    NEW_VALUE = '2'
    ROLE_COUNT = '10'
    ROLE_COUNT_HUMANS = '11'
    ROLE_COUNT_BOTS = '12'
    extra_data: Optional[str] = models.TextField(null=True)
    # Event types
    USER_CREATED = 101
    USER_ACTIVATED = 102
    USER_DEACTIVATED = 103
    USER_REACTIVATED = 104
    USER_ROLE_CHANGED = 105
    USER_SOFT_ACTIVATED = 120
    USER_SOFT_DEACTIVATED = 121
    USER_PASSWORD_CHANGED = 122
    USER_AVATAR_SOURCE_CHANGED = 123
    USER_FULL_NAME_CHANGED = 124
    USER_EMAIL_CHANGED = 125
    USER_TOS_VERSION_CHANGED = 126
    USER_API_KEY_CHANGED = 127
    USER_BOT_OWNER_CHANGED = 128
    REALM_DEACTIVATED = 201
    REALM_REACTIVATED = 202
    REALM_SCRUBBED = 203
    REALM_PLAN_TYPE_CHANGED = 204
    REALM_LOGO_CHANGED = 205
    REALM_EXPORTED = 206
    SUBSCRIPTION_CREATED = 301
    SUBSCRIPTION_ACTIVATED = 302
    SUBSCRIPTION_DEACTIVATED = 303
    STRIPE_CUSTOMER_CREATED = 401
    STRIPE_CARD_CHANGED = 402
    STRIPE_PLAN_CHANGED = 403
    STRIPE_PLAN_QUANTITY_RESET = 404
    CUSTOMER_CREATED = 501
    CUSTOMER_PLAN_CREATED = 502
    CUSTOMER_SWITCHED_FROM_MONTHLY_TO_ANNUAL_PLAN = 503
    event_type: int = models.PositiveSmallIntegerField()
    # event_types synced from on-prem installations to Zulip Cloud when
    # billing for mobile push notifications is enabled.  Every billing
    # event_type should have ROLE_COUNT populated in extra_data.
    SYNCED_BILLING_EVENTS = [
        USER_CREATED, USER_ACTIVATED, USER_DEACTIVATED, USER_REACTIVATED, USER_ROLE_CHANGED,
        REALM_DEACTIVATED, REALM_REACTIVATED]
    class Meta:
        abstract = True
class RealmAuditLog(AbstractRealmAuditLog):
    """Concrete audit-log row tying events to a realm and the users involved."""
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    # The user who performed the action (may differ from the affected user).
    acting_user: Optional[UserProfile] = models.ForeignKey(
        UserProfile, null=True, related_name="+", on_delete=CASCADE,
    )
    modified_user: Optional[UserProfile] = models.ForeignKey(
        UserProfile, null=True, related_name="+", on_delete=CASCADE,
    )
    modified_stream: Optional[Stream] = models.ForeignKey(
        Stream, null=True, on_delete=CASCADE,
    )
    event_last_message_id: Optional[int] = models.IntegerField(null=True)
    def __str__(self) -> str:
        if self.modified_user is not None:
            return f"<RealmAuditLog: {self.modified_user} {self.event_type} {self.event_time} {self.id}>"
        if self.modified_stream is not None:
            return f"<RealmAuditLog: {self.modified_stream} {self.event_type} {self.event_time} {self.id}>"
        return f"<RealmAuditLog: {self.realm} {self.event_type} {self.event_time} {self.id}>"
class UserHotspot(models.Model):
    """Records that a user has seen/completed a given onboarding hotspot."""
    user: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    hotspot: str = models.CharField(max_length=30)
    timestamp: datetime.datetime = models.DateTimeField(default=timezone_now)
    class Meta:
        unique_together = ("user", "hotspot")
def check_valid_user_ids(realm_id: int, user_ids: List[int],
                         allow_deactivated: bool=False) -> Optional[str]:
    """Validate that user_ids are active, human users of the given realm.

    Returns an error string on the first invalid id, or None if all are valid.
    """
    error = check_list(check_int)("User IDs", user_ids)
    if error:
        return error
    realm = Realm.objects.get(id=realm_id)
    for user_id in user_ids:
        # TODO: Structurally, we should be doing a bulk fetch query to
        # get the users here, not doing these in a loop.  But because
        # this is a rarely used feature and likely to never have more
        # than a handful of users, it's probably mostly OK.
        try:
            user_profile = get_user_profile_by_id_in_realm(user_id, realm)
        except UserProfile.DoesNotExist:
            return _('Invalid user ID: %d') % (user_id)
        if not allow_deactivated:
            if not user_profile.is_active:
                return _('User with ID %d is deactivated') % (user_id)
        if (user_profile.is_bot):
            return _('User with ID %d is a bot') % (user_id)
    return None
class CustomProfileField(models.Model):
    """Definition of a realm-level custom profile field (type, name, options)."""
    HINT_MAX_LENGTH = 80
    NAME_MAX_LENGTH = 40
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    name: str = models.CharField(max_length=NAME_MAX_LENGTH)
    hint: Optional[str] = models.CharField(max_length=HINT_MAX_LENGTH, default='', null=True)
    # Display position of the field in the UI.
    order: int = models.IntegerField(default=0)
    SHORT_TEXT = 1
    LONG_TEXT = 2
    CHOICE = 3
    DATE = 4
    URL = 5
    USER = 6
    EXTERNAL_ACCOUNT = 7
    # These are the fields whose validators require more than var_name
    # and value argument. i.e. CHOICE require field_data, USER require
    # realm as argument.
    CHOICE_FIELD_TYPE_DATA: List[ExtendedFieldElement] = [
        (CHOICE, str(_('List of options')), validate_choice_field, str, "CHOICE"),
    ]
    # NOTE(review): the USER field converter is `eval`, which would execute
    # arbitrary Python if it ever ran on untrusted input.  Presumably values
    # here are server-generated list literals, but consider
    # `ast.literal_eval` or JSON parsing instead — TODO confirm and replace.
    USER_FIELD_TYPE_DATA: List[UserFieldElement] = [
        (USER, str(_('Person picker')), check_valid_user_ids, eval, "USER"),
    ]
    CHOICE_FIELD_VALIDATORS: Dict[int, ExtendedValidator] = {
        item[0]: item[2] for item in CHOICE_FIELD_TYPE_DATA
    }
    USER_FIELD_VALIDATORS: Dict[int, RealmUserValidator] = {
        item[0]: item[2] for item in USER_FIELD_TYPE_DATA
    }
    FIELD_TYPE_DATA: List[FieldElement] = [
        # Type, Display Name, Validator, Converter, Keyword
        (SHORT_TEXT, str(_('Short text')), check_short_string, str, "SHORT_TEXT"),
        (LONG_TEXT, str(_('Long text')), check_long_string, str, "LONG_TEXT"),
        (DATE, str(_('Date picker')), check_date, str, "DATE"),
        (URL, str(_('Link')), check_url, str, "URL"),
        (EXTERNAL_ACCOUNT, str(_('External account')), check_short_string, str, "EXTERNAL_ACCOUNT"),
    ]
    ALL_FIELD_TYPES = [*FIELD_TYPE_DATA, *CHOICE_FIELD_TYPE_DATA, *USER_FIELD_TYPE_DATA]
    FIELD_VALIDATORS: Dict[int, Validator] = {item[0]: item[2] for item in FIELD_TYPE_DATA}
    FIELD_CONVERTERS: Dict[int, Callable[[Any], Any]] = {item[0]: item[3] for item in ALL_FIELD_TYPES}
    FIELD_TYPE_CHOICES: List[Tuple[int, str]] = [(item[0], item[1]) for item in ALL_FIELD_TYPES]
    FIELD_TYPE_CHOICES_DICT: Dict[str, Dict[str, Union[str, int]]] = {
        item[4]: {"id": item[0], "name": item[1]} for item in ALL_FIELD_TYPES
    }
    field_type: int = models.PositiveSmallIntegerField(
        choices=FIELD_TYPE_CHOICES, default=SHORT_TEXT,
    )
    # A JSON blob of any additional data needed to define the field beyond
    # type/name/hint.
    #
    # The format depends on the type.  Field types SHORT_TEXT, LONG_TEXT,
    # DATE, URL, and USER leave this null.  Fields of type CHOICE store the
    # choices' descriptions.
    #
    # Note: There is no performance overhead of using TextField in PostgreSQL.
    # See https://www.postgresql.org/docs/9.0/static/datatype-character.html
    field_data: Optional[str] = models.TextField(default='', null=True)
    class Meta:
        unique_together = ('realm', 'name')
    def as_dict(self) -> ProfileDataElementBase:
        """API representation of the field definition."""
        return {
            'id': self.id,
            'name': self.name,
            'type': self.field_type,
            'hint': self.hint,
            'field_data': self.field_data,
            'order': self.order,
        }
    def is_renderable(self) -> bool:
        """Whether values of this field are rendered as markdown."""
        if self.field_type in [CustomProfileField.SHORT_TEXT, CustomProfileField.LONG_TEXT]:
            return True
        return False
    def __str__(self) -> str:
        return f"<CustomProfileField: {self.realm} {self.name} {self.field_type} {self.order}>"
def custom_profile_fields_for_realm(realm_id: int) -> List[CustomProfileField]:
    """All custom profile fields of a realm, in display order."""
    return CustomProfileField.objects.filter(realm=realm_id).order_by('order')
class CustomProfileFieldValue(models.Model):
    """A single user's value for one custom profile field."""
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    field: CustomProfileField = models.ForeignKey(CustomProfileField, on_delete=CASCADE)
    value: str = models.TextField()
    # Markdown-rendered form of `value`, when the field type is renderable.
    rendered_value: Optional[str] = models.TextField(null=True, default=None)
    class Meta:
        unique_together = ('user_profile', 'field')
    def __str__(self) -> str:
        return f"<CustomProfileFieldValue: {self.user_profile} {self.field} {self.value}>"
# Interfaces for services
# They provide additional functionality like parsing message to obtain query url, data to be sent to url,
# and parsing the response.
# Names of the outgoing-webhook interface implementations, referenced by
# Service._interfaces below.
GENERIC_INTERFACE = 'GenericService'
SLACK_INTERFACE = 'SlackOutgoingWebhookService'
# A Service corresponds to either an outgoing webhook bot or an embedded bot.
# The type of Service is determined by the bot_type field of the referenced
# UserProfile.
#
# If the Service is an outgoing webhook bot:
# - name is any human-readable identifier for the Service
# - base_url is the address of the third-party site
# - token is used for authentication with the third-party site
#
# If the Service is an embedded bot:
# - name is the canonical name for the type of bot (e.g. 'xkcd' for an instance
# of the xkcd bot); multiple embedded bots can have the same name, but all
# embedded bots with the same name will run the same code
# - base_url and token are currently unused
class Service(models.Model):
    """Configuration for an outgoing-webhook bot or embedded bot (see comment above)."""
    name: str = models.CharField(max_length=UserProfile.MAX_NAME_LENGTH)
    # Bot user corresponding to the Service.  The bot_type of this user
    # deterines the type of service.  If non-bot services are added later,
    # user_profile can also represent the owner of the Service.
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    base_url: str = models.TextField()
    token: str = models.TextField()
    # Interface / API version of the service.
    interface: int = models.PositiveSmallIntegerField(default=1)
    # Valid interfaces are {generic, zulip_bot_service, slack}
    GENERIC = 1
    SLACK = 2
    ALLOWED_INTERFACE_TYPES = [
        GENERIC,
        SLACK,
    ]
    # N.B. If we used Django's choice=... we would get this for free (kinda)
    _interfaces: Dict[int, str] = {
        GENERIC: GENERIC_INTERFACE,
        SLACK: SLACK_INTERFACE,
    }
    def interface_name(self) -> str:
        """Human/class name of this service's interface implementation."""
        # Raises KeyError if invalid
        return self._interfaces[self.interface]
def get_bot_services(user_profile_id: str) -> List[Service]:
    """All Service rows configured for the given bot user id."""
    return list(Service.objects.filter(user_profile__id=user_profile_id))
def get_service_profile(user_profile_id: str, service_name: str) -> Service:
    """The named Service of the given bot user (raises DoesNotExist if missing)."""
    return Service.objects.get(user_profile__id=user_profile_id, name=service_name)
class BotStorageData(models.Model):
    """Key/value storage for embedded bots, scoped per bot user."""
    bot_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    key: str = models.TextField(db_index=True)
    value: str = models.TextField()
    class Meta:
        unique_together = ("bot_profile", "key")
class BotConfigData(models.Model):
    """Key/value configuration for embedded bots, scoped per bot user."""
    bot_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    key: str = models.TextField(db_index=True)
    value: str = models.TextField()
    class Meta:
        unique_together = ("bot_profile", "key")
class InvalidFakeEmailDomain(Exception):
    """Raised when settings.FAKE_EMAIL_DOMAIN cannot form valid addresses."""
    pass
def get_fake_email_domain() -> str:
    """Return the configured fake email domain, validating it first."""
    domain = settings.FAKE_EMAIL_DOMAIN
    try:
        # Check that the fake email domain can be used to form valid
        # email addresses.
        validate_email("bot@" + domain)
    except ValidationError:
        raise InvalidFakeEmailDomain(domain + ' is not a valid domain.')
    return domain
class AlertWord(models.Model):
    """A word/phrase a user wants highlighted when it appears in messages."""
    # Realm isn't necessary, but it's a nice denormalization.  Users
    # never move to another realm, so it's static, and having Realm
    # here optimizes the main query on this table, which is fetching
    # all the alert words in a realm.
    realm: Realm = models.ForeignKey(Realm, db_index=True, on_delete=CASCADE)
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    # Case-insensitive name for the alert word.
    word: str = models.TextField()
    class Meta:
        unique_together = ("user_profile", "word")
def flush_realm_alert_words(realm: Realm) -> None:
    """Invalidate both alert-word caches (word list and matching automaton)."""
    cache_delete(realm_alert_words_cache_key(realm))
    cache_delete(realm_alert_words_automaton_cache_key(realm))
def flush_alert_word(sender: Any, **kwargs: Any) -> None:
    """Signal handler: invalidate alert-word caches for the saved word's realm."""
    instance = kwargs['instance']
    flush_realm_alert_words(instance.realm)
# Keep the realm's alert-word caches in sync with AlertWord changes.
post_save.connect(flush_alert_word, sender=AlertWord)
post_delete.connect(flush_alert_word, sender=AlertWord)
| true | true |
f72048b0b98e8bc328e6ee0236c9c737d55cd314 | 19,936 | py | Python | syndicate/core/build/deployment_processor.py | Dmytro-Skorniakov/aws-syndicate | 81363334886c53969f1f0a0c0ac0168318204990 | [
"Apache-2.0"
] | null | null | null | syndicate/core/build/deployment_processor.py | Dmytro-Skorniakov/aws-syndicate | 81363334886c53969f1f0a0c0ac0168318204990 | [
"Apache-2.0"
] | null | null | null | syndicate/core/build/deployment_processor.py | Dmytro-Skorniakov/aws-syndicate | 81363334886c53969f1f0a0c0ac0168318204990 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2018 EPAM Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import concurrent
import json
from concurrent.futures import ALL_COMPLETED, ThreadPoolExecutor
from datetime import date, datetime
from functools import cmp_to_key
from syndicate.commons.log_helper import get_logger
from syndicate.core.build.bundle_processor import (create_deploy_output,
load_deploy_output,
load_failed_deploy_output,
load_meta_resources,
remove_deploy_output,
remove_failed_deploy_output)
from syndicate.core.build.meta_processor import resolve_meta
from syndicate.core.constants import (BUILD_META_FILE_NAME,
CLEAN_RESOURCE_TYPE_PRIORITY,
DEPLOY_RESOURCE_TYPE_PRIORITY,
LAMBDA_TYPE)
from syndicate.core.helper import exit_on_exception, prettify_json
from syndicate.core.resources import (APPLY_MAPPING, CREATE_RESOURCE,
DESCRIBE_RESOURCE, REMOVE_RESOURCE,
RESOURCE_CONFIGURATION_PROCESSORS,
RESOURCE_IDENTIFIER, UPDATE_RESOURCE)
_LOG = get_logger('syndicate.core.build.deployment_processor')
def get_dependencies(name, meta, resources_dict, resources):
""" Get dependencies from resources that needed to create them too.
:type name: str
:type meta: dict
:type resources_dict: dict
:param resources:
:param resources_dict: resources that will be created {name: meta}
"""
resources_dict[name] = meta
if meta.get('dependencies'):
for dependency in meta.get('dependencies'):
dep_name = dependency['resource_name']
dep_meta = resources[dep_name]
resources_dict[dep_name] = dep_meta
if dep_meta.get('dependencies'):
get_dependencies(dep_name, dep_meta, resources_dict, resources)
# todo implement resources sorter according to priority
def _process_resources(resources, handlers_mapping):
res_type = None
output = {}
args = []
resource_type = None
try:
for res_name, res_meta in resources:
res_type = res_meta['resource_type']
if resource_type is None:
resource_type = res_type
if res_type == resource_type:
args.append({'name': res_name, 'meta': res_meta})
continue
elif res_type != resource_type:
_LOG.info('Processing {0} resources ...'.format(resource_type))
func = handlers_mapping[resource_type]
response = func(args) # todo exception may be raised here
if response:
output.update(response)
del args[:]
args.append({'name': res_name, 'meta': res_meta})
resource_type = res_type
if args:
_LOG.info('Processing {0} resources ...'.format(resource_type))
func = handlers_mapping[resource_type]
response = func(args)
if response:
output.update(response)
return True, output
except Exception as e:
_LOG.error('Error occurred while {0} resource creating: {1}'.format(
res_type, str(e)))
# args list always contains one item here
return False, update_failed_output(args[0]['name'], args[0]['meta'],
resource_type, output)
def update_failed_output(res_name, res_meta, resource_type, output):
describe_func = DESCRIBE_RESOURCE[resource_type]
failed_resource_output = describe_func(res_name, res_meta)
if failed_resource_output:
if isinstance(failed_resource_output, list):
for item in failed_resource_output:
output.update(item)
else:
output.update(failed_resource_output)
return output
def deploy_resources(resources):
return _process_resources(resources=resources,
handlers_mapping=CREATE_RESOURCE)
def update_resources(resources):
return _process_resources(resources=resources,
handlers_mapping=UPDATE_RESOURCE)
def clean_resources(output):
args = []
resource_type = None
# clean all resources
for arn, config in output:
res_type = config['resource_meta']['resource_type']
if resource_type is None:
resource_type = res_type
if res_type == resource_type:
args.append({'arn': arn, 'config': config})
continue
elif res_type != resource_type:
_LOG.info('Removing {0} resources ...'.format(resource_type))
func = REMOVE_RESOURCE[resource_type]
func(args)
del args[:]
args.append({'arn': arn, 'config': config})
resource_type = res_type
if args:
_LOG.info('Removing {0} resources ...'.format(resource_type))
func = REMOVE_RESOURCE[resource_type]
func(args)
# todo implement saving failed output
def continue_deploy_resources(resources, failed_output):
updated_output = {}
deploy_result = True
res_type = None
try:
args = []
resource_type = None
for res_name, res_meta in resources:
res_type = res_meta['resource_type']
if resource_type is None:
resource_type = res_type
if res_type == resource_type:
resource_output = __find_output_by_resource_name(
failed_output, res_name)
args.append(
{
'name': res_name,
'meta': res_meta,
'current_configurations': resource_output
})
continue
elif res_type != resource_type:
func = RESOURCE_CONFIGURATION_PROCESSORS.get(resource_type)
if func:
response = func(args)
if response:
updated_output.update(
json.loads(
json.dumps(response, default=_json_serial)))
else:
# function to update resource is not present
# move existing output for resources to new output
__move_output_content(args, failed_output, updated_output)
del args[:]
resource_output = __find_output_by_resource_name(
failed_output, res_name)
args.append({
'name': res_name,
'meta': res_meta,
'current_configurations': resource_output
})
resource_type = res_type
if args:
func = RESOURCE_CONFIGURATION_PROCESSORS.get(resource_type)
if func:
response = func(args)
if response:
updated_output.update(
json.loads(
json.dumps(response, default=_json_serial)))
else:
# function to update resource is not present
# move existing output- for resources to new output
__move_output_content(args, failed_output, updated_output)
except Exception as e:
_LOG.error('Error occurred while {0} resource creating: {1}'.format(
res_type, str(e)))
deploy_result = False
return deploy_result, updated_output
def __move_output_content(args, failed_output, updated_output):
for arg in args:
resource_output = __find_output_by_resource_name(
failed_output, arg['name'])
if resource_output:
updated_output.update(resource_output)
def __find_output_by_resource_name(output, resource_name):
found_items = {}
for k, v in output.items():
if v['resource_name'] == resource_name:
found_items[k] = v
return found_items
@exit_on_exception
def create_deployment_resources(deploy_name, bundle_name,
deploy_only_resources=None,
deploy_only_types=None,
excluded_resources=None, excluded_types=None):
resources = resolve_meta(load_meta_resources(bundle_name))
_LOG.debug('Names were resolved')
_LOG.debug(prettify_json(resources))
# validate_deployment_packages(resources)
_LOG.info('{0} file was loaded successfully'.format(BUILD_META_FILE_NAME))
# TODO make filter chain
if deploy_only_resources:
resources = dict((k, v) for (k, v) in resources.items() if
k in deploy_only_resources)
if excluded_resources:
resources = dict((k, v) for (k, v) in resources.items() if
k not in excluded_resources)
if deploy_only_types:
resources = dict((k, v) for (k, v) in resources.items() if
v['resource_type'] in deploy_only_types)
if excluded_types:
resources = dict((k, v) for (k, v) in resources.items() if
v['resource_type'] not in excluded_types)
_LOG.debug('Going to create: {0}'.format(prettify_json(resources)))
# sort resources with priority
resources_list = list(resources.items())
resources_list.sort(key=cmp_to_key(_compare_deploy_resources))
_LOG.info('Going to deploy AWS resources')
success, output = deploy_resources(resources_list)
if success:
_LOG.info('AWS resources were deployed successfully')
# apply dynamic changes that uses ARNs
_LOG.info('Going to apply dynamic changes')
_apply_dynamic_changes(resources, output)
_LOG.info('Dynamic changes were applied successfully')
_LOG.info('Going to create deploy output')
output_str = json.dumps(output, default=_json_serial)
create_deploy_output(bundle_name, deploy_name, output_str, success)
_LOG.info('Deploy output for {0} was created.'.format(deploy_name))
return success
@exit_on_exception
def remove_deployment_resources(deploy_name, bundle_name,
clean_only_resources=None,
clean_only_types=None,
excluded_resources=None, excluded_types=None):
output = load_deploy_output(bundle_name, deploy_name)
_LOG.info('Output file was loaded successfully')
# TODO make filter chain
if clean_only_resources:
output = dict((k, v) for (k, v) in output.items() if
v['resource_name'] in clean_only_resources)
if excluded_resources:
output = dict((k, v) for (k, v) in output.items() if
v['resource_name'] not in excluded_resources)
if clean_only_types:
output = dict((k, v) for (k, v) in output.items() if
v['resource_meta']['resource_type'] in clean_only_types)
if excluded_types:
output = dict((k, v) for (k, v) in output.items() if
v['resource_meta'][
'resource_type'] not in excluded_types)
# sort resources with priority
resources_list = list(output.items())
resources_list.sort(key=cmp_to_key(_compare_clean_resources))
_LOG.debug('Resources to delete: {0}'.format(resources_list))
_LOG.info('Going to clean AWS resources')
clean_resources(resources_list)
# remove output from bucket
remove_deploy_output(bundle_name, deploy_name)
@exit_on_exception
def continue_deployment_resources(deploy_name, bundle_name,
deploy_only_resources=None,
deploy_only_types=None,
excluded_resources=None,
excluded_types=None):
output = load_failed_deploy_output(bundle_name, deploy_name)
_LOG.info('Failed output file was loaded successfully')
resources = resolve_meta(load_meta_resources(bundle_name))
_LOG.debug('Names were resolved')
_LOG.debug(prettify_json(resources))
# TODO make filter chain
if deploy_only_resources:
resources = dict((k, v) for (k, v) in resources.items() if
k in deploy_only_resources)
if excluded_resources:
resources = dict((k, v) for (k, v) in resources.items() if
k not in excluded_resources)
if deploy_only_types:
resources = dict((k, v) for (k, v) in resources.items() if
v['resource_type'] in deploy_only_types)
if excluded_types:
resources = dict((k, v) for (k, v) in resources.items() if
v['resource_type'] not in excluded_types)
# sort resources with priority
resources_list = list(resources.items())
resources_list.sort(key=cmp_to_key(_compare_deploy_resources))
success, updated_output = continue_deploy_resources(resources_list, output)
_LOG.info('AWS resources were deployed successfully')
if success:
# apply dynamic changes that uses ARNs
_LOG.info('Going to apply dynamic changes')
_apply_dynamic_changes(resources, updated_output)
_LOG.info('Dynamic changes were applied successfully')
# remove failed output from bucket
remove_failed_deploy_output(bundle_name, deploy_name)
_LOG.info('Going to create deploy output')
create_deploy_output(bundle_name, deploy_name,
prettify_json(updated_output), success=success)
return success
@exit_on_exception
def remove_failed_deploy_resources(deploy_name, bundle_name,
clean_only_resources=None,
clean_only_types=None,
excluded_resources=None,
excluded_types=None):
output = load_failed_deploy_output(bundle_name, deploy_name)
_LOG.info('Failed output file was loaded successfully')
# TODO make filter chain
if clean_only_resources:
output = dict((k, v) for (k, v) in output.items() if
v['resource_name'] in clean_only_resources)
if excluded_resources:
output = dict((k, v) for (k, v) in output.items() if
v['resource_name'] not in excluded_resources)
if clean_only_types:
output = dict((k, v) for (k, v) in output.items() if
v['resource_meta']['resource_type'] in clean_only_types)
if excluded_types:
output = dict((k, v) for (k, v) in output.items() if
v['resource_meta'][
'resource_type'] not in excluded_types)
# sort resources with priority
resources_list = list(output.items())
resources_list.sort(key=cmp_to_key(_compare_clean_resources))
_LOG.info('Going to clean AWS resources')
clean_resources(resources_list)
# remove output from bucket
remove_failed_deploy_output(bundle_name, deploy_name)
@exit_on_exception
def update_lambdas(bundle_name,
publish_only_lambdas,
excluded_lambdas_resources):
resources = resolve_meta(load_meta_resources(bundle_name))
_LOG.debug('Names were resolved')
_LOG.debug(prettify_json(resources))
# TODO make filter chain
resources = dict((k, v) for (k, v) in resources.items() if
v['resource_type'] == LAMBDA_TYPE)
if publish_only_lambdas:
resources = dict((k, v) for (k, v) in resources.items() if
k in publish_only_lambdas)
if excluded_lambdas_resources:
resources = dict((k, v) for (k, v) in resources.items() if
k not in excluded_lambdas_resources)
_LOG.debug('Going to update the following lambdas: {0}'.format(
prettify_json(resources)))
resources = list(resources.items())
update_resources(resources=resources)
def _json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (datetime, date)):
return obj.isoformat()
raise TypeError("Type %s not serializable" % type(obj))
def _apply_dynamic_changes(resources, output):
pool = ThreadPoolExecutor(max_workers=5)
futures = []
for name, meta in resources.items():
resource_type = meta['resource_type']
apply_changes = meta.get('apply_changes')
if apply_changes:
for apply_item in apply_changes:
change_type = apply_item['apply_type']
dependency_name = apply_item['dependency_name']
res_config = resources.get(dependency_name)
if not res_config:
_LOG.debug('Dependency resource {0} is not found, '
'skipping the apply'.format(dependency_name))
else:
dependency_type = res_config['resource_type']
func = RESOURCE_IDENTIFIER.get(resource_type)
if func:
resource_output = __find_output_by_resource_name(
output, name)
identifier = func(name, resource_output)
apply_func = APPLY_MAPPING.get(change_type)
if apply_func:
alias = '#{' + name + '}'
f = pool.submit(apply_func, alias, identifier,
apply_item)
futures.append(f)
else:
_LOG.warn('Dynamic apply is not defined '
'for {0} type'.format(change_type))
else:
_LOG.warn('Resource identifier is not defined '
'for {0} type'.format(dependency_type))
_LOG.info('Dynamic changes were applied to {0}'.format(name))
concurrent.futures.wait(futures, timeout=None, return_when=ALL_COMPLETED)
def _compare_deploy_resources(first, second):
first_resource_type = first[-1]['resource_type']
second_resource_type = second[-1]['resource_type']
first_res_priority = DEPLOY_RESOURCE_TYPE_PRIORITY[first_resource_type]
second_res_priority = DEPLOY_RESOURCE_TYPE_PRIORITY[second_resource_type]
return _compare_res(first_res_priority, second_res_priority)
def _compare_clean_resources(first, second):
first_resource_type = first[-1]['resource_meta']['resource_type']
second_resource_type = second[-1]['resource_meta']['resource_type']
first_res_priority = CLEAN_RESOURCE_TYPE_PRIORITY[first_resource_type]
second_res_priority = CLEAN_RESOURCE_TYPE_PRIORITY[second_resource_type]
return _compare_res(first_res_priority, second_res_priority)
def _compare_res(first_res_priority, second_res_priority):
if first_res_priority < second_res_priority:
return -1
elif first_res_priority > second_res_priority:
return 1
else:
return 0
| 40.032129 | 79 | 0.610353 | import concurrent
import json
from concurrent.futures import ALL_COMPLETED, ThreadPoolExecutor
from datetime import date, datetime
from functools import cmp_to_key
from syndicate.commons.log_helper import get_logger
from syndicate.core.build.bundle_processor import (create_deploy_output,
load_deploy_output,
load_failed_deploy_output,
load_meta_resources,
remove_deploy_output,
remove_failed_deploy_output)
from syndicate.core.build.meta_processor import resolve_meta
from syndicate.core.constants import (BUILD_META_FILE_NAME,
CLEAN_RESOURCE_TYPE_PRIORITY,
DEPLOY_RESOURCE_TYPE_PRIORITY,
LAMBDA_TYPE)
from syndicate.core.helper import exit_on_exception, prettify_json
from syndicate.core.resources import (APPLY_MAPPING, CREATE_RESOURCE,
DESCRIBE_RESOURCE, REMOVE_RESOURCE,
RESOURCE_CONFIGURATION_PROCESSORS,
RESOURCE_IDENTIFIER, UPDATE_RESOURCE)
_LOG = get_logger('syndicate.core.build.deployment_processor')
def get_dependencies(name, meta, resources_dict, resources):
resources_dict[name] = meta
if meta.get('dependencies'):
for dependency in meta.get('dependencies'):
dep_name = dependency['resource_name']
dep_meta = resources[dep_name]
resources_dict[dep_name] = dep_meta
if dep_meta.get('dependencies'):
get_dependencies(dep_name, dep_meta, resources_dict, resources)
def _process_resources(resources, handlers_mapping):
res_type = None
output = {}
args = []
resource_type = None
try:
for res_name, res_meta in resources:
res_type = res_meta['resource_type']
if resource_type is None:
resource_type = res_type
if res_type == resource_type:
args.append({'name': res_name, 'meta': res_meta})
continue
elif res_type != resource_type:
_LOG.info('Processing {0} resources ...'.format(resource_type))
func = handlers_mapping[resource_type]
response = func(args)
if response:
output.update(response)
del args[:]
args.append({'name': res_name, 'meta': res_meta})
resource_type = res_type
if args:
_LOG.info('Processing {0} resources ...'.format(resource_type))
func = handlers_mapping[resource_type]
response = func(args)
if response:
output.update(response)
return True, output
except Exception as e:
_LOG.error('Error occurred while {0} resource creating: {1}'.format(
res_type, str(e)))
return False, update_failed_output(args[0]['name'], args[0]['meta'],
resource_type, output)
def update_failed_output(res_name, res_meta, resource_type, output):
describe_func = DESCRIBE_RESOURCE[resource_type]
failed_resource_output = describe_func(res_name, res_meta)
if failed_resource_output:
if isinstance(failed_resource_output, list):
for item in failed_resource_output:
output.update(item)
else:
output.update(failed_resource_output)
return output
def deploy_resources(resources):
return _process_resources(resources=resources,
handlers_mapping=CREATE_RESOURCE)
def update_resources(resources):
return _process_resources(resources=resources,
handlers_mapping=UPDATE_RESOURCE)
def clean_resources(output):
args = []
resource_type = None
for arn, config in output:
res_type = config['resource_meta']['resource_type']
if resource_type is None:
resource_type = res_type
if res_type == resource_type:
args.append({'arn': arn, 'config': config})
continue
elif res_type != resource_type:
_LOG.info('Removing {0} resources ...'.format(resource_type))
func = REMOVE_RESOURCE[resource_type]
func(args)
del args[:]
args.append({'arn': arn, 'config': config})
resource_type = res_type
if args:
_LOG.info('Removing {0} resources ...'.format(resource_type))
func = REMOVE_RESOURCE[resource_type]
func(args)
def continue_deploy_resources(resources, failed_output):
updated_output = {}
deploy_result = True
res_type = None
try:
args = []
resource_type = None
for res_name, res_meta in resources:
res_type = res_meta['resource_type']
if resource_type is None:
resource_type = res_type
if res_type == resource_type:
resource_output = __find_output_by_resource_name(
failed_output, res_name)
args.append(
{
'name': res_name,
'meta': res_meta,
'current_configurations': resource_output
})
continue
elif res_type != resource_type:
func = RESOURCE_CONFIGURATION_PROCESSORS.get(resource_type)
if func:
response = func(args)
if response:
updated_output.update(
json.loads(
json.dumps(response, default=_json_serial)))
else:
__move_output_content(args, failed_output, updated_output)
del args[:]
resource_output = __find_output_by_resource_name(
failed_output, res_name)
args.append({
'name': res_name,
'meta': res_meta,
'current_configurations': resource_output
})
resource_type = res_type
if args:
func = RESOURCE_CONFIGURATION_PROCESSORS.get(resource_type)
if func:
response = func(args)
if response:
updated_output.update(
json.loads(
json.dumps(response, default=_json_serial)))
else:
__move_output_content(args, failed_output, updated_output)
except Exception as e:
_LOG.error('Error occurred while {0} resource creating: {1}'.format(
res_type, str(e)))
deploy_result = False
return deploy_result, updated_output
def __move_output_content(args, failed_output, updated_output):
for arg in args:
resource_output = __find_output_by_resource_name(
failed_output, arg['name'])
if resource_output:
updated_output.update(resource_output)
def __find_output_by_resource_name(output, resource_name):
found_items = {}
for k, v in output.items():
if v['resource_name'] == resource_name:
found_items[k] = v
return found_items
@exit_on_exception
def create_deployment_resources(deploy_name, bundle_name,
deploy_only_resources=None,
deploy_only_types=None,
excluded_resources=None, excluded_types=None):
resources = resolve_meta(load_meta_resources(bundle_name))
_LOG.debug('Names were resolved')
_LOG.debug(prettify_json(resources))
_LOG.info('{0} file was loaded successfully'.format(BUILD_META_FILE_NAME))
if deploy_only_resources:
resources = dict((k, v) for (k, v) in resources.items() if
k in deploy_only_resources)
if excluded_resources:
resources = dict((k, v) for (k, v) in resources.items() if
k not in excluded_resources)
if deploy_only_types:
resources = dict((k, v) for (k, v) in resources.items() if
v['resource_type'] in deploy_only_types)
if excluded_types:
resources = dict((k, v) for (k, v) in resources.items() if
v['resource_type'] not in excluded_types)
_LOG.debug('Going to create: {0}'.format(prettify_json(resources)))
resources_list = list(resources.items())
resources_list.sort(key=cmp_to_key(_compare_deploy_resources))
_LOG.info('Going to deploy AWS resources')
success, output = deploy_resources(resources_list)
if success:
_LOG.info('AWS resources were deployed successfully')
_LOG.info('Going to apply dynamic changes')
_apply_dynamic_changes(resources, output)
_LOG.info('Dynamic changes were applied successfully')
_LOG.info('Going to create deploy output')
output_str = json.dumps(output, default=_json_serial)
create_deploy_output(bundle_name, deploy_name, output_str, success)
_LOG.info('Deploy output for {0} was created.'.format(deploy_name))
return success
@exit_on_exception
def remove_deployment_resources(deploy_name, bundle_name,
clean_only_resources=None,
clean_only_types=None,
excluded_resources=None, excluded_types=None):
output = load_deploy_output(bundle_name, deploy_name)
_LOG.info('Output file was loaded successfully')
if clean_only_resources:
output = dict((k, v) for (k, v) in output.items() if
v['resource_name'] in clean_only_resources)
if excluded_resources:
output = dict((k, v) for (k, v) in output.items() if
v['resource_name'] not in excluded_resources)
if clean_only_types:
output = dict((k, v) for (k, v) in output.items() if
v['resource_meta']['resource_type'] in clean_only_types)
if excluded_types:
output = dict((k, v) for (k, v) in output.items() if
v['resource_meta'][
'resource_type'] not in excluded_types)
resources_list = list(output.items())
resources_list.sort(key=cmp_to_key(_compare_clean_resources))
_LOG.debug('Resources to delete: {0}'.format(resources_list))
_LOG.info('Going to clean AWS resources')
clean_resources(resources_list)
remove_deploy_output(bundle_name, deploy_name)
@exit_on_exception
def continue_deployment_resources(deploy_name, bundle_name,
deploy_only_resources=None,
deploy_only_types=None,
excluded_resources=None,
excluded_types=None):
output = load_failed_deploy_output(bundle_name, deploy_name)
_LOG.info('Failed output file was loaded successfully')
resources = resolve_meta(load_meta_resources(bundle_name))
_LOG.debug('Names were resolved')
_LOG.debug(prettify_json(resources))
if deploy_only_resources:
resources = dict((k, v) for (k, v) in resources.items() if
k in deploy_only_resources)
if excluded_resources:
resources = dict((k, v) for (k, v) in resources.items() if
k not in excluded_resources)
if deploy_only_types:
resources = dict((k, v) for (k, v) in resources.items() if
v['resource_type'] in deploy_only_types)
if excluded_types:
resources = dict((k, v) for (k, v) in resources.items() if
v['resource_type'] not in excluded_types)
resources_list = list(resources.items())
resources_list.sort(key=cmp_to_key(_compare_deploy_resources))
success, updated_output = continue_deploy_resources(resources_list, output)
_LOG.info('AWS resources were deployed successfully')
if success:
_LOG.info('Going to apply dynamic changes')
_apply_dynamic_changes(resources, updated_output)
_LOG.info('Dynamic changes were applied successfully')
remove_failed_deploy_output(bundle_name, deploy_name)
_LOG.info('Going to create deploy output')
create_deploy_output(bundle_name, deploy_name,
prettify_json(updated_output), success=success)
return success
@exit_on_exception
def remove_failed_deploy_resources(deploy_name, bundle_name,
clean_only_resources=None,
clean_only_types=None,
excluded_resources=None,
excluded_types=None):
output = load_failed_deploy_output(bundle_name, deploy_name)
_LOG.info('Failed output file was loaded successfully')
if clean_only_resources:
output = dict((k, v) for (k, v) in output.items() if
v['resource_name'] in clean_only_resources)
if excluded_resources:
output = dict((k, v) for (k, v) in output.items() if
v['resource_name'] not in excluded_resources)
if clean_only_types:
output = dict((k, v) for (k, v) in output.items() if
v['resource_meta']['resource_type'] in clean_only_types)
if excluded_types:
output = dict((k, v) for (k, v) in output.items() if
v['resource_meta'][
'resource_type'] not in excluded_types)
resources_list = list(output.items())
resources_list.sort(key=cmp_to_key(_compare_clean_resources))
_LOG.info('Going to clean AWS resources')
clean_resources(resources_list)
remove_failed_deploy_output(bundle_name, deploy_name)
@exit_on_exception
def update_lambdas(bundle_name,
publish_only_lambdas,
excluded_lambdas_resources):
resources = resolve_meta(load_meta_resources(bundle_name))
_LOG.debug('Names were resolved')
_LOG.debug(prettify_json(resources))
resources = dict((k, v) for (k, v) in resources.items() if
v['resource_type'] == LAMBDA_TYPE)
if publish_only_lambdas:
resources = dict((k, v) for (k, v) in resources.items() if
k in publish_only_lambdas)
if excluded_lambdas_resources:
resources = dict((k, v) for (k, v) in resources.items() if
k not in excluded_lambdas_resources)
_LOG.debug('Going to update the following lambdas: {0}'.format(
prettify_json(resources)))
resources = list(resources.items())
update_resources(resources=resources)
def _json_serial(obj):
if isinstance(obj, (datetime, date)):
return obj.isoformat()
raise TypeError("Type %s not serializable" % type(obj))
def _apply_dynamic_changes(resources, output):
pool = ThreadPoolExecutor(max_workers=5)
futures = []
for name, meta in resources.items():
resource_type = meta['resource_type']
apply_changes = meta.get('apply_changes')
if apply_changes:
for apply_item in apply_changes:
change_type = apply_item['apply_type']
dependency_name = apply_item['dependency_name']
res_config = resources.get(dependency_name)
if not res_config:
_LOG.debug('Dependency resource {0} is not found, '
'skipping the apply'.format(dependency_name))
else:
dependency_type = res_config['resource_type']
func = RESOURCE_IDENTIFIER.get(resource_type)
if func:
resource_output = __find_output_by_resource_name(
output, name)
identifier = func(name, resource_output)
apply_func = APPLY_MAPPING.get(change_type)
if apply_func:
alias = '#{' + name + '}'
f = pool.submit(apply_func, alias, identifier,
apply_item)
futures.append(f)
else:
_LOG.warn('Dynamic apply is not defined '
'for {0} type'.format(change_type))
else:
_LOG.warn('Resource identifier is not defined '
'for {0} type'.format(dependency_type))
_LOG.info('Dynamic changes were applied to {0}'.format(name))
concurrent.futures.wait(futures, timeout=None, return_when=ALL_COMPLETED)
def _compare_deploy_resources(first, second):
first_resource_type = first[-1]['resource_type']
second_resource_type = second[-1]['resource_type']
first_res_priority = DEPLOY_RESOURCE_TYPE_PRIORITY[first_resource_type]
second_res_priority = DEPLOY_RESOURCE_TYPE_PRIORITY[second_resource_type]
return _compare_res(first_res_priority, second_res_priority)
def _compare_clean_resources(first, second):
first_resource_type = first[-1]['resource_meta']['resource_type']
second_resource_type = second[-1]['resource_meta']['resource_type']
first_res_priority = CLEAN_RESOURCE_TYPE_PRIORITY[first_resource_type]
second_res_priority = CLEAN_RESOURCE_TYPE_PRIORITY[second_resource_type]
return _compare_res(first_res_priority, second_res_priority)
def _compare_res(first_res_priority, second_res_priority):
if first_res_priority < second_res_priority:
return -1
elif first_res_priority > second_res_priority:
return 1
else:
return 0
| true | true |
f7204924e00d695725bfe8a63d29154fabac6481 | 1,226 | py | Python | app.py | abdu1aziz/10-Fast-Fingers | fa620cdf1f675b681048335f47686942373a6b57 | [
"MIT"
] | null | null | null | app.py | abdu1aziz/10-Fast-Fingers | fa620cdf1f675b681048335f47686942373a6b57 | [
"MIT"
] | null | null | null | app.py | abdu1aziz/10-Fast-Fingers | fa620cdf1f675b681048335f47686942373a6b57 | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
# pip install selenium==2.53.6
"""
if you wanna run it on your regular browser profile.
profile = webdriver.FirefoxProfile('/home/{your_username}/.mozilla/firefox/{your_default_profile}')
driver = webdriver.Firefox(profile)
"""
driver = webdriver.Chrome(executable_path=r'chromedriver.exe')
def wait(no):
""" Waits for a particular time """
time.sleep(no)
def open_website():
""" Opens the website """
driver.get('https://10fastfingers.com/typing-test/english')
wait(5) # Due to slow network speed
def run_hack():
""" Implement the GOD speed hack """
open_website()
input_field = driver.find_element_by_id('inputfield')
try :
i = 0
while True:
elements = driver.find_element_by_xpath("//span[@wordnr='" + str(i) + "']")
print(elements.text)
input_field.send_keys(elements.text)
input_field.send_keys(" ")
i += 1
except :
print("Words completed")
def main():
""" Driver function """
run_hack()
if __name__ == '__main__':
main() | 24.52 | 100 | 0.613377 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
driver = webdriver.Chrome(executable_path=r'chromedriver.exe')
def wait(no):
time.sleep(no)
def open_website():
driver.get('https://10fastfingers.com/typing-test/english')
wait(5)
def run_hack():
open_website()
input_field = driver.find_element_by_id('inputfield')
try :
i = 0
while True:
elements = driver.find_element_by_xpath("//span[@wordnr='" + str(i) + "']")
print(elements.text)
input_field.send_keys(elements.text)
input_field.send_keys(" ")
i += 1
except :
print("Words completed")
def main():
run_hack()
if __name__ == '__main__':
main() | true | true |
f7204d1a3af3765a90077f7c2d942f1a43b61546 | 29,621 | py | Python | src/qt/qtwebkit/Source/WebCore/inspector/CodeGeneratorInspectorStrings.py | viewdy/phantomjs | eddb0db1d253fd0c546060a4555554c8ee08c13c | [
"BSD-3-Clause"
] | 1 | 2015-05-27T13:52:20.000Z | 2015-05-27T13:52:20.000Z | src/qt/qtwebkit/Source/WebCore/inspector/CodeGeneratorInspectorStrings.py | mrampersad/phantomjs | dca6f77a36699eb4e1c46f7600cca618f01b0ac3 | [
"BSD-3-Clause"
] | null | null | null | src/qt/qtwebkit/Source/WebCore/inspector/CodeGeneratorInspectorStrings.py | mrampersad/phantomjs | dca6f77a36699eb4e1c46f7600cca618f01b0ac3 | [
"BSD-3-Clause"
] | 1 | 2017-03-19T13:03:23.000Z | 2017-03-19T13:03:23.000Z | # Copyright (c) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# THis file contains string resources for CodeGeneratorInspector.
# Its syntax is a Python syntax subset, suitable for manual parsing.
frontend_domain_class = (
""" class $domainClassName {
public:
$domainClassName(InspectorFrontendChannel* inspectorFrontendChannel) : m_inspectorFrontendChannel(inspectorFrontendChannel) { }
${frontendDomainMethodDeclarations} void setInspectorFrontendChannel(InspectorFrontendChannel* inspectorFrontendChannel) { m_inspectorFrontendChannel = inspectorFrontendChannel; }
InspectorFrontendChannel* getInspectorFrontendChannel() { return m_inspectorFrontendChannel; }
private:
InspectorFrontendChannel* m_inspectorFrontendChannel;
};
$domainClassName* $domainFieldName() { return &m_$domainFieldName; }
""")
backend_method = (
"""void InspectorBackendDispatcherImpl::${domainName}_$methodName(long callId, InspectorObject*$requestMessageObject)
{
RefPtr<InspectorArray> protocolErrors = InspectorArray::create();
if (!$agentField)
protocolErrors->pushString("${domainName} handler is not available.");
$methodOutCode
$methodInCode
RefPtr<InspectorObject> result = InspectorObject::create();
ErrorString error;
if (!protocolErrors->length()) {
$agentField->$methodName(&error$agentCallParams);
${responseCook}
}
sendResponse(callId, result, commandNames[$commandNameIndex], protocolErrors, error);
}
""")
frontend_method = ("""void InspectorFrontend::$domainName::$eventName($parameters)
{
RefPtr<InspectorObject> jsonMessage = InspectorObject::create();
jsonMessage->setString("method", "$domainName.$eventName");
$code if (m_inspectorFrontendChannel)
m_inspectorFrontendChannel->sendMessageToFrontend(jsonMessage->toJSONString());
}
""")
# C++ template for an async-command callback: constructor forwarding to
# CallbackBase plus sendSuccess(), which emits the result if still active.
callback_method = (
"""InspectorBackendDispatcher::$agentName::$callbackName::$callbackName(PassRefPtr<InspectorBackendDispatcherImpl> backendImpl, int id) : CallbackBase(backendImpl, id) {}
void InspectorBackendDispatcher::$agentName::$callbackName::sendSuccess($parameters)
{
RefPtr<InspectorObject> jsonMessage = InspectorObject::create();
$code sendIfActive(jsonMessage, ErrorString());
}
""")
# Whole-file template for the generated InspectorFrontend.h header;
# $domainClassList and ${fieldDeclarations} are filled per protocol domain.
frontend_h = (
"""#ifndef InspectorFrontend_h
#define InspectorFrontend_h
#include "InspectorTypeBuilder.h"
#include "InspectorValues.h"
#include <wtf/PassRefPtr.h>
#include <wtf/text/WTFString.h>
namespace WebCore {
class InspectorFrontendChannel;
// Both InspectorObject and InspectorArray may or may not be declared at this point as defined by ENABLED_INSPECTOR.
// Double-check we have them at least as forward declaration.
class InspectorArray;
class InspectorObject;
typedef String ErrorString;
#if ENABLE(INSPECTOR)
class InspectorFrontend {
public:
InspectorFrontend(InspectorFrontendChannel*);
$domainClassList
private:
${fieldDeclarations}};
#endif // ENABLE(INSPECTOR)
} // namespace WebCore
#endif // !defined(InspectorFrontend_h)
""")
# Whole-file template for the generated InspectorBackendDispatcher.h header:
# the abstract dispatcher interface, CallbackBase, error codes, and the
# per-domain $agentInterfaces/$virtualSetters/$methodNamesEnumContent slots.
backend_h = (
"""#ifndef InspectorBackendDispatcher_h
#define InspectorBackendDispatcher_h
#include <wtf/PassRefPtr.h>
#include <wtf/RefCounted.h>
#include <wtf/text/WTFString.h>
#include "InspectorTypeBuilder.h"
namespace WebCore {
class InspectorAgent;
class InspectorObject;
class InspectorArray;
class InspectorFrontendChannel;
typedef String ErrorString;
class InspectorBackendDispatcherImpl;
class InspectorBackendDispatcher: public RefCounted<InspectorBackendDispatcher> {
public:
static PassRefPtr<InspectorBackendDispatcher> create(InspectorFrontendChannel* inspectorFrontendChannel);
virtual ~InspectorBackendDispatcher() { }
class CallbackBase: public RefCounted<CallbackBase> {
public:
CallbackBase(PassRefPtr<InspectorBackendDispatcherImpl> backendImpl, int id);
virtual ~CallbackBase();
void sendFailure(const ErrorString&);
bool isActive();
protected:
void sendIfActive(PassRefPtr<InspectorObject> partialMessage, const ErrorString& invocationError);
private:
void disable() { m_alreadySent = true; }
RefPtr<InspectorBackendDispatcherImpl> m_backendImpl;
int m_id;
bool m_alreadySent;
friend class InspectorBackendDispatcherImpl;
};
$agentInterfaces
$virtualSetters
virtual void clearFrontend() = 0;
enum CommonErrorCode {
ParseError = 0,
InvalidRequest,
MethodNotFound,
InvalidParams,
InternalError,
ServerError,
LastEntry,
};
void reportProtocolError(const long* const callId, CommonErrorCode, const String& errorMessage) const;
virtual void reportProtocolError(const long* const callId, CommonErrorCode, const String& errorMessage, PassRefPtr<InspectorArray> data) const = 0;
virtual void dispatch(const String& message) = 0;
static bool getCommandName(const String& message, String* result);
enum MethodNames {
$methodNamesEnumContent
kMethodNamesEnumSize
};
static const char* commandNames[];
};
} // namespace WebCore
#endif // !defined(InspectorBackendDispatcher_h)
""")
# Whole-file template for the generated InspectorBackendDispatcher.cpp:
# dispatch-map setup, JSON message validation, response/error reporting
# (JSON-RPC-style codes -32700..-32000), the typed getters built on
# getPropertyValueImpl, and CallbackBase plumbing. Generator fills
# $methodNameDeclarations, $constructorInit, $setters, $methodDeclarations,
# $fieldDeclarations, $methods and $messageHandlers.
backend_cpp = (
"""
#include "config.h"
#if ENABLE(INSPECTOR)
#include "InspectorBackendDispatcher.h"
#include <wtf/text/WTFString.h>
#include <wtf/text/CString.h>
#include "InspectorAgent.h"
#include "InspectorValues.h"
#include "InspectorFrontendChannel.h"
#include <wtf/text/WTFString.h>
namespace WebCore {
const char* InspectorBackendDispatcher::commandNames[] = {
$methodNameDeclarations
};
class InspectorBackendDispatcherImpl : public InspectorBackendDispatcher {
public:
InspectorBackendDispatcherImpl(InspectorFrontendChannel* inspectorFrontendChannel)
: m_inspectorFrontendChannel(inspectorFrontendChannel)
$constructorInit
{ }
virtual void clearFrontend() { m_inspectorFrontendChannel = 0; }
virtual void dispatch(const String& message);
virtual void reportProtocolError(const long* const callId, CommonErrorCode, const String& errorMessage, PassRefPtr<InspectorArray> data) const;
using InspectorBackendDispatcher::reportProtocolError;
void sendResponse(long callId, PassRefPtr<InspectorObject> result, const ErrorString& invocationError);
bool isActive() { return m_inspectorFrontendChannel; }
$setters
private:
$methodDeclarations
InspectorFrontendChannel* m_inspectorFrontendChannel;
$fieldDeclarations
template<typename R, typename V, typename V0>
static R getPropertyValueImpl(InspectorObject* object, const String& name, bool* valueFound, InspectorArray* protocolErrors, V0 initial_value, bool (*as_method)(InspectorValue*, V*), const char* type_name);
static int getInt(InspectorObject* object, const String& name, bool* valueFound, InspectorArray* protocolErrors);
static double getDouble(InspectorObject* object, const String& name, bool* valueFound, InspectorArray* protocolErrors);
static String getString(InspectorObject* object, const String& name, bool* valueFound, InspectorArray* protocolErrors);
static bool getBoolean(InspectorObject* object, const String& name, bool* valueFound, InspectorArray* protocolErrors);
static PassRefPtr<InspectorObject> getObject(InspectorObject* object, const String& name, bool* valueFound, InspectorArray* protocolErrors);
static PassRefPtr<InspectorArray> getArray(InspectorObject* object, const String& name, bool* valueFound, InspectorArray* protocolErrors);
void sendResponse(long callId, PassRefPtr<InspectorObject> result, const char* commandName, PassRefPtr<InspectorArray> protocolErrors, ErrorString invocationError);
};
$methods
PassRefPtr<InspectorBackendDispatcher> InspectorBackendDispatcher::create(InspectorFrontendChannel* inspectorFrontendChannel)
{
return adoptRef(new InspectorBackendDispatcherImpl(inspectorFrontendChannel));
}
void InspectorBackendDispatcherImpl::dispatch(const String& message)
{
RefPtr<InspectorBackendDispatcher> protect = this;
typedef void (InspectorBackendDispatcherImpl::*CallHandler)(long callId, InspectorObject* messageObject);
typedef HashMap<String, CallHandler> DispatchMap;
DEFINE_STATIC_LOCAL(DispatchMap, dispatchMap, );
long callId = 0;
if (dispatchMap.isEmpty()) {
static CallHandler handlers[] = {
$messageHandlers
};
size_t length = WTF_ARRAY_LENGTH(commandNames);
for (size_t i = 0; i < length; ++i)
dispatchMap.add(commandNames[i], handlers[i]);
}
RefPtr<InspectorValue> parsedMessage = InspectorValue::parseJSON(message);
if (!parsedMessage) {
reportProtocolError(0, ParseError, "Message must be in JSON format");
return;
}
RefPtr<InspectorObject> messageObject = parsedMessage->asObject();
if (!messageObject) {
reportProtocolError(0, InvalidRequest, "Message must be a JSONified object");
return;
}
RefPtr<InspectorValue> callIdValue = messageObject->get("id");
if (!callIdValue) {
reportProtocolError(0, InvalidRequest, "'id' property was not found");
return;
}
if (!callIdValue->asNumber(&callId)) {
reportProtocolError(0, InvalidRequest, "The type of 'id' property must be number");
return;
}
RefPtr<InspectorValue> methodValue = messageObject->get("method");
if (!methodValue) {
reportProtocolError(&callId, InvalidRequest, "'method' property wasn't found");
return;
}
String method;
if (!methodValue->asString(&method)) {
reportProtocolError(&callId, InvalidRequest, "The type of 'method' property must be string");
return;
}
HashMap<String, CallHandler>::iterator it = dispatchMap.find(method);
if (it == dispatchMap.end()) {
reportProtocolError(&callId, MethodNotFound, "'" + method + "' wasn't found");
return;
}
((*this).*it->value)(callId, messageObject.get());
}
void InspectorBackendDispatcherImpl::sendResponse(long callId, PassRefPtr<InspectorObject> result, const char* commandName, PassRefPtr<InspectorArray> protocolErrors, ErrorString invocationError)
{
if (protocolErrors->length()) {
String errorMessage = String::format("Some arguments of method '%s' can't be processed", commandName);
reportProtocolError(&callId, InvalidParams, errorMessage, protocolErrors);
return;
}
sendResponse(callId, result, invocationError);
}
void InspectorBackendDispatcherImpl::sendResponse(long callId, PassRefPtr<InspectorObject> result, const ErrorString& invocationError)
{
if (invocationError.length()) {
reportProtocolError(&callId, ServerError, invocationError);
return;
}
RefPtr<InspectorObject> responseMessage = InspectorObject::create();
responseMessage->setObject("result", result);
responseMessage->setNumber("id", callId);
if (m_inspectorFrontendChannel)
m_inspectorFrontendChannel->sendMessageToFrontend(responseMessage->toJSONString());
}
void InspectorBackendDispatcher::reportProtocolError(const long* const callId, CommonErrorCode code, const String& errorMessage) const
{
reportProtocolError(callId, code, errorMessage, 0);
}
void InspectorBackendDispatcherImpl::reportProtocolError(const long* const callId, CommonErrorCode code, const String& errorMessage, PassRefPtr<InspectorArray> data) const
{
DEFINE_STATIC_LOCAL(Vector<int>,s_commonErrors,);
if (!s_commonErrors.size()) {
s_commonErrors.insert(ParseError, -32700);
s_commonErrors.insert(InvalidRequest, -32600);
s_commonErrors.insert(MethodNotFound, -32601);
s_commonErrors.insert(InvalidParams, -32602);
s_commonErrors.insert(InternalError, -32603);
s_commonErrors.insert(ServerError, -32000);
}
ASSERT(code >=0);
ASSERT((unsigned)code < s_commonErrors.size());
ASSERT(s_commonErrors[code]);
RefPtr<InspectorObject> error = InspectorObject::create();
error->setNumber("code", s_commonErrors[code]);
error->setString("message", errorMessage);
ASSERT(error);
if (data)
error->setArray("data", data);
RefPtr<InspectorObject> message = InspectorObject::create();
message->setObject("error", error);
if (callId)
message->setNumber("id", *callId);
else
message->setValue("id", InspectorValue::null());
if (m_inspectorFrontendChannel)
m_inspectorFrontendChannel->sendMessageToFrontend(message->toJSONString());
}
template<typename R, typename V, typename V0>
R InspectorBackendDispatcherImpl::getPropertyValueImpl(InspectorObject* object, const String& name, bool* valueFound, InspectorArray* protocolErrors, V0 initial_value, bool (*as_method)(InspectorValue*, V*), const char* type_name)
{
ASSERT(protocolErrors);
if (valueFound)
*valueFound = false;
V value = initial_value;
if (!object) {
if (!valueFound) {
// Required parameter in missing params container.
protocolErrors->pushString(String::format("'params' object must contain required parameter '%s' with type '%s'.", name.utf8().data(), type_name));
}
return value;
}
InspectorObject::const_iterator end = object->end();
InspectorObject::const_iterator valueIterator = object->find(name);
if (valueIterator == end) {
if (!valueFound)
protocolErrors->pushString(String::format("Parameter '%s' with type '%s' was not found.", name.utf8().data(), type_name));
return value;
}
if (!as_method(valueIterator->value.get(), &value))
protocolErrors->pushString(String::format("Parameter '%s' has wrong type. It must be '%s'.", name.utf8().data(), type_name));
else
if (valueFound)
*valueFound = true;
return value;
}
struct AsMethodBridges {
static bool asInt(InspectorValue* value, int* output) { return value->asNumber(output); }
static bool asDouble(InspectorValue* value, double* output) { return value->asNumber(output); }
static bool asString(InspectorValue* value, String* output) { return value->asString(output); }
static bool asBoolean(InspectorValue* value, bool* output) { return value->asBoolean(output); }
static bool asObject(InspectorValue* value, RefPtr<InspectorObject>* output) { return value->asObject(output); }
static bool asArray(InspectorValue* value, RefPtr<InspectorArray>* output) { return value->asArray(output); }
};
int InspectorBackendDispatcherImpl::getInt(InspectorObject* object, const String& name, bool* valueFound, InspectorArray* protocolErrors)
{
return getPropertyValueImpl<int, int, int>(object, name, valueFound, protocolErrors, 0, AsMethodBridges::asInt, "Number");
}
double InspectorBackendDispatcherImpl::getDouble(InspectorObject* object, const String& name, bool* valueFound, InspectorArray* protocolErrors)
{
return getPropertyValueImpl<double, double, double>(object, name, valueFound, protocolErrors, 0, AsMethodBridges::asDouble, "Number");
}
String InspectorBackendDispatcherImpl::getString(InspectorObject* object, const String& name, bool* valueFound, InspectorArray* protocolErrors)
{
return getPropertyValueImpl<String, String, String>(object, name, valueFound, protocolErrors, "", AsMethodBridges::asString, "String");
}
bool InspectorBackendDispatcherImpl::getBoolean(InspectorObject* object, const String& name, bool* valueFound, InspectorArray* protocolErrors)
{
return getPropertyValueImpl<bool, bool, bool>(object, name, valueFound, protocolErrors, false, AsMethodBridges::asBoolean, "Boolean");
}
PassRefPtr<InspectorObject> InspectorBackendDispatcherImpl::getObject(InspectorObject* object, const String& name, bool* valueFound, InspectorArray* protocolErrors)
{
return getPropertyValueImpl<PassRefPtr<InspectorObject>, RefPtr<InspectorObject>, InspectorObject*>(object, name, valueFound, protocolErrors, 0, AsMethodBridges::asObject, "Object");
}
PassRefPtr<InspectorArray> InspectorBackendDispatcherImpl::getArray(InspectorObject* object, const String& name, bool* valueFound, InspectorArray* protocolErrors)
{
return getPropertyValueImpl<PassRefPtr<InspectorArray>, RefPtr<InspectorArray>, InspectorArray*>(object, name, valueFound, protocolErrors, 0, AsMethodBridges::asArray, "Array");
}
bool InspectorBackendDispatcher::getCommandName(const String& message, String* result)
{
RefPtr<InspectorValue> value = InspectorValue::parseJSON(message);
if (!value)
return false;
RefPtr<InspectorObject> object = value->asObject();
if (!object)
return false;
if (!object->getString("method", result))
return false;
return true;
}
InspectorBackendDispatcher::CallbackBase::CallbackBase(PassRefPtr<InspectorBackendDispatcherImpl> backendImpl, int id)
: m_backendImpl(backendImpl), m_id(id), m_alreadySent(false) {}
InspectorBackendDispatcher::CallbackBase::~CallbackBase() {}
void InspectorBackendDispatcher::CallbackBase::sendFailure(const ErrorString& error)
{
ASSERT(error.length());
sendIfActive(0, error);
}
bool InspectorBackendDispatcher::CallbackBase::isActive()
{
return !m_alreadySent && m_backendImpl->isActive();
}
void InspectorBackendDispatcher::CallbackBase::sendIfActive(PassRefPtr<InspectorObject> partialMessage, const ErrorString& invocationError)
{
if (m_alreadySent)
return;
m_backendImpl->sendResponse(m_id, partialMessage, invocationError);
m_alreadySent = true;
}
COMPILE_ASSERT(static_cast<int>(InspectorBackendDispatcher::kMethodNamesEnumSize) == WTF_ARRAY_LENGTH(InspectorBackendDispatcher::commandNames), command_name_array_problem);
} // namespace WebCore
#endif // ENABLE(INSPECTOR)
""")
# Whole-file template for the generated InspectorFrontend.cpp; the generator
# fills $constructorInit with member initializers and $methods with the
# per-event method bodies (see frontend_method above in this file).
frontend_cpp = (
"""
#include "config.h"
#if ENABLE(INSPECTOR)
#include "InspectorFrontend.h"
#include <wtf/text/WTFString.h>
#include <wtf/text/CString.h>
#include "InspectorFrontendChannel.h"
#include "InspectorValues.h"
#include <wtf/text/WTFString.h>
namespace WebCore {
InspectorFrontend::InspectorFrontend(InspectorFrontendChannel* inspectorFrontendChannel)
: $constructorInit{
}
$methods
} // namespace WebCore
#endif // ENABLE(INSPECTOR)
""")
# Whole-file template for the generated InspectorTypeBuilder.h: OptOutput
# (optional out-parameter), ExactlyInt (blocks implicit float->int casts),
# RuntimeCastHelper, the typed Array<T> wrapper over InspectorArray, and the
# ArrayItemHelper trait specializations for each primitive/value type.
# Validation helpers are compiled only under #if $validatorIfdefName.
typebuilder_h = (
"""
#ifndef InspectorTypeBuilder_h
#define InspectorTypeBuilder_h
#if ENABLE(INSPECTOR)
#include "InspectorValues.h"
#include <wtf/Assertions.h>
#include <wtf/PassRefPtr.h>
namespace WebCore {
namespace TypeBuilder {
template<typename T>
class OptOutput {
public:
OptOutput() : m_assigned(false) { }
void operator=(T value)
{
m_value = value;
m_assigned = true;
}
bool isAssigned() { return m_assigned; }
T getValue()
{
ASSERT(isAssigned());
return m_value;
}
private:
T m_value;
bool m_assigned;
WTF_MAKE_NONCOPYABLE(OptOutput);
};
// A small transient wrapper around int type, that can be used as a funciton parameter type
// cleverly disallowing C++ implicit casts from float or double.
class ExactlyInt {
public:
template<typename T>
ExactlyInt(T t) : m_value(cast_to_int<T>(t)) {}
ExactlyInt() {}
operator int() { return m_value; }
private:
int m_value;
template<typename T>
static int cast_to_int(T) { return T::default_case_cast_is_not_supported(); }
};
template<>
inline int ExactlyInt::cast_to_int<int>(int i) { return i; }
template<>
inline int ExactlyInt::cast_to_int<unsigned int>(unsigned int i) { return i; }
class RuntimeCastHelper {
public:
#if $validatorIfdefName
template<InspectorValue::Type TYPE>
static void assertType(InspectorValue* value)
{
ASSERT(value->type() == TYPE);
}
static void assertAny(InspectorValue*);
static void assertInt(InspectorValue* value);
#endif
};
// This class provides "Traits" type for the input type T. It is programmed using C++ template specialization
// technique. By default it simply takes "ItemTraits" type from T, but it doesn't work with the base types.
template<typename T>
struct ArrayItemHelper {
typedef typename T::ItemTraits Traits;
};
template<typename T>
class Array : public InspectorArrayBase {
private:
Array() { }
InspectorArray* openAccessors() {
COMPILE_ASSERT(sizeof(InspectorArray) == sizeof(Array<T>), cannot_cast);
return static_cast<InspectorArray*>(static_cast<InspectorArrayBase*>(this));
}
public:
void addItem(PassRefPtr<T> value)
{
ArrayItemHelper<T>::Traits::pushRefPtr(this->openAccessors(), value);
}
void addItem(T value)
{
ArrayItemHelper<T>::Traits::pushRaw(this->openAccessors(), value);
}
static PassRefPtr<Array<T> > create()
{
return adoptRef(new Array<T>());
}
static PassRefPtr<Array<T> > runtimeCast(PassRefPtr<InspectorValue> value)
{
RefPtr<InspectorArray> array;
bool castRes = value->asArray(&array);
ASSERT_UNUSED(castRes, castRes);
#if $validatorIfdefName
assertCorrectValue(array.get());
#endif // $validatorIfdefName
COMPILE_ASSERT(sizeof(Array<T>) == sizeof(InspectorArray), type_cast_problem);
return static_cast<Array<T>*>(static_cast<InspectorArrayBase*>(array.get()));
}
#if $validatorIfdefName
static void assertCorrectValue(InspectorValue* value)
{
RefPtr<InspectorArray> array;
bool castRes = value->asArray(&array);
ASSERT_UNUSED(castRes, castRes);
for (unsigned i = 0; i < array->length(); i++)
ArrayItemHelper<T>::Traits::template assertCorrectValue<T>(array->get(i).get());
}
#endif // $validatorIfdefName
};
struct StructItemTraits {
static void pushRefPtr(InspectorArray* array, PassRefPtr<InspectorValue> value)
{
array->pushValue(value);
}
#if $validatorIfdefName
template<typename T>
static void assertCorrectValue(InspectorValue* value) {
T::assertCorrectValue(value);
}
#endif // $validatorIfdefName
};
template<>
struct ArrayItemHelper<String> {
struct Traits {
static void pushRaw(InspectorArray* array, const String& value)
{
array->pushString(value);
}
#if $validatorIfdefName
template<typename T>
static void assertCorrectValue(InspectorValue* value) {
RuntimeCastHelper::assertType<InspectorValue::TypeString>(value);
}
#endif // $validatorIfdefName
};
};
template<>
struct ArrayItemHelper<int> {
struct Traits {
static void pushRaw(InspectorArray* array, int value)
{
array->pushInt(value);
}
#if $validatorIfdefName
template<typename T>
static void assertCorrectValue(InspectorValue* value) {
RuntimeCastHelper::assertInt(value);
}
#endif // $validatorIfdefName
};
};
template<>
struct ArrayItemHelper<double> {
struct Traits {
static void pushRaw(InspectorArray* array, double value)
{
array->pushNumber(value);
}
#if $validatorIfdefName
template<typename T>
static void assertCorrectValue(InspectorValue* value) {
RuntimeCastHelper::assertType<InspectorValue::TypeNumber>(value);
}
#endif // $validatorIfdefName
};
};
template<>
struct ArrayItemHelper<bool> {
struct Traits {
static void pushRaw(InspectorArray* array, bool value)
{
array->pushBoolean(value);
}
#if $validatorIfdefName
template<typename T>
static void assertCorrectValue(InspectorValue* value) {
RuntimeCastHelper::assertType<InspectorValue::TypeBoolean>(value);
}
#endif // $validatorIfdefName
};
};
template<>
struct ArrayItemHelper<InspectorValue> {
struct Traits {
static void pushRefPtr(InspectorArray* array, PassRefPtr<InspectorValue> value)
{
array->pushValue(value);
}
#if $validatorIfdefName
template<typename T>
static void assertCorrectValue(InspectorValue* value) {
RuntimeCastHelper::assertAny(value);
}
#endif // $validatorIfdefName
};
};
template<>
struct ArrayItemHelper<InspectorObject> {
struct Traits {
static void pushRefPtr(InspectorArray* array, PassRefPtr<InspectorValue> value)
{
array->pushValue(value);
}
#if $validatorIfdefName
template<typename T>
static void assertCorrectValue(InspectorValue* value) {
RuntimeCastHelper::assertType<InspectorValue::TypeObject>(value);
}
#endif // $validatorIfdefName
};
};
template<>
struct ArrayItemHelper<InspectorArray> {
struct Traits {
static void pushRefPtr(InspectorArray* array, PassRefPtr<InspectorArray> value)
{
array->pushArray(value);
}
#if $validatorIfdefName
template<typename T>
static void assertCorrectValue(InspectorValue* value) {
RuntimeCastHelper::assertType<InspectorValue::TypeArray>(value);
}
#endif // $validatorIfdefName
};
};
template<typename T>
struct ArrayItemHelper<TypeBuilder::Array<T> > {
struct Traits {
static void pushRefPtr(InspectorArray* array, PassRefPtr<TypeBuilder::Array<T> > value)
{
array->pushValue(value);
}
#if $validatorIfdefName
template<typename S>
static void assertCorrectValue(InspectorValue* value) {
S::assertCorrectValue(value);
}
#endif // $validatorIfdefName
};
};
${forwards}
String getEnumConstantValue(int code);
${typeBuilders}
} // namespace TypeBuilder
} // namespace WebCore
#endif // ENABLE(INSPECTOR)
#endif // !defined(InspectorTypeBuilder_h)
""")
# Whole-file template for the generated InspectorTypeBuilder.cpp: the enum
# constant-value table plus the RuntimeCastHelper assert implementations
# (compiled only under #if $validatorIfdefName).
typebuilder_cpp = (
"""
#include "config.h"
#if ENABLE(INSPECTOR)
#include "InspectorTypeBuilder.h"
#include <wtf/text/CString.h>
namespace WebCore {
namespace TypeBuilder {
const char* const enum_constant_values[] = {
$enumConstantValues};
String getEnumConstantValue(int code) {
return enum_constant_values[code];
}
} // namespace TypeBuilder
$implCode
#if $validatorIfdefName
void TypeBuilder::RuntimeCastHelper::assertAny(InspectorValue*)
{
// No-op.
}
void TypeBuilder::RuntimeCastHelper::assertInt(InspectorValue* value)
{
double v;
bool castRes = value->asNumber(&v);
ASSERT_UNUSED(castRes, castRes);
ASSERT(static_cast<double>(static_cast<int>(v)) == v);
}
$validatorCode
#endif // $validatorIfdefName
} // namespace WebCore
#endif // ENABLE(INSPECTOR)
""")
# Template for the generated JavaScript backend stub; only the per-domain
# initializer text is substituted.
backend_js = (
"""
$domainInitializers
""")
# C++ snippet pasted into command handlers that take parameters: pulls the
# "params" object out of the request message and exposes raw pointers.
param_container_access_code = """
RefPtr<InspectorObject> paramsContainer = requestMessageObject->getObject("params");
InspectorObject* paramsContainerPtr = paramsContainer.get();
InspectorArray* protocolErrorsPtr = protocolErrors.get();
"""
# Part 1 of the generated type-safe Builder (state tracked in the STATE
# template bitmask); %s slots are filled with printf-style formatting.
class_binding_builder_part_1 = (
""" AllFieldsSet = %s
};
template<int STATE>
class Builder {
private:
RefPtr<InspectorObject> m_result;
template<int STEP> Builder<STATE | STEP>& castState()
{
return *reinterpret_cast<Builder<STATE | STEP>*>(this);
}
Builder(PassRefPtr</*%s*/InspectorObject> ptr)
{
COMPILE_ASSERT(STATE == NoFieldsSet, builder_created_in_non_init_state);
m_result = ptr;
}
friend class %s;
public:
""")
# Part 2: one set%s() method per property; a COMPILE_ASSERT rejects setting
# the same property twice.
class_binding_builder_part_2 = ("""
Builder<STATE | %s>& set%s(%s value)
{
COMPILE_ASSERT(!(STATE & %s), property_%s_already_set);
m_result->set%s("%s", %s);
return castState<%s>();
}
""")
# Part 3: conversion/release of the finished builder; compiles only once
# STATE == AllFieldsSet.
class_binding_builder_part_3 = ("""
operator RefPtr<%s>& ()
{
COMPILE_ASSERT(STATE == AllFieldsSet, result_is_not_ready);
COMPILE_ASSERT(sizeof(%s) == sizeof(InspectorObject), cannot_cast);
return *reinterpret_cast<RefPtr<%s>*>(&m_result);
}
PassRefPtr<%s> release()
{
return RefPtr<%s>(*this).release();
}
};
""")
# Part 4: static create() entry point returning an empty-state Builder.
class_binding_builder_part_4 = (
""" static Builder<NoFieldsSet> create()
{
return Builder<NoFieldsSet>(InspectorObject::create());
}
""")
| 31.016754 | 230 | 0.716181 |
# NOTE(review): duplicate assignment — this file appears to contain the module
# text twice; this re-binds frontend_domain_class to the same template text as
# the earlier definition, and the later binding silently wins.
frontend_domain_class = (
""" class $domainClassName {
public:
$domainClassName(InspectorFrontendChannel* inspectorFrontendChannel) : m_inspectorFrontendChannel(inspectorFrontendChannel) { }
${frontendDomainMethodDeclarations} void setInspectorFrontendChannel(InspectorFrontendChannel* inspectorFrontendChannel) { m_inspectorFrontendChannel = inspectorFrontendChannel; }
InspectorFrontendChannel* getInspectorFrontendChannel() { return m_inspectorFrontendChannel; }
private:
InspectorFrontendChannel* m_inspectorFrontendChannel;
};
$domainClassName* $domainFieldName() { return &m_$domainFieldName; }
""")
# NOTE(review): duplicate assignment — re-binds backend_method to the same
# template text as the earlier definition; the later binding silently wins.
backend_method = (
"""void InspectorBackendDispatcherImpl::${domainName}_$methodName(long callId, InspectorObject*$requestMessageObject)
{
RefPtr<InspectorArray> protocolErrors = InspectorArray::create();
if (!$agentField)
protocolErrors->pushString("${domainName} handler is not available.");
$methodOutCode
$methodInCode
RefPtr<InspectorObject> result = InspectorObject::create();
ErrorString error;
if (!protocolErrors->length()) {
$agentField->$methodName(&error$agentCallParams);
${responseCook}
}
sendResponse(callId, result, commandNames[$commandNameIndex], protocolErrors, error);
}
""")
# NOTE(review): duplicate assignment — re-binds frontend_method to the same
# template text as the earlier definition; the later binding silently wins.
frontend_method = ("""void InspectorFrontend::$domainName::$eventName($parameters)
{
RefPtr<InspectorObject> jsonMessage = InspectorObject::create();
jsonMessage->setString("method", "$domainName.$eventName");
$code if (m_inspectorFrontendChannel)
m_inspectorFrontendChannel->sendMessageToFrontend(jsonMessage->toJSONString());
}
""")
# NOTE(review): duplicate assignment — re-binds callback_method to the same
# template text as the earlier definition; the later binding silently wins.
callback_method = (
"""InspectorBackendDispatcher::$agentName::$callbackName::$callbackName(PassRefPtr<InspectorBackendDispatcherImpl> backendImpl, int id) : CallbackBase(backendImpl, id) {}
void InspectorBackendDispatcher::$agentName::$callbackName::sendSuccess($parameters)
{
RefPtr<InspectorObject> jsonMessage = InspectorObject::create();
$code sendIfActive(jsonMessage, ErrorString());
}
""")
# NOTE(review): duplicate assignment — re-binds frontend_h to the same
# template text as the earlier definition; the later binding silently wins.
frontend_h = (
"""#ifndef InspectorFrontend_h
#define InspectorFrontend_h
#include "InspectorTypeBuilder.h"
#include "InspectorValues.h"
#include <wtf/PassRefPtr.h>
#include <wtf/text/WTFString.h>
namespace WebCore {
class InspectorFrontendChannel;
// Both InspectorObject and InspectorArray may or may not be declared at this point as defined by ENABLED_INSPECTOR.
// Double-check we have them at least as forward declaration.
class InspectorArray;
class InspectorObject;
typedef String ErrorString;
#if ENABLE(INSPECTOR)
class InspectorFrontend {
public:
InspectorFrontend(InspectorFrontendChannel*);
$domainClassList
private:
${fieldDeclarations}};
#endif // ENABLE(INSPECTOR)
} // namespace WebCore
#endif // !defined(InspectorFrontend_h)
""")
# NOTE(review): duplicate assignment — re-binds backend_h to the same
# template text as the earlier definition; the later binding silently wins.
backend_h = (
"""#ifndef InspectorBackendDispatcher_h
#define InspectorBackendDispatcher_h
#include <wtf/PassRefPtr.h>
#include <wtf/RefCounted.h>
#include <wtf/text/WTFString.h>
#include "InspectorTypeBuilder.h"
namespace WebCore {
class InspectorAgent;
class InspectorObject;
class InspectorArray;
class InspectorFrontendChannel;
typedef String ErrorString;
class InspectorBackendDispatcherImpl;
class InspectorBackendDispatcher: public RefCounted<InspectorBackendDispatcher> {
public:
static PassRefPtr<InspectorBackendDispatcher> create(InspectorFrontendChannel* inspectorFrontendChannel);
virtual ~InspectorBackendDispatcher() { }
class CallbackBase: public RefCounted<CallbackBase> {
public:
CallbackBase(PassRefPtr<InspectorBackendDispatcherImpl> backendImpl, int id);
virtual ~CallbackBase();
void sendFailure(const ErrorString&);
bool isActive();
protected:
void sendIfActive(PassRefPtr<InspectorObject> partialMessage, const ErrorString& invocationError);
private:
void disable() { m_alreadySent = true; }
RefPtr<InspectorBackendDispatcherImpl> m_backendImpl;
int m_id;
bool m_alreadySent;
friend class InspectorBackendDispatcherImpl;
};
$agentInterfaces
$virtualSetters
virtual void clearFrontend() = 0;
enum CommonErrorCode {
ParseError = 0,
InvalidRequest,
MethodNotFound,
InvalidParams,
InternalError,
ServerError,
LastEntry,
};
void reportProtocolError(const long* const callId, CommonErrorCode, const String& errorMessage) const;
virtual void reportProtocolError(const long* const callId, CommonErrorCode, const String& errorMessage, PassRefPtr<InspectorArray> data) const = 0;
virtual void dispatch(const String& message) = 0;
static bool getCommandName(const String& message, String* result);
enum MethodNames {
$methodNamesEnumContent
kMethodNamesEnumSize
};
static const char* commandNames[];
};
} // namespace WebCore
#endif // !defined(InspectorBackendDispatcher_h)
""")
backend_cpp = (
"""
#include "config.h"
#if ENABLE(INSPECTOR)
#include "InspectorBackendDispatcher.h"
#include <wtf/text/WTFString.h>
#include <wtf/text/CString.h>
#include "InspectorAgent.h"
#include "InspectorValues.h"
#include "InspectorFrontendChannel.h"
#include <wtf/text/WTFString.h>
namespace WebCore {
const char* InspectorBackendDispatcher::commandNames[] = {
$methodNameDeclarations
};
class InspectorBackendDispatcherImpl : public InspectorBackendDispatcher {
public:
InspectorBackendDispatcherImpl(InspectorFrontendChannel* inspectorFrontendChannel)
: m_inspectorFrontendChannel(inspectorFrontendChannel)
$constructorInit
{ }
virtual void clearFrontend() { m_inspectorFrontendChannel = 0; }
virtual void dispatch(const String& message);
virtual void reportProtocolError(const long* const callId, CommonErrorCode, const String& errorMessage, PassRefPtr<InspectorArray> data) const;
using InspectorBackendDispatcher::reportProtocolError;
void sendResponse(long callId, PassRefPtr<InspectorObject> result, const ErrorString& invocationError);
bool isActive() { return m_inspectorFrontendChannel; }
$setters
private:
$methodDeclarations
InspectorFrontendChannel* m_inspectorFrontendChannel;
$fieldDeclarations
template<typename R, typename V, typename V0>
static R getPropertyValueImpl(InspectorObject* object, const String& name, bool* valueFound, InspectorArray* protocolErrors, V0 initial_value, bool (*as_method)(InspectorValue*, V*), const char* type_name);
static int getInt(InspectorObject* object, const String& name, bool* valueFound, InspectorArray* protocolErrors);
static double getDouble(InspectorObject* object, const String& name, bool* valueFound, InspectorArray* protocolErrors);
static String getString(InspectorObject* object, const String& name, bool* valueFound, InspectorArray* protocolErrors);
static bool getBoolean(InspectorObject* object, const String& name, bool* valueFound, InspectorArray* protocolErrors);
static PassRefPtr<InspectorObject> getObject(InspectorObject* object, const String& name, bool* valueFound, InspectorArray* protocolErrors);
static PassRefPtr<InspectorArray> getArray(InspectorObject* object, const String& name, bool* valueFound, InspectorArray* protocolErrors);
void sendResponse(long callId, PassRefPtr<InspectorObject> result, const char* commandName, PassRefPtr<InspectorArray> protocolErrors, ErrorString invocationError);
};
$methods
PassRefPtr<InspectorBackendDispatcher> InspectorBackendDispatcher::create(InspectorFrontendChannel* inspectorFrontendChannel)
{
return adoptRef(new InspectorBackendDispatcherImpl(inspectorFrontendChannel));
}
void InspectorBackendDispatcherImpl::dispatch(const String& message)
{
RefPtr<InspectorBackendDispatcher> protect = this;
typedef void (InspectorBackendDispatcherImpl::*CallHandler)(long callId, InspectorObject* messageObject);
typedef HashMap<String, CallHandler> DispatchMap;
DEFINE_STATIC_LOCAL(DispatchMap, dispatchMap, );
long callId = 0;
if (dispatchMap.isEmpty()) {
static CallHandler handlers[] = {
$messageHandlers
};
size_t length = WTF_ARRAY_LENGTH(commandNames);
for (size_t i = 0; i < length; ++i)
dispatchMap.add(commandNames[i], handlers[i]);
}
RefPtr<InspectorValue> parsedMessage = InspectorValue::parseJSON(message);
if (!parsedMessage) {
reportProtocolError(0, ParseError, "Message must be in JSON format");
return;
}
RefPtr<InspectorObject> messageObject = parsedMessage->asObject();
if (!messageObject) {
reportProtocolError(0, InvalidRequest, "Message must be a JSONified object");
return;
}
RefPtr<InspectorValue> callIdValue = messageObject->get("id");
if (!callIdValue) {
reportProtocolError(0, InvalidRequest, "'id' property was not found");
return;
}
if (!callIdValue->asNumber(&callId)) {
reportProtocolError(0, InvalidRequest, "The type of 'id' property must be number");
return;
}
RefPtr<InspectorValue> methodValue = messageObject->get("method");
if (!methodValue) {
reportProtocolError(&callId, InvalidRequest, "'method' property wasn't found");
return;
}
String method;
if (!methodValue->asString(&method)) {
reportProtocolError(&callId, InvalidRequest, "The type of 'method' property must be string");
return;
}
HashMap<String, CallHandler>::iterator it = dispatchMap.find(method);
if (it == dispatchMap.end()) {
reportProtocolError(&callId, MethodNotFound, "'" + method + "' wasn't found");
return;
}
((*this).*it->value)(callId, messageObject.get());
}
void InspectorBackendDispatcherImpl::sendResponse(long callId, PassRefPtr<InspectorObject> result, const char* commandName, PassRefPtr<InspectorArray> protocolErrors, ErrorString invocationError)
{
if (protocolErrors->length()) {
String errorMessage = String::format("Some arguments of method '%s' can't be processed", commandName);
reportProtocolError(&callId, InvalidParams, errorMessage, protocolErrors);
return;
}
sendResponse(callId, result, invocationError);
}
void InspectorBackendDispatcherImpl::sendResponse(long callId, PassRefPtr<InspectorObject> result, const ErrorString& invocationError)
{
if (invocationError.length()) {
reportProtocolError(&callId, ServerError, invocationError);
return;
}
RefPtr<InspectorObject> responseMessage = InspectorObject::create();
responseMessage->setObject("result", result);
responseMessage->setNumber("id", callId);
if (m_inspectorFrontendChannel)
m_inspectorFrontendChannel->sendMessageToFrontend(responseMessage->toJSONString());
}
void InspectorBackendDispatcher::reportProtocolError(const long* const callId, CommonErrorCode code, const String& errorMessage) const
{
reportProtocolError(callId, code, errorMessage, 0);
}
void InspectorBackendDispatcherImpl::reportProtocolError(const long* const callId, CommonErrorCode code, const String& errorMessage, PassRefPtr<InspectorArray> data) const
{
DEFINE_STATIC_LOCAL(Vector<int>,s_commonErrors,);
if (!s_commonErrors.size()) {
s_commonErrors.insert(ParseError, -32700);
s_commonErrors.insert(InvalidRequest, -32600);
s_commonErrors.insert(MethodNotFound, -32601);
s_commonErrors.insert(InvalidParams, -32602);
s_commonErrors.insert(InternalError, -32603);
s_commonErrors.insert(ServerError, -32000);
}
ASSERT(code >=0);
ASSERT((unsigned)code < s_commonErrors.size());
ASSERT(s_commonErrors[code]);
RefPtr<InspectorObject> error = InspectorObject::create();
error->setNumber("code", s_commonErrors[code]);
error->setString("message", errorMessage);
ASSERT(error);
if (data)
error->setArray("data", data);
RefPtr<InspectorObject> message = InspectorObject::create();
message->setObject("error", error);
if (callId)
message->setNumber("id", *callId);
else
message->setValue("id", InspectorValue::null());
if (m_inspectorFrontendChannel)
m_inspectorFrontendChannel->sendMessageToFrontend(message->toJSONString());
}
template<typename R, typename V, typename V0>
R InspectorBackendDispatcherImpl::getPropertyValueImpl(InspectorObject* object, const String& name, bool* valueFound, InspectorArray* protocolErrors, V0 initial_value, bool (*as_method)(InspectorValue*, V*), const char* type_name)
{
ASSERT(protocolErrors);
if (valueFound)
*valueFound = false;
V value = initial_value;
if (!object) {
if (!valueFound) {
// Required parameter in missing params container.
protocolErrors->pushString(String::format("'params' object must contain required parameter '%s' with type '%s'.", name.utf8().data(), type_name));
}
return value;
}
InspectorObject::const_iterator end = object->end();
InspectorObject::const_iterator valueIterator = object->find(name);
if (valueIterator == end) {
if (!valueFound)
protocolErrors->pushString(String::format("Parameter '%s' with type '%s' was not found.", name.utf8().data(), type_name));
return value;
}
if (!as_method(valueIterator->value.get(), &value))
protocolErrors->pushString(String::format("Parameter '%s' has wrong type. It must be '%s'.", name.utf8().data(), type_name));
else
if (valueFound)
*valueFound = true;
return value;
}
struct AsMethodBridges {
static bool asInt(InspectorValue* value, int* output) { return value->asNumber(output); }
static bool asDouble(InspectorValue* value, double* output) { return value->asNumber(output); }
static bool asString(InspectorValue* value, String* output) { return value->asString(output); }
static bool asBoolean(InspectorValue* value, bool* output) { return value->asBoolean(output); }
static bool asObject(InspectorValue* value, RefPtr<InspectorObject>* output) { return value->asObject(output); }
static bool asArray(InspectorValue* value, RefPtr<InspectorArray>* output) { return value->asArray(output); }
};
int InspectorBackendDispatcherImpl::getInt(InspectorObject* object, const String& name, bool* valueFound, InspectorArray* protocolErrors)
{
return getPropertyValueImpl<int, int, int>(object, name, valueFound, protocolErrors, 0, AsMethodBridges::asInt, "Number");
}
double InspectorBackendDispatcherImpl::getDouble(InspectorObject* object, const String& name, bool* valueFound, InspectorArray* protocolErrors)
{
return getPropertyValueImpl<double, double, double>(object, name, valueFound, protocolErrors, 0, AsMethodBridges::asDouble, "Number");
}
String InspectorBackendDispatcherImpl::getString(InspectorObject* object, const String& name, bool* valueFound, InspectorArray* protocolErrors)
{
return getPropertyValueImpl<String, String, String>(object, name, valueFound, protocolErrors, "", AsMethodBridges::asString, "String");
}
bool InspectorBackendDispatcherImpl::getBoolean(InspectorObject* object, const String& name, bool* valueFound, InspectorArray* protocolErrors)
{
return getPropertyValueImpl<bool, bool, bool>(object, name, valueFound, protocolErrors, false, AsMethodBridges::asBoolean, "Boolean");
}
PassRefPtr<InspectorObject> InspectorBackendDispatcherImpl::getObject(InspectorObject* object, const String& name, bool* valueFound, InspectorArray* protocolErrors)
{
return getPropertyValueImpl<PassRefPtr<InspectorObject>, RefPtr<InspectorObject>, InspectorObject*>(object, name, valueFound, protocolErrors, 0, AsMethodBridges::asObject, "Object");
}
PassRefPtr<InspectorArray> InspectorBackendDispatcherImpl::getArray(InspectorObject* object, const String& name, bool* valueFound, InspectorArray* protocolErrors)
{
return getPropertyValueImpl<PassRefPtr<InspectorArray>, RefPtr<InspectorArray>, InspectorArray*>(object, name, valueFound, protocolErrors, 0, AsMethodBridges::asArray, "Array");
}
bool InspectorBackendDispatcher::getCommandName(const String& message, String* result)
{
RefPtr<InspectorValue> value = InspectorValue::parseJSON(message);
if (!value)
return false;
RefPtr<InspectorObject> object = value->asObject();
if (!object)
return false;
if (!object->getString("method", result))
return false;
return true;
}
InspectorBackendDispatcher::CallbackBase::CallbackBase(PassRefPtr<InspectorBackendDispatcherImpl> backendImpl, int id)
: m_backendImpl(backendImpl), m_id(id), m_alreadySent(false) {}
InspectorBackendDispatcher::CallbackBase::~CallbackBase() {}
void InspectorBackendDispatcher::CallbackBase::sendFailure(const ErrorString& error)
{
ASSERT(error.length());
sendIfActive(0, error);
}
bool InspectorBackendDispatcher::CallbackBase::isActive()
{
return !m_alreadySent && m_backendImpl->isActive();
}
void InspectorBackendDispatcher::CallbackBase::sendIfActive(PassRefPtr<InspectorObject> partialMessage, const ErrorString& invocationError)
{
if (m_alreadySent)
return;
m_backendImpl->sendResponse(m_id, partialMessage, invocationError);
m_alreadySent = true;
}
COMPILE_ASSERT(static_cast<int>(InspectorBackendDispatcher::kMethodNamesEnumSize) == WTF_ARRAY_LENGTH(InspectorBackendDispatcher::commandNames), command_name_array_problem);
} // namespace WebCore
#endif // ENABLE(INSPECTOR)
""")
frontend_cpp = (
"""
#include "config.h"
#if ENABLE(INSPECTOR)
#include "InspectorFrontend.h"
#include <wtf/text/WTFString.h>
#include <wtf/text/CString.h>
#include "InspectorFrontendChannel.h"
#include "InspectorValues.h"
#include <wtf/text/WTFString.h>
namespace WebCore {
InspectorFrontend::InspectorFrontend(InspectorFrontendChannel* inspectorFrontendChannel)
: $constructorInit{
}
$methods
} // namespace WebCore
#endif // ENABLE(INSPECTOR)
""")
typebuilder_h = (
"""
#ifndef InspectorTypeBuilder_h
#define InspectorTypeBuilder_h
#if ENABLE(INSPECTOR)
#include "InspectorValues.h"
#include <wtf/Assertions.h>
#include <wtf/PassRefPtr.h>
namespace WebCore {
namespace TypeBuilder {
template<typename T>
class OptOutput {
public:
OptOutput() : m_assigned(false) { }
void operator=(T value)
{
m_value = value;
m_assigned = true;
}
bool isAssigned() { return m_assigned; }
T getValue()
{
ASSERT(isAssigned());
return m_value;
}
private:
T m_value;
bool m_assigned;
WTF_MAKE_NONCOPYABLE(OptOutput);
};
// A small transient wrapper around int type, that can be used as a funciton parameter type
// cleverly disallowing C++ implicit casts from float or double.
class ExactlyInt {
public:
template<typename T>
ExactlyInt(T t) : m_value(cast_to_int<T>(t)) {}
ExactlyInt() {}
operator int() { return m_value; }
private:
int m_value;
template<typename T>
static int cast_to_int(T) { return T::default_case_cast_is_not_supported(); }
};
template<>
inline int ExactlyInt::cast_to_int<int>(int i) { return i; }
template<>
inline int ExactlyInt::cast_to_int<unsigned int>(unsigned int i) { return i; }
class RuntimeCastHelper {
public:
#if $validatorIfdefName
template<InspectorValue::Type TYPE>
static void assertType(InspectorValue* value)
{
ASSERT(value->type() == TYPE);
}
static void assertAny(InspectorValue*);
static void assertInt(InspectorValue* value);
#endif
};
// This class provides "Traits" type for the input type T. It is programmed using C++ template specialization
// technique. By default it simply takes "ItemTraits" type from T, but it doesn't work with the base types.
template<typename T>
struct ArrayItemHelper {
typedef typename T::ItemTraits Traits;
};
template<typename T>
class Array : public InspectorArrayBase {
private:
Array() { }
InspectorArray* openAccessors() {
COMPILE_ASSERT(sizeof(InspectorArray) == sizeof(Array<T>), cannot_cast);
return static_cast<InspectorArray*>(static_cast<InspectorArrayBase*>(this));
}
public:
void addItem(PassRefPtr<T> value)
{
ArrayItemHelper<T>::Traits::pushRefPtr(this->openAccessors(), value);
}
void addItem(T value)
{
ArrayItemHelper<T>::Traits::pushRaw(this->openAccessors(), value);
}
static PassRefPtr<Array<T> > create()
{
return adoptRef(new Array<T>());
}
static PassRefPtr<Array<T> > runtimeCast(PassRefPtr<InspectorValue> value)
{
RefPtr<InspectorArray> array;
bool castRes = value->asArray(&array);
ASSERT_UNUSED(castRes, castRes);
#if $validatorIfdefName
assertCorrectValue(array.get());
#endif // $validatorIfdefName
COMPILE_ASSERT(sizeof(Array<T>) == sizeof(InspectorArray), type_cast_problem);
return static_cast<Array<T>*>(static_cast<InspectorArrayBase*>(array.get()));
}
#if $validatorIfdefName
static void assertCorrectValue(InspectorValue* value)
{
RefPtr<InspectorArray> array;
bool castRes = value->asArray(&array);
ASSERT_UNUSED(castRes, castRes);
for (unsigned i = 0; i < array->length(); i++)
ArrayItemHelper<T>::Traits::template assertCorrectValue<T>(array->get(i).get());
}
#endif // $validatorIfdefName
};
struct StructItemTraits {
static void pushRefPtr(InspectorArray* array, PassRefPtr<InspectorValue> value)
{
array->pushValue(value);
}
#if $validatorIfdefName
template<typename T>
static void assertCorrectValue(InspectorValue* value) {
T::assertCorrectValue(value);
}
#endif // $validatorIfdefName
};
template<>
struct ArrayItemHelper<String> {
struct Traits {
static void pushRaw(InspectorArray* array, const String& value)
{
array->pushString(value);
}
#if $validatorIfdefName
template<typename T>
static void assertCorrectValue(InspectorValue* value) {
RuntimeCastHelper::assertType<InspectorValue::TypeString>(value);
}
#endif // $validatorIfdefName
};
};
template<>
struct ArrayItemHelper<int> {
struct Traits {
static void pushRaw(InspectorArray* array, int value)
{
array->pushInt(value);
}
#if $validatorIfdefName
template<typename T>
static void assertCorrectValue(InspectorValue* value) {
RuntimeCastHelper::assertInt(value);
}
#endif // $validatorIfdefName
};
};
template<>
struct ArrayItemHelper<double> {
struct Traits {
static void pushRaw(InspectorArray* array, double value)
{
array->pushNumber(value);
}
#if $validatorIfdefName
template<typename T>
static void assertCorrectValue(InspectorValue* value) {
RuntimeCastHelper::assertType<InspectorValue::TypeNumber>(value);
}
#endif // $validatorIfdefName
};
};
template<>
struct ArrayItemHelper<bool> {
struct Traits {
static void pushRaw(InspectorArray* array, bool value)
{
array->pushBoolean(value);
}
#if $validatorIfdefName
template<typename T>
static void assertCorrectValue(InspectorValue* value) {
RuntimeCastHelper::assertType<InspectorValue::TypeBoolean>(value);
}
#endif // $validatorIfdefName
};
};
template<>
struct ArrayItemHelper<InspectorValue> {
struct Traits {
static void pushRefPtr(InspectorArray* array, PassRefPtr<InspectorValue> value)
{
array->pushValue(value);
}
#if $validatorIfdefName
template<typename T>
static void assertCorrectValue(InspectorValue* value) {
RuntimeCastHelper::assertAny(value);
}
#endif // $validatorIfdefName
};
};
template<>
struct ArrayItemHelper<InspectorObject> {
struct Traits {
static void pushRefPtr(InspectorArray* array, PassRefPtr<InspectorValue> value)
{
array->pushValue(value);
}
#if $validatorIfdefName
template<typename T>
static void assertCorrectValue(InspectorValue* value) {
RuntimeCastHelper::assertType<InspectorValue::TypeObject>(value);
}
#endif // $validatorIfdefName
};
};
template<>
struct ArrayItemHelper<InspectorArray> {
struct Traits {
static void pushRefPtr(InspectorArray* array, PassRefPtr<InspectorArray> value)
{
array->pushArray(value);
}
#if $validatorIfdefName
template<typename T>
static void assertCorrectValue(InspectorValue* value) {
RuntimeCastHelper::assertType<InspectorValue::TypeArray>(value);
}
#endif // $validatorIfdefName
};
};
template<typename T>
struct ArrayItemHelper<TypeBuilder::Array<T> > {
struct Traits {
static void pushRefPtr(InspectorArray* array, PassRefPtr<TypeBuilder::Array<T> > value)
{
array->pushValue(value);
}
#if $validatorIfdefName
template<typename S>
static void assertCorrectValue(InspectorValue* value) {
S::assertCorrectValue(value);
}
#endif // $validatorIfdefName
};
};
${forwards}
String getEnumConstantValue(int code);
${typeBuilders}
} // namespace TypeBuilder
} // namespace WebCore
#endif // ENABLE(INSPECTOR)
#endif // !defined(InspectorTypeBuilder_h)
""")
typebuilder_cpp = (
"""
#include "config.h"
#if ENABLE(INSPECTOR)
#include "InspectorTypeBuilder.h"
#include <wtf/text/CString.h>
namespace WebCore {
namespace TypeBuilder {
const char* const enum_constant_values[] = {
$enumConstantValues};
String getEnumConstantValue(int code) {
return enum_constant_values[code];
}
} // namespace TypeBuilder
$implCode
#if $validatorIfdefName
void TypeBuilder::RuntimeCastHelper::assertAny(InspectorValue*)
{
// No-op.
}
void TypeBuilder::RuntimeCastHelper::assertInt(InspectorValue* value)
{
double v;
bool castRes = value->asNumber(&v);
ASSERT_UNUSED(castRes, castRes);
ASSERT(static_cast<double>(static_cast<int>(v)) == v);
}
$validatorCode
#endif // $validatorIfdefName
} // namespace WebCore
#endif // ENABLE(INSPECTOR)
""")
backend_js = (
"""
$domainInitializers
""")
param_container_access_code = """
RefPtr<InspectorObject> paramsContainer = requestMessageObject->getObject("params");
InspectorObject* paramsContainerPtr = paramsContainer.get();
InspectorArray* protocolErrorsPtr = protocolErrors.get();
"""
class_binding_builder_part_1 = (
""" AllFieldsSet = %s
};
template<int STATE>
class Builder {
private:
RefPtr<InspectorObject> m_result;
template<int STEP> Builder<STATE | STEP>& castState()
{
return *reinterpret_cast<Builder<STATE | STEP>*>(this);
}
Builder(PassRefPtr</*%s*/InspectorObject> ptr)
{
COMPILE_ASSERT(STATE == NoFieldsSet, builder_created_in_non_init_state);
m_result = ptr;
}
friend class %s;
public:
""")
class_binding_builder_part_2 = ("""
Builder<STATE | %s>& set%s(%s value)
{
COMPILE_ASSERT(!(STATE & %s), property_%s_already_set);
m_result->set%s("%s", %s);
return castState<%s>();
}
""")
class_binding_builder_part_3 = ("""
operator RefPtr<%s>& ()
{
COMPILE_ASSERT(STATE == AllFieldsSet, result_is_not_ready);
COMPILE_ASSERT(sizeof(%s) == sizeof(InspectorObject), cannot_cast);
return *reinterpret_cast<RefPtr<%s>*>(&m_result);
}
PassRefPtr<%s> release()
{
return RefPtr<%s>(*this).release();
}
};
""")
class_binding_builder_part_4 = (
""" static Builder<NoFieldsSet> create()
{
return Builder<NoFieldsSet>(InspectorObject::create());
}
""")
| true | true |
f7204dbb790d27090a68c4f58eeffa3c3052f15c | 3,609 | py | Python | synthesis_wrapper.py | mdsol/Simulants | 3c71702c301b2d4668adff1180d162a66172aeaa | [
"MIT"
] | null | null | null | synthesis_wrapper.py | mdsol/Simulants | 3c71702c301b2d4668adff1180d162a66172aeaa | [
"MIT"
] | null | null | null | synthesis_wrapper.py | mdsol/Simulants | 3c71702c301b2d4668adff1180d162a66172aeaa | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# Author: Mandis Beigi
# Copyright (c) 2022 Medidata Solutions, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import k_anonymity
import synthesis_lib
import preprocessor_lib
import utilities_lib
def synthesize(df, config):
    """Run the anonymize -> encode -> synthesize -> decode pipeline on ``df``.

    Steps: (1) enforce k-anonymity (date columns excluded), (2) label-encode
    and impute a working copy to discover correlated column groups,
    (3) one-hot encode and generate synthetic rows via ``synthesis_lib``,
    (4) decode back to the original column space and write the result to
    ``<output_dir>/<proj_name>_syn.csv``.

    Args:
        df: source pandas DataFrame to synthesize from.
        config: settings object providing ``anonymity_k``, ``corr_threshold``,
            ``col_pairings``, embedding/cluster/batch options, ``holdout_cols``,
            ``imputing_method``, ``add_noise``, ``output_dir`` and ``proj_name``.

    Returns:
        The synthesized DataFrame (also written to disk as a side effect).
    """
    logging.info('Performing k-anonymity to the data......................')
    logging.info('The data size before k-anonymity: {}'.format(df.shape))
    # Date columns are excluded from the k-anonymity generalization.
    ignore_columns = utilities_lib.get_date_columns(df)
    df = k_anonymity.perform_k_anonymity(df, config.anonymity_k, ignore_columns)
    logging.info('The data size after k-anonymity: {}'.format(df.shape))
    # Recomputed after k-anonymity — presumably the column set can change
    # inside perform_k_anonymity; TODO confirm.
    ignore_columns = utilities_lib.get_date_columns(df)
    tmp_df = df.loc[:, ~df.columns.isin(ignore_columns)]
    # Label encoding + imputation is only used to detect correlated column
    # groups; encoding_dict itself is not needed afterwards.
    label_encoded_df, encoding_dict = preprocessor_lib.label_encoding_encode(tmp_df)
    label_encoded_df = preprocessor_lib.impute_label_encoded_df(label_encoded_df)
    corr_cols_groups = synthesis_lib.generate_corr_cols_groups(label_encoded_df, config.corr_threshold)
    # User-supplied pairings are merged with the automatically detected ones.
    col_pairings = utilities_lib.merge_2d_lists(corr_cols_groups, config.col_pairings)
    one_hot_encoded_df = preprocessor_lib.one_hot_encoding_encode(tmp_df)
    logging.info("encoded_df: {}".format(one_hot_encoded_df.shape))
    encoded_df = one_hot_encoded_df
    logging.info('Synthesizing the data data.............................')
    syn_encoded_df = synthesis_lib.synthesize(encoded_df,
            method=config.embedding_method, metric=config.embedding_metric,
            min_cluster_size=config.min_cluster_size, max_cluster_size=config.max_cluster_size,
            batch_size=config.batch_size, corr_thresh=config.corr_threshold, include_outliers=config.include_outliers,
            holdout_cols=config.holdout_cols, derived_cols_dict={}, col_pairings=col_pairings,
            imputing_method=config.imputing_method, add_noise=config.add_noise)
    logging.info("syn_encoded_df: {}".format(syn_encoded_df.shape))
    logging.info('Decoding the synthesized data...............................')
    syn_encoded_df_no_index = syn_encoded_df.reset_index(drop=False)
    syn_df = preprocessor_lib.one_hot_encoding_decode(syn_encoded_df_no_index)
    logging.info('Saving the synthesized data.....................................')
    logging.info('syn_df: {}'.format(syn_df.shape))
    df = df.reset_index(drop=False)
    # Keep only (and order by) the columns shared with the anonymized source.
    df_columns = utilities_lib.intersection(df.columns, syn_df.columns)
    syn_df = syn_df.reindex(columns=df_columns)
    syn_df.to_csv(config.output_dir+config.proj_name+'_syn.csv', index=False)
    return(syn_df)
| 44.555556 | 118 | 0.748961 |
import logging
import k_anonymity
import synthesis_lib
import preprocessor_lib
import utilities_lib
def synthesize(df, config):
logging.info('Performing k-anonymity to the data......................')
logging.info('The data size before k-anonymity: {}'.format(df.shape))
ignore_columns = utilities_lib.get_date_columns(df)
df = k_anonymity.perform_k_anonymity(df, config.anonymity_k, ignore_columns)
logging.info('The data size after k-anonymity: {}'.format(df.shape))
ignore_columns = utilities_lib.get_date_columns(df)
tmp_df = df.loc[:, ~df.columns.isin(ignore_columns)]
label_encoded_df, encoding_dict = preprocessor_lib.label_encoding_encode(tmp_df)
label_encoded_df = preprocessor_lib.impute_label_encoded_df(label_encoded_df)
corr_cols_groups = synthesis_lib.generate_corr_cols_groups(label_encoded_df, config.corr_threshold)
col_pairings = utilities_lib.merge_2d_lists(corr_cols_groups, config.col_pairings)
one_hot_encoded_df = preprocessor_lib.one_hot_encoding_encode(tmp_df)
logging.info("encoded_df: {}".format(one_hot_encoded_df.shape))
encoded_df = one_hot_encoded_df
logging.info('Synthesizing the data data.............................')
syn_encoded_df = synthesis_lib.synthesize(encoded_df,
method=config.embedding_method, metric=config.embedding_metric,
min_cluster_size=config.min_cluster_size, max_cluster_size=config.max_cluster_size,
batch_size=config.batch_size, corr_thresh=config.corr_threshold, include_outliers=config.include_outliers,
holdout_cols=config.holdout_cols, derived_cols_dict={}, col_pairings=col_pairings,
imputing_method=config.imputing_method, add_noise=config.add_noise)
logging.info("syn_encoded_df: {}".format(syn_encoded_df.shape))
logging.info('Decoding the synthesized data...............................')
syn_encoded_df_no_index = syn_encoded_df.reset_index(drop=False)
syn_df = preprocessor_lib.one_hot_encoding_decode(syn_encoded_df_no_index)
logging.info('Saving the synthesized data.....................................')
logging.info('syn_df: {}'.format(syn_df.shape))
df = df.reset_index(drop=False)
df_columns = utilities_lib.intersection(df.columns, syn_df.columns)
syn_df = syn_df.reindex(columns=df_columns)
syn_df.to_csv(config.output_dir+config.proj_name+'_syn.csv', index=False)
return(syn_df)
| true | true |
f7204f5abde1644d55dfd088cc146878f778bf9c | 5,467 | py | Python | btservice.py | shripal17/AUVController | 077b47ffc3726aad4c715bee2711935675530cc7 | [
"Apache-2.0"
] | null | null | null | btservice.py | shripal17/AUVController | 077b47ffc3726aad4c715bee2711935675530cc7 | [
"Apache-2.0"
] | null | null | null | btservice.py | shripal17/AUVController | 077b47ffc3726aad4c715bee2711935675530cc7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import logging
import logging.handlers
import argparse
import sys
import os
import time
from bluetooth import *
class LoggerHelper(object):
    """File-like adapter that forwards written text to a logger.

    Instances can replace ``sys.stdout``/``sys.stderr`` so that anything
    printed by the process is emitted through ``logger`` at ``level``.
    """
    def __init__(self, logger, level):
        self.logger = logger
        self.level = level
    def write(self, message):
        # Skip writes that are pure whitespace (e.g. print's trailing '\n').
        text = message.rstrip()
        if text:
            self.logger.log(self.level, text)
def setup_logging():
    """Configure rotating file logging and redirect stdout/stderr through it.

    Reads an optional ``-l/--log`` command line argument for the log file
    path (default ``/home/pi/AUV/btservice.log``), installs a midnight-
    rotating file handler keeping 3 backups, then replaces ``sys.stdout``
    and ``sys.stderr`` with :class:`LoggerHelper` adapters at INFO and
    ERROR level respectively.

    NOTE(review): calls ``argparse.parse_args()`` on the live ``sys.argv``,
    so it consumes the whole command line and exits on unknown arguments.
    """
    # Default logging settings
    LOG_FILE = "/home/pi/AUV/btservice.log"
    LOG_LEVEL = logging.INFO
    # Define and parse command line arguments
    argp = argparse.ArgumentParser(description="Raspberry PI Bluetooth Server")
    argp.add_argument("-l", "--log", help="log (default '" + LOG_FILE + "')")
    # Grab the log file from arguments
    args = argp.parse_args()
    if args.log:
        LOG_FILE = args.log
    # Setup the logger
    logger = logging.getLogger(__name__)
    # Set the log level
    logger.setLevel(LOG_LEVEL)
    # Make a rolling event log that resets at midnight and backs-up every 3 days
    handler = logging.handlers.TimedRotatingFileHandler(LOG_FILE,
                                                        when="midnight",
                                                        backupCount=3)
    # Log messages should include time stamp and log level
    formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
    # Attach the formatter to the handler
    handler.setFormatter(formatter)
    # Attach the handler to the logger
    logger.addHandler(handler)
    # Replace stdout with logging to file at INFO level
    sys.stdout = LoggerHelper(logger, logging.INFO)
    # Replace stderr with logging to file at ERROR level
    sys.stderr = LoggerHelper(logger, logging.ERROR)
# Main loop
def main():
    """Bluetooth RFCOMM server: accept a client and parse streamed sensor data.

    The client sends CRLF-terminated CSV records:
        Acc,x,y,z               accelerometer
        Gyr,x,y,z               gyroscope
        LDi,light,direction     light intensity and compass heading
        Ori,azimuth,pitch,roll  orientation
    Parsed values are stored in local state and echoed for debugging.
    Runs forever; Ctrl-C shuts the server down.
    """
    # Setup logging
    # setup_logging()
    # Latest sensor readings, updated as messages arrive.
    acc = [0, 0, 0]           # accelerometer x, y, z
    gyro = [0, 0, 0]          # gyroscope x, y, z
    orientation = [0, 0, 0]   # orientation: azimuth, pitch, roll
    light = 0                 # light intensity
    direction = 0             # compass degrees
    # We need to wait until Bluetooth init is done
    time.sleep(1)
    # Make device visible
    os.system("hciconfig hci0 piscan")
    # Create a new server socket using RFCOMM protocol
    server_sock = BluetoothSocket(RFCOMM)
    # Bind to any port
    server_sock.bind(("", 1))
    # Start listening
    server_sock.listen(1)
    operations = ["Acc,", "Gyr,", "LDi,", "Ori,"]
    # Get the port the server socket is listening
    port = server_sock.getsockname()[1]
    # The service UUID to advertise
    uuid = "00001101-0000-1000-8000-00805F9B34FB"
    # Start advertising the service
    advertise_service(server_sock, "AUV",
                      service_id=uuid,
                      service_classes=[uuid, SERIAL_PORT_CLASS],
                      profiles=[SERIAL_PORT_PROFILE])
    # Main Bluetooth server loop
    while True:
        print("Waiting for connection on RFCOMM channel %d" % port)
        try:
            client_sock = None
            # This will block until we get a new connection
            client_sock, client_info = server_sock.accept()
            print("Accepted connection from ", client_info)
            data = ""
            while True:
                # Read the data sent by the client
                chunk = client_sock.recv(1024)
                if not chunk:
                    # Peer disconnected: stop reading and accept a new client
                    # (previously an empty recv() caused a tight busy loop).
                    break
                if isinstance(chunk, bytes):
                    # PyBluez returns bytes under Python 3; decode before
                    # concatenating with the str buffer.
                    chunk = chunk.decode("utf-8", "replace")
                data += chunk
                # Split into complete CRLF-terminated records and keep the
                # trailing partial record for the next recv(); the original
                # code cleared the buffer, losing records split across reads.
                parts = data.split("\r\n")
                data = parts.pop()
                for part in parts:
                    if not part:
                        continue
                    print(part)
                    try:
                        subparts = part.split(",")
                        if operations[0] in part:
                            acc[0] = float(subparts[1])
                            acc[1] = float(subparts[2])
                            acc[2] = float(subparts[3])
                            print(acc)
                        elif operations[1] in part:
                            gyro[0] = float(subparts[1])
                            gyro[1] = float(subparts[2])
                            gyro[2] = float(subparts[3])
                            print(gyro)
                        elif operations[2] in part:
                            light = float(subparts[1])
                            direction = float(subparts[2])
                            print(light, direction)
                        elif operations[3] in part:
                            orientation[0] = float(subparts[1])
                            orientation[1] = float(subparts[2])
                            orientation[2] = float(subparts[3])
                            print(orientation)
                    except (ValueError, IndexError):
                        # Malformed record: report it but keep the connection.
                        print("Ignoring malformed message: %r" % part)
            # Client went away; release the socket before accepting another.
            client_sock.close()
        except IOError:
            pass
        except KeyboardInterrupt:
            if client_sock is not None:
                client_sock.close()
            server_sock.close()
            print("Server going down")
            break
main() | 30.887006 | 97 | 0.522773 |
import logging
import logging.handlers
import argparse
import sys
import os
import time
from bluetooth import *
class LoggerHelper(object):
def __init__(self, logger, level):
self.logger = logger
self.level = level
def write(self, message):
if message.rstrip() != "":
self.logger.log(self.level, message.rstrip())
def setup_logging():
LOG_FILE = "/home/pi/AUV/btservice.log"
LOG_LEVEL = logging.INFO
argp = argparse.ArgumentParser(description="Raspberry PI Bluetooth Server")
argp.add_argument("-l", "--log", help="log (default '" + LOG_FILE + "')")
args = argp.parse_args()
if args.log:
LOG_FILE = args.log
logger = logging.getLogger(__name__)
logger.setLevel(LOG_LEVEL)
handler = logging.handlers.TimedRotatingFileHandler(LOG_FILE,
when="midnight",
backupCount=3)
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
sys.stdout = LoggerHelper(logger, logging.INFO)
sys.stderr = LoggerHelper(logger, logging.ERROR)
def main():
acc = [0, 0, 0]
gyro = [0, 0, 0]
orientation = [0, 0, 0]
light = 0
direction = 0
time.sleep(1)
os.system("hciconfig hci0 piscan")
server_sock = BluetoothSocket(RFCOMM)
server_sock.bind(("", 1))
server_sock.listen(1)
operations = ["Acc,", "Gyr,", "LDi,", "Ori,"]
port = server_sock.getsockname()[1]
uuid = "00001101-0000-1000-8000-00805F9B34FB"
advertise_service(server_sock, "AUV",
service_id=uuid,
service_classes=[uuid, SERIAL_PORT_CLASS],
profiles=[SERIAL_PORT_PROFILE])
while True:
print("Waiting for connection on RFCOMM channel %d" % port)
try:
client_sock = None
client_sock, client_info = server_sock.accept()
print("Accepted connection from ", client_info)
data = ""
while True:
data += client_sock.recv(1024)
if ((len(data) > 0)):
parts = data.split("\r\n")
for part in parts:
print(part)
if (operations[0] in part):
subparts = part.split(",")
acc[0] = float(subparts[1])
acc[1] = float(subparts[2])
acc[2] = float(subparts[3])
print(acc)
elif (operations[1] in part):
subparts = part.split(",")
gyro[0] = float(subparts[1])
gyro[1] = float(subparts[2])
gyro[2] = float(subparts[3])
print(gyro)
elif (operations[2] in part):
subparts = part.split(",")
light = float(subparts[1])
direction = float(subparts[2])
print(light, direction)
elif (operations[3] in part):
subparts = part.split(",")
orientation[0] = float(subparts[1])
orientation[1] = float(subparts[2])
orientation[2] = float(subparts[3])
print(orientation)
data = ""
except IOError:
pass
except KeyboardInterrupt:
if client_sock is not None:
client_sock.close()
server_sock.close()
print("Server going down")
break
main() | true | true |
f7204f6d6a1bb0b49c21d7f8d2f69d975e0e8468 | 584 | py | Python | src/sensing/gpsTest/test.py | NikLeberg/quadro2 | ec2f37858a32b4bb88f7887fc52b683e067c0e1e | [
"MIT"
] | 1 | 2021-02-13T20:12:57.000Z | 2021-02-13T20:12:57.000Z | src/sensing/gpsTest/test.py | NikLeberg/quadro2 | ec2f37858a32b4bb88f7887fc52b683e067c0e1e | [
"MIT"
] | null | null | null | src/sensing/gpsTest/test.py | NikLeberg/quadro2 | ec2f37858a32b4bb88f7887fc52b683e067c0e1e | [
"MIT"
] | 1 | 2021-03-05T19:04:03.000Z | 2021-03-05T19:04:03.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*
import sys
import csv
def main():
    """Rebuild serial text lines from a logic-analyzer CSV export.

    Each CSV row's second column holds one decoded character or a control
    token ('CR', 'LF', 'SP').  Characters accumulate until a CR finalizes
    the line; the LF that follows triggers printing it.  A row with five
    columns means the decoded character was a comma itself (split apart by
    the CSV delimiter).  The input file path comes from ``sys.argv[1]``.
    """
    line = ''
    chars = []
    with open(str(sys.argv[1])) as csvfile:
        for row in csv.reader(csvfile, delimiter=','):
            token = row[1]
            if 'CR' in token:
                # Carriage return: the accumulated characters form a line.
                line = ''.join(chars)
                chars = []
            elif 'LF' in token:
                # Line feed follows CR: emit the completed line.
                print(line)
            elif 'SP' in token:
                chars.append(' ')
            elif len(row) == 5:
                # The decoded character was ',' and got split by the reader.
                chars.append(',')
            else:
                chars.append(token)
main() | 20.137931 | 53 | 0.356164 |
import sys
import csv
def main():
s = ''
l = []
with open(str(sys.argv[1])) as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
for row in readCSV:
c = row[1]
if 'CR' in c:
s = ''.join(l)
l = []
elif 'LF' in c:
print(s)
elif 'SP' in c:
l.append(' ')
elif len(row) == 5:
l.append(',')
else:
l.append(c)
main() | true | true |
f7204f97b7c820606a1883b076c1563755aa5e97 | 971 | py | Python | tests/test_items.py | JoshMcKinstry/Dork_Game_team_octosquad | 56cf823ae2a7357d9c50f238da7c72d61fa2cb53 | [
"MIT"
] | null | null | null | tests/test_items.py | JoshMcKinstry/Dork_Game_team_octosquad | 56cf823ae2a7357d9c50f238da7c72d61fa2cb53 | [
"MIT"
] | 12 | 2019-06-26T16:36:13.000Z | 2019-07-29T17:42:05.000Z | tests/test_items.py | JoshMcKinstry/team34 | 56cf823ae2a7357d9c50f238da7c72d61fa2cb53 | [
"MIT"
] | 3 | 2019-07-03T08:08:35.000Z | 2019-07-14T16:00:46.000Z | """
A test for items
"""
from dork.items import Item
def test_init_method():
    """
    Testing the constructor: the stored attributes must echo the arguments.
    """
    name = 'Donut'
    # Plain string / list values, matching how Item is constructed in the
    # other tests; the original accidentally wrapped both in set literals.
    description = 'This is an old fasion donut'
    properties = ['eatable']
    item = Item(name, description, properties)
    assert item.name == name
    assert item.description == description
    assert item.properties == properties
def test_has_property():
    """
    Testing the has_property method for a property the item owns.
    """
    name = 'Donut'
    # Plain string; the original accidentally used a set literal here.
    description = 'This is an old fasion donut'
    properties = ['eatable', 'pickable']
    item = Item(name, description, properties)
    assert item.has_property('eatable') is True
def test_yaml_representation():
    """
    Testing that yaml_representation serializes the item to a dict.
    """
    name = 'Donut'
    # Plain string; the original accidentally used a set literal here.
    description = 'This is an old fasion donut'
    properties = ['eatable', 'pickable']
    item = Item(name, description, properties)
    assert isinstance(item.yaml_representation(), dict)
| 24.275 | 55 | 0.654995 | from dork.items import Item
def test_init_method():
name = 'Donut'
description = {'This is an old fasion donut'}
properties = {'eatable'}
item = Item(name, description, properties)
assert item.name == name
assert item.description == description
assert item.properties == properties
def test_has_property():
name = 'Donut'
description = {'This is an old fasion donut'}
properties = ['eatable', 'pickable']
item = Item(name, description, properties)
assert item.has_property('eatable') is True
def test_yaml_representation():
name = 'Donut'
description = {'This is an old fasion donut'}
properties = ['eatable', 'pickable']
item = Item(name, description, properties)
assert isinstance(item.yaml_representation(), dict)
| true | true |
f72050f9533ad622e35f21606eba12c9e386ae67 | 3,953 | py | Python | NAIP/filter_poly.py | pberezina/earthengine-py-notebooks | 4cbe3c52bcc9ed3f1337bf097aa5799442991a5e | [
"MIT"
] | 1 | 2020-03-20T19:39:34.000Z | 2020-03-20T19:39:34.000Z | NAIP/filter_poly.py | pberezina/earthengine-py-notebooks | 4cbe3c52bcc9ed3f1337bf097aa5799442991a5e | [
"MIT"
] | null | null | null | NAIP/filter_poly.py | pberezina/earthengine-py-notebooks | 4cbe3c52bcc9ed3f1337bf097aa5799442991a5e | [
"MIT"
] | null | null | null | '''
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/NAIP/filter_poly.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/NAIP/filter_poly.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=NAIP/filter_poly.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/NAIP/filter_poly.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
'''
# %%
'''
## Install Earth Engine API
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
The following script checks if the geehydro package has been installed. If not, it will install geehydro, which automatically install its dependencies, including earthengine-api and folium.
'''
# %%
import subprocess
try:
import geehydro
except ImportError:
print('geehydro package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geehydro'])
# %%
'''
Import libraries
'''
# %%
import ee
import folium
import geehydro
# %%
'''
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once.
'''
# %%
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
# %%
'''
## Create an interactive map
This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function.
The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
'''
# %%
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
# %%
'''
## Add Earth Engine Python script
'''
# %%
collection = ee.ImageCollection('USDA/NAIP/DOQQ')
polys = ee.Geometry.Polygon(
[[[-99.29615020751953, 46.725459351792374],
[-99.2116928100586, 46.72404725733022],
[-99.21443939208984, 46.772037733479884],
[-99.30267333984375, 46.77321343419932]]])
centroid = polys.centroid()
lng, lat = centroid.getInfo()['coordinates']
print("lng = {}, lat = {}".format(lng, lat))
lng_lat = ee.Geometry.Point(lng, lat)
naip = collection.filterBounds(polys)
naip_2015 = naip.filterDate('2015-01-01', '2015-12-31')
ppr = naip_2015.mosaic()
count = naip_2015.size().getInfo()
print("Count: ", count)
# print(naip_2015.size().getInfo())
# vis = {'bands': ['N', 'R', 'G']}
# Map.setCenter(lng, lat, 12)
# Map.addLayer(ppr,vis)
# Map.addLayer(polys)
downConfig = {'scale': 30, "maxPixels": 1.0E13, 'driveFolder': 'image'} # scale means resolution.
img_lst = naip_2015.toList(100)
for i in range(0, count):
image = ee.Image(img_lst.get(i))
name = image.get('system:index').getInfo()
# print(name)
task = ee.batch.Export.image(image, name, downConfig)
task.start()
# %%
'''
## Display Earth Engine data layers
'''
# %%
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map | 34.077586 | 422 | 0.707058 |
import subprocess
try:
import geehydro
except ImportError:
print('geehydro package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geehydro'])
import ee
import folium
import geehydro
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
collection = ee.ImageCollection('USDA/NAIP/DOQQ')
polys = ee.Geometry.Polygon(
[[[-99.29615020751953, 46.725459351792374],
[-99.2116928100586, 46.72404725733022],
[-99.21443939208984, 46.772037733479884],
[-99.30267333984375, 46.77321343419932]]])
centroid = polys.centroid()
lng, lat = centroid.getInfo()['coordinates']
print("lng = {}, lat = {}".format(lng, lat))
lng_lat = ee.Geometry.Point(lng, lat)
naip = collection.filterBounds(polys)
naip_2015 = naip.filterDate('2015-01-01', '2015-12-31')
ppr = naip_2015.mosaic()
count = naip_2015.size().getInfo()
print("Count: ", count)
downConfig = {'scale': 30, "maxPixels": 1.0E13, 'driveFolder': 'image'}
img_lst = naip_2015.toList(100)
for i in range(0, count):
image = ee.Image(img_lst.get(i))
name = image.get('system:index').getInfo()
task = ee.batch.Export.image(image, name, downConfig)
task.start()
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map | true | true |
f72051a61c28b0f296ebe1cb03fdacdcdbc270e1 | 1,664 | py | Python | geocamUtil/models/UuidField.py | geocam/geocamUtilWeb | b64fc063c64b4b0baa140db4c126f2ff980756ab | [
"NASA-1.3"
] | 4 | 2017-03-03T16:24:24.000Z | 2018-06-24T05:50:40.000Z | geocamUtil/models/UuidField.py | geocam/geocamUtilWeb | b64fc063c64b4b0baa140db4c126f2ff980756ab | [
"NASA-1.3"
] | 1 | 2021-09-29T17:17:30.000Z | 2021-09-29T17:17:30.000Z | geocamUtil/models/UuidField.py | geocam/geocamUtilWeb | b64fc063c64b4b0baa140db4c126f2ff980756ab | [
"NASA-1.3"
] | 1 | 2017-12-19T20:45:53.000Z | 2017-12-19T20:45:53.000Z | # __BEGIN_LICENSE__
#Copyright (c) 2015, United States Government, as represented by the
#Administrator of the National Aeronautics and Space Administration.
#All rights reserved.
# __END_LICENSE__
try:
import uuid
except ImportError:
uuid = None
from django.db import models
if uuid:
def makeUuid():
return str(uuid.uuid4())
else:
import random
def makeUuid():
return '%04x-%02x-%02x-%02x-%06x' % (random.getrandbits(32), random.getrandbits(8),
random.getrandbits(8), random.getrandbits(8),
random.getrandbits(48))
class UuidField(models.CharField):
def __init__(self, *args, **kwargs):
kwargs.setdefault('max_length', 48)
kwargs.setdefault('editable', False)
kwargs.setdefault('db_index', True)
super(UuidField, self).__init__(*args, **kwargs)
def pre_save(self, model_instance, add):
if add and not getattr(model_instance, self.attname):
value = makeUuid()
setattr(model_instance, self.attname, value)
return value
else:
return super(UuidField, self).pre_save(model_instance, add)
try:
from south.modelsinspector import add_introspection_rules
# tell south it can freeze this field without any special nonsense
add_introspection_rules([], [r'^geocamUtil\.models\.UuidField'])
except ImportError:
pass
UuidField.UuidField = UuidField
class UuidModel(models.Model):
"""
A model mixin which provides a uuid field
"""
uuid = UuidField(db_index=True)
class Meta:
abstract = True | 28.689655 | 91 | 0.646034 |
try:
import uuid
except ImportError:
uuid = None
from django.db import models
if uuid:
def makeUuid():
return str(uuid.uuid4())
else:
import random
def makeUuid():
return '%04x-%02x-%02x-%02x-%06x' % (random.getrandbits(32), random.getrandbits(8),
random.getrandbits(8), random.getrandbits(8),
random.getrandbits(48))
class UuidField(models.CharField):
def __init__(self, *args, **kwargs):
kwargs.setdefault('max_length', 48)
kwargs.setdefault('editable', False)
kwargs.setdefault('db_index', True)
super(UuidField, self).__init__(*args, **kwargs)
def pre_save(self, model_instance, add):
if add and not getattr(model_instance, self.attname):
value = makeUuid()
setattr(model_instance, self.attname, value)
return value
else:
return super(UuidField, self).pre_save(model_instance, add)
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], [r'^geocamUtil\.models\.UuidField'])
except ImportError:
pass
UuidField.UuidField = UuidField
class UuidModel(models.Model):
uuid = UuidField(db_index=True)
class Meta:
abstract = True | true | true |
f72051a8faae27e72e9e3e355265d217e689a1b4 | 82 | py | Python | ABC099/B.py | shimomura314/AtcoderCodes | db1d62a7715f5f1b3c40eceff8d34f0f34839f41 | [
"MIT"
] | null | null | null | ABC099/B.py | shimomura314/AtcoderCodes | db1d62a7715f5f1b3c40eceff8d34f0f34839f41 | [
"MIT"
] | null | null | null | ABC099/B.py | shimomura314/AtcoderCodes | db1d62a7715f5f1b3c40eceff8d34f0f34839f41 | [
"MIT"
] | null | null | null | x,y = map(int,input().split())
a = 0
for i in range(y-x-1):
a += i+1
print(a-x) | 16.4 | 30 | 0.536585 | x,y = map(int,input().split())
a = 0
for i in range(y-x-1):
a += i+1
print(a-x) | true | true |
f720522b36dfb68926f25b5e15f47500f2daffa4 | 342 | py | Python | src/france/draft_2.py | ClementRolinat/covidtracker-data | 7536b8899bc9bc0a6547288e86838233870dadaf | [
"MIT"
] | 314 | 2020-03-16T20:31:50.000Z | 2022-03-11T17:54:07.000Z | src/france/draft_2.py | ClementRolinat/covidtracker-data | 7536b8899bc9bc0a6547288e86838233870dadaf | [
"MIT"
] | 39 | 2020-03-30T16:27:03.000Z | 2022-02-28T14:33:55.000Z | src/france/draft_2.py | ClementRolinat/covidtracker-data | 7536b8899bc9bc0a6547288e86838233870dadaf | [
"MIT"
] | 61 | 2020-04-30T18:27:33.000Z | 2022-03-25T09:53:45.000Z | #!/usr/bin/env python
# coding: utf-8
# In[29]:
import math
T = 10
RH = 50
AH_num = 6.112 * math.exp(17.67 * T / (T+243.5)) * RH * 2.1674
AH_den = 273.15 + T
AH = AH_num / AH_den
contenu_exp = (T-7.5)**2/196 + (RH-75)**2/625 + (AH-6)**2/2.89
IPTCC = 100 * math.exp(-0.5 * contenu_exp)
IPTCC
# In[9]:
math.exp(-0.5 * contenu_exp)
| 12.666667 | 63 | 0.570175 |
import math
T = 10
RH = 50
AH_num = 6.112 * math.exp(17.67 * T / (T+243.5)) * RH * 2.1674
AH_den = 273.15 + T
AH = AH_num / AH_den
contenu_exp = (T-7.5)**2/196 + (RH-75)**2/625 + (AH-6)**2/2.89
IPTCC = 100 * math.exp(-0.5 * contenu_exp)
IPTCC
math.exp(-0.5 * contenu_exp)
| true | true |
f7205256fa9a8ca8c439c25f4c8effda40af5e58 | 1,930 | py | Python | montage_pt.py | Yuxuan-PO/eddyshelf | 3aad0ca24d0812fdab666a7a753f28096164db26 | [
"MIT"
] | 1 | 2021-04-19T01:04:39.000Z | 2021-04-19T01:04:39.000Z | montage_pt.py | Yuxuan-PO/eddyshelf | 3aad0ca24d0812fdab666a7a753f28096164db26 | [
"MIT"
] | null | null | null | montage_pt.py | Yuxuan-PO/eddyshelf | 3aad0ca24d0812fdab666a7a753f28096164db26 | [
"MIT"
] | 2 | 2017-10-05T03:52:55.000Z | 2021-04-19T01:04:36.000Z | #!/usr/bin/python3
def make_montage(basedir, depths):
""" makes a montage of passive tracer animation from runs.animate_pt
run with different depths
Arguments:
basedir - basedir to which depths are appended i.e., runew-03-pt-z-
depths - depths at which stuff has been outputted
Returns
none
Deepak Cherian - 23/01/2014
"""
import subprocess
import glob
import os
# first find number of files
flist = glob.glob(basedir + str(depths[0]) + '/*.png')
N = len(flist)
print(depths)
outdir = 'temp_pt'
outfmt_av = './' + outdir + '/output_%06d.png'
outfmt_mo = './' + outdir + '/output_{0:06d}.png'
# get runname by splitting by '/'
# and partitioning at the -z introduced by runs.animate_pt
outname = basedir.split('/')[-1].rpartition('-z')[0] + '.mp4'
# avconv options
frameRate = 5
bitRate = 25000000
# avconvArgs = ''
# make temp dir
try:
os.mkdir(outdir)
except os.FileExistsError:
subprocess.call(['rm', '-rf', outdir])
os.mkdir(outdir)
for ii in range(1, N+1): # range(1,N):
print('Processing image ' + str(ii) + '/' + str(N))
fname = '/mm_frame_{0:06d}.png'.format(ii)
# arguments for montage command
argument = 'montage '
for jj in depths:
argument += basedir + str(jj) + fname + ' '
argument += '-geometry 1600x900 ' + outfmt_mo.format(ii)
# call the montage command for each set of images
subprocess.call(argument.split())
# all output images have been created
# now execute avconv command
avconv = ('avconv -r {0} -f image2 -i {1} -q:v 1 -g 1 -b:v {2} {3}'.
format(frameRate, outfmt_av, bitRate, outname))
print(avconv)
subprocess.call(avconv.split())
if __name__ == '__main__':
import sys
make_montage(sys.argv[1], sys.argv[2:])
| 26.805556 | 75 | 0.594819 |
def make_montage(basedir, depths):
import subprocess
import glob
import os
flist = glob.glob(basedir + str(depths[0]) + '/*.png')
N = len(flist)
print(depths)
outdir = 'temp_pt'
outfmt_av = './' + outdir + '/output_%06d.png'
outfmt_mo = './' + outdir + '/output_{0:06d}.png'
outname = basedir.split('/')[-1].rpartition('-z')[0] + '.mp4'
frameRate = 5
bitRate = 25000000
try:
os.mkdir(outdir)
except os.FileExistsError:
subprocess.call(['rm', '-rf', outdir])
os.mkdir(outdir)
for ii in range(1, N+1):
print('Processing image ' + str(ii) + '/' + str(N))
fname = '/mm_frame_{0:06d}.png'.format(ii)
argument = 'montage '
for jj in depths:
argument += basedir + str(jj) + fname + ' '
argument += '-geometry 1600x900 ' + outfmt_mo.format(ii)
subprocess.call(argument.split())
avconv = ('avconv -r {0} -f image2 -i {1} -q:v 1 -g 1 -b:v {2} {3}'.
format(frameRate, outfmt_av, bitRate, outname))
print(avconv)
subprocess.call(avconv.split())
if __name__ == '__main__':
import sys
make_montage(sys.argv[1], sys.argv[2:])
| true | true |
f7205265c35def010f24eb062003ac4efb295334 | 2,347 | py | Python | orchestrator-bundle/orc8r-eventd-operator/tests/unit/test_charm.py | canonical/charmed-magma | 43ac2782930518ee419ad3e9ab451df5ae3324f6 | [
"Apache-2.0"
] | 2 | 2021-12-10T16:32:23.000Z | 2021-12-15T21:14:46.000Z | orchestrator-bundle/orc8r-eventd-operator/tests/unit/test_charm.py | canonical/charmed-magma | 43ac2782930518ee419ad3e9ab451df5ae3324f6 | [
"Apache-2.0"
] | 2 | 2022-02-25T14:21:15.000Z | 2022-03-08T23:56:45.000Z | orchestrator-bundle/orc8r-eventd-operator/tests/unit/test_charm.py | canonical/charmed-magma | 43ac2782930518ee419ad3e9ab451df5ae3324f6 | [
"Apache-2.0"
] | 3 | 2021-12-10T16:23:33.000Z | 2021-12-16T10:08:23.000Z | #!/usr/bin/env python3
# Copyright 2021 Canonical Ltd.
# See LICENSE file for licensing details.
import unittest
from unittest.mock import call, patch
from ops import testing
from ops.model import BlockedStatus
from charm import MagmaOrc8rEventdCharm
testing.SIMULATE_CAN_CONNECT = True
class Test(unittest.TestCase):
"""
Unit tests for charms that leverage the `orc8r_base` and `orc8r_base_db` libraries are
done at the library level. This file only contains tests for additional functionality not
present in the base libraries.
"""
@patch(
"charm.KubernetesServicePatch",
lambda charm, ports, additional_labels, additional_annotations: None,
)
def setUp(self):
self.harness = testing.Harness(MagmaOrc8rEventdCharm)
self.addCleanup(self.harness.cleanup)
self.harness.begin()
def test_given_default_config_when_on_config_changed_then_status_is_blocked(self):
key_values = {"elasticsearch-url": ""}
self.harness.container_pebble_ready("magma-orc8r-eventd")
self.harness.update_config(key_values=key_values)
assert self.harness.charm.unit.status == BlockedStatus(
"Config for elasticsearch is not valid. Format should be <hostname>:<port>"
)
@patch("ops.model.Container.push")
def test_given_good_elasticsearch_config_when_config_changed_then_config_is_written_to_file(
self, patch_push
):
hostname = "blablabla"
port = 80
config = {"elasticsearch-url": f"{hostname}:{port}"}
self.harness.container_pebble_ready("magma-orc8r-eventd")
self.harness.update_config(key_values=config)
calls = [
call(
"/var/opt/magma/configs/orc8r/elastic.yml",
f'"elasticHost": "{hostname}"\n' f'"elasticPort": {port}\n',
),
]
patch_push.assert_has_calls(calls)
def test_given_bad_elasticsearch_config_when_config_changed_then_status_is_blocked(self):
config = {"elasticsearch-url": "hello"}
self.harness.container_pebble_ready("magma-orc8r-eventd")
self.harness.update_config(key_values=config)
assert self.harness.charm.unit.status == BlockedStatus(
"Config for elasticsearch is not valid. Format should be <hostname>:<port>"
)
| 33.528571 | 96 | 0.691095 |
import unittest
from unittest.mock import call, patch
from ops import testing
from ops.model import BlockedStatus
from charm import MagmaOrc8rEventdCharm
testing.SIMULATE_CAN_CONNECT = True
class Test(unittest.TestCase):
@patch(
"charm.KubernetesServicePatch",
lambda charm, ports, additional_labels, additional_annotations: None,
)
def setUp(self):
self.harness = testing.Harness(MagmaOrc8rEventdCharm)
self.addCleanup(self.harness.cleanup)
self.harness.begin()
def test_given_default_config_when_on_config_changed_then_status_is_blocked(self):
key_values = {"elasticsearch-url": ""}
self.harness.container_pebble_ready("magma-orc8r-eventd")
self.harness.update_config(key_values=key_values)
assert self.harness.charm.unit.status == BlockedStatus(
"Config for elasticsearch is not valid. Format should be <hostname>:<port>"
)
@patch("ops.model.Container.push")
def test_given_good_elasticsearch_config_when_config_changed_then_config_is_written_to_file(
self, patch_push
):
hostname = "blablabla"
port = 80
config = {"elasticsearch-url": f"{hostname}:{port}"}
self.harness.container_pebble_ready("magma-orc8r-eventd")
self.harness.update_config(key_values=config)
calls = [
call(
"/var/opt/magma/configs/orc8r/elastic.yml",
f'"elasticHost": "{hostname}"\n' f'"elasticPort": {port}\n',
),
]
patch_push.assert_has_calls(calls)
def test_given_bad_elasticsearch_config_when_config_changed_then_status_is_blocked(self):
config = {"elasticsearch-url": "hello"}
self.harness.container_pebble_ready("magma-orc8r-eventd")
self.harness.update_config(key_values=config)
assert self.harness.charm.unit.status == BlockedStatus(
"Config for elasticsearch is not valid. Format should be <hostname>:<port>"
)
| true | true |
f7205449d69b43bd90ff1d11cf5ac09d1ed82d7f | 393,252 | py | Python | test/test_linalg.py | Mu-L/pytorch | b0bdf588ea575928a94264c30999385d5ff2bc32 | [
"Intel"
] | 1 | 2022-01-01T14:41:11.000Z | 2022-01-01T14:41:11.000Z | test/test_linalg.py | Mu-L/pytorch | b0bdf588ea575928a94264c30999385d5ff2bc32 | [
"Intel"
] | null | null | null | test/test_linalg.py | Mu-L/pytorch | b0bdf588ea575928a94264c30999385d5ff2bc32 | [
"Intel"
] | null | null | null | # -*- coding: utf-8 -*-
# Owner(s): ["module: linear algebra"]
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import random
from random import randrange
from itertools import product
from functools import reduce
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ASAN, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU,
iter_indices, gradcheck, gradgradcheck)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, floating_types, floating_and_complex_types, get_all_dtypes, get_all_int_dtypes, get_all_complex_dtypes,
get_all_fp_dtypes,
)
from torch.testing._internal.common_cuda import SM53OrLater, tf32_on_and_off, CUDA11OrLater, CUDA9
from torch.distributions.binomial import Binomial
# Protects against includes accidentally setting the default dtype
# NOTE: jit_metaprogramming_utils sets the default dtype to double!
torch.set_default_dtype(torch.float32)
assert torch.get_default_dtype() is torch.float32
if TEST_SCIPY:
import scipy
class TestLinalg(TestCase):
def setUp(self):
super(self.__class__, self).setUp()
torch.backends.cuda.matmul.allow_tf32 = False
def tearDown(self):
torch.backends.cuda.matmul.allow_tf32 = True
super(self.__class__, self).tearDown()
exact_dtype = True
@dtypes(torch.float, torch.cfloat)
@precisionOverride({torch.float: 1e-06, torch.cfloat: 1e-06})
@tf32_on_and_off(5e-3)
def test_inner(self, device, dtype):
def check(a_sizes_, b_sizes_):
for a_sizes, b_sizes in ((a_sizes_, b_sizes_), (b_sizes_, a_sizes_)):
a = torch.randn(a_sizes, dtype=dtype, device=device)
b = torch.randn(b_sizes, dtype=dtype, device=device)
res = torch.inner(a, b)
ref = np.inner(a.cpu().numpy(), b.cpu().numpy())
self.assertEqual(res.cpu(), torch.from_numpy(np.array(ref)))
out = torch.zeros_like(res)
torch.inner(a, b, out=out)
self.assertEqual(res, out)
check([], []) # scalar x scalar
check([], [0]) # scalar x empty
check([], [3]) # scalar x 1D
check([], [2, 3, 4]) # scalar x 3D
check([0], [0]) # empty x empty
check([0], [2, 0]) # empty x 2D
check([2], [2]) # 1D x 1D
check([2], [3, 1, 2]) # 1D x 3D
check([2], [3, 0, 2]) # 1D x 3D empty
check([1, 2], [3, 2]) # 2D x 2D
check([1, 2], [3, 4, 2]) # 2D x 3D
check([2, 1, 3, 2], [1, 3, 2, 2]) # 4D x 4D
# Test noncontiguous input
a = torch.randn(3, 2, device=device, dtype=dtype).transpose_(0, 1)
b = torch.randn(4, 3, device=device, dtype=dtype)[::2, :]
self.assertFalse(a.is_contiguous() or b.is_contiguous())
self.assertEqual(a.inner(b).cpu().numpy(), np.inner(a.cpu().numpy(), b.cpu().numpy()))
# Test error message
with self.assertRaisesRegex(RuntimeError,
r"inner\(\) the last dimension must match on both "
r"input tensors but got shapes \[2, 3\] and \[2, 2\]"):
torch.randn(2, 3, device=device, dtype=dtype).inner(torch.randn(2, 2, device=device, dtype=dtype))
# Tests torch.outer, and its alias, torch.ger, vs. NumPy
@precisionOverride({torch.bfloat16: 1e-1})
@dtypes(*(get_all_dtypes()))
def test_outer(self, device, dtype):
def run_test_case(a, b):
if dtype == torch.bfloat16:
a_np = a.to(torch.double).cpu().numpy()
b_np = b.to(torch.double).cpu().numpy()
exact_dtype = False
else:
a_np = a.cpu().numpy()
b_np = b.cpu().numpy()
exact_dtype = True
expected = np.outer(a_np, b_np)
self.assertEqual(torch.outer(a, b), expected, exact_dtype=False)
self.assertEqual(torch.Tensor.outer(a, b), expected, exact_dtype=False)
self.assertEqual(torch.ger(a, b), expected, exact_dtype=False)
self.assertEqual(torch.Tensor.ger(a, b), expected, exact_dtype=False)
# test out variant
out = torch.empty(a.size(0), b.size(0), device=device, dtype=dtype)
torch.outer(a, b, out=out)
self.assertEqual(out, expected, exact_dtype=False)
out = torch.empty(a.size(0), b.size(0), device=device, dtype=dtype)
torch.ger(a, b, out=out)
self.assertEqual(out, expected, exact_dtype=False)
a = torch.randn(50).to(device=device, dtype=dtype)
b = torch.randn(50).to(device=device, dtype=dtype)
run_test_case(a, b)
# test 0 strided tensor
zero_strided = torch.randn(1).to(device=device, dtype=dtype).expand(50)
run_test_case(zero_strided, b)
run_test_case(a, zero_strided)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_linalg_lstsq(self, device, dtype):
from torch.testing._internal.common_utils import random_well_conditioned_matrix
if self.device_type == 'cpu':
drivers = ('gels', 'gelsy', 'gelsd', 'gelss', None)
else:
drivers = ('gels', None)
def check_solution_correctness(a, b, sol):
sol2 = a.pinverse() @ b
self.assertEqual(sol, sol2, atol=1e-5, rtol=1e-5)
def check_correctness_ref(a, b, res, ref, driver="default"):
def apply_if_not_empty(t, f):
if t.numel():
return f(t)
else:
return t
def select_if_not_empty(t, i):
selected = apply_if_not_empty(t, lambda x: x.select(0, i))
return selected
m = a.size(-2)
n = a.size(-1)
nrhs = b.size(-1)
batch_size = int(np.prod(a.shape[:-2]))
if batch_size == 0:
batch_size = 1
a_3d = a.view(batch_size, m, n)
b_3d = b.view(batch_size, m, nrhs)
solution_3d = res.solution.view(batch_size, n, nrhs)
residuals_2d = apply_if_not_empty(res.residuals, lambda t: t.view(-1, nrhs))
rank_1d = apply_if_not_empty(res.rank, lambda t: t.view(-1))
singular_values_2d = res.singular_values.view(batch_size, res.singular_values.shape[-1])
if a.numel() > 0:
for i in range(batch_size):
sol, residuals, rank, singular_values = ref(
a_3d.select(0, i).numpy(),
b_3d.select(0, i).numpy()
)
# Singular values are None when lapack_driver='gelsy' in SciPy
if singular_values is None:
singular_values = []
self.assertEqual(sol, solution_3d.select(0, i), atol=1e-5, rtol=1e-5)
self.assertEqual(rank, select_if_not_empty(rank_1d, i), atol=1e-5, rtol=1e-5)
self.assertEqual(singular_values, singular_values_2d.select(0, i), atol=1e-5, rtol=1e-5)
# SciPy and NumPy operate only on non-batched input and
# return an empty array with shape (0,) if rank(a) != n
# in PyTorch the batched inputs are supported and
# matrices in the batched input can have different ranks
# we compute residuals only if all matrices have rank == n
# see https://github.com/pytorch/pytorch/issues/56483
if m > n:
if torch.all(rank_1d == n):
self.assertEqual(
residuals, select_if_not_empty(residuals_2d, i), atol=1e-5, rtol=1e-5, exact_dtype=False
)
else:
self.assertTrue(residuals_2d.numel() == 0)
else:
self.assertEqual(res.solution.shape, (*a.shape[:-2], n, nrhs))
self.assertEqual(res.rank.shape, a.shape[:-2])
# residuals are not always computed (and have non-zero shape)
if m > n and driver != "gelsy":
self.assertEqual(res.residuals.shape, (*a.shape[:-2], 0))
else:
self.assertEqual(res.residuals.shape, (0, ))
# singular_values are not always computed (and have non-zero shape)
if driver == "default" or driver == "gelsd" or driver == "gelss":
self.assertEqual(res.singular_values.shape, (*a.shape[:-2], min(m, n)))
else:
self.assertEqual(res.singular_values.shape, (0, ))
def check_correctness_scipy(a, b, res, driver, cond):
# SciPy provides 3 driver options: gelsd, gelss, gelsy
if TEST_SCIPY and driver in ('gelsd', 'gelss', 'gelsy'):
import scipy.linalg
def scipy_ref(a, b):
return scipy.linalg.lstsq(a, b, lapack_driver=driver, cond=cond)
check_correctness_ref(a, b, res, scipy_ref, driver=driver)
def check_correctness_numpy(a, b, res, driver, rcond):
# NumPy uses only gelsd routine
if driver == 'gelsd':
def numpy_ref(a, b):
return np.linalg.lstsq(a, b, rcond=rcond)
check_correctness_ref(a, b, res, numpy_ref)
version = torch.testing._internal.common_cuda._get_torch_cuda_version()
cusolver_available = (version >= (10, 2))
ms = [2 ** i for i in range(5)]
m_ge_n_sizes = [(m, m // 2) for m in ms] + [(m, m) for m in ms]
# cases m < n are only supported on CPU and for cuSOLVER path on CUDA
m_l_n_sizes = [(m // 2, m) for m in ms]
include_m_l_n_case = (cusolver_available or device == 'cpu')
matrix_sizes = m_ge_n_sizes + (m_l_n_sizes if include_m_l_n_case else [])
batches = [(), (2,), (2, 2), (2, 2, 2)]
# we generate matrices with singular values sampled from a normal distribution,
# that is why we use `cond=1.0`, the mean to cut roughly half of all
# the singular values and compare whether torch.linalg.lstsq agrees with
# SciPy and NumPy.
# if rcond is True then set value for it based on the used algorithm
# rcond == -1 or any other negative value forces LAPACK to use machine precision tolerance
rconds = (None, True, -1)
for batch, matrix_size, driver, rcond in itertools.product(batches, matrix_sizes, drivers, rconds):
# keep the rcond value if it is None or -1, set the driver specific value if it is True
if rcond and rcond != -1:
if driver in ('gelss', 'gelsd'):
# SVD based algorithm; set to zero roughly half of all the singular values
rcond = 1.0
else:
# driver == 'gelsy'
# QR based algorithm; setting the value too high might lead to non-unique solutions and flaky tests
rcond = 1e-4
# specifying rcond value has no effect for gels driver so no need to run the tests again
if driver == 'gels' and rcond is not None:
continue
shape = batch + matrix_size
a = random_well_conditioned_matrix(*shape, dtype=dtype, device=device)
b = torch.rand(*shape, dtype=dtype, device=device)
m = a.size(-2)
n = a.size(-1)
res = torch.linalg.lstsq(a, b, rcond=rcond, driver=driver)
sol = res.solution
# Only checks gelsd, gelss, gelsy drivers
check_correctness_scipy(a, b, res, driver, rcond)
# Only checks gelsd driver
check_correctness_numpy(a, b, res, driver, rcond)
# gels driver is not checked by comparing to NumPy or SciPy implementation
# because NumPy and SciPy do not implement this driver
if driver == 'gels' and rcond is None:
check_solution_correctness(a, b, sol)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_linalg_lstsq_batch_broadcasting(self, device, dtype):
from torch.testing._internal.common_utils import random_well_conditioned_matrix
def check_correctness(a, b):
sol = torch.linalg.lstsq(a, b).solution
sol2 = a.pinverse() @ b
self.assertEqual(sol, sol2, rtol=1e-5, atol=1e-5)
ms = [2 ** i for i in range(5)]
batches = [(), (0,), (2,), (2, 2), (2, 2, 2)]
# the case when a single matrix is batch-broadcasted over the rhs
for m, batch in itertools.product(ms, batches):
a = random_well_conditioned_matrix(m, m, dtype=dtype, device=device).view(*([1] * len(batch)), m, m)
b = torch.rand(*(batch + (m, m)), dtype=dtype, device=device)
check_correctness(a, b)
# cases with broadcastable shapes
for m in ms:
a = random_well_conditioned_matrix(1, 3, 1, 3, m, m, dtype=dtype, device=device)
b = torch.rand(3, 1, 3, 1, m, m // 2, dtype=dtype, device=device)
check_correctness(a, b)
# rhs are vectors, not matrices in this test
b = torch.rand(3, 1, 3, 1, m, dtype=dtype, device=device)
# unsqueeze for b because `check_correctness` checks against
# a.pinverse() @ b, which requires b to be a matrix
check_correctness(a, b.unsqueeze(-1))
a = random_well_conditioned_matrix(3, 1, 3, 1, m, m, dtype=dtype, device=device)
b = torch.rand(1, 3, 1, 3, m, m // 2, dtype=dtype, device=device)
check_correctness(a, b)
# rhs are vectors, not matrices in this test
b = torch.rand(1, 3, 1, 3, m, dtype=dtype, device=device)
check_correctness(a, b.unsqueeze(-1))
@skipCPUIfNoLapack
    @skipCUDAIfNoMagma
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_linalg_lstsq_input_checks(self, device, dtype):
        """Validate torch.linalg.lstsq input handling.

        Covers empty batch/matrix shapes, dimension-mismatch errors,
        cross-device and cross-dtype operand errors, invalid `driver` values,
        and the CUDA restriction to overdetermined systems (pre-cuSOLVER).
        """
        # check empty inputs
        # empty batches
        a = torch.rand(0, 0, 3, 3, dtype=dtype, device=device)
        b = torch.rand(0, 0, 3, 2, dtype=dtype, device=device)
        self.assertEqual(
            torch.linalg.lstsq(a, b)[0],
            torch.zeros(0, 0, 3, 2, dtype=dtype, device=device)
        )
        # empty a and b
        a = torch.rand(2, 2, 0, 0, dtype=dtype, device=device)
        b = torch.rand(2, 2, 0, 0, dtype=dtype, device=device)
        self.assertEqual(
            torch.linalg.lstsq(a, b)[0],
            torch.zeros(2, 2, 0, 0, dtype=dtype, device=device)
        )
        # empty a and b
        a = torch.rand(2, 2, 3, 0, dtype=dtype, device=device)
        b = torch.rand(2, 2, 3, 0, dtype=dtype, device=device)
        self.assertEqual(
            torch.linalg.lstsq(a, b)[0],
            torch.zeros(2, 2, 0, 0, dtype=dtype, device=device)
        )
        # empty a but not b
        a = torch.rand(2, 2, 3, 0, dtype=dtype, device=device)
        b = torch.rand(2, 2, 3, 2, dtype=dtype, device=device)
        self.assertEqual(
            torch.linalg.lstsq(a, b)[0],
            torch.zeros(2, 2, 0, 2, dtype=dtype, device=device)
        )
        # empty a and b
        if torch.device(device).type == 'cpu':
            # only CPU since CUDA does not support overdetermined systems
            a = torch.rand(2, 2, 0, 3, dtype=dtype, device=device)
            b = torch.rand(2, 2, 0, 3, dtype=dtype, device=device)
            self.assertEqual(
                torch.linalg.lstsq(a, b)[0],
                torch.zeros(2, 2, 3, 3, dtype=dtype, device=device)
            )
        # dimension/shape validation errors
        a = torch.rand(2, 3, dtype=dtype, device=device)
        b = torch.rand(3, dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, 'input must have at least 2 dimensions'):
            torch.linalg.lstsq(b, b)
        with self.assertRaisesRegex(RuntimeError, 'other must have at least 1 dimension'):
            torch.linalg.lstsq(a, torch.tensor(1, dtype=dtype, device=device))
        with self.assertRaisesRegex(RuntimeError, r'input.size\(-2\) should match other.size\(-1\)'):
            torch.linalg.lstsq(a, b)
        with self.assertRaisesRegex(RuntimeError, r'input.size\(-2\) should match other.size\(-2\)'):
            torch.linalg.lstsq(a, b.unsqueeze(-1))
        def complement_device(device):
            # Return a device different from `device` (when possible) so the
            # cross-device error path below can be exercised.
            if device == 'cpu' and torch.cuda.is_available():
                return 'cuda'
            else:
                return 'cpu'
        # operands on different devices must be rejected
        a = torch.rand(2, 2, 2, 2, dtype=dtype, device=device)
        b = torch.rand(2, 2, 2, dtype=dtype, device=complement_device(device))
        if a.device != b.device:
            with self.assertRaisesRegex(RuntimeError, 'be on the same device'):
                torch.linalg.lstsq(a, b)
        # operands with different dtypes must be rejected
        b = (torch.rand(2, 2, 2, dtype=dtype, device=device) * 100).long()
        with self.assertRaisesRegex(RuntimeError, 'the same dtype'):
            torch.linalg.lstsq(a, b)
        # invalid `driver` values: message differs between CPU and CUDA
        a = torch.rand(2, 2, 2, 2, dtype=dtype, device=device)
        b = torch.rand(2, 2, 2, dtype=dtype, device=device)
        if device != 'cpu':
            with self.assertRaisesRegex(RuntimeError, '`driver` other than `gels` is not supported on CUDA'):
                torch.linalg.lstsq(a, b, driver='fictitious_driver')
        # if on cpu
        else:
            with self.assertRaisesRegex(RuntimeError, r'parameter `driver` should be one of \(gels, gelsy, gelsd, gelss\)'):
                torch.linalg.lstsq(a, b, driver='fictitious_driver')
        # cuSOLVER path supports underdetermined systems
        version = torch.testing._internal.common_cuda._get_torch_cuda_version()
        cusolver_not_available = (version < (10, 1))
        if device != 'cpu' and cusolver_not_available:
            a = torch.rand(2, 3, dtype=dtype, device=device)
            b = torch.rand(2, 1, dtype=dtype, device=device)
            with self.assertRaisesRegex(RuntimeError, r'only overdetermined systems'):
                torch.linalg.lstsq(a, b)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_cholesky(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
def run_test(shape, batch, contiguous):
A = random_hermitian_pd_matrix(shape, *batch, dtype=dtype, device=device)
if A.numel() > 0 and not contiguous:
A = A.mT
self.assertFalse(A.is_contiguous())
expected_L = np.linalg.cholesky(A.cpu().numpy())
actual_L = torch.linalg.cholesky(A)
# For fp32 individual entries in matrices can differ between PyTorch and NumPy
# Let's compare the norms of matrices instead
if A.numel() > 0 and dtype in [torch.float32, torch.complex64]:
# axis is specified to calculate matrix norm for batched input
expected_norm = np.linalg.norm(expected_L, ord=1, axis=(-2, -1))
actual_norm = torch.linalg.norm(actual_L, ord=1, axis=(-2, -1))
# Compare the norms with standard tolerances
self.assertEqual(actual_norm, expected_norm)
# and individual values with a higher tolerance
self.assertEqual(actual_L, expected_L, atol=1e-2, rtol=1e-5)
else:
self.assertEqual(actual_L, expected_L)
shapes = (0, 3, 5)
batches = ((), (3, ), (2, 2))
larger_input_case = [(100, (5, ), True)]
for shape, batch, contiguous in list(itertools.product(shapes, batches, (True, False))) + larger_input_case:
run_test(shape, batch, contiguous)
# check the out= variant
A = random_hermitian_pd_matrix(3, 3, dtype=dtype, device=device)
out = torch.empty_like(A)
ans = torch.linalg.cholesky(A, out=out)
self.assertEqual(ans, out)
expected = torch.linalg.cholesky(A)
self.assertEqual(expected, out)
# check the upper= variant
expected = torch.linalg.cholesky(A).mH
actual = torch.linalg.cholesky(A, upper=True)
self.assertEqual(expected, actual)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_cholesky_errors_and_warnings(self, device, dtype):
        """Exercise torch.linalg.cholesky's failure modes: non-square input,
        <2-D input, non-positive-definite input (single and batched), and
        out= tensors with wrong shape (warning), dtype, or device (errors).
        """
        from torch.testing._internal.common_utils import random_hermitian_pd_matrix
        # cholesky requires the input to be a square matrix or batch of square matrices
        A = torch.randn(2, 3, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, r'must be batches of square matrices'):
            torch.linalg.cholesky(A)
        A = torch.randn(2, 2, 3, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, r'must be batches of square matrices'):
            torch.linalg.cholesky(A)
        # NumPy rejects the same input (with its own message)
        with self.assertRaisesRegex(np.linalg.LinAlgError, r'Last 2 dimensions of the array must be square'):
            np.linalg.cholesky(A.cpu().numpy())
        # cholesky requires the input to be at least 2 dimensional tensor
        A = torch.randn(2, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, r'must have at least 2 dimensions'):
            torch.linalg.cholesky(A)
        with self.assertRaisesRegex(np.linalg.LinAlgError,
                                    r'1-dimensional array given\. Array must be at least two-dimensional'):
            np.linalg.cholesky(A.cpu().numpy())
        # if the input matrix is not positive definite, an error should be raised
        A = torch.eye(3, 3, dtype=dtype, device=device)
        A[-1, -1] = 0  # Now A is not positive definite
        with self.assertRaisesRegex(RuntimeError, r'minor of order 3 is not positive-definite'):
            torch.linalg.cholesky(A)
        with self.assertRaisesRegex(np.linalg.LinAlgError, r'Matrix is not positive definite'):
            np.linalg.cholesky(A.cpu().numpy())
        # if at least one matrix in the batch is singular, an error should be raised
        A = torch.eye(3, 3, dtype=dtype, device=device)
        A = A.reshape((1, 3, 3))
        A = A.repeat(5, 1, 1)
        A[4, -1, -1] = 0  # Now A[4] is not positive definite
        with self.assertRaisesRegex(RuntimeError, r'\(Batch element 4\): The factorization could not be completed'):
            torch.linalg.cholesky(A)
        # if out tensor with wrong shape is passed a warning is given
        A = random_hermitian_pd_matrix(3, dtype=dtype, device=device)
        out = torch.empty(2, 3, dtype=dtype, device=device)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.cholesky(A, out=out)
            # Check warning occurs (exactly one resize warning expected)
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # dtypes should be safely castable
        out = torch.empty(*A.shape, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
            torch.linalg.cholesky(A, out=out)
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, device=wrong_device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "Expected result and input tensors to be on the same device"):
                torch.linalg.cholesky(A, out=out)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float64, torch.complex128)
def test_cholesky_hermitian_grad(self, device, dtype):
# Check that the gradient is Hermitian (or symmetric)
def run_test(shape):
root = torch.rand(*shape, dtype=dtype, device=device)
root = torch.matmul(root, root.mH)
root.requires_grad_()
chol = torch.linalg.cholesky(root).sum().backward()
self.assertEqual(root.grad, root.grad.mH)
shapes = ((3, 3), (1, 1, 3, 3))
for shape in shapes:
run_test(shape)
# NOTE: old_cholesky* tests were moved here from test_torch.py and test_autograd.py
@slowTest
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double)
def test_old_cholesky_batched_many_batches(self, device, dtype):
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
def cholesky_test_helper(n, batchsize, device, upper):
A = random_symmetric_pd_matrix(n, batchsize, dtype=dtype, device=device)
chol_fact = torch.cholesky(A, upper=upper)
if upper:
# Correctness check
self.assertEqual(A, chol_fact.mT.matmul(chol_fact))
# Upper triangular check
self.assertEqual(chol_fact, chol_fact.triu())
else:
# Correctness check
self.assertEqual(A, chol_fact.matmul(chol_fact.mT))
# Lower triangular check
self.assertEqual(chol_fact, chol_fact.tril())
for upper, batchsize in itertools.product([True, False], [262144, 524288]):
cholesky_test_helper(2, batchsize, device, upper)
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_old_cholesky_batched(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
def cholesky_test_helper(n, batch_dims, upper):
A = random_hermitian_pd_matrix(n, *batch_dims, dtype=dtype, device=device)
cholesky_exp = torch.stack([m.cholesky(upper=upper) for m in A.reshape(-1, n, n)])
cholesky_exp = cholesky_exp.reshape_as(A)
self.assertEqual(cholesky_exp, torch.cholesky(A, upper=upper))
for upper, batchsize in itertools.product([True, False], [(3,), (3, 4), (2, 3, 4)]):
cholesky_test_helper(3, batchsize, upper)
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@tf32_on_and_off(0.01)
def test_old_cholesky(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
A = random_hermitian_pd_matrix(10, dtype=dtype, device=device)
# default Case
C = torch.cholesky(A)
B = torch.mm(C, C.t().conj())
self.assertEqual(A, B, atol=1e-14, rtol=0)
# test Upper Triangular
U = torch.cholesky(A, True)
B = torch.mm(U.t().conj(), U)
self.assertEqual(A, B, atol=1e-14, rtol=0, msg='cholesky (upper) did not allow rebuilding the original matrix')
# test Lower Triangular
L = torch.cholesky(A, False)
B = torch.mm(L, L.t().conj())
self.assertEqual(A, B, atol=1e-14, rtol=0, msg='cholesky (lower) did not allow rebuilding the original matrix')
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_old_cholesky_empty(self, device, dtype):
def run_test(upper):
A = torch.empty(0, 0, dtype=dtype, device=device)
chol = torch.cholesky(A, upper)
chol_A = torch.matmul(chol, chol.t().conj())
self.assertEqual(A, chol_A)
for upper in [True, False]:
run_test(upper)
# Test for issue
# https://github.com/pytorch/pytorch/issues/57032
# torch.cholesky with upper=True for batched CUDA inputs was wrong
# it was using the lower triangular part instead of the upper one
@onlyCUDA
@skipCUDAIfNoMagma
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_old_cholesky_batched_upper(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
batchsize = 2
A = random_hermitian_pd_matrix(3, batchsize, dtype=dtype, device=device)
A_triu = A.triu() # fill the lower triangular part with zero
U = torch.cholesky(A_triu, upper=True)
reconstruct_A = U.mH @ U
self.assertEqual(A, reconstruct_A)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_cholesky_ex(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
def run_test(n, batch):
A = random_hermitian_pd_matrix(n, *batch, dtype=dtype, device=device)
expected_L = np.linalg.cholesky(A.cpu().numpy())
expected_info = torch.zeros(A.shape[:-2], dtype=torch.int32, device=device)
actual_L, actual_info = torch.linalg.cholesky_ex(A)
# For fp32 individual entries in matrices can differ between PyTorch and NumPy
# Let's compare the norms of matrices instead
if A.numel() > 0 and dtype in [torch.float32, torch.complex64]:
# axis is specified to calculate matrix norm for batched input
expected_norm = np.linalg.norm(expected_L, ord=1, axis=(-2, -1))
actual_norm = torch.linalg.norm(actual_L, ord=1, axis=(-2, -1))
# Compare the norms with standard tolerances
self.assertEqual(actual_norm, expected_norm)
# and individual values with a higher tolerance
self.assertEqual(actual_L, expected_L, atol=1e-2, rtol=1e-5)
else:
self.assertEqual(actual_L, expected_L)
self.assertEqual(actual_info, expected_info)
ns = (0, 3, 5)
batches = ((), (2, ), (2, 1))
for n, batch in itertools.product(ns, batches):
run_test(n, batch)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_cholesky_ex_non_pd(self, device, dtype):
# if the input matrix is not positive definite, info with positive integer is returned
A = torch.eye(3, 3, dtype=dtype, device=device)
A[-1, -1] = 0 # Now A is singular
_, info = torch.linalg.cholesky_ex(A)
self.assertEqual(info, 3)
with self.assertRaisesRegex(RuntimeError, r'minor of order 3 is not positive-definite'):
torch.linalg.cholesky_ex(A, check_errors=True)
# if at least one matrix in the batch is not positive definite,
# batched info with positive integer for the corresponding matrix is returned
A = torch.eye(3, 3, dtype=dtype, device=device)
A = A.reshape((1, 3, 3))
A = A.repeat(5, 1, 1)
A[3, -2, -2] = 0 # Now A[3] is singular
_, info = torch.linalg.cholesky_ex(A)
expected_info = torch.zeros(A.shape[:-2], dtype=torch.int32, device=device)
expected_info[3] = 2
self.assertEqual(info, expected_info)
with self.assertRaisesRegex(RuntimeError, r'\(Batch element 3\): The factorization could not be completed'):
torch.linalg.cholesky_ex(A, check_errors=True)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_cholesky_ex_out_info_error(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
# dtype for info must be torch.int32
A = random_hermitian_pd_matrix(3, dtype=dtype, device=device)
L = torch.empty(A.shape, dtype=dtype, device=device)
info = torch.empty(A.shape[:-2], dtype=torch.int64, device=device)
with self.assertRaisesRegex(RuntimeError, "but got info with dtype Long"):
torch.linalg.cholesky_ex(A, out=(L, info))
@onlyCPU
@skipCPUIfNoLapack
@dtypes(torch.float64, torch.complex128)
def test_old_cholesky_autograd(self, device, dtype):
def func(root, upper):
x = 0.5 * (root + root.mH)
return torch.cholesky(x, upper)
def run_test(upper, dims):
root = torch.rand(*dims, dtype=dtype, device=device, requires_grad=True)
root = root + torch.eye(dims[-1])
gradcheck(func, [root, upper])
gradgradcheck(func, [root, upper])
root = torch.rand(*dims, dtype=dtype, device=device)
root = torch.matmul(root, root.mH)
root.requires_grad_()
chol = root.cholesky().sum().backward()
self.assertEqual(root.grad, root.grad.mH) # Check the gradient is hermitian
for upper, dims in itertools.product([True, False], [(3, 3), (4, 3, 2, 2)]):
run_test(upper, dims)
    def _test_addr_vs_numpy(self, device, dtype, beta=1, alpha=1):
        """Compare torch.addr(m, a, b, beta=..., alpha=...) against a NumPy
        reference built with np.outer, for the given beta/alpha scalars.

        Also exercises the out= variant, a transposed `m`, a zero-strided `a`,
        a scalar (0-d) `m`, and — when beta == 0 — that NaN/Inf values in `m`
        do not leak into the result.
        """
        def check(m, a, b, beta, alpha):
            if dtype == torch.bfloat16:
                # NumPy has no bfloat16: build the reference in float64 and
                # relax the dtype check in the comparison.
                a_np = a.to(torch.double).cpu().numpy()
                b_np = b.to(torch.double).cpu().numpy()
                m_np = m.to(torch.double).cpu().numpy()
                exact_dtype = False
            else:
                a_np = a.cpu().numpy()
                b_np = b.cpu().numpy()
                m_np = m.cpu().numpy()
                exact_dtype = True
            if beta == 0:
                # With beta == 0, `m` must not contribute at all (so NaN/Inf
                # entries in it are ignored).
                expected = alpha * np.outer(a_np, b_np)
            else:
                expected = beta * m_np + alpha * np.outer(a_np, b_np)

            res = torch.addr(m, a, b, beta=beta, alpha=alpha)
            self.assertEqual(res, expected, exact_dtype=exact_dtype)

            # Test out variant
            out = torch.empty_like(res)
            torch.addr(m, a, b, beta=beta, alpha=alpha, out=out)
            self.assertEqual(out, expected, exact_dtype=exact_dtype)

        m = make_tensor((50, 50), device=device, dtype=dtype, low=-2, high=2)
        a = make_tensor((50,), device=device, dtype=dtype, low=-2, high=2)
        b = make_tensor((50,), device=device, dtype=dtype, low=-2, high=2)

        check(m, a, b, beta, alpha)

        # test transpose
        m_transpose = torch.transpose(m, 0, 1)
        check(m_transpose, a, b, beta, alpha)

        # test 0 strided tensor
        zero_strided = make_tensor((1,), device=device, dtype=dtype, low=-2, high=2).expand(50)
        check(m, zero_strided, b, beta, alpha)

        # test scalar
        m_scalar = torch.tensor(1, device=device, dtype=dtype)
        check(m_scalar, a, b, beta, alpha)

        # test nans and infs are not propagated to the output when beta == 0
        float_and_complex_dtypes = get_all_fp_dtypes() + get_all_complex_dtypes()
        if beta == 0 and dtype in float_and_complex_dtypes:
            m[0][10] = m[10][10] = m[20][20] = float('inf')
            m[1][10] = m[11][10] = m[21][20] = float('nan')
            check(m, a, b, 0, alpha)
@dtypes(torch.bool)
def test_addr_bool(self, device, dtype):
self._test_addr_vs_numpy(device, dtype, beta=True, alpha=False)
self._test_addr_vs_numpy(device, dtype, beta=False, alpha=True)
self._test_addr_vs_numpy(device, dtype, beta=False, alpha=False)
self._test_addr_vs_numpy(device, dtype, beta=True, alpha=True)
    @dtypes(*(get_all_int_dtypes()))
    def test_addr_integral(self, device, dtype):
        """For integral result dtypes, torch.addr must reject float and
        Boolean beta/alpha scalars and accept integer ones."""
        with self.assertRaisesRegex(RuntimeError,
                                    'argument beta must not be a floating point number.'):
            self._test_addr_vs_numpy(device, dtype, beta=2., alpha=1)
        with self.assertRaisesRegex(RuntimeError,
                                    'argument alpha must not be a floating point number.'):
            self._test_addr_vs_numpy(device, dtype, beta=2, alpha=1.)
        with self.assertRaisesRegex(RuntimeError,
                                    'Boolean beta only supported for Boolean results.'):
            self._test_addr_vs_numpy(device, dtype, beta=True, alpha=1)
        with self.assertRaisesRegex(RuntimeError,
                                    'Boolean alpha only supported for Boolean results.'):
            self._test_addr_vs_numpy(device, dtype, beta=2, alpha=True)
        # when beta is zero
        self._test_addr_vs_numpy(device, dtype, beta=0, alpha=2)
        # when beta is not zero
        self._test_addr_vs_numpy(device, dtype, beta=2, alpha=2)
    @precisionOverride({torch.bfloat16: 1e-1})
    @dtypes(*(get_all_fp_dtypes() + get_all_complex_dtypes()))
    def test_addr_float_and_complex(self, device, dtype):
        """For float/complex result dtypes, torch.addr must reject Boolean
        beta/alpha and accept numeric (and, for complex results, complex)
        scalars."""
        with self.assertRaisesRegex(RuntimeError,
                                    'Boolean beta only supported for Boolean results.'):
            self._test_addr_vs_numpy(device, dtype, beta=True, alpha=1)
        with self.assertRaisesRegex(RuntimeError,
                                    'Boolean alpha only supported for Boolean results.'):
            self._test_addr_vs_numpy(device, dtype, beta=2, alpha=True)
        # when beta is zero
        self._test_addr_vs_numpy(device, dtype, beta=0., alpha=2)
        # when beta is not zero
        self._test_addr_vs_numpy(device, dtype, beta=0.5, alpha=2)
        # complex beta/alpha only make sense for complex result dtypes
        if dtype in get_all_complex_dtypes():
            self._test_addr_vs_numpy(device, dtype, beta=(0 + 0.1j), alpha=(0.2 - 0.2j))
@dtypes(*itertools.product(get_all_dtypes(),
get_all_dtypes()))
def test_outer_type_promotion(self, device, dtypes):
a = torch.randn(5).to(device=device, dtype=dtypes[0])
b = torch.randn(5).to(device=device, dtype=dtypes[1])
for op in (torch.outer, torch.Tensor.outer, torch.ger, torch.Tensor.ger):
result = op(a, b)
self.assertEqual(result.dtype, torch.result_type(a, b))
# don't use @dtypes decorator to avoid generating ~1700 tests per device
def test_addr_type_promotion(self, device):
for dtypes0, dtypes1, dtypes2 in product(get_all_dtypes(), repeat=3):
a = make_tensor((5,), device=device, dtype=dtypes0, low=-2, high=2)
b = make_tensor((5,), device=device, dtype=dtypes1, low=-2, high=2)
m = make_tensor((5, 5), device=device, dtype=dtypes2, low=-2, high=2)
desired_dtype = torch.promote_types(torch.promote_types(dtypes0, dtypes1),
dtypes2)
for op in (torch.addr, torch.Tensor.addr):
result = op(m, a, b)
self.assertEqual(result.dtype, desired_dtype)
# Tests migrated from test_torch.py
# 1) test the shape of the result tensor when there is empty input tensor
# 2) test the Runtime Exception when there is scalar input tensor
def test_outer_ger_addr_legacy_tests(self, device):
for size in ((0, 0), (0, 5), (5, 0)):
a = torch.rand(size[0], device=device)
b = torch.rand(size[1], device=device)
self.assertEqual(torch.outer(a, b).shape, size)
self.assertEqual(torch.ger(a, b).shape, size)
m = torch.empty(size, device=device)
self.assertEqual(torch.addr(m, a, b).shape, size)
m = torch.randn(5, 6, device=device)
a = torch.randn(5, device=device)
b = torch.tensor(6, device=device)
self.assertRaises(RuntimeError, lambda: torch.outer(a, b))
self.assertRaises(RuntimeError, lambda: torch.outer(b, a))
self.assertRaises(RuntimeError, lambda: torch.ger(a, b))
self.assertRaises(RuntimeError, lambda: torch.ger(b, a))
self.assertRaises(RuntimeError, lambda: torch.addr(m, a, b))
self.assertRaises(RuntimeError, lambda: torch.addr(m, b, a))
# Tests torch.det and its alias, torch.linalg.det, vs. NumPy
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double, torch.cdouble)
def test_det(self, device, dtype):
tensors = (
torch.randn((2, 2), device=device, dtype=dtype),
torch.randn((129, 129), device=device, dtype=dtype),
torch.randn((3, 52, 52), device=device, dtype=dtype),
torch.randn((4, 2, 26, 26), device=device, dtype=dtype))
ops = (torch.det, torch.Tensor.det,
torch.linalg.det)
for t in tensors:
expected = np.linalg.det(t.cpu().numpy())
for op in ops:
actual = op(t)
self.assertEqual(actual, expected)
self.compare_with_numpy(op, np.linalg.det, t)
# NOTE: det requires a 2D+ tensor
t = torch.randn(1, device=device, dtype=dtype)
with self.assertRaises(RuntimeError):
op(t)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
    def test_eigh(self, device, dtype):
        """torch.linalg.eigh must match np.linalg.eigh (up to eigenvector
        phase) over shapes, batch dims, and both UPLO values; also checks the
        out= variant.
        """
        from torch.testing._internal.common_utils import random_hermitian_matrix

        def run_test(shape, batch, uplo):
            matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)
            expected_w, expected_v = np.linalg.eigh(matrix.cpu().numpy(), UPLO=uplo)
            actual_w, actual_v = torch.linalg.eigh(matrix, UPLO=uplo)
            self.assertEqual(actual_w, expected_w)
            # sign of eigenvectors is not unique and therefore absolute values are compared
            self.assertEqual(abs(actual_v), abs(expected_v))
            # additionally we can multiply the eigenvector with a phase factor e^{i\phi} and then compare the values
            # let's choose the convention that the first element of the eigenvectors from torch and numpy be the same
            # for real inputs, this phase factor is plus or minus one
            if matrix.numel() > 0:
                phase = torch.from_numpy(expected_v[..., 0, :]).to(device=device).div(actual_v[..., 0, :])
                actual_v_rotated = actual_v * phase.unsqueeze(-2).expand_as(actual_v)
                self.assertEqual(actual_v_rotated, expected_v)

            # check the out= variant
            out_w = torch.empty_like(actual_w)
            out_v = torch.empty_like(actual_v)
            ans_w, ans_v = torch.linalg.eigh(matrix, UPLO=uplo, out=(out_w, out_v))
            self.assertEqual(ans_w, out_w)
            self.assertEqual(ans_v, out_v)
            self.assertEqual(ans_w, actual_w)
            self.assertEqual(abs(ans_v), abs(actual_v))

        shapes = (0, 3, 5)
        batches = ((), (3, ), (2, 2))
        uplos = ["U", "L"]
        for shape, batch, uplo in itertools.product(shapes, batches, uplos):
            run_test(shape, batch, uplo)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
def test_eigh_lower_uplo(self, device, dtype):
def run_test(shape, batch, uplo):
# check lower case uplo
# use non-symmetric input to check whether uplo argument is working as intended
matrix = torch.randn(shape, shape, *batch, dtype=dtype, device=device)
expected_w, expected_v = np.linalg.eigh(matrix.cpu().numpy(), UPLO=uplo)
actual_w, actual_v = torch.linalg.eigh(matrix, UPLO=uplo)
self.assertEqual(actual_w, expected_w)
self.assertEqual(abs(actual_v), abs(expected_v))
uplos = ["u", "l"]
for uplo in uplos:
run_test(3, (2, 2), uplo)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_eigh_errors_and_warnings(self, device, dtype):
        """Exercise torch.linalg.eigh's failure modes: non-square input,
        invalid UPLO, and out= tensors with wrong shape (warning), dtype, or
        device (errors).
        """
        from torch.testing._internal.common_utils import random_hermitian_matrix
        # eigh requires a square matrix
        t = torch.randn(2, 3, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
            torch.linalg.eigh(t)
        # eigh requires 'uplo' parameter to be 'U' or 'L'
        t = torch.randn(3, 3, device=device, dtype=dtype)
        for uplo in ["a", "wrong"]:
            with self.assertRaisesRegex(RuntimeError, "be \'L\' or \'U\'"):
                torch.linalg.eigh(t, UPLO=uplo)
            with self.assertRaisesRegex(ValueError, "be \'L\' or \'U\'"):
                np.linalg.eigh(t.cpu().numpy(), UPLO=uplo)
        # if non-empty out tensor with wrong shape is passed a warning is given
        a = random_hermitian_matrix(3, dtype=dtype, device=device)
        # eigenvalues of a complex Hermitian matrix are real
        real_dtype = a.real.dtype if dtype.is_complex else dtype
        out_w = torch.empty(7, 7, dtype=real_dtype, device=device)
        out_v = torch.empty(7, 7, dtype=dtype, device=device)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.eigh(a, out=(out_w, out_v))
            # Check warning occurs (one resize warning per out tensor)
            self.assertEqual(len(w), 2)
            self.assertTrue("An output with one or more elements was resized" in str(w[-2].message))
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # dtypes should be safely castable
        out_w = torch.empty(0, dtype=real_dtype, device=device)
        out_v = torch.empty(0, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got eigenvectors with dtype Int"):
            torch.linalg.eigh(a, out=(out_w, out_v))
        out_w = torch.empty(0, dtype=torch.int, device=device)
        out_v = torch.empty(0, dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got eigenvalues with dtype Int"):
            torch.linalg.eigh(a, out=(out_w, out_v))
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out_w = torch.empty(0, device=wrong_device, dtype=dtype)
            out_v = torch.empty(0, device=device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.eigh(a, out=(out_w, out_v))
            out_w = torch.empty(0, device=device, dtype=dtype)
            out_v = torch.empty(0, device=wrong_device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.eigh(a, out=(out_w, out_v))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
def test_eigh_non_contiguous(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_matrix
def run_test(matrix, uplo):
self.assertFalse(matrix.is_contiguous())
expected_w, expected_v = np.linalg.eigh(matrix.cpu().numpy(), UPLO=uplo)
actual_w, actual_v = torch.linalg.eigh(matrix, UPLO=uplo)
self.assertEqual(actual_w, expected_w)
# sign of eigenvectors is not unique and therefore absolute values are compared
self.assertEqual(abs(actual_v), abs(expected_v))
def run_test_permuted(shape, batch, uplo):
# check for permuted / transposed inputs
matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)
matrix = matrix.mT
run_test(matrix, uplo)
def run_test_skipped_elements(shape, batch, uplo):
# check for inputs with skipped elements
matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)
matrix = matrix[::2]
run_test(matrix, uplo)
shapes = (3, 5)
batches = ((4, ), (4, 2))
uplos = ["U", "L"]
for shape, batch, uplo in itertools.product(shapes, batches, uplos):
run_test_permuted(shape, batch, uplo)
run_test_skipped_elements(shape, batch, uplo)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float64, torch.complex128)
def test_eigh_hermitian_grad(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_matrix
def run_test(dims, uplo):
x = random_hermitian_matrix(dims[-1], *dims[:-2], device=device, dtype=dtype).requires_grad_()
w, v = torch.linalg.eigh(x)
(w.sum() + abs(v).sum()).backward()
self.assertEqual(x.grad, x.grad.mH) # Check the gradient is Hermitian
for dims, uplo in itertools.product([(3, 3), (1, 1, 3, 3)], ["L", "U"]):
run_test(dims, uplo)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
def test_eigvalsh(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_matrix
def run_test(shape, batch, uplo):
matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)
expected_w = np.linalg.eigvalsh(matrix.cpu().numpy(), UPLO=uplo)
actual_w = torch.linalg.eigvalsh(matrix, UPLO=uplo)
self.assertEqual(actual_w, expected_w)
# check the out= variant
out = torch.empty_like(actual_w)
ans = torch.linalg.eigvalsh(matrix, UPLO=uplo, out=out)
self.assertEqual(ans, out)
self.assertEqual(ans, actual_w)
shapes = (0, 3, 5)
batches = ((), (3, ), (2, 2))
uplos = ["U", "L"]
for shape, batch, uplo in itertools.product(shapes, batches, uplos):
run_test(shape, batch, uplo)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_eigvalsh_errors_and_warnings(self, device, dtype):
        """Exercise torch.linalg.eigvalsh's failure modes: non-square input,
        invalid UPLO, and out= tensors with wrong shape (warning), dtype, or
        device (errors).
        """
        # eigvalsh requires a square matrix
        t = torch.randn(2, 3, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
            torch.linalg.eigvalsh(t)
        # eigvalsh requires 'uplo' parameter to be 'U' or 'L'
        t = torch.randn(3, 3, device=device, dtype=dtype)
        for uplo in ["a", "wrong"]:
            with self.assertRaisesRegex(RuntimeError, "be \'L\' or \'U\'"):
                torch.linalg.eigvalsh(t, UPLO=uplo)
            with self.assertRaisesRegex(ValueError, "be \'L\' or \'U\'"):
                np.linalg.eigvalsh(t.cpu().numpy(), UPLO=uplo)
        # if non-empty out tensor with wrong shape is passed a warning is given
        # eigenvalues of a complex Hermitian matrix are real
        real_dtype = t.real.dtype if dtype.is_complex else dtype
        out = torch.empty_like(t).to(real_dtype)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.eigvalsh(t, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # dtypes should be safely castable
        out = torch.empty(0, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
            torch.linalg.eigvalsh(t, out=out)
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, device=wrong_device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.eigvalsh(t, out=out)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
def test_eigvalsh_non_contiguous(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_matrix
def run_test(matrix, uplo):
self.assertFalse(matrix.is_contiguous())
expected_w = np.linalg.eigvalsh(matrix.cpu().numpy(), UPLO=uplo)
actual_w = torch.linalg.eigvalsh(matrix, UPLO=uplo)
self.assertEqual(actual_w, expected_w)
def run_test_permuted(shape, batch, uplo):
# check for permuted / transposed inputs
matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)
matrix = matrix.mT
run_test(matrix, uplo)
def run_test_skipped_elements(shape, batch, uplo):
# check for inputs with skipped elements
matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)
matrix = matrix[::2]
run_test(matrix, uplo)
shapes = (3, 5)
batches = ((4, ), (4, 2))
uplos = ["U", "L"]
for shape, batch, uplo in itertools.product(shapes, batches, uplos):
run_test_permuted(shape, batch, uplo)
run_test_skipped_elements(shape, batch, uplo)
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_kron(self, device, dtype):
def run_test_case(a_shape, b_shape):
a = torch.rand(a_shape, dtype=dtype, device=device)
b = torch.rand(b_shape, dtype=dtype, device=device)
expected = np.kron(a.cpu().numpy(), b.cpu().numpy())
result = torch.kron(a, b)
self.assertEqual(result, expected)
# check the out= variant
out = torch.empty_like(result)
ans = torch.kron(a, b, out=out)
self.assertEqual(ans, out)
self.assertEqual(ans, result)
shapes = [(4,), (2, 2), (1, 2, 3), (1, 2, 3, 3)]
for a_shape, b_shape in itertools.product(shapes, reversed(shapes)):
run_test_case(a_shape, b_shape)
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_kron_non_contiguous(self, device, dtype):
        """torch.kron must handle non-contiguous inputs and out= tensors, and
        preserve the memory format of its inputs."""
        def run_test_transposed(a_shape, b_shape):
            # check for transposed case
            a = torch.rand(a_shape, dtype=dtype, device=device).mT
            b = torch.rand(b_shape, dtype=dtype, device=device).mT
            self.assertFalse(a.is_contiguous())
            self.assertFalse(b.is_contiguous())
            expected = np.kron(a.cpu().numpy(), b.cpu().numpy())
            result = torch.kron(a, b)
            self.assertEqual(result, expected)
            # check the out= variant
            out = torch.empty(result.mT.shape, dtype=dtype, device=device).mT
            self.assertFalse(out.is_contiguous())
            ans = torch.kron(a, b, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(ans, result)
        def run_test_skipped_elements(a_shape, b_shape):
            # check for inputs with skipped elements
            a = torch.rand(2 * a_shape[0], *a_shape[1:], dtype=dtype, device=device)[::2]
            b = torch.rand(2 * b_shape[0], *b_shape[1:], dtype=dtype, device=device)[::2]
            self.assertFalse(a.is_contiguous())
            self.assertFalse(b.is_contiguous())
            expected = np.kron(a.cpu().numpy(), b.cpu().numpy())
            result = torch.kron(a, b)
            self.assertEqual(result, expected)
            # check the out= variant
            out = torch.empty(2 * result.shape[0], *result.shape[1:], dtype=dtype, device=device)[::2]
            self.assertFalse(out.is_contiguous())
            ans = torch.kron(a, b, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(ans, result)
        shapes = [(2, 2), (2, 2, 3), (2, 2, 3, 3)]
        for a_shape, b_shape in itertools.product(shapes, reversed(shapes)):
            # NOTE(review): the transposed-input check is disabled, leaving
            # run_test_transposed defined but never called — confirm whether
            # this is a known failure that should be tracked, or dead code to
            # re-enable/remove.
            # run_test_transposed(a_shape, b_shape)
            run_test_skipped_elements(a_shape, b_shape)
        # Test that kron preserves memory format
        a = torch.randn(1, 2, 3, 4, dtype=dtype, device=device).contiguous(memory_format=torch.channels_last)
        b = torch.randn(1, 2, 3, 4, dtype=dtype, device=device).contiguous(memory_format=torch.channels_last)
        c = torch.kron(a, b)
        self.assertTrue(c.is_contiguous(memory_format=torch.channels_last))
        torch.kron(a, b, out=c)
        self.assertTrue(c.is_contiguous(memory_format=torch.channels_last))
        c = c.contiguous(memory_format=torch.contiguous_format)
        torch.kron(a, b, out=c)
        self.assertTrue(c.is_contiguous(memory_format=torch.contiguous_format))
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_kron_empty(self, device, dtype):
    """torch.kron must handle empty operands; np.kron is the reference where it supports them."""
    for degenerate_shape in [(0,), (2, 0), (1, 0, 3)]:
        identity = torch.eye(3, dtype=dtype, device=device)
        hollow = torch.empty(degenerate_shape, dtype=dtype, device=device)
        reference = np.kron(identity.cpu().numpy(), hollow.cpu().numpy())
        self.assertEqual(torch.kron(identity, hollow), reference)
        # NumPy doesn't work if the first argument is empty, so only the
        # result shape is checked for the swapped argument order.
        swapped = torch.kron(hollow, identity)
        self.assertEqual(swapped.shape, reference.shape)
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_kron_errors_and_warnings(self, device, dtype):
    """torch.kron with out=: a wrong-shaped non-empty out warns (it is resized); a mismatched out dtype raises."""
    # if non-empty out tensor with wrong shape is passed a warning is given
    a = torch.eye(3, dtype=dtype, device=device)
    b = torch.ones((2, 2), dtype=dtype, device=device)
    out = torch.empty_like(a)
    with warnings.catch_warnings(record=True) as w:
        # Trigger warning
        torch.kron(a, b, out=out)
        # Check warning occurs
        self.assertEqual(len(w), 1)
        self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))

    # dtypes should match
    out = torch.empty_like(a).to(torch.int)
    with self.assertRaisesRegex(RuntimeError, "can't be cast to the desired output type"):
        torch.kron(a, b, out=out)
# This test confirms that torch.linalg.norm's dtype argument works
# as expected, according to the function's documentation
@skipCUDAIfNoMagma
def test_norm_dtype(self, device):
    """Exercise torch.linalg.norm's dtype= and out= dtype handling across float/complex dtypes and vector/matrix orders."""
    def run_test_case(input_size, ord, keepdim, from_dtype, to_dtype):
        # Determine the best dtype to use for comparisons between tensors
        # of two different types
        def get_compare_dtype(type0, type1):
            types_32bit_based = [torch.float, torch.cfloat]
            is_complex = type0.is_complex or type1.is_complex
            if type0 in types_32bit_based or type1 in types_32bit_based:
                return torch.cfloat if is_complex else torch.float
            else:
                return torch.cdouble if is_complex else torch.double

        compare_dtype = get_compare_dtype(from_dtype, to_dtype)

        # Map a complex dtype to its real value type (cfloat -> float, etc.).
        def get_value_type(dtype):
            if dtype == torch.cfloat:
                return torch.float
            elif dtype == torch.cdouble:
                return torch.double
            elif dtype == torch.complex32:
                return torch.float16
            else:
                return dtype

        msg = (
            f'input_size={input_size}, ord={ord}, keepdim={keepdim}, '
            f'from_dtype={from_dtype}, to_dtype={to_dtype}')
        input = torch.randn(*input_size, dtype=from_dtype, device=device)
        result = torch.linalg.norm(input, ord, keepdim=keepdim)
        if from_dtype.is_complex:
            # By default, norm downgrades a complex input to the corresponding real number type
            self.assertEqual(result.dtype, get_value_type(from_dtype), msg=msg)
        else:
            self.assertEqual(result.dtype, from_dtype, msg=msg)

        # out= with a different dtype: the result must land in out's dtype
        # and agree numerically with the default-dtype result.
        result_out = torch.empty((0), dtype=to_dtype, device=device)
        torch.linalg.norm(input, ord, keepdim=keepdim, out=result_out)
        self.assertEqual(result_out.dtype, to_dtype, msg=msg)
        self.assertEqual(result.to(compare_dtype), result_out.to(compare_dtype), msg=msg)

        # dtype= argument: result dtype must be exactly to_dtype.
        result_with_dtype = torch.linalg.norm(input, ord, keepdim=keepdim, dtype=to_dtype)
        self.assertEqual(result_with_dtype.dtype, to_dtype, msg=msg)

        if from_dtype.is_complex:
            # dtype= must be equivalent to converting the input first.
            result_convert_first = torch.linalg.norm(input.to(to_dtype), ord, keepdim=keepdim)
            self.assertEqual(result_with_dtype.to(compare_dtype), result_convert_first.to(compare_dtype), msg=msg)
        else:
            self.assertEqual(result.to(compare_dtype), result_with_dtype.to(compare_dtype), msg=msg)

        # dtype= combined with a matching out=.
        result_out_with_dtype = torch.empty_like(result_with_dtype)
        torch.linalg.norm(input, ord, keepdim=keepdim, dtype=to_dtype, out=result_out_with_dtype)
        self.assertEqual(result_out_with_dtype.dtype, to_dtype, msg=msg)
        self.assertEqual(result_with_dtype, result_out_with_dtype, msg=msg)

    ord_vector = [0, 0.1, -0.1, 1, -1, 2, -2, 3, -3, 4.5, -4.5, inf, -inf, None]
    ord_matrix = ['fro', 'nuc', 1, -1, 2, -2, inf, -inf, None]
    S = 10
    test_cases = [
        ((S, ), ord_vector),
        ((S, S), ord_matrix),
    ]
    for keepdim in [True, False]:
        for input_size, ord_settings in test_cases:
            for ord in ord_settings:
                # orders 2/-2/'nuc' require LAPACK on CPU
                if self.device_type == 'cpu' and not torch._C.has_lapack and ord in [2, -2, 'nuc']:
                    continue
                dtypes = [torch.float, torch.double, torch.cfloat, torch.cdouble]
                for from_dtype, to_dtype in itertools.product(dtypes, dtypes):
                    # complex -> real downcasts are not tested here
                    if from_dtype.is_complex and not to_dtype.is_complex:
                        continue
                    run_test_case(input_size, ord, keepdim, from_dtype, to_dtype)

    # Make sure that setting dtype != out.dtype raises an error
    dtype_pairs = [
        (torch.float, torch.double),
        (torch.double, torch.float),
        (torch.cfloat, torch.cdouble),
        (torch.cdouble, torch.cfloat),
    ]
    for keepdim in [True, False]:
        for input_size, ord_settings in test_cases:
            for ord in ord_settings:
                for dtype, out_dtype in dtype_pairs:
                    input = torch.rand(*input_size)
                    result = torch.tensor([]).to(out_dtype)
                    with self.assertRaisesRegex(RuntimeError, r'provided dtype must match dtype of result'):
                        torch.linalg.norm(input, ord=ord, keepdim=keepdim, dtype=dtype, out=result)
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble, torch.bfloat16, torch.float16)
def test_vector_norm(self, device, dtype):
    """Compare torch.linalg.vector_norm against torch.linalg.norm on a flattened tensor, including dtype=, out=, and error cases on empty inputs."""
    # This test compares torch.linalg.vector_norm's output with
    # torch.linalg.norm given a flattened tensor
    ord_vector = [0, 0.9, 1, 2, 3, inf, -0.5, -1, -2, -3, -inf]
    input_sizes = [
        (10, ),
        (4, 5),
        (3, 4, 5),
        (0, ),
        (0, 10),
        (0, 0),
        (10, 0, 10),
    ]

    def vector_norm_reference(input, ord, dim=None, keepdim=False, dtype=None):
        # With dim=None vector_norm flattens the input, so the reference
        # flattens before calling linalg.norm.
        if dim is None:
            input_maybe_flat = input.flatten(0, -1)
        else:
            input_maybe_flat = input

        result = torch.linalg.norm(input_maybe_flat, ord, dim=dim, keepdim=keepdim, dtype=dtype)
        if keepdim and dim is None:
            # vector_norm keeps all dims as size-1 in this case.
            result = result.reshape([1] * input.dim())
        return result

    def run_test_case(input, ord, dim, keepdim, norm_dtype):
        msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}, norm_dtype={norm_dtype}'
        # Empty inputs: negative orders and inf over an empty dimension are
        # documented errors for vector_norm.
        error_msg = None
        if input.numel() == 0:
            if ord < 0:
                error_msg = r'linalg.vector_norm of negative order cannot be performed on an empty tensor'
            elif ord == inf and (dim is None or input.size(dim) == 0):
                error_msg = (
                    r'linalg.vector_norm cannot compute the infinity norm on an empty '
                    r'dimension because the operation does not have an identity')
        if error_msg is None:
            result_dtype_reference = vector_norm_reference(input, ord, dim=dim, keepdim=keepdim, dtype=norm_dtype)
            result_dtype = torch.linalg.vector_norm(input, ord, dim=dim, keepdim=keepdim, dtype=norm_dtype)
            self.assertEqual(result_dtype, result_dtype_reference, msg=msg)

            if norm_dtype is not None:
                # dtype= must be equivalent to converting the input first;
                # out= must agree with that.
                result_convert_before = torch.linalg.vector_norm(input.to(norm_dtype), ord, dim=dim, keepdim=keepdim)
                if norm_dtype.is_complex:
                    result_convert_before = result_convert_before.to(norm_dtype)

                result_out = torch.empty((0), dtype=norm_dtype, device=device)
                torch.linalg.vector_norm(input, ord, dtype=norm_dtype, dim=dim, keepdim=keepdim, out=result_out)
                self.assertEqual(result_convert_before, result_out, msg=msg)
            else:
                result_out = torch.empty((0), dtype=result_dtype.dtype, device=device)
                torch.linalg.vector_norm(input, ord, dim=dim, keepdim=keepdim, out=result_out)
                self.assertEqual(result_dtype, result_out, msg=msg)
        else:
            # Both the reference and vector_norm must raise.
            with self.assertRaises(RuntimeError):
                vector_norm_reference(input, ord, dim=dim, keepdim=keepdim)
            with self.assertRaisesRegex(RuntimeError, error_msg):
                torch.linalg.vector_norm(input, ord, dim=dim, keepdim=keepdim)

    if dtype.is_complex:
        norm_dtypes = [None, torch.cfloat, torch.cdouble]
    else:
        norm_dtypes = [None, torch.float, torch.double, torch.cfloat, torch.cdouble, torch.float16, torch.bfloat16]

    for input_size, ord, keepdim, norm_dtype in product(input_sizes, ord_vector, [True, False], norm_dtypes):
        input = make_tensor(input_size, device, dtype, low=-9, high=9)
        for dim in [None, random.randint(0, len(input_size) - 1)]:
            run_test_case(
                input,
                ord,
                dim,
                keepdim,
                norm_dtype)
def test_vector_norm_dim_tuple_arg(self, device):
    """linalg.vector_norm must accept `dim` as a tuple or a list and reject invalid dims."""
    # (input size, dim, expected error type, expected error message)
    cases = [
        ((4, ), (0, ), None, None),
        ((4, ), (1, ), IndexError, r'Dimension out of range'),
        ((4, ), (-2, ), IndexError, r'Dimension out of range'),
        ((4, 3), (0, -1), None, None),
        ((4, 3), (0, 0), RuntimeError, r'dim 0 appears multiple times in the list of dims'),
        ((4, 3), (0, -2), RuntimeError, r'dim 0 appears multiple times in the list of dims'),
        ((4, 3), (0, 1.0), TypeError, r"argument 'dim' must be tuple of ints"),
        ((4, 3), (None, ), TypeError, r"argument 'dim' must be tuple of ints"),
    ]
    for size, dims, expected_error, _expected_msg in cases:
        tensor = torch.randn(size, device=device)
        # The tuple form and the equivalent list form must behave the same.
        for dim_arg in (dims, list(dims)):
            if expected_error is None:
                torch.linalg.vector_norm(tensor, dim=dim_arg)
            else:
                with self.assertRaises(expected_error):
                    torch.linalg.vector_norm(tensor, dim=dim_arg)
# Test that linalg.vector_norm throws an error if the out tensor's dtype
# does not match the expected output dtype
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble, torch.bfloat16, torch.float16)
def test_vector_norm_out_dtype_error(self, device, dtype):
    """Check out= dtype validation of linalg.vector_norm for every (dtype=, out dtype) combination."""
    input = torch.randn(10, device=device, dtype=dtype)
    dtypes = [None, torch.float, torch.double, torch.cfloat, torch.cdouble, torch.float16, torch.bfloat16]

    for norm_dtype, out_dtype in product(dtypes, dtypes):
        # out_dtype=None means no out tensor to validate — nothing to test.
        if out_dtype is None:
            continue

        if norm_dtype is None:
            # Default output dtype: complex inputs produce their real
            # value type, everything else keeps the input dtype.
            if dtype == torch.cfloat:
                expected_dtype = torch.float
            elif dtype == torch.cdouble:
                expected_dtype = torch.double
            else:
                expected_dtype = dtype
        else:
            expected_dtype = norm_dtype

        result = torch.empty((0), device=device, dtype=out_dtype)
        msg = f'norm_dtype: {norm_dtype}, out_dtype: {out_dtype}, expected_dtype: {expected_dtype}'

        if dtype.is_complex and norm_dtype is not None and not norm_dtype.is_complex:
            # A complex input with a real dtype= is rejected outright.
            with self.assertRaisesRegex(RuntimeError, r"linalg.vector_norm expected complex 'dtype'", msg=msg):
                torch.linalg.vector_norm(input, dtype=norm_dtype, out=result)
        elif out_dtype != expected_dtype:
            with self.assertRaisesRegex(RuntimeError, r'linalg.vector_norm expected out tensor dtype', msg=msg):
                torch.linalg.vector_norm(input, dtype=norm_dtype, out=result)
        else:
            # Matching dtypes: the call must succeed.
            torch.linalg.vector_norm(input, dtype=norm_dtype, out=result)
# This test compares torch.linalg.norm and numpy.linalg.norm to ensure that
# their vector norm results match
@dtypes(torch.float, torch.double)
def test_norm_vector(self, device, dtype):
    """Compare torch.linalg.norm vector norms (and the out= variant) against numpy.linalg.norm."""
    # Fix: the helper's parameter used to be named `p` while its body read
    # the enclosing loop variable `ord`, leaving the parameter dead. It is
    # now named `ord` so the helper actually uses its argument.
    def run_test_case(input, ord, dim, keepdim):
        result = torch.linalg.norm(input, ord, dim, keepdim)
        input_numpy = input.cpu().numpy()
        result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)

        msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
        self.assertEqual(result, result_numpy, msg=msg)

        # The out= variant must agree with the functional result.
        result_out = torch.empty_like(result)
        torch.linalg.norm(input, ord, dim, keepdim, out=result_out)
        self.assertEqual(result, result_out, msg=msg)

    ord_vector = [0, 1, -1, 2, -2, 3, -3, 4.5, -4.5, inf, -inf]
    S = 10
    test_cases = [
        # input size, p settings, dim
        ((S, ), ord_vector, None),
        ((S, ), ord_vector, 0),
        ((S, S, S), ord_vector, 0),
        ((S, S, S), ord_vector, 1),
        ((S, S, S), ord_vector, 2),
        ((S, S, S), ord_vector, -1),
        ((S, S, S), ord_vector, -2),
    ]
    # One large 1D case, double precision only to keep the numpy
    # comparison numerically tight.
    L = 1_000_000
    if dtype == torch.double:
        test_cases.append(((L, ), ord_vector, None))
    for keepdim in [True, False]:
        for input_size, ord_settings, dim in test_cases:
            input = torch.randn(*input_size, dtype=dtype, device=device)
            for ord in ord_settings:
                run_test_case(input, ord, dim, keepdim)
# This test compares torch.linalg.norm, torch.linalg.matrix_norm and numpy.linalg.norm to
# ensure that their matrix norm results match.
@skipMeta  # https://github.com/pytorch/pytorch/issues/54082
@skipCUDAIfNoMagma
@dtypes(torch.float, torch.double)
@precisionOverride({torch.float32: 2e-5})
def test_norm_matrix(self, device, dtype):
    """Compare matrix norms from torch.linalg.norm/matrix_norm (plus out=) against numpy."""
    def run_test_case(input, ord, dim, keepdim):
        msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
        # NOTE(review): this outer `result` is never read (it is shadowed
        # inside check()); it only serves to exercise the call once before
        # the numpy reference is computed — confirm and/or remove.
        result = torch.linalg.norm(input, ord, dim, keepdim)
        input_numpy = input.cpu().numpy()
        result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)

        def check(op):
            # Compare op against the numpy reference, then check out=.
            result = op(input, ord, dim, keepdim)
            self.assertEqual(result, result_numpy, msg=msg)
            result_out = torch.empty_like(result)
            op(input, ord, dim, keepdim, out=result_out)
            self.assertEqual(result, result_out, msg=msg)

        check(torch.linalg.norm)
        # matrix_norm requires an explicit ord and a 2-dim `dim`.
        if ord is not None and dim is not None:
            check(torch.linalg.matrix_norm)

    ord_matrix = [1, -1, 2, -2, inf, -inf, 'nuc', 'fro']
    S = 10
    test_cases = [
        # input size, p settings, dim
        ((S, S), ord_matrix, None),
        ((S, S), ord_matrix, (0, 1)),
        ((S, S), ord_matrix, (1, 0)),
        ((S, S, S, S), ord_matrix, (2, 0)),
        ((S, S, S, S), ord_matrix, (-1, -2)),
        ((S, S, S, S), ord_matrix, (-1, -3)),
        ((S, S, S, S), ord_matrix, (-3, 2)),
    ]
    L = 1_000
    if dtype == torch.double:
        test_cases.append(((L, L), ord_matrix, None))
    for keepdim in [True, False]:
        for input_size, ord_settings, dim in test_cases:
            input = torch.randn(*input_size, dtype=dtype, device=device)
            for ord in ord_settings:
                # orders 2/-2/'nuc' require LAPACK on CPU
                if self.device_type == 'cpu' and not torch._C.has_lapack and ord in [2, -2, 'nuc']:
                    continue
                run_test_case(input, ord, dim, keepdim)
@onlyCUDA
@dtypes(torch.bfloat16, torch.float16)
def test_norm_fused_type_promotion(self, device, dtype):
    """Check via the profiler that norm with dtype=torch.float runs the norm op without an explicit aten::to cast."""
    x = torch.randn(10, device=device, dtype=dtype)

    def profile_and_check(fn, x, kwargs, fn_name):
        with torch.profiler.profile(activities=(torch.profiler.ProfilerActivity.CPU,)) as p:
            fn(x, **kwargs, dtype=torch.float)
        # smoke check that profiler returned some events
        self.assertTrue(fn_name in map(lambda e: e.name, p.events()))
        # test that there was no explicit copy
        self.assertFalse("aten::to" in map(lambda e: e.name, p.events()))

    # torch.norm takes p=2 via kwargs; vector_norm defaults to ord=2.
    for f, kwargs, fn_name in zip((torch.norm, torch.linalg.vector_norm), ({"p" : 2}, {}),
                                  ("aten::norm", "aten::linalg_vector_norm")):
        profile_and_check(f, x, kwargs, fn_name)
@skipMeta  # https://github.com/pytorch/pytorch/issues/53739
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float32: 1e-3})
def test_cond(self, device, dtype):
    """Compare torch.linalg.cond against numpy.linalg.cond for square, batched, non-square, singular, empty-batch, and 0x0 inputs."""
    def run_test_case(input, p):
        result = torch.linalg.cond(input, p)
        result_numpy = np.linalg.cond(input.cpu().numpy(), p)
        self.assertEqual(result, result_numpy, rtol=1e-2, atol=self.precision, exact_dtype=False)
        self.assertEqual(result.shape, result_numpy.shape)

        # test out= variant
        out = torch.empty_like(result)
        ans = torch.linalg.cond(input, p, out=out)
        self.assertEqual(ans, out)
        self.assertEqual(ans, result)

    norm_types = [1, -1, 2, -2, inf, -inf, 'fro', 'nuc', None]
    input_sizes = [(32, 32), (2, 3, 3, 3)]
    for input_size in input_sizes:
        input = torch.randn(*input_size, dtype=dtype, device=device)
        for p in norm_types:
            run_test_case(input, p)

    # test empty batch sizes
    input_sizes = [(0, 3, 3), (0, 2, 5, 5)]
    for input_size in input_sizes:
        input = torch.randn(*input_size, dtype=dtype, device=device)
        for p in norm_types:
            run_test_case(input, p)

    # test non-square input (only the 2-norm family is defined there)
    input_sizes = [(16, 32), (32, 16), (2, 3, 5, 3), (2, 3, 3, 5)]
    for input_size in input_sizes:
        input = torch.randn(*input_size, dtype=dtype, device=device)
        for p in [2, -2, None]:
            run_test_case(input, p)

    # test for singular input
    a = torch.eye(3, dtype=dtype, device=device)
    a[-1, -1] = 0  # make 'a' singular
    for p in norm_types:
        try:
            run_test_case(a, p)
        except np.linalg.LinAlgError:
            # Numpy may fail to converge for some BLAS backends (although this is very rare)
            # See the discussion in https://github.com/pytorch/pytorch/issues/67675
            pass

    # test for 0x0 matrices. NumPy doesn't work for such input, we return 0
    input_sizes = [(0, 0), (2, 5, 0, 0)]
    for input_size in input_sizes:
        input = torch.randn(*input_size, dtype=dtype, device=device)
        for p in ['fro', 2]:
            # NOTE(review): `a` here is the singular matrix from the
            # previous section; only its real dtype is used, so this
            # works, but `input.real.dtype` would be clearer.
            expected_dtype = a.real.dtype if dtype.is_complex else dtype
            expected = torch.zeros(input_size[:-2], dtype=expected_dtype, device=device)
            actual = torch.linalg.cond(input, p)
            self.assertEqual(actual, expected)
@skipMeta  # https://github.com/pytorch/pytorch/issues/53739
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float32: 1e-3})
def test_cond_errors_and_warnings(self, device, dtype):
    """Validate torch.linalg.cond's error messages, out= warnings/checks, singular-batch behavior, and invalid norm types."""
    norm_types = [1, -1, 2, -2, inf, -inf, 'fro', 'nuc', None]

    # cond expects the input to be at least 2-dimensional
    a = torch.ones(3, dtype=dtype, device=device)
    for p in norm_types:
        with self.assertRaisesRegex(RuntimeError, r'at least 2 dimensions'):
            torch.linalg.cond(a, p)

    # for some norm types cond expects the input to be square
    a = torch.ones(3, 2, dtype=dtype, device=device)
    norm_types = [1, -1, inf, -inf, 'fro', 'nuc']
    for p in norm_types:
        with self.assertRaisesRegex(RuntimeError, r'must be batches of square matrices'):
            torch.linalg.cond(a, p)

    # if non-empty out tensor with wrong shape is passed a warning is given
    a = torch.ones((2, 2), dtype=dtype, device=device)
    for p in ['fro', 2]:
        # cond returns a real-valued result even for complex inputs.
        real_dtype = a.real.dtype if dtype.is_complex else dtype
        out = torch.empty(a.shape, dtype=real_dtype, device=device)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.cond(a, p, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))

    # dtypes should be safely castable
    out = torch.empty(0, dtype=torch.int, device=device)
    for p in ['fro', 2]:
        with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
            torch.linalg.cond(a, p, out=out)

    # device should match
    if torch.cuda.is_available():
        wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
        out = torch.empty(0, dtype=dtype, device=wrong_device)
        for p in ['fro', 2]:
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.cond(a, p, out=out)

    # for batched input if at least one matrix in the batch is not invertible,
    # we can't get the result for all other (possibly) invertible matrices in the batch without an explicit for loop.
    # this should change when at::inverse works with silent errors
    # NumPy works fine in this case because it's possible to silence the error and get the inverse matrix results
    # possibly filled with NANs
    batch_dim = 3
    a = torch.eye(3, 3, dtype=dtype, device=device)
    a = a.reshape((1, 3, 3))
    a = a.repeat(batch_dim, 1, 1)
    a[1, -1, -1] = 0  # now a[1] is singular
    for p in [1, -1, inf, -inf, 'fro', 'nuc']:
        result = torch.linalg.cond(a, p)
        # the singular batch entry must yield an infinite condition number
        self.assertEqual(result[1], float('inf'))

    # check invalid norm type
    a = torch.ones(3, 3, dtype=dtype, device=device)
    for p in ['wrong_norm', 5]:
        with self.assertRaisesRegex(RuntimeError, f"linalg.cond got an invalid norm type: {p}"):
            torch.linalg.cond(a, p)
# This test calls torch.linalg.norm and numpy.linalg.norm with illegal arguments
# to ensure that they both throw errors
@dtypes(torch.float, torch.double)
def test_norm_errors(self, device, dtype):
    """For each illegal (ord, dim) combination, both torch.linalg.norm and numpy.linalg.norm must raise."""
    def run_error_test_case(input, ord, dim, keepdim, error_type, error_regex):
        test_case_info = (
            f'test case input.size()={input.size()}, ord={ord}, dim={dim}, '
            f'keepdim={keepdim}, dtype={dtype}')

        with self.assertRaisesRegex(error_type, error_regex, msg=test_case_info):
            torch.linalg.norm(input, ord, dim, keepdim)

        input_numpy = input.cpu().numpy()

        # NOTE(review): `msg` is built but never passed below
        # (`test_case_info` is used instead) — presumably an oversight.
        msg = f'numpy does not raise error but pytorch does, for case "{test_case_info}"'
        with self.assertRaises(Exception, msg=test_case_info):
            np.linalg.norm(input_numpy, ord, dim, keepdim)

    S = 10
    error_test_cases = [
        # input size, p settings, dim, error type, error regex
        ((S, ), ['fro'], None, RuntimeError, r'order "fro" can only be used if either len\(dim\) == 2'),
        ((S, ), ['nuc'], None, RuntimeError, r'order "nuc" can only be used if either len\(dim\) == 2'),
        ((S, S), [3.5], None, RuntimeError, r'Order 3.5 not supported for matrix norm'),
        ((S, S), [0], None, RuntimeError, r'Order 0 not supported for matrix norm'),
        ((S, S), ['nuc'], 0, RuntimeError, r'order "nuc" can only be used if either len\(dim\) == 2'),
        ((S, S), ['fro'], 0, RuntimeError, r'order "fro" can only be used if either len\(dim\) == 2'),
        ((S, S), ['nuc'], (0, 0), RuntimeError, r'duplicate or invalid dimensions'),
        ((S, S), ['fro', 0], (0, 0), RuntimeError, r'Expected dims to be different'),
        ((S, S), ['fro', 'nuc', 0], (0, 4), IndexError, r'Dimension out of range'),
        ((S, ), [0], (4, ), IndexError, r'Dimension out of range'),
        ((S, ), [None], (0, 0), RuntimeError, r'dim 0 appears multiple times'),
        ((S, S, S), [1], (0, 1, 2), RuntimeError, r"'dim' must specify 1 or 2 dimensions"),
        ((S, S, S), [1], None, RuntimeError, r"'dim' must specify 1 or 2 dimensions"),
        ((S, S), ['garbage'], (0, 1), RuntimeError, r'Invalid norm order: garbage'),
    ]
    for keepdim in [True, False]:
        for input_size, ord_settings, dim, error_type, error_regex in error_test_cases:
            input = torch.randn(*input_size, dtype=dtype, device=device)
            for ord in ord_settings:
                run_error_test_case(input, ord, dim, keepdim, error_type, error_regex)
# Test complex number inputs for linalg.norm
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.cfloat, torch.cdouble)
@precisionOverride({torch.cfloat: 2e-4})
def test_norm_complex(self, device, dtype):
    """torch.linalg.norm on complex inputs must match numpy, for vector and matrix orders, plus the out= variant."""
    def gen_error_message(input_size, ord, keepdim, dim=None):
        # f-string instead of legacy %-formatting; the message text is
        # unchanged.
        return f"complex norm failed for input size {input_size}, ord={ord}, keepdim={keepdim}, dim={dim}"

    # Shared comparison for the functional and out= call paths; the
    # vector and matrix loops previously duplicated this verbatim.
    def check(x, ords, keepdim):
        xn = x.cpu().numpy()
        for ord in ords:
            res = torch.linalg.norm(x, ord, keepdim=keepdim).cpu()
            expected = np.linalg.norm(xn, ord, keepdims=keepdim)
            msg = gen_error_message(x.size(), ord, keepdim)
            self.assertEqual(res.shape, expected.shape, msg=msg)
            self.assertEqual(res, expected, msg=msg, exact_dtype=False)

            res_out = torch.tensor([]).to(device)
            torch.linalg.norm(x, ord, keepdim=keepdim, out=res_out)
            self.assertEqual(res_out.shape, expected.shape, msg=msg)
            self.assertEqual(res_out.cpu(), expected, msg=msg, exact_dtype=False)

    vector_ords = [None, 0, 1, 2, 3, inf, -1, -2, -3, -inf]
    matrix_ords = [None, 'fro', 'nuc', 1, 2, inf, -1, -2, -inf]

    # Test supported ords
    for keepdim in [False, True]:
        # vector norm
        check(torch.randn(25, device=device, dtype=dtype), vector_ords, keepdim)
        # matrix norm
        check(torch.randn(25, 25, device=device, dtype=dtype), matrix_ords, keepdim)
# Test that linalg.vector_norm gives the same result as numpy when inputs
# contain extreme values (inf, -inf, nan)
def test_vector_norm_extreme_values(self, device):
    orders = [0, 1, 2, 3, inf, -1, -2, -3, -inf]
    extremes = [inf, -inf, 0.0, nan, 1.0]
    # Every length-2 combination of extreme values.
    all_vectors = [list(pair) for pair in itertools.product(extremes, repeat=2)]
    for vec in all_vectors:
        t = torch.tensor(vec, device=device)
        t_np = t.cpu().numpy()
        for p in orders:
            failure_msg = f'ord={p}, vector={vec}'
            torch_result = torch.linalg.vector_norm(t, ord=p)
            numpy_result = np.linalg.norm(t_np, ord=p)
            self.assertEqual(torch_result, numpy_result, msg=failure_msg)
@skipMeta  # https://github.com/pytorch/pytorch/issues/54082
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double)
@precisionOverride({torch.float32: 2e-5})
def test_matrix_norm(self, device, dtype):
    """Check matrix_norm-specific errors and its dim=None default against torch.linalg.norm."""
    # Test only inputs for which torch.linalg.matrix_norm diverges from torch.linalg.norm
    A = make_tensor((2, 2, 2), device, dtype)

    # 1-D input is rejected (norm would treat it as a vector norm).
    with self.assertRaisesRegex(RuntimeError, r'linalg.matrix_norm\(\):.*must be a matrix.*'):
        torch.linalg.matrix_norm(make_tensor((2,), device, dtype))
    # dim must be a 2-tuple.
    with self.assertRaisesRegex(RuntimeError, r'linalg.matrix_norm\(\):.*must be a 2-tuple.*'):
        torch.linalg.matrix_norm(A, dim=(0,))
    # Orders valid for vectors but not matrices are rejected.
    with self.assertRaisesRegex(RuntimeError, r'.*not supported.*'):
        torch.linalg.matrix_norm(A, ord=0)
    with self.assertRaisesRegex(RuntimeError, r'.*not supported.*'):
        torch.linalg.matrix_norm(A, ord=3.0)

    # Test dim=None behavior (defaults to the last two dimensions)
    ref = torch.linalg.norm(A, dim=(-2, -1))
    res = torch.linalg.matrix_norm(A)
    self.assertEqual(ref, res)
# Test that linalg.norm gives the same result as numpy when inputs
# contain extreme values (inf, -inf, nan)
@unittest.skipIf(IS_WINDOWS, "Skipped on Windows!")
@unittest.skipIf(IS_MACOS, "Skipped on MacOS!")
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
def test_norm_extreme_values(self, device):
    """Compare torch.linalg.norm against numpy on vectors/matrices built from inf, -inf, 0, nan, 1."""
    vector_ords = [0, 1, 2, 3, inf, -1, -2, -3, -inf]
    matrix_ords = ['fro', 'nuc', 1, 2, inf, -1, -2, -inf]
    vectors = []
    matrices = []
    # Build every length-2 combination as a vector, a 1x2 matrix, and a
    # 2x1 matrix.
    for pair in itertools.product([inf, -inf, 0.0, nan, 1.0], repeat=2):
        vectors.append(list(pair))
        matrices.append([[pair[0], pair[1]]])
        matrices.append([[pair[0]], [pair[1]]])

    for vector in vectors:
        x = torch.tensor(vector).to(device)
        x_n = x.cpu().numpy()
        for ord in vector_ords:
            msg = f'ord={ord}, vector={vector}'
            result = torch.linalg.norm(x, ord=ord)
            result_n = np.linalg.norm(x_n, ord=ord)
            self.assertEqual(result, result_n, msg=msg)

    # TODO: Remove this function once the broken cases are fixed
    def is_broken_matrix_norm_case(ord, x):
        if self.device_type == 'cuda':
            if x.size() == torch.Size([1, 2]):
                if ord in ['nuc', 2, -2] and isnan(x[0][0]) and x[0][1] == 1:
                    # These cases are broken because of an issue with svd
                    # https://github.com/pytorch/pytorch/issues/43567
                    return True
            if ord in ['nuc', 2, -2]:
                # These cases are broken because of another issue with svd
                # https://github.com/pytorch/pytorch/issues/52633
                return True
        return False

    for matrix in matrices:
        x = torch.tensor(matrix).to(device)
        x_n = x.cpu().numpy()
        for ord in matrix_ords:
            msg = f'ord={ord}, matrix={matrix}'
            if is_broken_matrix_norm_case(ord, x):
                continue
            else:
                result = torch.linalg.norm(x, ord=ord)
                result_n = np.linalg.norm(x_n, ord=ord)
                self.assertEqual(result, result_n, msg=msg)
# Test degenerate shape results match numpy for linalg.norm vector norms
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@unittest.skipIf(TEST_WITH_ASAN, "Skipped on ASAN since it checks for undefined behavior.")
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_norm_vector_degenerate_shapes(self, device, dtype):
    """Vector norms on zero-sized inputs: negative orders and inf over empty dims raise; the rest match numpy."""
    def run_test_case(input, ord, dim, keepdim):
        msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
        # Decide whether this combination is defined on an empty input.
        should_error = False
        if ord is not None and ord < 0:
            should_error = True
        elif ord == inf:
            if dim is None or input.size(dim) == 0:
                should_error = True

        if should_error:
            with self.assertRaises(RuntimeError):
                torch.linalg.norm(input, ord, dim, keepdim)
        else:
            input_numpy = input.cpu().numpy()
            result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)
            result = torch.linalg.norm(input, ord, dim, keepdim)
            self.assertEqual(result, result_numpy, msg=msg)

    ord_vector = [0, 0.5, 1, 2, 3, inf, -0.5, -1, -2, -3, -inf, None]
    S = 10
    test_cases = [
        # input size, dim
        ((0, ), None),
        ((0, S), 0),
        ((0, S), 1),
        ((S, 0), 0),
        ((S, 0), 1),
    ]
    for keepdim in [True, False]:
        for input_size, dim in test_cases:
            input = torch.randn(*input_size, dtype=dtype, device=device)
            for ord in ord_vector:
                run_test_case(input, ord, dim, keepdim)
# Test degenerate shape results match numpy for linalg.norm matrix norms
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_norm_matrix_degenerate_shapes(self, device, dtype):
    """Matrix norms on zero-sized inputs: error cases must raise in both torch and numpy; others must match numpy."""
    def run_test_case(input, ord, dim, keepdim, should_error):
        msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
        input_numpy = input.cpu().numpy()
        # matrix_norm only accepts an explicit ord together with dim.
        ops = [torch.linalg.norm]
        if ord is not None and dim is not None:
            ops.append(torch.linalg.matrix_norm)

        if should_error:
            # numpy raises ValueError, torch raises IndexError for these.
            with self.assertRaises(ValueError):
                np.linalg.norm(input_numpy, ord, dim, keepdim)
            for op in ops:
                with self.assertRaises(IndexError):
                    op(input, ord, dim, keepdim)
        else:
            result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)
            for op in ops:
                result = op(input, ord, dim, keepdim)
                self.assertEqual(result, result_numpy, msg=msg)

    ord_matrix = ['fro', 'nuc', 1, 2, inf, -1, -2, -inf, None]
    S = 10
    test_cases = [
        # input size, p settings that cause error, dim
        ((0, 0), [1, 2, inf, -1, -2, -inf], None),
        ((0, S), [2, inf, -2, -inf], None),
        ((S, 0), [1, 2, -1, -2], None),
        ((S, S, 0), [], (0, 1)),
        ((1, S, 0), [], (0, 1)),
        ((0, 0, S), [1, 2, inf, -1, -2, -inf], (0, 1)),
        ((0, 0, S), [1, 2, inf, -1, -2, -inf], (1, 0)),
    ]
    for keepdim in [True, False]:
        for input_size, error_ords, dim in test_cases:
            input = torch.randn(*input_size, dtype=dtype, device=device)
            for ord in ord_matrix:
                run_test_case(input, ord, dim, keepdim, ord in error_ords)
def test_norm_fastpaths(self, device):
    """Check the specialized ord=0/1/2/3 vector-norm paths and the generic path against manual formulas."""
    x = torch.randn(3, 5, device=device)

    # Generic (slow) path for a non-special order.
    generic = torch.pow(x.abs().pow(4.5).sum(1), 1.0 / 4.5)
    self.assertEqual(torch.linalg.norm(x, 4.5, 1), generic)

    # ord=0 counts the non-zero entries.
    self.assertEqual(torch.linalg.norm(x, 0, 1), (x != 0).type_as(x).sum(1))

    # ord=1 is the sum of absolute values.
    self.assertEqual(torch.linalg.norm(x, 1, 1), x.abs().sum(1))

    # ord=2 is the Euclidean norm.
    self.assertEqual(torch.linalg.norm(x, 2, 1), torch.sqrt(x.pow(2).sum(1)))

    # ord=3 via the explicit cube-root formula.
    cubic = torch.pow(x.pow(3).abs().sum(1), 1.0 / 3.0)
    self.assertEqual(torch.linalg.norm(x, 3, 1), cubic)
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
def test_old_eig_basic(self, device, dtype):
    """Check the deprecated torch.eig on a fixed lower-triangular matrix: all call variants agree and match numpy."""
    a = torch.tensor([[1.96, 0.00, 0.00, 0.00, 0.00],
                      [-6.49, 3.80, 0.00, 0.00, 0.00],
                      [-0.47, -6.39, 4.17, 0.00, 0.00],
                      [-7.20, 1.50, -1.51, 5.70, 0.00],
                      [-0.65, -6.34, 2.67, 1.80, -7.10]],
                     dtype=dtype, device=device).t()
    # Three call variants: eigenvalues only, with eigenvectors, and out=.
    e = torch.eig(a)[0]
    ee, vv = torch.eig(a, True)
    te = torch.tensor((), dtype=dtype, device=device)
    tv = torch.tensor((), dtype=dtype, device=device)
    eee, vvv = torch.eig(a, True, out=(te, tv))
    self.assertEqual(e, ee, atol=1e-12, rtol=0)
    self.assertEqual(ee, eee, atol=1e-12, rtol=0)
    self.assertEqual(ee, te, atol=1e-12, rtol=0)
    self.assertEqual(vv, vvv, atol=1e-12, rtol=0)
    self.assertEqual(vv, tv, atol=1e-12, rtol=0)
    #
    # compare with numpy
    np_e, np_v = np.linalg.eig(a.cpu().numpy())
    if dtype.is_complex:
        self.assertEqual(ee, np_e)
    else:
        # np_e.shape == (n, 2), where each column contain the real and
        # imaginary parts of the result
        self.assertEqual(ee[:, 0], np_e)  # real part
        self.assertEqual(ee[:, 1], torch.zeros(ee.shape[0], dtype=dtype))  # imaginary part
        self.assertEqual(vv, np_v)
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(torch.double, torch.float)
    def test_old_eig_reuse(self, device, dtype):
        """Deprecated torch.eig with out= tensors: the results must reconstruct
        X as V @ diag(e) @ V.T and the out tensors must be reusable."""
        X = torch.randn(4, 4, dtype=dtype, device=device)
        # X.T @ X is symmetric, so the eigendecomposition reconstruction below holds.
        X = torch.mm(X.t(), X)
        e = torch.zeros(4, 2, dtype=dtype, device=device)
        v = torch.zeros(4, 4, dtype=dtype, device=device)
        torch.eig(X, True, out=(e, v))
        # e.select(1, 0) is the real part of the eigenvalues.
        Xhat = np.matmul(np.matmul(v.cpu(), torch.diag(e.select(1, 0)).cpu()), v.t().cpu())
        if dtype is torch.float:
            atol = 1e-7
            rtol = 1e-5
        else:
            atol = 1e-8
            rtol = 0
        self.assertEqual(X, Xhat, atol=atol, rtol=rtol, msg='VeV\' wrong')
        self.assertTrue(v.is_contiguous(), 'V is not contiguous')
        # Second call reuses the same (already correctly sized) out tensors.
        torch.eig(X, True, out=(e, v))
        Xhat = np.matmul(v.cpu(), np.matmul(e.select(1, 0).diag().cpu(), v.t().cpu()))
        self.assertEqual(X, Xhat, atol=atol, rtol=rtol, msg='VeV\' wrong')
        self.assertTrue(v.is_contiguous(), 'V is not contiguous')
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(torch.double, torch.float)
    def test_old_eig_non_contiguous(self, device, dtype):
        """Deprecated torch.eig must produce correct results into
        non-contiguous out= tensors."""
        X = torch.randn(4, 4, dtype=dtype, device=device)
        X = torch.mm(X.t(), X)
        # Slicing the middle dimension makes e and v non-contiguous on purpose.
        e = torch.zeros(4, 2, 2, dtype=dtype, device=device)[:, 1]
        v = torch.zeros(4, 2, 4, dtype=dtype, device=device)[:, 1]
        self.assertFalse(v.is_contiguous(), 'V is contiguous')
        self.assertFalse(e.is_contiguous(), 'E is contiguous')
        torch.eig(X, True, out=(e, v))
        # Reconstruct X = V @ diag(real(e)) @ V.T and compare.
        Xhat = np.matmul(np.matmul(v.cpu(), torch.diag(e.cpu().select(1, 0))), v.t().cpu())
        if dtype is torch.float:
            atol = 1e-7
            rtol = 1e-5
        else:
            atol = 1e-8
            rtol = 0
        self.assertEqual(X, Xhat, atol=atol, rtol=rtol, msg='VeV\' wrong')
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(torch.double, torch.float)
def test_old_eig_invalid_input(self, device, dtype):
# test invalid input
self.assertRaisesRegex(
RuntimeError,
'input should be 2 dimensional',
lambda: torch.eig(torch.ones((2))))
self.assertRaisesRegex(
RuntimeError,
'input should be square',
lambda: torch.eig(torch.ones((2, 3))))
self.assertRaisesRegex(
RuntimeError,
'input should not contain infs or NaNs',
lambda: torch.eig(np.inf * torch.ones((2, 2))))
self.assertRaisesRegex(
RuntimeError,
'input should not contain infs or NaNs',
lambda: torch.eig(np.nan * torch.ones((2, 2))))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double, torch.float)
    def test_old_eig_out(self, device, dtype):
        """Manual tests for the out= variant of the deprecated torch.eig."""
        # the out version of torch.eig needs to be tested manually: we can't
        # use the "test_out=True" parameter to tensor_op_tests because the
        # signature is irregular (since we have *two* output vectors)
        t = torch.randn(10, 10, dtype=dtype, device=device)
        evals, evecs = torch.eig(t, eigenvectors=True)
        #
        # check that the out= version computes the same values as the normal one
        out_evals = torch.empty_like(evals)
        out_evecs = torch.empty_like(evecs)
        evals2, evecs2 = torch.eig(t, eigenvectors=True, out=(out_evals, out_evecs))
        # check that the out tensors were used in-place
        self.assertEqual(evals2.data_ptr(), out_evals.data_ptr())
        self.assertEqual(evecs2.data_ptr(), out_evecs.data_ptr())
        # check that the result is the same as the non-out version
        self.assertEqual(evals, out_evals)
        self.assertEqual(evecs, out_evecs)
        #
        # check what happens in the eigenvectors=False case
        out_evals = torch.empty_like(evals)
        out_evecs = torch.tensor([1, 2, 3], dtype=dtype, device=device)
        evals2, evecs2 = torch.eig(t, eigenvectors=False, out=(out_evals, out_evecs))
        # check that the out_evals was used in-place
        self.assertEqual(evals2.data_ptr(), out_evals.data_ptr())
        self.assertEqual(evals, out_evals)
        # check that out_evecs was NOT touched at all
        assert out_evecs.tolist() == [1, 2, 3]
        #
        # check that we complain if we pass an out vector of the wrong dtype
        wrong_out = torch.empty((0, 0), dtype=int)
        with self.assertRaisesRegex(RuntimeError, r"Expected .* but got .*"):
            torch.eig(t, eigenvectors=True, out=(wrong_out, out_evecs))
        with self.assertRaisesRegex(RuntimeError, r"Expected .* but got .*"):
            torch.eig(t, eigenvectors=True, out=(out_evals, wrong_out))
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
# NumPy computes only in float64 and complex128 precisions
# for float32 or complex64 results might be very different from float64 or complex128
@dtypes(torch.float64, torch.complex128)
    def test_eig_numpy(self, device, dtype):
        """Compare torch.linalg.eig against numpy.linalg.eig on a range of shapes.

        Both results are sorted in descending eigenvalue order before comparison,
        since the two libraries do not guarantee the same eigenvalue ordering.
        """
        def run_test(shape, *, symmetric=False):
            from torch.testing._internal.common_utils import random_symmetric_matrix
            if not dtype.is_complex and symmetric:
                # for symmetric real-valued inputs eigenvalues and eigenvectors have imaginary part equal to zero
                # unlike NumPy the result is not cast to float32 or float64 dtype in this case
                a = random_symmetric_matrix(shape[-1], *shape[:-2], dtype=dtype, device=device)
            else:
                a = make_tensor(shape, dtype=dtype, device=device)
            actual = torch.linalg.eig(a)
            # compare with NumPy
            # the eigenvalues are not necessarily ordered
            # so order of NumPy and PyTorch can be different
            expected = np.linalg.eig(a.cpu().numpy())
            # sort NumPy output
            ind = np.argsort(expected[0], axis=-1)[::-1]
            expected = (np.take_along_axis(expected[0], ind, axis=-1), np.take_along_axis(expected[1], ind[:, None], axis=-1))
            # sort PyTorch output
            # torch.argsort doesn't work with complex inputs, NumPy sorting on CPU is used instead
            # RuntimeError: _th_sort not supported on CUDAType for ComplexDouble
            # RuntimeError: "sorting_kernel_method_name" not implemented for 'ComplexDouble'
            ind = np.argsort(actual[0].cpu().numpy(), axis=-1)[::-1]
            actual_np = [x.cpu().numpy() for x in actual]
            sorted_actual = (
                np.take_along_axis(actual_np[0], ind, axis=-1),
                np.take_along_axis(actual_np[1], ind[:, None], axis=-1))
            # Eigenvectors are compared by magnitude only (the code below takes abs
            # of both sides), so per-column sign/phase differences are tolerated.
            self.assertEqual(expected[0], sorted_actual[0], exact_dtype=False)
            self.assertEqual(abs(expected[1]), abs(sorted_actual[1]), exact_dtype=False)
        shapes = [(0, 0),  # Empty matrix
                  (5, 5),  # Single matrix
                  (0, 0, 0), (0, 5, 5),  # Zero batch dimension tensors
                  (2, 5, 5),  # 3-dim tensors
                  (2, 1, 5, 5)]  # 4-dim tensors
        for shape in shapes:
            run_test(shape)
            run_test(shape, symmetric=True)
@onlyCUDA
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
    def test_eig_compare_backends(self, device, dtype):
        """torch.linalg.eig on a CUDA device must match the CPU backend exactly."""
        def run_test(shape, *, symmetric=False):
            from torch.testing._internal.common_utils import random_symmetric_matrix
            if not dtype.is_complex and symmetric:
                # for symmetric real-valued inputs eigenvalues and eigenvectors have imaginary part equal to zero
                a = random_symmetric_matrix(shape[-1], *shape[:-2], dtype=dtype, device=device)
            else:
                a = make_tensor(shape, dtype=dtype, device=device)
            actual = torch.linalg.eig(a)
            complementary_device = 'cpu'
            # compare with CPU
            expected = torch.linalg.eig(a.to(complementary_device))
            self.assertEqual(expected[0], actual[0])
            self.assertEqual(expected[1], actual[1])
        shapes = [(0, 0),  # Empty matrix
                  (5, 5),  # Single matrix
                  (0, 0, 0), (0, 5, 5),  # Zero batch dimension tensors
                  (2, 5, 5),  # 3-dim tensors
                  (2, 1, 5, 5)]  # 4-dim tensors
        for shape in shapes:
            run_test(shape)
            run_test(shape, symmetric=True)
@slowTest
@onlyCUDA
@skipCUDAIfNoMagma
@dtypes(torch.float32)
def test_eig_check_magma(self, device, dtype):
# For CUDA inputs only matrices of size larger than 2048x2048 actually call MAGMA library
shape = (2049, 2049)
a = make_tensor(shape, dtype=dtype, device=device)
w, v = torch.linalg.eig(a)
# check correctness using eigendecomposition identity
self.assertEqual(a.to(v.dtype) @ v, w * v, atol=1e-3, rtol=1e-3)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
    def test_eig_errors_and_warnings(self, device, dtype):
        """torch.linalg.eig error handling: dimensionality checks, out= dtype
        castability, wrong-shape resize warnings, and device mismatch errors."""
        # eig requires the input to be at least 2 dimensional tensor
        a = make_tensor(2, dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
            torch.linalg.eig(a)
        # eig requires a square matrix
        a = make_tensor((2, 3), dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
            torch.linalg.eig(a)
        # if out tensor with floating dtype is passed for complex output an error is thrown
        if not dtype.is_complex:
            # The characteristic equation is p(λ) = λ^2 − 2λ + 5 = 0, with roots λ = 1±2i
            a = torch.tensor([[3., -2.], [4., -1.]], dtype=dtype, device=device)
            out0 = torch.empty(0, device=device, dtype=dtype)
            out1 = torch.empty(0, device=device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "Expected eigenvalues to be safely castable"):
                torch.linalg.eig(a, out=(out0, out1))
            # out1 still has a real dtype here, so the eigenvectors check fires next.
            out0 = torch.empty(0, device=device, dtype=torch.complex128)
            with self.assertRaisesRegex(RuntimeError, "Expected eigenvectors to be safely castable"):
                torch.linalg.eig(a, out=(out0, out1))
        # dtypes should be safely castable
        a = make_tensor((3, 3), dtype=dtype, device=device)
        out0 = torch.empty(0, dtype=torch.int, device=device)
        out1 = torch.empty(0, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got eigenvalues with dtype Int"):
            torch.linalg.eig(a, out=(out0, out1))
        out0 = torch.empty(0, dtype=torch.complex128, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got eigenvectors with dtype Int"):
            torch.linalg.eig(a, out=(out0, out1))
        # if non-empty out tensor with wrong shape is passed a warning is given
        a = make_tensor((3, 3), dtype=dtype, device=device)
        out0 = torch.empty(1, device=device, dtype=torch.complex128)
        out1 = torch.empty(1, device=device, dtype=torch.complex128)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.eig(a, out=(out0, out1))
            # Check warning occurs: one resize warning per out tensor.
            self.assertEqual(len(w), 2)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
            self.assertTrue("An output with one or more elements was resized" in str(w[-2].message))
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out_w = torch.empty(0, device=wrong_device, dtype=torch.complex128)
            out_v = torch.empty(0, device=device, dtype=torch.complex128)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.eig(a, out=(out_w, out_v))
            out_w = torch.empty(0, device=device, dtype=torch.complex128)
            out_v = torch.empty(0, device=wrong_device, dtype=torch.complex128)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.eig(a, out=(out_w, out_v))
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
def test_eig_with_nan(self, device, dtype):
for val in [np.inf, np.nan]:
for batch_dim in [(), (10,)]:
a = make_tensor((*batch_dim, 5, 5), device=device, dtype=dtype)
a[..., -1, -1] = val
with self.assertRaisesRegex(RuntimeError, "torch.linalg.eig: input tensor should not"):
torch.linalg.eig(a)
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
# NumPy computes only in float64 and complex128 precisions
# for float32 or complex64 results might be very different from float64 or complex128
@dtypes(torch.float64, torch.complex128)
    def test_eigvals_numpy(self, device, dtype):
        """Compare torch.linalg.eigvals against numpy.linalg.eigvals.

        Both outputs are sorted in descending order before comparison because
        the eigenvalue ordering is not guaranteed to match between libraries.
        """
        def run_test(shape, *, symmetric=False):
            from torch.testing._internal.common_utils import random_symmetric_matrix
            if not dtype.is_complex and symmetric:
                # for symmetric real-valued inputs eigenvalues and eigenvectors have imaginary part equal to zero
                # unlike NumPy the result is not cast to float32 or float64 dtype in this case
                a = random_symmetric_matrix(shape[-1], *shape[:-2], dtype=dtype, device=device)
            else:
                a = make_tensor(shape, dtype=dtype, device=device)
            actual = torch.linalg.eigvals(a)
            # compare with NumPy
            # the eigenvalues are not necessarily ordered
            # so order of NumPy and PyTorch can be different
            expected = np.linalg.eigvals(a.cpu().numpy())
            # sort NumPy output
            ind = np.argsort(expected, axis=-1)[::-1]
            expected = np.take_along_axis(expected, ind, axis=-1)
            # sort PyTorch output
            # torch.argsort doesn't work with complex inputs, NumPy sorting on CPU is used instead
            # RuntimeError: _th_sort not supported on CUDAType for ComplexDouble
            # RuntimeError: "sorting_kernel_method_name" not implemented for 'ComplexDouble'
            ind = np.argsort(actual.cpu().numpy(), axis=-1)[::-1]
            actual_np = actual.cpu().numpy()
            sorted_actual = np.take_along_axis(actual_np, ind, axis=-1)
            self.assertEqual(expected, sorted_actual, exact_dtype=False)
        shapes = [(0, 0),  # Empty matrix
                  (5, 5),  # Single matrix
                  (0, 0, 0), (0, 5, 5),  # Zero batch dimension tensors
                  (2, 5, 5),  # 3-dim tensors
                  (2, 1, 5, 5)]  # 4-dim tensors
        for shape in shapes:
            run_test(shape)
            run_test(shape, symmetric=True)
@onlyCUDA
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
    def test_eigvals_compare_backends(self, device, dtype):
        """torch.linalg.eigvals on a CUDA device must match the CPU backend,
        including the out= variant with contiguous and non-contiguous outputs."""
        def run_test(shape, *, symmetric=False):
            from torch.testing._internal.common_utils import random_symmetric_matrix
            if not dtype.is_complex and symmetric:
                # for symmetric real-valued inputs eigenvalues and eigenvectors have imaginary part equal to zero
                a = random_symmetric_matrix(shape[-1], *shape[:-2], dtype=dtype, device=device)
            else:
                a = make_tensor(shape, dtype=dtype, device=device)
            actual = torch.linalg.eigvals(a)
            complementary_device = 'cpu'
            # compare with CPU
            expected = torch.linalg.eigvals(a.to(complementary_device))
            self.assertEqual(expected, actual)
            # check out= variant
            # eigvals output is complex even for real inputs, so pick the matching complex dtype.
            complex_dtype = dtype
            if not dtype.is_complex:
                complex_dtype = torch.complex128 if dtype == torch.float64 else torch.complex64
            out = torch.empty(0, dtype=complex_dtype, device=device)
            ans = torch.linalg.eigvals(a, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(expected.to(complex_dtype), out)
            # check non-contiguous out
            if a.numel() > 0:
                # Strided slice of a twice-as-large buffer gives a non-contiguous out tensor.
                out = torch.empty(2 * shape[0], *shape[1:-1], dtype=complex_dtype, device=device)[::2]
                self.assertFalse(out.is_contiguous())
                ans = torch.linalg.eigvals(a, out=out)
                self.assertEqual(ans, out)
                self.assertEqual(expected.to(complex_dtype), out)
        shapes = [(0, 0),  # Empty matrix
                  (5, 5),  # Single matrix
                  (0, 0, 0), (0, 5, 5),  # Zero batch dimension tensors
                  (2, 5, 5),  # 3-dim tensors
                  (2, 1, 5, 5)]  # 4-dim tensors
        for shape in shapes:
            run_test(shape)
            run_test(shape, symmetric=True)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
    def test_eigvals_errors_and_warnings(self, device, dtype):
        """torch.linalg.eigvals error handling: dimensionality checks, out=
        dtype castability, wrong-shape resize warning, and device mismatch."""
        # eig requires the input to be at least 2 dimensional tensor
        a = make_tensor(2, dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
            torch.linalg.eigvals(a)
        # eig requires a square matrix
        a = make_tensor((2, 3), dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
            torch.linalg.eigvals(a)
        # if out tensor with floating dtype is passed for complex output an error is thrown
        if not dtype.is_complex:
            # The characteristic equation is p(λ) = λ^2 − 2λ + 5 = 0, with roots λ = 1±2i
            a = torch.tensor([[3., -2.], [4., -1.]], dtype=dtype, device=device)
            out = torch.empty(0, device=device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "Expected eigenvalues to be safely castable"):
                torch.linalg.eigvals(a, out=out)
        # dtypes should be safely castable
        a = make_tensor((3, 3), dtype=dtype, device=device)
        out = torch.empty(0, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got eigenvalues with dtype Int"):
            torch.linalg.eigvals(a, out=out)
        # if non-empty out tensor with wrong shape is passed a warning is given
        out = torch.empty(1, device=device, dtype=torch.complex128)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.eigvals(a, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out_w = torch.empty(0, device=wrong_device, dtype=torch.complex128)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.eigvals(a, out=out_w)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
    def test_norm_old(self, device):
        """Exercise the legacy torch.norm (Tensor.norm) against numpy.linalg.norm
        over vector/matrix ords, reduction dims, and both keepdim settings."""
        def gen_error_message(input_size, p, keepdim, dim=None):
            # Descriptive message identifying the failing configuration.
            return "norm failed for input size %s, p=%s, keepdim=%s, dim=%s" % (
                input_size, p, keepdim, dim)
        for keepdim in [False, True]:
            # full reduction
            x = torch.randn(25, device=device)
            xn = x.cpu().numpy()
            for p in [0, 1, 2, 3, 4, inf, -inf, -1, -2, -3, 1.5]:
                res = x.norm(p, keepdim=keepdim).cpu()
                expected = np.linalg.norm(xn, p, keepdims=keepdim)
                self.assertEqual(res, expected, atol=1e-5, rtol=0, msg=gen_error_message(x.size(), p, keepdim))
            # one dimension
            x = torch.randn(25, 25, device=device)
            xn = x.cpu().numpy()
            for p in [0, 1, 2, 3, 4, inf, -inf, -1, -2, -3]:
                dim = 1
                res = x.norm(p, dim, keepdim=keepdim).cpu()
                expected = np.linalg.norm(xn, p, dim, keepdims=keepdim)
                msg = gen_error_message(x.size(), p, keepdim, dim)
                self.assertEqual(res.shape, expected.shape, msg=msg)
                self.assertEqual(res, expected, msg=msg)
            # matrix norm
            for p in ['fro', 'nuc']:
                res = x.norm(p, keepdim=keepdim).cpu()
                expected = np.linalg.norm(xn, p, keepdims=keepdim)
                msg = gen_error_message(x.size(), p, keepdim)
                self.assertEqual(res.shape, expected.shape, msg=msg)
                self.assertEqual(res, expected, msg=msg)
            # zero dimensions
            x = torch.randn((), device=device)
            xn = x.cpu().numpy()
            res = x.norm(keepdim=keepdim).cpu()
            expected = np.linalg.norm(xn, keepdims=keepdim)
            msg = gen_error_message(x.size(), None, keepdim)
            self.assertEqual(res.shape, expected.shape, msg=msg)
            self.assertEqual(res, expected, msg=msg)
            # larger tensor sanity check: norm of 4x the elements is 2x the norm
            self.assertEqual(
                2 * torch.norm(torch.ones(10000), keepdim=keepdim),
                torch.norm(torch.ones(40000), keepdim=keepdim))
            # matrix norm with non-square >2-D tensors, all combinations of reduction dims
            x = torch.randn(5, 6, 7, 8, device=device)
            xn = x.cpu().numpy()
            for p in ['fro', 'nuc']:
                for dim in itertools.product(*[list(range(4))] * 2):
                    if dim[0] == dim[1]:
                        continue
                    res = x.norm(p=p, dim=dim, keepdim=keepdim).cpu()
                    expected = np.linalg.norm(xn, ord=p, axis=dim, keepdims=keepdim)
                    msg = gen_error_message(x.size(), p, keepdim, dim)
                    self.assertEqual(res.shape, expected.shape, msg=msg)
                    self.assertEqual(res, expected, msg=msg)
# Test that torch.norm with p=+/-inf propagates NaN
def test_norm_old_nan_propagation(self, device):
ords = [inf, -inf]
for pair in itertools.product([0.0, nan, 1.0], repeat=2):
x = torch.tensor(list(pair), device=device)
for ord in ords:
result = torch.norm(x, p=ord)
result_check = torch.linalg.norm(x, ord=ord)
self.assertEqual(result, result_check)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
    def test_norm_complex_old(self, device):
        """Legacy torch.norm on complex inputs vs numpy.linalg.norm
        (vector ords and 'fro'/'nuc' matrix ords)."""
        def gen_error_message(input_size, p, keepdim, dim=None):
            return "complex norm failed for input size %s, p=%s, keepdim=%s, dim=%s" % (
                input_size, p, keepdim, dim)
        for keepdim in [False, True]:
            # vector norm
            x = torch.randn(25, device=device) + 1j * torch.randn(25, device=device)
            xn = x.cpu().numpy()
            for p in [0, 1, 2, 3, inf, -1, -2, -3, -inf]:
                res = x.norm(p, keepdim=keepdim).cpu()
                expected = np.linalg.norm(xn, p, keepdims=keepdim)
                msg = gen_error_message(x.size(), p, keepdim)
                self.assertEqual(res.shape, expected.shape, msg=msg)
                self.assertEqual(res, expected, msg=msg)
            # matrix norm
            x = torch.randn(25, 25, device=device) + 1j * torch.randn(25, 25, device=device)
            xn = x.cpu().numpy()
            for p in ['nuc', 'fro']:
                res = x.norm(p, keepdim=keepdim).cpu()
                expected = np.linalg.norm(xn, p, keepdims=keepdim)
                msg = gen_error_message(x.size(), p, keepdim)
                self.assertEqual(res.shape, expected.shape, msg=msg)
                # looser tolerances than the vector-norm checks above
                self.assertEqual(res, expected, msg=msg, rtol=1.3e-6, atol=3e-4)
# Ensure torch.norm with p='fro' and p=2 give the same results for mutually supported input combinations
@dtypes(torch.float)
    def test_norm_fro_2_equivalence_old(self, device, dtype):
        """torch.norm with p='fro' and p=2 must agree for every mutually
        supported combination of input shape, reduction dim, and keepdim."""
        input_sizes = [
            (0,),
            (10,),
            (0, 0),
            (4, 30),
            (0, 45),
            (100, 0),
            (45, 10, 23),
            (0, 23, 59),
            (23, 0, 37),
            (34, 58, 0),
            (0, 0, 348),
            (0, 3434, 0),
            (0, 0, 0),
            (5, 3, 8, 1, 3, 5)]
        for input_size in input_sizes:
            a = make_tensor(input_size, device, dtype, low=-9, high=9)
            # Try full reduction
            dim_settings = [None]
            # Try all possible 1-D reductions
            dim_settings += list(range(-a.dim(), a.dim()))
            def wrap_dim(dim, ndims):
                # Normalize a possibly-negative dim index to [0, ndims).
                assert (dim < ndims) and (dim >= -ndims)
                if dim >= 0:
                    return dim
                else:
                    return dim + ndims
            # Try all possible 2-D reductions
            # (skip pairs that normalize to the same dimension)
            dim_settings += [
                (d0, d1) for d0, d1 in itertools.combinations(range(-a.dim(), a.dim()), 2)
                if wrap_dim(d0, a.dim()) != wrap_dim(d1, a.dim())]
            for dim in dim_settings:
                for keepdim in [True, False]:
                    a_norm_2 = torch.norm(a, p=2, dim=dim, keepdim=keepdim)
                    a_norm_fro = torch.norm(a, p='fro', dim=dim, keepdim=keepdim)
                    self.assertEqual(a_norm_fro, a_norm_2)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
    def test_nuclear_norm_axes_small_brute_force_old(self, device):
        """Brute-force check of torch.norm(..., 'nuc', dim=axes) against NumPy
        for 2d/3d/4d inputs with contiguous and non-contiguous layouts."""
        def check_single_nuclear_norm(x, axes):
            # On non-CPU devices, randomly skip most cases to limit host<->device traffic.
            if self.device_type != 'cpu' and randrange(100) < 95:
                return  # too many cpu <==> device copies
            a = np.array(x.cpu(), copy=False)
            expected = np.linalg.norm(a, "nuc", axis=axes)
            ans = torch.norm(x, "nuc", dim=axes)
            self.assertTrue(ans.is_contiguous())
            self.assertEqual(ans.shape, expected.shape)
            self.assertEqual(ans.cpu(), expected, rtol=1e-02, atol=1e-03, equal_nan=True)
            # Also check the out= variant returns the same tensor.
            out = torch.zeros(expected.shape, dtype=x.dtype, device=x.device)
            ans = torch.norm(x, "nuc", dim=axes, out=out)
            self.assertIs(ans, out)
            self.assertTrue(ans.is_contiguous())
            self.assertEqual(ans.shape, expected.shape)
            self.assertEqual(ans.cpu(), expected, rtol=1e-02, atol=1e-03, equal_nan=True)
        for n in range(1, 3):
            for m in range(1, 3):
                for axes in itertools.permutations([0, 1], 2):
                    # 2d, inner dimensions C
                    x = torch.randn(n, m, device=device)
                    check_single_nuclear_norm(x, axes)
                    # 2d, inner dimensions Fortran
                    x = torch.randn(m, n, device=device).mT
                    check_single_nuclear_norm(x, axes)
                    # 2d, inner dimensions non-contiguous
                    x = torch.randn(n, 2 * m, device=device)[:, ::2]
                    check_single_nuclear_norm(x, axes)
                    # 2d, all dimensions non-contiguous
                    x = torch.randn(7 * n, 2 * m, device=device)[::7, ::2]
                    check_single_nuclear_norm(x, axes)
                for o in range(1, 3):
                    for axes in itertools.permutations([0, 1, 2], 2):
                        # 3d, inner dimensions C
                        x = torch.randn(o, n, m, device=device)
                        check_single_nuclear_norm(x, axes)
                        # 3d, inner dimensions Fortran
                        x = torch.randn(o, m, n, device=device).mT
                        check_single_nuclear_norm(x, axes)
                        # 3d, inner dimensions non-contiguous
                        x = torch.randn(o, n, 2 * m, device=device)[:, :, ::2]
                        check_single_nuclear_norm(x, axes)
                        # 3d, all dimensions non-contiguous
                        x = torch.randn(7 * o, 5 * n, 2 * m, device=device)[::7, ::5, ::2]
                        check_single_nuclear_norm(x, axes)
                    for r in range(1, 3):
                        for axes in itertools.permutations([0, 1, 2, 3], 2):
                            # 4d, inner dimensions C
                            x = torch.randn(r, o, n, m, device=device)
                            check_single_nuclear_norm(x, axes)
                            # 4d, inner dimensions Fortran
                            x = torch.randn(r, o, n, m, device=device).mT
                            check_single_nuclear_norm(x, axes)
                            # 4d, inner dimensions non-contiguous
                            x = torch.randn(r, o, n, 2 * m, device=device)[:, :, :, ::2]
                            check_single_nuclear_norm(x, axes)
                            # 4d, all dimensions non-contiguous
                            x = torch.randn(7 * r, 5 * o, 11 * n, 2 * m, device=device)[::7, ::5, ::11, ::2]
                            check_single_nuclear_norm(x, axes)
@skipCUDAIfNoMagma
def test_nuclear_norm_exceptions_old(self, device):
for lst in [], [1], [1, 2]:
x = torch.tensor(lst, dtype=torch.double, device=device)
for axes in (), (0,):
self.assertRaises(RuntimeError, torch.norm, x, "nuc", axes)
self.assertRaises(IndexError, torch.norm, x, "nuc", (0, 1))
x = torch.tensor([[0, 1, 2], [3, 4, 5]], dtype=torch.double, device=device)
self.assertRaisesRegex(RuntimeError, "duplicate or invalid", torch.norm, x, "nuc", (0, 0))
self.assertRaisesRegex(IndexError, "Dimension out of range", torch.norm, x, "nuc", (0, 2))
# ~~~ tests for torch.svd ~~~
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.double)
    def test_svd(self, device, dtype):
        """Exercise the deprecated torch.svd over many shapes, both values of
        `some` and `compute_uv`, with out= tensors, and non-contiguous inputs."""
        def run_test(dims, some, compute_uv):
            x = torch.randn(*dims, dtype=dtype, device=device)
            outu = torch.empty(0, dtype=dtype, device=device)
            outs = torch.empty(0, dtype=dtype, device=device)
            outv = torch.empty(0, dtype=dtype, device=device)
            torch.svd(x, some=some, compute_uv=compute_uv, out=(outu, outs, outv))
            if compute_uv:
                if some:
                    x_recon = torch.matmul(outu, torch.matmul(outs.diag_embed(), outv.mT))
                    self.assertEqual(x, x_recon, atol=1e-8, rtol=0, msg='Incorrect reconstruction using U @ diag(S) @ V.T')
                else:
                    # With some=False, only the first min(m, n) columns of U/V participate
                    # in the reconstruction.
                    narrow_u = outu[..., :min(*dims[-2:])]
                    narrow_v = outv[..., :min(*dims[-2:])]
                    x_recon = torch.matmul(narrow_u, torch.matmul(outs.diag_embed(), narrow_v.mT))
                    self.assertEqual(x, x_recon, atol=1e-8, rtol=0, msg='Incorrect reconstruction using U @ diag(S) @ V.T')
            else:
                # compute_uv=False: singular values must still match, U and V stay zero.
                _, singvals, _ = torch.svd(x, compute_uv=True)
                self.assertEqual(singvals, outs, msg='Singular values mismatch')
                self.assertEqual(outu, torch.zeros_like(outu), msg='U not zero')
                self.assertEqual(outv, torch.zeros_like(outv), msg='V not zero')
            resu, ress, resv = torch.svd(x, some=some, compute_uv=compute_uv)
            self.assertEqual(resu, outu, msg='outputs of svd and svd with out differ')
            self.assertEqual(ress, outs, msg='outputs of svd and svd with out differ')
            self.assertEqual(resv, outv, msg='outputs of svd and svd with out differ')
            # test non-contiguous
            x = torch.randn(*dims, dtype=dtype, device=device)
            if x.numel() > 0:
                n_dim = len(dims)
                # Reverse the batch dimensions and the matrix dimensions and then concat them
                x = x.permute(tuple(range(n_dim - 3, -1, -1)) + (n_dim - 1, n_dim - 2))
                assert not x.is_contiguous(), "x is intentionally non-contiguous"
                resu, ress, resv = torch.svd(x, some=some, compute_uv=compute_uv)
                if compute_uv:
                    if some:
                        x_recon = torch.matmul(resu, torch.matmul(ress.diag_embed(), resv.mT))
                        self.assertEqual(x, x_recon, atol=1e-8, rtol=0, msg='Incorrect reconstruction using U @ diag(S) @ V.T')
                    else:
                        narrow_u = resu[..., :min(*dims[-2:])]
                        narrow_v = resv[..., :min(*dims[-2:])]
                        x_recon = torch.matmul(narrow_u, torch.matmul(ress.diag_embed(), narrow_v.mT))
                        self.assertEqual(x, x_recon, atol=1e-8, rtol=0, msg='Incorrect reconstruction using U @ diag(S) @ V.T')
                else:
                    _, singvals, _ = torch.svd(x, compute_uv=True)
                    self.assertEqual(singvals, ress, msg='Singular values mismatch')
                    self.assertEqual(resu, torch.zeros_like(resu), msg='U not zero')
                    self.assertEqual(resv, torch.zeros_like(resv), msg='V not zero')
        shapes = [(0, 0), (5, 0), (0, 5),  # empty matrices
                  (0, 0, 0), (0, 5, 5), (0, 5, 3),  # zero batch dimension
                  (3, 3), (5, 3, 3), (7, 5, 3, 3),  # square matrices
                  (7, 3), (5, 7, 3), (7, 5, 7, 3),  # fat matrices
                  (3, 7), (5, 3, 7), (7, 5, 3, 7)]  # thin matrices
        for dims, some, compute_uv in product(shapes, [True, False], [True, False]):
            run_test(dims, some, compute_uv)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.float)
def test_svd_no_singularvectors(self, device, dtype):
for size in [(5, 5), (5, 20), (20, 5)]:
a = torch.randn(*size, device=device, dtype=dtype)
u, s_expect, v = torch.svd(a)
u, s_actual, v = torch.svd(a, compute_uv=False)
self.assertEqual(s_expect, s_actual, msg="Singular values don't match")
@skipCUDAIfNoMagmaAndNoCusolver
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double)
def test_svd_lowrank(self, device, dtype):
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
def run_subtest(actual_rank, matrix_size, batches, device, svd_lowrank, **options):
density = options.pop('density', 1)
if isinstance(matrix_size, int):
rows = columns = matrix_size
else:
rows, columns = matrix_size
if density == 1:
a_input = random_lowrank_matrix(actual_rank, rows, columns, *batches, device=device, dtype=dtype)
a = a_input
else:
assert batches == ()
a_input = random_sparse_matrix(rows, columns, density, device=device, dtype=dtype)
a = a_input.to_dense()
q = min(*size)
u, s, v = svd_lowrank(a_input, q=q, **options)
# check if u, s, v is a SVD
u, s, v = u[..., :q], s[..., :q], v[..., :q]
A = u.matmul(s.diag_embed()).matmul(v.mT)
self.assertEqual(A, a, rtol=1e-7, atol=2e-7)
# check if svd_lowrank produces same singular values as torch.svd
U, S, V = torch.svd(a)
self.assertEqual(s.shape, S.shape)
self.assertEqual(u.shape, U.shape)
self.assertEqual(v.shape, V.shape)
self.assertEqual(s, S)
if density == 1:
# actual_rank is known only for dense inputs
#
# check if pairs (u, U) and (v, V) span the same
# subspaces, respectively
u, s, v = u[..., :actual_rank], s[..., :actual_rank], v[..., :actual_rank]
U, S, V = U[..., :actual_rank], S[..., :actual_rank], V[..., :actual_rank]
self.assertEqual(u.mT.matmul(U).det().abs(), torch.ones(batches, device=device, dtype=dtype))
self.assertEqual(v.mT.matmul(V).det().abs(), torch.ones(batches, device=device, dtype=dtype))
all_batches = [(), (1,), (3,), (2, 3)]
for actual_rank, size, all_batches in [
(2, (17, 4), all_batches),
(4, (17, 4), all_batches),
(4, (17, 17), all_batches),
(10, (100, 40), all_batches),
(7, (1000, 1000), [()]),
]:
# dense input
for batches in all_batches:
run_subtest(actual_rank, size, batches, device, torch.svd_lowrank)
if size != size[::-1]:
run_subtest(actual_rank, size[::-1], batches, device, torch.svd_lowrank)
# sparse input
for size in [(17, 4), (4, 17), (17, 17), (100, 40), (40, 100), (1000, 1000)]:
for density in [0.005, 0.1]:
run_subtest(None, size, (), device, torch.svd_lowrank, density=density)
# jitting support
jitted = torch.jit.script(torch.svd_lowrank)
actual_rank, size, batches = 2, (17, 4), ()
run_subtest(actual_rank, size, batches, device, jitted)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.cfloat)
def test_svd_complex(self, device, dtype):
# this test verifies that torch.svd really returns V and not V.conj()
# see: https://github.com/pytorch/pytorch/issues/45821
t = torch.randn((10, 10), dtype=dtype, device=device)
U, S, V = torch.svd(t, some=False)
# verify that t ≈ t2
# t2 = U @ diag(S) @ Vᴴ
# Vᴴ is the conjugate transpose of V
t2 = U @ torch.diag(S).type(dtype) @ V.conj().T
self.assertEqual(t, t2)
def _test_svd_helper(self, shape, some, col_maj, device, dtype):
# test implementation below uses cpu unconditionally
if not torch._C.has_lapack:
reason = "PyTorch compiled without Lapack"
raise unittest.SkipTest(reason)
# To have accurate tests and less false positives on different CPUs and GPUs,
# we use double or complex double accuracy for CPU reference.
cpu_dtype = torch.complex128 if dtype.is_complex else torch.float64
cpu_tensor = torch.randn(shape, device='cpu', dtype=cpu_dtype)
device_tensor = cpu_tensor.to(device=device, dtype=dtype)
if col_maj:
cpu_tensor = cpu_tensor.t()
device_tensor = device_tensor.t()
cpu_result = torch.svd(cpu_tensor, some=some)
device_result = torch.svd(device_tensor, some=some)
m = min(cpu_tensor.shape[-2:])
# torch.svd returns torch.return_types.svd which is a tuple of (U, V, S).
# - When some==False, U[..., m:] can be arbitrary.
# - When some==True, U shape: [..., m], V shape: [m, m]
# - Signs are not deterministic. If the sign of a column of U is changed
# then the corresponding column of the V has to be changed.
# Thus here we only compare result[..., :m].abs() from CPU and device.
for x, y in zip(cpu_result, device_result):
self.assertEqual(x[..., :m].abs(), y[..., :m].abs(), exact_dtype=False)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_svd_errors_and_warnings(self, device, dtype):
        """Exercise the failure paths of torch.svd and torch.linalg.svd:
        resize warnings for wrongly-shaped out= tensors, dtype and device
        checks on out= tensors, and convergence errors on NaN input.
        """
        for svd in [torch.svd, torch.linalg.svd]:
            # if non-empty out tensor with wrong shape is passed a warning is given
            a = torch.randn(3, 3, dtype=dtype, device=device)
            # singular values are always real-valued, even for complex input
            real_dtype = a.real.dtype if dtype.is_complex else dtype
            out_u = torch.empty(2, 2, dtype=dtype, device=device)
            out_s = torch.empty(4, 4, dtype=real_dtype, device=device)
            out_v = torch.empty(6, 6, dtype=dtype, device=device)
            with warnings.catch_warnings(record=True) as w:
                # Trigger warning
                svd(a, out=(out_u, out_s, out_v))
                # Check warning occurs
                # one resize warning per out tensor (U, S, V/Vh)
                self.assertEqual(len(w), 3)
                self.assertTrue("An output with one or more elements was resized" in str(w[-3].message))
                self.assertTrue("An output with one or more elements was resized" in str(w[-2].message))
                self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
            # dtypes should be safely castable
            out_u = torch.empty(0, dtype=torch.int, device=device)
            out_s = torch.empty(0, dtype=torch.int, device=device)
            out_v = torch.empty(0, dtype=torch.int, device=device)
            with self.assertRaisesRegex(RuntimeError, "but got U with dtype Int"):
                svd(a, out=(out_u, out_s, out_v))
            out_u = torch.empty(0, dtype=dtype, device=device)
            # torch.linalg.svd names its third output Vh, torch.svd names it V
            if svd == torch.linalg.svd:
                msg = "but got Vh with dtype Int"
            else:
                msg = "but got V with dtype Int"
            with self.assertRaisesRegex(RuntimeError, msg):
                svd(a, out=(out_u, out_s, out_v))
            out_v = torch.empty(0, dtype=dtype, device=device)
            with self.assertRaisesRegex(RuntimeError, "but got S with dtype Int"):
                svd(a, out=(out_u, out_s, out_v))
            # device should match
            if torch.cuda.is_available():
                wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
                out_u = torch.empty(0, device=wrong_device, dtype=dtype)
                out_s = torch.empty(0, device=wrong_device, dtype=real_dtype)
                out_v = torch.empty(0, device=wrong_device, dtype=dtype)
                # each out tensor is moved to the right device in turn so the
                # error is attributed to a specific output
                with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                    # error from out_u
                    svd(a, out=(out_u, out_s, out_v))
                out_u = torch.empty(0, device=device, dtype=dtype)
                with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                    # error from out_s
                    svd(a, out=(out_u, out_s, out_v))
                out_s = torch.empty(0, device=device, dtype=real_dtype)
                with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                    # error from out_v
                    svd(a, out=(out_u, out_s, out_v))
            # if input contains NaN then an error is triggered for svd
            # the expected message differs between LAPACK (cpu / ROCm) and cuSOLVER
            error_msg = 'The algorithm failed to converge' \
                if (self.device_type == 'cpu' or TEST_WITH_ROCM) \
                else 'CUSOLVER_STATUS_EXECUTION_FAILED'
            a = torch.full((3, 3), float('nan'), dtype=dtype, device=device)
            a[0] = float('nan')
            with self.assertRaisesRegex(RuntimeError, error_msg):
                svd(a)
            # batched input additionally reports which batch element failed
            error_msg = r'\(Batch element 1\): The algorithm failed to converge' \
                if (self.device_type == 'cpu' or TEST_WITH_ROCM) \
                else 'CUSOLVER_STATUS_EXECUTION_FAILED'
            a = torch.randn(3, 33, 33, dtype=dtype, device=device)
            a[1, 0, 0] = float('nan')
            with self.assertRaisesRegex(RuntimeError, error_msg):
                svd(a)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_svd_square(self, device, dtype):
self._test_svd_helper((10, 10), True, False, device, dtype)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_types())
def test_svd_square_col_maj(self, device, dtype):
self._test_svd_helper((10, 10), True, True, device, dtype)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_types())
def test_svd_tall_some(self, device, dtype):
self._test_svd_helper((20, 5), True, False, device, dtype)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_types())
def test_svd_tall_all(self, device, dtype):
self._test_svd_helper((20, 5), False, False, device, dtype)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_types())
def test_svd_tall_some_col_maj(self, device, dtype):
self._test_svd_helper((5, 20), True, True, device, dtype)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_types())
def test_svd_tall_all_col_maj(self, device, dtype):
self._test_svd_helper((5, 20), False, True, device, dtype)
# ~~~ tests for torch.linalg.svd ~~~
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_linalg_svd_compute_uv(self, device, dtype):
"""
Test the default case. Here we have the very same behavior as
NumPy with compute_uv=True.
"""
t = torch.randn((10, 11), device=device, dtype=dtype)
np_t = t.cpu().numpy()
for full_matrices in (True, False):
# check linalg.svd vs numpy
expected = np.linalg.svd(np_t, full_matrices, compute_uv=True)
actual = torch.linalg.svd(t, full_matrices)
# sign/phase of the singular vectors is not unique and therefore absolute values are compared
self.assertEqual(abs(actual[0]), abs(expected[0]))
self.assertEqual(actual[1], expected[1])
self.assertEqual(abs(actual[2]), abs(expected[2]))
# check linalg.svd vs linalg.svd(out=...)
out = (torch.empty_like(actual[0]),
torch.empty_like(actual[1]),
torch.empty_like(actual[2]))
out2 = torch.linalg.svd(t, full_matrices, out=out)
self.assertEqual(actual, out)
self.assertEqual(actual, out2)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_svdvals(self, device, dtype):
def run_test(shape):
# NumPy doesn't have separate svdvals function, it is included in
# svd with compute_uv=False
# so we test our implementation against numpy.linalg.svd(*, compute_uv=False)
A = make_tensor(shape, dtype=dtype, device=device)
expected = np.linalg.svd(A.cpu(), compute_uv=False)
actual = torch.linalg.svdvals(A)
self.assertEqual(actual, expected)
batches = [(), (0, ), (2, ), (2, 1)]
ns = [5, 2, 0]
for batch, (m, n) in itertools.product(batches, product(ns, ns)):
run_test((*batch, m, n))
@skipCUDAIfNoCusolver # MAGMA backend doesn't work in this case
@skipCUDAIfRocm
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_svd_memory_allocation(self, device, dtype):
# test for https://github.com/pytorch/pytorch/issues/61949
# the problem was that tensors of incorrect size were allocated and then narrowed
m = 3
n = 2**20
a = make_tensor((m, n), dtype=dtype, device=device)
# the following should run without errors
result = torch.linalg.svdvals(a)
result = torch.linalg.svd(a, full_matrices=False)
out0 = torch.empty_like(result[0])
out1 = torch.empty_like(result[1])
out2 = torch.empty_like(result[2])
torch.linalg.svdvals(a, out=out0)
torch.linalg.svd(a, full_matrices=False, out=(out0, out1, out2))
def cholesky_solve_test_helper(self, A_dims, b_dims, upper, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
b = torch.randn(*b_dims, dtype=dtype, device=device)
A = random_hermitian_pd_matrix(*A_dims, dtype=dtype, device=device)
L = torch.cholesky(A, upper=upper)
return b, A, L
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_cholesky_solve(self, device, dtype):
for (k, n), upper in itertools.product(zip([2, 3, 5], [3, 5, 7]), [True, False]):
b, A, L = self.cholesky_solve_test_helper((n,), (n, k), upper, device, dtype)
x = torch.cholesky_solve(b, L, upper=upper)
self.assertEqual(b, np.matmul(A.cpu(), x.cpu()))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_cholesky_solve_batched(self, device, dtype):
def cholesky_solve_batch_helper(A_dims, b_dims, upper):
b, A, L = self.cholesky_solve_test_helper(A_dims, b_dims, upper, device, dtype)
x_exp_list = []
for i in range(b_dims[0]):
x_exp_list.append(torch.cholesky_solve(b[i], L[i], upper=upper))
x_exp = torch.stack(x_exp_list) # Stacked output
x_act = torch.cholesky_solve(b, L, upper=upper) # Actual output
self.assertEqual(x_act, x_exp) # Equality check
Ax = np.matmul(A.cpu(), x_act.cpu())
self.assertEqual(b, Ax) # Correctness check
for upper, batchsize in itertools.product([True, False], [1, 3, 4]):
cholesky_solve_batch_helper((5, batchsize), (batchsize, 5, 10), upper)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_cholesky_solve_batched_non_contiguous(self, device, dtype):
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
for upper in [True, False]:
A = random_hermitian_pd_matrix(2, 2, dtype=dtype, device='cpu')
b = torch.randn(2, 2, 2, dtype=dtype, device='cpu')
x_exp = solve(A.permute(0, 2, 1).numpy(), b.permute(2, 1, 0).numpy())
A = A.to(device).permute(0, 2, 1)
b = b.to(device).permute(2, 1, 0)
assert not A.is_contiguous() and not b.is_contiguous(), "contiguous inputs"
L = torch.cholesky(A, upper)
x = torch.cholesky_solve(b, L, upper=upper)
self.assertEqual(x, x_exp)
@slowTest
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_cholesky_solve_batched_many_batches(self, device, dtype):
for A_dims, b_dims in zip([(5, 256, 256), (5,)], [(5, 10), (512, 512, 5, 10)]):
for upper in [True, False]:
b, A, L = self.cholesky_solve_test_helper(A_dims, b_dims, upper, device, dtype)
x = torch.cholesky_solve(b, L, upper)
Ax = torch.matmul(A, x)
self.assertEqual(Ax, b.expand_as(Ax))
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_cholesky_solve_batched_broadcasting(self, device, dtype):
        """Check that cholesky_solve broadcasts the batch dimensions of A and b,
        comparing against numpy.linalg.solve on the un-factored matrix."""
        from numpy.linalg import solve
        from torch.testing._internal.common_utils import random_hermitian_pd_matrix
        def run_test(A_dims, b_dims, upper):
            # A_dims is (*batch, n, n): split out the matrix size and batch shape
            A_matrix_size = A_dims[-1]
            A_batch_dims = A_dims[:-2]
            A = random_hermitian_pd_matrix(A_matrix_size, *A_batch_dims,
                                           dtype=dtype, device='cpu')
            b = torch.randn(*b_dims, dtype=dtype, device='cpu')
            # NumPy reference is computed on CPU, then inputs move to `device`
            x_exp = torch.tensor(solve(A.numpy(), b.numpy()), dtype=dtype, device=device)
            A, b = A.to(dtype=dtype, device=device), b.to(dtype=dtype, device=device)
            L = torch.linalg.cholesky(A, upper=upper)
            x = torch.cholesky_solve(b, L, upper=upper)
            self.assertEqual(x, x_exp)
            # https://github.com/pytorch/pytorch/issues/42695
            # the out= variant must produce the same result
            x = torch.cholesky_solve(b, L, upper=upper, out=x)
            self.assertEqual(x, x_exp)
        # test against numpy.linalg.solve
        for upper in [True, False]:
            run_test((2, 1, 3, 4, 4), (2, 1, 3, 4, 6), upper)  # no broadcasting
            run_test((2, 1, 3, 4, 4), (4, 6), upper)  # broadcasting b
            run_test((4, 4), (2, 1, 3, 4, 2), upper)  # broadcasting A
            run_test((1, 3, 1, 4, 4), (2, 1, 3, 4, 5), upper)  # broadcasting A & b
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float64, torch.complex128)
    def test_cholesky_solve_autograd(self, device, dtype):
        """Gradcheck cholesky_solve with respect to both the triangular factor
        and the right-hand side (double precision required by gradcheck)."""
        def run_test(A_dims, B_dims, upper):
            root = torch.randn(*A_dims, device=device, dtype=dtype).requires_grad_()
            b = torch.randn(*B_dims, device=device, dtype=dtype).requires_grad_()
            def func(root, b, upper):
                # take the requested triangle of the unconstrained `root`
                # as the Cholesky factor
                if upper:
                    A = root.triu()
                else:
                    A = root.tril()
                return torch.cholesky_solve(b, A, upper)
            gradcheck(func, [root, b, upper])
            # TODO(#50743): the following fails with batched grad testing
            # TODO(#56235): disabling temporarily
            # gradgradcheck(func, [root, b, upper], atol=1e-3, check_batched_grad=False)
        # single and batched systems, with both wide and narrow right-hand sides
        for (a_size, b_size), upper in itertools.product([((3, 3), (3, 4)), ((3, 3), (3, 2)),
                                                          ((2, 3, 3), (2, 3, 4)), ((2, 3, 3), (2, 3, 2))],
                                                         [True, False]):
            run_test(a_size, b_size, upper)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_cholesky_solve_out_errors_and_warnings(self, device, dtype):
# dtypes should be safely castable
a = torch.eye(2, dtype=dtype, device=device)
b = torch.randn(2, 1, dtype=dtype, device=device)
out = torch.empty(0, dtype=torch.int, device=device)
with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
torch.cholesky_solve(b, a, out=out)
# device should match
if torch.cuda.is_available():
wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
out = torch.empty(0, dtype=dtype, device=wrong_device)
with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
torch.cholesky_solve(b, a, out=out)
# if out tensor with wrong shape is passed a warning is given
with warnings.catch_warnings(record=True) as w:
out = torch.empty(1, dtype=dtype, device=device)
# Trigger warning
torch.cholesky_solve(b, a, out=out)
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
    @skipCUDAIfNoMagmaAndNoCusolver
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @precisionOverride({torch.float32: 2e-3, torch.complex64: 2e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_inverse(self, device, dtype):
        """Check torch.inverse, torch.linalg.inv and torch.linalg.inv_ex against
        NumPy and against the A @ A^-1 == I identity, including the out=
        variant, batched inputs, and non-contiguous inputs."""
        from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
        def run_test(torch_inverse, matrix, batches, n):
            matrix_inverse = torch_inverse(matrix)
            # Compare against NumPy output
            # NumPy uses 'gesv' LAPACK routine solving the equation A A_inv = I
            # But in PyTorch 'gertf' + 'getri' is used causing element-wise differences
            expected = np.linalg.inv(matrix.cpu().numpy())
            self.assertEqual(matrix_inverse, expected, atol=self.precision, rtol=self.precision)
            # Additional correctness tests, check matrix*matrix_inverse == identity
            identity = torch.eye(n, dtype=dtype, device=device)
            self.assertEqual(identity.expand_as(matrix), np.matmul(matrix.cpu(), matrix_inverse.cpu()))
            self.assertEqual(identity.expand_as(matrix), np.matmul(matrix_inverse.cpu(), matrix.cpu()))
            # check the out= variant
            # prepare the expected out tensor
            # (its transpose is made contiguous, i.e. the out tensor is in
            # column-major layout — presumably to exercise that code path)
            matrix_inverse_out = torch.empty(*batches, n, n, dtype=dtype, device=device)
            matrix_inverse_out_t = matrix_inverse_out.mT.clone(memory_format=torch.contiguous_format)
            matrix_inverse_out = matrix_inverse_out_t.mT
            ans = torch_inverse(matrix, out=matrix_inverse_out)
            self.assertEqual(matrix_inverse_out, ans, atol=0, rtol=0)
            self.assertEqual(matrix_inverse_out, matrix_inverse, atol=0, rtol=0)
            # batched matrices: 3+ dimensional tensors, check matrix_inverse same as single-inverse for each matrix
            if matrix.ndim > 2 and batches[0] != 0:
                expected_inv_list = []
                p = int(np.prod(batches))  # use `p` instead of -1, so that the test works for empty input as well
                for mat in matrix.contiguous().view(p, n, n):
                    expected_inv_list.append(torch_inverse(mat))
                expected_inv = torch.stack(expected_inv_list).view(*batches, n, n)
                if self.device_type == 'cuda' and dtype in [torch.float32, torch.complex64]:
                    # single-inverse is done using cuSOLVER, while batched inverse is done using MAGMA
                    # individual values can be significantly different for fp32, hence rather high rtol is used
                    # the important thing is that torch_inverse passes above checks with identity
                    self.assertEqual(matrix_inverse, expected_inv, atol=1e-1, rtol=1e-2)
                else:
                    self.assertEqual(matrix_inverse, expected_inv)
        # helper function for testing torch.linalg.inv_ex
        # (adapts inv_ex to the same (input, out=) signature as the other two)
        def test_inv_ex(input, out=None):
            if out is not None:
                info = torch.empty(0, dtype=torch.int32, device=device)
                return torch.linalg.inv_ex(input, out=(out, info)).inverse
            return torch.linalg.inv_ex(input).inverse
        for torch_inverse in [torch.inverse, torch.linalg.inv, test_inv_ex]:
            for batches, n in itertools.product(
                [[], [0], [2], [2, 1]],
                [0, 5]
            ):
                matrices = random_fullrank_matrix_distinct_singular_value(n, *batches, dtype=dtype, device=device)
                run_test(torch_inverse, matrices, batches, n)
                # test non-contiguous input
                run_test(torch_inverse, matrices.mT, batches, n)
                if n > 0:
                    # strided (every-other-element) view of a larger matrix
                    run_test(
                        torch_inverse,
                        random_fullrank_matrix_distinct_singular_value(n * 2, *batches, dtype=dtype, device=device)
                        .view(-1, n * 2, n * 2)[:, ::2, ::2].view(*batches, n, n),
                        batches, n
                    )
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_inv_ex_info_device(self, device, dtype):
A = torch.eye(3, 3, dtype=dtype, device=device)
info = torch.linalg.inv_ex(A).info
self.assertTrue(info.device == A.device)
    @skipCUDAIfNoMagmaAndNoCusolver
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @skipCUDAIfRocm
    def test_inv_ex_singular(self, device, dtype):
        """inv_ex on singular input must report the failure via `info` and only
        raise when check_errors=True is requested."""
        # if the input matrix is not invertible, info with positive integer is returned
        A = torch.eye(3, 3, dtype=dtype, device=device)
        A[-1, -1] = 0  # Now A is singular
        info = torch.linalg.inv_ex(A).info
        # info holds the 1-based index of the zero diagonal element
        # (matches the "diagonal element 3" in the error message below)
        self.assertEqual(info, 3)
        with self.assertRaisesRegex(RuntimeError, r'diagonal element 3 is zero, the inversion could not be completed'):
            torch.linalg.inv_ex(A, check_errors=True)
        # if at least one matrix in the batch is not positive definite,
        # batched info with positive integer for the corresponding matrix is returned
        A = torch.eye(3, 3, dtype=dtype, device=device)
        A = A.reshape((1, 3, 3))
        A = A.repeat(5, 1, 1)
        A[3, -2, -2] = 0  # Now A[3] is singular
        info = torch.linalg.inv_ex(A).info
        # only the singular batch element (index 3) gets a non-zero info entry
        expected_info = torch.zeros(A.shape[:-2], dtype=torch.int32, device=device)
        expected_info[3] = 2
        self.assertEqual(info, expected_info)
        with self.assertRaisesRegex(RuntimeError, r'\(Batch element 3\): The diagonal element 2 is zero'):
            torch.linalg.inv_ex(A, check_errors=True)
@slowTest
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float32: 2e-3, torch.complex64: 2e-3,
torch.float64: 1e-5, torch.complex128: 1e-5})
def test_inverse_many_batches(self, device, dtype):
from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
def test_inverse_many_batches_helper(torch_inverse, b, n):
matrices = random_fullrank_matrix_distinct_singular_value(b, n, n, dtype=dtype, device=device)
matrices_inverse = torch_inverse(matrices)
# Compare against NumPy output
expected = np.linalg.inv(matrices.cpu().numpy())
self.assertEqual(matrices_inverse, expected, atol=self.precision, rtol=1e-3)
for torch_inverse in [torch.inverse, torch.linalg.inv]:
test_inverse_many_batches_helper(torch_inverse, 5, 256)
test_inverse_many_batches_helper(torch_inverse, 3, 512)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@onlyNativeDeviceTypes # TODO: XLA doesn't raise exception
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_inverse_errors(self, device, dtype):
# inverse expects batches of square matrices as input
with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
torch.inverse(torch.randn(2, 3, 4, 3))
# if input is not invertible, RuntimeError is raised mentioning the first non-invertible batch
def run_test_singular_input(batch_dim, n):
x = torch.eye(3, 3, dtype=dtype, device=device).reshape((1, 3, 3)).repeat(batch_dim, 1, 1)
x[n, -1, -1] = 0
with self.assertRaisesRegex(RuntimeError, rf'\(Batch element {n}\): The diagonal element 3 is zero'):
torch.inverse(x)
for params in [(1, 0), (2, 0), (2, 1), (4, 0), (4, 2), (10, 2)]:
run_test_singular_input(*params)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@onlyNativeDeviceTypes # TODO: XLA doesn't raise exception
@skipCUDAIfRocm
@skipCUDAVersionIn([(11, 3)]) # https://github.com/pytorch/pytorch/issues/57482
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_inverse_errors_large(self, device, dtype):
# Test batched inverse of singular matrices reports errors without crashing (gh-51930)
x = torch.empty((8, 10, 616, 616), dtype=dtype, device=device)
x[:] = torch.eye(616, dtype=dtype, device=device)
x[..., 10, 10] = 0
with self.assertRaisesRegex(RuntimeError, r'\(Batch element 0\): The diagonal element 11 is zero'):
torch.inverse(x)
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3, torch.float64: 1e-7, torch.complex128: 1e-7})
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_pinv(self, device, dtype):
        """Check torch.linalg.pinv against the Moore-Penrose conditions and
        against NumPy, for square/fat/thin/empty inputs, with hermitian on/off
        and several forms of the rcond/rtol argument."""
        from torch.testing._internal.common_utils import random_hermitian_pd_matrix
        def run_test_main(A, hermitian):
            # Testing against definition for pseudo-inverses
            A_pinv = torch.linalg.pinv(A, hermitian=hermitian)
            np_A = A.cpu().numpy()
            np_A_pinv = A_pinv.cpu().numpy()
            if A.numel() > 0:
                # the four Moore-Penrose conditions:
                # A A+ A == A, A+ A A+ == A+, and both products are Hermitian
                self.assertEqual(A, np_A @ np_A_pinv @ np_A, atol=self.precision, rtol=self.precision)
                self.assertEqual(A_pinv, np_A_pinv @ np_A @ np_A_pinv, atol=self.precision, rtol=self.precision)
                self.assertEqual(np_A @ np_A_pinv, (np_A @ np_A_pinv).conj().swapaxes(-2, -1))
                self.assertEqual(np_A_pinv @ np_A, (np_A_pinv @ np_A).conj().swapaxes(-2, -1))
            else:
                # empty input: only the (transposed) output shape can be checked
                self.assertEqual(A.shape, A_pinv.shape[:-2] + (A_pinv.shape[-1], A_pinv.shape[-2]))
            # Check out= variant
            out = torch.empty_like(A_pinv)
            ans = torch.linalg.pinv(A, hermitian=hermitian, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(ans, A_pinv)
        def run_test_numpy(A, hermitian):
            # Check against NumPy output
            # Test float rcond, and specific value for each matrix
            rconds = [float(torch.rand(1)), ]
            # Test different types of rcond tensor
            for rcond_type in all_types():
                rconds.append(torch.rand(A.shape[:-2], dtype=torch.double, device=device).to(rcond_type))
            # Test broadcasting of rcond
            if A.ndim > 2:
                rconds.append(torch.rand(A.shape[-3], device=device))
            for rcond in rconds:
                actual = torch.linalg.pinv(A, rcond=rcond, hermitian=hermitian)
                # both spellings of the threshold argument must agree
                torch_rtol = torch.linalg.pinv(A, rtol=rcond, hermitian=hermitian)
                self.assertEqual(actual, torch_rtol)
                numpy_rcond = rcond if isinstance(rcond, float) else rcond.cpu().numpy()
                expected = np.linalg.pinv(A.cpu().numpy(), rcond=numpy_rcond, hermitian=hermitian)
                self.assertEqual(actual, expected, atol=self.precision, rtol=1e-5)
        for sizes in [(5, 5), (3, 5, 5), (3, 2, 5, 5),  # square matrices
                      (3, 2), (5, 3, 2), (2, 5, 3, 2),  # fat matrices
                      (2, 3), (5, 2, 3), (2, 5, 2, 3),  # thin matrices
                      (0, 0), (0, 2), (2, 0), (3, 0, 0), (0, 3, 0), (0, 0, 3)]:  # zero numel matrices
            A = torch.randn(*sizes, dtype=dtype, device=device)
            hermitian = False
            run_test_main(A, hermitian)
            run_test_numpy(A, hermitian)
        # Check hermitian = True
        for sizes in [(5, 5), (3, 5, 5), (3, 2, 5, 5),  # square matrices
                      (0, 0), (3, 0, 0), ]:  # zero numel square matrices
            A = random_hermitian_pd_matrix(sizes[-1], *sizes[:-2], dtype=dtype, device=device)
            hermitian = True
            run_test_main(A, hermitian)
            run_test_numpy(A, hermitian)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_pinv_errors_and_warnings(self, device, dtype):
        """Validate error messages and resize warnings raised by torch.linalg.pinv."""
        # pinv requires at least 2D tensor
        a = torch.randn(1, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "expected a tensor with 2 or more dimensions"):
            torch.linalg.pinv(a)
        # if non-empty out tensor with wrong shape is passed a warning is given
        a = torch.randn(3, 3, dtype=dtype, device=device)
        out = torch.empty(7, 7, dtype=dtype, device=device)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.pinv(a, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # dtypes of out and input should be safely castable
        out = torch.empty_like(a).to(torch.int)
        with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
            torch.linalg.pinv(a, out=out)
        if torch.cuda.is_available():
            # device of out and input should match
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty_like(a).to(wrong_device)
            with self.assertRaisesRegex(RuntimeError, "Expected result and input tensors to be on the same device"):
                torch.linalg.pinv(a, out=out)
            # device of rcond and input should match
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            rcond = torch.full((), 1e-2, device=wrong_device)
            with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
                torch.linalg.pinv(a, rcond=rcond)
        # the tolerance arguments must be real-valued in every spelling
        # rcond can't be complex
        rcond = torch.full((), 1j, device=device)
        with self.assertRaisesRegex(RuntimeError, "rcond tensor of complex type is not supported"):
            torch.linalg.pinv(a, rcond=rcond)
        # atol can't be complex
        atol = torch.full((), 1j, device=device)
        with self.assertRaisesRegex(RuntimeError, "atol tensor of complex type is not supported"):
            torch.linalg.pinv(a, atol=atol)
        # rtol can't be complex
        rtol = torch.full((), 1j, device=device)
        with self.assertRaisesRegex(RuntimeError, "rtol tensor of complex type is not supported"):
            torch.linalg.pinv(a, rtol=rtol)
    @skipCUDAIfNoMagmaAndNoCusolver
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_inv_errors_and_warnings(self, device, dtype):
        """Validate error messages and resize warnings raised by torch.linalg.inv."""
        # inv expects batches of square matrices as input
        a = torch.randn(2, 3, 4, 3, dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
            torch.linalg.inv(a)
        # inv requires the input to be at least 2 dimensional tensor
        a = torch.randn(2, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
            torch.linalg.inv(a)
        # if input is not invertible, RuntimeError is raised mentioning the first non-invertible batch
        def run_test_singular_input(batch_dim, n):
            # identity batch with element n made singular (last pivot zeroed)
            a = torch.eye(3, 3, dtype=dtype, device=device).reshape((1, 3, 3)).repeat(batch_dim, 1, 1)
            a[n, -1, -1] = 0
            with self.assertRaisesRegex(RuntimeError, rf"\(Batch element {n}\): The diagonal element 3 is zero"):
                torch.linalg.inv(a)
        for params in [(1, 0), (2, 0), (2, 1), (4, 0), (4, 2), (10, 2)]:
            run_test_singular_input(*params)
        # dtypes should match
        a = torch.eye(2, dtype=dtype, device=device)
        out = torch.empty(0, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "got result with dtype Int"):
            torch.linalg.inv(a, out=out)
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, device=wrong_device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.inv(a, out=out)
        # if out tensor with wrong shape is passed a warning is given
        with warnings.catch_warnings(record=True) as w:
            a = torch.eye(2, dtype=dtype, device=device)
            out = torch.empty(1, dtype=dtype, device=device)
            # Trigger warning
            torch.linalg.inv(a, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # if out tensor is in batched column-major format but has the wrong shape, a warning is given
        with warnings.catch_warnings(record=True) as w:
            a = torch.eye(2, dtype=dtype, device=device)
            out = torch.empty(3, 3, dtype=dtype, device=device)
            # make `out` column-major: its transpose is contiguous
            out = out.mT.clone(memory_format=torch.contiguous_format)
            out = out.mT
            self.assertTrue(out.mT.is_contiguous())
            # Trigger warning
            torch.linalg.inv(a, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
def solve_test_helper(self, A_dims, b_dims, device, dtype):
from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
b = torch.randn(*b_dims, dtype=dtype, device=device)
A = random_fullrank_matrix_distinct_singular_value(*A_dims, dtype=dtype, device=device)
return b, A
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3})
    def test_solve(self, device, dtype):
        """Check torch.linalg.solve against NumPy for matrix and vector
        right-hand sides over batched and empty inputs, including the out=
        variant with matching, wider, and empty out tensors."""
        def run_test(n, batch, rhs):
            A_dims = (n, *batch)
            b_dims = (*batch, n, *rhs)
            b, A = self.solve_test_helper(A_dims, b_dims, device, dtype)
            # Correctness test
            x = torch.linalg.solve(A, b)
            if rhs == ():
                # vector right-hand side: unsqueeze for the matmul check
                Ax = np.matmul(A.cpu(), x.unsqueeze(-1).cpu())
                Ax.squeeze_(-1)
            else:
                Ax = np.matmul(A.cpu(), x.cpu())
            self.assertEqual(b.expand_as(Ax), Ax)
            # Check against NumPy
            expected = np.linalg.solve(A.cpu().numpy(), b.expand_as(x).cpu().numpy())
            self.assertEqual(x, expected)
            # Check out= variant
            out = torch.empty_like(x)
            ans = torch.linalg.solve(A, b, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(x, out)
            # Check out= variant with complex128 out tensor
            # (result must be safely castable into a wider dtype)
            out = torch.empty_like(x).to(torch.complex128)
            ans = torch.linalg.solve(A, b, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(x.to(torch.complex128), out)
            # Check empty out
            out = torch.empty(0, dtype=dtype, device=device)
            ans = torch.linalg.solve(A, b, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(x, out)
        batches = [(), (0, ), (3, ), (2, 3)]
        ns = [0, 5, 32]
        nrhs = [(), (1, ), (5, )]
        for n, batch, rhs in itertools.product(ns, batches, nrhs):
            run_test(n, batch, rhs)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3})
def test_solve_batched_non_contiguous(self, device, dtype):
from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
A = random_fullrank_matrix_distinct_singular_value(2, 2, dtype=dtype, device=device).permute(1, 0, 2)
b = torch.randn(2, 2, 2, dtype=dtype, device=device).permute(2, 1, 0)
self.assertFalse(A.is_contiguous())
self.assertFalse(b.is_contiguous())
actual = torch.linalg.solve(A, b)
expected = np.linalg.solve(A.cpu().numpy(), b.cpu().numpy())
self.assertEqual(actual, expected)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_solve_errors_and_warnings(self, device, dtype):
# solve expects batches of square matrices as input
with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
a = torch.randn(2, 3, 4, 3, dtype=dtype, device=device)
b = torch.randn(2, 3, 4, 1, dtype=dtype, device=device)
torch.linalg.solve(a, b)
# solve expects compatible shapes for A x = b
with self.assertRaisesRegex(RuntimeError, "Incompatible matrix sizes"):
a = torch.randn(2, 3, 3, 3, dtype=dtype, device=device)
b = torch.randn(2, 3, 2, 1, dtype=dtype, device=device)
torch.linalg.solve(a, b)
# if input is not solvable, RuntimeError is raised mentioning the first non-solvable batch
def run_test_singular_input(batch_dim, n):
a = torch.eye(3, 3, dtype=dtype, device=device).reshape((1, 3, 3)).repeat(batch_dim, 1, 1)
a[n, -1, -1] = 0
b = torch.randn(batch_dim, 3, 1, dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, rf'\(Batch element {n}\): The diagonal element 3 is zero'):
torch.linalg.solve(a, b)
for params in [(1, 0), (2, 0), (2, 1), (4, 0), (4, 2), (10, 2)]:
run_test_singular_input(*params)
# if out tensor with wrong shape is passed a warning is given
# matrix 'b' case
with warnings.catch_warnings(record=True) as w:
A = torch.eye(2, dtype=dtype, device=device).reshape((1, 2, 2)).repeat(2, 1, 1)
b = torch.randn(2, 2, 2, dtype=dtype, device=device)
out = torch.zeros(1, dtype=dtype, device=device)
# Trigger warning
torch.linalg.solve(A, b, out=out)
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
# if out tensor with wrong shape is passed a warning is given
# vector 'b' case
with warnings.catch_warnings(record=True) as w:
A = torch.eye(2, dtype=dtype, device=device)
b = torch.randn(2, dtype=dtype, device=device)
out = torch.zeros(1, dtype=dtype, device=device)
# Trigger warning
torch.linalg.solve(A, b, out=out)
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
# dtypes should be safely castable
a = torch.eye(2, dtype=dtype, device=device)
b = torch.randn(2, 1, dtype=dtype, device=device)
out = torch.empty(0, dtype=torch.int, device=device)
with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
torch.linalg.solve(a, b, out=out)
# device should match
if torch.cuda.is_available():
wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
out = torch.empty(0, dtype=dtype, device=wrong_device)
clone_a = torch.empty_like(a)
with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
torch.linalg.solve(a, b, out=out)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_old_solve(self, device, dtype):
for (k, n) in zip([2, 3, 5], [3, 5, 7]):
b, A = self.solve_test_helper((n,), (n, k), device, dtype)
x = torch.solve(b, A)[0]
self.assertEqual(b, np.matmul(A.cpu(), x.cpu()))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_old_solve_batched(self, device, dtype):
def solve_batch_helper(A_dims, b_dims):
b, A = self.solve_test_helper(A_dims, b_dims, device, dtype)
x_exp_list = []
for i in range(b_dims[0]):
x_exp_list.append(torch.solve(b[i], A[i])[0])
x_exp = torch.stack(x_exp_list) # Stacked output
x_act = torch.solve(b, A)[0] # Actual output
self.assertEqual(x_exp, x_act) # Equality check
Ax = np.matmul(A.cpu(), x_act.cpu())
self.assertEqual(b, Ax)
for batchsize in [1, 3, 4]:
solve_batch_helper((5, batchsize), (batchsize, 5, 10))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_old_solve_batched_non_contiguous(self, device, dtype):
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
A = random_fullrank_matrix_distinct_singular_value(2, 2, dtype=dtype, device=device).permute(1, 0, 2)
b = torch.randn(2, 2, 2, dtype=dtype, device=device).permute(2, 1, 0)
x, _ = torch.solve(b, A)
x_exp = solve(A.cpu().numpy(), b.cpu().numpy())
self.assertEqual(x, x_exp)
@slowTest
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_old_solve_batched_many_batches(self, device, dtype):
for A_dims, b_dims in zip([(5, 256, 256), (3, )], [(5, 1), (512, 512, 3, 1)]):
b, A = self.solve_test_helper(A_dims, b_dims, device, dtype)
x, _ = torch.solve(b, A)
Ax = torch.matmul(A, x)
self.assertEqual(Ax, b.expand_as(x))
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_old_solve_batched_broadcasting(self, device, dtype):
        """torch.solve must broadcast batch dims of A and b like numpy.linalg.solve."""
        from numpy.linalg import solve
        def run_test(A_dims, b_dims):
            # solve_test_helper takes (matrix_size, *batch_dims) for A.
            A_matrix_size = A_dims[-1]
            A_batch_dims = A_dims[:-2]
            b, A = self.solve_test_helper((A_matrix_size,) + A_batch_dims, b_dims, device, dtype)
            x, _ = torch.solve(b, A)
            x_exp = solve(A.cpu().numpy(), b.cpu().numpy())
            self.assertEqual(x, x_exp)
        # test against numpy.linalg.solve
        run_test((2, 1, 3, 4, 4), (2, 1, 3, 4, 6)) # no broadcasting
        run_test((2, 1, 3, 4, 4), (4, 6)) # broadcasting b
        run_test((4, 4), (2, 1, 3, 4, 2)) # broadcasting A
        run_test((1, 3, 1, 4, 4), (2, 1, 3, 4, 5)) # broadcasting A & b
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_old_solve_errors_and_warnings(self, device, dtype):
        """torch.solve out= validation: results must be castable and on the right device."""
        # dtypes should be safely castable
        a = torch.eye(2, dtype=dtype, device=device)
        b = torch.randn(2, 1, dtype=dtype, device=device)
        out = torch.empty(0, dtype=torch.int, device=device)
        lu = torch.empty(0, dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got solution with dtype Int"):
            torch.solve(b, a, out=(out, lu))
        out = torch.empty(0, dtype=dtype, device=device)
        lu = torch.empty(0, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got lu with dtype Int"):
            torch.solve(b, a, out=(out, lu))
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            # wrong device for the solution output
            out = torch.empty(0, dtype=dtype, device=wrong_device)
            lu = torch.empty_like(a)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.solve(b, a, out=(out, lu))
            # wrong device for the LU output
            out = torch.empty(0, dtype=dtype, device=device)
            lu = torch.empty_like(a).to(wrong_device)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.solve(b, a, out=(out, lu))
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    @precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4})
    def test_tensorsolve(self, device, dtype):
        """torch.linalg.tensorsolve vs numpy.linalg.tensorsolve, including out=."""
        def run_test(a_shape, dims):
            a = torch.randn(a_shape, dtype=dtype, device=device)
            b = torch.randn(a_shape[:2], dtype=dtype, device=device)
            result = torch.linalg.tensorsolve(a, b, dims=dims)
            # torch's `dims` corresponds to numpy's `axes`.
            expected = np.linalg.tensorsolve(a.cpu().numpy(), b.cpu().numpy(), axes=dims)
            self.assertEqual(result, expected)
            # check the out= variant
            out = torch.empty_like(result)
            ans = torch.linalg.tensorsolve(a, b, dims=dims, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(ans, result)
        a_shapes = [(2, 3, 6), (3, 4, 4, 3)]
        dims = [None, (0, 2)]
        for a_shape, d in itertools.product(a_shapes, dims):
            run_test(a_shape, d)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_tensorsolve_empty(self, device, dtype):
# Check for empty inputs. NumPy does not work for these cases.
a = torch.empty(0, 0, 1, 2, 3, 0, dtype=dtype, device=device)
b = torch.empty(a.shape[:2], dtype=dtype, device=device)
x = torch.linalg.tensorsolve(a, b)
self.assertEqual(torch.tensordot(a, x, dims=len(x.shape)), b)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    @precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4})
    def test_tensorsolve_non_contiguous(self, device, dtype):
        """tensorsolve with non-contiguous inputs and non-contiguous out tensors."""
        def run_test_permuted(a_shape, dims):
            # check for permuted / transposed inputs
            a = torch.randn(a_shape, dtype=dtype, device=device)
            a = a.movedim((0, 2), (-2, -1))
            self.assertFalse(a.is_contiguous())
            b = torch.randn(a.shape[:2], dtype=dtype, device=device)
            b = b.t()
            self.assertFalse(b.is_contiguous())
            result = torch.linalg.tensorsolve(a, b, dims=dims)
            expected = np.linalg.tensorsolve(a.cpu().numpy(), b.cpu().numpy(), axes=dims)
            self.assertEqual(result, expected)
        def run_test_skipped_elements(a_shape, dims):
            # check for inputs with skipped elements
            a = torch.randn(a_shape, dtype=dtype, device=device)
            a = a[::2]
            self.assertFalse(a.is_contiguous())
            b = torch.randn(a_shape[:2], dtype=dtype, device=device)
            b = b[::2]
            self.assertFalse(b.is_contiguous())
            result = torch.linalg.tensorsolve(a, b, dims=dims)
            expected = np.linalg.tensorsolve(a.cpu().numpy(), b.cpu().numpy(), axes=dims)
            self.assertEqual(result, expected)
            # check non-contiguous out
            out = torch.empty(2 * result.shape[0], *result.shape[1:], dtype=dtype, device=device)[::2]
            self.assertFalse(out.is_contiguous())
            ans = torch.linalg.tensorsolve(a, b, dims=dims, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(ans, result)
        a_shapes = [(2, 3, 6), (3, 4, 4, 3)]
        dims = [None, (0, 2)]
        for a_shape, d in itertools.product(a_shapes, dims):
            run_test_permuted(a_shape, d)
        # even first-dim sizes here so a[::2] still reshapes to a square system
        a_shapes = [(4, 3, 6), (6, 4, 4, 3)]
        dims = [None, (0, 2)]
        for a_shape, d in itertools.product(a_shapes, dims):
            run_test_skipped_elements(a_shape, d)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32)
    def test_tensorsolve_errors_and_warnings(self, device, dtype):
        """tensorsolve validation: shape requirement, out= resize warning, dtype, device."""
        # tensorsolve expects the input that can be reshaped to a square matrix
        a = torch.eye(2 * 3 * 4, dtype=dtype, device=device).reshape((2 * 3, 4, 2, 3, 4))
        b = torch.randn(8, 4, dtype=dtype, device=device)
        self.assertTrue(np.prod(a.shape[2:]) != np.prod(b.shape))
        with self.assertRaisesRegex(RuntimeError, r'Expected self to satisfy the requirement'):
            torch.linalg.tensorsolve(a, b)
        # if non-empty out tensor with wrong shape is passed a warning is given
        out = torch.empty_like(a)
        b = torch.randn(6, 4, dtype=dtype, device=device)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.tensorsolve(a, b, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # dtypes should be safely castable
        out = torch.empty_like(a).to(torch.int)
        with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
            torch.linalg.tensorsolve(a, b, out=out)
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, dtype=dtype, device=wrong_device)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.tensorsolve(a, b, out=out)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @precisionOverride({torch.float: 1e-3, torch.cfloat: 1e-3})
    def test_tensorinv(self, device, dtype):
        """torch.linalg.tensorinv vs numpy.linalg.tensorinv, including out=."""
        def run_test(a_shape, ind):
            a = torch.randn(a_shape, dtype=dtype, device=device)
            a_numpy = a.cpu().numpy()
            result = torch.linalg.tensorinv(a, ind=ind)
            expected = np.linalg.tensorinv(a_numpy, ind=ind)
            self.assertEqual(result, expected)
            # check the out= variant
            out = torch.empty_like(result)
            ans = torch.linalg.tensorinv(a, ind=ind, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(ans, result)
        # compare to NumPy output
        # each shape satisfies prod(shape[:ind]) == prod(shape[ind:])
        run_test((12, 3, 4), ind=1)
        run_test((3, 8, 24), ind=2)
        run_test((18, 3, 3, 2), ind=1)
        run_test((1, 4, 2, 2), ind=2)
        run_test((2, 3, 5, 30), ind=3)
        run_test((24, 2, 2, 3, 2), ind=1)
        run_test((3, 4, 2, 3, 2), ind=2)
        run_test((1, 2, 3, 2, 3), ind=3)
        run_test((3, 2, 1, 2, 12), ind=4)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @precisionOverride({torch.float: 1e-3, torch.cfloat: 1e-3})
    def test_tensorinv_non_contiguous(self, device, dtype):
        """tensorinv with permuted inputs, strided inputs, and non-contiguous out."""
        def run_test(a_shape, ind):
            # check for permuted (transposed) case
            a = torch.randn(a_shape, dtype=dtype, device=device)
            permutation = list(range(0, a.ndim))
            a = a.permute(permutation[ind:] + permutation[:ind])
            self.assertFalse(a.is_contiguous())
            a_numpy = a.cpu().numpy()
            # the permutation moved `ind` leading dims to the back, so the
            # effective split point is now a.ndim - ind
            result = torch.linalg.tensorinv(a, ind=a.ndim - ind)
            expected = np.linalg.tensorinv(a_numpy, ind=a.ndim - ind)
            self.assertEqual(result, expected)
        def run_test_skipped_elements(a_shape, ind):
            # check for input with skipped elements
            a = torch.randn(a_shape, dtype=dtype, device=device)
            a = a[::2]
            self.assertFalse(a.is_contiguous())
            a_numpy = a.cpu().numpy()
            result = torch.linalg.tensorinv(a, ind=ind)
            expected = np.linalg.tensorinv(a_numpy, ind=ind)
            self.assertEqual(result, expected)
            # check non-contiguous out
            out = torch.empty(2 * result.shape[0], *result.shape[1:], dtype=dtype, device=device)[::2]
            self.assertFalse(out.is_contiguous())
            ans = torch.linalg.tensorinv(a, ind=ind, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(ans, result)
        run_test((12, 3, 4), ind=1)
        run_test((3, 8, 24), ind=2)
        run_test((18, 3, 3, 2), ind=1)
        run_test((1, 4, 2, 2), ind=2)
        run_test((2, 3, 5, 30), ind=3)
        run_test((24, 2, 2, 3, 2), ind=1)
        run_test((3, 4, 2, 3, 2), ind=2)
        run_test((1, 2, 3, 2, 3), ind=3)
        run_test((3, 2, 1, 2, 12), ind=4)
        run_test_skipped_elements((12, 3, 2), ind=1)
        run_test_skipped_elements((18, 3, 3, 1), ind=1)
@skipMeta # See https://github.com/pytorch/pytorch/issues/53739
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_tensorinv_empty(self, device, dtype):
for ind in range(1, 4):
# Check for empty inputs. NumPy does not work for these cases.
a = torch.empty(0, 0, 1, 2, 3, 0, dtype=dtype, device=device)
a_inv = torch.linalg.tensorinv(a, ind=ind)
self.assertEqual(a_inv.shape, a.shape[ind:] + a.shape[:ind])
    @skipMeta  # See https://github.com/pytorch/pytorch/issues/53739
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_tensorinv_errors_and_warnings(self, device, dtype):
        """tensorinv validation: shape requirement, ind > 0, out= resize/dtype/device."""
        def check_shape(a_shape, ind):
            # tensorinv requires the input to satisfy
            # prod(a.shape[ind:]) == prod(a.shape[:ind])
            a = torch.randn(a_shape, dtype=dtype, device=device)
            with self.assertRaisesRegex(RuntimeError, "Expected self to satisfy the requirement"):
                torch.linalg.tensorinv(a, ind=ind)
        def check_ind(a_shape, ind):
            # ind must be a strictly positive integer
            a = torch.randn(a_shape, dtype=dtype, device=device)
            with self.assertRaisesRegex(RuntimeError, "Expected a strictly positive integer"):
                torch.linalg.tensorinv(a, ind=ind)
        def check_out(a_shape, ind):
            # if non-empty out tensor with wrong shape is passed a warning is given
            a = torch.randn(a_shape, dtype=dtype, device=device)
            out = torch.empty_like(a)
            with warnings.catch_warnings(record=True) as w:
                # Trigger warning
                torch.linalg.tensorinv(a, ind=ind, out=out)
                # Check warning occurs
                self.assertEqual(len(w), 1)
                self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
            # dtypes should be safely castable
            out = torch.empty(0, dtype=torch.int, device=device)
            with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
                torch.linalg.tensorinv(a, ind=ind, out=out)
            # device should match
            if torch.cuda.is_available():
                wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
                out = torch.empty(0, dtype=dtype, device=wrong_device)
                with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                    torch.linalg.tensorinv(a, ind=ind, out=out)
        # test for invalid shape
        check_shape((2, 3, 4), ind=1)
        check_shape((1, 2, 3, 4), ind=3)
        # test for invalid ind
        check_ind((12, 3, 4), ind=-1)
        check_ind((18, 3, 3, 2), ind=0)
        # test for invalid out tensor
        check_out((12, 3, 4), ind=1)
        check_out((3, 8, 24), ind=2)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_tensorinv_singular_input(self, device, dtype):
def check_singular_input(a_shape, ind):
prod_ind_end = np.prod(a_shape[ind:])
a = torch.eye(prod_ind_end, dtype=dtype, device=device)
a[-1, -1] = 0 # Now `a` is singular
a = a.reshape(a_shape)
with self.assertRaisesRegex(RuntimeError, "Failed to invert the input tensor, because it is singular"):
torch.linalg.tensorinv(a, ind=ind)
# test for non-invertible input
check_singular_input((12, 3, 4), ind=1)
check_singular_input((3, 6, 18), ind=2)
    def _test_dot_vdot_vs_numpy(self, device, dtype, torch_fn, np_fn):
        """Compare a torch dot-like function against its NumPy counterpart.

        Covers empty, contiguous, 0-strided (expanded) and 2-strided inputs,
        plus the out= variant.
        """
        def check(x, y):
            # Compare with numpy
            res = torch_fn(x, y)
            if x.dtype == torch.bfloat16:
                # NumPy has no bfloat16; compute the reference in float32.
                ref = torch.from_numpy(np.array(np_fn(x.cpu().float().numpy(), y.cpu().float().numpy())))
            else:
                ref = torch.from_numpy(np.array(np_fn(x.cpu().numpy(), y.cpu().numpy())))
            if res.dtype == torch.bfloat16:
                self.assertEqual(res.cpu(), ref.bfloat16())
            else:
                self.assertEqual(res.cpu(), ref)
            # Test out variant
            out = torch.empty_like(res)
            torch_fn(x, y, out=out)
            self.assertEqual(out, res)
        # Empty
        x = torch.tensor([], dtype=dtype, device=device)
        y = torch.tensor([], dtype=dtype, device=device)
        check(x, y)
        # Contiguous
        x = 0.1 * torch.randn(5000, dtype=dtype, device=device)
        y = 0.1 * torch.randn(5000, dtype=dtype, device=device)
        check(x, y)
        # 0 strided
        y = 0.1 * torch.randn(1, dtype=dtype, device=device).expand(5000)
        check(x, y)
        # 2 strided
        check(x[::2], y[::2])
@dtypes(torch.float, torch.cfloat, torch.bfloat16)
@dtypesIfCUDA(torch.float, torch.cfloat)
@precisionOverride({torch.cfloat: 1e-4, torch.float32: 5e-5, torch.bfloat16: 1e-0})
def test_dot_vs_numpy(self, device, dtype):
self._test_dot_vdot_vs_numpy(device, dtype, torch.dot, np.dot)
@dtypes(torch.float, torch.cfloat)
@precisionOverride({torch.cfloat: 1e-4, torch.float32: 5e-5})
def test_vdot_vs_numpy(self, device, dtype):
self._test_dot_vdot_vs_numpy(device, dtype, torch.vdot, np.vdot)
    def _test_dot_vdot_invalid_args(self, device, torch_fn, complex_dtypes=False):
        """Check that a dot-like function rejects invalid argument combinations.

        Exercises mismatched dtypes, non-1D inputs, mismatched lengths, and
        (on non-CPU devices) mismatched devices.
        """
        def check(x, y, regex):
            with self.assertRaisesRegex(RuntimeError, regex):
                torch_fn(x, y)
        if complex_dtypes:
            x = torch.randn(1, dtype=torch.cfloat, device=device)
            y = torch.randn(3, dtype=torch.cdouble, device=device)
        else:
            x = torch.randn(1, dtype=torch.float, device=device)
            y = torch.randn(3, dtype=torch.double, device=device)
        check(x, y, 'dot : expected both vectors to have same dtype')
        check(x.reshape(1, 1), y, '1D tensors expected')
        check(x.expand(9), y.to(x.dtype), 'inconsistent tensor size')
        # cross-device check is only meaningful when the test device isn't CPU
        if self.device_type != 'cpu':
            x_cpu = x.expand(3).cpu()
            check(x_cpu, y.to(x.dtype), 'Expected all tensors to be on the same device')
@onlyNativeDeviceTypes
def test_vdot_invalid_args(self, device):
self._test_dot_vdot_invalid_args(device, torch.vdot)
self._test_dot_vdot_invalid_args(device, torch.vdot, complex_dtypes=True)
@onlyNativeDeviceTypes
def test_dot_invalid_args(self, device):
self._test_dot_vdot_invalid_args(device, torch.dot)
self._test_dot_vdot_invalid_args(device, torch.dot, complex_dtypes=True)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_matrix_rank(self, device, dtype):
        """torch.linalg.matrix_rank: invariance under conjugate transpose, the
        hermitian= fast path, agreement with NumPy, and the out= variant."""
        matrix_rank = torch.linalg.matrix_rank
        def run_test(shape0, shape1, batch):
            a = torch.randn(*batch, shape0, shape1, dtype=dtype, device=device)
            rank_a = matrix_rank(a)
            # rank(A) == rank(A^H)
            self.assertEqual(rank_a, matrix_rank(a.mH))
            aaH = torch.matmul(a, a.mH)
            rank_aaH = matrix_rank(aaH)
            rank_aaH_hermitian = matrix_rank(aaH, hermitian=True)
            self.assertEqual(rank_aaH, rank_aaH_hermitian)
            aHa = torch.matmul(a.mH, a)
            self.assertEqual(matrix_rank(aHa), matrix_rank(aHa, hermitian=True))
            # check against NumPy
            self.assertEqual(rank_a, np.linalg.matrix_rank(a.cpu().numpy()))
            self.assertEqual(matrix_rank(a, 0.01), np.linalg.matrix_rank(a.cpu().numpy(), 0.01))
            self.assertEqual(rank_aaH, np.linalg.matrix_rank(aaH.cpu().numpy()))
            self.assertEqual(matrix_rank(aaH, 0.01), np.linalg.matrix_rank(aaH.cpu().numpy(), 0.01))
            # hermitian flag for NumPy was added in 1.14.0
            if np.lib.NumpyVersion(np.__version__) >= '1.14.0':
                self.assertEqual(rank_aaH_hermitian,
                                 np.linalg.matrix_rank(aaH.cpu().numpy(), hermitian=True))
                self.assertEqual(matrix_rank(aaH, 0.01, True),
                                 np.linalg.matrix_rank(aaH.cpu().numpy(), 0.01, True))
            # check out= variant
            out = torch.empty(a.shape[:-2], dtype=torch.int64, device=device)
            ans = matrix_rank(a, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(ans, rank_a)
        shapes = (3, 13)
        batches = ((), (0, ), (4, ), (3, 5, ))
        for (shape0, shape1), batch in zip(itertools.product(shapes, reversed(shapes)), batches):
            run_test(shape0, shape1, batch)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_matrix_rank_atol(self, device, dtype):
        """matrix_rank's atol= must behave like the legacy tol= and match NumPy."""
        def run_test_atol(shape0, shape1, batch):
            a = make_tensor((*batch, shape0, shape1), dtype=dtype, device=device)
            # Check against NumPy output
            # Test float tol, and specific value for each matrix
            tolerances = [float(torch.rand(1)), ]
            # Test different types of tol tensor
            for tol_type in all_types():
                tolerances.append(make_tensor(a.shape[:-2], dtype=tol_type, device=device, low=0))
            # Test broadcasting of tol
            if a.ndim > 2:
                tolerances.append(make_tensor(a.shape[-3], dtype=torch.float32, device=device, low=0))
            for tol in tolerances:
                actual = torch.linalg.matrix_rank(a, atol=tol)
                # the deprecated tol= alias must give the same answer
                actual_tol = torch.linalg.matrix_rank(a, tol=tol)
                self.assertEqual(actual, actual_tol)
                numpy_tol = tol if isinstance(tol, float) else tol.cpu().numpy()
                expected = np.linalg.matrix_rank(a.cpu().numpy(), tol=numpy_tol)
                self.assertEqual(actual, expected)
        shapes = (3, 13)
        batches = ((), (0, ), (4, ), (3, 5, ))
        for (shape0, shape1), batch in zip(itertools.product(shapes, reversed(shapes)), batches):
            run_test_atol(shape0, shape1, batch)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float64)
    def test_matrix_rank_atol_rtol(self, device, dtype):
        """Distinguish matrix_rank's atol (absolute) from rtol (relative) semantics."""
        from torch.testing._internal.common_utils import make_fullrank_matrices_with_distinct_singular_values
        # creates a matrix with singular values arange(1/(n+1), 1, 1/(n+1)) and rank=n
        n = 9
        a = make_fullrank_matrices_with_distinct_singular_values(n, n, dtype=dtype, device=device)
        # test float and tensor variants
        for tol_value in [0.51, torch.tensor(0.51, device=device)]:
            # using rtol (relative tolerance) takes into account the largest singular value (0.9 in this case)
            result = torch.linalg.matrix_rank(a, rtol=tol_value)
            self.assertEqual(result, 5)  # there are 5 singular values above 0.9*0.51=0.459
            # atol is used directly to compare with singular values
            result = torch.linalg.matrix_rank(a, atol=tol_value)
            self.assertEqual(result, 4)  # there are 4 singular values above 0.51
            # when both are specified the maximum tolerance is used
            result = torch.linalg.matrix_rank(a, atol=tol_value, rtol=tol_value)
            self.assertEqual(result, 4)  # there are 4 singular values above max(0.51, 0.9*0.51)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_matrix_rank_empty(self, device, dtype):
        """matrix_rank of empty matrices must be zero for every batch element."""
        matrix_rank = torch.linalg.matrix_rank
        # NumPy doesn't work for input with no elements
        def run_test(shape0, shape1, batch):
            a = torch.randn(*batch, shape0, shape1, dtype=dtype, device=device)
            rank_a = matrix_rank(a)
            # zero rank expected for every batch element
            expected = torch.zeros(batch, dtype=torch.int64, device=device)
            self.assertEqual(rank_a, matrix_rank(a.mH))
            aaH = torch.matmul(a, a.mH)
            rank_aaH = matrix_rank(aaH)
            rank_aaH_hermitian = matrix_rank(aaH, hermitian=True)
            self.assertEqual(rank_aaH, rank_aaH_hermitian)
            aHa = torch.matmul(a.mH, a)
            self.assertEqual(matrix_rank(aHa), matrix_rank(aHa, hermitian=True))
            self.assertEqual(rank_a, expected)
            self.assertEqual(matrix_rank(a, 0.01), expected)
            self.assertEqual(rank_aaH, expected)
            self.assertEqual(matrix_rank(aaH, 0.01), expected)
            self.assertEqual(rank_aaH_hermitian, expected)
            self.assertEqual(matrix_rank(aaH, 0.01, True), expected)
        batches = ((), (4, ), (3, 5, ))
        for batch in batches:
            run_test(0, 0, batch)
            run_test(0, 3, batch)
            run_test(3, 0, batch)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_matrix_rank_out_errors_and_warnings(self, device, dtype):
        """matrix_rank out= validation: dtype, device, and resize warning."""
        # dtypes should be safely castable
        a = torch.eye(2, dtype=dtype, device=device)
        out = torch.empty(0, dtype=torch.bool, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got result with dtype Bool"):
            torch.linalg.matrix_rank(a, out=out)
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, dtype=dtype, device=wrong_device)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.matrix_rank(a, out=out)
        # if out tensor with wrong shape is passed a warning is given
        with warnings.catch_warnings(record=True) as w:
            out = torch.empty(3, dtype=dtype, device=device)
            # Trigger warning
            torch.linalg.matrix_rank(a, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_matrix_rank_basic(self, device, dtype):
matrix_rank = torch.linalg.matrix_rank
a = torch.eye(10, dtype=dtype, device=device)
self.assertEqual(matrix_rank(a).item(), 10)
self.assertEqual(matrix_rank(a, hermitian=True).item(), 10)
a[5, 5] = 0
self.assertEqual(matrix_rank(a).item(), 9)
self.assertEqual(matrix_rank(a, hermitian=True).item(), 9)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_old_matrix_rank(self, device, dtype):
        """Deprecated torch.matrix_rank: basic ranks, symmetric flag, NumPy parity."""
        a = torch.eye(10, dtype=dtype, device=device)
        self.assertEqual(torch.matrix_rank(a).item(), 10)
        self.assertEqual(torch.matrix_rank(a, True).item(), 10)
        # zeroing one diagonal entry drops the rank by one
        a[5, 5] = 0
        self.assertEqual(torch.matrix_rank(a).item(), 9)
        self.assertEqual(torch.matrix_rank(a, True).item(), 9)
        a = torch.randn(24, 42, dtype=dtype, device=device)
        self.assertEqual(torch.matrix_rank(a), torch.matrix_rank(a.t()))
        aaT = torch.mm(a, a.conj().t())
        self.assertEqual(torch.matrix_rank(aaT), torch.matrix_rank(aaT, True))
        aTa = torch.mm(a.conj().t(), a)
        self.assertEqual(torch.matrix_rank(aTa), torch.matrix_rank(aTa, True))
        # compare against NumPy, with and without an explicit tolerance
        a = torch.randn(35, 75, dtype=dtype, device=device)
        self.assertEqual(torch.matrix_rank(a), np.linalg.matrix_rank(a.cpu().numpy()))
        self.assertEqual(torch.matrix_rank(a, 0.01), np.linalg.matrix_rank(a.cpu().numpy(), 0.01))
        aaT = torch.mm(a, a.conj().t())
        self.assertEqual(torch.matrix_rank(aaT), np.linalg.matrix_rank(aaT.cpu().numpy()))
        self.assertEqual(torch.matrix_rank(aaT, 0.01), np.linalg.matrix_rank(aaT.cpu().numpy(), 0.01))
        # hermitian flag for NumPy was added in 1.14.0
        if np.lib.NumpyVersion(np.__version__) >= '1.14.0':
            self.assertEqual(torch.matrix_rank(aaT, True), np.linalg.matrix_rank(aaT.cpu().numpy(), True))
            self.assertEqual(torch.matrix_rank(aaT, 0.01, True), np.linalg.matrix_rank(aaT.cpu().numpy(), 0.01, True))
@onlyNativeDeviceTypes
@dtypes(torch.double)
# This tests only the cases where torch.chain_matmul differs from torch.linalg.multi_dot which this is an "alias" for.
def test_chain_matmul(self, device, dtype):
# chain_matmul accepts a single input tensor while multi_dot does not
t = make_tensor((2, 2), device, dtype)
self.assertEqual(t, torch.chain_matmul(t))
with self.assertRaisesRegex(RuntimeError, r"chain_matmul\(\): Expected one or more matrices"):
torch.chain_matmul()
# chain_matmul expects all tensors to be 2D whereas multi_dot allows the first and last tensors to
# be either 1D or 2D
with self.assertRaisesRegex(RuntimeError, r"Tensor dimension is 1, expected 2 instead"):
torch.chain_matmul(make_tensor(1, device, dtype), make_tensor(1, device, dtype))
    @onlyNativeDeviceTypes
    @dtypes(torch.double, torch.cdouble)
    def test_multi_dot(self, device, dtype):
        """torch.linalg.multi_dot vs numpy.linalg.multi_dot across shapes/layouts."""
        def check(*shapes, noncontiguous=False):
            tensors = [make_tensor(shape, device, dtype, noncontiguous=noncontiguous) for shape in shapes]
            np_arrays = [tensor.cpu().numpy() for tensor in tensors]
            res = torch.linalg.multi_dot(tensors).cpu()
            ref = torch.from_numpy(np.array(np.linalg.multi_dot(np_arrays)))
            self.assertEqual(res, ref)
        # test for inputs with empty dimensions
        check([0], [0])
        check([2], [2, 0])
        check([1, 0], [0])
        check([0, 2], [2, 1])
        check([2, 2], [2, 0])
        check([2, 0], [0, 3])
        check([0, 0], [0, 1])
        check([4, 2], [2, 0], [0, 3], [3, 2])
        # test variable output shapes
        check([2], [2])
        check([1, 2], [2])
        check([2], [2, 1])
        check([1, 2], [2, 1])
        check([3, 2], [2, 4])
        # test multiple input tensors
        check([3], [3, 4], [4, 2], [2, 5], [5])
        check([1, 2], [2, 2], [2, 3], [3, 1])
        # test large tensors
        check([10, 100], [100, 5], [5, 50])
        check([10, 20], [20, 30], [30, 5])
        # test noncontiguous input
        check([3, 2], [2, 2], [2, 3], [3, 4], noncontiguous=True)
        check([15, 5], [5, 10], [10, 20], [20, 25], noncontiguous=True)
    @onlyNativeDeviceTypes
    @dtypes(torch.float)
    def test_multi_dot_errors(self, device, dtype):
        """multi_dot argument validation; the regexes match the runtime messages verbatim."""
        def check(tensors, out, msg):
            with self.assertRaisesRegex(RuntimeError, msg):
                torch.linalg.multi_dot(tensors, out=out)
        a = make_tensor(2, device, dtype)
        check([], None, "expected at least 2 tensors")
        check([a], None, "expected at least 2 tensors")
        # only the first and last tensors may be 1D
        check([torch.tensor(1, device=device, dtype=dtype), a], None, "the first tensor must be 1D or 2D")
        check([a, torch.tensor(1, device=device, dtype=dtype)], None, "the last tensor must be 1D or 2D")
        check([a, a, a], None, "tensor 1 must be 2D")
        check([a, make_tensor((2, 2, 2), device, dtype), a], None, "tensor 1 must be 2D")
        # NOTE: "must have be" matches the actual runtime message, typo included
        check([a, make_tensor(2, device, torch.double)], None, "all tensors must have be the same dtype")
        check([a, a], torch.empty(0, device=device, dtype=torch.double), "expected out tensor to have dtype")
        if self.device_type == 'cuda':
            check([a, make_tensor(2, 'cpu', dtype)], None, "all tensors must be on the same device")
            check([a, a], torch.empty(0, dtype=dtype), "expected out tensor to be on device")
        check([a, make_tensor(3, device, dtype)], None, "cannot be multiplied")
        check([a, make_tensor((3, 2), device, dtype), a], None, "cannot be multiplied")
    @precisionOverride({torch.float32: 5e-6, torch.complex64: 5e-6})
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_qr(self, device, dtype):
        """Deprecated torch.qr: shapes, A == QR reconstruction, out=, orthogonality."""
        def run_test(tensor_dims, some):
            A = torch.randn(*tensor_dims, dtype=dtype, device=device)
            Q, R = torch.qr(A, some=some)
            # Check0: Q[-2:] = (m, n_columns), R[-2:] = (n_columns, n)
            m, n = tensor_dims[-2:]
            n_columns = m if (not some) and m > n else min(m, n)
            self.assertEqual(Q.size(-2), m)
            self.assertEqual(R.size(-1), n)
            self.assertEqual(Q.size(-1), n_columns)
            A_ = A.cpu().numpy()
            Q_ = Q.cpu().numpy()
            R_ = R.cpu().numpy()
            # Check1: A = QR
            self.assertEqual(A_, np.matmul(Q_, R_))
            # Check2: A = QR (with out)
            Q_out, R_out = torch.full_like(Q, math.nan), torch.full_like(R, math.nan)
            torch.qr(A, some=some, out=(Q_out, R_out))
            Q_out_ = Q_out.cpu().numpy()
            R_out_ = R_out.cpu().numpy()
            self.assertEqual(A_, np.matmul(Q_out_, R_out_))
            # Check3: Q == Q_out, R == R_out
            self.assertEqual(Q_, Q_out_)
            self.assertEqual(R_, R_out_)
            # Check4: Q^{T}Q = I, triu(R) = R
            eye = torch.eye(n_columns, device=device, dtype=dtype).expand(Q.shape[:-2] + (n_columns, n_columns)).cpu().numpy()
            self.assertEqual(np.matmul(Q_.swapaxes(-1, -2).conj(), Q_), eye)
            self.assertEqual(R.triu(), R)
        tensor_dims_list = [(0, 5), (0, 0), (5, 0), # Empty Tensors
                            (2, 1, 0, 5), (2, 1, 0, 0), (2, 1, 5, 0), (2, 0, 5, 5), # Batched empty Tensors
                            (3, 5), (5, 5), (5, 3), # Single matrix
                            (7, 3, 5), (7, 5, 5), (7, 5, 3), # 3-dim Tensors
                            (7, 5, 3, 5), (7, 5, 5, 5), (7, 5, 5, 3)] # 4-dim Tensors
        for tensor_dims, some in itertools.product(tensor_dims_list, [True, False]):
            run_test(tensor_dims, some)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_qr_vs_numpy(self, device, dtype):
        """
        test torch.linalg.qr vs numpy.linalg.qr
        """
        sizes_to_test = [
            (7, 5),
            (5, 7),
            (5, 0),  # empty
            (0, 5),  # empty
        ]
        for size in sizes_to_test:
            t = torch.randn(size, device=device, dtype=dtype)
            np_t = t.cpu().numpy()
            for mode in ['reduced', 'complete']:
                exp_q, exp_r = np.linalg.qr(np_t, mode=mode)
                q, r = torch.linalg.qr(t, mode=mode)
                self.assertEqual(q, exp_q)
                self.assertEqual(r, exp_r)
            #
            # for mode='r' we need a special logic because numpy returns only r
            exp_r = np.linalg.qr(np_t, mode='r')
            q, r = torch.linalg.qr(t, mode='r')
            # check that q is empty (torch returns a placeholder, numpy returns nothing)
            self.assertEqual(q.shape, (0,))
            self.assertEqual(q.dtype, t.dtype)
            self.assertEqual(q.device, t.device)
            # check r
            self.assertEqual(r, exp_r)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float)
    def test_linalg_qr_autograd_errors(self, device, dtype):
        """Modes of torch.linalg.qr without a defined backward must raise cleanly."""
        # torch.linalg.qr(mode='r') returns only 'r' and discards 'q', but
        # without 'q' you cannot compute the backward pass. Check that
        # linalg_qr_backward complains cleanly in that case.
        inp = torch.randn((5, 7), device=device, dtype=dtype, requires_grad=True)
        q, r = torch.linalg.qr(inp, mode='r')
        self.assertEqual(q.shape, (0,))  # empty tensor
        b = torch.sum(r)
        with self.assertRaisesRegex(RuntimeError,
                                    "The derivative of qr is not implemented when mode='r'"):
            b.backward()
        #
        # mode='complete' with more rows than columns is also unsupported
        inp = torch.randn((7, 5), device=device, dtype=dtype, requires_grad=True)
        q, r = torch.linalg.qr(inp, mode='complete')
        b = torch.sum(r)
        with self.assertRaisesRegex(RuntimeError,
                                    "The derivative of qr is not implemented when mode='complete' and nrows > ncols"):
            b.backward()
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_qr_batched(self, device, dtype):
        """
        test torch.linalg.qr vs numpy.linalg.qr. We need some special logic
        because numpy does not support batched qr
        """
        def np_qr_batched(a, mode):
            """poor's man batched version of np.linalg.qr"""
            all_q = []
            all_r = []
            for matrix in a:
                result = np.linalg.qr(matrix, mode=mode)
                if mode == 'r':
                    # mode='r' returns only the r matrix
                    all_r.append(result)
                else:
                    q, r = result
                    all_q.append(q)
                    all_r.append(r)
            if mode == 'r':
                return np.array(all_r)
            else:
                return np.array(all_q), np.array(all_r)
        t = torch.randn((3, 7, 5), device=device, dtype=dtype)
        np_t = t.cpu().numpy()
        for mode in ['reduced', 'complete']:
            exp_q, exp_r = np_qr_batched(np_t, mode=mode)
            q, r = torch.linalg.qr(t, mode=mode)
            self.assertEqual(q, exp_q)
            self.assertEqual(r, exp_r)
        # for mode='r' we need a special logic because numpy returns only r
        exp_r = np_qr_batched(np_t, mode='r')
        q, r = torch.linalg.qr(t, mode='r')
        # check that q is empty (torch returns a placeholder)
        self.assertEqual(q.shape, (0,))
        self.assertEqual(q.dtype, t.dtype)
        self.assertEqual(q.device, t.device)
        # check r
        self.assertEqual(r, exp_r)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_qr_out(self, device, dtype):
"""
test torch.linalg.qr(out=...) vs torch.lingalg.qr
"""
sizes_to_test = [
(7, 5),
(5, 7),
(5, 0), # empty
(0, 5), # empty
]
for size in sizes_to_test:
t = torch.randn(size, device=device, dtype=dtype)
np_t = t.cpu().numpy()
for mode in ['reduced', 'complete', 'r']:
q, r = torch.linalg.qr(t, mode=mode)
out = (torch.empty((0), dtype=dtype, device=device),
torch.empty((0), dtype=dtype, device=device))
q2, r2 = torch.linalg.qr(t, mode=mode, out=out)
self.assertIs(q2, out[0])
self.assertIs(r2, out[1])
self.assertEqual(q2, q)
self.assertEqual(r2, r)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float)
def test_qr_error_cases(self, device, dtype):
t1 = torch.randn(5, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, 'qr input should have at least 2 dimensions, but has 1 dimensions instead'):
torch.linalg.qr(t1)
t2 = torch.randn((5, 7), device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "qr received unrecognized mode 'hello'"):
torch.linalg.qr(t2, mode='hello')
def _check_einsum(self, *args, np_args=None):
if np_args is None:
np_args = [arg.cpu().numpy() if isinstance(arg, torch.Tensor) else arg for arg in args]
res = torch.einsum(*args)
ref = np.einsum(*np_args)
self.assertEqual(torch.from_numpy(np.array(ref)), res)
    @dtypes(torch.double, torch.cdouble)
    def test_einsum(self, device, dtype):
        """Compare torch.einsum against np.einsum on a set of classic equations."""
        # Test cases from https://gist.github.com/rockt/15ee013889d65342088e9260a377dc8f
        x = make_tensor((5,), device, dtype)
        y = make_tensor((7,), device, dtype)
        A = make_tensor((3, 5), device, dtype)
        B = make_tensor((2, 5), device, dtype)
        C = make_tensor((2, 3, 5), device, dtype)
        D = make_tensor((2, 5, 7), device, dtype)
        E = make_tensor((7, 9), device, dtype)
        F = make_tensor((2, 3, 3, 5), device, dtype)
        G = make_tensor((5, 4, 6), device, dtype)
        H = make_tensor((4, 4), device, dtype)
        I = make_tensor((2, 3, 2), device, dtype)
        # Vector operations
        self._check_einsum('i->', x)                     # sum
        self._check_einsum('i,i->', x, x)                # dot
        self._check_einsum('i,i->i', x, x)               # vector element-wise mul
        self._check_einsum('i,j->ij', x, y)              # outer
        # Matrix operations
        self._check_einsum("ij->ji", A)                  # transpose
        self._check_einsum("ij->j", A)                   # row sum
        self._check_einsum("ij->i", A)                   # col sum
        self._check_einsum("ij,ij->ij", A, A)            # matrix element-wise mul
        self._check_einsum("ij,j->i", A, x)              # matrix vector multiplication
        self._check_einsum("ij,kj->ik", A, B)            # matmul
        self._check_einsum("ij,ab->ijab", A, E)          # matrix outer product
        # Tensor operations
        self._check_einsum("Aij,Ajk->Aik", C, D)         # batch matmul
        self._check_einsum("ijk,jk->i", C, A)            # tensor matrix contraction
        self._check_einsum("aij,jk->aik", D, E)          # tensor matrix contraction
        self._check_einsum("abCd,dFg->abCFg", F, G)      # tensor tensor contraction
        self._check_einsum("ijk,jk->ik", C, A)           # tensor matrix contraction with double indices
        self._check_einsum("ijk,jk->ij", C, A)           # tensor matrix contraction with double indices
        self._check_einsum("ijk,ik->j", C, B)            # non contiguous
        self._check_einsum("ijk,ik->jk", C, B)           # non contiguous with double indices
        # Test diagonals
        self._check_einsum("ii", H)                      # trace
        self._check_einsum("ii->i", H)                   # diagonal
        self._check_einsum('iji->j', I)                  # non-contiguous trace
        self._check_einsum('ngrg...->nrg...', make_tensor((2, 1, 3, 1, 4), device, dtype))
        # Test ellipsis
        self._check_einsum("i...->...", H)
        self._check_einsum("ki,...k->i...", A.t(), B)
        self._check_einsum("k...,jk->...", A.t(), B)
        self._check_einsum('...ik, ...j -> ...ij', C, x)
        self._check_einsum('Bik,k...j->i...j', C, make_tensor((5, 3), device, dtype))
        self._check_einsum('i...j, ij... -> ...ij', C, make_tensor((2, 5, 2, 3), device, dtype))
        # torch.bilinear with noncontiguous tensors
        l = make_tensor((5, 10), device, dtype, noncontiguous=True)
        r = make_tensor((5, 20), device, dtype, noncontiguous=True)
        w = make_tensor((15, 10, 20), device, dtype)
        self._check_einsum("bn,anm,bm->ba", l, w, r)
        # with strided tensors
        self._check_einsum("bn,Anm,bm->bA", l[:, ::2], w[:, ::2, ::2], r[:, ::2])
    @dtypes(torch.double, torch.cdouble)
    def test_einsum_sublist_format(self, device, dtype):
        """Test torch.einsum's interleaved operand/sublist calling convention,
        where subscripts are given as integer lists (with Ellipsis) instead of
        an equation string."""
        x = make_tensor((5,), device, dtype)
        y = make_tensor((7,), device, dtype)
        A = make_tensor((3, 5), device, dtype)
        B = make_tensor((2, 5), device, dtype)
        C = make_tensor((2, 1, 3, 1, 4), device, dtype)
        self._check_einsum(x, [0])
        self._check_einsum(x, [0], [])
        self._check_einsum(x, [0], y, [1], [0, 1])
        self._check_einsum(A, [0, 1], [1, 0])
        self._check_einsum(A, [0, 1], x, [1], [0])
        self._check_einsum(A, [0, 1], B, [2, 1])
        self._check_einsum(A, [0, 1], B, [2, 1], [0, 2])
        self._check_einsum(C, [0, 1, 2, 1, Ellipsis], [0, 2, 1, Ellipsis])
        self._check_einsum(A.t(), [0, 1], B, [Ellipsis, 0])
        self._check_einsum(A.t(), [0, 1], B, [Ellipsis, 0], [1, Ellipsis])
        self._check_einsum(A.t(), [0, Ellipsis], B, [1, 0], [Ellipsis])
        # torch.bilinear with noncontiguous tensors
        l = make_tensor((5, 10), device, dtype, noncontiguous=True)
        r = make_tensor((5, 20), device, dtype, noncontiguous=True)
        w = make_tensor((15, 10, 20), device, dtype)
        self._check_einsum(l, [40, 41], w, [2, 41, 50], r, [40, 50], [40, 2])
    @dtypes(torch.double, torch.cdouble)
    def test_einsum_random(self, device, dtype):
        """Fuzz torch.einsum against np.einsum with randomly generated
        equations, operands, diagonals, ellipses and broadcasting."""
        def convert_label(label):
            # integer labels 0..25 map to 'A'..'Z', 26..51 to 'a'..'z';
            # Ellipsis becomes the literal '...'
            if label == ...:
                return '...'
            elif label < 26:
                return chr(ord('A') + label)
            else:
                return chr(ord('a') + label - 26)
        def convert_sublist(sublist):
            # render a list of integer labels / Ellipsis as an equation fragment
            return ''.join(convert_label(label) for label in sublist)
        def test(n=10,                       # how many tests to generate
                 n_labels=5,                 # how many labels available
                 min_ops=1, max_ops=3,       # min and max number of operands per test
                 min_dims=1, max_dims=3,     # min and max number of dimensions per operand
                 min_size=1, max_size=8,     # min and max size of each dimension
                 max_out_dim=3,              # max number of dimensions for the output
                 enable_diagonals=True,      # controls if labels can be repeated for diagonals
                 ellipsis_prob=0.5,          # probability of including ellipsis in operand
                 broadcasting_prob=0.1):     # probability of turning some dim sizes 1 for broadcasting

            all_labels = torch.arange(52)

            # sanity-check the generator parameters before using them
            assert 0 <= n
            assert 0 <= n_labels < len(all_labels)
            assert 0 < min_ops <= max_ops
            assert 0 <= min_dims <= max_dims
            assert 0 <= min_size <= max_size
            assert 0 <= max_out_dim
            assert enable_diagonals or max_dims <= n_labels

            for _ in range(n):

                # Select a subset of labels for this test and give them random sizes
                possible_labels = all_labels[torch.randperm(len(all_labels))[:n_labels]]
                labels_size = torch.randint_like(all_labels, min_size, max_size + 1)
                ellipsis_shape = torch.randint(min_size, max_size + 1, (max_dims - min_dims,))

                operands = []
                sublists = []

                ell_size = 0
                valid_labels = set()

                # create random input operands
                for _ in range(random.randint(min_ops, max_ops)):
                    n_dim = random.randint(min_dims, max_dims)
                    # multinomial with replacement (2nd arg) allows repeated
                    # labels, which exercises the diagonal code path
                    labels_idx = torch.ones(len(possible_labels)).multinomial(n_dim, enable_diagonals)
                    labels = possible_labels[labels_idx]
                    valid_labels.update(labels.tolist())
                    shape = labels_size[labels]

                    # turn some dimensions to size 1 for testing broadcasting
                    mask = Binomial(probs=broadcasting_prob).sample((n_dim,))
                    broadcast_labels = torch.unique(labels[mask == 1])
                    # a broadcast label must be size 1 in ALL its occurrences
                    shape[(labels[..., None] == broadcast_labels).any(-1)] = 1

                    labels = labels.tolist()
                    shape = shape.tolist()

                    # include ellipsis if not all dimensions were assigned a label already
                    if n_dim < max_dims and torch.rand(1) < ellipsis_prob:
                        ell_num_dim = random.randint(1, max_dims - n_dim)
                        # ell_size tracks the widest ellipsis across operands
                        ell_size = max(ell_size, ell_num_dim)
                        ell_shape = ellipsis_shape[-ell_num_dim:]
                        # again, turn some dimensions to size 1 for broadcasting
                        mask = Binomial(probs=broadcasting_prob).sample((ell_num_dim,))
                        ell_shape[mask == 1] = 1
                        # splice the ellipsis dims at a random position
                        ell_index = random.randint(0, n_dim)
                        shape[ell_index:ell_index] = ell_shape
                        labels.insert(ell_index, ...)

                    operands.append(make_tensor(shape, device, dtype))
                    sublists.append(labels)

                # NumPy has a bug with the sublist format so for now we compare PyTorch sublist
                # implementation against the equation format implementation of NumPy
                # see https://github.com/numpy/numpy/issues/10926
                np_operands = [op.cpu().numpy() for op in operands]

                # test equation format
                equation = ','.join(convert_sublist(l) for l in sublists)
                self._check_einsum(equation, *operands, np_args=(equation, *np_operands))

                # test sublist format
                args = [*itertools.chain(*zip(operands, sublists))]
                self._check_einsum(*args, np_args=(equation, *np_operands))

                # generate an explicit output
                out_sublist = []
                num_out_labels = max(0, random.randint(0, min(max_out_dim, len(valid_labels))) - ell_size)
                if num_out_labels > 0:
                    out_labels_idx = torch.ones(len(valid_labels)).multinomial(num_out_labels)
                    out_sublist = torch.tensor(list(valid_labels))[out_labels_idx].tolist()
                out_sublist.insert(random.randint(0, num_out_labels), ...)

                # test equation format with explicit output
                equation += '->' + convert_sublist(out_sublist)
                self._check_einsum(equation, *operands, np_args=(equation, *np_operands))

                # test sublist format with explicit output
                args.append(out_sublist)
                self._check_einsum(*args, np_args=(equation, *np_operands))

        test(100)
    def test_einsum_corner_cases(self, device):
        """Exercise einsum edge cases: whitespace-only equations, scalars,
        zero-size dimensions, broadcasting and ellipsis handling."""
        def check(equation, *operands, expected_output):
            # tuples describe a shape for make_tensor; anything else is a literal value
            tensors = [torch.tensor(operand, device=device, dtype=torch.float32) if not isinstance(operand, tuple)
                       else make_tensor(operand, device, torch.float32) for operand in operands]
            output = torch.einsum(equation, tensors)
            self.assertEqual(output, torch.tensor(expected_output, dtype=torch.float32, device=device))
        # Test equation variations
        check(' ', 1, expected_output=1)
        check(' -> ', 1, expected_output=1)
        check(' , ', 2, 2, expected_output=4)
        check(' , , ', 2, 2, 2, expected_output=8)
        check(' , -> ', 2, 2, expected_output=4)
        check(' i ', [1], expected_output=[1])
        check(' i -> ', [1], expected_output=1)
        check(' i -> i ', [1], expected_output=[1])
        check(' i , i ', [2], [2], expected_output=4)
        check(' i , i -> i ', [2], [2], expected_output=[4])
        # Test tensors with 0 size dimensions
        check('i', [], expected_output=[])
        check(' i j -> j', [[], []], expected_output=[])
        check('ij->i', [[], []], expected_output=[0., 0.])
        check(' i j k  ,  k  -> i j ', (3, 0, 6), (6,), expected_output=[[], [], []])
        # Test broadcasting
        check('i,j', [2], [1, 2], expected_output=[[2, 4]])
        check('i,ij->ij', [1, 2], [[1, 2, 3], [2, 3, 4]], expected_output=[[1, 2, 3], [4, 6, 8]])
        # Test ellipsis broadcasting
        check('...', 1, expected_output=1)
        check('...->', 1, expected_output=1)
        check('...->...', 1, expected_output=1)
        check('...', [1], expected_output=[1])
        check('...->', [1], expected_output=1)
        check('z...->z', [1], expected_output=[1])
        check('Z...->...Z', [1], expected_output=[1])
        check('...a->', [[2], [4]], expected_output=6)
        check('a...b->ab', [[[1], [2]], [[3], [4]]], expected_output=[[3], [7]])
    def test_einsum_error_cases(self, device):
        """Check that malformed einsum calls raise the expected errors."""
        def check(*args, regex, exception=RuntimeError):
            # every einsum error message is prefixed with 'einsum():'
            with self.assertRaisesRegex(exception, r'einsum\(\):.*' + regex):
                torch.einsum(*args)
        x = make_tensor((2,), device, torch.float32)
        y = make_tensor((2, 3), device, torch.float32)
        check('', [], regex=r'at least one operand', exception=ValueError)
        check('. ..', [x], regex=r'found \'.\' for operand 0 that is not part of any ellipsis')
        check('... ...', [x], regex=r'found \'.\' for operand 0 for which an ellipsis was already found')
        check('1', [x], regex=r'invalid subscript given at index 0')
        check(',', [x], regex=r'fewer operands were provided than specified in the equation')
        check('', [x, x], regex=r'more operands were provided than specified in the equation')
        check('', [x], regex=r'the number of subscripts in the equation \(0\) does not match the number '
              r'of dimensions \(1\) for operand 0 and no ellipsis was given')
        check('ai', [x], regex=r'the number of subscripts in the equation \(2\) does not match the number '
              r'of dimensions \(1\) for operand 0 and no ellipsis was given')
        check('ai...', [x], regex=r'the number of subscripts in the equation \(2\) is more than the number '
              r'of dimensions \(1\) for operand 0')
        check('a->... .', [x], regex=r'found \'.\' for output but an ellipsis \(...\) was already found')
        check('a->..', [x], regex=r'found \'.\' for output that is not part of any ellipsis \(...\)')
        check('a->1', [x], regex=r'invalid subscript given at index 3')
        check('a->aa', [x], regex=r'output subscript a appears more than once in the output')
        check('a->i', [x], regex=r'output subscript i does not appear in the equation for any input operand')
        check('aa', [y], regex=r'subscript a is repeated for operand 0 but the sizes don\'t match, 3 != 2')
        check('a, ba', [x, y], regex=r'operands do not broadcast with remapped shapes \[original->remapped\]: '
              r'\[2\]->\[1, 2\] \[2, 3\]->\[2, 3\]')
        # sublist-format labels must be valid integer subscripts
        check(x, [-1], regex=r'not within the valid range \[0, 52\)', exception=ValueError)
        check(x, [52], regex=r'not within the valid range \[0, 52\)', exception=ValueError)
def triangular_solve_test_helper(self, A_dims, b_dims, upper, unitriangular,
device, dtype):
triangle_function = torch.triu if upper else torch.tril
b = torch.randn(*b_dims, dtype=dtype, device=device)
A = torch.randn(*A_dims, dtype=dtype, device=device)
# create positive definite matrix
A = torch.matmul(A, A.mT)
A_triangular = triangle_function(A)
if unitriangular:
A_triangular.diagonal(dim1=-2, dim2=-1).fill_(1.)
return b, A_triangular
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_triangular_solve(self, device, dtype):
        """Check torch.triangular_solve by verifying that A @ x reconstructs b."""
        ks = [0, 1, 3]
        ns = [0, 5]
        # all combinations of rhs width k, matrix size n and the three flags
        for k, n, (upper, unitriangular, transpose) in itertools.product(ks, ns,
                                                                         itertools.product([True, False], repeat=3)):
            b, A = self.triangular_solve_test_helper((n, n), (n, k), upper,
                                                     unitriangular, device, dtype)
            x = torch.triangular_solve(b, A, upper=upper, unitriangular=unitriangular, transpose=transpose)[0]
            if transpose:
                # transpose=True solves A^T x = b, so verify against A^T
                self.assertEqual(b, np.matmul(A.t().cpu(), x.cpu()))
            else:
                self.assertEqual(b, np.matmul(A.cpu(), x.cpu()))
    @skipCPUIfNoLapack
    @skipCUDAIfNoMagma
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_triangular_solve_batched(self, device, dtype):
        """Batched triangular_solve must match solving each matrix in the
        batch individually, and must handle empty / zero-batch inputs."""
        def triangular_solve_batch_helper(A_dims, b_dims, upper, unitriangular, transpose):
            b, A = self.triangular_solve_test_helper(A_dims, b_dims, upper,
                                                     unitriangular, device, dtype)
            # reference: solve each batch element one at a time
            x_exp_list = []
            for i in range(b_dims[0]):
                x_exp_list.append(torch.triangular_solve(b[i], A[i], upper=upper,
                                                         unitriangular=unitriangular,
                                                         transpose=transpose)[0])
            x_exp = torch.stack(x_exp_list)  # Stacked output
            x_act = torch.triangular_solve(b, A, upper=upper,
                                           unitriangular=unitriangular,
                                           transpose=transpose)[0]  # Actual output
            self.assertEqual(x_act, x_exp)  # Equality check
            if transpose:
                A = A.mT
            # also verify that A @ x reconstructs b
            Ax = np.matmul(A.cpu(), x_act.cpu())
            self.assertEqual(b, Ax)
        def triangular_solve_zero_batch_helper(A_dims, b_dims, upper, unitriangular, transpose):
            # with a zero batch there is nothing to solve; only the shape matters
            b, A = self.triangular_solve_test_helper(A_dims, b_dims, upper,
                                                     unitriangular, device, dtype)
            x = torch.triangular_solve(b, A, upper=upper,
                                       unitriangular=unitriangular,
                                       transpose=transpose)[0]
            self.assertTrue(x.shape == b.shape)
        for upper, unitriangular, transpose in itertools.product([True, False], repeat=3):
            batchsize = 3
            triangular_solve_batch_helper((batchsize, 5, 5), (batchsize, 5, 10),
                                          upper, unitriangular, transpose)
            # test empty input
            triangular_solve_batch_helper((batchsize, 0, 0), (batchsize, 0, 10),
                                          upper, unitriangular, transpose)
            triangular_solve_batch_helper((batchsize, 0, 0), (batchsize, 0, 0),
                                          upper, unitriangular, transpose)
            # test zero batch case
            batchsize = 0
            triangular_solve_zero_batch_helper((batchsize, 5, 5), (batchsize, 5, 10),
                                               upper, unitriangular, transpose)
    @slowTest
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_triangular_solve_batched_many_batches(self, device, dtype):
        """Stress triangular_solve with very large batch dimensions, both on
        the matrix side (batched A) and on the right-hand side (batched b)."""
        for upper, transpose, unitriangular in itertools.product([True, False], repeat=3):
            # test batched A case
            b, A = self.triangular_solve_test_helper((256, 256, 5, 5), (5, 1),
                                                     upper, unitriangular, device, dtype)
            x, _ = torch.triangular_solve(b, A,
                                          upper=upper, transpose=transpose, unitriangular=unitriangular)
            if transpose:
                A = A.mT
            Ax = torch.matmul(A, x)
            # single precision needs a looser relative tolerance here
            rtol = 1e-2 if dtype in [torch.float32, torch.complex64] else self.precision
            self.assertEqual(Ax, b.expand_as(Ax), atol=self.precision, rtol=rtol)
            # test batched b case
            b, A = self.triangular_solve_test_helper((3, 3), (512, 512, 3, 1),
                                                     upper, unitriangular, device, dtype)
            x, _ = torch.triangular_solve(b, A, upper=upper, transpose=transpose,
                                          unitriangular=unitriangular)
            if transpose:
                A = A.mT
            self.assertEqual(torch.matmul(A, x), b)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @unittest.skipIf(not TEST_SCIPY, "SciPy not found")
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_triangular_solve_batched_broadcasting(self, device, dtype):
        """Compare broadcasting triangular_solve against a scipy reference
        that explicitly broadcasts and solves each matrix pair."""
        from scipy.linalg import solve_triangular as tri_solve
        def scipy_tri_solve_batched(A, B, upper, trans, diag):
            # manually broadcast the batch dims, flatten, solve each pair with
            # scipy, then restore the broadcast batch shape
            batch_dims_A, batch_dims_B = A.shape[:-2], B.shape[:-2]
            single_dim_A, single_dim_B = A.shape[-2:], B.shape[-2:]
            expand_dims = tuple(torch._C._infer_size(torch.Size(batch_dims_A),
                                                     torch.Size(batch_dims_B)))
            expand_A = np.broadcast_to(A, expand_dims + single_dim_A)
            expand_B = np.broadcast_to(B, expand_dims + single_dim_B)
            flat_A = expand_A.reshape((-1,) + single_dim_A)
            flat_B = expand_B.reshape((-1,) + single_dim_B)
            flat_X = np.vstack([tri_solve(a, b, lower=(not upper), trans=int(trans), unit_diagonal=diag)
                                for a, b in zip(flat_A, flat_B)])
            return flat_X.reshape(expand_B.shape)
        def run_test(A_dims, b_dims, device, upper, transpose, unitriangular):
            b, A = self.triangular_solve_test_helper(A_dims, b_dims, upper,
                                                     unitriangular, device, dtype)
            x_exp = torch.as_tensor(scipy_tri_solve_batched(A.cpu().numpy(), b.cpu().numpy(),
                                                            upper, transpose, unitriangular))
            x = torch.triangular_solve(b, A, upper=upper, transpose=transpose, unitriangular=unitriangular)[0]
            self.assertEqual(x, x_exp.to(device))
        for upper, transpose, unitriangular in itertools.product([True, False], repeat=3):
            # test against scipy.linalg.solve_triangular
            run_test((2, 1, 3, 4, 4), (2, 1, 3, 4, 6), device, upper, transpose, unitriangular)  # no broadcasting
            run_test((2, 1, 3, 4, 4), (4, 6), device, upper, transpose, unitriangular)  # broadcasting b
            run_test((4, 4), (2, 1, 3, 4, 2), device, upper, transpose, unitriangular)  # broadcasting A
            run_test((1, 3, 1, 4, 4), (2, 1, 3, 4, 5), device, upper, transpose, unitriangular)  # broadcasting A & b
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_triangular_solve_out_errors_and_warnings(self, device, dtype):
        """out= tensors with a wrong dtype or device raise, while a wrong
        shape only emits a resize warning."""
        # dtypes should be safely castable
        a = torch.eye(2, dtype=dtype, device=device)
        b = torch.randn(2, 1, dtype=dtype, device=device)
        out = torch.empty_like(b).to(torch.int)
        clone_a = torch.empty_like(a)
        with self.assertRaisesRegex(RuntimeError, "Expected out tensor to have dtype"):
            torch.triangular_solve(b, a, out=(out, clone_a))
        out = torch.empty_like(b)
        clone_a = clone_a.to(torch.int)
        with self.assertRaisesRegex(RuntimeError, "Expected out tensor to have dtype"):
            torch.triangular_solve(b, a, out=(out, clone_a))
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, dtype=dtype, device=wrong_device)
            clone_a = torch.empty_like(a)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.triangular_solve(b, a, out=(out, clone_a))
            out = torch.empty(0, dtype=dtype, device=device)
            clone_a = torch.empty_like(a).to(wrong_device)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.triangular_solve(b, a, out=(out, clone_a))
        # if out tensor with wrong shape is passed a warning is given
        with warnings.catch_warnings(record=True) as w:
            out = torch.empty(1, dtype=dtype, device=device)
            clone_a = torch.empty(1, dtype=dtype, device=device)
            # Trigger warning
            torch.triangular_solve(b, a, out=(out, clone_a))
            # Check warning occurs
            # one warning per resized out tensor
            self.assertEqual(len(w), 2)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
            self.assertTrue("An output with one or more elements was resized" in str(w[-2].message))
def check_single_matmul(self, x, y, shape):
a = np.array(x, copy=False)
b = np.array(y, copy=False)
expected = np.matmul(a, b)
ans = torch.matmul(x, y)
self.assertTrue(ans.is_contiguous())
self.assertTrue(np.array_equal(ans, expected))
out = torch.zeros(*shape, dtype=torch.int64).to(x.device)
ans = torch.matmul(x, y, out=out)
self.assertIs(ans, out)
self.assertTrue(ans.is_contiguous())
self.assertTrue(np.array_equal(ans, expected))
# TODO: update to run on CUDA, too
@onlyCPU
def test_matmul_small_brute_force_1d_Nd(self, device):
# Issue #20452: range(0, 10) does not work.
n = 1
for m in range(1, 8):
for p in range(1, 8):
for o in range(1, 5):
# 1d, 3d, inner dimensions C
x = torch.arange(m, device=device)
y = torch.arange(o * m * p, device=device).reshape(o, m, p)
self.check_single_matmul(x, y, (o, n, p))
# 1d, 3d, inner dimensions Fortran
x = torch.arange(m, device=device)
y = torch.arange(o * p * m, device=device).reshape(o, p, m).mT
self.check_single_matmul(x, y, (o, n, p))
# 1d, 3d, inner dimensions non-contiguous
x = torch.arange(2 * m, device=device)[::2]
y = torch.arange(o * m * 2 * p, device=device).reshape(o, m, 2 * p)[:, :, ::2]
self.check_single_matmul(x, y, (o, n, p))
for r in range(1, 5):
# 1d, 4d, inner dimensions C
x = torch.arange(m)
y = torch.arange(r * o * m * p, device=device).reshape(r, o, m, p)
self.check_single_matmul(x, y, (r, o, n, p))
# 1d, 4d, inner dimensions Fortran
x = torch.arange(m)
y = torch.arange(r * o * p * m, device=device).reshape(r, o, p, m).mT
self.check_single_matmul(x, y, (r, o, n, p))
# 1d, 4d, inner dimensions non-contiguous
x = torch.arange(2 * m, device=device)[::2]
y = torch.arange(r * o * m * 2 * p, device=device).reshape(r, o, m, 2 * p)[:, :, :, ::2]
self.check_single_matmul(x, y, (r, o, n, p))
    # TODO: update to run on CUDA, too
    @onlyCPU
    def test_matmul_small_brute_force_2d_Nd(self, device):
        """Brute-force torch.matmul(2-D, N-D) against numpy over small sizes,
        covering C-contiguous, Fortran-contiguous and non-contiguous layouts."""
        # Issue #20452: range(0, 10) does not work.
        for n in range(1, 5):
            for m in range(1, 5):
                for p in range(1, 5):
                    for o in range(1, 3):
                        # 2d, 3d, inner dimensions C
                        x = torch.arange(n * m, device=device).reshape(n, m)
                        y = torch.arange(o * m * p, device=device).reshape(o, m, p)
                        self.check_single_matmul(x, y, (o, n, p))
                        # 2d, 3d, inner dimensions Fortran
                        x = torch.arange(m * n, device=device).reshape(m, n).mT
                        y = torch.arange(o * p * m, device=device).reshape(o, p, m).mT
                        self.check_single_matmul(x, y, (o, n, p))
                        # 2d, 3d, inner dimensions non-contiguous
                        x = torch.arange(n * 2 * m, device=device).reshape(n, 2 * m)[:, ::2]
                        y = torch.arange(o * m * 2 * p, device=device).reshape(o, m, 2 * p)[:, :, ::2]
                        self.check_single_matmul(x, y, (o, n, p))
                        for r in range(1, 2):
                            # 2d, 4d, inner dimensions C
                            x = torch.arange(n * m, device=device).reshape(n, m)
                            y = torch.arange(r * o * m * p, device=device).reshape(r, o, m, p)
                            self.check_single_matmul(x, y, (r, o, n, p))
                            # 2d, 4d, inner dimensions Fortran
                            x = torch.arange(m * n, device=device).reshape(m, n).mT
                            y = torch.arange(r * o * p * m, device=device).reshape(r, o, p, m).mT
                            self.check_single_matmul(x, y, (r, o, n, p))
                            # 2d, 4d, inner dimensions non-contiguous
                            x = torch.arange(n * 2 * m, device=device).reshape(n, 2 * m)[:, ::2]
                            y = torch.arange(r * o * m * 2 * p, device=device).reshape(r, o, m, 2 * p)[:, :, :, ::2]
                            self.check_single_matmul(x, y, (r, o, n, p))
def test_linear_algebra_scalar_raises(self, device) -> None:
m = torch.randn(5, 5, device=device)
v = torch.randn(5, device=device)
s = torch.tensor(7, device=device)
self.assertRaises(RuntimeError, lambda: torch.mv(m, s))
self.assertRaises(RuntimeError, lambda: torch.addmv(v, m, s))
@dtypes(torch.float32, torch.complex64)
def test_cross(self, device, dtype):
x = torch.rand(100, 3, 100, dtype=dtype, device=device)
y = torch.rand(100, 3, 100, dtype=dtype, device=device)
res1 = torch.cross(x, y)
res2 = torch.tensor((), dtype=dtype, device=device)
torch.cross(x, y, out=res2)
self.assertEqual(res1, res2)
    @dtypes(torch.float32, torch.complex64)
    def test_linalg_cross(self, device, dtype):
        """Test torch.linalg.cross: out= variant, broadcasting, and a range of
        non-contiguous input layouts compared against np.cross."""
        x = torch.rand(100, 3, 100, dtype=dtype, device=device)
        y = torch.rand(100, 3, 100, dtype=dtype, device=device)
        res1 = torch.linalg.cross(x, y, dim=1)
        res2 = torch.tensor((), dtype=dtype, device=device)
        torch.linalg.cross(x, y, dim=1, out=res2)
        self.assertEqual(res1, res2)
        # test for broadcastable inputs
        x = torch.rand(1, 3, 2, dtype=dtype, device=device)
        y = torch.rand(4, 3, 1, dtype=dtype, device=device)
        res1 = torch.linalg.cross(x, y, dim=1)
        res2 = torch.tensor((), dtype=dtype, device=device)
        torch.linalg.cross(x, y, dim=1, out=res2)
        self.assertEqual(res1, res2)
        # non contiguous case 1: channels_last memory format
        x = torch.rand((4, 4, 4, 3), dtype=dtype,
                       device=device).contiguous(memory_format=torch.channels_last)  # non-contiguous
        y = torch.rand((4, 4, 4, 3), dtype=dtype,
                       device=device).contiguous(memory_format=torch.channels_last)  # non-contiguous
        np_expected_ref = np.cross(x.cpu().numpy(), y.cpu().numpy(), axis=-1)
        res = torch.linalg.cross(x, y, dim=-1)
        # numpy reference compared to torch result
        self.assertEqual(res.cpu().numpy(), np_expected_ref)
        # non contiguous case 2: one permuted operand
        x = torch.rand(1, 3, 2, dtype=dtype, device=device)  # contiguous
        y = torch.rand(1, 3, 4, dtype=dtype, device=device).permute(2, 1, 0)  # non-contiguous
        np_expected_ref = np.cross(x.cpu().numpy(), y.cpu().numpy(), axis=1)
        res = torch.linalg.cross(x, y, dim=1)
        # numpy reference compared to torch result
        self.assertEqual(res.cpu().numpy(), np_expected_ref)
        # non contiguous case 3: both operands permuted
        x = torch.rand(2, 3, 1, dtype=dtype, device=device).permute(2, 1, 0)  # non-contiguous
        y = torch.rand(1, 3, 4, dtype=dtype, device=device).permute(2, 1, 0)  # non-contiguous
        np_expected_ref = np.cross(x.cpu().numpy(), y.cpu().numpy(), axis=1)
        res = torch.linalg.cross(x, y, dim=1)
        # numpy reference compared to torch result
        self.assertEqual(res.cpu().numpy(), np_expected_ref)
        # non contiguous case 4: strided slices
        x = torch.randn(12, 3, device=device, dtype=dtype)[::2, :]  # non-contiguous
        y = torch.randn(18, 3, device=device, dtype=dtype)[::3, :]  # non-contiguous
        np_expected_ref = np.cross(x.cpu().numpy(), y.cpu().numpy(), axis=1)
        res = torch.linalg.cross(x, y, dim=1)
        # numpy reference compared to torch result
        self.assertEqual(res.cpu().numpy(), np_expected_ref)
        # non contiguous case 5: size-1 operand broadcast against a strided one
        x = torch.randn(1, device=device, dtype=dtype)  # contiguous
        y = torch.randn(6, device=device, dtype=dtype)[::2]  # non-contiguous
        np_expected_ref = np.cross(x.expand(3).cpu().numpy(), y.cpu().numpy())
        res = torch.linalg.cross(x, y)
        # numpy reference compared to torch result
        self.assertEqual(res.cpu().numpy(), np_expected_ref)
@dtypes(torch.float32, torch.complex64)
def test_cross_with_and_without_dim(self, device, dtype):
x = torch.rand(100, 3, dtype=dtype, device=device)
y = torch.rand(100, 3, dtype=dtype, device=device)
res1 = torch.cross(x, y, dim=1)
res2 = torch.cross(x, y, dim=-1)
res3 = torch.cross(x, y)
self.assertEqual(res1, res2)
self.assertEqual(res1, res3)
@dtypes(torch.float32, torch.complex64)
def test_linalg_cross_with_and_without_dim(self, device, dtype):
x = torch.rand(100, 3, dtype=dtype, device=device)
y = torch.rand(100, 3, dtype=dtype, device=device)
res1 = torch.linalg.cross(x, y, dim=1)
res2 = torch.linalg.cross(x, y, dim=-1)
res3 = torch.linalg.cross(x, y)
self.assertEqual(res1, res2)
self.assertEqual(res1, res3)
def test_cross_errors(self, device):
self.assertRaisesRegex(
RuntimeError, "must match the size of tensor",
lambda: torch.cross(torch.rand(100, 3, device=device), torch.rand(100, 3, 10, device=device)))
self.assertRaisesRegex(
RuntimeError, "must match the size of tensor",
lambda: torch.cross(torch.rand(5, 3, device=device), torch.rand(3, 5, device=device)))
self.assertRaisesRegex(
RuntimeError, "no dimension of size 3 in input",
lambda: torch.cross(torch.rand(5, 4, device=device), torch.rand(5, 4, device=device)))
self.assertRaisesRegex(
RuntimeError, "dimension 0 does not have size 3",
lambda: torch.cross(torch.rand(5, 4, 3, device=device), torch.rand(5, 4, 3, device=device), dim=0))
self.assertRaisesRegex(
RuntimeError, "dimension -1 does not have size 3",
lambda: torch.cross(torch.rand(5, 3, 4, device=device), torch.rand(5, 3, 4, device=device), dim=-1))
self.assertRaisesRegex(
IndexError, "Dimension out of range",
lambda: torch.cross(torch.rand(5, 3, 4, device=device), torch.rand(5, 3, 4, device=device), dim=-5))
def test_linalg_cross_errors(self, device):
self.assertRaisesRegex(
RuntimeError, "dimension -1 does not have size 3",
lambda: torch.linalg.cross(torch.rand(5, 3, 4, device=device), torch.rand(5, 3, 4, device=device)))
self.assertRaisesRegex(
RuntimeError, "must match the size of tensor",
lambda: torch.linalg.cross(torch.rand(100, 3, device=device), torch.rand(100, 3, 10, device=device)))
self.assertRaisesRegex(
RuntimeError, "must match the size of tensor",
lambda: torch.linalg.cross(torch.rand(5, 3, device=device), torch.rand(3, 5, device=device)))
self.assertRaisesRegex(
RuntimeError, "dimension 0 does not have size 3",
lambda: torch.linalg.cross(torch.rand(5, 4, 3, device=device), torch.rand(5, 4, 3, device=device), dim=0))
self.assertRaisesRegex(
RuntimeError, "dimension -1 does not have size 3",
lambda: torch.linalg.cross(torch.rand(5, 3, 4, device=device), torch.rand(5, 3, 4, device=device), dim=-1))
self.assertRaisesRegex(
IndexError, "Dimension out of range",
lambda: torch.linalg.cross(torch.rand(5, 3, 4, device=device), torch.rand(5, 3, 4, device=device), dim=-5))
    def test_renorm(self, device):
        """Compare Tensor.renorm_ against a hand-written reference that clips
        per-slice norms to max_norm."""
        m1 = torch.randn(20, 20, device=device)  # big enough to exercise vectorized path
        res1 = torch.tensor((), device=device)
        def renorm(matrix, value, dim, max_norm):
            # reference implementation: move `dim` to the front, flatten the
            # rest, clip each row's `value`-norm to max_norm, then undo
            m1 = matrix.transpose(dim, 0).contiguous()
            # collapse non-dim dimensions.
            m2 = m1.clone().resize_(m1.size(0), int(math.floor(m1.nelement() / m1.size(0))))
            norms = m2.norm(value, 1, True)
            # clip
            new_norms = norms.clone()
            new_norms[torch.gt(norms, max_norm)] = max_norm
            # epsilon guards against division by a zero norm
            new_norms.div_(norms.add_(1e-7))
            # renormalize
            m1.mul_(new_norms.expand_as(m1))
            return m1.transpose(dim, 0)
        # note that the axis fed to torch.renorm is different (2~=1)
        maxnorm = m1.norm(2, 1).mean()
        m2 = renorm(m1, 2, 1, maxnorm)
        m1.renorm_(2, 1, maxnorm)
        self.assertEqual(m1, m2, atol=1e-5, rtol=0)
        self.assertEqual(m1.norm(2, 0), m2.norm(2, 0), atol=1e-5, rtol=0)
        # repeat on a 3-D tensor, renormalizing along dim 1
        m1 = torch.randn(3, 4, 5, device=device)
        m2 = m1.transpose(1, 2).contiguous().clone().resize_(15, 4)
        maxnorm = m2.norm(2, 0).mean()
        m2 = renorm(m2, 2, 1, maxnorm)
        m1.renorm_(2, 1, maxnorm)
        m3 = m1.transpose(1, 2).contiguous().clone().resize_(15, 4)
        self.assertEqual(m3, m2)
        self.assertEqual(m3.norm(2, 0), m2.norm(2, 0))
    @skipCPUIfNoLapack
    @skipCUDAIfNoCusolver
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_ormqr(self, device, dtype):
        """Check torch.ormqr (multiply by the Q of a geqrf factorization)
        against explicit products with Q from torch.linalg.qr."""
        def run_test(batch, m, n, fortran_contiguous):
            A = make_tensor((*batch, m, n), dtype=dtype, device=device)
            reflectors, tau = torch.geqrf(A)
            if not fortran_contiguous:
                # geqrf returns Fortran-contiguous reflectors; force the
                # C-contiguous code path as well
                self.assertTrue(reflectors.mT.is_contiguous())
                reflectors = reflectors.contiguous()
            # Q is of size m x m
            Q, _ = torch.linalg.qr(A, mode='complete')
            C_right = make_tensor((*batch, m, n), dtype=dtype, device=device)
            C_left = make_tensor((*batch, n, m), dtype=dtype, device=device)
            expected = Q @ C_right
            actual = torch.ormqr(reflectors, tau, C_right, left=True, transpose=False)
            self.assertEqual(expected, actual)
            expected = C_left @ Q
            actual = torch.ormqr(reflectors, tau, C_left, left=False, transpose=False)
            self.assertEqual(expected, actual)
            # transpose=True applies Q^H instead of Q
            expected = Q.mH @ C_right
            actual = torch.ormqr(reflectors, tau, C_right, left=True, transpose=True)
            self.assertEqual(expected, actual)
            expected = C_left @ Q.mH
            actual = torch.ormqr(reflectors, tau, C_left, left=False, transpose=True)
            self.assertEqual(expected, actual)
            # if tau is all zeros then the implicit matrix Q is the identity matrix
            # so the actual result should be C_right in this case
            zero_tau = torch.zeros_like(tau)
            actual = torch.ormqr(reflectors, zero_tau, C_right, left=True, transpose=False)
            self.assertEqual(C_right, actual)
        batches = [(), (0, ), (2, ), (2, 1)]
        ns = [5, 2, 0]
        for batch, (m, n), fortran_contiguous in product(batches, product(ns, ns), [True, False]):
            run_test(batch, m, n, fortran_contiguous)
@skipCPUIfNoLapack
@skipCUDAIfNoCusolver
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_ormqr_errors_and_warnings(self, device, dtype):
    """Each malformed (input, tau, other) shape triple must raise RuntimeError
    with the documented message."""
    cases = (
        # input size, tau size, other size, expected error regex
        ((10,), (2,), (2,), r"input must have at least 2 dimensions"),
        ((2, 2), (2,), (2,), r"other must have at least 2 dimensions"),
        ((10, 6), (20,), (10, 6), r"other.shape\[-2\] must be greater than or equal to tau.shape\[-1\]"),
        ((6, 6), (5,), (5, 5), r"other.shape\[-2\] must be equal to input.shape\[-2\]"),
        ((1, 2, 2), (2, 2), (1, 2, 2), r"batch dimensions of tau to be equal to input.shape\[:-2\]"),
        ((1, 2, 2), (1, 2), (2, 2, 2), r"batch dimensions of other to be equal to input.shape\[:-2\]"),
    )
    for case in cases:
        *shapes, error_regex = case
        input_, tau, other = (make_tensor(shape, dtype=dtype, device=device) for shape in shapes)
        with self.assertRaisesRegex(RuntimeError, error_regex):
            torch.ormqr(input_, tau, other)
def test_blas_empty(self, device):
    """Check BLAS-backed ops on zero-sized dimensions: shapes propagate
    correctly, degenerate products yield zeros, and the add* variants fall
    back to the (scaled) input tensor.

    `fn` builds random tensors for every tuple-shaped argument and calls
    `torchfn`; with test_out=True it additionally runs the out= variant into
    a NaN-prefilled tensor so stale memory cannot mask a bug.
    """
    def fn(torchfn, *args, test_out=False, **kwargs):
        def call_torch_fn(*args, **kwargs):
            return torchfn(*tuple(torch.randn(shape, device=device) if isinstance(shape, tuple) else shape
                                  for shape in args), **kwargs)
        result = call_torch_fn(*args, **kwargs)
        if not test_out:
            return result
        else:
            # NaN-fill `out` so any element the op fails to write is detected.
            out = torch.full_like(result, math.nan)
            call_torch_fn(*args, **kwargs, out=out)
            return out

    # mm, addmm
    self.assertEqual((0, 0), fn(torch.mm, (0, 0), (0, 0)).shape)
    self.assertEqual((0, 5), fn(torch.mm, (0, 0), (0, 5)).shape)
    self.assertEqual((5, 0), fn(torch.mm, (5, 0), (0, 0)).shape)
    self.assertEqual((3, 0), fn(torch.mm, (3, 2), (2, 0)).shape)
    # (5, 0) @ (0, 6) reduces over an empty dimension -> exact zeros.
    self.assertEqual(torch.zeros((5, 6), device=device), fn(torch.mm, (5, 0), (0, 6)))
    self.assertEqual(torch.zeros((5, 6), device=device), fn(torch.mm, (5, 0), (0, 6), test_out=True))

    self.assertEqual((0, 0), fn(torch.addmm, (0, 0), (0, 0), (0, 0)).shape)
    self.assertEqual((0, 1), fn(torch.addmm, (1, ), (0, 17), (17, 1)).shape)
    t = torch.randn((5, 6), device=device)
    # Empty product contributes nothing, so addmm returns the input unchanged.
    self.assertEqual(t, fn(torch.addmm, t, (5, 0), (0, 6)))
    self.assertEqual(t, fn(torch.addmm, t, (5, 0), (0, 6), test_out=True))

    # mv, addmv
    self.assertEqual((0,), fn(torch.mv, (0, 0), (0,)).shape)
    self.assertEqual((0,), fn(torch.mv, (0, 2), (2,)).shape)
    self.assertEqual(torch.zeros((3,), device=device), fn(torch.mv, (3, 0), (0,)))
    self.assertEqual(torch.zeros((3,), device=device), fn(torch.mv, (3, 0), (0,), test_out=True))

    self.assertEqual((0,), fn(torch.addmv, (0,), (0, 0), (0,)).shape)
    t = torch.randn((3,), device=device)
    self.assertEqual(t, fn(torch.addmv, t, (3, 0), (0,)))
    self.assertEqual(t, fn(torch.addmv, t, (3, 0), (0,), test_out=True))

    # bmm, baddbmm
    self.assertEqual((0, 0, 0), fn(torch.bmm, (0, 0, 0), (0, 0, 0)).shape)
    self.assertEqual((3, 0, 5), fn(torch.bmm, (3, 0, 0), (3, 0, 5)).shape)
    self.assertEqual((0, 5, 6), fn(torch.bmm, (0, 5, 0), (0, 0, 6)).shape)
    self.assertEqual(torch.zeros((3, 5, 6), device=device), fn(torch.bmm, (3, 5, 0), (3, 0, 6)))
    self.assertEqual(torch.zeros((3, 5, 6), device=device), fn(torch.bmm, (3, 5, 0), (3, 0, 6), test_out=True))

    self.assertEqual((0, 0, 0), fn(torch.baddbmm, (0, 0, 0), (0, 0, 0), (0, 0, 0)).shape)
    self.assertEqual((3, 0, 5), fn(torch.baddbmm, (3, 0, 5), (3, 0, 0), (3, 0, 5)).shape)
    self.assertEqual((0, 5, 6), fn(torch.baddbmm, (0, 5, 6), (0, 5, 0), (0, 0, 6)).shape)
    self.assertEqual((3, 5, 6), fn(torch.baddbmm, (3, 5, 6), (3, 5, 0), (3, 0, 6)).shape)
    c = torch.arange(30, dtype=torch.float32, device=device).reshape(3, 2, 5)
    # beta must still scale the input even when the batched product is empty.
    self.assertEqual(-2 * c, fn(torch.baddbmm, c, (3, 2, 0), (3, 0, 5), beta=-2))  # Issue #33467
    self.assertEqual(-2 * c, fn(torch.baddbmm, c, (3, 2, 0), (3, 0, 5), beta=-2, test_out=True))  # Issue #33467

    # addbmm
    self.assertEqual((0, 0), fn(torch.addbmm, (0, 0), (0, 0, 0), (0, 0, 0)).shape)
    self.assertEqual((0, 5), fn(torch.addbmm, (0, 5), (3, 0, 0), (3, 0, 5)).shape)
    t = torch.randn((5, 6), device=device)
    self.assertEqual(t, fn(torch.addbmm, t, (0, 5, 0), (0, 0, 6)))
    self.assertEqual(t, fn(torch.addbmm, t, (0, 5, 0), (0, 0, 6), test_out=True))

    # matmul
    self.assertEqual(torch.tensor(0., device=device), fn(torch.matmul, (0,), (0,)))
    self.assertEqual(torch.tensor(0., device=device), fn(torch.matmul, (0,), (0,), test_out=True))
    self.assertEqual((0, 0), fn(torch.matmul, (0, 0), (0, 0)).shape)
    self.assertEqual((0, 0, 0), fn(torch.matmul, (0, 0, 0), (0, 0, 0)).shape)
    self.assertEqual((5, 0, 0), fn(torch.matmul, (5, 0, 0), (5, 0, 0)).shape)
    self.assertEqual(torch.zeros((5, 3, 4), device=device), fn(torch.matmul, (5, 3, 0), (5, 0, 4)))
    self.assertEqual(torch.zeros((5, 3, 4), device=device), fn(torch.matmul, (5, 3, 0), (5, 0, 4), test_out=True))

    # dot
    self.assertEqual(torch.tensor(0., device=device), fn(torch.dot, (0,), (0,)))
    self.assertEqual(torch.tensor(0., device=device), fn(torch.dot, (0,), (0,), test_out=True))

    if torch._C.has_lapack:
        # lu
        A_LU, pivots = fn(torch.lu, (0, 5, 5))
        self.assertEqual([(0, 5, 5), (0, 5)], [A_LU.shape, pivots.shape])
        A_LU, pivots = fn(torch.lu, (0, 0, 0))
        self.assertEqual([(0, 0, 0), (0, 0)], [A_LU.shape, pivots.shape])
        A_LU, pivots = fn(torch.lu, (2, 0, 0))
        self.assertEqual([(2, 0, 0), (2, 0)], [A_LU.shape, pivots.shape])
@dtypesIfCUDA(torch.cfloat, torch.cdouble,
              *get_all_fp_dtypes(include_half=not CUDA9, include_bfloat16=(CUDA11OrLater and SM53OrLater)))
@dtypes(*(set(get_all_dtypes()) - {torch.half, torch.bool}))
def test_blas_alpha_beta_empty(self, device, dtype):
    """addmv/addmm with an empty reduction dimension must still apply beta
    to the input tensor (result == beta * input)."""
    # This test is disabled on CUDA 9 due to:
    # See: https://github.com/pytorch/pytorch/issues/31006
    if dtype is torch.bfloat16 and self.device_type == 'xla':
        # TODO (@zasdfgbnm): this causes the following error on test
        # TestTorchDeviceTypeXLA.test_blas_alpha_beta_empty_xla_bfloat16:
        #
        #   RuntimeError: _th_equal not supported on CPUType for BFloat16
        return
    # ensure beta is respected
    value = 11
    input = torch.full((2,), value, dtype=dtype, device=device)
    mat = torch.ones((2, 0), dtype=dtype, device=device)
    vec = torch.ones((0,), dtype=dtype, device=device)
    out = torch.empty((2,), dtype=dtype, device=device)
    if dtype.is_complex:
        alpha = 6 + 7j
        beta = 3 + 4j
    else:
        alpha = 6
        beta = 3

    # torch.addmv: the (2, 0) @ (0,) product contributes nothing,
    # so the result must be exactly beta * input.
    self.assertEqual(torch.full((2,), beta * value, dtype=dtype, device=device),
                     torch.addmv(input=input, mat=mat, vec=vec, alpha=alpha, beta=beta))
    self.assertEqual(torch.full((2,), beta * value, dtype=dtype, device=device),
                     torch.addmv(input=input, mat=mat, vec=vec, alpha=alpha, beta=beta, out=out))

    # torch.addmm: `mat` (2, 0) from above is reused as mat1.
    input = torch.full((2, 3), value, dtype=dtype, device=device)
    mat2 = torch.ones((0, 3), dtype=dtype, device=device)
    out = torch.empty((2, 3), dtype=dtype, device=device)
    self.assertEqual(torch.full((2, 3), beta * value, dtype=dtype, device=device),
                     torch.addmm(input=input, mat1=mat, mat2=mat2, alpha=alpha, beta=beta))
    self.assertEqual(torch.full((2, 3), beta * value, dtype=dtype, device=device),
                     torch.addmm(input=input, mat1=mat, mat2=mat2, alpha=alpha, beta=beta, out=out))
@dtypes(*(get_all_complex_dtypes() + get_all_fp_dtypes()))
def test_blas_nan_out(self, device, dtype):
    """mv/mm/bmm into a NaN-prefilled `out` must match the functional result.

    These functions should work correctly with NaN filled outputs,
    but need special handling, see [NOTE: cpu_zero]
    """
    b = 3   # batch
    n = 5
    m = 7
    p = 11

    # torch.mv -- matrix is created as (m, n) then transposed, so it is
    # non-contiguous; the vector is a 0-strided expand of a scalar.
    nm = torch.randn((m, n), device=device).t()
    _m = torch.randn((), device=device).expand(m)
    _m_out = torch.full((m,), float('nan'), device=device)
    self.assertEqual(torch.mv(nm, _m), torch.mv(nm, _m, out=_m_out))
    self.assertEqual(0, torch.isnan(torch.mv(nm, _m)).sum())

    # torch.mm
    mp = torch.randn((p, m), device=device).t()
    np_out = torch.full((n, p), float('nan'), device=device)
    self.assertEqual(torch.mm(nm, mp), torch.mm(nm, mp, out=np_out))

    # torch.bmm
    bnm = torch.randn((b, m, n), device=device).transpose(1, 2)
    bmp = torch.randn((b, p, m), device=device).transpose(1, 2)
    bnp_out = torch.full((b, n, p), float('nan'), device=device)
    self.assertEqual(torch.bmm(bnm, bmp), torch.bmm(bnm, bmp, out=bnp_out))
@onlyCPU  # not supported by CUBLAS
def test_blas_mv_large_input(self, device):
    """Regression test for torch.mv with a large input and a preallocated out.

    This would previously fail if the allocated output had NaNs, see:
    https://github.com/pytorch/pytorch/issues/31663 and [NOTE: cpu_zero]
    """
    inner, outer = 3000, 200
    matrix = torch.randn((outer, inner), device=device).t()
    vector = torch.randn((), device=device).expand(outer)
    preallocated = torch.full((outer,), 0., device=device)
    self.assertEqual(torch.mv(matrix, vector), torch.mv(matrix, vector, out=preallocated))
@onlyCPU
def test_renorm_ps(self, device):
    """Check renorm against an explicit norm-clamp reference for several p-norms."""
    # full reduction
    x = torch.randn(5, 5, device=device)
    for p in [1, 2, 3, 4, inf]:
        res = x.renorm(p, 1, 1)
        # renorm(p, dim=1, maxnorm=1) scales each slice along dim 1 whose
        # p-norm exceeds 1 down to norm 1; equivalently, divide by the
        # per-slice norm clamped to a minimum of 1.
        expected = x / x.norm(p, 0, keepdim=True).clamp(min=1)
        self.assertEqual(res, expected, msg="renorm failed for {}-norm".format(p))
@skipCPUIfNoLapack
@skipCUDAIfNoCusolver
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_householder_product(self, device, dtype):
    """torch.linalg.householder_product must rebuild the same (thin) Q that
    torch.linalg.qr produces, given reflectors/tau extracted via LAPACK geqrf."""
    def generate_reflectors_and_tau(A):
        """
        This function uses numpy.linalg.qr with mode "raw" to extract output of LAPACK's geqrf.
        There is torch.geqrf function but it doesn't work with complex-valued input.
        """
        if A.numel() > 0:
            A_cpu = A.cpu()
            # Flatten all batch dims so each matrix can be processed one at a time.
            flattened_batch_shape = [-1, *A_cpu.shape[-2:]]
            reflectors = torch.empty_like(A_cpu).view(*flattened_batch_shape)
            tau_shape = [*A_cpu.shape[:-2], A_cpu.shape[-1]]
            tau = torch.empty(tau_shape, dtype=dtype).view(-1, A_cpu.shape[-1])
            for A_i, reflectors_i, tau_i in zip(A_cpu.contiguous().view(*flattened_batch_shape), reflectors, tau):
                reflectors_tmp, tau_i[:] = map(torch.from_numpy, np.linalg.qr(A_i, mode='raw'))
                # numpy's "raw" mode returns the LAPACK array transposed; undo that.
                reflectors_i[:] = reflectors_tmp.T
            reflectors = reflectors.view(*A_cpu.shape)
            tau = tau.view(tau_shape)
            return reflectors.to(A.device), tau.to(A.device)

        # Empty input: numpy can't help, return correctly-shaped empties.
        reflectors = torch.empty_like(A)
        tau = torch.empty(*A.shape[:-2], A.shape[-1], dtype=dtype, device=device)
        return reflectors, tau

    def run_test(shape):
        A = torch.randn(*shape, dtype=dtype, device=device)
        reflectors, tau = generate_reflectors_and_tau(A)
        expected, _ = torch.linalg.qr(A)
        actual = torch.linalg.householder_product(reflectors, tau)
        # torch.linalg.qr does not work correctly for zero batch dimension tensors
        # see https://github.com/pytorch/pytorch/issues/50576
        if (A.numel() > 0):
            self.assertEqual(expected, actual)
        else:
            self.assertTrue(actual.shape == shape)

        # if tau is empty and A is not the result should be a matrix with ones on the diagonal
        if (A.numel() > 0):
            tau_empty = torch.empty(*shape[:-2], 0, dtype=dtype, device=device)
            identity_mat = torch.zeros_like(reflectors)
            identity_mat.diagonal(dim1=-1, dim2=-2)[:] = 1
            actual = torch.linalg.householder_product(reflectors, tau_empty)
            self.assertEqual(actual, identity_mat)

        # out= variant must write the same result into the provided tensor.
        out = torch.empty_like(A)
        ans = torch.linalg.householder_product(reflectors, tau, out=out)
        self.assertEqual(ans, out)
        if (A.numel() > 0):
            self.assertEqual(expected, out)

    shapes = [(0, 0), (5, 0),  # Empty matrix
              (5, 5), (5, 3),  # Single matrix
              (0, 0, 0), (0, 5, 5), (0, 5, 3),  # Zero batch dimension tensors
              (2, 5, 5), (2, 5, 3),  # 3-dim tensors
              (2, 1, 5, 5), (2, 1, 5, 3)]  # 4-dim tensors
    for shape in shapes:
        run_test(shape)
@skipCPUIfNoLapack
@skipCUDAIfNoCusolver
def test_householder_product_errors_and_warnings(self, device):
    """Check shape/dtype/device validation and the out=-resize warning of
    torch.linalg.householder_product."""
    test_cases = [
        # input1 size, input2 size, error regex
        ((10,), (2,), r"input must have at least 2 dimensions"),
        ((10, 6), (20,), r"input.shape\[-1\] must be greater than or equal to tau.shape\[-1\]"),
        ((6, 10), (5,), r"input.shape\[-2\] must be greater than or equal to input.shape\[-1\]"),
    ]
    for a_size, tau_size, error_regex in test_cases:
        a = torch.rand(*a_size, device=device)
        tau = torch.rand(*tau_size, device=device)
        with self.assertRaisesRegex(RuntimeError, error_regex):
            torch.linalg.householder_product(a, tau)

    # if out tensor with wrong shape is passed a warning is given
    reflectors = torch.randn(3, 3, device=device)
    tau = torch.randn(3, device=device)
    out = torch.empty(2, 3, device=device)
    with warnings.catch_warnings(record=True) as w:
        # Trigger warning
        torch.linalg.householder_product(reflectors, tau, out=out)
        # Check warning occurs
        self.assertEqual(len(w), 1)
        self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))

    # dtypes should be safely castable
    out = torch.empty_like(reflectors).to(torch.int)
    with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
        torch.linalg.householder_product(reflectors, tau, out=out)
    with self.assertRaisesRegex(RuntimeError, "tau dtype Int does not match input dtype"):
        torch.linalg.householder_product(reflectors, tau.to(torch.int))

    if torch.cuda.is_available():
        # device of out and input should match
        wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
        out = torch.empty_like(reflectors).to(wrong_device)
        with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
            torch.linalg.householder_product(reflectors, tau, out=out)

        # device of tau and input should match
        wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
        tau = tau.to(wrong_device)
        with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
            torch.linalg.householder_product(reflectors, tau)
@precisionOverride({torch.complex64: 5e-6})
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double, torch.cfloat, torch.cdouble)
def test_lu(self, device, dtype):
    """LU factorization: shape/info checks, P@L@U reconstruction, singular
    inputs, and (on CUDA) the no-pivot variant."""
    from torch.testing._internal.common_utils import random_matrix

    def run_test(device, pivot):
        def run_subtest(matrix_size, batches, device, pivot, singular=False, a=None):
            # matrix_size is either an int (square) or a (rows, columns) pair.
            if isinstance(matrix_size, int):
                rows = columns = matrix_size
            else:
                rows, columns = matrix_size
            if a is None:
                a = random_matrix(rows, columns, *batches, **dict(singular=singular, dtype=dtype, device=device))
            a_LU_info, pivots_info, info_ = a.lu(pivot=pivot, get_infos=True)
            self.assertEqual(a_LU_info.size(), torch.Size(batches + (rows, columns)))
            self.assertEqual(pivots_info.size(), torch.Size(batches + (min(rows, columns),)))
            self.assertEqual(info_.size(), torch.Size(batches))
            # If a randomly generated input matrix is singular,
            # then info_ contains indices i such that U[i, i] ==
            # 0. This however conveys that the factorization was
            # successful albeit with a singular input. Therefore,
            # we require info.min() >= 0
            self.assertGreaterEqual(info_.min(), 0)
            # The get_infos=False path must produce the same factors.
            a_LU, pivots = a.lu(pivot=pivot)
            self.assertEqual(a_LU, a_LU_info)
            self.assertEqual(pivots_info, pivots)

            # Roundtrip: P @ L @ U must reconstruct the input.
            P, L, U = torch.lu_unpack(a_LU, pivots)
            P_ = P.cpu().numpy()
            L_ = L.cpu().numpy()
            U_ = U.cpu().numpy()
            self.assertEqual(np.matmul(P_, np.matmul(L_, U_)), a)

            if self.device_type == 'cuda':
                # lu without pivoting is implemented only for cuda device
                a_LU_info_nopiv, nopiv, info_nopiv = a.lu(pivot=False, get_infos=True)
                P_nopiv, L_nopiv, U_nopiv = torch.lu_unpack(a_LU_info_nopiv, nopiv)
                P_nopiv_ = P_nopiv.cpu().numpy()
                L_nopiv_ = L_nopiv.cpu().numpy()
                U_nopiv_ = U_nopiv.cpu().numpy()
                self.assertEqual(np.matmul(P_nopiv_, np.matmul(L_nopiv_, U_nopiv_)), a)
                # Without pivoting the pivot array must be the identity permutation.
                k = min(rows, columns)
                self.assertEqual(nopiv, torch.arange(1, 1 + k, device=device, dtype=torch.int32).expand(a.shape[:-2] + (k, )))
                if not singular:
                    # It is not guaranteed that LU factorization
                    # without pivoting is able to determine if a
                    # matrix is singular while LU factorization
                    # with pivoting is. Therefore, we require the
                    # equality of info-s only for non-singular
                    # matrices.
                    # NOTE: info_ is reshaped because info_nopiv might have
                    # squashed batch dimensions for complex types on CUDA,
                    # see the TODOs above.
                    self.assertEqual(info_.reshape(info_nopiv.shape), info_nopiv)

        for ms, batch in itertools.product([3, 5, 7, (4, 2), (3, 4)], [(), (2,), (3,), (3, 5)]):
            run_subtest(ms, batch, device, pivot)
            run_subtest(ms, batch, device, pivot, singular=True)

            # Reproducer of a magma bug, see https://bitbucket.org/icl/magma/issues/13/getrf_batched-kernel-produces-nans-on
            a = torch.ones(batch + (ms if isinstance(ms, tuple) else (ms, ms)), dtype=torch.double, device=device)
            run_subtest(ms, batch, device, pivot, singular=True, a=a)

        # Info should be positive for rank deficient matrices
        a = torch.ones(5, 3, 3, device=device)
        self.assertGreater(a.lu(pivot=pivot, get_infos=True)[2][0], 0)

    run_test(device, True)

    if self.device_type == 'cpu':
        # Error checking, no pivoting variant on CPU
        with self.assertRaisesRegex(RuntimeError, 'lu without pivoting is not implemented on the CPU'):
            torch.lu(torch.empty(1, 2, 2), pivot=False)
    else:
        run_test(device, False)
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
@skipCUDAIfRocm
@precisionOverride({torch.float: 1e-3})
def test_lu_unpack(self, device, dtype):
    """torch.lu_unpack must reconstruct the original matrix as P @ L @ U."""
    square_shapes = ((3, 3), (5, 3, 3), (7, 3, 5, 5), (7, 5, 3, 3, 3))
    general_shapes = square_shapes + (
        (3, 5), (5, 3), (3, 3, 5), (3, 5, 3),
        (7, 5, 3, 5, 3), (7, 5, 3, 3, 5),
        # empty tensors
        (0, 0), (0, 0, 0), (0, 3, 3),
    )

    def check_roundtrip(mat, pivot):
        lu_data, pivots = torch.lu(mat, pivot=pivot)
        perm, lower, upper = torch.lu_unpack(lu_data, pivots)
        self.assertEqual(perm.matmul(lower.matmul(upper)), mat)

    def run_test(pivot):
        # Random square (possibly batched) inputs.
        for shape in square_shapes:
            check_roundtrip(torch.randn(*shape, dtype=dtype, device=device), pivot)
        # Rectangular and empty inputs, with small entries to keep the
        # factorization well conditioned.
        for shape in general_shapes:
            check_roundtrip(make_tensor(shape, dtype=dtype, device=device, low=-0.1, high=+0.1), pivot)

    run_test(True)
    if self.device_type == 'cuda':
        run_test(False)
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(torch.double)
def test_lu_unpack_check_input(self, device, dtype):
    """Validate lu_unpack input checking and its unpack_data/unpack_pivots flags."""
    x = torch.rand(5, 5, 5, device=device, dtype=dtype)
    lu_data, lu_pivots = torch.lu(x, pivot=True)

    # pivots must be int32 ...
    with self.assertRaisesRegex(RuntimeError, "torch.int32 dtype"):
        torch.lu_unpack(lu_data, lu_pivots.long())
    # ... and contiguous.
    with self.assertRaisesRegex(RuntimeError, "contiguous tensor"):
        torch.lu_unpack(lu_data, lu_pivots.mT)

    # check that once flags are unset, Nones are returned
    p, l, u = torch.lu_unpack(lu_data, lu_pivots, unpack_data=False)
    self.assertTrue((l == u) and l is None)
    p, l, u = torch.lu_unpack(lu_data, lu_pivots, unpack_pivots=False)
    self.assertTrue(p is None)
    p, l, u = torch.lu_unpack(lu_data, lu_pivots, unpack_data=False, unpack_pivots=False)
    self.assertTrue((p == l == u) and p is None)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double)
@skipCUDAIfRocm
def test_lobpcg_basic(self, device, dtype):
    # Run the shared LOBPCG driver with the 'basic' solver method.
    self._test_lobpcg_method(device, dtype, 'basic')
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double)
@skipCUDAIfRocm
def test_lobpcg_ortho(self, device, dtype):
    # Run the shared LOBPCG driver with the 'ortho' (robust) solver method.
    self._test_lobpcg_method(device, dtype, 'ortho')
def _test_lobpcg_method(self, device, dtype, method):
    """Shared driver for the LOBPCG tests: checks eigenpairs computed by
    torch.lobpcg with the given `method` on dense and sparse, standard and
    generalized symmetric positive-definite eigenproblems."""
    from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
    from torch._linalg_utils import matmul, qform
    from torch._lobpcg import lobpcg

    def test_tracker(worker):
        # Called by lobpcg every iteration; validates solver invariants once
        # at least k eigenpairs have converged.
        k = worker.iparams['k']
        nc = worker.ivars['converged_count']
        if k <= nc:
            tol = worker.fparams['tol']
            rerr = worker.tvars['rerr']
            X = worker.X
            E = worker.E
            B = worker.B
            A = worker.A
            dtype = X.dtype
            device = X.device

            # Check convergence
            self.assertLessEqual(rerr[:k].max(), tol)

            # Check B-orthogonality
            I = torch.eye(k, k, dtype=dtype, device=device)
            self.assertEqual(qform(B, X[:, :k]), I)

            # Check block equation
            self.assertEqual(qform(A, X[:, :k]) / E[:k], I, atol=0.2, rtol=0)

    orig_lobpcg = lobpcg

    def lobpcg(*args, **kwargs):
        # Wrapper that pins the tracker and solver settings for every call below.
        kwargs['tracker'] = test_tracker
        kwargs['niter'] = 1000
        kwargs['method'] = method
        kwargs['tol'] = 1e-8
        return orig_lobpcg(*args, **kwargs)

    prec = 5e-4

    # check dense input
    mm = torch.matmul
    for batches in [(), (2,), (2, 3)]:
        for m, n, k in [
                (9, 3, 1),
                (9, 3, 2),
                (9, 2, 2),
                (100, 15, 5),
        ]:
            # skip tests that are known to fail with the basic
            # LOBPCG method due to calling cholesky on singular
            # input
            if method == 'basic' and (m, n, k) in [(9, 2, 2), (100, 15, 5)]:
                continue
            A = random_symmetric_pd_matrix(m, *batches, device=device, dtype=dtype)
            B = random_symmetric_pd_matrix(m, *batches, device=device, dtype=dtype)

            # classical eigenvalue problem, smallest eigenvalues
            E, V = lobpcg(A, k=k, n=n, largest=False)
            self.assertEqual(E.shape, batches + (k,))
            self.assertEqual(V.shape, batches + (m, k))
            # Residual of the eigen-equation A V = V diag(E).
            self.assertEqual(matmul(A, V), mm(V, E.diag_embed()), atol=prec, rtol=0)
            e = torch.symeig(A)[0]
            e_smallest = e[..., :k]
            self.assertEqual(E, e_smallest)

            # classical eigenvalue problem, largest eigenvalues
            E, V = lobpcg(A, k=k, n=n, largest=True)
            e_largest, _ = torch.sort(e[..., -k:], descending=True)
            self.assertEqual(E, e_largest, atol=prec, rtol=0)
            self.assertEqual(matmul(A, V), mm(V, E.diag_embed()), atol=prec, rtol=0)

            # generalized eigenvalue problem, smallest eigenvalues
            E, V = lobpcg(A, B=B, k=k, n=n, largest=False)
            self.assertEqual(matmul(A, V), mm(matmul(B, V), E.diag_embed()), atol=prec, rtol=0)

            # generalized eigenvalue problem, largest eigenvalues
            # (normalized by E.max() to keep the comparison well scaled)
            E, V = lobpcg(A, B=B, k=k, n=n, largest=True)
            self.assertEqual(matmul(A, V) / E.max(), mm(matmul(B, V), (E / E.max()).diag_embed()),
                             atol=prec, rtol=0)

    # check sparse input
    for m, n, k, density in [
            (5, 1, 1, 0.8),
            (9, 3, 2, 0.5),
            (100, 1, 1, 0.1),
            (1000, 7, 3, 0.01),
    ]:
        # skip tests that are known to fail with the basic LOBCG
        # method due to insufficient accuracy
        if method == 'basic' and (m, n, k, density) in [(1000, 7, 3, 0.01)]:
            continue
        A = random_sparse_pd_matrix(m, density=density, device=device, dtype=dtype)
        B = random_sparse_pd_matrix(m, density=density, device=device, dtype=dtype)
        # NOTE(review): relies on random_sparse_pd_matrix producing a matrix
        # with eigenvalues i/m, i = 1..m -- confirm in common_utils.
        A_eigenvalues = torch.arange(1, m + 1, dtype=dtype) / m
        e_smallest = A_eigenvalues[..., :k]
        e_largest, _ = torch.sort(A_eigenvalues[..., -k:], descending=True)

        # classical eigenvalue problem, smallest eigenvalues
        E, V = lobpcg(A, k=k, n=n, largest=False)
        self.assertEqual(E, e_smallest)
        self.assertEqual(matmul(A, V), mm(V, E.diag_embed()), atol=prec, rtol=0)

        # classical eigenvalue problem, largest eigenvalues
        E, V = lobpcg(A, k=k, n=n, largest=True)
        self.assertEqual(matmul(A, V), mm(V, E.diag_embed()), atol=prec, rtol=0)
        self.assertEqual(E, e_largest)

        # generalized eigenvalue problem, smallest eigenvalues
        E, V = lobpcg(A, B=B, k=k, n=n, largest=False)
        self.assertEqual(matmul(A, V), matmul(B, mm(V, E.diag_embed())), atol=prec, rtol=0)

        # generalized eigenvalue problem, largest eigenvalues
        E, V = lobpcg(A, B=B, k=k, n=n, largest=True)
        self.assertEqual(matmul(A, V) / E.max(), mm(matmul(B, V), (E / E.max()).diag_embed()),
                         atol=prec, rtol=0)
@skipCPUIfNoLapack
@onlyCPU
@dtypes(torch.double)
def test_lobpcg_torchscript(self, device, dtype):
    """torch.lobpcg must remain scriptable and still solve A V = V E accurately."""
    from torch.testing._internal.common_utils import random_sparse_pd_matrix
    from torch._linalg_utils import matmul as mm

    scripted_lobpcg = torch.jit.script(torch.lobpcg)

    size = 500
    num_pairs = 5
    matrix = random_sparse_pd_matrix(size, density=2.0 / size, device=device, dtype=dtype)
    initial = torch.randn((size, num_pairs), dtype=dtype, device=device)
    eigvals, eigvecs = scripted_lobpcg(matrix, X=initial)
    # Relative residual of the eigen-equation A V = V E.
    residual = torch.norm((mm(matrix, eigvecs) - eigvecs * eigvals), 2) / eigvals.max()
    self.assertLess(residual, 1e-6)
@unittest.skipIf(not TEST_SCIPY or (TEST_SCIPY and scipy.__version__ < '1.4.1'), "Scipy not found or older than 1.4.1")
@skipCPUIfNoLapack
@onlyCPU
@dtypes(torch.double)
def test_lobpcg_scipy(self, device, dtype):
    """Compare torch and scipy.sparse.linalg implementations of lobpcg
    (accuracy, iteration counts, timings, and tiny-tolerance handling).
    """
    import time
    from torch.testing._internal.common_utils import random_sparse_pd_matrix
    from torch._linalg_utils import matmul as mm
    from scipy.sparse.linalg import lobpcg as scipy_lobpcg
    import scipy.sparse

    def toscipy(A):
        # Convert a torch dense or sparse-COO tensor to its scipy counterpart.
        if A.layout == torch.sparse_coo:
            values = A.coalesce().values().cpu().numpy().copy()
            indices = A.coalesce().indices().cpu().numpy().copy()
            return scipy.sparse.coo_matrix((values, (indices[0], indices[1])), A.shape)
        return A.cpu().numpy().copy()

    niter = 1000
    repeat = 10
    m = 500   # size of the square matrix
    k = 7     # the number of requested eigenpairs
    A1 = random_sparse_pd_matrix(m, density=2.0 / m, device=device, dtype=dtype)
    B1 = random_sparse_pd_matrix(m, density=2.0 / m, device=device, dtype=dtype)
    X1 = torch.randn((m, k), dtype=dtype, device=device)

    A2 = toscipy(A1)
    B2 = toscipy(B1)
    X2 = toscipy(X1)

    lambdas1 = []

    def tracker(worker):
        # Record the eigenvalue history so iteration counts can be compared.
        lambdas1.append(worker.E[:])

    tol = 1e-8
    # tol for scipy lobpcg will be choosed so that the number of
    # iterations will be equal or very close to pytorch lobpcg
    # (that is around 170-180)

    # Standard eigenvalue problem
    E1, V1 = torch.lobpcg(A1, X=X1, niter=niter, largest=True, tracker=tracker, tol=tol)
    E2, V2, lambdas2 = scipy_lobpcg(A2, X2, maxiter=niter, largest=True, retLambdaHistory=True, tol=1.1 * tol)
    iters1 = len(lambdas1)
    iters2 = len(lambdas2)
    # Iteration counts must agree to within 5%.
    self.assertLess(abs(iters1 - iters2), 0.05 * max(iters1, iters2))

    E2a, V2a = scipy_lobpcg(A2, X2, maxiter=niter, largest=False)

    eq_err = torch.norm((mm(A1, V1) - V1 * E1), 2) / E1.max()
    eq_err_scipy = (abs(A2.dot(V2) - V2 * E2)**2).sum() ** 0.5 / E2.max()
    self.assertLess(eq_err, 1e-6)        # std
    self.assertLess(eq_err_scipy, 1e-6)  # std

    self.assertEqual(E1, torch.from_numpy(E2.copy()))

    # Generalized eigenvalue problem
    lambdas1 = []

    def tracker(worker):
        lambdas1.append(worker.E[:])

    E1, V1 = torch.lobpcg(A1, B=B1, X=X1, niter=niter, largest=True, tracker=tracker, tol=tol)
    E2, V2, lambdas2 = scipy_lobpcg(A2, X2, B=B2, maxiter=niter, largest=True, retLambdaHistory=True, tol=39 * tol)
    E2a, V2a = scipy_lobpcg(A2, X2, B=B2, maxiter=niter, largest=False)
    iters1 = len(lambdas1)
    iters2 = len(lambdas2)
    self.assertLess(abs(iters1 - iters2), 0.05 * max(iters1, iters2))

    eq_err = torch.norm((mm(A1, V1) - mm(B1, V1) * E1), 2) / E1.max()
    eq_err_scipy = (abs(A2.dot(V2) - B2.dot(V2) * E2)**2).sum() ** 0.5 / E2.max()
    self.assertLess(eq_err, 1e-6)        # general
    self.assertLess(eq_err_scipy, 1e-6)  # general

    self.assertEqual(E1, torch.from_numpy(E2.copy()))

    # Timings
    elapsed_ortho = 0
    elapsed_ortho_general = 0
    elapsed_scipy = 0
    elapsed_general_scipy = 0
    for i in range(repeat):
        start = time.time()
        torch.lobpcg(A1, X=X1, niter=niter, method='ortho', tol=tol)
        end = time.time()
        elapsed_ortho += end - start

        start = time.time()
        torch.lobpcg(A1, X=X1, B=B1, niter=niter, method='ortho', tol=tol)
        end = time.time()
        elapsed_ortho_general += end - start

        start = time.time()
        scipy_lobpcg(A2, X2, maxiter=niter, tol=1.1 * tol)
        end = time.time()
        elapsed_scipy += end - start

        start = time.time()
        scipy_lobpcg(A2, X2, B=B2, maxiter=niter, tol=39 * tol)
        end = time.time()
        elapsed_general_scipy += end - start

    elapsed_ortho_ms = 1000.0 * elapsed_ortho / repeat
    elapsed_ortho_general_ms = 1000.0 * elapsed_ortho_general / repeat
    elapsed_scipy_ms = 1000.0 * elapsed_scipy / repeat
    elapsed_general_scipy_ms = 1000.0 * elapsed_general_scipy / repeat

    print('''
CPU timings: torch.lobpcg vs scipy.sparse.linalg.lobpcg
-------------------------------------------------------
              | standard | generalized | method
torch.lobpcg  |{:10.2f}  |{:10.2f}     | ortho
scipy_lobpcg  |{:10.2f}  |{:10.2f}     | N/A
-(input size: {:4}, eigenpairs:{:2}, units: ms per call)-
    '''.format(elapsed_ortho_ms, elapsed_ortho_general_ms,
               elapsed_scipy_ms, elapsed_general_scipy_ms,
               m, k))

    # Handling of very small tolerence
    tol = 1e-100

    lambdas1 = []

    def tracker(worker):
        lambdas1.append(worker.E[:])

    E1, V1 = torch.lobpcg(A1, X=X1, niter=niter, largest=True, tracker=tracker, tol=tol)
    iters1 = len(lambdas1)
    eq_err = torch.norm((mm(A1, V1) - V1 * E1), 2) / E1.max()

    try:
        E2, V2, lambdas2 = scipy_lobpcg(A2, X2, maxiter=niter, largest=True, retLambdaHistory=True, tol=tol)
        iters2 = len(lambdas2)
        eq_err_scipy = (abs(A2.dot(V2) - V2 * E2)**2).sum() ** 0.5 / E2.max()
    except Exception as msg:
        print('Calling scipy_lobpcg failed [standard]:', msg)
        iters2 = -1
        eq_err_scipy = -1

    lambdas1 = []

    def tracker(worker):
        lambdas1.append(worker.E[:])

    E1, V1 = torch.lobpcg(A1, X=X1, B=B1, niter=niter, largest=True, tracker=tracker, tol=tol)
    iters1_general = len(lambdas1)
    eq_err_general = torch.norm((mm(A1, V1) - mm(B1, V1) * E1), 2) / E1.max()

    try:
        E2, V2, lambdas2 = scipy_lobpcg(A2, X2, B=B2, maxiter=niter, largest=True, retLambdaHistory=True, tol=tol)
        iters2_general = len(lambdas2)
        eq_err_general_scipy = (abs(A2.dot(V2) - B2.dot(V2) * E2)**2).sum() ** 0.5 / E2.max()
    except Exception as msg:
        print('Calling scipy_lobpcg failed [generalized]:', msg)
        iters2_general = -1
        eq_err_general_scipy = -1

    print('''\
Handling of small tol={:6.0e}: torch.lobpcg vs scipy.sparse.linalg.lobpcg
----------------------------------------------------------------------------
              | standard | generalized |  niter | method
torch.lobpcg  |{:10.2e}  |{:10.2e}     |{:6} | ortho
scipy_lobpcg  |{:10.2e}  |{:10.2e}     |{:6} | N/A
---(input size: {:4}, eigenpairs:{:2}, units: relative error, maxiter={:4})---
'''.format(tol, eq_err, eq_err_general, iters1, eq_err_scipy, eq_err_general_scipy, iters2, m, k, niter))
def _test_addmm_addmv(self, f, t, m, v, *, alpha=None, beta=None, transpose_out=False):
    """Shared checker for torch.addmm / torch.addmv.

    Computes f(t, m, v, alpha, beta) three ways -- the functional form, the
    out= form into a NaN-prefilled tensor (optionally transposed so the out
    tensor is non-contiguous), and a numpy reference -- and asserts agreement.
    """
    dtype = t.dtype
    numpy_dtype = dtype
    if dtype in {torch.bfloat16}:
        # numpy has no bfloat16; compute the reference in float32.
        numpy_dtype = torch.float
    if dtype.is_complex:
        alpha = 0.9 + 0.3j if alpha is None else alpha
        beta = 0.5 + 0.6j if beta is None else beta
    else:
        alpha = 1.2 if alpha is None else alpha
        beta = 0.8 if beta is None else beta
    res1 = f(t, m, v, alpha=alpha, beta=beta)
    # NaN-prefill res2 so that any element the kernel fails to write is caught.
    res2 = torch.full_like(res1, math.nan)
    if transpose_out:
        res2 = res2.t().clone(memory_format=torch.contiguous_format).t()
    f(t, m, v, alpha=alpha, beta=beta, out=res2)
    res3 = alpha * (m.to(numpy_dtype).cpu().numpy() @ v.to(numpy_dtype).cpu().numpy())
    if beta != 0:
        # beta == 0 must ignore `t` entirely (it may contain NaN).
        res3 += (beta * t).to(numpy_dtype).cpu().numpy()
    res3 = torch.from_numpy(res3).to(dtype)
    self.assertEqual(res1, res2)
    self.assertEqual(res1, res3)
@precisionOverride({torch.bfloat16: 1e-0, torch.half: 5e-4, torch.float: 1e-4, torch.double: 1e-8,
                    torch.cfloat: 1e-4, torch.cdouble: 1e-8})
@dtypesIfCUDA(*get_all_complex_dtypes(),
              *get_all_fp_dtypes(include_bfloat16=(TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater)),
                                 include_half=(not TEST_WITH_ROCM)))
@dtypes(torch.bfloat16, torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_addmv(self, device, dtype):
    """torch.addmv over combinations of contiguous, expanded (0-strided) and
    transposed operands, including the beta=0-with-NaN-input case."""
    # have to use torch.randn(...).to(bfloat16) instead of
    # torch.randn(..., dtype=bfloat16). randn does not support
    # bfloat16 yet.
    # "*0.2" to reduce errors for low precision
    ts = [
        0.2 * torch.randn(50, device=device).to(dtype),
        0.2 * torch.randn(1, device=device).to(dtype).expand(50),
    ]
    vs = [
        0.2 * torch.randn(100, device=device).to(dtype),
        0.2 * torch.ones(1, device=device).to(dtype).expand(100),  # to reduce errors for low precision
    ]
    ms = [
        # 0d
        0.2 * torch.ones((), device=device).to(dtype).expand(50, 100),  # to reduce errors for low precision
        # 1d
        0.2 * torch.randn((1, 100), device=device).to(dtype).expand(50, 100),
        # this initialization reduces errors for low precision for broadcasted matrices
        # by making sure that intermediate and result values are exactly representable
        # in low precision type
        0.2 * torch.randint(3, (50, 1), dtype=torch.float, device=device).to(dtype).expand(50, 100),
        # 2d
        0.2 * torch.randn((50, 100), device=device).to(dtype),
        0.2 * torch.randn((100, 50), device=device).to(dtype).t(),
    ]
    for m, v, t in itertools.product(ms, vs, ts):
        self._test_addmm_addmv(torch.addmv, t, m, v)
    # Test beta=0, t=nan
    t = torch.full((50,), math.nan, device=device).to(dtype)
    for m, v in itertools.product(ms, vs):
        self._test_addmm_addmv(torch.addmv, t, m, v, beta=0)
@dtypesIfCUDA(*get_all_fp_dtypes(include_bfloat16=(TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater))))
@dtypes(torch.float, torch.double)
def test_addmv_rowmajor_colmajor_incx_incy_lda(self, device, dtype):
    """Exercise gemv through every combination of matrix layout
    (row-/column-major), vector strides (incx/incy) and leading-dimension
    padding (lda_tail).

    tests (o, s)*(s). o is output size, s is summed size.
    """
    o = 5
    s = 3
    a_data = torch.arange(1, o * s + 1, device=device, dtype=dtype).view(o, s)
    x_data = torch.arange(1, s + 1, 1, device=device, dtype=dtype)
    y_data = torch.ones(o, device=device, dtype=dtype)

    def _test(row_major, incx, incy, lda_tail):
        # Embed the operands in NaN-filled backing storage so that any
        # out-of-bounds read by the BLAS kernel poisons the result.
        if row_major:
            a_storage = torch.full((o, s + lda_tail), float('nan'), device=device, dtype=dtype)
        else:
            a_storage = torch.full((s, o + lda_tail), float('nan'), device=device, dtype=dtype).permute(1, 0)
        a = a_storage[:o, :s].copy_(a_data)

        x_storage = torch.full((s, incx), float('nan'), device=device, dtype=dtype)
        x = x_storage[:, 0].copy_(x_data)

        y_storage = torch.full((o, incy), float('nan'), device=device, dtype=dtype)
        y = y_storage[:, 0].copy_(y_data)

        self._test_addmm_addmv(torch.addmv, y, a, x)

    for row_major, incx, incy, lda_tail in itertools.product((False, True), (1, 2), (1, 2), (0, 1)):
        _test(row_major, incx, incy, lda_tail)
@precisionOverride({torch.double: 1e-8, torch.float: 1e-4, torch.bfloat16: 0.6,
torch.half: 1e-1, torch.cfloat: 1e-4, torch.cdouble: 1e-8})
@dtypesIfCUDA(*get_all_complex_dtypes(),
*get_all_fp_dtypes(include_bfloat16=(TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater))))
@dtypes(*get_all_complex_dtypes(), *get_all_fp_dtypes())
@tf32_on_and_off(0.05)
def test_addmm(self, device, dtype):
M = torch.randn(10, 25, device=device).to(dtype)
m1 = torch.randn(10, 50, device=device).to(dtype)
m2 = torch.randn(50, 25, device=device).to(dtype)
self._test_addmm_addmv(torch.addmm, M, m1, m2)
# Test 0-strided
M = torch.randn(10, 1, device=device).to(dtype).expand(10, 25)
m1 = torch.randn(10, 1, device=device).to(dtype).expand(10, 50)
m2 = torch.randn(50, 25, device=device).to(dtype)
self._test_addmm_addmv(torch.addmm, M, m1, m2)
# Test beta=0, M=nan
M = torch.full((10, 25), math.nan, device=device).to(dtype)
m1 = torch.randn(10, 50, device=device).to(dtype)
m2 = torch.randn(50, 25, device=device).to(dtype)
self._test_addmm_addmv(torch.addmm, M, m1, m2, beta=0)
# Test transpose
for t1, t2, t3, t4 in itertools.product([True, False], repeat=4):
def maybe_transpose(cond, m):
if not cond:
return m
return m.t().clone(memory_format=torch.contiguous_format).t()
M = maybe_transpose(t1, torch.randn(10, 25, device=device).to(dtype))
m1 = maybe_transpose(t2, torch.randn(10, 50, device=device).to(dtype))
m2 = maybe_transpose(t3, torch.randn(50, 25, device=device).to(dtype))
self._test_addmm_addmv(torch.addmm, M, m1, m2, transpose_out=t4)
@dtypes(torch.float, torch.double)
@dtypesIfCUDA(*([torch.float, torch.double] + get_all_complex_dtypes()))
@tf32_on_and_off(0.005)
def test_addmm_sizes(self, device, dtype):
for m in [0, 1, 25]:
for n in [0, 1, 10]:
for k in [0, 1, 8]:
M = torch.randn(n, m, device=device).to(dtype)
m1 = torch.randn(n, k, device=device).to(dtype)
m2 = torch.randn(k, m, device=device).to(dtype)
self._test_addmm_addmv(torch.addmm, M, m1, m2)
m1 = torch.randn(n, k + 1, device=device).to(dtype)
m2 = torch.randn(k, m, device=device).to(dtype)
self.assertRaisesRegex(RuntimeError, f"{n}x{k + 1}.*{k}x{m}", lambda: torch.addmm(M, m1, m2))
self.assertRaisesRegex(RuntimeError, f"{n}x{k + 1}.*{k}x{m}", lambda: torch.mm(m1, m2))
@dtypes(torch.half)
@onlyCUDA
def test_addmm_baddbmm_overflow(self, device, dtype):
orig = torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
inp = torch.zeros(128, 128, dtype=torch.half, device=device)
mat1 = torch.ones(128, 1000, dtype=torch.half, device=device) * 100
mat2 = torch.ones(1000, 128, dtype=torch.half, device=device) * 100
out = torch.addmm(inp, mat1, mat2, alpha=0.001, beta=0.)
# just check for no overflow on ROCM
if TEST_WITH_ROCM:
self.assertFalse(out.isinf().any())
else:
self.assertTrue((out == 10000.).all())
inp = torch.zeros(3, 128, 128, dtype=torch.half, device=device)
mat1 = torch.ones(3, 128, 1000, dtype=torch.half, device=device) * 100
mat2 = torch.ones(3, 1000, 128, dtype=torch.half, device=device) * 100
out = torch.baddbmm(inp, mat1, mat2, alpha=0.001, beta=0.)
if TEST_WITH_ROCM:
self.assertFalse(out.isinf().any())
else:
self.assertTrue((out == 10000.).all())
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = orig
@unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "cublas runtime error")
@onlyCUDA
def test_matmul_45724(self, device):
# https://github.com/pytorch/pytorch/issues/45724
a = torch.rand(65537, 22, 64, device=device, dtype=torch.half)
b = torch.rand(65537, 64, 22, device=device, dtype=torch.half)
c = torch.full((65537, 22, 22), math.nan, dtype=torch.half, device=device)
cpu_result = torch.matmul(a.cpu().float(), b.cpu().float()).cuda().half()
torch.matmul(a, b, out=c)
self.assertEqual(c, cpu_result)
    @slowTest
    @onlyNativeDeviceTypes
    @dtypes(torch.float32, torch.float64, torch.bfloat16, torch.int32, torch.int64, torch.cfloat, torch.cdouble)
    @dtypesIfCUDA(torch.float32, torch.float64, torch.cfloat, torch.cdouble)
    @tf32_on_and_off(0.01)
    def test_mm(self, device, dtype):
        """torch.mm against a naive triple-loop Python reference, covering
        contiguous, transposed, zero-stride (expanded) and out= operands
        for several sizes and dtypes."""
        def _test_mm(n, m, p, dtype, genf):
            # helper function: naive O(n*m*p) reference matmul
            def matrixmultiply(mat1, mat2):
                n = mat1.size(0)
                m = mat1.size(1)
                p = mat2.size(1)
                res = torch.zeros(n, p, dtype=dtype, device=device)
                for i, j in iter_indices(res):
                    res[i, j] = sum(mat1[i, k] * mat2[k, j] for k in range(m))
                return res
            # contiguous case
            mat1 = genf(n, m)
            mat2 = genf(m, p)
            res = torch.mm(mat1, mat2)
            res2 = matrixmultiply(mat1, mat2)
            self.assertEqual(res, res2)
            # non contiguous case 1: second operand transposed
            mat1 = genf(n, m)
            mat2 = genf(p, m).t()
            res = torch.mm(mat1, mat2)
            res2 = matrixmultiply(mat1, mat2)
            self.assertEqual(res, res2)
            # non contiguous case 2: first operand transposed
            mat1 = genf(m, n).t()
            mat2 = genf(m, p)
            res = torch.mm(mat1, mat2)
            res2 = matrixmultiply(mat1, mat2)
            self.assertEqual(res, res2)
            # non contiguous case 3: both operands transposed
            mat1 = genf(m, n).t()
            mat2 = genf(p, m).t()
            res = torch.mm(mat1, mat2)
            res2 = matrixmultiply(mat1, mat2)
            self.assertEqual(res, res2)
            # test with zero stride: mat2 is one column expanded to p copies
            mat1 = genf(n, m)
            mat2 = genf(m, 1).expand(m, p)
            res = torch.mm(mat1, mat2)
            res2 = matrixmultiply(mat1, mat2)
            self.assertEqual(res, res2)
            # explicitly exercise the _out variant in torch.mm().
            # contiguous case
            mat1 = genf(n, m)
            mat2 = genf(m, p)
            res = genf(n, p)
            torch.mm(mat1, mat2, out=res)
            res2 = matrixmultiply(mat1, mat2)
            self.assertEqual(res, res2)
            # explicitly exercise the _out variant in torch.mm().
            # non contiguous case 3
            mat1 = genf(m, n).t()
            mat2 = genf(p, m).t()
            res = genf(n, p)
            torch.mm(mat1, mat2, out=res)
            res2 = matrixmultiply(mat1, mat2)
            self.assertEqual(res, res2)
        def genf_int(x, y):
            return torch.randint(0, 100, (x, y), dtype=dtype, device=device)
        def genf_bfloat(x, y):
            # scaled down by 0.1 — presumably to keep bfloat16 rounding error
            # within the test tolerance; TODO confirm
            return torch.randn(x, y, dtype=torch.float32, device=device).to(dtype) * 0.1
        def genf_float(x, y):
            return torch.randn(x, y, dtype=dtype, device=device)
        # dispatch to the right generator per dtype family
        for (n, m, p) in [(20, 10, 15), (15, 20, 10), (25, 18, 10)]:
            if (dtype == torch.int32) or (dtype == torch.int64):
                genf = genf_int
            elif (dtype == torch.bfloat16):
                genf = genf_bfloat
            else:
                genf = genf_float
            _test_mm(n, m, p, dtype, genf)
@onlyNativeDeviceTypes
def test_mm_bmm_non_memory_dense(self, device):
def _slice(tensor, fn):
return fn(tensor)[..., ::2]
A = torch.randn(3, 6, dtype=torch.cfloat, device=device)
B = torch.randn(3, 3, dtype=torch.cfloat, device=device)
out = torch.empty(3, 3, device=device, dtype=torch.complex64).t()
out1 = torch.empty(3, 3, device=device, dtype=torch.complex64).t()
A_conj = _slice(A, torch.conj)
A_conj_physical = _slice(A, torch.conj_physical)
self.assertEqual(torch.mm(A_conj, B, out=out), torch.mm(A_conj_physical, B, out=out))
self.assertEqual(torch.mm(A_conj.t(), B, out=out), torch.mm(A_conj_physical.t(), B, out=out))
Ab = torch.randn(2, 3, 6, dtype=torch.cfloat, device=device)
Bb = torch.randn(2, 3, 3, dtype=torch.cfloat, device=device)
Bb_ = torch.randn(1, 3, 3, dtype=torch.cfloat, device=device).expand(2, 3, 3)
out_b = torch.empty(2, 3, 3, device=device, dtype=torch.complex64).mT
Ab_conj = _slice(Ab, torch.conj)
Ab_conj_physical = _slice(Ab, torch.conj_physical)
def t_b(tensor):
return tensor.mT
self.assertEqual(torch.bmm(Ab_conj, Bb, out=out_b), torch.bmm(Ab_conj_physical, Bb, out=out_b))
self.assertEqual(torch.bmm(t_b(Ab_conj), Bb, out=out_b), torch.bmm(t_b(Ab_conj_physical), Bb, out=out_b))
# test broadcasting
self.assertEqual(torch.bmm(Ab_conj, Bb_, out=out_b), torch.bmm(Ab_conj_physical, Bb_, out=out_b))
self.assertEqual(torch.bmm(t_b(Ab_conj), Bb_, out=out_b), torch.bmm(t_b(Ab_conj_physical), Bb_, out=out_b))
@onlyNativeDeviceTypes
@dtypes(torch.float32, torch.float64)
def test_strided_mm_bmm(self, device, dtype):
# Tests strided view case with stride smaller than corresponding dimension size
x = torch.tensor([[1., 2., 3.], [4., 5., 6.]], dtype=dtype, device=device)
new_shape = [2, 2, 2]
new_stride = [3, 1, 1]
sx = torch.as_strided(x, size=new_shape, stride=new_stride)
torch_fn = lambda x: torch.bmm(x, x) # noqa: E731
np_fn = lambda x: np.matmul(x, x) # noqa: E731
self.compare_with_numpy(torch_fn, np_fn, sx)
torch_fn = lambda x: torch.mm(x, x) # noqa: E731
self.compare_with_numpy(torch_fn, np_fn, sx[0])
    @precisionOverride({torch.half: 0.05, torch.bfloat16: 0.05})
    @skipCUDAIf(torch.version.cuda == "10.1", "flaky on CUDA 10.1")
    @onlyNativeDeviceTypes
    @dtypes(*get_all_fp_dtypes(), *get_all_complex_dtypes())
    @tf32_on_and_off(0.05)
    def test_bmm(self, device, dtype):
        """torch.bmm against numpy matmul over transposed, broadcast-expanded
        and zero-sized batched inputs, with every permutation of the out=
        tensor's memory layout."""
        if self.device_type == 'cuda' and dtype is torch.bfloat16 and CUDA11OrLater and not SM53OrLater:
            # cuBLAS does not guarantee BFloat16 support on SM < 53.
            # So on PyTorch, we consider BFloat16 support on SM < 53 as
            # undefined behavior
            return
        batch_sizes = [1, 10]
        M, N, O = 23, 15, 12
        # bfloat16 results are compared in float32 to avoid reference rounding
        numpy_dtype = dtype if dtype != torch.bfloat16 else torch.float32
        is_supported = True
        if dtype == torch.bfloat16 and self.device_type == 'cuda':
            is_supported = TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater)
        if not is_supported:
            # unsupported configurations must raise, not crash or silently succeed
            for num_batches in batch_sizes:
                b1 = torch.randn(num_batches, M, N, device=device).to(dtype)
                b2 = torch.randn(num_batches, N, O, device=device).to(dtype)
                self.assertRaisesRegex(RuntimeError, "type|Type|not implemented|CUBLAS_STATUS_NOT_SUPPORTED",
                                       lambda: torch.bmm(b1, b2))
            return
        def invert_perm(p):
            # inverse of a permutation of (0, 1, 2)
            d = {x: i for i, x in enumerate(p)}
            return (d[0], d[1], d[2])
        def generate_inputs(num_batches):
            # transposed tensors: correct shape, permuted (non-contiguous) strides
            for perm1, perm2 in itertools.product(itertools.permutations((0, 1, 2)), repeat=2):
                b1 = make_tensor((num_batches, M, N), device, dtype, low=-0.1, high=0.1)
                b2 = make_tensor((num_batches, N, O), device, dtype, low=-0.1, high=0.1)
                b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1))
                b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2))
                yield b1, b2
            # broadcasting tensors: every combination of size-1 dims expanded
            for b1, b2, b3, b4, b5, b6 in itertools.product((True, False), repeat=6):
                shape1 = (num_batches if b1 else 1, M if b2 else 1, N if b3 else 1)
                shape2 = (num_batches if b4 else 1, N if b5 else 1, O if b6 else 1)
                b1 = make_tensor(shape1, device, dtype, low=-0.1, high=0.1).expand(num_batches, M, N)
                b2 = make_tensor(shape2, device, dtype, low=-0.1, high=0.1).expand(num_batches, N, O)
                yield b1, b2
            # zero-sized tensors (z1/z3 shared so the pair stays bmm-compatible)
            for z1, z2, z3, z4 in itertools.product((True, False), repeat=4):
                shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0)
                shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0)
                b1 = torch.randn(shape1, dtype=dtype, device=device)
                b2 = torch.randn(shape2, dtype=dtype, device=device)
                yield b1, b2
        for num_batches in batch_sizes:
            for (b1, b2), perm3 in itertools.product(generate_inputs(num_batches), itertools.permutations((0, 1, 2))):
                res1 = torch.bmm(b1, b2)
                # out= tensor pre-filled with NaN so unwritten elements show up
                res2 = torch.full((num_batches, M, O), math.nan, dtype=dtype, device=device) \
                    .permute(perm3).contiguous().permute(invert_perm(perm3))
                torch.bmm(b1, b2, out=res2)
                expect = torch.from_numpy(
                    b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)
                self.assertEqual(expect, res1)
                self.assertEqual(expect, res2)
                if self.device_type == 'cuda':
                    # check that mixed arguments are rejected
                    self.assertRaises(RuntimeError, lambda: torch.bmm(b1, b2.cpu()))
                    self.assertRaises(RuntimeError, lambda: torch.bmm(b1.cpu(), b2))
                    self.assertRaises(RuntimeError, lambda: torch.bmm(b1, b2, out=res2.cpu()))
def _test_addbmm_baddbmm(self, func, b1, b2, ref, out_tensor):
getattr(out_tensor, func + "_")(b1, b2)
self.assertEqual(out_tensor, ref)
res3 = out_tensor.clone()
with self.assertWarnsOnceRegex(
UserWarning, f"This overload of {func}_ is deprecated"):
getattr(out_tensor, func + "_")(1, b1, b2)
self.assertEqual(out_tensor, ref * 2),
getattr(res3, func + "_")(b1, b2, beta=1)
self.assertEqual(out_tensor, res3)
with self.assertWarnsOnceRegex(
UserWarning, f"This overload of {func}_ is deprecated"):
getattr(out_tensor, func + "_")(1., .5, b1, b2)
self.assertEqual(out_tensor, ref * 2.5)
getattr(res3, func + "_")(b1, b2, beta=1., alpha=.5)
self.assertEqual(out_tensor, res3)
with self.assertWarnsOnceRegex(
UserWarning, f"This overload of {func} is deprecated"):
self.assertEqual(out_tensor, getattr(torch, func)(1, out_tensor, 0, b1, b2))
res4 = getattr(torch, func)(out_tensor, b1, b2, beta=1, alpha=.5)
self.assertEqual(res4, ref * 3),
nan = torch.full_like(out_tensor, math.nan)
res5 = getattr(torch, func)(nan, b1, b2, beta=0, alpha=1)
self.assertEqual(res5, ref)
if b1.is_complex():
res6 = getattr(torch, func)(out_tensor, b1, b2, beta=.1j, alpha=.5j)
self.assertEqual(res6, out_tensor * .1j + .5j * ref)
else:
res6 = getattr(torch, func)(out_tensor, b1, b2, beta=.1, alpha=.5)
self.assertEqual(res6, out_tensor * .1 + .5 * ref)
res7 = torch.full_like(out_tensor, math.nan)
getattr(torch, func)(nan, b1, b2, beta=0, out=res7)
self.assertEqual(res7, ref)
    @precisionOverride({torch.half: 0.05, torch.bfloat16: 0.05})
    @onlyNativeDeviceTypes
    @dtypes(*get_all_fp_dtypes(), *get_all_complex_dtypes())
    @tf32_on_and_off(0.05)
    def test_addbmm(self, device, dtype):
        """torch.addbmm (batched product reduced over the batch dim) against a
        numpy reference, via the shared _test_addbmm_baddbmm helper, for
        transposed, broadcast and zero-sized inputs."""
        if self.device_type == 'cuda' and dtype is torch.bfloat16 and CUDA11OrLater and not SM53OrLater:
            # cuBLAS does not guarantee BFloat16 support on SM < 53.
            # So on PyTorch, we consider BFloat16 support on SM < 53 as
            # undefined behavior
            return
        num_batches = 2
        M, N, O = 16, 17, 18
        is_supported = True
        if dtype == torch.bfloat16:
            if self.device_type == 'cpu':
                self.precision = 1 # 43 vs 43.75
            else:
                is_supported = TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater)
        if not is_supported:
            # unsupported configurations must raise a recognizable error
            b1 = make_tensor((num_batches, M, N), device, dtype, low=-1, high=1)
            b2 = make_tensor((num_batches, N, O), device, dtype, low=-1, high=1)
            t = make_tensor((M, O), device, dtype, low=-1, high=1)
            self.assertRaisesRegex(RuntimeError, "type|Type|not implemented|CUBLAS_STATUS_NOT_SUPPORTED",
                                   lambda: torch.addbmm(t, b1, b2))
            return
        def invert_perm(p):
            # inverse of a permutation of (0, 1, 2)
            d = {x: i for i, x in enumerate(p)}
            return (d[0], d[1], d[2])
        def generate_tensor():
            # reference is computed in float32 for bfloat16 inputs
            numpy_dtype = dtype if dtype != torch.bfloat16 else torch.float32
            # transposed tensors (inputs *0.1 to keep products representable)
            for perm1, perm2 in itertools.product(itertools.permutations((0, 1, 2)), repeat=2):
                for perm3 in itertools.permutations((0, 1)):
                    b1 = make_tensor((num_batches, M, N), device, dtype, low=-1, high=1) * 0.1
                    b2 = make_tensor((num_batches, N, O), device, dtype, low=-1, high=1) * 0.1
                    b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1))
                    b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2))
                    ref = torch.from_numpy(
                        b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()
                    ).to(device=device, dtype=dtype).sum(0)
                    out_tensor = torch.zeros_like(ref).permute(perm3).contiguous().permute(perm3)
                    yield b1, b2, ref, out_tensor
            # broadcasting tensors
            for s1, s2, s3, s4, s5, s6 in itertools.product((True, False), repeat=6):
                shape1 = (num_batches if s1 else 1, M if s2 else 1, N if s3 else 1)
                shape2 = (num_batches if s4 else 1, N if s5 else 1, O if s6 else 1)
                b1 = make_tensor(shape1, device, dtype, low=-1, high=1).expand(num_batches, M, N) * 0.1
                b2 = make_tensor(shape2, device, dtype, low=-1, high=1).expand(num_batches, N, O) * 0.1
                ref = torch.from_numpy(
                    b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()
                ).to(device=device, dtype=dtype).sum(0)
                out_tensor = torch.zeros_like(ref)
                yield b1, b2, ref, out_tensor
            # zero-sized tensors (z1/z3 shared so the pair stays compatible)
            for z1, z2, z3, z4 in itertools.product((True, False), repeat=4):
                shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0)
                shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0)
                b1 = make_tensor(shape1, device, dtype, low=-1, high=1) * 0.1
                b2 = make_tensor(shape2, device, dtype, low=-1, high=1) * 0.1
                ref = torch.from_numpy(
                    b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()
                ).to(device=device, dtype=dtype).sum(0)
                out_tensor = torch.zeros_like(ref)
                yield b1, b2, ref, out_tensor
        for b1, b2, ref, out_tensor in generate_tensor():
            self._test_addbmm_baddbmm("addbmm", b1, b2, ref, out_tensor)
    @precisionOverride({torch.half: 0.1, torch.bfloat16: 0.5})
    @onlyNativeDeviceTypes
    @dtypes(*get_all_fp_dtypes(), *get_all_complex_dtypes())
    @tf32_on_and_off(0.05)
    def test_baddbmm(self, device, dtype):
        """torch.baddbmm against a numpy reference via the shared
        _test_addbmm_baddbmm helper, for transposed, broadcast and
        zero-sized batched inputs."""
        if self.device_type == 'cuda' and dtype is torch.bfloat16 and CUDA11OrLater and not SM53OrLater:
            # cuBLAS does not guarantee BFloat16 support on SM < 53.
            # So on PyTorch, we consider BFloat16 support on SM < 53 as
            # undefined behavior
            return
        num_batches = 10
        M, N, O = 12, 8, 50
        is_supported = True
        if dtype == torch.bfloat16 and self.device_type == 'cuda':
            is_supported = TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater)
        if not is_supported:
            # unsupported configurations must raise a recognizable error
            b1 = make_tensor((num_batches, M, N), device, dtype, low=-1, high=1)
            b2 = make_tensor((num_batches, N, O), device, dtype, low=-1, high=1)
            t = make_tensor((num_batches, M, O), device, dtype, low=-1, high=1)
            self.assertRaisesRegex(RuntimeError, "type|Type|not implemented|CUBLAS_STATUS_NOT_SUPPORTED",
                                   lambda: torch.baddbmm(t, b1, b2))
            return
        def invert_perm(p):
            # inverse of a permutation of (0, 1, 2)
            d = {x: i for i, x in enumerate(p)}
            return (d[0], d[1], d[2])
        def generate_tensor():
            # reference is computed in float32 for bfloat16 inputs
            numpy_dtype = dtype if dtype != torch.bfloat16 else torch.float32
            # transposed tensors (inputs and the out= layout are all permuted)
            for perm1, perm2, perm3 in itertools.product(itertools.permutations((0, 1, 2)), repeat=3):
                b1 = make_tensor((num_batches, M, N), device, dtype, low=-1, high=1)
                b2 = make_tensor((num_batches, N, O), device, dtype, low=-1, high=1)
                b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1))
                b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2))
                ref = torch.from_numpy(
                    b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)
                out_tensor = torch.zeros_like(ref)
                out_tensor = out_tensor.permute(perm3).contiguous().permute(invert_perm(perm3))
                yield b1, b2, ref, out_tensor
            # broadcasting tensors
            for s1, s2, s3, s4, s5, s6 in itertools.product((True, False), repeat=6):
                shape1 = (num_batches if s1 else 1, M if s2 else 1, N if s3 else 1)
                shape2 = (num_batches if s4 else 1, N if s5 else 1, O if s6 else 1)
                b1 = make_tensor(shape1, device, dtype, low=-1, high=1).expand(num_batches, M, N)
                b2 = make_tensor(shape2, device, dtype, low=-1, high=1).expand(num_batches, N, O)
                ref = torch.from_numpy(
                    b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)
                out_tensor = torch.zeros_like(ref)
                yield b1, b2, ref, out_tensor
            # zero-sized tensors (z1/z3 shared so the pair stays compatible)
            for z1, z2, z3, z4 in itertools.product((True, False), repeat=4):
                shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0)
                shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0)
                b1 = make_tensor(shape1, device, dtype, low=-2, high=2)
                b2 = make_tensor(shape2, device, dtype, low=-2, high=2)
                ref = torch.from_numpy(
                    b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)
                out_tensor = torch.zeros_like(ref)
                yield b1, b2, ref, out_tensor
        for b1, b2, ref, out_tensor in generate_tensor():
            self._test_addbmm_baddbmm("baddbmm", b1, b2, ref, out_tensor)
    # TODO: update to compare against NumPy
    @onlyCUDA
    def test_solve_methods_arg_device(self, device):
        """Solver ops must raise a clear error when b and A live on different
        devices (every CPU/CUDA mismatch combination is exercised)."""
        for b_device, A_device in itertools.product(['cpu', device], repeat=2):
            if b_device == A_device:
                continue
            b = torch.randn(3, 1, device=b_device)
            A = torch.randn(3, 3, device=A_device)
            # solve and cholesky_solve goes through generic backend dispatch and hit kernel specific device check first
            # triangular_solve goes through specific backend dispatch (CPU/CUDA) and hit auto-generated device check first
            generic_backend_dispatch_err_str = "Expected b and A to be on the same device"
            specific_backend_dispatch_err_str = "Expected all tensors to be on the same device"
            with self.assertRaisesRegex(RuntimeError, generic_backend_dispatch_err_str):
                torch.solve(b, A)
            with self.assertRaisesRegex(RuntimeError, generic_backend_dispatch_err_str):
                torch.cholesky_solve(b, A)
            with self.assertRaisesRegex(RuntimeError, specific_backend_dispatch_err_str):
                torch.triangular_solve(b, A)
            # b and A have to be modified to match accepted inputs sizes for lu_solve
            b = b.unsqueeze(0)
            A = A.unsqueeze(0)
            with self.assertRaisesRegex(RuntimeError, specific_backend_dispatch_err_str):
                torch.lu_solve(b, A, torch.rand(A.shape[:-1], device=A_device).int())
            # This checks if a suitable error message is thrown
            # when LU output and pivots are not on the same device
            with self.assertRaisesRegex(RuntimeError, specific_backend_dispatch_err_str):
                torch.lu_solve(b, A, torch.rand(A.shape[:-1], device=b_device).int())
    @precisionOverride({torch.float32: 5e-3, torch.complex64: 1e-3})
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_pinverse(self, device, dtype):
        """torch.pinverse checked against the four Moore-Penrose conditions
        (M M+ M = M, M+ M M+ = M+, and both products Hermitian), for square,
        fat, thin and zero-numel matrices, plus an exact-inverse check on
        invertible inputs."""
        from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value as fullrank
        def run_test(M):
            # Testing against definition for pseudo-inverses
            MPI = torch.pinverse(M)
            MPI_ = MPI.cpu().numpy()
            M_ = M.cpu().numpy()
            if M.numel() > 0:
                self.assertEqual(M_, np.matmul(np.matmul(M_, MPI_), M_))
                self.assertEqual(MPI_, np.matmul(np.matmul(MPI_, M_), MPI_))
                # the two products must be Hermitian
                self.assertEqual(np.matmul(M_, MPI_), np.matmul(M_, MPI_).swapaxes(-2, -1).conj())
                self.assertEqual(np.matmul(MPI_, M_), np.matmul(MPI_, M_).swapaxes(-2, -1).conj())
            else:
                # empty input: only the (transposed) output shape can be checked
                self.assertEqual(M.shape, MPI.shape[:-2] + (MPI.shape[-1], MPI.shape[-2]))
        for sizes in [(5, 5), (3, 5, 5), (3, 7, 5, 5), # square matrices
                      (3, 2), (5, 3, 2), (7, 5, 3, 2), # fat matrices
                      (2, 3), (5, 2, 3), (7, 5, 2, 3), # thin matrices
                      (0, 0), (0, 2), (2, 0), (3, 0, 0), (0, 3, 0), (0, 0, 3)]: # zero numel matrices
            M = torch.randn(*sizes, dtype=dtype, device=device)
            run_test(M)
        # Test inverse and pseudo-inverse for invertible matrix
        for sizes in [(5, 5), (3, 5, 5), (3, 7, 5, 5)]:
            matsize = sizes[-1]
            batchdims = sizes[:-2]
            M = fullrank(matsize, *batchdims, dtype=dtype, device=device)
            self.assertEqual(torch.eye(matsize, dtype=dtype, device=device).expand(sizes), M.pinverse().matmul(M),
                             atol=1e-7, rtol=0, msg='pseudo-inverse for invertible matrix')
@skipCPUIfNoLapack
@skipCUDAIfNoMagmaAndNoCusolver
@dtypes(torch.double, torch.cdouble)
def test_matrix_power_non_negative(self, device, dtype):
def check(*size, noncontiguous=False):
t = make_tensor(size, device, dtype, noncontiguous=noncontiguous)
for n in range(8):
res = torch.linalg.matrix_power(t, n)
ref = np.linalg.matrix_power(t.cpu().numpy(), n)
self.assertEqual(res.cpu(), torch.from_numpy(ref))
check(0, 0)
check(1, 1)
check(5, 5)
check(5, 5, noncontiguous=True)
check(0, 3, 3)
check(2, 3, 3)
check(2, 3, 4, 4, noncontiguous=True)
@skipCPUIfNoLapack
@skipCUDAIfNoMagmaAndNoCusolver
@dtypes(torch.double, torch.cdouble)
def test_matrix_power_negative(self, device, dtype):
from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
def check(*size):
t = random_fullrank_matrix_distinct_singular_value(*size, dtype=dtype, device=device)
for n in range(-7, 0):
res = torch.linalg.matrix_power(t, n)
ref = np.linalg.matrix_power(t.cpu().numpy(), n)
self.assertEqual(res.cpu(), torch.from_numpy(ref))
check(0)
check(5)
check(0, 2)
check(3, 0)
check(3, 2)
check(5, 2, 3)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.complex64)
def test_linalg_matrix_exp_utils(self, device, dtype):
# test linear combination
def run_test(coeff_shape, data_shape):
coeffs = torch.rand(*coeff_shape, device=device, dtype=torch.float)
x = torch.rand(coeff_shape[1], *data_shape, device=device, dtype=dtype)
res1 = torch._compute_linear_combination(x, coeffs)
res2 = (x.unsqueeze(0) * coeffs.view(*coeff_shape, *([1] * len(data_shape)))).sum(1)
self.assertEqual(res1, res2, atol=1e-5, rtol=0.0)
# check `out=` version
res3 = torch.zeros(coeff_shape[0], *data_shape, device=device, dtype=dtype)
torch._compute_linear_combination(x, coeffs, out=res3)
self.assertEqual(res1, res3, atol=1e-5, rtol=0.0)
res4 = torch.ones(coeff_shape[0], *data_shape, device=device, dtype=dtype)
torch._compute_linear_combination(x, coeffs, out=res4)
self.assertEqual(res1, res4 - 1.0, atol=1e-5, rtol=0.0)
res5 = torch.ones(coeff_shape[0], *data_shape, device=device, dtype=dtype)
res5_clone = res5.clone()
torch._compute_linear_combination(x, coeffs, out=res5)
self.assertEqual(res1, res5 - res5_clone, atol=1e-5, rtol=0.0)
run_test([1, 3], [2, 2])
run_test([3, 1], [2, 2])
run_test([1, 10], [10, 10])
run_test([10, 1], [10, 10])
run_test([5, 3], [2, 2])
run_test([5, 3], [100, 100])
run_test([3, 4], [3, 3, 3])
run_test([3, 4], [3, 3, 3, 3])
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.complex64, torch.complex128)
def test_linalg_matrix_exp_boundary_cases(self, device, dtype):
expm = torch.linalg.matrix_exp
with self.assertRaisesRegex(RuntimeError, "Expected a floating point or complex tensor"):
expm(torch.randn(3, 3).type(torch.int))
with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
expm(torch.randn(3))
with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
expm(torch.randn(3, 2, 1))
# check 1x1 matrices
x = torch.randn(3, 3, 1, 1)
self.assertEqual(expm(x), x.exp())
    @slowTest
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_linalg_matrix_exp_analytic(self, device, dtype):
        """linalg.matrix_exp against the analytic eigendecomposition result
        exp(Q diag(d) Q^-1) = Q diag(exp(d)) Q^-1, with inputs rescaled to hit
        each degree of the internal Pade/Taylor expansion."""
        expm = torch.linalg.matrix_exp
        # check zero matrix: exp(0) must be the identity
        x = torch.zeros(20, 20, dtype=dtype, device=device)
        self.assertTrue((expm(x) == torch.eye(20, 20, dtype=dtype, device=device)).all().item())
        def normalize_to_1_operator_norm(sample, desired_norm):
            # rescale so the matrix 1-norm (max column abs sum) equals desired_norm
            sample_norm, _ = sample.abs().sum(-2).max(-1)
            sample_to_1_norm = sample / sample_norm.unsqueeze(-1).unsqueeze(-1)
            return sample_to_1_norm * desired_norm
        def gen_good_cond_number_matrices(*n):
            """
            Generates a diagonally-dominant matrix
            with the eigenvalues centered at 1
            and the radii at most (n[-1] - 1) / (n[-2] ** 2)
            """
            identity = torch.eye(n[-2], n[-1], dtype=dtype, device=device).expand(*n)
            x = torch.rand(*n, dtype=dtype, device=device) / (n[-1] ** 2)
            x = (x - x * identity) + identity
            return x
        def run_test(*n):
            # thresholds at which the implementation switches expansion degree
            # (float32 vs float64 tables)
            if dtype == torch.float:
                thetas = [
                    1.192092800768788e-07,  # deg 1
                    5.978858893805233e-04,  # deg 2
                    5.116619363445086e-02,  # deg 4
                    5.800524627688768e-01,  # deg 8
                    1.461661507209034e+00,  # deg 12
                    3.010066362817634e+00   # deg 18
                ]
            else:  # if torch.double
                thetas = [
                    2.220446049250313e-16,  # deg 1
                    2.580956802971767e-08,  # deg 2
                    3.397168839976962e-04,  # deg 4
                    4.991228871115323e-02,  # deg 8
                    2.996158913811580e-01,  # deg 12
                    1.090863719290036e+00   # deg 18
                ]
            # generate input: x = Q diag(d) Q^-1 with known d
            q = gen_good_cond_number_matrices(*n)
            q_ = q.cpu().numpy()
            qinv = torch.inverse(q)
            qinv_ = qinv.cpu().numpy()
            d = torch.randn(n[:-1], dtype=dtype, device=device)
            x = torch.from_numpy(
                np.matmul(q_, np.matmul(torch.diag_embed(d).cpu().numpy(), qinv_))).to(device)
            x_norm, _ = x.abs().sum(-2).max(-1)
            # test simple analytic whatever norm generated
            mexp = expm(x)
            mexp_analytic = np.matmul(
                q_,
                np.matmul(
                    torch.diag_embed(d.exp()).cpu().numpy(),
                    qinv_
                )
            )
            self.assertEqual(mexp, mexp_analytic, atol=1e-3, rtol=0.0)
            # generate norms to test different degree expansions: one norm
            # between each pair of thresholds, plus below/above the extremes
            sample_norms = []
            for i in range(len(thetas) - 1):
                sample_norms.append(0.5 * (thetas[i] + thetas[i + 1]))
            sample_norms = [thetas[0] / 2] + sample_norms + [thetas[-1] * 2]
            # matrices to equal norm
            for sample_norm in sample_norms:
                x_normalized = normalize_to_1_operator_norm(x, sample_norm)
                mexp = expm(x_normalized)
                # rescaling x rescales its eigenvalues by the same factor
                mexp_analytic = np.matmul(
                    q_,
                    np.matmul(
                        torch.diag_embed((d / x_norm.unsqueeze(-1) * sample_norm).exp()).cpu().numpy(),
                        qinv_
                    )
                )
                self.assertEqual(mexp, mexp_analytic, atol=1e-3, rtol=0.0)
        # single matrix
        run_test(2, 2)
        run_test(3, 3)
        run_test(4, 4)
        run_test(5, 5)
        run_test(100, 100)
        run_test(200, 200)
        # small batch of matrices
        run_test(3, 2, 2)
        run_test(3, 3, 3)
        run_test(3, 4, 4)
        run_test(3, 5, 5)
        run_test(3, 100, 100)
        run_test(3, 200, 200)
        # large batch of matrices
        run_test(3, 3, 2, 2)
        run_test(3, 3, 3, 3)
        run_test(3, 3, 4, 4)
        run_test(3, 3, 5, 5)
        run_test(3, 3, 100, 100)
        run_test(3, 3, 200, 200)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double)
def test_linalg_matrix_exp_batch(self, device, dtype):
def run_test(*n):
tensors_batch = torch.zeros(n, dtype=dtype, device=device)
tensors_batch = tensors_batch.view(-1, n[-2], n[-1])
num_matrices = tensors_batch.size(0)
tensors_list = []
for i in range(num_matrices):
tensors_list.append(torch.randn(n[-2], n[-1], dtype=dtype, device=device))
for i in range(num_matrices):
tensors_batch[i, ...] = tensors_list[i]
tensors_exp_map = (torch.linalg.matrix_exp(x) for x in tensors_list)
tensors_exp_batch = torch.linalg.matrix_exp(tensors_batch)
for i, tensor_exp in enumerate(tensors_exp_map):
self.assertEqual(tensors_exp_batch[i, ...], tensor_exp)
# small batch of matrices
run_test(3, 2, 2)
run_test(3, 3, 3)
run_test(3, 4, 4)
run_test(3, 5, 5)
# large batch of matrices
run_test(3, 3, 2, 2)
run_test(3, 3, 3, 3)
run_test(3, 3, 4, 4)
run_test(3, 3, 5, 5)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_linalg_matrix_exp_compare_with_taylor(self, device, dtype):
        """linalg.matrix_exp against a scaling-and-squaring Taylor-series
        reference, with inputs rescaled to hit each degree of the internal
        expansion."""
        def normalize_to_1_operator_norm(sample, desired_norm):
            # rescale so the matrix 1-norm (max column abs sum) equals desired_norm
            sample_norm, _ = sample.abs().sum(-2).max(-1)
            sample_to_1_norm = sample / sample_norm.unsqueeze(-1).unsqueeze(-1)
            return sample_to_1_norm * desired_norm
        def gen_good_cond_number_matrices(*n):
            """
            Generates a diagonally-dominant matrix
            with the eigenvalues centered at 1
            and the radii at most (n[-1] - 1) / (n[-2] ** 2)
            """
            identity = torch.eye(n[-2], n[-1], dtype=dtype, device=device).expand(*n)
            x = torch.rand(*n, dtype=dtype, device=device) / (n[-1] ** 2)
            x = (x - x * identity) + identity
            return x
        def get_taylor_approximation(a, deg):
            # sum_{i=0..deg} a^i / i! computed term by term in numpy
            a_ = a.cpu().numpy()
            identity = torch.eye(a.size(-2), a.size(-1), dtype=dtype, device=device).expand_as(a)
            res = identity.cpu().numpy()
            taylor_term = identity.cpu().numpy()
            for i in range(1, deg + 1):
                taylor_term = np.matmul(a_, taylor_term) / i
                res = res + taylor_term
            return res
        def scale_square(a, deg):
            # scaling-and-squaring: exp(a) = exp(a / 2^s)^(2^s), with the
            # scaled-down exponential approximated by a Taylor series
            if a.abs().pow(2).sum().sqrt() < 1.0:
                return get_taylor_approximation(a, 12)
            else:
                s = int(torch.log2(a.abs().pow(2).sum().sqrt()).ceil().item())
                b = a / (2 ** s)
                b = get_taylor_approximation(b, 18)
                for _ in range(s):
                    b = np.matmul(b, b)
                return torch.from_numpy(b).to(a.device)
        def run_test(*n):
            degs = [1, 2, 4, 8, 12, 18]
            # thresholds at which the implementation switches expansion degree
            # (float32 vs float64 tables)
            if dtype == torch.float:
                thetas = [
                    1.192092800768788e-07,  # deg 1
                    5.978858893805233e-04,  # deg 2
                    5.116619363445086e-02,  # deg 4
                    5.800524627688768e-01,  # deg 8
                    1.461661507209034e+00,  # deg 12
                    3.010066362817634e+00   # deg 18
                ]
            else:  # if torch.double
                thetas = [
                    2.220446049250313e-16,  # deg 1
                    2.580956802971767e-08,  # deg 2
                    3.397168839976962e-04,  # deg 4
                    4.991228871115323e-02,  # deg 8
                    2.996158913811580e-01,  # deg 12
                    1.090863719290036e+00   # deg 18
                ]
            # generate norms to test different degree expansions: one norm
            # between each pair of thresholds, plus below/above the extremes
            sample_norms = []
            for i in range(len(thetas) - 1):
                sample_norms.append(0.5 * (thetas[i] + thetas[i + 1]))
            sample_norms = [thetas[0] / 2] + sample_norms + [thetas[-1] * 2]
            degs = [degs[0]] + degs
            for sample_norm, deg in zip(sample_norms, degs):
                x = gen_good_cond_number_matrices(*n)
                x = normalize_to_1_operator_norm(x, sample_norm)
                mexp = torch.linalg.matrix_exp(x)
                mexp_taylor = scale_square(x, deg)
                self.assertEqual(mexp, mexp_taylor, atol=1e-2, rtol=0.0)
        # single matrix
        run_test(2, 2)
        run_test(3, 3)
        run_test(4, 4)
        run_test(5, 5)
        # small batch of matrices
        run_test(3, 2, 2)
        run_test(3, 3, 3)
        run_test(3, 4, 4)
        run_test(3, 5, 5)
        # large batch of matrices
        run_test(3, 3, 2, 2)
        run_test(3, 3, 3, 3)
        run_test(3, 3, 4, 4)
        run_test(3, 3, 5, 5)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_slogdet(self, device, dtype):
        """Compare torch.linalg.slogdet against numpy.linalg.slogdet on batches
        of matrices with assorted characteristics, and exercise the out= variant."""
        from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
                                                          random_hermitian_pd_matrix, random_square_matrix_of_rank)
        # mat_chars denotes matrix characteristics
        # possible values are: hermitian, hermitian_psd, hermitian_pd, singular, non_singular
        def run_test(matsize, batchdims, mat_chars):
            # Build a (*batchdims, matsize, matsize) batch, cycling through the
            # requested characteristics one matrix at a time.
            num_matrices = np.prod(batchdims)
            list_of_matrices = []
            if num_matrices != 0:
                for idx in range(num_matrices):
                    mat_type = idx % len(mat_chars)
                    if mat_chars[mat_type] == 'hermitian':
                        list_of_matrices.append(random_hermitian_matrix(matsize, dtype=dtype, device=device))
                    elif mat_chars[mat_type] == 'hermitian_psd':
                        list_of_matrices.append(random_hermitian_psd_matrix(matsize, dtype=dtype, device=device))
                    elif mat_chars[mat_type] == 'hermitian_pd':
                        list_of_matrices.append(random_hermitian_pd_matrix(matsize, dtype=dtype, device=device))
                    elif mat_chars[mat_type] == 'singular':
                        # All-ones matrix: rank 1, hence singular for matsize > 1.
                        list_of_matrices.append(torch.ones(matsize, matsize, dtype=dtype, device=device))
                    elif mat_chars[mat_type] == 'non_singular':
                        list_of_matrices.append(random_square_matrix_of_rank(matsize, matsize, dtype=dtype, device=device))
                full_tensor = torch.stack(list_of_matrices, dim=0).reshape(batchdims + (matsize, matsize))
            else:
                # Empty batch: nothing to stack, create the 0-sized batch directly.
                full_tensor = torch.randn(*batchdims, matsize, matsize, dtype=dtype, device=device)
            actual_value = torch.linalg.slogdet(full_tensor)
            expected_value = np.linalg.slogdet(full_tensor.cpu().numpy())
            # Compare sign and logabsdet separately against the NumPy reference.
            self.assertEqual(expected_value[0], actual_value[0], atol=self.precision, rtol=self.precision)
            self.assertEqual(expected_value[1], actual_value[1], atol=self.precision, rtol=self.precision)
            # test out=variant
            sign_out = torch.empty_like(actual_value[0])
            logabsdet_out = torch.empty_like(actual_value[1])
            ans = torch.linalg.slogdet(full_tensor, out=(sign_out, logabsdet_out))
            # The returned tuple and the out= tensors must agree with each other
            # and with the plain (no out=) call.
            self.assertEqual(ans[0], sign_out)
            self.assertEqual(ans[1], logabsdet_out)
            self.assertEqual(sign_out, actual_value[0])
            self.assertEqual(logabsdet_out, actual_value[1])
        for matsize, batchdims in itertools.product([0, 3, 5], [(0,), (3,), (5, 3)]):
            run_test(matsize, batchdims, mat_chars=['hermitian_pd'])
            run_test(matsize, batchdims, mat_chars=['singular'])
            run_test(matsize, batchdims, mat_chars=['non_singular'])
            run_test(matsize, batchdims, mat_chars=['hermitian', 'hermitian_pd', 'hermitian_psd'])
            run_test(matsize, batchdims, mat_chars=['singular', 'non_singular'])
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_slogdet_errors_and_warnings(self, device, dtype):
# slogdet requires the input to be a square matrix or batch of square matrices
a = torch.randn(2, 3, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, r'must be batches of square matrices'):
torch.linalg.slogdet(a)
# slogdet requires the input to be at least 2 dimensional tensor
a = torch.randn(2, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, r'must have at least 2 dimensions'):
torch.linalg.slogdet(a)
# slogdet requires the input to be of float, double, cfloat or cdouble types
a = torch.randn(2, 2, device=device, dtype=torch.bfloat16)
with self.assertRaisesRegex(RuntimeError, r'of float, double, cfloat or cdouble types'):
torch.linalg.slogdet(a)
# if non-empty out tensor with wrong shape is passed a warning is given
a = torch.randn(2, 3, 3, device=device, dtype=dtype)
sign_out = torch.empty(1, device=device, dtype=dtype)
real_dtype = a.real.dtype if dtype.is_complex else dtype
logabsdet_out = torch.empty(1, device=device, dtype=real_dtype)
with warnings.catch_warnings(record=True) as w:
# Trigger warning
torch.linalg.slogdet(a, out=(sign_out, logabsdet_out))
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
# dtypes should be safely castable
sign_out = torch.empty_like(a).to(torch.int)
logabsdet_out = torch.empty_like(a).to(torch.int)
with self.assertRaisesRegex(RuntimeError, "but got sign with dtype Int"):
torch.linalg.slogdet(a, out=(sign_out, logabsdet_out))
sign_out = torch.empty(0, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "but got logabsdet with dtype Int"):
torch.linalg.slogdet(a, out=(sign_out, logabsdet_out))
# device should match
if torch.cuda.is_available():
wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
sign_out = torch.empty(0, device=wrong_device, dtype=dtype)
logabsdet_out = torch.empty(0, device=wrong_device, dtype=real_dtype)
with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
torch.linalg.slogdet(a, out=(sign_out, logabsdet_out))
    @slowTest
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.double)
    def test_det_logdet_slogdet(self, device, dtype):
        """Thorough consistency checks of det/logdet/slogdet (method and
        torch.linalg forms) against a NumPy reference, including elementary
        row/column operations with analytically-known determinant effects."""
        def reference_slogdet(M):
            # NumPy ground truth, returned as tensors matching M's dtype/device.
            sdet, logabsdet = np.linalg.slogdet(M.detach().cpu().numpy())
            return M.new_tensor(sdet), M.new_tensor(logabsdet)
        def test_single_det(M, target, desc):
            # Check det/logdet/slogdet of M against target = (sign, logabsdet).
            target_sdet, target_logabsdet = target
            det = M.det()
            logdet = M.logdet()
            sdet, logabsdet = M.slogdet()
            linalg_sdet, linalg_logabsdet = torch.linalg.slogdet(M)
            # Test det
            self.assertEqual(det, target_sdet * target_logabsdet.exp(),
                             atol=1e-7, rtol=0, msg='{} (det)'.format(desc))
            # Test slogdet
            # Compare the overall value rather than individual parts because of
            # precision issues when det is near zero.
            self.assertEqual(sdet * logabsdet.exp(), target_sdet * target_logabsdet.exp(),
                             atol=1e-7, rtol=0, msg='{} (slogdet)'.format(desc))
            self.assertEqual(linalg_sdet * linalg_logabsdet.exp(), target_sdet * target_logabsdet.exp(),
                             atol=1e-7, rtol=0, msg='{} (linalg_slogdet)'.format(desc))
            # Test logdet
            # Compare logdet against our own pytorch slogdet because they should
            # be consistent, while it may behave slightly differently with other
            # slogdet implementations when det is near zero due to precision
            # issues.
            if sdet.item() < 0:
                # Negative determinant: logdet must be NaN (NaN != NaN).
                self.assertTrue(logdet.item() != logdet.item(), '{} (logdet negative case)'.format(desc))
            else:
                self.assertEqual(logdet.exp(), target_logabsdet.exp(),
                                 atol=1e-7, rtol=0, msg='{} (logdet non-negative case)'.format(desc))
        eye = torch.eye(5, dtype=dtype, device=device)
        test_single_det(eye, (torch.ones((), dtype=dtype, device=device), torch.zeros((), dtype=dtype, device=device)), 'identity')
        # Testing bug in #34061 (https://github.com/pytorch/pytorch/issues/34061)
        for n in range(250, 551, 100):
            mat = torch.randn(n, n, dtype=dtype, device=device)
            # NOTE(review): torch.qr is deprecated in favor of torch.linalg.qr.
            q, _ = torch.qr(mat)
            ref_det, ref_logabsdet = reference_slogdet(q)
            test_single_det(q, (ref_det, ref_logabsdet), 'orthogonal')
        def test(M):
            # Run test_single_det on M and on variants of M derived by
            # determinant-preserving/scaling transformations.
            assert M.size(0) >= 5, 'this helper fn assumes M to be at least 5x5'
            M = M.to(device)
            ref_M_sdet, ref_M_logabsdet = reference_slogdet(M)
            test_single_det(M, (ref_M_sdet, ref_M_logabsdet), 'basic')
            if ref_M_logabsdet.exp().item() >= 1e-6:  # skip singular
                M_inv = M.inverse()
                test_single_det(M_inv, reference_slogdet(M_inv), 'inverse')
            # NOTE(review): desc says 'transpose' but M is passed untransposed —
            # likely meant M.t(); det is transpose-invariant so the expected
            # values would be unchanged either way. TODO confirm intent.
            test_single_det(M, (ref_M_sdet, ref_M_logabsdet), 'transpose')
            for x in [0, 2, 4]:
                for scale in [-2, -0.1, 0, 10]:
                    # Scaling one row/column scales det by the same factor.
                    if scale > 0:
                        target = ref_M_sdet, ref_M_logabsdet + math.log(scale)
                    elif scale == 0:
                        target = torch.zeros_like(ref_M_sdet), torch.full_like(ref_M_logabsdet, -inf)
                    else:
                        target = ref_M_sdet.neg(), ref_M_logabsdet + math.log(-scale)
                    # dim 0
                    M_clone = M.clone()
                    M_clone[:, x] *= scale
                    test_single_det(M_clone, target, 'scale a row')
                    # dim 1
                    M_clone = M.clone()
                    M_clone[x, :] *= scale
                    test_single_det(M_clone, target, 'scale a column')
            for x1, x2 in [(0, 3), (4, 1), (3, 2)]:
                assert x1 != x2, 'x1 and x2 needs to be different for this test'
                # Duplicating a row/column makes the matrix singular: det == 0.
                target = torch.zeros_like(ref_M_sdet), torch.full_like(ref_M_logabsdet, -inf)
                # dim 0
                M_clone = M.clone()
                M_clone[:, x2] = M_clone[:, x1]
                test_single_det(M_clone, target, 'two rows are same')
                # dim 1
                M_clone = M.clone()
                M_clone[x2, :] = M_clone[x1, :]
                test_single_det(M_clone, target, 'two columns are same')
                for scale1, scale2 in [(0.3, -1), (0, 2), (10, 0.1)]:
                    # Combined scale-and-swap of two rows/columns multiplies
                    # det by -scale1 * scale2.
                    det_scale = scale1 * scale2 * -1
                    if det_scale > 0:
                        target = ref_M_sdet, ref_M_logabsdet + math.log(det_scale)
                    elif det_scale == 0:
                        target = torch.zeros_like(ref_M_sdet), torch.full_like(ref_M_logabsdet, -inf)
                    else:
                        target = ref_M_sdet.neg(), ref_M_logabsdet + math.log(-det_scale)
                    # dim 0
                    M_clone = M.clone()
                    t = M_clone[:, x1] * scale1
                    M_clone[:, x1] += M_clone[:, x2] * scale2
                    M_clone[:, x2] = t
                    test_single_det(M_clone, target, 'exchanging rows')
                    # dim 1
                    M_clone = M.clone()
                    t = M_clone[x1, :] * scale1
                    M_clone[x1, :] += M_clone[x2, :] * scale2
                    M_clone[x2, :] = t
                    test_single_det(M_clone, target, 'exchanging columns')
        def get_random_mat_scale(n):
            # For matrices with values i.i.d. with 0 mean, unit variance, and
            # subexponential tail, we have:
            #   E[log det(A^2)] \approx log((n-1)!)
            #
            # Notice:
            #   log Var[det(A)] = log E[det(A^2)] >= E[log det(A^2)]
            #
            # So:
            #   stddev[det(A)] >= sqrt( (n-1)! )
            #
            # We use this as an intuitive guideline to scale random generated
            # matrices so our closeness tests can work more robustly:
            #   scale by sqrt( (n-1)! )^(-1/n) = ( (n-1)! )^(-1/(2n))
            #
            # source: https://arxiv.org/pdf/1112.0752.pdf
            # TODO: technically we need subexponential distn for this to hold,
            #       but we mostly use gaussian entries below. Consider switching
            #       to Chi-sq if this turns out not stable enough, since Chi-sq
            #       is easy enough to sample from.
            return math.factorial(n - 1) ** (-1.0 / (2 * n))
        for n in [5, 10, 25]:
            scale = get_random_mat_scale(n)
            test(torch.randn(n, n, dtype=dtype, device=device) * scale)
            r = torch.randn(n, n, dtype=dtype, device=device) * scale
            # symmetric psd
            test(r.mm(r.t()))
            # symmetric pd
            r = torch.randn(n, n, dtype=dtype, device=device) * scale
            test(r.mm(r.t()) + torch.eye(n, dtype=dtype, device=device) * 1e-6)
            # symmetric
            r = torch.randn(n, n, dtype=dtype, device=device) * scale
            for i in range(n):
                for j in range(i):
                    r[i, j] = r[j, i]
            test(r)
            # non-contiguous
            test((torch.randn(n, n, n + 1, dtype=dtype, device=device) * scale)[:, 2, 1:])
            # det = 0
            r = torch.randn(n, n, dtype=dtype, device=device) * scale
            # NOTE(review): Tensor.svd is deprecated in favor of torch.linalg.svd.
            u, s, v = r.svd()
            if reference_slogdet(u)[0] < 0:
                u = -u
            if reference_slogdet(v)[0] < 0:
                v = -v
                s[0] *= -1
            # Zeroing the smallest singular value forces det == 0.
            s[-1] = 0
            test(u.mm(s.diag()).mm(v))
        # Small values to test numerical stability. Note that we don't scale
        # this matrix.
        r = torch.randn(512, 512, dtype=dtype, device=device)
        u, s, v = r.svd()
        s.fill_(1. / (100 * s.numel()))
        test(u.mm(s.diag()).mm(v))
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.double)
    def test_det_logdet_slogdet_batched(self, device, dtype):
        """Check that batched det/logdet/slogdet agree with calling the same
        function on each matrix of the batch individually."""
        from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
                                                          random_symmetric_pd_matrix, random_square_matrix_of_rank)
        # mat_chars denotes matrix characteristics
        # possible values are: sym, sym_psd, sym_pd, sing, non_sing
        def run_test(matsize, batchdims, mat_chars):
            # Build a (*batchdims, matsize, matsize) batch, cycling through the
            # requested characteristics one matrix at a time.
            num_matrices = reduce(lambda x, y: x * y, batchdims, 1)
            list_of_matrices = []
            for idx in range(num_matrices):
                mat_type = idx % len(mat_chars)
                if mat_chars[mat_type] == 'sym':
                    list_of_matrices.append(random_symmetric_matrix(matsize, dtype=dtype, device=device))
                elif mat_chars[mat_type] == 'sym_psd':
                    list_of_matrices.append(random_symmetric_psd_matrix(matsize, dtype=dtype, device=device))
                elif mat_chars[mat_type] == 'sym_pd':
                    list_of_matrices.append(random_symmetric_pd_matrix(matsize, dtype=dtype, device=device))
                elif mat_chars[mat_type] == 'sing':
                    # All-ones matrix: rank 1, hence singular for matsize > 1.
                    list_of_matrices.append(torch.ones(matsize, matsize, dtype=dtype, device=device))
                elif mat_chars[mat_type] == 'non_sing':
                    list_of_matrices.append(random_square_matrix_of_rank(matsize, matsize, dtype=dtype, device=device))
            full_tensor = torch.stack(list_of_matrices, dim=0).reshape(batchdims + (matsize, matsize))
            # Scaling adapted from `get_random_mat_scale` in _test_det_logdet_slogdet
            full_tensor *= (math.factorial(matsize - 1) ** (-1.0 / (2 * matsize)))
            for fn in [torch.det, torch.logdet, torch.slogdet, torch.linalg.slogdet]:
                expected_value = []
                actual_value = fn(full_tensor)
                # Evaluate fn matrix-by-matrix over every batch index.
                for full_idx in itertools.product(*map(lambda x: list(range(x)), batchdims)):
                    expected_value.append(fn(full_tensor[full_idx]))
                if fn == torch.slogdet or fn == torch.linalg.slogdet:
                    # slogdet variants return (sign, logabsdet); compare each part.
                    sign_value = torch.stack([tup[0] for tup in expected_value], dim=0).reshape(batchdims)
                    expected_value = torch.stack([tup[1] for tup in expected_value], dim=0).reshape(batchdims)
                    self.assertEqual(sign_value, actual_value[0])
                    self.assertEqual(expected_value, actual_value[1])
                else:
                    expected_value = torch.stack(expected_value, dim=0).reshape(batchdims)
                    self.assertEqual(actual_value, expected_value)
        for matsize, batchdims in itertools.product([3, 5], [(3,), (5, 3)]):
            run_test(matsize, batchdims, mat_chars=['sym_pd'])
            run_test(matsize, batchdims, mat_chars=['sing'])
            run_test(matsize, batchdims, mat_chars=['non_sing'])
            run_test(matsize, batchdims, mat_chars=['sym', 'sym_pd', 'sym_psd'])
            run_test(matsize, batchdims, mat_chars=['sing', 'non_sing'])
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_cholesky_inverse(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
def run_test(shape, batch, upper, contiguous):
A = random_hermitian_pd_matrix(shape, *batch, dtype=dtype, device=device)
if A.numel() > 0 and not contiguous:
A = A.mT
self.assertFalse(A.is_contiguous())
L = torch.linalg.cholesky(A)
expected_inverse = torch.inverse(A)
L = L.mH if upper else L
actual_inverse = torch.cholesky_inverse(L, upper)
self.assertEqual(actual_inverse, expected_inverse)
shapes = (0, 3, 5)
batches = ((), (0,), (3, ), (2, 2))
for shape, batch, upper, contiguous in list(itertools.product(shapes, batches, (True, False), (True, False))):
run_test(shape, batch, upper, contiguous)
# check the out= variant
A = random_hermitian_pd_matrix(3, 2, dtype=dtype, device=device)
L = torch.linalg.cholesky(A)
# There are two code paths currently for the out= variant
# 1. When 'out' tensor is in Fortran (column-major) memory format
# then the fast route is taken and the storage is reused directly in the computations
# 2. When 'out' tensor is not in Fortran format then a temporary tensor is allocated internally
# and the result is copied from the temporary tensor to 'out' tensor
# This test checks the first code path
out = torch.empty_like(A)
out_t = out.mT.clone(memory_format=torch.contiguous_format)
out = out_t.mT
ans = torch.cholesky_inverse(L, out=out)
self.assertEqual(ans, out)
expected = torch.inverse(A)
self.assertEqual(expected, out)
# This test checks the second code path
out = torch.empty_like(A)
ans = torch.cholesky_inverse(L, out=out)
self.assertEqual(ans, out)
expected = torch.inverse(A)
self.assertEqual(expected, out)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_cholesky_inverse_errors_and_warnings(self, device, dtype):
# cholesky_inverse requires the input to be at least 2 dimensional tensor
a = torch.randn(2, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
torch.cholesky_inverse(a)
# cholesky_inverse requires a square matrix
a = torch.randn(2, 3, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
torch.cholesky_inverse(a)
# if non-empty out tensor with wrong shape is passed a warning is given
a = torch.randn(3, 3, device=device, dtype=dtype)
out = torch.empty(2, 3, device=device, dtype=dtype)
with warnings.catch_warnings(record=True) as w:
# Trigger warning
torch.cholesky_inverse(a, out=out)
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
# dtypes should be safely castable
out = torch.empty(*a.shape, dtype=torch.int, device=device)
with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
torch.cholesky_inverse(a, out=out)
# device should match
if torch.cuda.is_available():
wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
out = torch.empty(0, device=wrong_device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
torch.cholesky_inverse(a, out=out)
# cholesky_inverse raises an error for invalid inputs on CPU
# for example if at least one diagonal element is zero
a = torch.randn(3, 3, device=device, dtype=dtype)
a[1, 1] = 0
if self.device_type == 'cpu':
with self.assertRaisesRegex(RuntimeError, r"cholesky_inverse: The diagonal element 2 is zero"):
torch.cholesky_inverse(a)
# cholesky_inverse on GPU does not raise an error for this case
elif self.device_type == 'cuda':
out = torch.cholesky_inverse(a)
self.assertTrue(out.isinf().any() or out.isnan().any())
def _select_broadcastable_dims(self, dims_full=None):
# select full dimensionality
if dims_full is None:
dims_full = []
ndims = random.randint(1, 4)
dims_full = [random.randint(1, 8) for _ in range(ndims)]
else:
ndims = len(dims_full)
# select actual dimensions for ops:
# larger: full ndims, individual sizes may be reduced
# smaller: possibly reduced ndims, sizes may be reduced
smaller_ndims = random.randint(1, ndims)
dims_small = []
dims_large = []
for i in range(ndims - 1, -1, -1):
j = random.randint(1, 3)
if j == 1: # no reduced singleton dimension
ds = dims_full[i]
dl = dims_full[i]
elif j == 2: # larger may have reduced singleton dimension
ds = dims_full[i]
dl = 1 if len(dims_small) < smaller_ndims else dims_full[i]
elif j == 3: # smaller may have reduced singleton dimension
ds = 1
dl = dims_full[i]
dims_large = [dl] + dims_large
if len(dims_small) < smaller_ndims:
dims_small = [ds] + dims_small
return (dims_small, dims_large, dims_full)
def test_broadcast_fused_matmul(self, device):
fns = ["baddbmm", "addbmm", "addmm", "addmv", "addr"]
for fn in fns:
batch_dim = random.randint(1, 8)
n_dim = random.randint(1, 8)
m_dim = random.randint(1, 8)
p_dim = random.randint(1, 8)
def dims_full_for_fn():
if fn == "baddbmm":
return ([batch_dim, n_dim, p_dim], [batch_dim, n_dim, m_dim], [batch_dim, m_dim, p_dim])
elif fn == "addbmm":
return ([n_dim, p_dim], [batch_dim, n_dim, m_dim], [batch_dim, m_dim, p_dim])
elif fn == "addmm":
return ([n_dim, p_dim], [n_dim, m_dim], [m_dim, p_dim])
elif fn == "addmv":
return ([n_dim], [n_dim, m_dim], [m_dim])
elif fn == "addr":
return ([n_dim, m_dim], [n_dim], [m_dim])
else:
raise AssertionError("unknown function")
(t0_dims_full, t1_dims, t2_dims) = dims_full_for_fn()
(t0_dims_small, _, _) = self._select_broadcastable_dims(t0_dims_full)
t0_small = torch.randn(*t0_dims_small, device=device).float()
t1 = torch.randn(*t1_dims, device=device).float()
t2 = torch.randn(*t2_dims, device=device).float()
t0_full = t0_small.expand(*t0_dims_full).to(device)
fntorch = getattr(torch, fn)
r0 = fntorch(t0_small, t1, t2)
r1 = fntorch(t0_full, t1, t2)
self.assertEqual(r0, r1)
    @tf32_on_and_off(0.001)
    def test_broadcast_batched_matmul(self, device):
        """Check torch.matmul batch-dimension broadcasting against the
        explicitly-expanded equivalent, the out= variant, and torch.bmm."""
        n_dim = random.randint(1, 8)
        m_dim = random.randint(1, 8)
        p_dim = random.randint(1, 8)
        full_batch_dims = [random.randint(1, 3) for i in range(random.randint(1, 3))]
        (batch_dims_small, _, _) = self._select_broadcastable_dims(full_batch_dims)
        def verify_batched_matmul(full_lhs, one_dimensional):
            # full_lhs: which operand carries the full batch dimensions.
            # one_dimensional: exercise matmul's 1-D vector special cases.
            if not one_dimensional:
                lhs_dims = [n_dim, m_dim]
                rhs_dims = [m_dim, p_dim]
                result_dims = [n_dim, p_dim]
            else:
                lhs_dims = [n_dim, m_dim] if full_lhs else [m_dim]
                rhs_dims = [m_dim, p_dim] if not full_lhs else [m_dim]
                result_dims = [n_dim] if full_lhs else [p_dim]
            # Matrix-shaped versions of the operands (1-D operands get a unit dim).
            lhs_mat_dims = lhs_dims if len(lhs_dims) != 1 else [1, m_dim]
            rhs_mat_dims = rhs_dims if len(rhs_dims) != 1 else [m_dim, 1]
            full_mat_dims = lhs_mat_dims if full_lhs else rhs_mat_dims
            dim0_dims = rhs_dims if full_lhs else lhs_dims
            small_dims = batch_dims_small + (rhs_mat_dims if full_lhs else lhs_mat_dims)
            small = torch.randn(*(small_dims), device=device).float()
            dim0 = torch.randn(*(dim0_dims), device=device).float()
            full = torch.randn(*(full_batch_dims + full_mat_dims), device=device).float()
            if not one_dimensional:
                (lhsTensors, rhsTensors) = ((full,), (small, dim0)) if full_lhs else ((small, dim0), (full,))
            else:
                (lhsTensors, rhsTensors) = ((full,), (dim0,)) if full_lhs else ((dim0,), (full,))
            def maybe_squeeze_result(l, r, result):
                # matmul drops the inserted unit dim for 1-D operands; mirror
                # that when comparing against the matrix-shaped ground truth.
                if len(lhs_dims) == 1 and l.dim() != 1:
                    return result.squeeze(-2)
                elif len(rhs_dims) == 1 and r.dim() != 1:
                    return result.squeeze(-1)
                else:
                    return result
            for lhs in lhsTensors:
                lhs_expanded = lhs.expand(*(torch.Size(full_batch_dims) + torch.Size(lhs_mat_dims)))
                lhs_expanded_matmul_fn = lhs_expanded.matmul
                for rhs in rhsTensors:
                    rhs_expanded = ((rhs if len(rhs_dims) != 1 else rhs.unsqueeze(-1)).
                                    expand(*(torch.Size(full_batch_dims) + torch.Size(rhs_mat_dims))))
                    # Ground truth: matmul of the fully-expanded operands.
                    truth = maybe_squeeze_result(lhs_expanded, rhs_expanded, lhs_expanded_matmul_fn(rhs_expanded))
                    for l in (lhs, lhs_expanded):
                        for r in (rhs, rhs_expanded):
                            l_matmul_fn = l.matmul
                            result = maybe_squeeze_result(l, r, l_matmul_fn(r))
                            self.assertEqual(truth, result)
                            # test torch.matmul function as well
                            torch_result = maybe_squeeze_result(l, r, torch.matmul(l, r))
                            self.assertEqual(truth, torch_result)
                            # test torch.matmul with out
                            out = torch.zeros_like(torch_result)
                            torch.matmul(l, r, out=out)
                            self.assertEqual(truth, maybe_squeeze_result(l, r, out))
                    # compare to bmm
                    bmm_result = (torch.bmm(lhs_expanded.contiguous().view(-1, *lhs_mat_dims),
                                            rhs_expanded.contiguous().view(-1, *rhs_mat_dims)))
                    self.assertEqual(truth.view(-1, *result_dims), bmm_result.view(-1, *result_dims))
        for indices in itertools.product((True, False), repeat=2):
            verify_batched_matmul(*indices)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_lu_solve_batched_non_contiguous(self, device, dtype):
from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
A = random_fullrank_matrix_distinct_singular_value(2, 2, dtype=dtype, device=device)
b = torch.randn(2, 2, 2, dtype=dtype, device=device)
x_exp = np.linalg.solve(A.cpu().permute(0, 2, 1).numpy(), b.cpu().permute(2, 1, 0).numpy())
A = A.permute(0, 2, 1)
b = b.permute(2, 1, 0)
assert not A.is_contiguous() and not b.is_contiguous(), "contiguous inputs"
LU_data, LU_pivots = torch.lu(A)
x = torch.lu_solve(b, LU_data, LU_pivots)
self.assertEqual(x, x_exp)
def lu_solve_test_helper(self, A_dims, b_dims, pivot, device, dtype):
from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
b = torch.randn(*b_dims, dtype=dtype, device=device)
A = random_fullrank_matrix_distinct_singular_value(*A_dims, dtype=dtype, device=device)
LU_data, LU_pivots, info = torch.lu(A, get_infos=True, pivot=pivot)
self.assertEqual(info, torch.zeros_like(info))
return b, A, LU_data, LU_pivots
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_lu_solve(self, device, dtype):
def sub_test(pivot):
for k, n in zip([2, 3, 5], [3, 5, 7]):
b, A, LU_data, LU_pivots = self.lu_solve_test_helper((n,), (n, k), pivot, device, dtype)
x = torch.lu_solve(b, LU_data, LU_pivots)
self.assertEqual(b, np.matmul(A.cpu(), x.cpu()))
sub_test(True)
if self.device_type == 'cuda':
sub_test(False)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_lu_solve_batched(self, device, dtype):
def sub_test(pivot):
def lu_solve_batch_test_helper(A_dims, b_dims, pivot):
b, A, LU_data, LU_pivots = self.lu_solve_test_helper(A_dims, b_dims, pivot, device, dtype)
x_exp_list = []
for i in range(b_dims[0]):
x_exp_list.append(torch.lu_solve(b[i], LU_data[i], LU_pivots[i]))
x_exp = torch.stack(x_exp_list) # Stacked output
x_act = torch.lu_solve(b, LU_data, LU_pivots) # Actual output
self.assertEqual(x_exp, x_act) # Equality check
Ax = np.matmul(A.cpu(), x_act.cpu())
self.assertEqual(b, Ax)
for batchsize in [1, 3, 4]:
lu_solve_batch_test_helper((5, batchsize), (batchsize, 5, 10), pivot)
# Tests tensors with 0 elements
b = torch.randn(3, 0, 3, dtype=dtype, device=device)
A = torch.randn(3, 0, 0, dtype=dtype, device=device)
LU_data, LU_pivots = torch.lu(A)
self.assertEqual(torch.empty_like(b), b.lu_solve(LU_data, LU_pivots))
sub_test(True)
if self.device_type == 'cuda':
sub_test(False)
@slowTest
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_lu_solve_batched_many_batches(self, device, dtype):
def run_test(A_dims, b_dims):
b, A, LU_data, LU_pivots = self.lu_solve_test_helper(A_dims, b_dims, True, device, dtype)
x = torch.lu_solve(b, LU_data, LU_pivots)
Ax = torch.matmul(A, x)
self.assertEqual(Ax, b.expand_as(Ax))
run_test((5, 65536), (65536, 5, 10))
run_test((5, 262144), (262144, 5, 10))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_lu_solve_batched_broadcasting(self, device, dtype):
from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
def run_test(A_dims, b_dims, pivot=True):
A_matrix_size = A_dims[-1]
A_batch_dims = A_dims[:-2]
A = random_fullrank_matrix_distinct_singular_value(A_matrix_size, *A_batch_dims, dtype=dtype, device=device)
b = make_tensor(b_dims, dtype=dtype, device=device)
x_exp = np.linalg.solve(A.cpu(), b.cpu())
LU_data, LU_pivots = torch.lu(A, pivot=pivot)
x = torch.lu_solve(b, LU_data, LU_pivots)
self.assertEqual(x, x_exp)
# test against numpy.linalg.solve
run_test((2, 1, 3, 4, 4), (2, 1, 3, 4, 6)) # no broadcasting
run_test((2, 1, 3, 4, 4), (4, 6)) # broadcasting b
run_test((4, 4), (2, 1, 3, 4, 2)) # broadcasting A
run_test((1, 3, 1, 4, 4), (2, 1, 3, 4, 5)) # broadcasting A & b
@onlyCUDA
@skipCUDAIfNoMagma
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
# this tests https://github.com/pytorch/pytorch/issues/36921
def test_lu_solve_large_matrices(self, device, dtype):
def run_test(A_dims, b_dims):
b, A, LU_data, LU_pivots = self.lu_solve_test_helper(A_dims, b_dims, True, device, dtype)
x = torch.lu_solve(b, LU_data, LU_pivots)
Ax = torch.matmul(A, x)
self.assertEqual(Ax, b.expand_as(Ax))
run_test((1, 1), (1, 1, 1025))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_lu_solve_out_errors_and_warnings(self, device, dtype):
# dtypes should be safely castable
a = torch.eye(2, dtype=dtype, device=device)
LU_data, LU_pivots = torch.lu(a, pivot=True)
b = torch.randn(2, 1, dtype=dtype, device=device)
out = torch.empty(0, dtype=torch.int, device=device)
with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
torch.lu_solve(b, LU_data, LU_pivots, out=out)
# device should match
if torch.cuda.is_available():
wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
out = torch.empty(0, dtype=dtype, device=wrong_device)
with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
torch.lu_solve(b, LU_data, LU_pivots, out=out)
# if out tensor with wrong shape is passed a warning is given
with warnings.catch_warnings(record=True) as w:
out = torch.empty(1, dtype=dtype, device=device)
# Trigger warning
torch.lu_solve(b, LU_data, LU_pivots, out=out)
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
    @precisionOverride({torch.float32: 1e-5, torch.complex64: 1e-5})
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_symeig(self, device, dtype):
        """Check torch.symeig: eigendecomposition reconstruction, the out=
        variant, and non-contiguous inputs, over several batch shapes."""
        from torch.testing._internal.common_utils import random_hermitian_matrix
        def run_test(dims, eigenvectors, upper):
            x = random_hermitian_matrix(*dims, dtype=dtype, device=device)
            if dtype.is_complex:
                real_dtype = torch.float32 if dtype is torch.complex64 else torch.float64
            else:
                real_dtype = dtype
            # Eigenvalues of a Hermitian matrix are real even for complex input.
            oute = torch.empty(dims[1:] + dims[:1], dtype=real_dtype, device=device)
            outv = torch.empty(dims[1:] + dims[:1] * 2, dtype=dtype, device=device)
            torch.symeig(x, eigenvectors=eigenvectors, upper=upper, out=(oute, outv))
            if eigenvectors:
                # Verify the reconstruction x == V @ diag(e) @ V^H via NumPy.
                outv_ = outv.cpu().numpy()
                x_recon = np.matmul(np.matmul(outv_, torch.diag_embed(oute.to(dtype)).cpu().numpy()),
                                    outv_.swapaxes(-2, -1).conj())
                self.assertEqual(x, x_recon, atol=1e-8, rtol=0, msg='Incorrect reconstruction using V @ diag(e) @ V.T')
            else:
                # eigenvectors=False: eigenvalues must still match, and the
                # eigenvector output must stay empty.
                eigvals, _ = torch.symeig(x, eigenvectors=True, upper=upper)
                self.assertEqual(eigvals, oute, msg='Eigenvalues mismatch')
                self.assertEqual(torch.empty(0, device=device, dtype=dtype), outv, msg='Eigenvector matrix not empty')
            rese, resv = x.symeig(eigenvectors=eigenvectors, upper=upper)
            self.assertEqual(rese, oute, msg="outputs of symeig and symeig with out don't match")
            self.assertEqual(resv, outv, msg="outputs of symeig and symeig with out don't match")
            # test non-contiguous
            x = random_hermitian_matrix(*dims, dtype=dtype, device=device)
            n_dim = len(dims) + 1
            # Reverse the batch dimensions and the matrix dimensions and then concat them
            x = x.permute(tuple(range(n_dim - 3, -1, -1)) + (n_dim - 1, n_dim - 2))
            assert not x.is_contiguous(), "x is intentionally non-contiguous"
            rese, resv = torch.symeig(x, eigenvectors=eigenvectors, upper=upper)
            if eigenvectors:
                resv_ = resv.cpu().numpy()
                x_recon = np.matmul(np.matmul(resv_, torch.diag_embed(rese.to(dtype)).cpu().numpy()),
                                    resv_.swapaxes(-2, -1).conj())
                self.assertEqual(x, x_recon, atol=1e-8, rtol=0, msg='Incorrect reconstruction using V @ diag(e) @ V.T')
            else:
                eigvals, _ = torch.symeig(x, eigenvectors=True, upper=upper)
                self.assertEqual(eigvals, rese, msg='Eigenvalues mismatch')
                self.assertEqual(torch.empty(0, device=device, dtype=dtype), resv, msg='Eigenvector matrix not empty')
        batch_dims_set = [(), (3,), (3, 5), (5, 3, 5)]
        for batch_dims, eigenvectors, upper in itertools.product(batch_dims_set, (True, False), (True, False)):
            run_test((5,) + batch_dims, eigenvectors, upper)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_symeig_out_errors_and_warnings(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_matrix
# if non-empty out tensor with wrong shape is passed a warning is given
a = random_hermitian_matrix(3, dtype=dtype, device=device)
real_dtype = a.real.dtype if dtype.is_complex else dtype
out_w = torch.empty(7, 7, dtype=real_dtype, device=device)
out_v = torch.empty(7, 7, dtype=dtype, device=device)
with warnings.catch_warnings(record=True) as w:
# Trigger warning
torch.symeig(a, out=(out_w, out_v))
self.assertTrue("An output with one or more elements was resized" in str(w[-2].message))
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
# dtypes should be safely castable
out_w = torch.empty(0, dtype=real_dtype, device=device)
out_v = torch.empty(0, dtype=torch.int, device=device)
with self.assertRaisesRegex(RuntimeError, "but got eigenvectors with dtype Int"):
torch.symeig(a, out=(out_w, out_v))
out_w = torch.empty(0, dtype=torch.int, device=device)
out_v = torch.empty(0, dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, "but got eigenvalues with dtype Int"):
torch.symeig(a, out=(out_w, out_v))
# device should match
if torch.cuda.is_available():
wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
out_w = torch.empty(0, device=wrong_device, dtype=dtype)
out_v = torch.empty(0, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
torch.symeig(a, out=(out_w, out_v))
out_w = torch.empty(0, device=device, dtype=dtype)
out_v = torch.empty(0, device=wrong_device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
torch.symeig(a, out=(out_w, out_v))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
def test_pca_lowrank(self, device):
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
dtype = torch.double
def run_subtest(guess_rank, actual_rank, matrix_size, batches, device, pca, **options):
density = options.pop('density', 1)
if isinstance(matrix_size, int):
rows = columns = matrix_size
else:
rows, columns = matrix_size
if density == 1:
a_input = random_lowrank_matrix(actual_rank, rows, columns, *batches, device=device, dtype=dtype)
a = a_input
else:
a_input = random_sparse_matrix(rows, columns, density, device=device, dtype=dtype)
a = a_input.to_dense()
u, s, v = pca(a_input, q=guess_rank, **options)
self.assertEqual(s.shape[-1], guess_rank)
self.assertEqual(u.shape[-2], rows)
self.assertEqual(u.shape[-1], guess_rank)
self.assertEqual(v.shape[-1], guess_rank)
self.assertEqual(v.shape[-2], columns)
A1 = u.matmul(s.diag_embed()).matmul(v.mT)
ones_m1 = torch.ones(batches + (rows, 1), dtype=a.dtype, device=device)
c = a.sum(axis=-2) / rows
c = c.reshape(batches + (1, columns))
A2 = a - ones_m1.matmul(c)
self.assertEqual(A1, A2)
if density == 1:
# actual rank is known only for dense input
detect_rank = (s.abs() > 1e-5).sum(axis=-1)
self.assertEqual(actual_rank * torch.ones(batches, device=device, dtype=torch.int64), detect_rank)
S = torch.linalg.svdvals(A2)
self.assertEqual(s[..., :actual_rank], S[..., :actual_rank])
all_batches = [(), (1,), (3,), (2, 3)]
for actual_rank, size, all_batches in [
(2, (17, 4), all_batches),
(2, (100, 4), all_batches),
(6, (100, 40), all_batches),
(12, (1000, 1000), [()]),
]:
for batches in all_batches:
for guess_rank in [
actual_rank,
actual_rank + 2,
actual_rank + 6,
]:
if guess_rank <= min(*size):
run_subtest(guess_rank, actual_rank, size, batches, device, torch.pca_lowrank)
run_subtest(guess_rank, actual_rank, size[::-1], batches, device, torch.pca_lowrank)
# sparse input
for guess_rank, size in [
(4, (17, 4)), (4, (4, 17)), (16, (17, 17)),
(21, (100, 40)), (20, (40, 100)), (600, (1000, 1000))]:
for density in [0.005, 0.1]:
run_subtest(guess_rank, None, size, (), device, torch.pca_lowrank, density=density)
# jitting support
jitted = torch.jit.script(torch.pca_lowrank)
guess_rank, actual_rank, size, batches = 2, 2, (17, 4), ()
run_subtest(guess_rank, actual_rank, size, batches, device, jitted)
# Ensure that nuclear_norm's out variant gives the same result as the non-out
@onlyNativeDeviceTypes
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64)
def test_nuclear_norm_out(self, device, dtype):
test_cases = [
# input size, dim
((25, 25), None),
((25, 25), (0, 1)),
((25, 25), (1, 0)),
((25, 25, 25), (2, 0)),
((25, 25, 25), (0, 1)),
]
for keepdim in [False, True]:
for input_size, dim in test_cases:
msg = f'input_size: {input_size}, dim: {dim}, keepdim: {keepdim}'
x = torch.randn(*input_size, device=device, dtype=dtype)
result_out = torch.empty(0, device=device, dtype=dtype)
if dim is None:
result = torch.nuclear_norm(x, keepdim=keepdim)
torch.nuclear_norm(x, keepdim=keepdim, out=result_out)
else:
result = torch.nuclear_norm(x, keepdim=keepdim, dim=dim)
torch.nuclear_norm(x, keepdim=keepdim, dim=dim, out=result_out)
self.assertEqual(result, result_out, msg=msg)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_geqrf(self, device, dtype):
        """Compare torch.geqrf against numpy.linalg.qr(mode='raw') over batched and empty shapes."""
        def run_test(shape):
            # numpy.linalg.qr with mode = 'raw' computes the same operation as torch.geqrf
            # so this test compares against that function
            A = make_tensor(shape, dtype=dtype, device=device)

            # numpy.linalg.qr doesn't work with batched input
            m, n = A.shape[-2:]
            # geqrf returns min(m, n) Householder scalars (tau)
            tau_size = "n" if m > n else "m"
            np_dtype = A.cpu().numpy().dtype
            ot = [np_dtype, np_dtype]
            numpy_geqrf_batched = np.vectorize(
                lambda x: np.linalg.qr(x, mode='raw'),
                otypes=ot,
                signature=f'(m,n)->(n,m),({tau_size})')

            expected = numpy_geqrf_batched(A.cpu())
            actual = torch.geqrf(A)

            # numpy.linalg.qr returns transposed result
            self.assertEqual(expected[0].swapaxes(-2, -1), actual[0])
            self.assertEqual(expected[1], actual[1])

        batches = [(), (0, ), (2, ), (2, 1)]
        ns = [5, 2, 0]
        for batch, (m, n) in product(batches, product(ns, ns)):
            run_test((*batch, m, n))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double)
def test_lstsq(self, device, dtype):
def _test_underdetermined(a, b, expectedNorm):
# underdetermined systems are only supported on CPU
if self.device_type != 'cpu':
return
m = a.size()[0]
n = a.size()[1]
assert(m <= n)
a_copy = a.clone()
b_copy = b.clone()
res1 = torch.lstsq(b, a)[0]
self.assertEqual(a, a_copy, atol=0, rtol=0)
self.assertEqual(b, b_copy, atol=0, rtol=0)
self.assertEqual((torch.mm(a, res1) - b).norm(), expectedNorm, atol=1e-8, rtol=0)
ta = torch.tensor((), dtype=dtype, device=device)
tb = torch.tensor((), dtype=dtype, device=device)
res2 = torch.lstsq(b, a, out=(tb, ta))[0]
self.assertEqual(a, a_copy, atol=0, rtol=0)
self.assertEqual(b, b_copy, atol=0, rtol=0)
self.assertEqual((torch.mm(a, res1) - b).norm(), expectedNorm, atol=1e-8, rtol=0)
res3 = torch.lstsq(b, a, out=(b, a))[0]
self.assertEqual((torch.mm(a_copy, b) - b_copy).norm(), expectedNorm, atol=1e-8, rtol=0)
self.assertEqual(res1, tb, atol=0, rtol=0)
self.assertEqual(res1, b, atol=0, rtol=0)
self.assertEqual(res1, res2, atol=0, rtol=0)
self.assertEqual(res1, res3, atol=0, rtol=0)
def _test_overdetermined(a, b, expectedNorm):
m = a.size()[0]
n = a.size()[1]
assert(m > n)
def check_norm(a, b, expected_norm, gels_result):
# Checks |ax - b| and the residual info from the result
# The first n rows is the least square solution.
# Rows n to m-1 contain residual information.
x = gels_result[:n]
resid_info = gels_result[n:]
resid_norm = (torch.mm(a, x) - b).norm()
self.assertEqual(resid_norm, expectedNorm, atol=1e-8, rtol=0)
self.assertEqual(resid_info.norm(), resid_norm, atol=1e-8, rtol=0)
a_copy = a.clone()
b_copy = b.clone()
res1 = torch.lstsq(b, a)[0]
self.assertEqual(a, a_copy, atol=0, rtol=0)
self.assertEqual(b, b_copy, atol=0, rtol=0)
check_norm(a, b, expectedNorm, res1)
ta = torch.tensor((), dtype=dtype, device=device)
tb = torch.tensor((), dtype=dtype, device=device)
res2 = torch.lstsq(b, a, out=(tb, ta))[0]
self.assertEqual(a, a_copy, atol=0, rtol=0)
self.assertEqual(b, b_copy, atol=0, rtol=0)
check_norm(a, b, expectedNorm, res2)
res3 = torch.lstsq(b, a, out=(b, a))[0]
check_norm(a_copy, b_copy, expectedNorm, res3)
self.assertEqual(res1, tb, atol=0, rtol=0)
self.assertEqual(res1, b, atol=0, rtol=0)
self.assertEqual(res1, res2, atol=0, rtol=0)
self.assertEqual(res1, res3, atol=0, rtol=0)
# basic test
expectedNorm = 0
a = torch.tensor(((1.44, -9.96, -7.55, 8.34),
(-7.84, -0.28, 3.24, 8.09),
(-4.39, -3.24, 6.27, 5.28),
(4.53, 3.83, -6.64, 2.06)), dtype=dtype, device=device).t()
b = torch.tensor(((8.58, 8.26, 8.48, -5.28),
(9.35, -4.43, -0.70, -0.26)), dtype=dtype, device=device).t()
_test_underdetermined(a, b, expectedNorm)
# test overdetermined
expectedNorm = 17.390200628863
a = torch.tensor(((1.44, -9.96, -7.55, 8.34, 7.08, -5.45),
(-7.84, -0.28, 3.24, 8.09, 2.52, -5.70),
(-4.39, -3.24, 6.27, 5.28, 0.74, -1.19),
(4.53, 3.83, -6.64, 2.06, -2.47, 4.70)), dtype=dtype, device=device).t()
b = torch.tensor(((8.58, 8.26, 8.48, -5.28, 5.72, 8.93),
(9.35, -4.43, -0.70, -0.26, -7.36, -2.52)), dtype=dtype, device=device).t()
_test_overdetermined(a, b, expectedNorm)
# test underdetermined
expectedNorm = 0
a = torch.tensor(((1.44, -9.96, -7.55),
(-7.84, -0.28, 3.24),
(-4.39, -3.24, 6.27),
(4.53, 3.83, -6.64)), dtype=dtype, device=device).t()
b = torch.tensor(((8.58, 8.26, 8.48),
(9.35, -4.43, -0.70)), dtype=dtype, device=device).t()
_test_underdetermined(a, b, expectedNorm)
# test reuse
expectedNorm = 0
a = torch.tensor(((1.44, -9.96, -7.55, 8.34),
(-7.84, -0.28, 3.24, 8.09),
(-4.39, -3.24, 6.27, 5.28),
(4.53, 3.83, -6.64, 2.06)), dtype=dtype, device=device).t()
b = torch.tensor(((8.58, 8.26, 8.48, -5.28),
(9.35, -4.43, -0.70, -0.26)), dtype=dtype, device=device).t()
ta = torch.tensor((), dtype=dtype, device=device)
tb = torch.tensor((), dtype=dtype, device=device)
torch.lstsq(b, a, out=(tb, ta))
self.assertEqual((torch.mm(a, tb) - b).norm(), expectedNorm, atol=1e-8, rtol=0)
torch.lstsq(b, a, out=(tb, ta))
self.assertEqual((torch.mm(a, tb) - b).norm(), expectedNorm, atol=1e-8, rtol=0)
torch.lstsq(b, a, out=(tb, ta))
self.assertEqual((torch.mm(a, tb) - b).norm(), expectedNorm, atol=1e-8, rtol=0)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    def test_lapack_empty(self, device):
        """Check the shapes/values assorted LAPACK-backed ops return for zero-sized inputs."""
        # FIXME: these are just a selection of LAPACK functions -- we need a general strategy here.
        # The LAPACK functions themselves generally do NOT work with zero sized dimensions, although
        # numpy/sci often has a direct wrapper (e.g. lu_factor) and a wrapper that "does the right thing"
        # (e.g. lu). We often name our functions identically to the lapack function, so it will take work
        # to name / migrate-to better wrappers.
        def fn(torchfn, *args):
            # tuple args are materialized as random tensors of that shape; others pass through
            return torchfn(*tuple(torch.randn(shape, device=device) if isinstance(shape, tuple) else shape
                                  for shape in args))
        # inverse, pinverse
        self.assertEqual((0, 0), fn(torch.inverse, (0, 0)).shape)
        self.assertEqual((5, 0), fn(torch.pinverse, (0, 5)).shape)
        self.assertEqual((0, 5), fn(torch.pinverse, (5, 0)).shape)
        self.assertEqual((0, 0), fn(torch.pinverse, (0, 0)).shape)
        # det, logdet, slogdet
        self.assertEqual(torch.tensor(1., device=device), fn(torch.det, (0, 0)))
        self.assertEqual(torch.tensor(0., device=device), fn(torch.logdet, (0, 0)))
        self.assertEqual((torch.tensor(1., device=device), torch.tensor(0., device=device)),
                         fn(torch.slogdet, (0, 0)))
        # eig, symeig
        evalues, evectors = fn(torch.eig, (0, 0), True)
        self.assertEqual([(0, 2), (0, 0)], [evalues.shape, evectors.shape])
        evalues, evectors = fn(torch.symeig, (0, 0), True)
        self.assertEqual([(0,), (0, 0)], [evalues.shape, evectors.shape])
        # qr
        q, r = fn(torch.qr, (3, 0), True)
        self.assertEqual([(3, 0), (0, 0)], [q.shape, r.shape])
        q, r = fn(torch.qr, (0, 3), True)
        self.assertEqual([(0, 0), (0, 3)], [q.shape, r.shape])
        q, r = fn(torch.qr, (3, 0), False)
        self.assertEqual([(3, 3), (3, 0)], [q.shape, r.shape])
        # lstsq
        self.assertRaises(RuntimeError, lambda: torch.lstsq(torch.randn(0, 0), torch.randn(0, 0)))
        self.assertRaises(RuntimeError, lambda: torch.lstsq(torch.randn(0,), torch.randn(0, 0)))
    @tf32_on_and_off(0.005)
    def test_tensordot(self, device):
        """Compare torch.tensordot against np.tensordot for axis lists, integer dims, and scalars."""
        a = torch.arange(60., device=device).reshape(3, 4, 5)
        b = torch.arange(24., device=device).reshape(4, 3, 2)
        # explicit per-tensor contraction axes
        c = torch.tensordot(a, b, dims=([1, 0], [0, 1])).cpu()
        cn = torch.from_numpy(np.tensordot(a.cpu().numpy(), b.cpu().numpy(),
                                           axes=([1, 0], [0, 1])))
        self.assertEqual(c, cn)
        # out= variant must agree with the functional result
        cout = torch.zeros((5, 2), device=device)
        torch.tensordot(a, b, dims=([1, 0], [0, 1]), out=cout).cpu()
        self.assertEqual(c, cout)
        a = torch.randn(2, 3, 4, 5, device=device)
        b = torch.randn(4, 5, 6, 7, device=device)
        # integer dims contracts that many trailing axes of a with leading axes of b
        c = torch.tensordot(a, b, dims=2).cpu()
        cn = torch.from_numpy(np.tensordot(a.cpu().numpy(), b.cpu().numpy(),
                                           axes=2))
        # negative dims must be rejected
        with self.assertRaisesRegex(RuntimeError, "expects dims >= 0"):
            torch.tensordot(a, b, dims=-1)
        self.assertEqual(c, cn)
        # default dims (compared against np.tensordot's default axes)
        c = torch.tensordot(a, b).cpu()
        cn = torch.from_numpy(np.tensordot(a.cpu().numpy(), b.cpu().numpy()))
        self.assertEqual(c, cn)
        # zero-dim (scalar) inputs with dims=0
        a = torch.tensordot(torch.tensor(0.), torch.tensor(0.), 0)
        an = torch.from_numpy(np.tensordot(np.zeros((), dtype=np.float32), np.zeros((), dtype=np.float32), 0))
        self.assertEqual(a, an)
# Create concrete per-device test classes (e.g. TestLinalgCPU) from the TestLinalg template.
instantiate_device_type_tests(TestLinalg, globals())
# Allow running this test file directly.
if __name__ == '__main__':
    run_tests()
| 47.980966 | 131 | 0.581955 |
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import random
from random import randrange
from itertools import product
from functools import reduce
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ASAN, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU,
iter_indices, gradcheck, gradgradcheck)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, floating_types, floating_and_complex_types, get_all_dtypes, get_all_int_dtypes, get_all_complex_dtypes,
get_all_fp_dtypes,
)
from torch.testing._internal.common_cuda import SM53OrLater, tf32_on_and_off, CUDA11OrLater, CUDA9
from torch.distributions.binomial import Binomial
# The tests below assume float32 is the default dtype.
torch.set_default_dtype(torch.float32)
assert torch.get_default_dtype() is torch.float32
# scipy is optional; tests that use it are guarded by TEST_SCIPY.
if TEST_SCIPY:
    import scipy

class TestLinalg(TestCase):
    """Device/dtype-parametrized tests for torch.linalg and legacy linear algebra ops."""
def setUp(self):
super(self.__class__, self).setUp()
torch.backends.cuda.matmul.allow_tf32 = False
def tearDown(self):
torch.backends.cuda.matmul.allow_tf32 = True
super(self.__class__, self).tearDown()
exact_dtype = True
    @dtypes(torch.float, torch.cfloat)
    @precisionOverride({torch.float: 1e-06, torch.cfloat: 1e-06})
    @tf32_on_and_off(5e-3)
    def test_inner(self, device, dtype):
        """Compare torch.inner (and its out= variant) against np.inner across shapes."""
        def check(a_sizes_, b_sizes_):
            # each size pair is checked in both argument orders
            for a_sizes, b_sizes in ((a_sizes_, b_sizes_), (b_sizes_, a_sizes_)):
                a = torch.randn(a_sizes, dtype=dtype, device=device)
                b = torch.randn(b_sizes, dtype=dtype, device=device)
                res = torch.inner(a, b)
                ref = np.inner(a.cpu().numpy(), b.cpu().numpy())
                self.assertEqual(res.cpu(), torch.from_numpy(np.array(ref)))
                # the out= variant must produce the same values
                out = torch.zeros_like(res)
                torch.inner(a, b, out=out)
                self.assertEqual(res, out)
        check([], [])                       # scalar x scalar
        check([], [0])                      # scalar x empty 1D
        check([], [3])                      # scalar x 1D
        check([], [2, 3, 4])                # scalar x 3D
        check([0], [0])                     # empty x empty
        check([0], [2, 0])                  # empty x 2D with empty last dim
        check([2], [2])                     # 1D x 1D
        check([2], [3, 1, 2])               # 1D x 3D
        check([2], [3, 0, 2])               # 1D x 3D with an empty batch dim
        check([1, 2], [3, 2])               # 2D x 2D
        check([1, 2], [3, 4, 2])            # 2D x 3D
        check([2, 1, 3, 2], [1, 3, 2, 2])   # 4D x 4D
        # non-contiguous inputs must work too
        a = torch.randn(3, 2, device=device, dtype=dtype).transpose_(0, 1)
        b = torch.randn(4, 3, device=device, dtype=dtype)[::2, :]
        self.assertFalse(a.is_contiguous() or b.is_contiguous())
        self.assertEqual(a.inner(b).cpu().numpy(), np.inner(a.cpu().numpy(), b.cpu().numpy()))
        # mismatched last dimensions must raise with a descriptive message
        with self.assertRaisesRegex(RuntimeError,
                                    r"inner\(\) the last dimension must match on both "
                                    r"input tensors but got shapes \[2, 3\] and \[2, 2\]"):
            torch.randn(2, 3, device=device, dtype=dtype).inner(torch.randn(2, 2, device=device, dtype=dtype))
@precisionOverride({torch.bfloat16: 1e-1})
@dtypes(*(get_all_dtypes()))
def test_outer(self, device, dtype):
def run_test_case(a, b):
if dtype == torch.bfloat16:
a_np = a.to(torch.double).cpu().numpy()
b_np = b.to(torch.double).cpu().numpy()
exact_dtype = False
else:
a_np = a.cpu().numpy()
b_np = b.cpu().numpy()
exact_dtype = True
expected = np.outer(a_np, b_np)
self.assertEqual(torch.outer(a, b), expected, exact_dtype=False)
self.assertEqual(torch.Tensor.outer(a, b), expected, exact_dtype=False)
self.assertEqual(torch.ger(a, b), expected, exact_dtype=False)
self.assertEqual(torch.Tensor.ger(a, b), expected, exact_dtype=False)
out = torch.empty(a.size(0), b.size(0), device=device, dtype=dtype)
torch.outer(a, b, out=out)
self.assertEqual(out, expected, exact_dtype=False)
out = torch.empty(a.size(0), b.size(0), device=device, dtype=dtype)
torch.ger(a, b, out=out)
self.assertEqual(out, expected, exact_dtype=False)
a = torch.randn(50).to(device=device, dtype=dtype)
b = torch.randn(50).to(device=device, dtype=dtype)
run_test_case(a, b)
zero_strided = torch.randn(1).to(device=device, dtype=dtype).expand(50)
run_test_case(zero_strided, b)
run_test_case(a, zero_strided)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_linalg_lstsq(self, device, dtype):
        """Compare torch.linalg.lstsq against scipy/numpy references across LAPACK drivers and rcond values."""
        from torch.testing._internal.common_utils import random_well_conditioned_matrix
        if self.device_type == 'cpu':
            drivers = ('gels', 'gelsy', 'gelsd', 'gelss', None)
        else:
            # only the 'gels' driver is supported on CUDA (see test_linalg_lstsq_input_checks)
            drivers = ('gels', None)
        def check_solution_correctness(a, b, sol):
            # the pseudoinverse solution is the least-squares reference
            sol2 = a.pinverse() @ b
            self.assertEqual(sol, sol2, atol=1e-5, rtol=1e-5)
        def check_correctness_ref(a, b, res, ref, driver="default"):
            # Compare a torch.linalg.lstsq result tuple against a per-matrix reference
            # `ref(a, b) -> (solution, residuals, rank, singular_values)`.
            def apply_if_not_empty(t, f):
                if t.numel():
                    return f(t)
                else:
                    return t
            def select_if_not_empty(t, i):
                selected = apply_if_not_empty(t, lambda x: x.select(0, i))
                return selected
            m = a.size(-2)
            n = a.size(-1)
            nrhs = b.size(-1)
            batch_size = int(np.prod(a.shape[:-2]))
            if batch_size == 0:
                batch_size = 1
            # flatten all batch dimensions so the reference can run matrix by matrix
            a_3d = a.view(batch_size, m, n)
            b_3d = b.view(batch_size, m, nrhs)
            solution_3d = res.solution.view(batch_size, n, nrhs)
            residuals_2d = apply_if_not_empty(res.residuals, lambda t: t.view(-1, nrhs))
            rank_1d = apply_if_not_empty(res.rank, lambda t: t.view(-1))
            singular_values_2d = res.singular_values.view(batch_size, res.singular_values.shape[-1])
            if a.numel() > 0:
                for i in range(batch_size):
                    sol, residuals, rank, singular_values = ref(
                        a_3d.select(0, i).numpy(),
                        b_3d.select(0, i).numpy()
                    )
                    # normalize a None reference to an empty sequence for comparison
                    if singular_values is None:
                        singular_values = []
                    self.assertEqual(sol, solution_3d.select(0, i), atol=1e-5, rtol=1e-5)
                    self.assertEqual(rank, select_if_not_empty(rank_1d, i), atol=1e-5, rtol=1e-5)
                    self.assertEqual(singular_values, singular_values_2d.select(0, i), atol=1e-5, rtol=1e-5)
                    if m > n:
                        # residuals are only populated when every matrix has full rank
                        if torch.all(rank_1d == n):
                            self.assertEqual(
                                residuals, select_if_not_empty(residuals_2d, i), atol=1e-5, rtol=1e-5, exact_dtype=False
                            )
                        else:
                            self.assertTrue(residuals_2d.numel() == 0)
            else:
                # empty input: only the output shapes can be checked
                self.assertEqual(res.solution.shape, (*a.shape[:-2], n, nrhs))
                self.assertEqual(res.rank.shape, a.shape[:-2])
                if m > n and driver != "gelsy":
                    self.assertEqual(res.residuals.shape, (*a.shape[:-2], 0))
                else:
                    self.assertEqual(res.residuals.shape, (0, ))
                # only the SVD-based drivers (and the default) expose singular values
                if driver == "default" or driver == "gelsd" or driver == "gelss":
                    self.assertEqual(res.singular_values.shape, (*a.shape[:-2], min(m, n)))
                else:
                    self.assertEqual(res.singular_values.shape, (0, ))
        def check_correctness_scipy(a, b, res, driver, cond):
            # scipy.linalg.lstsq exposes the same lapack_driver choices
            if TEST_SCIPY and driver in ('gelsd', 'gelss', 'gelsy'):
                import scipy.linalg
                def scipy_ref(a, b):
                    return scipy.linalg.lstsq(a, b, lapack_driver=driver, cond=cond)
                check_correctness_ref(a, b, res, scipy_ref, driver=driver)
        def check_correctness_numpy(a, b, res, driver, rcond):
            # np.linalg.lstsq uses the gelsd driver, so only compare that case
            if driver == 'gelsd':
                def numpy_ref(a, b):
                    return np.linalg.lstsq(a, b, rcond=rcond)
                check_correctness_ref(a, b, res, numpy_ref)
        version = torch.testing._internal.common_cuda._get_torch_cuda_version()
        cusolver_available = (version >= (10, 2))
        ms = [2 ** i for i in range(5)]
        m_ge_n_sizes = [(m, m // 2) for m in ms] + [(m, m) for m in ms]
        # cases m < n are only supported on CPU or when cuSOLVER is available
        m_l_n_sizes = [(m // 2, m) for m in ms]
        include_m_l_n_case = (cusolver_available or device == 'cpu')
        matrix_sizes = m_ge_n_sizes + (m_l_n_sizes if include_m_l_n_case else [])
        batches = [(), (2,), (2, 2), (2, 2, 2)]
        # rcond values: default (None), a truthy placeholder replaced per driver below, and -1
        rconds = (None, True, -1)
        for batch, matrix_size, driver, rcond in itertools.product(batches, matrix_sizes, drivers, rconds):
            # map the placeholder rcond onto a value appropriate for the driver
            if rcond and rcond != -1:
                if driver in ('gelss', 'gelsd'):
                    rcond = 1.0
                else:
                    rcond = 1e-4
            # the 'gels' driver is only exercised with the default rcond (None)
            if driver == 'gels' and rcond is not None:
                continue
            shape = batch + matrix_size
            a = random_well_conditioned_matrix(*shape, dtype=dtype, device=device)
            b = torch.rand(*shape, dtype=dtype, device=device)
            m = a.size(-2)
            n = a.size(-1)
            res = torch.linalg.lstsq(a, b, rcond=rcond, driver=driver)
            sol = res.solution
            check_correctness_scipy(a, b, res, driver, rcond)
            check_correctness_numpy(a, b, res, driver, rcond)
            # 'gels' with default rcond can additionally be checked against pinverse
            if driver == 'gels' and rcond is None:
                check_solution_correctness(a, b, sol)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_linalg_lstsq_batch_broadcasting(self, device, dtype):
from torch.testing._internal.common_utils import random_well_conditioned_matrix
def check_correctness(a, b):
sol = torch.linalg.lstsq(a, b).solution
sol2 = a.pinverse() @ b
self.assertEqual(sol, sol2, rtol=1e-5, atol=1e-5)
ms = [2 ** i for i in range(5)]
batches = [(), (0,), (2,), (2, 2), (2, 2, 2)]
for m, batch in itertools.product(ms, batches):
a = random_well_conditioned_matrix(m, m, dtype=dtype, device=device).view(*([1] * len(batch)), m, m)
b = torch.rand(*(batch + (m, m)), dtype=dtype, device=device)
check_correctness(a, b)
for m in ms:
a = random_well_conditioned_matrix(1, 3, 1, 3, m, m, dtype=dtype, device=device)
b = torch.rand(3, 1, 3, 1, m, m // 2, dtype=dtype, device=device)
check_correctness(a, b)
b = torch.rand(3, 1, 3, 1, m, dtype=dtype, device=device)
check_correctness(a, b.unsqueeze(-1))
a = random_well_conditioned_matrix(3, 1, 3, 1, m, m, dtype=dtype, device=device)
b = torch.rand(1, 3, 1, 3, m, m // 2, dtype=dtype, device=device)
check_correctness(a, b)
b = torch.rand(1, 3, 1, 3, m, dtype=dtype, device=device)
check_correctness(a, b.unsqueeze(-1))
    @skipCPUIfNoLapack
    @skipCUDAIfNoMagma
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_linalg_lstsq_input_checks(self, device, dtype):
        """Exercise torch.linalg.lstsq validation: empty shapes, dim/dtype/device mismatches, driver names."""
        # empty batch dims: solution keeps the (batch, n, nrhs) shape
        a = torch.rand(0, 0, 3, 3, dtype=dtype, device=device)
        b = torch.rand(0, 0, 3, 2, dtype=dtype, device=device)
        self.assertEqual(
            torch.linalg.lstsq(a, b)[0],
            torch.zeros(0, 0, 3, 2, dtype=dtype, device=device)
        )
        # empty matrix dims
        a = torch.rand(2, 2, 0, 0, dtype=dtype, device=device)
        b = torch.rand(2, 2, 0, 0, dtype=dtype, device=device)
        self.assertEqual(
            torch.linalg.lstsq(a, b)[0],
            torch.zeros(2, 2, 0, 0, dtype=dtype, device=device)
        )
        a = torch.rand(2, 2, 3, 0, dtype=dtype, device=device)
        b = torch.rand(2, 2, 3, 0, dtype=dtype, device=device)
        self.assertEqual(
            torch.linalg.lstsq(a, b)[0],
            torch.zeros(2, 2, 0, 0, dtype=dtype, device=device)
        )
        a = torch.rand(2, 2, 3, 0, dtype=dtype, device=device)
        b = torch.rand(2, 2, 3, 2, dtype=dtype, device=device)
        self.assertEqual(
            torch.linalg.lstsq(a, b)[0],
            torch.zeros(2, 2, 0, 2, dtype=dtype, device=device)
        )
        # zero rows with m < n is only checked on CPU
        if torch.device(device).type == 'cpu':
            a = torch.rand(2, 2, 0, 3, dtype=dtype, device=device)
            b = torch.rand(2, 2, 0, 3, dtype=dtype, device=device)
            self.assertEqual(
                torch.linalg.lstsq(a, b)[0],
                torch.zeros(2, 2, 3, 3, dtype=dtype, device=device)
            )
        # dimension-count and shape-compatibility errors
        a = torch.rand(2, 3, dtype=dtype, device=device)
        b = torch.rand(3, dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, 'input must have at least 2 dimensions'):
            torch.linalg.lstsq(b, b)
        with self.assertRaisesRegex(RuntimeError, 'other must have at least 1 dimension'):
            torch.linalg.lstsq(a, torch.tensor(1, dtype=dtype, device=device))
        with self.assertRaisesRegex(RuntimeError, r'input.size\(-2\) should match other.size\(-1\)'):
            torch.linalg.lstsq(a, b)
        with self.assertRaisesRegex(RuntimeError, r'input.size\(-2\) should match other.size\(-2\)'):
            torch.linalg.lstsq(a, b.unsqueeze(-1))
        def complement_device(device):
            # pick a device different from `device`, if one is available
            if device == 'cpu' and torch.cuda.is_available():
                return 'cuda'
            else:
                return 'cpu'
        # mismatched devices must be rejected
        a = torch.rand(2, 2, 2, 2, dtype=dtype, device=device)
        b = torch.rand(2, 2, 2, dtype=dtype, device=complement_device(device))
        if a.device != b.device:
            with self.assertRaisesRegex(RuntimeError, 'be on the same device'):
                torch.linalg.lstsq(a, b)
        # mismatched dtypes must be rejected
        b = (torch.rand(2, 2, 2, dtype=dtype, device=device) * 100).long()
        with self.assertRaisesRegex(RuntimeError, 'the same dtype'):
            torch.linalg.lstsq(a, b)
        # invalid driver names raise device-specific messages
        a = torch.rand(2, 2, 2, 2, dtype=dtype, device=device)
        b = torch.rand(2, 2, 2, dtype=dtype, device=device)
        if device != 'cpu':
            with self.assertRaisesRegex(RuntimeError, '`driver` other than `gels` is not supported on CUDA'):
                torch.linalg.lstsq(a, b, driver='fictitious_driver')
        else:
            with self.assertRaisesRegex(RuntimeError, r'parameter `driver` should be one of \(gels, gelsy, gelsd, gelss\)'):
                torch.linalg.lstsq(a, b, driver='fictitious_driver')
        version = torch.testing._internal.common_cuda._get_torch_cuda_version()
        cusolver_not_available = (version < (10, 1))
        if device != 'cpu' and cusolver_not_available:
            # without cuSOLVER, CUDA only handles overdetermined (m >= n) systems
            a = torch.rand(2, 3, dtype=dtype, device=device)
            b = torch.rand(2, 1, dtype=dtype, device=device)
            with self.assertRaisesRegex(RuntimeError, r'only overdetermined systems'):
                torch.linalg.lstsq(a, b)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_cholesky(self, device, dtype):
        """Compare torch.linalg.cholesky against np.linalg.cholesky; also check the out= and upper= variants."""
        from torch.testing._internal.common_utils import random_hermitian_pd_matrix
        def run_test(shape, batch, contiguous):
            A = random_hermitian_pd_matrix(shape, *batch, dtype=dtype, device=device)
            if A.numel() > 0 and not contiguous:
                # transposing yields a non-contiguous view with the same Hermitian values
                A = A.mT
                self.assertFalse(A.is_contiguous())
            expected_L = np.linalg.cholesky(A.cpu().numpy())
            actual_L = torch.linalg.cholesky(A)
            # single-precision entries can differ between PyTorch and NumPy,
            # so compare matrix norms with default tolerances first
            if A.numel() > 0 and dtype in [torch.float32, torch.complex64]:
                # axis is specified to calculate matrix norm for batched input
                expected_norm = np.linalg.norm(expected_L, ord=1, axis=(-2, -1))
                actual_norm = torch.linalg.norm(actual_L, ord=1, axis=(-2, -1))
                # Compare the norms with standard tolerances
                self.assertEqual(actual_norm, expected_norm)
                # and individual values with a higher tolerance
                self.assertEqual(actual_L, expected_L, atol=1e-2, rtol=1e-5)
            else:
                self.assertEqual(actual_L, expected_L)
        shapes = (0, 3, 5)
        batches = ((), (3, ), (2, 2))
        larger_input_case = [(100, (5, ), True)]
        for shape, batch, contiguous in list(itertools.product(shapes, batches, (True, False))) + larger_input_case:
            run_test(shape, batch, contiguous)
        # check the out= variant
        A = random_hermitian_pd_matrix(3, 3, dtype=dtype, device=device)
        out = torch.empty_like(A)
        ans = torch.linalg.cholesky(A, out=out)
        self.assertEqual(ans, out)
        expected = torch.linalg.cholesky(A)
        self.assertEqual(expected, out)
        # check the upper= variant
        expected = torch.linalg.cholesky(A).mH
        actual = torch.linalg.cholesky(A, upper=True)
        self.assertEqual(expected, actual)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_cholesky_errors_and_warnings(self, device, dtype):
        """Check torch.linalg.cholesky error messages (shape, PD, batch) and out= warnings, mirroring NumPy."""
        from torch.testing._internal.common_utils import random_hermitian_pd_matrix
        # cholesky requires the input to be a square matrix or batch of square matrices
        A = torch.randn(2, 3, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, r'must be batches of square matrices'):
            torch.linalg.cholesky(A)
        A = torch.randn(2, 2, 3, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, r'must be batches of square matrices'):
            torch.linalg.cholesky(A)
        # NumPy raises the analogous error for the same input
        with self.assertRaisesRegex(np.linalg.LinAlgError, r'Last 2 dimensions of the array must be square'):
            np.linalg.cholesky(A.cpu().numpy())
        # cholesky requires the input to be at least 2 dimensional tensor
        A = torch.randn(2, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, r'must have at least 2 dimensions'):
            torch.linalg.cholesky(A)
        with self.assertRaisesRegex(np.linalg.LinAlgError,
                                    r'1-dimensional array given\. Array must be at least two-dimensional'):
            np.linalg.cholesky(A.cpu().numpy())
        # if the input matrix is not positive definite, an error should be raised
        A = torch.eye(3, 3, dtype=dtype, device=device)
        A[-1, -1] = 0  # Now A is not positive definite
        with self.assertRaisesRegex(RuntimeError, r'minor of order 3 is not positive-definite'):
            torch.linalg.cholesky(A)
        with self.assertRaisesRegex(np.linalg.LinAlgError, r'Matrix is not positive definite'):
            np.linalg.cholesky(A.cpu().numpy())
        # if at least one matrix in the batch is singular, an error should be raised
        A = torch.eye(3, 3, dtype=dtype, device=device)
        A = A.reshape((1, 3, 3))
        A = A.repeat(5, 1, 1)
        A[4, -1, -1] = 0  # Now A[4] is not positive definite
        with self.assertRaisesRegex(RuntimeError, r'\(Batch element 4\): The factorization could not be completed'):
            torch.linalg.cholesky(A)
        # if out tensor with wrong shape is passed a warning is given
        A = random_hermitian_pd_matrix(3, dtype=dtype, device=device)
        out = torch.empty(2, 3, dtype=dtype, device=device)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.cholesky(A, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # dtypes should be safely castable
        out = torch.empty(*A.shape, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
            torch.linalg.cholesky(A, out=out)
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, device=wrong_device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "Expected result and input tensors to be on the same device"):
                torch.linalg.cholesky(A, out=out)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float64, torch.complex128)
def test_cholesky_hermitian_grad(self, device, dtype):
# Check that the gradient is Hermitian (or symmetric)
def run_test(shape):
root = torch.rand(*shape, dtype=dtype, device=device)
root = torch.matmul(root, root.mH)
root.requires_grad_()
chol = torch.linalg.cholesky(root).sum().backward()
self.assertEqual(root.grad, root.grad.mH)
shapes = ((3, 3), (1, 1, 3, 3))
for shape in shapes:
run_test(shape)
    # NOTE: old_cholesky* tests were moved here from test_torch.py and test_autograd.py
    @slowTest
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.double)
    def test_old_cholesky_batched_many_batches(self, device, dtype):
        """Stress torch.cholesky with very large batch counts (2x2 matrices, 262144/524288 batches)."""
        from torch.testing._internal.common_utils import random_symmetric_pd_matrix
        def cholesky_test_helper(n, batchsize, device, upper):
            A = random_symmetric_pd_matrix(n, batchsize, dtype=dtype, device=device)
            chol_fact = torch.cholesky(A, upper=upper)
            if upper:
                # Correctness check: U^T @ U rebuilds A
                self.assertEqual(A, chol_fact.mT.matmul(chol_fact))
                # Upper triangular check
                self.assertEqual(chol_fact, chol_fact.triu())
            else:
                # Correctness check: L @ L^T rebuilds A
                self.assertEqual(A, chol_fact.matmul(chol_fact.mT))
                # Lower triangular check
                self.assertEqual(chol_fact, chol_fact.tril())
        for upper, batchsize in itertools.product([True, False], [262144, 524288]):
            cholesky_test_helper(2, batchsize, device, upper)
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_old_cholesky_batched(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
def cholesky_test_helper(n, batch_dims, upper):
A = random_hermitian_pd_matrix(n, *batch_dims, dtype=dtype, device=device)
cholesky_exp = torch.stack([m.cholesky(upper=upper) for m in A.reshape(-1, n, n)])
cholesky_exp = cholesky_exp.reshape_as(A)
self.assertEqual(cholesky_exp, torch.cholesky(A, upper=upper))
for upper, batchsize in itertools.product([True, False], [(3,), (3, 4), (2, 3, 4)]):
cholesky_test_helper(3, batchsize, upper)
    @precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @tf32_on_and_off(0.01)
    def test_old_cholesky(self, device, dtype):
        """Check that torch.cholesky factors rebuild the input for default, upper and lower variants."""
        from torch.testing._internal.common_utils import random_hermitian_pd_matrix
        A = random_hermitian_pd_matrix(10, dtype=dtype, device=device)
        # default Case
        C = torch.cholesky(A)
        B = torch.mm(C, C.t().conj())
        self.assertEqual(A, B, atol=1e-14, rtol=0)
        # test Upper Triangular
        U = torch.cholesky(A, True)
        B = torch.mm(U.t().conj(), U)
        self.assertEqual(A, B, atol=1e-14, rtol=0, msg='cholesky (upper) did not allow rebuilding the original matrix')
        # test Lower Triangular
        L = torch.cholesky(A, False)
        B = torch.mm(L, L.t().conj())
        self.assertEqual(A, B, atol=1e-14, rtol=0, msg='cholesky (lower) did not allow rebuilding the original matrix')
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_old_cholesky_empty(self, device, dtype):
def run_test(upper):
A = torch.empty(0, 0, dtype=dtype, device=device)
chol = torch.cholesky(A, upper)
chol_A = torch.matmul(chol, chol.t().conj())
self.assertEqual(A, chol_A)
for upper in [True, False]:
run_test(upper)
    # Test for issue
    # https://github.com/pytorch/pytorch/issues/57032
    # torch.cholesky with upper=True for batched CUDA inputs was wrong
    # it was using the lower triangular part instead of the upper one
    @onlyCUDA
    @skipCUDAIfNoMagma
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_old_cholesky_batched_upper(self, device, dtype):
        """Regression test (gh-57032): batched upper=True cholesky on CUDA must read the upper triangle."""
        from torch.testing._internal.common_utils import random_hermitian_pd_matrix
        batchsize = 2
        A = random_hermitian_pd_matrix(3, batchsize, dtype=dtype, device=device)
        A_triu = A.triu()  # fill the lower triangular part with zero
        U = torch.cholesky(A_triu, upper=True)
        # U^H @ U must rebuild the original (full) Hermitian matrix
        reconstruct_A = U.mH @ U
        self.assertEqual(A, reconstruct_A)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_cholesky_ex(self, device, dtype):
    """linalg.cholesky_ex on PD input must match np.linalg.cholesky and
    report info == 0 for every batch element."""
    from torch.testing._internal.common_utils import random_hermitian_pd_matrix

    def run_test(n, batch):
        A = random_hermitian_pd_matrix(n, *batch, dtype=dtype, device=device)
        expected_L = np.linalg.cholesky(A.cpu().numpy())
        expected_info = torch.zeros(A.shape[:-2], dtype=torch.int32, device=device)
        actual_L, actual_info = torch.linalg.cholesky_ex(A)

        # For fp32 individual entries in matrices can differ between PyTorch and NumPy
        # Let's compare the norms of matrices instead
        if A.numel() > 0 and dtype in [torch.float32, torch.complex64]:
            # NOTE(review): torch.linalg.norm is called with the NumPy-style
            # `axis=` keyword here — confirm this spelling is accepted.
            expected_norm = np.linalg.norm(expected_L, ord=1, axis=(-2, -1))
            actual_norm = torch.linalg.norm(actual_L, ord=1, axis=(-2, -1))
            self.assertEqual(actual_norm, expected_norm)
            # loose per-entry tolerances for the 32-bit dtypes
            self.assertEqual(actual_L, expected_L, atol=1e-2, rtol=1e-5)
        else:
            self.assertEqual(actual_L, expected_L)
        self.assertEqual(actual_info, expected_info)

    ns = (0, 3, 5)
    batches = ((), (2, ), (2, 1))
    for n, batch in itertools.product(ns, batches):
        run_test(n, batch)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_cholesky_ex_non_pd(self, device, dtype):
    """For non-positive-definite input, cholesky_ex must report the order of
    the failing leading minor in `info` (and raise with check_errors=True)."""
    # Single matrix: zeroing the last diagonal entry makes the order-3
    # leading minor non-PD, so info should be 3.
    A = torch.eye(3, 3, dtype=dtype, device=device)
    A[-1, -1] = 0
    _, info = torch.linalg.cholesky_ex(A)
    self.assertEqual(info, 3)
    with self.assertRaisesRegex(RuntimeError, r'minor of order 3 is not positive-definite'):
        torch.linalg.cholesky_ex(A, check_errors=True)

    # Batched input: only batch element 3 is made non-PD (at diagonal
    # position 1), so info is zero everywhere except info[3] == 2.
    A = torch.eye(3, 3, dtype=dtype, device=device)
    A = A.reshape((1, 3, 3))
    A = A.repeat(5, 1, 1)
    A[3, -2, -2] = 0
    _, info = torch.linalg.cholesky_ex(A)
    expected_info = torch.zeros(A.shape[:-2], dtype=torch.int32, device=device)
    expected_info[3] = 2
    self.assertEqual(info, expected_info)
    with self.assertRaisesRegex(RuntimeError, r'\(Batch element 3\): The factorization could not be completed'):
        torch.linalg.cholesky_ex(A, check_errors=True)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_cholesky_ex_out_info_error(self, device, dtype):
    """cholesky_ex must reject an out `info` tensor that is not int32."""
    from torch.testing._internal.common_utils import random_hermitian_pd_matrix

    # passing an int64 info tensor where int32 is expected should raise
    A = random_hermitian_pd_matrix(3, dtype=dtype, device=device)
    L = torch.empty(A.shape, dtype=dtype, device=device)
    info = torch.empty(A.shape[:-2], dtype=torch.int64, device=device)
    with self.assertRaisesRegex(RuntimeError, "but got info with dtype Long"):
        torch.linalg.cholesky_ex(A, out=(L, info))
@onlyCPU
@skipCPUIfNoLapack
@dtypes(torch.float64, torch.complex128)
def test_old_cholesky_autograd(self, device, dtype):
    """Run gradcheck/gradgradcheck on torch.cholesky and verify that the
    gradient at a Hermitian point is itself Hermitian."""
    def func(root, upper):
        # symmetrize/hermitize so the input to cholesky is Hermitian
        x = 0.5 * (root + root.mH)
        return torch.cholesky(x, upper)

    def run_test(upper, dims):
        # adding the identity keeps the symmetrized matrix well-conditioned
        root = torch.rand(*dims, dtype=dtype, device=device, requires_grad=True)
        root = root + torch.eye(dims[-1])

        gradcheck(func, [root, upper])
        gradgradcheck(func, [root, upper])

        root = torch.rand(*dims, dtype=dtype, device=device)
        root = torch.matmul(root, root.mH)
        root.requires_grad_()
        # chol is None here (backward() returns None); the call is executed
        # for its side effect of populating root.grad
        chol = root.cholesky().sum().backward()
        self.assertEqual(root.grad, root.grad.mH)

    for upper, dims in itertools.product([True, False], [(3, 3), (4, 3, 2, 2)]):
        run_test(upper, dims)
def _test_addr_vs_numpy(self, device, dtype, beta=1, alpha=1):
    """Helper: compare torch.addr (functional and out= forms) against a
    NumPy reference built from np.outer.

    Args:
        device, dtype: test parametrization.
        beta, alpha: scaling factors forwarded to torch.addr; booleans are
            allowed (used by the Boolean-dtype test).
    """
    def check(m, a, b, beta, alpha):
        if dtype == torch.bfloat16:
            # NumPy has no bfloat16: compute the reference in float64 and
            # allow a dtype mismatch in the comparison below
            a_np = a.to(torch.double).cpu().numpy()
            b_np = b.to(torch.double).cpu().numpy()
            m_np = m.to(torch.double).cpu().numpy()
            exact_dtype = False
        else:
            a_np = a.cpu().numpy()
            b_np = b.cpu().numpy()
            m_np = m.cpu().numpy()
            exact_dtype = True
        if beta == 0:
            # beta == 0 must ignore m entirely (even inf/nan entries; see below)
            expected = alpha * np.outer(a_np, b_np)
        else:
            expected = beta * m_np + alpha * np.outer(a_np, b_np)

        res = torch.addr(m, a, b, beta=beta, alpha=alpha)
        self.assertEqual(res, expected, exact_dtype=exact_dtype)

        # out= variant must produce the same values
        out = torch.empty_like(res)
        torch.addr(m, a, b, beta=beta, alpha=alpha, out=out)
        self.assertEqual(out, expected, exact_dtype=exact_dtype)

    m = make_tensor((50, 50), device=device, dtype=dtype, low=-2, high=2)
    a = make_tensor((50,), device=device, dtype=dtype, low=-2, high=2)
    b = make_tensor((50,), device=device, dtype=dtype, low=-2, high=2)

    check(m, a, b, beta, alpha)

    # non-contiguous m (transposed view)
    m_transpose = torch.transpose(m, 0, 1)
    check(m_transpose, a, b, beta, alpha)

    # zero-strided vector input (expanded 1-element tensor)
    zero_strided = make_tensor((1,), device=device, dtype=dtype, low=-2, high=2).expand(50)
    check(m, zero_strided, b, beta, alpha)

    # scalar m broadcast against the outer product
    m_scalar = torch.tensor(1, device=device, dtype=dtype)
    check(m_scalar, a, b, beta, alpha)

    # inf/nan entries of m must not leak into the result when beta == 0
    float_and_complex_dtypes = get_all_fp_dtypes() + get_all_complex_dtypes()
    if beta == 0 and dtype in float_and_complex_dtypes:
        m[0][10] = m[10][10] = m[20][20] = float('inf')
        m[1][10] = m[11][10] = m[21][20] = float('nan')
        check(m, a, b, 0, alpha)
@dtypes(torch.bool)
def test_addr_bool(self, device, dtype):
    """torch.addr with Boolean beta/alpha must match the NumPy reference
    for every combination of the two Boolean scaling factors."""
    for beta, alpha in ((True, False), (False, True), (False, False), (True, True)):
        self._test_addr_vs_numpy(device, dtype, beta=beta, alpha=alpha)
@dtypes(*(get_all_int_dtypes()))
def test_addr_integral(self, device, dtype):
    """addr with integral tensors: floating-point or Boolean beta/alpha
    must be rejected; integral scalings are compared against NumPy."""
    with self.assertRaisesRegex(RuntimeError,
                                'argument beta must not be a floating point number.'):
        self._test_addr_vs_numpy(device, dtype, beta=2., alpha=1)
    with self.assertRaisesRegex(RuntimeError,
                                'argument alpha must not be a floating point number.'):
        self._test_addr_vs_numpy(device, dtype, beta=2, alpha=1.)
    with self.assertRaisesRegex(RuntimeError,
                                'Boolean beta only supported for Boolean results.'):
        self._test_addr_vs_numpy(device, dtype, beta=True, alpha=1)
    with self.assertRaisesRegex(RuntimeError,
                                'Boolean alpha only supported for Boolean results.'):
        self._test_addr_vs_numpy(device, dtype, beta=2, alpha=True)
    # beta == 0 path (m must be ignored)
    self._test_addr_vs_numpy(device, dtype, beta=0, alpha=2)
    # beta != 0 path
    self._test_addr_vs_numpy(device, dtype, beta=2, alpha=2)
@precisionOverride({torch.bfloat16: 1e-1})
@dtypes(*(get_all_fp_dtypes() + get_all_complex_dtypes()))
def test_addr_float_and_complex(self, device, dtype):
    """addr with float/complex tensors: Boolean beta/alpha must be rejected;
    numeric (and, for complex dtypes, complex) scalings are checked vs NumPy."""
    with self.assertRaisesRegex(RuntimeError,
                                'Boolean beta only supported for Boolean results.'):
        self._test_addr_vs_numpy(device, dtype, beta=True, alpha=1)
    with self.assertRaisesRegex(RuntimeError,
                                'Boolean alpha only supported for Boolean results.'):
        self._test_addr_vs_numpy(device, dtype, beta=2, alpha=True)
    # beta == 0 path (m must be ignored)
    self._test_addr_vs_numpy(device, dtype, beta=0., alpha=2)
    # beta != 0 path
    self._test_addr_vs_numpy(device, dtype, beta=0.5, alpha=2)
    # complex scaling factors, valid only for complex result dtypes
    if dtype in get_all_complex_dtypes():
        self._test_addr_vs_numpy(device, dtype, beta=(0 + 0.1j), alpha=(0.2 - 0.2j))
@dtypes(*itertools.product(get_all_dtypes(),
                           get_all_dtypes()))
def test_outer_type_promotion(self, device, dtypes):
    """outer/ger, in both function and method form, must promote the result
    to torch.result_type of the two operands."""
    lhs = torch.randn(5).to(device=device, dtype=dtypes[0])
    rhs = torch.randn(5).to(device=device, dtype=dtypes[1])
    expected_dtype = torch.result_type(lhs, rhs)
    for outer_fn in (torch.outer, torch.Tensor.outer, torch.ger, torch.Tensor.ger):
        self.assertEqual(outer_fn(lhs, rhs).dtype, expected_dtype)
def test_addr_type_promotion(self, device):
    """addr must promote m, a, b to their common dtype for every dtype triple."""
    for dtypes0, dtypes1, dtypes2 in product(get_all_dtypes(), repeat=3):
        a = make_tensor((5,), device=device, dtype=dtypes0, low=-2, high=2)
        b = make_tensor((5,), device=device, dtype=dtypes1, low=-2, high=2)
        m = make_tensor((5, 5), device=device, dtype=dtypes2, low=-2, high=2)

        # expected result dtype: pairwise promotion of all three inputs
        desired_dtype = torch.promote_types(torch.promote_types(dtypes0, dtypes1),
                                            dtypes2)
        for op in (torch.addr, torch.Tensor.addr):
            result = op(m, a, b)
            self.assertEqual(result.dtype, desired_dtype)
# Tests migrated from test_torch.py
# 1) test the shape of the result tensor when there is empty input tensor
# 2) test the Runtime Exception when there is scalar input tensor
def test_outer_ger_addr_legacy_tests(self, device):
    """Legacy outer/ger/addr checks: empty-input result shapes and
    RuntimeError on scalar (0-dim) operands."""
    # empty inputs: result keeps the (possibly zero-sized) outer-product shape
    for size in ((0, 0), (0, 5), (5, 0)):
        a = torch.rand(size[0], device=device)
        b = torch.rand(size[1], device=device)

        self.assertEqual(torch.outer(a, b).shape, size)
        self.assertEqual(torch.ger(a, b).shape, size)

        m = torch.empty(size, device=device)
        self.assertEqual(torch.addr(m, a, b).shape, size)

    # scalar (0-dim) operands must be rejected in either argument position
    m = torch.randn(5, 6, device=device)
    a = torch.randn(5, device=device)
    b = torch.tensor(6, device=device)

    self.assertRaises(RuntimeError, lambda: torch.outer(a, b))
    self.assertRaises(RuntimeError, lambda: torch.outer(b, a))
    self.assertRaises(RuntimeError, lambda: torch.ger(a, b))
    self.assertRaises(RuntimeError, lambda: torch.ger(b, a))
    self.assertRaises(RuntimeError, lambda: torch.addr(m, a, b))
    self.assertRaises(RuntimeError, lambda: torch.addr(m, b, a))
# Tests torch.det and its alias, torch.linalg.det, vs. NumPy
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double, torch.cdouble)
def test_det(self, device, dtype):
    """Check det / Tensor.det / linalg.det against np.linalg.det on square
    and batched inputs, plus the RuntimeError for 1-D input."""
    tensors = (
        torch.randn((2, 2), device=device, dtype=dtype),
        torch.randn((129, 129), device=device, dtype=dtype),
        torch.randn((3, 52, 52), device=device, dtype=dtype),
        torch.randn((4, 2, 26, 26), device=device, dtype=dtype))

    ops = (torch.det, torch.Tensor.det,
           torch.linalg.det)
    for t in tensors:
        expected = np.linalg.det(t.cpu().numpy())
        for op in ops:
            actual = op(t)
            self.assertEqual(actual, expected)
            self.compare_with_numpy(op, np.linalg.det, t)

    # NOTE: det requires a 2D+ tensor
    t = torch.randn(1, device=device, dtype=dtype)
    with self.assertRaises(RuntimeError):
        # `op` here is the variable left over from the loop above,
        # i.e. the last entry of `ops` (torch.linalg.det)
        op(t)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
def test_eigh(self, device, dtype):
    """Compare torch.linalg.eigh against np.linalg.eigh for both UPLO
    values, over several shapes and batch dimensions, including out=."""
    from torch.testing._internal.common_utils import random_hermitian_matrix

    def run_test(shape, batch, uplo):
        matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)
        expected_w, expected_v = np.linalg.eigh(matrix.cpu().numpy(), UPLO=uplo)
        actual_w, actual_v = torch.linalg.eigh(matrix, UPLO=uplo)
        self.assertEqual(actual_w, expected_w)
        # sign of eigenvectors is not unique and therefore absolute values are compared
        self.assertEqual(abs(actual_v), abs(expected_v))
        # additionally we can multiply the eigenvector with a phase factor e^{i\phi} and then compare the values
        # let's choose the convention that the first element of the eigenvectors from torch and numpy be the same
        if matrix.numel() > 0:
            phase = torch.from_numpy(expected_v[..., 0, :]).to(device=device).div(actual_v[..., 0, :])
            actual_v_rotated = actual_v * phase.unsqueeze(-2).expand_as(actual_v)
            self.assertEqual(actual_v_rotated, expected_v)

        # check the out= variant: returns its out arguments, same values
        out_w = torch.empty_like(actual_w)
        out_v = torch.empty_like(actual_v)
        ans_w, ans_v = torch.linalg.eigh(matrix, UPLO=uplo, out=(out_w, out_v))
        self.assertEqual(ans_w, out_w)
        self.assertEqual(ans_v, out_v)
        self.assertEqual(ans_w, actual_w)
        self.assertEqual(abs(ans_v), abs(actual_v))

    shapes = (0, 3, 5)
    batches = ((), (3, ), (2, 2))
    uplos = ["U", "L"]
    for shape, batch, uplo in itertools.product(shapes, batches, uplos):
        run_test(shape, batch, uplo)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
def test_eigh_lower_uplo(self, device, dtype):
    """Lowercase UPLO strings ("u"/"l") must be accepted by eigh and agree
    with the NumPy result for the same UPLO."""
    for uplo in ("u", "l"):
        matrix = torch.randn(3, 3, 2, 2, dtype=dtype, device=device)
        expected_w, expected_v = np.linalg.eigh(matrix.cpu().numpy(), UPLO=uplo)
        actual_w, actual_v = torch.linalg.eigh(matrix, UPLO=uplo)
        self.assertEqual(actual_w, expected_w)
        # eigenvector signs are not unique, so only magnitudes are compared
        self.assertEqual(abs(actual_v), abs(expected_v))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_eigh_errors_and_warnings(self, device, dtype):
    """eigh error paths: non-square input, invalid UPLO, and out= checks
    (resize warning, wrong dtype, wrong device)."""
    from torch.testing._internal.common_utils import random_hermitian_matrix

    # eigh requires batches of square matrices
    t = torch.randn(2, 3, device=device, dtype=dtype)
    with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
        torch.linalg.eigh(t)

    # UPLO must be 'U' or 'L' (same contract as NumPy)
    t = torch.randn(3, 3, device=device, dtype=dtype)
    for uplo in ["a", "wrong"]:
        with self.assertRaisesRegex(RuntimeError, "be \'L\' or \'U\'"):
            torch.linalg.eigh(t, UPLO=uplo)
        with self.assertRaisesRegex(ValueError, "be \'L\' or \'U\'"):
            np.linalg.eigh(t.cpu().numpy(), UPLO=uplo)

    # non-empty out tensors of the wrong shape trigger a resize warning
    a = random_hermitian_matrix(3, dtype=dtype, device=device)
    real_dtype = a.real.dtype if dtype.is_complex else dtype  # eigenvalues are real
    out_w = torch.empty(7, 7, dtype=real_dtype, device=device)
    out_v = torch.empty(7, 7, dtype=dtype, device=device)
    with warnings.catch_warnings(record=True) as w:
        # Trigger warning (one per out tensor)
        torch.linalg.eigh(a, out=(out_w, out_v))
        self.assertEqual(len(w), 2)
        self.assertTrue("An output with one or more elements was resized" in str(w[-2].message))
        self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))

    # out tensors with non-castable dtypes must be rejected
    out_w = torch.empty(0, dtype=real_dtype, device=device)
    out_v = torch.empty(0, dtype=torch.int, device=device)
    with self.assertRaisesRegex(RuntimeError, "but got eigenvectors with dtype Int"):
        torch.linalg.eigh(a, out=(out_w, out_v))

    out_w = torch.empty(0, dtype=torch.int, device=device)
    out_v = torch.empty(0, dtype=dtype, device=device)
    with self.assertRaisesRegex(RuntimeError, "but got eigenvalues with dtype Int"):
        torch.linalg.eigh(a, out=(out_w, out_v))

    # out tensors on a different device must be rejected
    if torch.cuda.is_available():
        wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'

        out_w = torch.empty(0, device=wrong_device, dtype=dtype)
        out_v = torch.empty(0, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
            torch.linalg.eigh(a, out=(out_w, out_v))

        out_w = torch.empty(0, device=device, dtype=dtype)
        out_v = torch.empty(0, device=wrong_device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
            torch.linalg.eigh(a, out=(out_w, out_v))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
def test_eigh_non_contiguous(self, device, dtype):
    """eigh must handle non-contiguous inputs: transposed views and
    stride-skipped batch slices."""
    from torch.testing._internal.common_utils import random_hermitian_matrix

    def run_test(matrix, uplo):
        # sanity check: the callers must hand in a non-contiguous tensor
        self.assertFalse(matrix.is_contiguous())
        expected_w, expected_v = np.linalg.eigh(matrix.cpu().numpy(), UPLO=uplo)
        actual_w, actual_v = torch.linalg.eigh(matrix, UPLO=uplo)
        self.assertEqual(actual_w, expected_w)
        # sign of eigenvectors is not unique; compare absolute values
        self.assertEqual(abs(actual_v), abs(expected_v))

    def run_test_permuted(shape, batch, uplo):
        # transposing the last two dims yields a non-contiguous view
        matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)
        matrix = matrix.mT
        run_test(matrix, uplo)

    def run_test_skipped_elements(shape, batch, uplo):
        # skipping every other batch element yields a non-contiguous view
        matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)
        matrix = matrix[::2]
        run_test(matrix, uplo)

    shapes = (3, 5)
    batches = ((4, ), (4, 2))
    uplos = ["U", "L"]
    for shape, batch, uplo in itertools.product(shapes, batches, uplos):
        run_test_permuted(shape, batch, uplo)
        run_test_skipped_elements(shape, batch, uplo)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float64, torch.complex128)
def test_eigh_hermitian_grad(self, device, dtype):
    """The gradient of linalg.eigh w.r.t. a Hermitian input must itself be
    Hermitian, for both UPLO choices.

    Fix: ``run_test`` accepted ``uplo`` but never forwarded it to ``eigh``,
    so the ["L", "U"] loop ran the identical default-UPLO check twice; the
    argument is now passed through so both triangles are exercised.
    """
    from torch.testing._internal.common_utils import random_hermitian_matrix

    def run_test(dims, uplo):
        x = random_hermitian_matrix(dims[-1], *dims[:-2], device=device, dtype=dtype).requires_grad_()
        # forward uplo so both loop branches actually differ
        w, v = torch.linalg.eigh(x, UPLO=uplo)
        (w.sum() + abs(v).sum()).backward()
        # gradient at a Hermitian point should itself be Hermitian
        self.assertEqual(x.grad, x.grad.mH)

    for dims, uplo in itertools.product([(3, 3), (1, 1, 3, 3)], ["L", "U"]):
        run_test(dims, uplo)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
def test_eigvalsh(self, device, dtype):
    """Compare torch.linalg.eigvalsh against np.linalg.eigvalsh for both
    UPLO values, over several shapes and batches, including out=."""
    from torch.testing._internal.common_utils import random_hermitian_matrix

    def run_test(shape, batch, uplo):
        matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)
        expected_w = np.linalg.eigvalsh(matrix.cpu().numpy(), UPLO=uplo)
        actual_w = torch.linalg.eigvalsh(matrix, UPLO=uplo)
        self.assertEqual(actual_w, expected_w)

        # check the out= variant: returns its out argument with same values
        out = torch.empty_like(actual_w)
        ans = torch.linalg.eigvalsh(matrix, UPLO=uplo, out=out)
        self.assertEqual(ans, out)
        self.assertEqual(ans, actual_w)

    shapes = (0, 3, 5)
    batches = ((), (3, ), (2, 2))
    uplos = ["U", "L"]
    for shape, batch, uplo in itertools.product(shapes, batches, uplos):
        run_test(shape, batch, uplo)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_eigvalsh_errors_and_warnings(self, device, dtype):
    """eigvalsh error paths: non-square input, invalid UPLO, and out=
    checks (resize warning, wrong dtype, wrong device)."""
    # eigvalsh requires batches of square matrices
    t = torch.randn(2, 3, device=device, dtype=dtype)
    with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
        torch.linalg.eigvalsh(t)

    # UPLO must be 'U' or 'L' (same contract as NumPy)
    t = torch.randn(3, 3, device=device, dtype=dtype)
    for uplo in ["a", "wrong"]:
        with self.assertRaisesRegex(RuntimeError, "be \'L\' or \'U\'"):
            torch.linalg.eigvalsh(t, UPLO=uplo)
        with self.assertRaisesRegex(ValueError, "be \'L\' or \'U\'"):
            np.linalg.eigvalsh(t.cpu().numpy(), UPLO=uplo)

    # non-empty out tensor of the wrong shape triggers a resize warning
    real_dtype = t.real.dtype if dtype.is_complex else dtype  # eigenvalues are real
    out = torch.empty_like(t).to(real_dtype)
    with warnings.catch_warnings(record=True) as w:
        # Trigger warning
        torch.linalg.eigvalsh(t, out=out)
        # Check warning occurs
        self.assertEqual(len(w), 1)
        self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))

    # out tensor with a non-castable dtype must be rejected
    out = torch.empty(0, dtype=torch.int, device=device)
    with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
        torch.linalg.eigvalsh(t, out=out)

    # out tensor on a different device must be rejected
    if torch.cuda.is_available():
        wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
        out = torch.empty(0, device=wrong_device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
            torch.linalg.eigvalsh(t, out=out)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
def test_eigvalsh_non_contiguous(self, device, dtype):
    """eigvalsh must handle non-contiguous inputs: transposed views and
    stride-skipped batch slices."""
    from torch.testing._internal.common_utils import random_hermitian_matrix

    def run_test(matrix, uplo):
        # sanity check: the callers must hand in a non-contiguous tensor
        self.assertFalse(matrix.is_contiguous())
        expected_w = np.linalg.eigvalsh(matrix.cpu().numpy(), UPLO=uplo)
        actual_w = torch.linalg.eigvalsh(matrix, UPLO=uplo)
        self.assertEqual(actual_w, expected_w)

    def run_test_permuted(shape, batch, uplo):
        # transposing the last two dims yields a non-contiguous view
        matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)
        matrix = matrix.mT
        run_test(matrix, uplo)

    def run_test_skipped_elements(shape, batch, uplo):
        # skipping every other batch element yields a non-contiguous view
        matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)
        matrix = matrix[::2]
        run_test(matrix, uplo)

    shapes = (3, 5)
    batches = ((4, ), (4, 2))
    uplos = ["U", "L"]
    for shape, batch, uplo in itertools.product(shapes, batches, uplos):
        run_test_permuted(shape, batch, uplo)
        run_test_skipped_elements(shape, batch, uplo)
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_kron(self, device, dtype):
    """torch.kron must match np.kron over mixed operand ranks, and the
    out= variant must return its out argument with identical values."""
    shapes = [(4,), (2, 2), (1, 2, 3), (1, 2, 3, 3)]
    for lhs_shape, rhs_shape in itertools.product(shapes, reversed(shapes)):
        lhs = torch.rand(lhs_shape, dtype=dtype, device=device)
        rhs = torch.rand(rhs_shape, dtype=dtype, device=device)
        reference = np.kron(lhs.cpu().numpy(), rhs.cpu().numpy())
        product_result = torch.kron(lhs, rhs)
        self.assertEqual(product_result, reference)

        # out= variant: returns `out` itself and agrees with the functional form
        out = torch.empty_like(product_result)
        returned = torch.kron(lhs, rhs, out=out)
        self.assertEqual(returned, out)
        self.assertEqual(returned, product_result)
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_kron_non_contiguous(self, device, dtype):
    """kron must handle non-contiguous inputs and out= tensors, and must
    preserve the memory format of channels_last inputs.

    Fix: ``run_test_transposed`` was defined but never invoked, so the
    transposed (``.mT``) non-contiguity path was dead code; it is now
    called alongside ``run_test_skipped_elements``.
    """
    def run_test_transposed(a_shape, b_shape):
        # transposed last-two-dims views are non-contiguous
        a = torch.rand(a_shape, dtype=dtype, device=device).mT
        b = torch.rand(b_shape, dtype=dtype, device=device).mT
        self.assertFalse(a.is_contiguous())
        self.assertFalse(b.is_contiguous())

        expected = np.kron(a.cpu().numpy(), b.cpu().numpy())
        result = torch.kron(a, b)
        self.assertEqual(result, expected)

        # out= with a non-contiguous destination
        out = torch.empty(result.mT.shape, dtype=dtype, device=device).mT
        self.assertFalse(out.is_contiguous())
        ans = torch.kron(a, b, out=out)
        self.assertEqual(ans, out)
        self.assertEqual(ans, result)

    def run_test_skipped_elements(a_shape, b_shape):
        # step-2 slicing produces non-contiguous views
        a = torch.rand(2 * a_shape[0], *a_shape[1:], dtype=dtype, device=device)[::2]
        b = torch.rand(2 * b_shape[0], *b_shape[1:], dtype=dtype, device=device)[::2]
        self.assertFalse(a.is_contiguous())
        self.assertFalse(b.is_contiguous())

        expected = np.kron(a.cpu().numpy(), b.cpu().numpy())
        result = torch.kron(a, b)
        self.assertEqual(result, expected)

        # out= with a non-contiguous destination
        out = torch.empty(2 * result.shape[0], *result.shape[1:], dtype=dtype, device=device)[::2]
        self.assertFalse(out.is_contiguous())
        ans = torch.kron(a, b, out=out)
        self.assertEqual(ans, out)
        self.assertEqual(ans, result)

    shapes = [(2, 2), (2, 2, 3), (2, 2, 3, 3)]
    for a_shape, b_shape in itertools.product(shapes, reversed(shapes)):
        run_test_transposed(a_shape, b_shape)
        run_test_skipped_elements(a_shape, b_shape)

    # channels_last inputs yield a channels_last result, both with and
    # without out=; a contiguous out keeps its own format
    a = torch.randn(1, 2, 3, 4, dtype=dtype, device=device).contiguous(memory_format=torch.channels_last)
    b = torch.randn(1, 2, 3, 4, dtype=dtype, device=device).contiguous(memory_format=torch.channels_last)
    c = torch.kron(a, b)
    self.assertTrue(c.is_contiguous(memory_format=torch.channels_last))
    torch.kron(a, b, out=c)
    self.assertTrue(c.is_contiguous(memory_format=torch.channels_last))
    c = c.contiguous(memory_format=torch.contiguous_format)
    torch.kron(a, b, out=c)
    self.assertTrue(c.is_contiguous(memory_format=torch.contiguous_format))
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_kron_empty(self, device, dtype):
    """kron with an empty operand must match np.kron's values and,
    with the operands swapped, np.kron's result shape."""
    for empty_shape in [(0,), (2, 0), (1, 0, 3)]:
        eye = torch.eye(3, dtype=dtype, device=device)
        empty = torch.empty(empty_shape, dtype=dtype, device=device)
        reference = np.kron(eye.cpu().numpy(), empty.cpu().numpy())
        self.assertEqual(torch.kron(eye, empty), reference)

        # swapped operand order: NumPy's (eye, empty) reference shape
        # matches torch's (empty, eye) result shape
        self.assertEqual(torch.kron(empty, eye).shape, reference.shape)
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_kron_errors_and_warnings(self, device, dtype):
    """kron out= checks: a wrong-shape out warns (resize), a wrong-dtype
    out raises."""
    # if non-empty out tensor with wrong shape is passed a warning is given
    a = torch.eye(3, dtype=dtype, device=device)
    b = torch.ones((2, 2), dtype=dtype, device=device)
    out = torch.empty_like(a)
    with warnings.catch_warnings(record=True) as w:
        # Trigger warning
        torch.kron(a, b, out=out)
        # Check warning occurs
        self.assertEqual(len(w), 1)
        self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))

    # dtypes should match
    out = torch.empty_like(a).to(torch.int)
    with self.assertRaisesRegex(RuntimeError, "can't be cast to the desired output type"):
        torch.kron(a, b, out=out)
# This test confirms that linalg.norm's dtype behavior works
# as expected, according to the function's documentation
@skipCUDAIfNoMagma
def test_norm_dtype(self, device):
    """Check linalg.norm result dtypes across from/to dtype combinations.

    Verifies: the default result dtype (real value type for complex input),
    the dtype produced through out=, the dtype= keyword, and that a dtype=
    conflicting with the out tensor's dtype raises.
    """
    def run_test_case(input_size, ord, keepdim, from_dtype, to_dtype):
        def get_compare_dtype(type0, type1):
            # compare in the lower (32-bit-based) precision of the two
            types_32bit_based = [torch.float, torch.cfloat]
            is_complex = type0.is_complex or type1.is_complex
            if type0 in types_32bit_based or type1 in types_32bit_based:
                return torch.cfloat if is_complex else torch.float
            else:
                return torch.cdouble if is_complex else torch.double

        compare_dtype = get_compare_dtype(from_dtype, to_dtype)

        def get_value_type(dtype):
            # map a complex dtype to its real value type (norms are real)
            if dtype == torch.cfloat:
                return torch.float
            elif dtype == torch.cdouble:
                return torch.double
            elif dtype == torch.complex32:
                return torch.float16
            else:
                return dtype

        msg = (
            f'input_size={input_size}, ord={ord}, keepdim={keepdim}, '
            f'from_dtype={from_dtype}, to_dtype={to_dtype}')
        input = torch.randn(*input_size, dtype=from_dtype, device=device)
        result = torch.linalg.norm(input, ord, keepdim=keepdim)
        if from_dtype.is_complex:
            # complex input produces a real-valued norm by default
            self.assertEqual(result.dtype, get_value_type(from_dtype), msg=msg)
        else:
            self.assertEqual(result.dtype, from_dtype, msg=msg)

        # out= must keep the out tensor's dtype and agree numerically
        result_out = torch.empty((0), dtype=to_dtype, device=device)
        torch.linalg.norm(input, ord, keepdim=keepdim, out=result_out)
        self.assertEqual(result_out.dtype, to_dtype, msg=msg)
        self.assertEqual(result.to(compare_dtype), result_out.to(compare_dtype), msg=msg)

        # dtype= keyword must determine the result dtype
        result_with_dtype = torch.linalg.norm(input, ord, keepdim=keepdim, dtype=to_dtype)
        self.assertEqual(result_with_dtype.dtype, to_dtype, msg=msg)

        if from_dtype.is_complex:
            # dtype= on a complex input should match converting the input first
            result_convert_first = torch.linalg.norm(input.to(to_dtype), ord, keepdim=keepdim)
            self.assertEqual(result_with_dtype.to(compare_dtype), result_convert_first.to(compare_dtype), msg=msg)
        else:
            self.assertEqual(result.to(compare_dtype), result_with_dtype.to(compare_dtype), msg=msg)

        # dtype= combined with out= of the same dtype
        result_out_with_dtype = torch.empty_like(result_with_dtype)
        torch.linalg.norm(input, ord, keepdim=keepdim, dtype=to_dtype, out=result_out_with_dtype)
        self.assertEqual(result_out_with_dtype.dtype, to_dtype, msg=msg)
        self.assertEqual(result_with_dtype, result_out_with_dtype, msg=msg)

    ord_vector = [0, 0.1, -0.1, 1, -1, 2, -2, 3, -3, 4.5, -4.5, inf, -inf, None]
    ord_matrix = ['fro', 'nuc', 1, -1, 2, -2, inf, -inf, None]
    S = 10
    test_cases = [
        ((S, ), ord_vector),
        ((S, S), ord_matrix),
    ]
    for keepdim in [True, False]:
        for input_size, ord_settings in test_cases:
            for ord in ord_settings:
                # 2-norm / nuclear norm need LAPACK on CPU
                if self.device_type == 'cpu' and not torch._C.has_lapack and ord in [2, -2, 'nuc']:
                    continue
                dtypes = [torch.float, torch.double, torch.cfloat, torch.cdouble]
                for from_dtype, to_dtype in itertools.product(dtypes, dtypes):
                    # complex -> real conversion is not allowed here
                    if from_dtype.is_complex and not to_dtype.is_complex:
                        continue
                    run_test_case(input_size, ord, keepdim, from_dtype, to_dtype)

    # dtype= that conflicts with the out tensor's dtype must raise
    dtype_pairs = [
        (torch.float, torch.double),
        (torch.double, torch.float),
        (torch.cfloat, torch.cdouble),
        (torch.cdouble, torch.cfloat),
    ]
    for keepdim in [True, False]:
        for input_size, ord_settings in test_cases:
            for ord in ord_settings:
                for dtype, out_dtype in dtype_pairs:
                    input = torch.rand(*input_size)
                    result = torch.tensor([]).to(out_dtype)
                    with self.assertRaisesRegex(RuntimeError, r'provided dtype must match dtype of result'):
                        torch.linalg.norm(input, ord=ord, keepdim=keepdim, dtype=dtype, out=result)
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble, torch.bfloat16, torch.float16)
def test_vector_norm(self, device, dtype):
    """Check torch.linalg.vector_norm against a reference built from
    torch.linalg.norm given a flattened tensor, including empty-input
    error cases, the dtype= keyword, and the out= variant."""
    ord_vector = [0, 0.9, 1, 2, 3, inf, -0.5, -1, -2, -3, -inf]
    input_sizes = [
        (10, ),
        (4, 5),
        (3, 4, 5),
        (0, ),
        (0, 10),
        (0, 0),
        (10, 0, 10),
    ]

    def vector_norm_reference(input, ord, dim=None, keepdim=False, dtype=None):
        # for dim=None, flatten explicitly so linalg.norm treats the
        # input as a single vector
        if dim is None:
            input_maybe_flat = input.flatten(0, -1)
        else:
            input_maybe_flat = input

        result = torch.linalg.norm(input_maybe_flat, ord, dim=dim, keepdim=keepdim, dtype=dtype)
        if keepdim and dim is None:
            # restore the reduced dims as size-1 dims, as vector_norm would
            result = result.reshape([1] * input.dim())
        return result

    def run_test_case(input, ord, dim, keepdim, norm_dtype):
        msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}, norm_dtype={norm_dtype}'
        error_msg = None
        if input.numel() == 0:
            # empty inputs are errors for negative orders and for the
            # infinity norm over an empty dimension
            if ord < 0:
                error_msg = r'linalg.vector_norm of negative order cannot be performed on an empty tensor'
            elif ord == inf and (dim is None or input.size(dim) == 0):
                error_msg = (
                    r'linalg.vector_norm cannot compute the infinity norm on an empty '
                    r'dimension because the operation does not have an identity')
        if error_msg is None:
            result_dtype_reference = vector_norm_reference(input, ord, dim=dim, keepdim=keepdim, dtype=norm_dtype)
            result_dtype = torch.linalg.vector_norm(input, ord, dim=dim, keepdim=keepdim, dtype=norm_dtype)
            self.assertEqual(result_dtype, result_dtype_reference, msg=msg)

            if norm_dtype is not None:
                # dtype= must match converting the input first
                result_convert_before = torch.linalg.vector_norm(input.to(norm_dtype), ord, dim=dim, keepdim=keepdim)
                if norm_dtype.is_complex:
                    result_convert_before = result_convert_before.to(norm_dtype)

                result_out = torch.empty((0), dtype=norm_dtype, device=device)
                torch.linalg.vector_norm(input, ord, dtype=norm_dtype, dim=dim, keepdim=keepdim, out=result_out)
                self.assertEqual(result_convert_before, result_out, msg=msg)
            else:
                result_out = torch.empty((0), dtype=result_dtype.dtype, device=device)
                torch.linalg.vector_norm(input, ord, dim=dim, keepdim=keepdim, out=result_out)
                self.assertEqual(result_dtype, result_out, msg=msg)
        else:
            # both the reference and vector_norm itself must raise
            with self.assertRaises(RuntimeError):
                vector_norm_reference(input, ord, dim=dim, keepdim=keepdim)
            with self.assertRaisesRegex(RuntimeError, error_msg):
                torch.linalg.vector_norm(input, ord, dim=dim, keepdim=keepdim)

    if dtype.is_complex:
        norm_dtypes = [None, torch.cfloat, torch.cdouble]
    else:
        norm_dtypes = [None, torch.float, torch.double, torch.cfloat, torch.cdouble, torch.float16, torch.bfloat16]

    for input_size, ord, keepdim, norm_dtype in product(input_sizes, ord_vector, [True, False], norm_dtypes):
        input = make_tensor(input_size, device, dtype, low=-9, high=9)
        for dim in [None, random.randint(0, len(input_size) - 1)]:
            run_test_case(
                input,
                ord,
                dim,
                keepdim,
                norm_dtype)
def test_vector_norm_dim_tuple_arg(self, device):
    """vector_norm's dim argument: valid tuples/lists are accepted;
    out-of-range, duplicate, or non-int dims raise the documented errors."""
    test_cases = [
        # input size, dim, error, error message
        ((4, ), (0, ), None, None),
        ((4, ), (1, ), IndexError, r'Dimension out of range'),
        ((4, ), (-2, ), IndexError, r'Dimension out of range'),
        ((4, 3), (0, -1), None, None),
        ((4, 3), (0, 0), RuntimeError, r'dim 0 appears multiple times in the list of dims'),
        ((4, 3), (0, -2), RuntimeError, r'dim 0 appears multiple times in the list of dims'),
        ((4, 3), (0, 1.0), TypeError, r"argument 'dim' must be tuple of ints"),
        ((4, 3), (None, ), TypeError, r"argument 'dim' must be tuple of ints"),
    ]
    for input_size, dim_tuple, error, error_msg in test_cases:
        input = torch.randn(input_size, device=device)
        # vector_norm should accept a tuple or a list for dim arg
        for dim in [dim_tuple, list(dim_tuple)]:
            if error is None:
                torch.linalg.vector_norm(input, dim=dim)
            else:
                with self.assertRaises(error):
                    torch.linalg.vector_norm(input, dim=dim)
# Test that linalg.vector_norm throws an error if the out tensor's dtype
# does not match the expected result dtype
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble, torch.bfloat16, torch.float16)
def test_vector_norm_out_dtype_error(self, device, dtype):
    """vector_norm must reject a mismatched out= dtype, and a real dtype=
    for a complex input; matching combinations must succeed."""
    input = torch.randn(10, device=device, dtype=dtype)
    dtypes = [None, torch.float, torch.double, torch.cfloat, torch.cdouble, torch.float16, torch.bfloat16]

    for norm_dtype, out_dtype in product(dtypes, dtypes):
        if out_dtype is None:
            continue

        if norm_dtype is None:
            # without dtype=, a complex input yields the corresponding real dtype
            if dtype == torch.cfloat:
                expected_dtype = torch.float
            elif dtype == torch.cdouble:
                expected_dtype = torch.double
            else:
                expected_dtype = dtype
        else:
            expected_dtype = norm_dtype

        result = torch.empty((0), device=device, dtype=out_dtype)
        msg = f'norm_dtype: {norm_dtype}, out_dtype: {out_dtype}, expected_dtype: {expected_dtype}'

        if dtype.is_complex and norm_dtype is not None and not norm_dtype.is_complex:
            # real dtype= on a complex input is rejected outright
            with self.assertRaisesRegex(RuntimeError, r"linalg.vector_norm expected complex 'dtype'", msg=msg):
                torch.linalg.vector_norm(input, dtype=norm_dtype, out=result)
        elif out_dtype != expected_dtype:
            with self.assertRaisesRegex(RuntimeError, r'linalg.vector_norm expected out tensor dtype', msg=msg):
                torch.linalg.vector_norm(input, dtype=norm_dtype, out=result)
        else:
            # matching dtypes must succeed
            torch.linalg.vector_norm(input, dtype=norm_dtype, out=result)
@dtypes(torch.float, torch.double)
def test_norm_vector(self, device, dtype):
    """Compare torch.linalg.norm against np.linalg.norm for vector orders,
    including the out= variant and a very long vector for float64.

    Fix: ``run_test_case``'s second parameter was named ``p`` while the
    body read ``ord``, which silently resolved to the enclosing loop
    variable (correct only by coincidence); the parameter is now named
    ``ord`` so the function uses its own argument.
    """
    def run_test_case(input, ord, dim, keepdim):
        result = torch.linalg.norm(input, ord, dim, keepdim)
        input_numpy = input.cpu().numpy()
        result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)

        msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
        self.assertEqual(result, result_numpy, msg=msg)

        # out= variant must agree with the functional result
        result_out = torch.empty_like(result)
        torch.linalg.norm(input, ord, dim, keepdim, out=result_out)
        self.assertEqual(result, result_out, msg=msg)

    ord_vector = [0, 1, -1, 2, -2, 3, -3, 4.5, -4.5, inf, -inf]
    S = 10
    test_cases = [
        # input size, ord settings, dim
        ((S, ), ord_vector, None),
        ((S, ), ord_vector, 0),
        ((S, S, S), ord_vector, 0),
        ((S, S, S), ord_vector, 1),
        ((S, S, S), ord_vector, 2),
        ((S, S, S), ord_vector, -1),
        ((S, S, S), ord_vector, -2),
    ]
    L = 1_000_000
    # very long vector only for float64, where accumulated rounding error
    # stays small enough for the comparison to be meaningful
    if dtype == torch.double:
        test_cases.append(((L, ), ord_vector, None))
    for keepdim in [True, False]:
        for input_size, ord_settings, dim in test_cases:
            input = torch.randn(*input_size, dtype=dtype, device=device)
            for ord in ord_settings:
                run_test_case(input, ord, dim, keepdim)
    @skipMeta
    @skipCUDAIfNoMagma
    @dtypes(torch.float, torch.double)
    @precisionOverride({torch.float32: 2e-5})
    def test_norm_matrix(self, device, dtype):
        """Compare torch.linalg.norm matrix norms (and torch.linalg.matrix_norm
        where applicable) against numpy.linalg.norm, including the out= variant."""
        def run_test_case(input, ord, dim, keepdim):
            msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
            # NOTE(review): this `result` is unused -- check() recomputes it below.
            result = torch.linalg.norm(input, ord, dim, keepdim)
            input_numpy = input.cpu().numpy()
            result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)
            def check(op):
                # compare against NumPy and verify the out= variant agrees
                result = op(input, ord, dim, keepdim)
                self.assertEqual(result, result_numpy, msg=msg)
                result_out = torch.empty_like(result)
                op(input, ord, dim, keepdim, out=result_out)
                self.assertEqual(result, result_out, msg=msg)
            check(torch.linalg.norm)
            # matrix_norm is only exercised when both ord and dim are given
            if ord is not None and dim is not None:
                check(torch.linalg.matrix_norm)
        ord_matrix = [1, -1, 2, -2, inf, -inf, 'nuc', 'fro']
        S = 10
        test_cases = [
            # input size, ord settings, dim
            ((S, S), ord_matrix, None),
            ((S, S), ord_matrix, (0, 1)),
            ((S, S), ord_matrix, (1, 0)),
            ((S, S, S, S), ord_matrix, (2, 0)),
            ((S, S, S, S), ord_matrix, (-1, -2)),
            ((S, S, S, S), ord_matrix, (-1, -3)),
            ((S, S, S, S), ord_matrix, (-3, 2)),
        ]
        # large square input; only run for double precision
        L = 1_000
        if dtype == torch.double:
            test_cases.append(((L, L), ord_matrix, None))
        for keepdim in [True, False]:
            for input_size, ord_settings, dim in test_cases:
                input = torch.randn(*input_size, dtype=dtype, device=device)
                for ord in ord_settings:
                    # these ord values need LAPACK on CPU (see the guard below)
                    if self.device_type == 'cpu' and not torch._C.has_lapack and ord in [2, -2, 'nuc']:
                        continue
                    run_test_case(input, ord, dim, keepdim)
    @onlyCUDA
    @dtypes(torch.bfloat16, torch.float16)
    def test_norm_fused_type_promotion(self, device, dtype):
        """Check (via the profiler) that norm with a dtype= argument performs
        the type promotion inside the norm kernel rather than emitting a
        separate aten::to cast."""
        x = torch.randn(10, device=device, dtype=dtype)
        def profile_and_check(fn, x, kwargs, fn_name):
            with torch.profiler.profile(activities=(torch.profiler.ProfilerActivity.CPU,)) as p:
                fn(x, **kwargs, dtype=torch.float)
            # the profiled norm op must appear, and no explicit cast may appear
            self.assertTrue(fn_name in map(lambda e: e.name, p.events()))
            self.assertFalse("aten::to" in map(lambda e: e.name, p.events()))
        for f, kwargs, fn_name in zip((torch.norm, torch.linalg.vector_norm), ({"p" : 2}, {}),
                                      ("aten::norm", "aten::linalg_vector_norm")):
            profile_and_check(f, x, kwargs, fn_name)
    @skipMeta
    @skipCPUIfNoLapack
    @skipCUDAIfNoMagma
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @precisionOverride({torch.float32: 1e-3})
    def test_cond(self, device, dtype):
        """Compare torch.linalg.cond against numpy.linalg.cond for square,
        batched, empty-batch, non-square, singular and 0x0 inputs."""
        def run_test_case(input, p):
            result = torch.linalg.cond(input, p)
            result_numpy = np.linalg.cond(input.cpu().numpy(), p)
            self.assertEqual(result, result_numpy, rtol=1e-2, atol=self.precision, exact_dtype=False)
            self.assertEqual(result.shape, result_numpy.shape)
            # test out= variant
            out = torch.empty_like(result)
            ans = torch.linalg.cond(input, p, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(ans, result)
        norm_types = [1, -1, 2, -2, inf, -inf, 'fro', 'nuc', None]
        # square and batched-square inputs: all norm types are tested
        input_sizes = [(32, 32), (2, 3, 3, 3)]
        for input_size in input_sizes:
            input = torch.randn(*input_size, dtype=dtype, device=device)
            for p in norm_types:
                run_test_case(input, p)
        # inputs with a zero batch dimension
        input_sizes = [(0, 3, 3), (0, 2, 5, 5)]
        for input_size in input_sizes:
            input = torch.randn(*input_size, dtype=dtype, device=device)
            for p in norm_types:
                run_test_case(input, p)
        # non-square inputs (only tested with p in {2, -2, None})
        input_sizes = [(16, 32), (32, 16), (2, 3, 5, 3), (2, 3, 3, 5)]
        for input_size in input_sizes:
            input = torch.randn(*input_size, dtype=dtype, device=device)
            for p in [2, -2, None]:
                run_test_case(input, p)
        # singular input
        a = torch.eye(3, dtype=dtype, device=device)
        a[-1, -1] = 0  # make 'a' singular
        for p in norm_types:
            try:
                run_test_case(a, p)
            except np.linalg.LinAlgError:
                # NumPy may raise for singular input with some norm types
                pass
        # 0x0 trailing matrix dims: the result is expected to be zero
        input_sizes = [(0, 0), (2, 5, 0, 0)]
        for input_size in input_sizes:
            input = torch.randn(*input_size, dtype=dtype, device=device)
            for p in ['fro', 2]:
                expected_dtype = a.real.dtype if dtype.is_complex else dtype
                expected = torch.zeros(input_size[:-2], dtype=expected_dtype, device=device)
                actual = torch.linalg.cond(input, p)
                self.assertEqual(actual, expected)
    @skipMeta  # https://github.com/pytorch/pytorch/issues/53739
    @skipCPUIfNoLapack
    @skipCUDAIfNoMagma
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @precisionOverride({torch.float32: 1e-3})
    def test_cond_errors_and_warnings(self, device, dtype):
        """Error and warning behavior of torch.linalg.cond: shape validation,
        out= dtype/shape/device mismatches, singular batch entries and
        invalid norm types."""
        norm_types = [1, -1, 2, -2, inf, -inf, 'fro', 'nuc', None]
        # cond expects the input to be at least 2-dimensional
        a = torch.ones(3, dtype=dtype, device=device)
        for p in norm_types:
            with self.assertRaisesRegex(RuntimeError, r'at least 2 dimensions'):
                torch.linalg.cond(a, p)
        # for some norm types cond expects the input to be square
        a = torch.ones(3, 2, dtype=dtype, device=device)
        norm_types = [1, -1, inf, -inf, 'fro', 'nuc']
        for p in norm_types:
            with self.assertRaisesRegex(RuntimeError, r'must be batches of square matrices'):
                torch.linalg.cond(a, p)
        # if a non-empty out tensor with wrong shape is passed a warning is given
        a = torch.ones((2, 2), dtype=dtype, device=device)
        for p in ['fro', 2]:
            real_dtype = a.real.dtype if dtype.is_complex else dtype
            out = torch.empty(a.shape, dtype=real_dtype, device=device)
            with warnings.catch_warnings(record=True) as w:
                # Trigger warning
                torch.linalg.cond(a, p, out=out)
                # Check warning occurs
                self.assertEqual(len(w), 1)
                self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # dtypes should be safely castable
        out = torch.empty(0, dtype=torch.int, device=device)
        for p in ['fro', 2]:
            with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
                torch.linalg.cond(a, p, out=out)
        # device of out= should match the input's device
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, dtype=dtype, device=wrong_device)
            for p in ['fro', 2]:
                with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                    torch.linalg.cond(a, p, out=out)
        # For batched input, when at least one matrix in the batch is singular,
        # its condition number entry is expected to be inf.
        batch_dim = 3
        a = torch.eye(3, 3, dtype=dtype, device=device)
        a = a.reshape((1, 3, 3))
        a = a.repeat(batch_dim, 1, 1)
        a[1, -1, -1] = 0  # now a[1] is singular
        for p in [1, -1, inf, -inf, 'fro', 'nuc']:
            result = torch.linalg.cond(a, p)
            self.assertEqual(result[1], float('inf'))
        # check invalid norm type
        a = torch.ones(3, 3, dtype=dtype, device=device)
        for p in ['wrong_norm', 5]:
            with self.assertRaisesRegex(RuntimeError, f"linalg.cond got an invalid norm type: {p}"):
                torch.linalg.cond(a, p)
# This test calls torch.linalg.norm and numpy.linalg.norm with illegal arguments
# to ensure that they both throw errors
@dtypes(torch.float, torch.double)
def test_norm_errors(self, device, dtype):
def run_error_test_case(input, ord, dim, keepdim, error_type, error_regex):
test_case_info = (
f'test case input.size()={input.size()}, ord={ord}, dim={dim}, '
f'keepdim={keepdim}, dtype={dtype}')
with self.assertRaisesRegex(error_type, error_regex, msg=test_case_info):
torch.linalg.norm(input, ord, dim, keepdim)
input_numpy = input.cpu().numpy()
msg = f'numpy does not raise error but pytorch does, for case "{test_case_info}"'
with self.assertRaises(Exception, msg=test_case_info):
np.linalg.norm(input_numpy, ord, dim, keepdim)
S = 10
error_test_cases = [
# input size, p settings, dim, error type, error regex
((S, ), ['fro'], None, RuntimeError, r'order "fro" can only be used if either len\(dim\) == 2'),
((S, ), ['nuc'], None, RuntimeError, r'order "nuc" can only be used if either len\(dim\) == 2'),
((S, S), [3.5], None, RuntimeError, r'Order 3.5 not supported for matrix norm'),
((S, S), [0], None, RuntimeError, r'Order 0 not supported for matrix norm'),
((S, S), ['nuc'], 0, RuntimeError, r'order "nuc" can only be used if either len\(dim\) == 2'),
((S, S), ['fro'], 0, RuntimeError, r'order "fro" can only be used if either len\(dim\) == 2'),
((S, S), ['nuc'], (0, 0), RuntimeError, r'duplicate or invalid dimensions'),
((S, S), ['fro', 0], (0, 0), RuntimeError, r'Expected dims to be different'),
((S, S), ['fro', 'nuc', 0], (0, 4), IndexError, r'Dimension out of range'),
((S, ), [0], (4, ), IndexError, r'Dimension out of range'),
((S, ), [None], (0, 0), RuntimeError, r'dim 0 appears multiple times'),
((S, S, S), [1], (0, 1, 2), RuntimeError, r"'dim' must specify 1 or 2 dimensions"),
((S, S, S), [1], None, RuntimeError, r"'dim' must specify 1 or 2 dimensions"),
((S, S), ['garbage'], (0, 1), RuntimeError, r'Invalid norm order: garbage'),
]
for keepdim in [True, False]:
for input_size, ord_settings, dim, error_type, error_regex in error_test_cases:
input = torch.randn(*input_size, dtype=dtype, device=device)
for ord in ord_settings:
run_error_test_case(input, ord, dim, keepdim, error_type, error_regex)
    # Test complex number inputs for linalg.norm
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.cfloat, torch.cdouble)
    @precisionOverride({torch.cfloat: 2e-4})
    def test_norm_complex(self, device, dtype):
        """torch.linalg.norm on complex inputs: vector and matrix norms are
        compared against NumPy, including the out= variant."""
        def gen_error_message(input_size, ord, keepdim, dim=None):
            return "complex norm failed for input size %s, ord=%s, keepdim=%s, dim=%s" % (
                input_size, ord, keepdim, dim)
        vector_ords = [None, 0, 1, 2, 3, inf, -1, -2, -3, -inf]
        matrix_ords = [None, 'fro', 'nuc', 1, 2, inf, -1, -2, -inf]
        # Test supported ords
        for keepdim in [False, True]:
            # vector norm
            x = torch.randn(25, device=device, dtype=dtype)
            xn = x.cpu().numpy()
            for ord in vector_ords:
                res = torch.linalg.norm(x, ord, keepdim=keepdim).cpu()
                expected = np.linalg.norm(xn, ord, keepdims=keepdim)
                msg = gen_error_message(x.size(), ord, keepdim)
                self.assertEqual(res.shape, expected.shape, msg=msg)
                self.assertEqual(res, expected, msg=msg, exact_dtype=False)
                # out= variant must agree with the functional form
                res_out = torch.tensor([]).to(device)
                torch.linalg.norm(x, ord, keepdim=keepdim, out=res_out)
                self.assertEqual(res_out.shape, expected.shape, msg=msg)
                self.assertEqual(res_out.cpu(), expected, msg=msg, exact_dtype=False)
            # matrix norm
            x = torch.randn(25, 25, device=device, dtype=dtype)
            xn = x.cpu().numpy()
            for ord in matrix_ords:
                res = torch.linalg.norm(x, ord, keepdim=keepdim).cpu()
                expected = np.linalg.norm(xn, ord, keepdims=keepdim)
                msg = gen_error_message(x.size(), ord, keepdim)
                self.assertEqual(res.shape, expected.shape, msg=msg)
                self.assertEqual(res, expected, msg=msg, exact_dtype=False)
                # out= variant must agree with the functional form
                res_out = torch.tensor([]).to(device)
                torch.linalg.norm(x, ord, keepdim=keepdim, out=res_out)
                self.assertEqual(res_out.shape, expected.shape, msg=msg)
                self.assertEqual(res_out.cpu(), expected, msg=msg, exact_dtype=False)
# Test that linal.vector_norm gives the same result as numpy when inputs
# contain extreme values (inf, -inf, nan)
def test_vector_norm_extreme_values(self, device):
vector_ords = [0, 1, 2, 3, inf, -1, -2, -3, -inf]
vectors = []
for pair in itertools.product([inf, -inf, 0.0, nan, 1.0], repeat=2):
vectors.append(list(pair))
for vector in vectors:
x = torch.tensor(vector, device=device)
x_n = x.cpu().numpy()
for ord in vector_ords:
msg = f'ord={ord}, vector={vector}'
result = torch.linalg.vector_norm(x, ord=ord)
result_n = np.linalg.norm(x_n, ord=ord)
self.assertEqual(result, result_n, msg=msg)
    @skipMeta  # https://github.com/pytorch/pytorch/issues/54082
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.double)
    @precisionOverride({torch.float32: 2e-5})
    def test_matrix_norm(self, device, dtype):
        """Inputs for which torch.linalg.matrix_norm diverges from
        torch.linalg.norm: argument validation errors and the dim default."""
        # Test only inputs for which torch.linalg.matrix_norm diverges from torch.linalg.norm
        A = make_tensor((2, 2, 2), device, dtype)
        # matrix_norm requires at least a 2-D input
        with self.assertRaisesRegex(RuntimeError, r'linalg.matrix_norm\(\):.*must be a matrix.*'):
            torch.linalg.matrix_norm(make_tensor((2,), device, dtype))
        # dim must be a 2-tuple
        with self.assertRaisesRegex(RuntimeError, r'linalg.matrix_norm\(\):.*must be a 2-tuple.*'):
            torch.linalg.matrix_norm(A, dim=(0,))
        # unsupported ord values are rejected
        with self.assertRaisesRegex(RuntimeError, r'.*not supported.*'):
            torch.linalg.matrix_norm(A, ord=0)
        with self.assertRaisesRegex(RuntimeError, r'.*not supported.*'):
            torch.linalg.matrix_norm(A, ord=3.0)
        # Test dim=None behavior: defaults to the last two dimensions
        ref = torch.linalg.norm(A, dim=(-2, -1))
        res = torch.linalg.matrix_norm(A)
        self.assertEqual(ref, res)
    # Test that linalg.norm gives the same result as numpy when inputs
    # contain extreme values (inf, -inf, nan)
    @unittest.skipIf(IS_WINDOWS, "Skipped on Windows!")
    @unittest.skipIf(IS_MACOS, "Skipped on MacOS!")
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    def test_norm_extreme_values(self, device):
        """torch.linalg.norm must match numpy.linalg.norm for vectors and
        matrices containing extreme values; known-broken CUDA matrix cases
        are skipped via is_broken_matrix_norm_case."""
        vector_ords = [0, 1, 2, 3, inf, -1, -2, -3, -inf]
        matrix_ords = ['fro', 'nuc', 1, 2, inf, -1, -2, -inf]
        vectors = []
        matrices = []
        # every length-2 combination of extreme and ordinary values, laid out
        # as a vector, a 1x2 matrix and a 2x1 matrix
        for pair in itertools.product([inf, -inf, 0.0, nan, 1.0], repeat=2):
            vectors.append(list(pair))
            matrices.append([[pair[0], pair[1]]])
            matrices.append([[pair[0]], [pair[1]]])
        for vector in vectors:
            x = torch.tensor(vector).to(device)
            x_n = x.cpu().numpy()
            for ord in vector_ords:
                msg = f'ord={ord}, vector={vector}'
                result = torch.linalg.norm(x, ord=ord)
                result_n = np.linalg.norm(x_n, ord=ord)
                self.assertEqual(result, result_n, msg=msg)
        # TODO: Remove this function once the broken cases are fixed
        def is_broken_matrix_norm_case(ord, x):
            if self.device_type == 'cuda':
                if x.size() == torch.Size([1, 2]):
                    if ord in ['nuc', 2, -2] and isnan(x[0][0]) and x[0][1] == 1:
                        # These cases are broken because of an issue with svd
                        # https://github.com/pytorch/pytorch/issues/43567
                        return True
                if ord in ['nuc', 2, -2]:
                    # These cases are broken because of another issue with svd
                    # https://github.com/pytorch/pytorch/issues/52633
                    return True
            return False
        for matrix in matrices:
            x = torch.tensor(matrix).to(device)
            x_n = x.cpu().numpy()
            for ord in matrix_ords:
                msg = f'ord={ord}, matrix={matrix}'
                if is_broken_matrix_norm_case(ord, x):
                    continue
                else:
                    result = torch.linalg.norm(x, ord=ord)
                    result_n = np.linalg.norm(x_n, ord=ord)
                    self.assertEqual(result, result_n, msg=msg)
    # Test degenerate shape results match numpy for linalg.norm vector norms
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @unittest.skipIf(TEST_WITH_ASAN, "Skipped on ASAN since it checks for undefined behavior.")
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_norm_vector_degenerate_shapes(self, device, dtype):
        """Vector norms on empty/degenerate shapes must match NumPy; negative
        orders, and ord=inf over an empty reduction, must raise."""
        def run_test_case(input, ord, dim, keepdim):
            msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
            # negative orders raise on these degenerate inputs, as does
            # ord=inf when the reduced dimension is empty (or dim is None)
            should_error = False
            if ord is not None and ord < 0:
                should_error = True
            elif ord == inf:
                if dim is None or input.size(dim) == 0:
                    should_error = True
            if should_error:
                with self.assertRaises(RuntimeError):
                    torch.linalg.norm(input, ord, dim, keepdim)
            else:
                input_numpy = input.cpu().numpy()
                result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)
                result = torch.linalg.norm(input, ord, dim, keepdim)
                self.assertEqual(result, result_numpy, msg=msg)
        ord_vector = [0, 0.5, 1, 2, 3, inf, -0.5, -1, -2, -3, -inf, None]
        S = 10
        test_cases = [
            # input size, dim
            ((0, ), None),
            ((0, S), 0),
            ((0, S), 1),
            ((S, 0), 0),
            ((S, 0), 1),
        ]
        for keepdim in [True, False]:
            for input_size, dim in test_cases:
                input = torch.randn(*input_size, dtype=dtype, device=device)
                for ord in ord_vector:
                    run_test_case(input, ord, dim, keepdim)
    # Test degenerate shape results match numpy for linalg.norm matrix norms
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_norm_matrix_degenerate_shapes(self, device, dtype):
        """Matrix norms on degenerate shapes: either both torch and NumPy
        raise, or their results agree."""
        def run_test_case(input, ord, dim, keepdim, should_error):
            msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
            input_numpy = input.cpu().numpy()
            ops = [torch.linalg.norm]
            # matrix_norm is only exercised when both ord and dim are given
            if ord is not None and dim is not None:
                ops.append(torch.linalg.matrix_norm)
            if should_error:
                # NumPy raises ValueError while torch raises IndexError here
                with self.assertRaises(ValueError):
                    np.linalg.norm(input_numpy, ord, dim, keepdim)
                for op in ops:
                    with self.assertRaises(IndexError):
                        op(input, ord, dim, keepdim)
            else:
                result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)
                for op in ops:
                    result = op(input, ord, dim, keepdim)
                    self.assertEqual(result, result_numpy, msg=msg)
        ord_matrix = ['fro', 'nuc', 1, 2, inf, -1, -2, -inf, None]
        S = 10
        test_cases = [
            # input size, p settings that cause error, dim
            ((0, 0), [1, 2, inf, -1, -2, -inf], None),
            ((0, S), [2, inf, -2, -inf], None),
            ((S, 0), [1, 2, -1, -2], None),
            ((S, S, 0), [], (0, 1)),
            ((1, S, 0), [], (0, 1)),
            ((0, 0, S), [1, 2, inf, -1, -2, -inf], (0, 1)),
            ((0, 0, S), [1, 2, inf, -1, -2, -inf], (1, 0)),
        ]
        for keepdim in [True, False]:
            for input_size, error_ords, dim in test_cases:
                input = torch.randn(*input_size, dtype=dtype, device=device)
                for ord in ord_matrix:
                    run_test_case(input, ord, dim, keepdim, ord in error_ords)
def test_norm_fastpaths(self, device):
x = torch.randn(3, 5, device=device)
# slow path
result = torch.linalg.norm(x, 4.5, 1)
expected = torch.pow(x.abs().pow(4.5).sum(1), 1.0 / 4.5)
self.assertEqual(result, expected)
# fast 0-norm
result = torch.linalg.norm(x, 0, 1)
expected = (x != 0).type_as(x).sum(1)
self.assertEqual(result, expected)
# fast 1-norm
result = torch.linalg.norm(x, 1, 1)
expected = x.abs().sum(1)
self.assertEqual(result, expected)
# fast 2-norm
result = torch.linalg.norm(x, 2, 1)
expected = torch.sqrt(x.pow(2).sum(1))
self.assertEqual(result, expected)
# fast 3-norm
result = torch.linalg.norm(x, 3, 1)
expected = torch.pow(x.pow(3).abs().sum(1), 1.0 / 3.0)
self.assertEqual(result, expected)
    @skipCPUIfNoLapack
    @skipCUDAIfNoMagma
    @dtypes(*floating_and_complex_types())
    def test_old_eig_basic(self, device, dtype):
        """Basic checks for the deprecated torch.eig: the plain,
        eigenvectors=True and out= call styles must agree, and the result
        must match numpy.linalg.eig."""
        a = torch.tensor([[1.96, 0.00, 0.00, 0.00, 0.00],
                          [-6.49, 3.80, 0.00, 0.00, 0.00],
                          [-0.47, -6.39, 4.17, 0.00, 0.00],
                          [-7.20, 1.50, -1.51, 5.70, 0.00],
                          [-0.65, -6.34, 2.67, 1.80, -7.10]],
                         dtype=dtype, device=device).t()
        e = torch.eig(a)[0]
        ee, vv = torch.eig(a, True)
        te = torch.tensor((), dtype=dtype, device=device)
        tv = torch.tensor((), dtype=dtype, device=device)
        eee, vvv = torch.eig(a, True, out=(te, tv))
        # all call styles must agree on eigenvalues and eigenvectors
        self.assertEqual(e, ee, atol=1e-12, rtol=0)
        self.assertEqual(ee, eee, atol=1e-12, rtol=0)
        self.assertEqual(ee, te, atol=1e-12, rtol=0)
        self.assertEqual(vv, vvv, atol=1e-12, rtol=0)
        self.assertEqual(vv, tv, atol=1e-12, rtol=0)
        #
        # compare with numpy
        np_e, np_v = np.linalg.eig(a.cpu().numpy())
        if dtype.is_complex:
            self.assertEqual(ee, np_e)
        else:
            # ee.shape == (n, 2), where the two columns contain the real and
            # imaginary parts of each eigenvalue
            self.assertEqual(ee[:, 0], np_e)  # real part
            self.assertEqual(ee[:, 1], torch.zeros(ee.shape[0], dtype=dtype))  # imaginary part
        self.assertEqual(vv, np_v)
    @skipCPUIfNoLapack
    @skipCUDAIfNoMagma
    @dtypes(torch.double, torch.float)
    def test_old_eig_reuse(self, device, dtype):
        """torch.eig with preallocated out= tensors: the decomposition must
        reconstruct X, and the same out tensors must be reusable for a
        second call."""
        X = torch.randn(4, 4, dtype=dtype, device=device)
        X = torch.mm(X.t(), X)  # symmetric PSD, so V diag(e) V^T reconstructs X
        e = torch.zeros(4, 2, dtype=dtype, device=device)
        v = torch.zeros(4, 4, dtype=dtype, device=device)
        torch.eig(X, True, out=(e, v))
        # reconstruct X from the (real parts of the) eigenpairs
        Xhat = np.matmul(np.matmul(v.cpu(), torch.diag(e.select(1, 0)).cpu()), v.t().cpu())
        if dtype is torch.float:
            atol = 1e-7
            rtol = 1e-5
        else:
            atol = 1e-8
            rtol = 0
        self.assertEqual(X, Xhat, atol=atol, rtol=rtol, msg='VeV\' wrong')
        self.assertTrue(v.is_contiguous(), 'V is not contiguous')
        # second call reusing the same out tensors
        torch.eig(X, True, out=(e, v))
        Xhat = np.matmul(v.cpu(), np.matmul(e.select(1, 0).diag().cpu(), v.t().cpu()))
        self.assertEqual(X, Xhat, atol=atol, rtol=rtol, msg='VeV\' wrong')
        self.assertTrue(v.is_contiguous(), 'V is not contiguous')
    @skipCPUIfNoLapack
    @skipCUDAIfNoMagma
    @dtypes(torch.double, torch.float)
    def test_old_eig_non_contiguous(self, device, dtype):
        """torch.eig must accept non-contiguous out= tensors and still produce
        a decomposition that reconstructs the input."""
        X = torch.randn(4, 4, dtype=dtype, device=device)
        X = torch.mm(X.t(), X)  # symmetric PSD input
        # strided views: out tensors are deliberately non-contiguous
        e = torch.zeros(4, 2, 2, dtype=dtype, device=device)[:, 1]
        v = torch.zeros(4, 2, 4, dtype=dtype, device=device)[:, 1]
        self.assertFalse(v.is_contiguous(), 'V is contiguous')
        self.assertFalse(e.is_contiguous(), 'E is contiguous')
        torch.eig(X, True, out=(e, v))
        # reconstruct X from the (real parts of the) eigenpairs
        Xhat = np.matmul(np.matmul(v.cpu(), torch.diag(e.cpu().select(1, 0))), v.t().cpu())
        if dtype is torch.float:
            atol = 1e-7
            rtol = 1e-5
        else:
            atol = 1e-8
            rtol = 0
        self.assertEqual(X, Xhat, atol=atol, rtol=rtol, msg='VeV\' wrong')
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(torch.double, torch.float)
def test_old_eig_invalid_input(self, device, dtype):
self.assertRaisesRegex(
RuntimeError,
'input should be 2 dimensional',
lambda: torch.eig(torch.ones((2))))
self.assertRaisesRegex(
RuntimeError,
'input should be square',
lambda: torch.eig(torch.ones((2, 3))))
self.assertRaisesRegex(
RuntimeError,
'input should not contain infs or NaNs',
lambda: torch.eig(np.inf * torch.ones((2, 2))))
self.assertRaisesRegex(
RuntimeError,
'input should not contain infs or NaNs',
lambda: torch.eig(np.nan * torch.ones((2, 2))))
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.double, torch.float)
    def test_old_eig_out(self, device, dtype):
        """out= behavior of the deprecated torch.eig: out tensors are written
        in-place, the eigenvectors out is untouched when eigenvectors=False,
        and wrong-dtype out tensors are rejected."""
        # torch.eig cannot use the generic tensor_op_tests out= machinery
        # because its signature is irregular (there are *two* output tensors),
        # so the out= behavior is tested manually here
        t = torch.randn(10, 10, dtype=dtype, device=device)
        evals, evecs = torch.eig(t, eigenvectors=True)
        #
        # check that the out= version computes the same values as the normal one
        out_evals = torch.empty_like(evals)
        out_evecs = torch.empty_like(evecs)
        evals2, evecs2 = torch.eig(t, eigenvectors=True, out=(out_evals, out_evecs))
        # check that the out tensors were used in-place
        self.assertEqual(evals2.data_ptr(), out_evals.data_ptr())
        self.assertEqual(evecs2.data_ptr(), out_evecs.data_ptr())
        # check that the result is the same as the non-out version
        self.assertEqual(evals, out_evals)
        self.assertEqual(evecs, out_evecs)
        #
        # check what happens in the eigenvectors=False case
        out_evals = torch.empty_like(evals)
        out_evecs = torch.tensor([1, 2, 3], dtype=dtype, device=device)
        evals2, evecs2 = torch.eig(t, eigenvectors=False, out=(out_evals, out_evecs))
        # check that the out_evals was used in-place
        self.assertEqual(evals2.data_ptr(), out_evals.data_ptr())
        self.assertEqual(evals, out_evals)
        # check that out_evecs was NOT touched at all
        assert out_evecs.tolist() == [1, 2, 3]
        #
        # check that we complain if we pass an out vector of the wrong dtype
        wrong_out = torch.empty((0, 0), dtype=int)
        with self.assertRaisesRegex(RuntimeError, r"Expected .* but got .*"):
            torch.eig(t, eigenvectors=True, out=(wrong_out, out_evecs))
        with self.assertRaisesRegex(RuntimeError, r"Expected .* but got .*"):
            torch.eig(t, eigenvectors=True, out=(out_evals, wrong_out))
    @skipCPUIfNoLapack
    @skipCUDAIfNoMagma
    # NumPy computes only in float64 and complex128 precisions
    # for float32 or complex64 results might be very different from float64 or complex128
    @dtypes(torch.float64, torch.complex128)
    def test_eig_numpy(self, device, dtype):
        """Compare torch.linalg.eig with numpy.linalg.eig on random (and
        symmetric) inputs; eigenpairs are sorted before comparison because
        their order is not specified."""
        def run_test(shape, *, symmetric=False):
            from torch.testing._internal.common_utils import random_symmetric_matrix
            if not dtype.is_complex and symmetric:
                # for symmetric real-valued inputs eigenvalues and eigenvectors have imaginary part equal to zero
                # unlike NumPy the result is not cast to float32 or float64 dtype in this case
                a = random_symmetric_matrix(shape[-1], *shape[:-2], dtype=dtype, device=device)
            else:
                a = make_tensor(shape, dtype=dtype, device=device)
            actual = torch.linalg.eig(a)
            # compare with NumPy
            # the eigenvalues are not necessarily ordered
            # so order of NumPy and PyTorch can be different
            expected = np.linalg.eig(a.cpu().numpy())
            # sort NumPy output
            ind = np.argsort(expected[0], axis=-1)[::-1]
            expected = (np.take_along_axis(expected[0], ind, axis=-1), np.take_along_axis(expected[1], ind[:, None], axis=-1))
            # sort PyTorch output
            # torch.argsort doesn't work with complex inputs, NumPy sorting on CPU is used instead
            ind = np.argsort(actual[0].cpu().numpy(), axis=-1)[::-1]
            actual_np = [x.cpu().numpy() for x in actual]
            sorted_actual = (
                np.take_along_axis(actual_np[0], ind, axis=-1),
                np.take_along_axis(actual_np[1], ind[:, None], axis=-1))
            # eigenvectors are compared by absolute value since they are only
            # defined up to a sign/phase factor
            self.assertEqual(expected[0], sorted_actual[0], exact_dtype=False)
            self.assertEqual(abs(expected[1]), abs(sorted_actual[1]), exact_dtype=False)
        shapes = [(0, 0),  # Empty matrix
                  (5, 5),  # Single matrix
                  (0, 0, 0), (0, 5, 5),  # Zero batch dimension tensors
                  (2, 5, 5),  # 3-dim tensors
                  (2, 1, 5, 5)]  # 4-dim tensors
        for shape in shapes:
            run_test(shape)
            run_test(shape, symmetric=True)
    @onlyCUDA
    @skipCUDAIfNoMagma
    @dtypes(*floating_and_complex_types())
    def test_eig_compare_backends(self, device, dtype):
        """torch.linalg.eig on CUDA must agree with the CPU backend."""
        def run_test(shape, *, symmetric=False):
            from torch.testing._internal.common_utils import random_symmetric_matrix
            if not dtype.is_complex and symmetric:
                # real symmetric input
                a = random_symmetric_matrix(shape[-1], *shape[:-2], dtype=dtype, device=device)
            else:
                a = make_tensor(shape, dtype=dtype, device=device)
            actual = torch.linalg.eig(a)
            complementary_device = 'cpu'
            # compare with the CPU result
            expected = torch.linalg.eig(a.to(complementary_device))
            self.assertEqual(expected[0], actual[0])
            self.assertEqual(expected[1], actual[1])
        shapes = [(0, 0),  # Empty matrix
                  (5, 5),  # Single matrix
                  (0, 0, 0), (0, 5, 5),  # Zero batch dimension tensors
                  (2, 5, 5),  # 3-dim tensors
                  (2, 1, 5, 5)]  # 4-dim tensors
        for shape in shapes:
            run_test(shape)
            run_test(shape, symmetric=True)
    @slowTest
    @onlyCUDA
    @skipCUDAIfNoMagma
    @dtypes(torch.float32)
    def test_eig_check_magma(self, device, dtype):
        """Large-input smoke test for torch.linalg.eig on CUDA, verifying the
        eigen-equation A v = w v holds.

        NOTE(review): the 2049x2049 size is presumably chosen to cross the
        threshold that dispatches to the MAGMA backend -- confirm threshold.
        """
        shape = (2049, 2049)
        a = make_tensor(shape, dtype=dtype, device=device)
        w, v = torch.linalg.eig(a)
        # check correctness using the eigendecomposition identity
        self.assertEqual(a.to(v.dtype) @ v, w * v, atol=1e-3, rtol=1e-3)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_eig_errors_and_warnings(self, device, dtype):
        """Error and warning behavior of torch.linalg.eig: shape validation
        and out= dtype/shape/device mismatches."""
        # eig requires the input to be at least 2-dimensional
        a = make_tensor(2, dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
            torch.linalg.eig(a)
        # eig requires square matrices
        a = make_tensor((2, 3), dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
            torch.linalg.eig(a)
        # real input whose eigenvalues are complex needs complex-capable out tensors
        if not dtype.is_complex:
            a = torch.tensor([[3., -2.], [4., -1.]], dtype=dtype, device=device)
            out0 = torch.empty(0, device=device, dtype=dtype)
            out1 = torch.empty(0, device=device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "Expected eigenvalues to be safely castable"):
                torch.linalg.eig(a, out=(out0, out1))
            out0 = torch.empty(0, device=device, dtype=torch.complex128)
            with self.assertRaisesRegex(RuntimeError, "Expected eigenvectors to be safely castable"):
                torch.linalg.eig(a, out=(out0, out1))
        # dtypes should be safely castable
        a = make_tensor((3, 3), dtype=dtype, device=device)
        out0 = torch.empty(0, dtype=torch.int, device=device)
        out1 = torch.empty(0, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got eigenvalues with dtype Int"):
            torch.linalg.eig(a, out=(out0, out1))
        out0 = torch.empty(0, dtype=torch.complex128, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got eigenvectors with dtype Int"):
            torch.linalg.eig(a, out=(out0, out1))
        # if non-empty out tensors with wrong shape are passed a warning is given
        a = make_tensor((3, 3), dtype=dtype, device=device)
        out0 = torch.empty(1, device=device, dtype=torch.complex128)
        out1 = torch.empty(1, device=device, dtype=torch.complex128)
        with warnings.catch_warnings(record=True) as w:
            torch.linalg.eig(a, out=(out0, out1))
            # one resize warning per out tensor
            self.assertEqual(len(w), 2)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
            self.assertTrue("An output with one or more elements was resized" in str(w[-2].message))
        # device of the out= tensors should match the input's device
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out_w = torch.empty(0, device=wrong_device, dtype=torch.complex128)
            out_v = torch.empty(0, device=device, dtype=torch.complex128)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.eig(a, out=(out_w, out_v))
            out_w = torch.empty(0, device=device, dtype=torch.complex128)
            out_v = torch.empty(0, device=wrong_device, dtype=torch.complex128)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.eig(a, out=(out_w, out_v))
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
def test_eig_with_nan(self, device, dtype):
for val in [np.inf, np.nan]:
for batch_dim in [(), (10,)]:
a = make_tensor((*batch_dim, 5, 5), device=device, dtype=dtype)
a[..., -1, -1] = val
with self.assertRaisesRegex(RuntimeError, "torch.linalg.eig: input tensor should not"):
torch.linalg.eig(a)
    @skipCPUIfNoLapack
    @skipCUDAIfNoMagma
    @dtypes(torch.float64, torch.complex128)
    def test_eigvals_numpy(self, device, dtype):
        """Compare torch.linalg.eigvals with numpy.linalg.eigvals; eigenvalues
        are sorted before comparison because their order is not specified."""
        def run_test(shape, *, symmetric=False):
            from torch.testing._internal.common_utils import random_symmetric_matrix
            if not dtype.is_complex and symmetric:
                # real symmetric input
                a = random_symmetric_matrix(shape[-1], *shape[:-2], dtype=dtype, device=device)
            else:
                a = make_tensor(shape, dtype=dtype, device=device)
            actual = torch.linalg.eigvals(a)
            expected = np.linalg.eigvals(a.cpu().numpy())
            # sort NumPy output
            ind = np.argsort(expected, axis=-1)[::-1]
            expected = np.take_along_axis(expected, ind, axis=-1)
            # sort PyTorch output with NumPy on CPU; torch sorting fails on
            # complex inputs:
            # RuntimeError: _th_sort not supported on CUDAType for ComplexDouble
            # RuntimeError: "sorting_kernel_method_name" not implemented for 'ComplexDouble'
            ind = np.argsort(actual.cpu().numpy(), axis=-1)[::-1]
            actual_np = actual.cpu().numpy()
            sorted_actual = np.take_along_axis(actual_np, ind, axis=-1)
            self.assertEqual(expected, sorted_actual, exact_dtype=False)
        shapes = [(0, 0),  # Empty matrix
                  (5, 5),  # Single matrix
                  (0, 0, 0), (0, 5, 5),  # Zero batch dimension tensors
                  (2, 5, 5),  # 3-dim tensors
                  (2, 1, 5, 5)]  # 4-dim tensors
        for shape in shapes:
            run_test(shape)
            run_test(shape, symmetric=True)
    @onlyCUDA
    @skipCUDAIfNoMagma
    @dtypes(*floating_and_complex_types())
    def test_eigvals_compare_backends(self, device, dtype):
        """torch.linalg.eigvals on CUDA must agree with the CPU backend, for
        the functional form and for contiguous/non-contiguous out= variants."""
        def run_test(shape, *, symmetric=False):
            from torch.testing._internal.common_utils import random_symmetric_matrix
            if not dtype.is_complex and symmetric:
                # for symmetric real-valued inputs eigenvalues and eigenvectors have imaginary part equal to zero
                a = random_symmetric_matrix(shape[-1], *shape[:-2], dtype=dtype, device=device)
            else:
                a = make_tensor(shape, dtype=dtype, device=device)
            actual = torch.linalg.eigvals(a)
            complementary_device = 'cpu'
            # compare with CPU
            expected = torch.linalg.eigvals(a.to(complementary_device))
            self.assertEqual(expected, actual)
            # check out= variant (eigvals output is always complex)
            complex_dtype = dtype
            if not dtype.is_complex:
                complex_dtype = torch.complex128 if dtype == torch.float64 else torch.complex64
            out = torch.empty(0, dtype=complex_dtype, device=device)
            ans = torch.linalg.eigvals(a, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(expected.to(complex_dtype), out)
            # check non-contiguous out
            if a.numel() > 0:
                out = torch.empty(2 * shape[0], *shape[1:-1], dtype=complex_dtype, device=device)[::2]
                self.assertFalse(out.is_contiguous())
                ans = torch.linalg.eigvals(a, out=out)
                self.assertEqual(ans, out)
                self.assertEqual(expected.to(complex_dtype), out)
        shapes = [(0, 0),  # Empty matrix
                  (5, 5),  # Single matrix
                  (0, 0, 0), (0, 5, 5),  # Zero batch dimension tensors
                  (2, 5, 5),  # 3-dim tensors
                  (2, 1, 5, 5)]  # 4-dim tensors
        for shape in shapes:
            run_test(shape)
            run_test(shape, symmetric=True)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_eigvals_errors_and_warnings(self, device, dtype):
        """Error and warning behavior of torch.linalg.eigvals: shape checks
        and out= dtype/shape/device mismatches."""
        # eig requires the input to be at least 2 dimensional tensor
        a = make_tensor(2, dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
            torch.linalg.eigvals(a)
        # eig requires a square matrix
        a = make_tensor((2, 3), dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
            torch.linalg.eigvals(a)
        # if out tensor with floating dtype is passed for complex output an error is thrown
        if not dtype.is_complex:
            # The characteristic equation is p(λ) = λ^2 − 2λ + 5 = 0, with roots λ = 1±2i
            a = torch.tensor([[3., -2.], [4., -1.]], dtype=dtype, device=device)
            out = torch.empty(0, device=device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "Expected eigenvalues to be safely castable"):
                torch.linalg.eigvals(a, out=out)
        # dtypes should be safely castable
        a = make_tensor((3, 3), dtype=dtype, device=device)
        out = torch.empty(0, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got eigenvalues with dtype Int"):
            torch.linalg.eigvals(a, out=out)
        # if non-empty out tensor with wrong shape is passed a warning is given
        out = torch.empty(1, device=device, dtype=torch.complex128)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.eigvals(a, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out_w = torch.empty(0, device=wrong_device, dtype=torch.complex128)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.eigvals(a, out=out_w)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    def test_norm_old(self, device):
        """Compare legacy torch.norm against numpy.linalg.norm across p values, dims and keepdim."""
        def gen_error_message(input_size, p, keepdim, dim=None):
            return "norm failed for input size %s, p=%s, keepdim=%s, dim=%s" % (
                input_size, p, keepdim, dim)

        for keepdim in [False, True]:
            # full reduction
            x = torch.randn(25, device=device)
            xn = x.cpu().numpy()
            for p in [0, 1, 2, 3, 4, inf, -inf, -1, -2, -3, 1.5]:
                res = x.norm(p, keepdim=keepdim).cpu()
                expected = np.linalg.norm(xn, p, keepdims=keepdim)
                self.assertEqual(res, expected, atol=1e-5, rtol=0, msg=gen_error_message(x.size(), p, keepdim))

            # one dimension
            x = torch.randn(25, 25, device=device)
            xn = x.cpu().numpy()
            for p in [0, 1, 2, 3, 4, inf, -inf, -1, -2, -3]:
                dim = 1
                res = x.norm(p, dim, keepdim=keepdim).cpu()
                expected = np.linalg.norm(xn, p, dim, keepdims=keepdim)
                msg = gen_error_message(x.size(), p, keepdim, dim)
                self.assertEqual(res.shape, expected.shape, msg=msg)
                self.assertEqual(res, expected, msg=msg)

            # matrix norm
            for p in ['fro', 'nuc']:
                res = x.norm(p, keepdim=keepdim).cpu()
                expected = np.linalg.norm(xn, p, keepdims=keepdim)
                msg = gen_error_message(x.size(), p, keepdim)
                self.assertEqual(res.shape, expected.shape, msg=msg)
                self.assertEqual(res, expected, msg=msg)

            # zero dimensions
            x = torch.randn((), device=device)
            xn = x.cpu().numpy()
            res = x.norm(keepdim=keepdim).cpu()
            expected = np.linalg.norm(xn, keepdims=keepdim)
            msg = gen_error_message(x.size(), None, keepdim)
            self.assertEqual(res.shape, expected.shape, msg=msg)
            self.assertEqual(res, expected, msg=msg)

            # larger tensor sanity check: ||ones(4n)|| == 2*||ones(n)|| for the 2-norm
            self.assertEqual(
                2 * torch.norm(torch.ones(10000), keepdim=keepdim),
                torch.norm(torch.ones(40000), keepdim=keepdim))

            # matrix norm with non-square >2-D tensors, all combinations of reduction dims
            x = torch.randn(5, 6, 7, 8, device=device)
            xn = x.cpu().numpy()
            for p in ['fro', 'nuc']:
                for dim in itertools.product(*[list(range(4))] * 2):
                    if dim[0] == dim[1]:
                        continue
                    res = x.norm(p=p, dim=dim, keepdim=keepdim).cpu()
                    expected = np.linalg.norm(xn, ord=p, axis=dim, keepdims=keepdim)
                    msg = gen_error_message(x.size(), p, keepdim, dim)
                    self.assertEqual(res.shape, expected.shape, msg=msg)
                    self.assertEqual(res, expected, msg=msg)
# Test that torch.norm with p=+/-inf propagates NaN
def test_norm_old_nan_propagation(self, device):
ords = [inf, -inf]
for pair in itertools.product([0.0, nan, 1.0], repeat=2):
x = torch.tensor(list(pair), device=device)
for ord in ords:
result = torch.norm(x, p=ord)
result_check = torch.linalg.norm(x, ord=ord)
self.assertEqual(result, result_check)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    def test_norm_complex_old(self, device):
        """Legacy torch.norm on complex inputs must match numpy.linalg.norm."""
        def gen_error_message(input_size, p, keepdim, dim=None):
            return "complex norm failed for input size %s, p=%s, keepdim=%s, dim=%s" % (
                input_size, p, keepdim, dim)

        for keepdim in [False, True]:
            # vector norm
            x = torch.randn(25, device=device) + 1j * torch.randn(25, device=device)
            xn = x.cpu().numpy()
            for p in [0, 1, 2, 3, inf, -1, -2, -3, -inf]:
                res = x.norm(p, keepdim=keepdim).cpu()
                expected = np.linalg.norm(xn, p, keepdims=keepdim)
                msg = gen_error_message(x.size(), p, keepdim)
                self.assertEqual(res.shape, expected.shape, msg=msg)
                self.assertEqual(res, expected, msg=msg)

            # matrix norm
            x = torch.randn(25, 25, device=device) + 1j * torch.randn(25, 25, device=device)
            xn = x.cpu().numpy()
            for p in ['nuc', 'fro']:
                res = x.norm(p, keepdim=keepdim).cpu()
                expected = np.linalg.norm(xn, p, keepdims=keepdim)
                msg = gen_error_message(x.size(), p, keepdim)
                self.assertEqual(res.shape, expected.shape, msg=msg)
                # looser tolerances here than for the vector norms above
                self.assertEqual(res, expected, msg=msg, rtol=1.3e-6, atol=3e-4)
    # Ensure torch.norm with p='fro' and p=2 give the same results for mutually supported input combinations
    @dtypes(torch.float)
    def test_norm_fro_2_equivalence_old(self, device, dtype):
        """p='fro' and p=2 must agree for every reduction the two norms both support."""
        input_sizes = [
            (0,),
            (10,),
            (0, 0),
            (4, 30),
            (0, 45),
            (100, 0),
            (45, 10, 23),
            (0, 23, 59),
            (23, 0, 37),
            (34, 58, 0),
            (0, 0, 348),
            (0, 3434, 0),
            (0, 0, 0),
            (5, 3, 8, 1, 3, 5)]

        for input_size in input_sizes:
            a = make_tensor(input_size, device, dtype, low=-9, high=9)

            # Try full reduction
            dim_settings = [None]

            # Try all possible 1-D reductions
            dim_settings += list(range(-a.dim(), a.dim()))

            def wrap_dim(dim, ndims):
                # Normalize a possibly-negative dim to its positive index.
                assert (dim < ndims) and (dim >= -ndims)
                if dim >= 0:
                    return dim
                else:
                    return dim + ndims

            # Try all possible 2-D reductions (skipping pairs that alias the same dim)
            dim_settings += [
                (d0, d1) for d0, d1 in itertools.combinations(range(-a.dim(), a.dim()), 2)
                if wrap_dim(d0, a.dim()) != wrap_dim(d1, a.dim())]

            for dim in dim_settings:
                for keepdim in [True, False]:
                    a_norm_2 = torch.norm(a, p=2, dim=dim, keepdim=keepdim)
                    a_norm_fro = torch.norm(a, p='fro', dim=dim, keepdim=keepdim)
                    self.assertEqual(a_norm_fro, a_norm_2)
@skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    def test_nuclear_norm_axes_small_brute_force_old(self, device):
        """Brute-force nuclear norm over small shapes, all dim pairs and several memory layouts."""
        def check_single_nuclear_norm(x, axes):
            if self.device_type != 'cpu' and randrange(100) < 95:
                return  # too many cpu <==> device copies

            a = np.array(x.cpu(), copy=False)
            expected = np.linalg.norm(a, "nuc", axis=axes)

            ans = torch.norm(x, "nuc", dim=axes)
            self.assertTrue(ans.is_contiguous())
            self.assertEqual(ans.shape, expected.shape)
            self.assertEqual(ans.cpu(), expected, rtol=1e-02, atol=1e-03, equal_nan=True)

            # same checks for the out= variant, which must return the out tensor itself
            out = torch.zeros(expected.shape, dtype=x.dtype, device=x.device)
            ans = torch.norm(x, "nuc", dim=axes, out=out)
            self.assertIs(ans, out)
            self.assertTrue(ans.is_contiguous())
            self.assertEqual(ans.shape, expected.shape)
            self.assertEqual(ans.cpu(), expected, rtol=1e-02, atol=1e-03, equal_nan=True)

        for n in range(1, 3):
            for m in range(1, 3):
                for axes in itertools.permutations([0, 1], 2):
                    # 2d, inner dimensions C
                    x = torch.randn(n, m, device=device)
                    check_single_nuclear_norm(x, axes)

                    # 2d, inner dimensions Fortran
                    x = torch.randn(m, n, device=device).mT
                    check_single_nuclear_norm(x, axes)

                    # 2d, inner dimensions non-contiguous
                    x = torch.randn(n, 2 * m, device=device)[:, ::2]
                    check_single_nuclear_norm(x, axes)

                    # 2d, all dimensions non-contiguous
                    x = torch.randn(7 * n, 2 * m, device=device)[::7, ::2]
                    check_single_nuclear_norm(x, axes)

                for o in range(1, 3):
                    for axes in itertools.permutations([0, 1, 2], 2):
                        # 3d, inner dimensions C
                        x = torch.randn(o, n, m, device=device)
                        check_single_nuclear_norm(x, axes)

                        # 3d, inner dimensions Fortran
                        x = torch.randn(o, m, n, device=device).mT
                        check_single_nuclear_norm(x, axes)

                        # 3d, inner dimensions non-contiguous
                        x = torch.randn(o, n, 2 * m, device=device)[:, :, ::2]
                        check_single_nuclear_norm(x, axes)

                        # 3d, all dimensions non-contiguous
                        x = torch.randn(7 * o, 5 * n, 2 * m, device=device)[::7, ::5, ::2]
                        check_single_nuclear_norm(x, axes)

                    for r in range(1, 3):
                        for axes in itertools.permutations([0, 1, 2, 3], 2):
                            # 4d, inner dimensions C
                            x = torch.randn(r, o, n, m, device=device)
                            check_single_nuclear_norm(x, axes)

                            # 4d, inner dimensions Fortran
                            x = torch.randn(r, o, n, m, device=device).mT
                            check_single_nuclear_norm(x, axes)

                            # 4d, inner dimensions non-contiguous
                            x = torch.randn(r, o, n, 2 * m, device=device)[:, :, :, ::2]
                            check_single_nuclear_norm(x, axes)

                            # 4d, all dimensions non-contiguous
                            x = torch.randn(7 * r, 5 * o, 11 * n, 2 * m, device=device)[::7, ::5, ::11, ::2]
                            check_single_nuclear_norm(x, axes)
    @skipCUDAIfNoMagma
    def test_nuclear_norm_exceptions_old(self, device):
        """Nuclear norm must reject inputs with too few dims and duplicate/out-of-range dims."""
        for lst in [], [1], [1, 2]:
            x = torch.tensor(lst, dtype=torch.double, device=device)
            for axes in (), (0,):
                self.assertRaises(RuntimeError, torch.norm, x, "nuc", axes)
            self.assertRaises(IndexError, torch.norm, x, "nuc", (0, 1))

        x = torch.tensor([[0, 1, 2], [3, 4, 5]], dtype=torch.double, device=device)
        self.assertRaisesRegex(RuntimeError, "duplicate or invalid", torch.norm, x, "nuc", (0, 0))
        self.assertRaisesRegex(IndexError, "Dimension out of range", torch.norm, x, "nuc", (0, 2))
# ~~~ tests for torch.svd ~~~
    @skipCUDAIfNoMagmaAndNoCusolver
    @skipCPUIfNoLapack
    @dtypes(torch.double)
    def test_svd(self, device, dtype):
        """torch.svd: reconstruction, out= consistency, compute_uv=False, and non-contiguous inputs."""
        def run_test(dims, some, compute_uv):
            x = torch.randn(*dims, dtype=dtype, device=device)
            outu = torch.empty(0, dtype=dtype, device=device)
            outs = torch.empty(0, dtype=dtype, device=device)
            outv = torch.empty(0, dtype=dtype, device=device)
            torch.svd(x, some=some, compute_uv=compute_uv, out=(outu, outs, outv))

            if compute_uv:
                if some:
                    x_recon = torch.matmul(outu, torch.matmul(outs.diag_embed(), outv.mT))
                    self.assertEqual(x, x_recon, atol=1e-8, rtol=0, msg='Incorrect reconstruction using U @ diag(S) @ V.T')
                else:
                    # full factors are larger; narrow U and V to min(m, n) columns for the product
                    narrow_u = outu[..., :min(*dims[-2:])]
                    narrow_v = outv[..., :min(*dims[-2:])]
                    x_recon = torch.matmul(narrow_u, torch.matmul(outs.diag_embed(), narrow_v.mT))
                    self.assertEqual(x, x_recon, atol=1e-8, rtol=0, msg='Incorrect reconstruction using U @ diag(S) @ V.T')
            else:
                # compute_uv=False: singular values must match; U and V are left zeroed
                _, singvals, _ = torch.svd(x, compute_uv=True)
                self.assertEqual(singvals, outs, msg='Singular values mismatch')
                self.assertEqual(outu, torch.zeros_like(outu), msg='U not zero')
                self.assertEqual(outv, torch.zeros_like(outv), msg='V not zero')

            # out= variant and plain return must agree exactly
            resu, ress, resv = torch.svd(x, some=some, compute_uv=compute_uv)
            self.assertEqual(resu, outu, msg='outputs of svd and svd with out differ')
            self.assertEqual(ress, outs, msg='outputs of svd and svd with out differ')
            self.assertEqual(resv, outv, msg='outputs of svd and svd with out differ')

            # test non-contiguous
            x = torch.randn(*dims, dtype=dtype, device=device)
            if x.numel() > 0:
                n_dim = len(dims)
                # Reverse the batch dimensions and the matrix dimensions and then concat them
                x = x.permute(tuple(range(n_dim - 3, -1, -1)) + (n_dim - 1, n_dim - 2))
                assert not x.is_contiguous(), "x is intentionally non-contiguous"
                resu, ress, resv = torch.svd(x, some=some, compute_uv=compute_uv)
                if compute_uv:
                    if some:
                        x_recon = torch.matmul(resu, torch.matmul(ress.diag_embed(), resv.mT))
                        self.assertEqual(x, x_recon, atol=1e-8, rtol=0, msg='Incorrect reconstruction using U @ diag(S) @ V.T')
                    else:
                        narrow_u = resu[..., :min(*dims[-2:])]
                        narrow_v = resv[..., :min(*dims[-2:])]
                        x_recon = torch.matmul(narrow_u, torch.matmul(ress.diag_embed(), narrow_v.mT))
                        self.assertEqual(x, x_recon, atol=1e-8, rtol=0, msg='Incorrect reconstruction using U @ diag(S) @ V.T')
                else:
                    _, singvals, _ = torch.svd(x, compute_uv=True)
                    self.assertEqual(singvals, ress, msg='Singular values mismatch')
                    self.assertEqual(resu, torch.zeros_like(resu), msg='U not zero')
                    self.assertEqual(resv, torch.zeros_like(resv), msg='V not zero')

        shapes = [(0, 0), (5, 0), (0, 5),  # empty matrices
                  (0, 0, 0), (0, 5, 5), (0, 5, 3),  # zero batch dimension
                  (3, 3), (5, 3, 3), (7, 5, 3, 3),  # square matrices
                  (7, 3), (5, 7, 3), (7, 5, 7, 3),  # fat matrices
                  (3, 7), (5, 3, 7), (7, 5, 3, 7)]  # thin matrices
        for dims, some, compute_uv in product(shapes, [True, False], [True, False]):
            run_test(dims, some, compute_uv)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.float)
def test_svd_no_singularvectors(self, device, dtype):
for size in [(5, 5), (5, 20), (20, 5)]:
a = torch.randn(*size, device=device, dtype=dtype)
u, s_expect, v = torch.svd(a)
u, s_actual, v = torch.svd(a, compute_uv=False)
self.assertEqual(s_expect, s_actual, msg="Singular values don't match")
@skipCUDAIfNoMagmaAndNoCusolver
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double)
def test_svd_lowrank(self, device, dtype):
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
def run_subtest(actual_rank, matrix_size, batches, device, svd_lowrank, **options):
density = options.pop('density', 1)
if isinstance(matrix_size, int):
rows = columns = matrix_size
else:
rows, columns = matrix_size
if density == 1:
a_input = random_lowrank_matrix(actual_rank, rows, columns, *batches, device=device, dtype=dtype)
a = a_input
else:
assert batches == ()
a_input = random_sparse_matrix(rows, columns, density, device=device, dtype=dtype)
a = a_input.to_dense()
q = min(*size)
u, s, v = svd_lowrank(a_input, q=q, **options)
u, s, v = u[..., :q], s[..., :q], v[..., :q]
A = u.matmul(s.diag_embed()).matmul(v.mT)
self.assertEqual(A, a, rtol=1e-7, atol=2e-7)
U, S, V = torch.svd(a)
self.assertEqual(s.shape, S.shape)
self.assertEqual(u.shape, U.shape)
self.assertEqual(v.shape, V.shape)
self.assertEqual(s, S)
if density == 1:
u, s, v = u[..., :actual_rank], s[..., :actual_rank], v[..., :actual_rank]
U, S, V = U[..., :actual_rank], S[..., :actual_rank], V[..., :actual_rank]
self.assertEqual(u.mT.matmul(U).det().abs(), torch.ones(batches, device=device, dtype=dtype))
self.assertEqual(v.mT.matmul(V).det().abs(), torch.ones(batches, device=device, dtype=dtype))
all_batches = [(), (1,), (3,), (2, 3)]
for actual_rank, size, all_batches in [
(2, (17, 4), all_batches),
(4, (17, 4), all_batches),
(4, (17, 17), all_batches),
(10, (100, 40), all_batches),
(7, (1000, 1000), [()]),
]:
for batches in all_batches:
run_subtest(actual_rank, size, batches, device, torch.svd_lowrank)
if size != size[::-1]:
run_subtest(actual_rank, size[::-1], batches, device, torch.svd_lowrank)
for size in [(17, 4), (4, 17), (17, 17), (100, 40), (40, 100), (1000, 1000)]:
for density in [0.005, 0.1]:
run_subtest(None, size, (), device, torch.svd_lowrank, density=density)
jitted = torch.jit.script(torch.svd_lowrank)
actual_rank, size, batches = 2, (17, 4), ()
run_subtest(actual_rank, size, batches, device, jitted)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.cfloat)
def test_svd_complex(self, device, dtype):
t = torch.randn((10, 10), dtype=dtype, device=device)
U, S, V = torch.svd(t, some=False)
t2 = U @ torch.diag(S).type(dtype) @ V.conj().T
self.assertEqual(t, t2)
    def _test_svd_helper(self, shape, some, col_maj, device, dtype):
        """Compare torch.svd on `device`/`dtype` against a double-precision CPU reference.

        Only the absolute values of the factors are compared, since singular
        vectors are unique only up to sign/phase.
        """
        if not torch._C.has_lapack:
            reason = "PyTorch compiled without Lapack"
            raise unittest.SkipTest(reason)

        # generate the reference in higher precision on CPU, then cast down for the device run
        cpu_dtype = torch.complex128 if dtype.is_complex else torch.float64
        cpu_tensor = torch.randn(shape, device='cpu', dtype=cpu_dtype)
        device_tensor = cpu_tensor.to(device=device, dtype=dtype)
        if col_maj:
            # transpose to exercise the column-major code path
            cpu_tensor = cpu_tensor.t()
            device_tensor = device_tensor.t()
        cpu_result = torch.svd(cpu_tensor, some=some)
        device_result = torch.svd(device_tensor, some=some)
        m = min(cpu_tensor.shape[-2:])
        # compare only the first m columns of each factor
        for x, y in zip(cpu_result, device_result):
            self.assertEqual(x[..., :m].abs(), y[..., :m].abs(), exact_dtype=False)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_svd_errors_and_warnings(self, device, dtype):
        """Shared error/warning contract of torch.svd and torch.linalg.svd."""
        for svd in [torch.svd, torch.linalg.svd]:
            # if non-empty out tensors with wrong shapes are passed, one warning per tensor is given
            a = torch.randn(3, 3, dtype=dtype, device=device)
            real_dtype = a.real.dtype if dtype.is_complex else dtype
            out_u = torch.empty(2, 2, dtype=dtype, device=device)
            out_s = torch.empty(4, 4, dtype=real_dtype, device=device)
            out_v = torch.empty(6, 6, dtype=dtype, device=device)
            with warnings.catch_warnings(record=True) as w:
                # Trigger warnings
                svd(a, out=(out_u, out_s, out_v))
                self.assertEqual(len(w), 3)
                self.assertTrue("An output with one or more elements was resized" in str(w[-3].message))
                self.assertTrue("An output with one or more elements was resized" in str(w[-2].message))
                self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))

            # dtypes should be safely castable
            out_u = torch.empty(0, dtype=torch.int, device=device)
            out_s = torch.empty(0, dtype=torch.int, device=device)
            out_v = torch.empty(0, dtype=torch.int, device=device)
            with self.assertRaisesRegex(RuntimeError, "but got U with dtype Int"):
                svd(a, out=(out_u, out_s, out_v))

            out_u = torch.empty(0, dtype=dtype, device=device)
            if svd == torch.linalg.svd:
                # torch.linalg.svd names its third output Vh (see error message below)
                msg = "but got Vh with dtype Int"
            else:
                msg = "but got V with dtype Int"
            with self.assertRaisesRegex(RuntimeError, msg):
                svd(a, out=(out_u, out_s, out_v))

            out_v = torch.empty(0, dtype=dtype, device=device)
            with self.assertRaisesRegex(RuntimeError, "but got S with dtype Int"):
                svd(a, out=(out_u, out_s, out_v))

            # device of every out tensor should match the input's device
            if torch.cuda.is_available():
                wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
                out_u = torch.empty(0, device=wrong_device, dtype=dtype)
                out_s = torch.empty(0, device=wrong_device, dtype=real_dtype)
                out_v = torch.empty(0, device=wrong_device, dtype=dtype)
                with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                    svd(a, out=(out_u, out_s, out_v))
                out_u = torch.empty(0, device=device, dtype=dtype)
                with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                    svd(a, out=(out_u, out_s, out_v))
                out_s = torch.empty(0, device=device, dtype=real_dtype)
                with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                    svd(a, out=(out_u, out_s, out_v))

            # NaN inputs must surface a convergence/backend error (message differs by backend)
            error_msg = 'The algorithm failed to converge' \
                if (self.device_type == 'cpu' or TEST_WITH_ROCM) \
                else 'CUSOLVER_STATUS_EXECUTION_FAILED'
            a = torch.full((3, 3), float('nan'), dtype=dtype, device=device)
            a[0] = float('nan')
            with self.assertRaisesRegex(RuntimeError, error_msg):
                svd(a)
            # batched case: the failing batch element is named on CPU/ROCm
            error_msg = r'\(Batch element 1\): The algorithm failed to converge' \
                if (self.device_type == 'cpu' or TEST_WITH_ROCM) \
                else 'CUSOLVER_STATUS_EXECUTION_FAILED'
            a = torch.randn(3, 33, 33, dtype=dtype, device=device)
            a[1, 0, 0] = float('nan')
            with self.assertRaisesRegex(RuntimeError, error_msg):
                svd(a)
    # Thin wrappers over _test_svd_helper covering square/tall shapes and both memory layouts.
    @skipCUDAIfNoMagmaAndNoCusolver
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_svd_square(self, device, dtype):
        """Square input, reduced SVD, row-major layout."""
        self._test_svd_helper((10, 10), True, False, device, dtype)

    @skipCUDAIfNoMagmaAndNoCusolver
    @skipCPUIfNoLapack
    @dtypes(*floating_types())
    def test_svd_square_col_maj(self, device, dtype):
        """Square input, reduced SVD, column-major (transposed) layout."""
        self._test_svd_helper((10, 10), True, True, device, dtype)

    @skipCUDAIfNoMagmaAndNoCusolver
    @skipCPUIfNoLapack
    @dtypes(*floating_types())
    def test_svd_tall_some(self, device, dtype):
        """Tall input, reduced SVD."""
        self._test_svd_helper((20, 5), True, False, device, dtype)

    @skipCUDAIfNoMagmaAndNoCusolver
    @skipCPUIfNoLapack
    @dtypes(*floating_types())
    def test_svd_tall_all(self, device, dtype):
        """Tall input, full SVD."""
        self._test_svd_helper((20, 5), False, False, device, dtype)

    @skipCUDAIfNoMagmaAndNoCusolver
    @skipCPUIfNoLapack
    @dtypes(*floating_types())
    def test_svd_tall_some_col_maj(self, device, dtype):
        """Tall-after-transpose input, reduced SVD, column-major layout."""
        self._test_svd_helper((5, 20), True, True, device, dtype)

    @skipCUDAIfNoMagmaAndNoCusolver
    @skipCPUIfNoLapack
    @dtypes(*floating_types())
    def test_svd_tall_all_col_maj(self, device, dtype):
        """Tall-after-transpose input, full SVD, column-major layout."""
        self._test_svd_helper((5, 20), False, True, device, dtype)
    @skipCUDAIfNoMagmaAndNoCusolver
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_linalg_svd_compute_uv(self, device, dtype):
        """torch.linalg.svd must match numpy for both full_matrices settings, with and without out=."""
        t = torch.randn((10, 11), device=device, dtype=dtype)
        np_t = t.cpu().numpy()
        for full_matrices in (True, False):
            expected = np.linalg.svd(np_t, full_matrices, compute_uv=True)
            actual = torch.linalg.svd(t, full_matrices)
            # sign/phase of singular vectors is arbitrary, so compare magnitudes
            self.assertEqual(abs(actual[0]), abs(expected[0]))
            self.assertEqual(actual[1], expected[1])
            self.assertEqual(abs(actual[2]), abs(expected[2]))

            # the out= variant must fill the given tensors and return the same values
            out = (torch.empty_like(actual[0]),
                   torch.empty_like(actual[1]),
                   torch.empty_like(actual[2]))
            out2 = torch.linalg.svd(t, full_matrices, out=out)
            self.assertEqual(actual, out)
            self.assertEqual(actual, out2)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_svdvals(self, device, dtype):
def run_test(shape):
# svd with compute_uv=False
# so we test our implementation against numpy.linalg.svd(*, compute_uv=False)
A = make_tensor(shape, dtype=dtype, device=device)
expected = np.linalg.svd(A.cpu(), compute_uv=False)
actual = torch.linalg.svdvals(A)
self.assertEqual(actual, expected)
batches = [(), (0, ), (2, ), (2, 1)]
ns = [5, 2, 0]
for batch, (m, n) in itertools.product(batches, product(ns, ns)):
run_test((*batch, m, n))
    @skipCUDAIfNoCusolver  # MAGMA backend doesn't work in this case
    @skipCUDAIfRocm
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_svd_memory_allocation(self, device, dtype):
        """Smoke test: SVD of a very wide (3 x 2**20) matrix must complete without errors."""
        m = 3
        n = 2**20
        a = make_tensor((m, n), dtype=dtype, device=device)
        # the following calls should run without raising
        result = torch.linalg.svdvals(a)
        result = torch.linalg.svd(a, full_matrices=False)

        # same for the out= variants
        out0 = torch.empty_like(result[0])
        out1 = torch.empty_like(result[1])
        out2 = torch.empty_like(result[2])
        torch.linalg.svdvals(a, out=out0)
        torch.linalg.svd(a, full_matrices=False, out=(out0, out1, out2))
    def cholesky_solve_test_helper(self, A_dims, b_dims, upper, device, dtype):
        """Return (b, A, L): a random RHS, a random Hermitian PD matrix A, and its Cholesky factor L."""
        from torch.testing._internal.common_utils import random_hermitian_pd_matrix

        b = torch.randn(*b_dims, dtype=dtype, device=device)
        A = random_hermitian_pd_matrix(*A_dims, dtype=dtype, device=device)
        L = torch.cholesky(A, upper=upper)
        return b, A, L
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_cholesky_solve(self, device, dtype):
        """x = cholesky_solve(b, L) must satisfy A @ x == b for several shapes and both triangles."""
        for (k, n), upper in itertools.product(zip([2, 3, 5], [3, 5, 7]), [True, False]):
            b, A, L = self.cholesky_solve_test_helper((n,), (n, k), upper, device, dtype)
            x = torch.cholesky_solve(b, L, upper=upper)
            self.assertEqual(b, np.matmul(A.cpu(), x.cpu()))
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_cholesky_solve_batched(self, device, dtype):
        """Batched cholesky_solve must equal per-matrix solves and satisfy A @ x == b."""
        def cholesky_solve_batch_helper(A_dims, b_dims, upper):
            b, A, L = self.cholesky_solve_test_helper(A_dims, b_dims, upper, device, dtype)
            x_exp_list = []
            # solve each matrix/RHS pair individually as the reference
            for i in range(b_dims[0]):
                x_exp_list.append(torch.cholesky_solve(b[i], L[i], upper=upper))
            x_exp = torch.stack(x_exp_list)
            x_act = torch.cholesky_solve(b, L, upper=upper)
            self.assertEqual(x_act, x_exp)
            # and confirm the batched solution actually solves the systems
            Ax = np.matmul(A.cpu(), x_act.cpu())
            self.assertEqual(b, Ax)

        for upper, batchsize in itertools.product([True, False], [1, 3, 4]):
            cholesky_solve_batch_helper((5, batchsize), (batchsize, 5, 10), upper)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_cholesky_solve_batched_non_contiguous(self, device, dtype):
        """cholesky_solve must handle non-contiguous (permuted) A and b correctly."""
        from numpy.linalg import solve
        from torch.testing._internal.common_utils import random_hermitian_pd_matrix

        for upper in [True, False]:
            A = random_hermitian_pd_matrix(2, 2, dtype=dtype, device='cpu')
            b = torch.randn(2, 2, 2, dtype=dtype, device='cpu')
            # reference solution computed by numpy on the permuted views
            x_exp = solve(A.permute(0, 2, 1).numpy(), b.permute(2, 1, 0).numpy())
            A = A.to(device).permute(0, 2, 1)
            b = b.to(device).permute(2, 1, 0)
            assert not A.is_contiguous() and not b.is_contiguous(), "contiguous inputs"
            L = torch.cholesky(A, upper)
            x = torch.cholesky_solve(b, L, upper=upper)
            self.assertEqual(x, x_exp)
    @slowTest
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_cholesky_solve_batched_many_batches(self, device, dtype):
        """Stress cholesky_solve with large batch counts, including b broadcasting over A."""
        for A_dims, b_dims in zip([(5, 256, 256), (5,)], [(5, 10), (512, 512, 5, 10)]):
            for upper in [True, False]:
                b, A, L = self.cholesky_solve_test_helper(A_dims, b_dims, upper, device, dtype)
                x = torch.cholesky_solve(b, L, upper)
                # verify via the residual rather than a per-batch reference solve
                Ax = torch.matmul(A, x)
                self.assertEqual(Ax, b.expand_as(Ax))
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_cholesky_solve_batched_broadcasting(self, device, dtype):
        """Batch dimensions of A and b must broadcast against each other."""
        from numpy.linalg import solve
        from torch.testing._internal.common_utils import random_hermitian_pd_matrix

        def run_test(A_dims, b_dims, upper):
            A_matrix_size = A_dims[-1]
            A_batch_dims = A_dims[:-2]
            A = random_hermitian_pd_matrix(A_matrix_size, *A_batch_dims,
                                           dtype=dtype, device='cpu')
            b = torch.randn(*b_dims, dtype=dtype, device='cpu')
            # numpy broadcasts the batch dimensions when computing the reference
            x_exp = torch.tensor(solve(A.numpy(), b.numpy()), dtype=dtype, device=device)
            A, b = A.to(dtype=dtype, device=device), b.to(dtype=dtype, device=device)
            L = torch.linalg.cholesky(A, upper=upper)
            x = torch.cholesky_solve(b, L, upper=upper)
            self.assertEqual(x, x_exp)
            # exercise the out= overload as well (writing back into x)
            x = torch.cholesky_solve(b, L, upper=upper, out=x)
            self.assertEqual(x, x_exp)

        for upper in [True, False]:
            run_test((2, 1, 3, 4, 4), (2, 1, 3, 4, 6), upper)  # no broadcasting
            run_test((2, 1, 3, 4, 4), (4, 6), upper)  # broadcasting b
            run_test((4, 4), (2, 1, 3, 4, 2), upper)  # broadcasting A
            run_test((1, 3, 1, 4, 4), (2, 1, 3, 4, 5), upper)  # broadcasting A & b
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float64, torch.complex128)
def test_cholesky_solve_autograd(self, device, dtype):
def run_test(A_dims, B_dims, upper):
root = torch.randn(*A_dims, device=device, dtype=dtype).requires_grad_()
b = torch.randn(*B_dims, device=device, dtype=dtype).requires_grad_()
def func(root, b, upper):
if upper:
A = root.triu()
else:
A = root.tril()
return torch.cholesky_solve(b, A, upper)
gradcheck(func, [root, b, upper])
([((3, 3), (3, 4)), ((3, 3), (3, 2)),
((2, 3, 3), (2, 3, 4)), ((2, 3, 3), (2, 3, 2))],
[True, False]):
run_test(a_size, b_size, upper)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_cholesky_solve_out_errors_and_warnings(self, device, dtype):
        """out= validation for cholesky_solve: dtype errors, device errors, resize warnings."""
        # dtypes should be safely castable
        a = torch.eye(2, dtype=dtype, device=device)
        b = torch.randn(2, 1, dtype=dtype, device=device)
        out = torch.empty(0, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
            torch.cholesky_solve(b, a, out=out)

        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, dtype=dtype, device=wrong_device)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.cholesky_solve(b, a, out=out)

        # if out tensor with wrong shape is passed a warning is given
        with warnings.catch_warnings(record=True) as w:
            out = torch.empty(1, dtype=dtype, device=device)
            # Trigger warning
            torch.cholesky_solve(b, a, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
    @skipCUDAIfNoMagmaAndNoCusolver
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @precisionOverride({torch.float32: 2e-3, torch.complex64: 2e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_inverse(self, device, dtype):
        """torch.inverse / linalg.inv / linalg.inv_ex vs numpy, including out=, batches, non-contiguous inputs."""
        from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value

        def run_test(torch_inverse, matrix, batches, n):
            matrix_inverse = torch_inverse(matrix)

            # compare against the NumPy reference
            expected = np.linalg.inv(matrix.cpu().numpy())
            self.assertEqual(matrix_inverse, expected, atol=self.precision, rtol=self.precision)

            # check matrix @ inverse == identity in both orders
            identity = torch.eye(n, dtype=dtype, device=device)
            self.assertEqual(identity.expand_as(matrix), np.matmul(matrix.cpu(), matrix_inverse.cpu()))
            self.assertEqual(identity.expand_as(matrix), np.matmul(matrix_inverse.cpu(), matrix.cpu()))

            # check the out= variant using a transposed-layout out tensor
            matrix_inverse_out = torch.empty(*batches, n, n, dtype=dtype, device=device)
            matrix_inverse_out_t = matrix_inverse_out.mT.clone(memory_format=torch.contiguous_format)
            matrix_inverse_out = matrix_inverse_out_t.mT
            ans = torch_inverse(matrix, out=matrix_inverse_out)
            self.assertEqual(matrix_inverse_out, ans, atol=0, rtol=0)
            self.assertEqual(matrix_inverse_out, matrix_inverse, atol=0, rtol=0)

            # batched inversion must match inverting each matrix one by one
            if matrix.ndim > 2 and batches[0] != 0:
                expected_inv_list = []
                p = int(np.prod(batches))  # total number of matrices in the batch
                for mat in matrix.contiguous().view(p, n, n):
                    expected_inv_list.append(torch_inverse(mat))
                expected_inv = torch.stack(expected_inv_list).view(*batches, n, n)
                if self.device_type == 'cuda' and dtype in [torch.float32, torch.complex64]:
                    # looser tolerances for single precision on CUDA
                    self.assertEqual(matrix_inverse, expected_inv, atol=1e-1, rtol=1e-2)
                else:
                    self.assertEqual(matrix_inverse, expected_inv)

        # adapter so torch.linalg.inv_ex goes through the same run_test path
        def test_inv_ex(input, out=None):
            if out is not None:
                info = torch.empty(0, dtype=torch.int32, device=device)
                return torch.linalg.inv_ex(input, out=(out, info)).inverse
            return torch.linalg.inv_ex(input).inverse

        for torch_inverse in [torch.inverse, torch.linalg.inv, test_inv_ex]:
            for batches, n in itertools.product(
                [[], [0], [2], [2, 1]],
                [0, 5]
            ):
                matrices = random_fullrank_matrix_distinct_singular_value(n, *batches, dtype=dtype, device=device)
                run_test(torch_inverse, matrices, batches, n)

                # test non-contiguous input (transposed, and strided views)
                run_test(torch_inverse, matrices.mT, batches, n)
                if n > 0:
                    run_test(
                        torch_inverse,
                        random_fullrank_matrix_distinct_singular_value(n * 2, *batches, dtype=dtype, device=device)
                        .view(-1, n * 2, n * 2)[:, ::2, ::2].view(*batches, n, n),
                        batches, n
                    )
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_inv_ex_info_device(self, device, dtype):
A = torch.eye(3, 3, dtype=dtype, device=device)
info = torch.linalg.inv_ex(A).info
self.assertTrue(info.device == A.device)
    @skipCUDAIfNoMagmaAndNoCusolver
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @skipCUDAIfRocm
    def test_inv_ex_singular(self, device, dtype):
        """linalg.inv_ex on singular input: info reports the zero pivot; check_errors=True raises."""
        # non-batched: info holds the (1-based) index of the zero diagonal element
        A = torch.eye(3, 3, dtype=dtype, device=device)
        A[-1, -1] = 0  # now A is singular
        info = torch.linalg.inv_ex(A).info
        self.assertEqual(info, 3)
        with self.assertRaisesRegex(RuntimeError, r'diagonal element 3 is zero, the inversion could not be completed'):
            torch.linalg.inv_ex(A, check_errors=True)

        # batched: info is per-matrix, non-zero only for the singular element
        A = torch.eye(3, 3, dtype=dtype, device=device)
        A = A.reshape((1, 3, 3))
        A = A.repeat(5, 1, 1)
        A[3, -2, -2] = 0  # now A[3] is singular
        info = torch.linalg.inv_ex(A).info

        expected_info = torch.zeros(A.shape[:-2], dtype=torch.int32, device=device)
        expected_info[3] = 2
        self.assertEqual(info, expected_info)
        with self.assertRaisesRegex(RuntimeError, r'\(Batch element 3\): The diagonal element 2 is zero'):
            torch.linalg.inv_ex(A, check_errors=True)
    @slowTest
    @skipCUDAIfNoMagmaAndNoCusolver
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @precisionOverride({torch.float32: 2e-3, torch.complex64: 2e-3,
                        torch.float64: 1e-5, torch.complex128: 1e-5})
    def test_inverse_many_batches(self, device, dtype):
        """Inversion of large batches of matrices must still match the numpy reference."""
        from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value

        def test_inverse_many_batches_helper(torch_inverse, b, n):
            # b matrices of size n x n
            matrices = random_fullrank_matrix_distinct_singular_value(b, n, n, dtype=dtype, device=device)
            matrices_inverse = torch_inverse(matrices)

            # compare against the NumPy reference
            expected = np.linalg.inv(matrices.cpu().numpy())
            self.assertEqual(matrices_inverse, expected, atol=self.precision, rtol=1e-3)

        for torch_inverse in [torch.inverse, torch.linalg.inv]:
            test_inverse_many_batches_helper(torch_inverse, 5, 256)
            test_inverse_many_batches_helper(torch_inverse, 3, 512)
    @skipCUDAIfNoMagmaAndNoCusolver
    @skipCPUIfNoLapack
    @onlyNativeDeviceTypes
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_inverse_errors(self, device, dtype):
        """torch.inverse must reject non-square input and name the first singular batch element."""
        # inverse expects batches of square matrices as input
        with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
            torch.inverse(torch.randn(2, 3, 4, 3))

        # if input is not invertible, RuntimeError is raised mentioning the first non-invertible batch
        def run_test_singular_input(batch_dim, n):
            x = torch.eye(3, 3, dtype=dtype, device=device).reshape((1, 3, 3)).repeat(batch_dim, 1, 1)
            x[n, -1, -1] = 0  # make batch element n singular
            with self.assertRaisesRegex(RuntimeError, rf'\(Batch element {n}\): The diagonal element 3 is zero'):
                torch.inverse(x)

        for params in [(1, 0), (2, 0), (2, 1), (4, 0), (4, 2), (10, 2)]:
            run_test_singular_input(*params)
    @skipCUDAIfNoMagmaAndNoCusolver
    @skipCPUIfNoLapack
    @onlyNativeDeviceTypes  # TODO: XLA doesn't raise exception
    @skipCUDAIfRocm
    @skipCUDAVersionIn([(11, 3)])
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_inverse_errors_large(self, device, dtype):
        """Singular-input error reporting must also work for large batched inputs."""
        x = torch.empty((8, 10, 616, 616), dtype=dtype, device=device)
        x[:] = torch.eye(616, dtype=dtype, device=device)
        x[..., 10, 10] = 0  # every batch element is singular at the same pivot
        with self.assertRaisesRegex(RuntimeError, r'\(Batch element 0\): The diagonal element 11 is zero'):
            torch.inverse(x)
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3, torch.float64: 1e-7, torch.complex128: 1e-7})
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_pinv(self, device, dtype):
        """linalg.pinv: Moore-Penrose identities, out= support, and rcond/rtol agreement with numpy."""
        from torch.testing._internal.common_utils import random_hermitian_pd_matrix

        def run_test_main(A, hermitian):
            # check the four Moore-Penrose conditions on non-empty inputs
            A_pinv = torch.linalg.pinv(A, hermitian=hermitian)
            np_A = A.cpu().numpy()
            np_A_pinv = A_pinv.cpu().numpy()
            if A.numel() > 0:
                self.assertEqual(A, np_A @ np_A_pinv @ np_A, atol=self.precision, rtol=self.precision)
                self.assertEqual(A_pinv, np_A_pinv @ np_A @ np_A_pinv, atol=self.precision, rtol=self.precision)
                self.assertEqual(np_A @ np_A_pinv, (np_A @ np_A_pinv).conj().swapaxes(-2, -1))
                self.assertEqual(np_A_pinv @ np_A, (np_A_pinv @ np_A).conj().swapaxes(-2, -1))
            else:
                # empty input: only the (swapped) trailing shape can be checked
                self.assertEqual(A.shape, A_pinv.shape[:-2] + (A_pinv.shape[-1], A_pinv.shape[-2]))

            # the out= variant must fill the given tensor and return the same values
            out = torch.empty_like(A_pinv)
            ans = torch.linalg.pinv(A, hermitian=hermitian, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(ans, A_pinv)

        def run_test_numpy(A, hermitian):
            # compare against numpy for float rcond and several tensor rcond dtypes
            rconds = [float(torch.rand(1)), ]
            for rcond_type in all_types():
                rconds.append(torch.rand(A.shape[:-2], dtype=torch.double, device=device).to(rcond_type))
            if A.ndim > 2:
                # per-batch rcond broadcast over the matrix dimensions
                rconds.append(torch.rand(A.shape[-3], device=device))
            for rcond in rconds:
                actual = torch.linalg.pinv(A, rcond=rcond, hermitian=hermitian)
                # rcond= and rtol= must produce identical results
                torch_rtol = torch.linalg.pinv(A, rtol=rcond, hermitian=hermitian)
                self.assertEqual(actual, torch_rtol)
                numpy_rcond = rcond if isinstance(rcond, float) else rcond.cpu().numpy()
                expected = np.linalg.pinv(A.cpu().numpy(), rcond=numpy_rcond, hermitian=hermitian)
                self.assertEqual(actual, expected, atol=self.precision, rtol=1e-5)

        for sizes in [(5, 5), (3, 5, 5), (3, 2, 5, 5),  # square
                      (3, 2), (5, 3, 2), (2, 5, 3, 2),  # fat
                      (2, 3), (5, 2, 3), (2, 5, 2, 3),  # thin
                      (0, 0), (0, 2), (2, 0), (3, 0, 0), (0, 3, 0), (0, 0, 3)]:  # zero numel
            A = torch.randn(*sizes, dtype=dtype, device=device)
            hermitian = False
            run_test_main(A, hermitian)
            run_test_numpy(A, hermitian)

        # hermitian=True path needs Hermitian positive-definite inputs
        for sizes in [(5, 5), (3, 5, 5), (3, 2, 5, 5),
                      (0, 0), (3, 0, 0), ]:
            A = random_hermitian_pd_matrix(sizes[-1], *sizes[:-2], dtype=dtype, device=device)
            hermitian = True
            run_test_main(A, hermitian)
            run_test_numpy(A, hermitian)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_pinv_errors_and_warnings(self, device, dtype):
        """Validate torch.linalg.pinv error messages and out= resize warning:
        <2-D input, wrong-shape out, wrong-dtype out, cross-device out/rcond,
        and complex tolerances (rcond/atol/rtol) being rejected."""
        # pinv requires at least a 2-D input
        a = torch.randn(1, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "expected a tensor with 2 or more dimensions"):
            torch.linalg.pinv(a)
        # wrong-shape out tensor only warns (it is resized)
        a = torch.randn(3, 3, dtype=dtype, device=device)
        out = torch.empty(7, 7, dtype=dtype, device=device)
        with warnings.catch_warnings(record=True) as w:
            torch.linalg.pinv(a, out=out)
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # wrong-dtype out tensor is an error
        out = torch.empty_like(a).to(torch.int)
        with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
            torch.linalg.pinv(a, out=out)
        if torch.cuda.is_available():
            # out tensor on a different device is an error
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty_like(a).to(wrong_device)
            with self.assertRaisesRegex(RuntimeError, "Expected result and input tensors to be on the same device"):
                torch.linalg.pinv(a, out=out)
            # rcond tensor on a different device is an error
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            rcond = torch.full((), 1e-2, device=wrong_device)
            with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
                torch.linalg.pinv(a, rcond=rcond)
        # rcond can't be complex
        rcond = torch.full((), 1j, device=device)
        with self.assertRaisesRegex(RuntimeError, "rcond tensor of complex type is not supported"):
            torch.linalg.pinv(a, rcond=rcond)
        # atol can't be complex
        atol = torch.full((), 1j, device=device)
        with self.assertRaisesRegex(RuntimeError, "atol tensor of complex type is not supported"):
            torch.linalg.pinv(a, atol=atol)
        # rtol can't be complex
        rtol = torch.full((), 1j, device=device)
        with self.assertRaisesRegex(RuntimeError, "rtol tensor of complex type is not supported"):
            torch.linalg.pinv(a, rtol=rtol)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_inv_errors_and_warnings(self, device, dtype):
        """Validate torch.linalg.inv error messages and out= handling:
        non-square / <2-D input, singular batches (first failing element named),
        dtype and device mismatches of out=, and resize warnings for wrongly
        shaped out tensors (including batched-column-major layouts)."""
        # inv expects batches of square matrices as input
        a = torch.randn(2, 3, 4, 3, dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
            torch.linalg.inv(a)
        # inv requires the input to be at least 2 dimensional tensor
        a = torch.randn(2, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
            torch.linalg.inv(a)
        # if input is not invertible, RuntimeError is raised mentioning the first non-invertible batch
        def run_test_singular_input(batch_dim, n):
            a = torch.eye(3, 3, dtype=dtype, device=device).reshape((1, 3, 3)).repeat(batch_dim, 1, 1)
            a[n, -1, -1] = 0  # make batch element `n` singular
            with self.assertRaisesRegex(RuntimeError, rf"\(Batch element {n}\): The diagonal element 3 is zero"):
                torch.linalg.inv(a)
        for params in [(1, 0), (2, 0), (2, 1), (4, 0), (4, 2), (10, 2)]:
            run_test_singular_input(*params)
        # dtypes should match
        a = torch.eye(2, dtype=dtype, device=device)
        out = torch.empty(0, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "got result with dtype Int"):
            torch.linalg.inv(a, out=out)
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, device=wrong_device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.inv(a, out=out)
        # if out tensor with wrong shape is passed a warning is given
        with warnings.catch_warnings(record=True) as w:
            a = torch.eye(2, dtype=dtype, device=device)
            out = torch.empty(1, dtype=dtype, device=device)
            # Trigger warning
            torch.linalg.inv(a, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # if out tensor in batched column major format but with wrong shape a warning is given
        with warnings.catch_warnings(record=True) as w:
            a = torch.eye(2, dtype=dtype, device=device)
            out = torch.empty(3, 3, dtype=dtype, device=device)
            # Round-trip through mT to make `out` column-major (Fortran-contiguous).
            out = out.mT.clone(memory_format=torch.contiguous_format)
            out = out.mT
            self.assertTrue(out.mT.is_contiguous())
            # Trigger warning
            torch.linalg.inv(a, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
def solve_test_helper(self, A_dims, b_dims, device, dtype):
from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
b = torch.randn(*b_dims, dtype=dtype, device=device)
A = random_fullrank_matrix_distinct_singular_value(*A_dims, dtype=dtype, device=device)
return b, A
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3})
    def test_solve(self, device, dtype):
        """Test torch.linalg.solve over batched/empty inputs and matrix/vector RHS.

        Verifies A @ x == b, agreement with numpy.linalg.solve, and the out=
        variant (same-dtype, wider-dtype complex128, and empty out tensors).
        """
        def run_test(n, batch, rhs):
            A_dims = (n, *batch)
            b_dims = (*batch, n, *rhs)
            b, A = self.solve_test_helper(A_dims, b_dims, device, dtype)
            # Correctness test
            x = torch.linalg.solve(A, b)
            if rhs == ():
                # Vector RHS: add/remove a trailing dim around the matmul.
                Ax = np.matmul(A.cpu(), x.unsqueeze(-1).cpu())
                Ax.squeeze_(-1)
            else:
                Ax = np.matmul(A.cpu(), x.cpu())
            self.assertEqual(b.expand_as(Ax), Ax)
            # Check against NumPy
            expected = np.linalg.solve(A.cpu().numpy(), b.expand_as(x).cpu().numpy())
            self.assertEqual(x, expected)
            # Check out= variant
            out = torch.empty_like(x)
            ans = torch.linalg.solve(A, b, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(x, out)
            # Check out= variant with complex128 out tensor
            out = torch.empty_like(x).to(torch.complex128)
            ans = torch.linalg.solve(A, b, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(x.to(torch.complex128), out)
            # Check empty out
            out = torch.empty(0, dtype=dtype, device=device)
            ans = torch.linalg.solve(A, b, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(x, out)
        batches = [(), (0, ), (3, ), (2, 3)]
        ns = [0, 5, 32]
        nrhs = [(), (1, ), (5, )]
        for n, batch, rhs in itertools.product(ns, batches, nrhs):
            run_test(n, batch, rhs)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3})
def test_solve_batched_non_contiguous(self, device, dtype):
from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
A = random_fullrank_matrix_distinct_singular_value(2, 2, dtype=dtype, device=device).permute(1, 0, 2)
b = torch.randn(2, 2, 2, dtype=dtype, device=device).permute(2, 1, 0)
self.assertFalse(A.is_contiguous())
self.assertFalse(b.is_contiguous())
actual = torch.linalg.solve(A, b)
expected = np.linalg.solve(A.cpu().numpy(), b.cpu().numpy())
self.assertEqual(actual, expected)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_solve_errors_and_warnings(self, device, dtype):
# solve expects batches of square matrices as input
with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
a = torch.randn(2, 3, 4, 3, dtype=dtype, device=device)
b = torch.randn(2, 3, 4, 1, dtype=dtype, device=device)
torch.linalg.solve(a, b)
# solve expects compatible shapes for A x = b
with self.assertRaisesRegex(RuntimeError, "Incompatible matrix sizes"):
a = torch.randn(2, 3, 3, 3, dtype=dtype, device=device)
b = torch.randn(2, 3, 2, 1, dtype=dtype, device=device)
torch.linalg.solve(a, b)
# if input is not solvable, RuntimeError is raised mentioning the first non-solvable batch
def run_test_singular_input(batch_dim, n):
a = torch.eye(3, 3, dtype=dtype, device=device).reshape((1, 3, 3)).repeat(batch_dim, 1, 1)
a[n, -1, -1] = 0
b = torch.randn(batch_dim, 3, 1, dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, rf'\(Batch element {n}\): The diagonal element 3 is zero'):
torch.linalg.solve(a, b)
for params in [(1, 0), (2, 0), (2, 1), (4, 0), (4, 2), (10, 2)]:
run_test_singular_input(*params)
# if out tensor with wrong shape is passed a warning is given
# matrix 'b' case
with warnings.catch_warnings(record=True) as w:
A = torch.eye(2, dtype=dtype, device=device).reshape((1, 2, 2)).repeat(2, 1, 1)
b = torch.randn(2, 2, 2, dtype=dtype, device=device)
out = torch.zeros(1, dtype=dtype, device=device)
# Trigger warning
torch.linalg.solve(A, b, out=out)
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
# if out tensor with wrong shape is passed a warning is given
# vector 'b' case
with warnings.catch_warnings(record=True) as w:
A = torch.eye(2, dtype=dtype, device=device)
b = torch.randn(2, dtype=dtype, device=device)
out = torch.zeros(1, dtype=dtype, device=device)
# Trigger warning
torch.linalg.solve(A, b, out=out)
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
# dtypes should be safely castable
a = torch.eye(2, dtype=dtype, device=device)
b = torch.randn(2, 1, dtype=dtype, device=device)
out = torch.empty(0, dtype=torch.int, device=device)
with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
torch.linalg.solve(a, b, out=out)
# device should match
if torch.cuda.is_available():
wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
out = torch.empty(0, dtype=dtype, device=wrong_device)
clone_a = torch.empty_like(a)
with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
torch.linalg.solve(a, b, out=out)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_old_solve(self, device, dtype):
for (k, n) in zip([2, 3, 5], [3, 5, 7]):
b, A = self.solve_test_helper((n,), (n, k), device, dtype)
x = torch.solve(b, A)[0]
self.assertEqual(b, np.matmul(A.cpu(), x.cpu()))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_old_solve_batched(self, device, dtype):
        """Batched deprecated torch.solve must match per-matrix solves and satisfy A @ x == b."""
        def solve_batch_helper(A_dims, b_dims):
            b, A = self.solve_test_helper(A_dims, b_dims, device, dtype)
            # Solve each batch element individually, then compare with the batched call.
            x_exp_list = []
            for i in range(b_dims[0]):
                x_exp_list.append(torch.solve(b[i], A[i])[0])
            x_exp = torch.stack(x_exp_list)  # Stacked output
            x_act = torch.solve(b, A)[0]  # Actual output
            self.assertEqual(x_exp, x_act)  # Equality check
            Ax = np.matmul(A.cpu(), x_act.cpu())
            self.assertEqual(b, Ax)
        for batchsize in [1, 3, 4]:
            solve_batch_helper((5, batchsize), (batchsize, 5, 10))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_old_solve_batched_non_contiguous(self, device, dtype):
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
A = random_fullrank_matrix_distinct_singular_value(2, 2, dtype=dtype, device=device).permute(1, 0, 2)
b = torch.randn(2, 2, 2, dtype=dtype, device=device).permute(2, 1, 0)
x, _ = torch.solve(b, A)
x_exp = solve(A.cpu().numpy(), b.cpu().numpy())
self.assertEqual(x, x_exp)
@slowTest
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_old_solve_batched_many_batches(self, device, dtype):
for A_dims, b_dims in zip([(5, 256, 256), (3, )], [(5, 1), (512, 512, 3, 1)]):
b, A = self.solve_test_helper(A_dims, b_dims, device, dtype)
x, _ = torch.solve(b, A)
Ax = torch.matmul(A, x)
self.assertEqual(Ax, b.expand_as(x))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_old_solve_batched_broadcasting(self, device, dtype):
        """Deprecated torch.solve must broadcast batch dimensions of A and b like NumPy."""
        from numpy.linalg import solve
        def run_test(A_dims, b_dims):
            A_matrix_size = A_dims[-1]
            A_batch_dims = A_dims[:-2]
            b, A = self.solve_test_helper((A_matrix_size,) + A_batch_dims, b_dims, device, dtype)
            x, _ = torch.solve(b, A)
            x_exp = solve(A.cpu().numpy(), b.cpu().numpy())
            self.assertEqual(x, x_exp)
        # test against numpy.linalg.solve
        run_test((2, 1, 3, 4, 4), (2, 1, 3, 4, 6))  # no broadcasting
        run_test((2, 1, 3, 4, 4), (4, 6))  # broadcasting b
        run_test((4, 4), (2, 1, 3, 4, 2))  # broadcasting A
        run_test((1, 3, 1, 4, 4), (2, 1, 3, 4, 5))  # broadcasting A & b
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_old_solve_errors_and_warnings(self, device, dtype):
        """Validate out= checks of the deprecated torch.solve: non-castable dtypes
        for both outputs (solution, lu) and cross-device out tensors."""
        # dtypes should be safely castable
        a = torch.eye(2, dtype=dtype, device=device)
        b = torch.randn(2, 1, dtype=dtype, device=device)
        out = torch.empty(0, dtype=torch.int, device=device)
        lu = torch.empty(0, dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got solution with dtype Int"):
            torch.solve(b, a, out=(out, lu))
        out = torch.empty(0, dtype=dtype, device=device)
        lu = torch.empty(0, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got lu with dtype Int"):
            torch.solve(b, a, out=(out, lu))
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            # wrong device for the solution output
            out = torch.empty(0, dtype=dtype, device=wrong_device)
            lu = torch.empty_like(a)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.solve(b, a, out=(out, lu))
            # wrong device for the LU output
            out = torch.empty(0, dtype=dtype, device=device)
            lu = torch.empty_like(a).to(wrong_device)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.solve(b, a, out=(out, lu))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
@precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4})
    def test_tensorsolve(self, device, dtype):
        """Compare torch.linalg.tensorsolve against numpy.linalg.tensorsolve
        (default and explicit dims) and check the out= variant."""
        def run_test(a_shape, dims):
            a = torch.randn(a_shape, dtype=dtype, device=device)
            b = torch.randn(a_shape[:2], dtype=dtype, device=device)
            result = torch.linalg.tensorsolve(a, b, dims=dims)
            expected = np.linalg.tensorsolve(a.cpu().numpy(), b.cpu().numpy(), axes=dims)
            self.assertEqual(result, expected)
            # check the out= variant
            out = torch.empty_like(result)
            ans = torch.linalg.tensorsolve(a, b, dims=dims, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(ans, result)
        a_shapes = [(2, 3, 6), (3, 4, 4, 3)]
        dims = [None, (0, 2)]
        for a_shape, d in itertools.product(a_shapes, dims):
            run_test(a_shape, d)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_tensorsolve_empty(self, device, dtype):
# Check for empty inputs. NumPy does not work for these cases.
a = torch.empty(0, 0, 1, 2, 3, 0, dtype=dtype, device=device)
b = torch.empty(a.shape[:2], dtype=dtype, device=device)
x = torch.linalg.tensorsolve(a, b)
self.assertEqual(torch.tensordot(a, x, dims=len(x.shape)), b)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
@precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4})
    def test_tensorsolve_non_contiguous(self, device, dtype):
        """tensorsolve must match NumPy on non-contiguous inputs, produced either
        by dimension permutation or by strided slicing; also checks a
        non-contiguous out= tensor."""
        def run_test_permuted(a_shape, dims):
            # check for permuted / transposed inputs
            a = torch.randn(a_shape, dtype=dtype, device=device)
            a = a.movedim((0, 2), (-2, -1))
            self.assertFalse(a.is_contiguous())
            b = torch.randn(a.shape[:2], dtype=dtype, device=device)
            b = b.t()
            self.assertFalse(b.is_contiguous())
            result = torch.linalg.tensorsolve(a, b, dims=dims)
            expected = np.linalg.tensorsolve(a.cpu().numpy(), b.cpu().numpy(), axes=dims)
            self.assertEqual(result, expected)
        def run_test_skipped_elements(a_shape, dims):
            # check for inputs with skipped elements
            a = torch.randn(a_shape, dtype=dtype, device=device)
            a = a[::2]
            self.assertFalse(a.is_contiguous())
            b = torch.randn(a_shape[:2], dtype=dtype, device=device)
            b = b[::2]
            self.assertFalse(b.is_contiguous())
            result = torch.linalg.tensorsolve(a, b, dims=dims)
            expected = np.linalg.tensorsolve(a.cpu().numpy(), b.cpu().numpy(), axes=dims)
            self.assertEqual(result, expected)
            # check non-contiguous out
            out = torch.empty(2 * result.shape[0], *result.shape[1:], dtype=dtype, device=device)[::2]
            self.assertFalse(out.is_contiguous())
            ans = torch.linalg.tensorsolve(a, b, dims=dims, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(ans, result)
        a_shapes = [(2, 3, 6), (3, 4, 4, 3)]
        dims = [None, (0, 2)]
        for a_shape, d in itertools.product(a_shapes, dims):
            run_test_permuted(a_shape, d)
        # larger first dims so a[::2] still yields a valid problem
        a_shapes = [(4, 3, 6), (6, 4, 4, 3)]
        dims = [None, (0, 2)]
        for a_shape, d in itertools.product(a_shapes, dims):
            run_test_skipped_elements(a_shape, d)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32)
    def test_tensorsolve_errors_and_warnings(self, device, dtype):
        """Validate tensorsolve error messages (shape requirement, out= dtype,
        cross-device out=) and the out= resize warning."""
        # tensorsolve expects the input that can be reshaped to a square matrix
        a = torch.eye(2 * 3 * 4, dtype=dtype, device=device).reshape((2 * 3, 4, 2, 3, 4))
        b = torch.randn(8, 4, dtype=dtype, device=device)
        self.assertTrue(np.prod(a.shape[2:]) != np.prod(b.shape))
        with self.assertRaisesRegex(RuntimeError, r'Expected self to satisfy the requirement'):
            torch.linalg.tensorsolve(a, b)
        # if non-empty out tensor with wrong shape is passed a warning is given
        out = torch.empty_like(a)
        b = torch.randn(6, 4, dtype=dtype, device=device)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.tensorsolve(a, b, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # dtypes should be safely castable
        out = torch.empty_like(a).to(torch.int)
        with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
            torch.linalg.tensorsolve(a, b, out=out)
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, dtype=dtype, device=wrong_device)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.tensorsolve(a, b, out=out)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float: 1e-3, torch.cfloat: 1e-3})
    def test_tensorinv(self, device, dtype):
        """Compare torch.linalg.tensorinv against numpy.linalg.tensorinv for a
        range of shapes and `ind` values; also checks the out= variant."""
        def run_test(a_shape, ind):
            a = torch.randn(a_shape, dtype=dtype, device=device)
            a_numpy = a.cpu().numpy()
            result = torch.linalg.tensorinv(a, ind=ind)
            expected = np.linalg.tensorinv(a_numpy, ind=ind)
            self.assertEqual(result, expected)
            # check the out= variant
            out = torch.empty_like(result)
            ans = torch.linalg.tensorinv(a, ind=ind, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(ans, result)
        # compare to NumPy output
        run_test((12, 3, 4), ind=1)
        run_test((3, 8, 24), ind=2)
        run_test((18, 3, 3, 2), ind=1)
        run_test((1, 4, 2, 2), ind=2)
        run_test((2, 3, 5, 30), ind=3)
        run_test((24, 2, 2, 3, 2), ind=1)
        run_test((3, 4, 2, 3, 2), ind=2)
        run_test((1, 2, 3, 2, 3), ind=3)
        run_test((3, 2, 1, 2, 12), ind=4)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float: 1e-3, torch.cfloat: 1e-3})
    def test_tensorinv_non_contiguous(self, device, dtype):
        """tensorinv must match NumPy on non-contiguous inputs: cyclically
        permuted tensors and strided slices; also checks a non-contiguous out=."""
        def run_test(a_shape, ind):
            # check for permuted (transposed) case
            a = torch.randn(a_shape, dtype=dtype, device=device)
            permutation = list(range(0, a.ndim))
            a = a.permute(permutation[ind:] + permutation[:ind])
            self.assertFalse(a.is_contiguous())
            a_numpy = a.cpu().numpy()
            # After the cyclic permutation, the square split point moves to ndim - ind.
            result = torch.linalg.tensorinv(a, ind=a.ndim - ind)
            expected = np.linalg.tensorinv(a_numpy, ind=a.ndim - ind)
            self.assertEqual(result, expected)
        def run_test_skipped_elements(a_shape, ind):
            # check for input with skipped elements
            a = torch.randn(a_shape, dtype=dtype, device=device)
            a = a[::2]
            self.assertFalse(a.is_contiguous())
            a_numpy = a.cpu().numpy()
            result = torch.linalg.tensorinv(a, ind=ind)
            expected = np.linalg.tensorinv(a_numpy, ind=ind)
            self.assertEqual(result, expected)
            # check non-contiguous out
            out = torch.empty(2 * result.shape[0], *result.shape[1:], dtype=dtype, device=device)[::2]
            self.assertFalse(out.is_contiguous())
            ans = torch.linalg.tensorinv(a, ind=ind, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(ans, result)
        run_test((12, 3, 4), ind=1)
        run_test((3, 8, 24), ind=2)
        run_test((18, 3, 3, 2), ind=1)
        run_test((1, 4, 2, 2), ind=2)
        run_test((2, 3, 5, 30), ind=3)
        run_test((24, 2, 2, 3, 2), ind=1)
        run_test((3, 4, 2, 3, 2), ind=2)
        run_test((1, 2, 3, 2, 3), ind=3)
        run_test((3, 2, 1, 2, 12), ind=4)
        run_test_skipped_elements((12, 3, 2), ind=1)
        run_test_skipped_elements((18, 3, 3, 1), ind=1)
@skipMeta # See https://github.com/pytorch/pytorch/issues/53739
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_tensorinv_empty(self, device, dtype):
for ind in range(1, 4):
# Check for empty inputs. NumPy does not work for these cases.
a = torch.empty(0, 0, 1, 2, 3, 0, dtype=dtype, device=device)
a_inv = torch.linalg.tensorinv(a, ind=ind)
self.assertEqual(a_inv.shape, a.shape[ind:] + a.shape[:ind])
@skipMeta # See https://github.com/pytorch/pytorch/issues/53739
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_tensorinv_errors_and_warnings(self, device, dtype):
        """Validate tensorinv error messages for invalid shapes and `ind` values,
        plus out= resize warning, dtype, and cross-device checks."""
        def check_shape(a_shape, ind):
            # tensorinv requires the input to satisfy
            # prod(a.shape[ind:]) == prod(a.shape[:ind])
            a = torch.randn(a_shape, dtype=dtype, device=device)
            with self.assertRaisesRegex(RuntimeError, "Expected self to satisfy the requirement"):
                torch.linalg.tensorinv(a, ind=ind)
        def check_ind(a_shape, ind):
            # `ind` must be a strictly positive integer
            a = torch.randn(a_shape, dtype=dtype, device=device)
            with self.assertRaisesRegex(RuntimeError, "Expected a strictly positive integer"):
                torch.linalg.tensorinv(a, ind=ind)
        def check_out(a_shape, ind):
            # if non-empty out tensor with wrong shape is passed a warning is given
            a = torch.randn(a_shape, dtype=dtype, device=device)
            out = torch.empty_like(a)
            with warnings.catch_warnings(record=True) as w:
                # Trigger warning
                torch.linalg.tensorinv(a, ind=ind, out=out)
                # Check warning occurs
                self.assertEqual(len(w), 1)
                self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
            # dtypes should be safely castable
            out = torch.empty(0, dtype=torch.int, device=device)
            with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
                torch.linalg.tensorinv(a, ind=ind, out=out)
            # device should match
            if torch.cuda.is_available():
                wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
                out = torch.empty(0, dtype=dtype, device=wrong_device)
                with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                    torch.linalg.tensorinv(a, ind=ind, out=out)
        # test for invalid shape
        check_shape((2, 3, 4), ind=1)
        check_shape((1, 2, 3, 4), ind=3)
        # test for invalid ind
        check_ind((12, 3, 4), ind=-1)
        check_ind((18, 3, 3, 2), ind=0)
        # test for invalid out tensor
        check_out((12, 3, 4), ind=1)
        check_out((3, 8, 24), ind=2)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_tensorinv_singular_input(self, device, dtype):
def check_singular_input(a_shape, ind):
prod_ind_end = np.prod(a_shape[ind:])
a = torch.eye(prod_ind_end, dtype=dtype, device=device)
a[-1, -1] = 0 # Now `a` is singular
a = a.reshape(a_shape)
with self.assertRaisesRegex(RuntimeError, "Failed to invert the input tensor, because it is singular"):
torch.linalg.tensorinv(a, ind=ind)
# test for non-invertible input
check_singular_input((12, 3, 4), ind=1)
check_singular_input((3, 6, 18), ind=2)
    def _test_dot_vdot_vs_numpy(self, device, dtype, torch_fn, np_fn):
        """Shared driver comparing torch_fn (dot/vdot) against np_fn on empty,
        contiguous, 0-strided (expanded), and 2-strided inputs; also checks out=.

        bfloat16 inputs are upcast to float for the NumPy reference, since NumPy
        has no bfloat16.
        """
        def check(x, y):
            # Compare with numpy
            res = torch_fn(x, y)
            if x.dtype == torch.bfloat16:
                ref = torch.from_numpy(np.array(np_fn(x.cpu().float().numpy(), y.cpu().float().numpy())))
            else:
                ref = torch.from_numpy(np.array(np_fn(x.cpu().numpy(), y.cpu().numpy())))
            if res.dtype == torch.bfloat16:
                self.assertEqual(res.cpu(), ref.bfloat16())
            else:
                self.assertEqual(res.cpu(), ref)
            # Test out variant
            out = torch.empty_like(res)
            torch_fn(x, y, out=out)
            self.assertEqual(out, res)
        # Empty
        x = torch.tensor([], dtype=dtype, device=device)
        y = torch.tensor([], dtype=dtype, device=device)
        check(x, y)
        # Contiguous
        x = 0.1 * torch.randn(5000, dtype=dtype, device=device)
        y = 0.1 * torch.randn(5000, dtype=dtype, device=device)
        check(x, y)
        # 0 strided
        y = 0.1 * torch.randn(1, dtype=dtype, device=device).expand(5000)
        check(x, y)
        # 2 strided
        check(x[::2], y[::2])
@dtypes(torch.float, torch.cfloat, torch.bfloat16)
@dtypesIfCUDA(torch.float, torch.cfloat)
@precisionOverride({torch.cfloat: 1e-4, torch.float32: 5e-5, torch.bfloat16: 1e-0})
    def test_dot_vs_numpy(self, device, dtype):
        """Compare torch.dot against numpy.dot (see _test_dot_vdot_vs_numpy)."""
        self._test_dot_vdot_vs_numpy(device, dtype, torch.dot, np.dot)
@dtypes(torch.float, torch.cfloat)
@precisionOverride({torch.cfloat: 1e-4, torch.float32: 5e-5})
    def test_vdot_vs_numpy(self, device, dtype):
        """Compare torch.vdot against numpy.vdot (see _test_dot_vdot_vs_numpy)."""
        self._test_dot_vdot_vs_numpy(device, dtype, torch.vdot, np.vdot)
def _test_dot_vdot_invalid_args(self, device, torch_fn, complex_dtypes=False):
def check(x, y, regex):
with self.assertRaisesRegex(RuntimeError, regex):
torch_fn(x, y)
if complex_dtypes:
x = torch.randn(1, dtype=torch.cfloat, device=device)
y = torch.randn(3, dtype=torch.cdouble, device=device)
else:
x = torch.randn(1, dtype=torch.float, device=device)
y = torch.randn(3, dtype=torch.double, device=device)
check(x, y, 'dot : expected both vectors to have same dtype')
check(x.reshape(1, 1), y, '1D tensors expected')
check(x.expand(9), y.to(x.dtype), 'inconsistent tensor size')
if self.device_type != 'cpu':
x_cpu = x.expand(3).cpu()
check(x_cpu, y.to(x.dtype), 'Expected all tensors to be on the same device')
@onlyNativeDeviceTypes
def test_vdot_invalid_args(self, device):
self._test_dot_vdot_invalid_args(device, torch.vdot)
self._test_dot_vdot_invalid_args(device, torch.vdot, complex_dtypes=True)
@onlyNativeDeviceTypes
def test_dot_invalid_args(self, device):
self._test_dot_vdot_invalid_args(device, torch.dot)
self._test_dot_vdot_invalid_args(device, torch.dot, complex_dtypes=True)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_matrix_rank(self, device, dtype):
        """Test torch.linalg.matrix_rank: invariance under conjugate transpose,
        hermitian=True fast path on A A^H / A^H A, agreement with NumPy
        (including explicit tolerance), and the out= variant."""
        matrix_rank = torch.linalg.matrix_rank
        def run_test(shape0, shape1, batch):
            a = torch.randn(*batch, shape0, shape1, dtype=dtype, device=device)
            rank_a = matrix_rank(a)
            # rank is invariant under conjugate transposition
            self.assertEqual(rank_a, matrix_rank(a.mH))
            aaH = torch.matmul(a, a.mH)
            rank_aaH = matrix_rank(aaH)
            rank_aaH_hermitian = matrix_rank(aaH, hermitian=True)
            self.assertEqual(rank_aaH, rank_aaH_hermitian)
            aHa = torch.matmul(a.mH, a)
            self.assertEqual(matrix_rank(aHa), matrix_rank(aHa, hermitian=True))
            # check against NumPy
            self.assertEqual(rank_a, np.linalg.matrix_rank(a.cpu().numpy()))
            self.assertEqual(matrix_rank(a, 0.01), np.linalg.matrix_rank(a.cpu().numpy(), 0.01))
            self.assertEqual(rank_aaH, np.linalg.matrix_rank(aaH.cpu().numpy()))
            self.assertEqual(matrix_rank(aaH, 0.01), np.linalg.matrix_rank(aaH.cpu().numpy(), 0.01))
            # hermitian flag for NumPy was added in 1.14.0
            if np.lib.NumpyVersion(np.__version__) >= '1.14.0':
                self.assertEqual(rank_aaH_hermitian,
                                 np.linalg.matrix_rank(aaH.cpu().numpy(), hermitian=True))
                self.assertEqual(matrix_rank(aaH, 0.01, True),
                                 np.linalg.matrix_rank(aaH.cpu().numpy(), 0.01, True))
            # check out= variant
            out = torch.empty(a.shape[:-2], dtype=torch.int64, device=device)
            ans = matrix_rank(a, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(ans, rank_a)
        shapes = (3, 13)
        batches = ((), (0, ), (4, ), (3, 5, ))
        for (shape0, shape1), batch in zip(itertools.product(shapes, reversed(shapes)), batches):
            run_test(shape0, shape1, batch)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_matrix_rank_atol(self, device, dtype):
        """matrix_rank with atol= must agree with the legacy tol= argument and
        with numpy.linalg.matrix_rank(tol=...), for float, per-dtype tensor,
        and broadcast tolerances."""
        def run_test_atol(shape0, shape1, batch):
            a = make_tensor((*batch, shape0, shape1), dtype=dtype, device=device)
            # Check against NumPy output
            # Test float tol, and specific value for each matrix
            tolerances = [float(torch.rand(1)), ]
            # Test different types of tol tensor
            for tol_type in all_types():
                tolerances.append(make_tensor(a.shape[:-2], dtype=tol_type, device=device, low=0))
            # Test broadcasting of tol
            if a.ndim > 2:
                tolerances.append(make_tensor(a.shape[-3], dtype=torch.float32, device=device, low=0))
            for tol in tolerances:
                actual = torch.linalg.matrix_rank(a, atol=tol)
                actual_tol = torch.linalg.matrix_rank(a, tol=tol)
                self.assertEqual(actual, actual_tol)
                numpy_tol = tol if isinstance(tol, float) else tol.cpu().numpy()
                expected = np.linalg.matrix_rank(a.cpu().numpy(), tol=numpy_tol)
                self.assertEqual(actual, expected)
        shapes = (3, 13)
        batches = ((), (0, ), (4, ), (3, 5, ))
        for (shape0, shape1), batch in zip(itertools.product(shapes, reversed(shapes)), batches):
            run_test_atol(shape0, shape1, batch)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float64)
    def test_matrix_rank_atol_rtol(self, device, dtype):
        """Check the semantics of atol vs rtol in matrix_rank: rtol is scaled by
        the largest singular value, atol compares directly, and when both are
        given the larger resulting threshold wins."""
        from torch.testing._internal.common_utils import make_fullrank_matrices_with_distinct_singular_values
        # creates a matrix with singular values arange(1/(n+1), 1, 1/(n+1)) and rank=n
        n = 9
        a = make_fullrank_matrices_with_distinct_singular_values(n, n, dtype=dtype, device=device)
        # test float and tensor variants
        for tol_value in [0.51, torch.tensor(0.51, device=device)]:
            # using rtol (relative tolerance) takes into account the largest singular value (0.9 in this case)
            result = torch.linalg.matrix_rank(a, rtol=tol_value)
            self.assertEqual(result, 5)  # there are 5 singular values above 0.9*0.51=0.459
            # atol is used directly to compare with singular values
            result = torch.linalg.matrix_rank(a, atol=tol_value)
            self.assertEqual(result, 4)  # there are 4 singular values above 0.51
            # when both are specified the maximum tolerance is used
            result = torch.linalg.matrix_rank(a, atol=tol_value, rtol=tol_value)
            self.assertEqual(result, 4)  # there are 4 singular values above max(0.51, 0.9*0.51)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_matrix_rank_empty(self, device, dtype):
        """matrix_rank on inputs with zero-sized dimensions must return rank 0
        (NumPy cannot be used as a reference for element-free inputs)."""
        matrix_rank = torch.linalg.matrix_rank
        # NumPy doesn't work for input with no elements
        def run_test(shape0, shape1, batch):
            a = torch.randn(*batch, shape0, shape1, dtype=dtype, device=device)
            rank_a = matrix_rank(a)
            expected = torch.zeros(batch, dtype=torch.int64, device=device)
            self.assertEqual(rank_a, matrix_rank(a.mH))
            aaH = torch.matmul(a, a.mH)
            rank_aaH = matrix_rank(aaH)
            rank_aaH_hermitian = matrix_rank(aaH, hermitian=True)
            self.assertEqual(rank_aaH, rank_aaH_hermitian)
            aHa = torch.matmul(a.mH, a)
            self.assertEqual(matrix_rank(aHa), matrix_rank(aHa, hermitian=True))
            # every variant must report rank 0 for an empty matrix
            self.assertEqual(rank_a, expected)
            self.assertEqual(matrix_rank(a, 0.01), expected)
            self.assertEqual(rank_aaH, expected)
            self.assertEqual(matrix_rank(aaH, 0.01), expected)
            self.assertEqual(rank_aaH_hermitian, expected)
            self.assertEqual(matrix_rank(aaH, 0.01, True), expected)
        batches = ((), (4, ), (3, 5, ))
        for batch in batches:
            run_test(0, 0, batch)
            run_test(0, 3, batch)
            run_test(3, 0, batch)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_matrix_rank_out_errors_and_warnings(self, device, dtype):
        """Error and warning behavior of the out= variant of
        torch.linalg.matrix_rank: wrong dtype raises, wrong device raises,
        wrong shape only warns (the out tensor is resized)."""
        a = torch.eye(2, dtype=dtype, device=device)
        # out dtype must be safely castable from the result dtype
        out = torch.empty(0, dtype=torch.bool, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got result with dtype Bool"):
            torch.linalg.matrix_rank(a, out=out)
        # out device must match the input device
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, dtype=dtype, device=wrong_device)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.matrix_rank(a, out=out)
        # a wrongly shaped (non-empty) out tensor only triggers a resize warning
        with warnings.catch_warnings(record=True) as w:
            out = torch.empty(3, dtype=dtype, device=device)
            torch.linalg.matrix_rank(a, out=out)
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_matrix_rank_basic(self, device, dtype):
matrix_rank = torch.linalg.matrix_rank
a = torch.eye(10, dtype=dtype, device=device)
self.assertEqual(matrix_rank(a).item(), 10)
self.assertEqual(matrix_rank(a, hermitian=True).item(), 10)
a[5, 5] = 0
self.assertEqual(matrix_rank(a).item(), 9)
self.assertEqual(matrix_rank(a, hermitian=True).item(), 9)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_old_matrix_rank(self, device, dtype):
        """Exercise the legacy torch.matrix_rank API (positional tol and
        symmetric flag) and compare it against np.linalg.matrix_rank."""
        a = torch.eye(10, dtype=dtype, device=device)
        self.assertEqual(torch.matrix_rank(a).item(), 10)
        self.assertEqual(torch.matrix_rank(a, True).item(), 10)
        # zeroing one diagonal entry of the identity drops the rank to 9
        a[5, 5] = 0
        self.assertEqual(torch.matrix_rank(a).item(), 9)
        self.assertEqual(torch.matrix_rank(a, True).item(), 9)
        a = torch.randn(24, 42, dtype=dtype, device=device)
        # rank is invariant under transposition
        self.assertEqual(torch.matrix_rank(a), torch.matrix_rank(a.t()))
        aaT = torch.mm(a, a.conj().t())
        self.assertEqual(torch.matrix_rank(aaT), torch.matrix_rank(aaT, True))
        aTa = torch.mm(a.conj().t(), a)
        self.assertEqual(torch.matrix_rank(aTa), torch.matrix_rank(aTa, True))
        # cross-check against NumPy, with and without an explicit tolerance
        a = torch.randn(35, 75, dtype=dtype, device=device)
        self.assertEqual(torch.matrix_rank(a), np.linalg.matrix_rank(a.cpu().numpy()))
        self.assertEqual(torch.matrix_rank(a, 0.01), np.linalg.matrix_rank(a.cpu().numpy(), 0.01))
        aaT = torch.mm(a, a.conj().t())
        self.assertEqual(torch.matrix_rank(aaT), np.linalg.matrix_rank(aaT.cpu().numpy()))
        self.assertEqual(torch.matrix_rank(aaT, 0.01), np.linalg.matrix_rank(aaT.cpu().numpy(), 0.01))
        # the hermitian argument of np.linalg.matrix_rank needs NumPy >= 1.14
        if np.lib.NumpyVersion(np.__version__) >= '1.14.0':
            self.assertEqual(torch.matrix_rank(aaT, True), np.linalg.matrix_rank(aaT.cpu().numpy(), True))
            self.assertEqual(torch.matrix_rank(aaT, 0.01, True), np.linalg.matrix_rank(aaT.cpu().numpy(), 0.01, True))
@onlyNativeDeviceTypes
@dtypes(torch.double)
def test_chain_matmul(self, device, dtype):
t = make_tensor((2, 2), device, dtype)
self.assertEqual(t, torch.chain_matmul(t))
with self.assertRaisesRegex(RuntimeError, r"chain_matmul\(\): Expected one or more matrices"):
torch.chain_matmul()
with self.assertRaisesRegex(RuntimeError, r"Tensor dimension is 1, expected 2 instead"):
torch.chain_matmul(make_tensor(1, device, dtype), make_tensor(1, device, dtype))
    @onlyNativeDeviceTypes
    @dtypes(torch.double, torch.cdouble)
    def test_multi_dot(self, device, dtype):
        """Compare torch.linalg.multi_dot against np.linalg.multi_dot over
        1-D/2-D chains, empty dimensions and noncontiguous inputs."""
        def check(*shapes, noncontiguous=False):
            # build one tensor per shape, run both implementations, compare
            tensors = [make_tensor(shape, device, dtype, noncontiguous=noncontiguous) for shape in shapes]
            np_arrays = [tensor.cpu().numpy() for tensor in tensors]
            res = torch.linalg.multi_dot(tensors).cpu()
            ref = torch.from_numpy(np.array(np.linalg.multi_dot(np_arrays)))
            self.assertEqual(res, ref)
        # chains containing zero-sized dimensions
        check([0], [0])
        check([2], [2, 0])
        check([1, 0], [0])
        check([0, 2], [2, 1])
        check([2, 2], [2, 0])
        check([2, 0], [0, 3])
        check([0, 0], [0, 1])
        check([4, 2], [2, 0], [0, 3], [3, 2])
        # 1-D first/last operands (treated as row/column vectors)
        check([2], [2])
        check([1, 2], [2])
        check([2], [2, 1])
        check([1, 2], [2, 1])
        # regular 2-D chains of increasing length
        check([3, 2], [2, 4])
        check([3], [3, 4], [4, 2], [2, 5], [5])
        check([1, 2], [2, 2], [2, 3], [3, 1])
        check([10, 100], [100, 5], [5, 50])
        check([10, 20], [20, 30], [30, 5])
        # noncontiguous inputs
        check([3, 2], [2, 2], [2, 3], [3, 4], noncontiguous=True)
        check([15, 5], [5, 10], [10, 20], [20, 25], noncontiguous=True)
    @onlyNativeDeviceTypes
    @dtypes(torch.float)
    def test_multi_dot_errors(self, device, dtype):
        """Verify the error messages of torch.linalg.multi_dot for invalid
        operand counts, ranks, dtypes, devices and incompatible shapes.
        NOTE: the regexes match torch's current messages verbatim
        (including the "have be" wording)."""
        def check(tensors, out, msg):
            # each invalid call must raise with the expected message fragment
            with self.assertRaisesRegex(RuntimeError, msg):
                torch.linalg.multi_dot(tensors, out=out)
        a = make_tensor(2, device, dtype)
        check([], None, "expected at least 2 tensors")
        check([a], None, "expected at least 2 tensors")
        # first/last operands must be 1-D or 2-D, middle operands must be 2-D
        check([torch.tensor(1, device=device, dtype=dtype), a], None, "the first tensor must be 1D or 2D")
        check([a, torch.tensor(1, device=device, dtype=dtype)], None, "the last tensor must be 1D or 2D")
        check([a, a, a], None, "tensor 1 must be 2D")
        check([a, make_tensor((2, 2, 2), device, dtype), a], None, "tensor 1 must be 2D")
        # dtype / device mismatches (inputs and out tensor)
        check([a, make_tensor(2, device, torch.double)], None, "all tensors must have be the same dtype")
        check([a, a], torch.empty(0, device=device, dtype=torch.double), "expected out tensor to have dtype")
        if self.device_type == 'cuda':
            check([a, make_tensor(2, 'cpu', dtype)], None, "all tensors must be on the same device")
            check([a, a], torch.empty(0, dtype=dtype), "expected out tensor to be on device")
        # shape mismatches along the contraction dimension
        check([a, make_tensor(3, device, dtype)], None, "cannot be multiplied")
        check([a, make_tensor((3, 2), device, dtype), a], None, "cannot be multiplied")
    @precisionOverride({torch.float32: 5e-6, torch.complex64: 5e-6})
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_qr(self, device, dtype):
        """Exercise the legacy torch.qr API: output shapes, reconstruction
        A == QR, the out= variant, orthonormality of Q and
        upper-triangularity of R, over batched and empty shapes."""
        def run_test(tensor_dims, some):
            A = torch.randn(*tensor_dims, dtype=dtype, device=device)
            Q, R = torch.qr(A, some=some)
            # with some=False (complete QR) and m > n, Q is m x m;
            # otherwise Q has min(m, n) columns (reduced QR)
            m, n = tensor_dims[-2:]
            n_columns = m if (not some) and m > n else min(m, n)
            self.assertEqual(Q.size(-2), m)
            self.assertEqual(R.size(-1), n)
            self.assertEqual(Q.size(-1), n_columns)
            A_ = A.cpu().numpy()
            Q_ = Q.cpu().numpy()
            R_ = R.cpu().numpy()
            # check the reconstruction A = Q @ R
            self.assertEqual(A_, np.matmul(Q_, R_))
            # the out= variant must produce the same factorization
            # (out tensors are pre-filled with NaN so stale values can't pass)
            Q_out, R_out = torch.full_like(Q, math.nan), torch.full_like(R, math.nan)
            torch.qr(A, some=some, out=(Q_out, R_out))
            Q_out_ = Q_out.cpu().numpy()
            R_out_ = R_out.cpu().numpy()
            self.assertEqual(A_, np.matmul(Q_out_, R_out_))
            self.assertEqual(Q_, Q_out_)
            self.assertEqual(R_, R_out_)
            # check Q^H @ Q = I (columns orthonormal) and R upper-triangular
            eye = torch.eye(n_columns, device=device, dtype=dtype).expand(Q.shape[:-2] + (n_columns, n_columns)).cpu().numpy()
            self.assertEqual(np.matmul(Q_.swapaxes(-1, -2).conj(), Q_), eye)
            self.assertEqual(R.triu(), R)
        # shapes cover empty, square, tall, wide and batched matrices
        tensor_dims_list = [(0, 5), (0, 0), (5, 0),
                            (2, 1, 0, 5), (2, 1, 0, 0), (2, 1, 5, 0), (2, 0, 5, 5),
                            (3, 5), (5, 5), (5, 3),
                            (7, 3, 5), (7, 5, 5), (7, 5, 3),
                            (7, 5, 3, 5), (7, 5, 5, 5), (7, 5, 5, 3)]
        for tensor_dims, some in itertools.product(tensor_dims_list, [True, False]):
            run_test(tensor_dims, some)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_qr_vs_numpy(self, device, dtype):
sizes_to_test = [
(7, 5),
(5, 7),
(5, 0),
(0, 5),
]
for size in sizes_to_test:
t = torch.randn(size, device=device, dtype=dtype)
np_t = t.cpu().numpy()
for mode in ['reduced', 'complete']:
exp_q, exp_r = np.linalg.qr(np_t, mode=mode)
q, r = torch.linalg.qr(t, mode=mode)
self.assertEqual(q, exp_q)
self.assertEqual(r, exp_r)
exp_r = np.linalg.qr(np_t, mode='r')
q, r = torch.linalg.qr(t, mode='r')
self.assertEqual(q.shape, (0,))
self.assertEqual(q.dtype, t.dtype)
self.assertEqual(q.device, t.device)
self.assertEqual(r, exp_r)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float)
    def test_linalg_qr_autograd_errors(self, device, dtype):
        """Backward through linalg.qr must raise for the configurations
        whose derivative is not implemented: mode='r', and mode='complete'
        with more rows than columns."""
        inp = torch.randn((5, 7), device=device, dtype=dtype, requires_grad=True)
        q, r = torch.linalg.qr(inp, mode='r')
        # mode='r' returns an empty placeholder Q
        self.assertEqual(q.shape, (0,))
        b = torch.sum(r)
        with self.assertRaisesRegex(RuntimeError,
                                    "The derivative of qr is not implemented when mode='r'"):
            b.backward()
        inp = torch.randn((7, 5), device=device, dtype=dtype, requires_grad=True)
        q, r = torch.linalg.qr(inp, mode='complete')
        b = torch.sum(r)
        with self.assertRaisesRegex(RuntimeError,
                                    "The derivative of qr is not implemented when mode='complete' and nrows > ncols"):
            b.backward()
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_qr_batched(self, device, dtype):
        """Compare batched torch.linalg.qr against NumPy applied one matrix
        at a time along the leading batch dimension."""
        def np_qr_batched(a, mode):
            """Apply np.linalg.qr matrix-by-matrix and stack the results."""
            all_q = []
            all_r = []
            for matrix in a:
                result = np.linalg.qr(matrix, mode=mode)
                if mode == 'r':
                    # mode='r' returns only the R factor
                    all_r.append(result)
                else:
                    q, r = result
                    all_q.append(q)
                    all_r.append(r)
            if mode == 'r':
                return np.array(all_r)
            else:
                return np.array(all_q), np.array(all_r)
        t = torch.randn((3, 7, 5), device=device, dtype=dtype)
        np_t = t.cpu().numpy()
        for mode in ['reduced', 'complete']:
            exp_q, exp_r = np_qr_batched(np_t, mode=mode)
            q, r = torch.linalg.qr(t, mode=mode)
            self.assertEqual(q, exp_q)
            self.assertEqual(r, exp_r)
        # for mode='r' torch returns an empty placeholder Q that still
        # carries the input's dtype and device
        exp_r = np_qr_batched(np_t, mode='r')
        q, r = torch.linalg.qr(t, mode='r')
        self.assertEqual(q.shape, (0,))
        self.assertEqual(q.dtype, t.dtype)
        self.assertEqual(q.device, t.device)
        self.assertEqual(r, exp_r)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_qr_out(self, device, dtype):
sizes_to_test = [
(7, 5),
(5, 7),
(5, 0),
(0, 5),
]
for size in sizes_to_test:
t = torch.randn(size, device=device, dtype=dtype)
np_t = t.cpu().numpy()
for mode in ['reduced', 'complete', 'r']:
q, r = torch.linalg.qr(t, mode=mode)
out = (torch.empty((0), dtype=dtype, device=device),
torch.empty((0), dtype=dtype, device=device))
q2, r2 = torch.linalg.qr(t, mode=mode, out=out)
self.assertIs(q2, out[0])
self.assertIs(r2, out[1])
self.assertEqual(q2, q)
self.assertEqual(r2, r)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float)
def test_qr_error_cases(self, device, dtype):
t1 = torch.randn(5, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, 'qr input should have at least 2 dimensions, but has 1 dimensions instead'):
torch.linalg.qr(t1)
t2 = torch.randn((5, 7), device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "qr received unrecognized mode 'hello'"):
torch.linalg.qr(t2, mode='hello')
def _check_einsum(self, *args, np_args=None):
if np_args is None:
np_args = [arg.cpu().numpy() if isinstance(arg, torch.Tensor) else arg for arg in args]
res = torch.einsum(*args)
ref = np.einsum(*np_args)
self.assertEqual(torch.from_numpy(np.array(ref)), res)
    @dtypes(torch.double, torch.cdouble)
    def test_einsum(self, device, dtype):
        """Smoke-test torch.einsum against np.einsum over a broad set of
        equations: vector/matrix ops, batched contractions, diagonals,
        ellipsis handling and noncontiguous operands."""
        x = make_tensor((5,), device, dtype)
        y = make_tensor((7,), device, dtype)
        A = make_tensor((3, 5), device, dtype)
        B = make_tensor((2, 5), device, dtype)
        C = make_tensor((2, 3, 5), device, dtype)
        D = make_tensor((2, 5, 7), device, dtype)
        E = make_tensor((7, 9), device, dtype)
        F = make_tensor((2, 3, 3, 5), device, dtype)
        G = make_tensor((5, 4, 6), device, dtype)
        H = make_tensor((4, 4), device, dtype)
        I = make_tensor((2, 3, 2), device, dtype)
        # vector operations: sum, dot, elementwise product, outer product
        self._check_einsum('i->', x)
        self._check_einsum('i,i->', x, x)
        self._check_einsum('i,i->i', x, x)
        self._check_einsum('i,j->ij', x, y)
        # matrix operations: transpose, reductions, matvec, matmul
        self._check_einsum("ij->ji", A)
        self._check_einsum("ij->j", A)
        self._check_einsum("ij->i", A)
        self._check_einsum("ij,ij->ij", A, A)
        self._check_einsum("ij,j->i", A, x)
        self._check_einsum("ij,kj->ik", A, B)
        self._check_einsum("ij,ab->ijab", A, E)
        # batched / higher-rank contractions
        self._check_einsum("Aij,Ajk->Aik", C, D)
        self._check_einsum("ijk,jk->i", C, A)
        self._check_einsum("aij,jk->aik", D, E)
        self._check_einsum("abCd,dFg->abCFg", F, G)
        self._check_einsum("ijk,jk->ik", C, A)
        self._check_einsum("ijk,jk->ij", C, A)
        self._check_einsum("ijk,ik->j", C, B)
        self._check_einsum("ijk,ik->jk", C, B)
        # repeated subscripts: traces and diagonals
        self._check_einsum("ii", H)
        self._check_einsum("ii->i", H)
        self._check_einsum('iji->j', I)
        self._check_einsum('ngrg...->nrg...', make_tensor((2, 1, 3, 1, 4), device, dtype))
        # ellipsis broadcasting
        self._check_einsum("i...->...", H)
        self._check_einsum("ki,...k->i...", A.t(), B)
        self._check_einsum("k...,jk->...", A.t(), B)
        self._check_einsum('...ik, ...j -> ...ij', C, x)
        self._check_einsum('Bik,k...j->i...j', C, make_tensor((5, 3), device, dtype))
        self._check_einsum('i...j, ij... -> ...ij', C, make_tensor((2, 5, 2, 3), device, dtype))
        # noncontiguous and strided operands
        l = make_tensor((5, 10), device, dtype, noncontiguous=True)
        r = make_tensor((5, 20), device, dtype, noncontiguous=True)
        w = make_tensor((15, 10, 20), device, dtype)
        self._check_einsum("bn,anm,bm->ba", l, w, r)
        self._check_einsum("bn,Anm,bm->bA", l[:, ::2], w[:, ::2, ::2], r[:, ::2])
    @dtypes(torch.double, torch.cdouble)
    def test_einsum_sublist_format(self, device, dtype):
        """torch.einsum's sublist calling convention: operands interleaved
        with integer-label lists, Ellipsis for broadcast dims, optional
        trailing output sublist."""
        x = make_tensor((5,), device, dtype)
        y = make_tensor((7,), device, dtype)
        A = make_tensor((3, 5), device, dtype)
        B = make_tensor((2, 5), device, dtype)
        C = make_tensor((2, 1, 3, 1, 4), device, dtype)
        # no output sublist vs explicit (possibly empty) output sublist
        self._check_einsum(x, [0])
        self._check_einsum(x, [0], [])
        self._check_einsum(x, [0], y, [1], [0, 1])
        self._check_einsum(A, [0, 1], [1, 0])
        self._check_einsum(A, [0, 1], x, [1], [0])
        self._check_einsum(A, [0, 1], B, [2, 1])
        self._check_einsum(A, [0, 1], B, [2, 1], [0, 2])
        # repeated labels (diagonal) combined with Ellipsis
        self._check_einsum(C, [0, 1, 2, 1, Ellipsis], [0, 2, 1, Ellipsis])
        self._check_einsum(A.t(), [0, 1], B, [Ellipsis, 0])
        self._check_einsum(A.t(), [0, 1], B, [Ellipsis, 0], [1, Ellipsis])
        self._check_einsum(A.t(), [0, Ellipsis], B, [1, 0], [Ellipsis])
        # torch.bilinear with noncontiguous tensors
        l = make_tensor((5, 10), device, dtype, noncontiguous=True)
        r = make_tensor((5, 20), device, dtype, noncontiguous=True)
        w = make_tensor((15, 10, 20), device, dtype)
        self._check_einsum(l, [40, 41], w, [2, 41, 50], r, [40, 50], [40, 2])
    @dtypes(torch.double, torch.cdouble)
    def test_einsum_random(self, device, dtype):
        """Fuzz torch.einsum against np.einsum with randomly generated
        equations/operands, exercising diagonals, ellipsis and broadcasting,
        in both the string and the sublist calling conventions."""
        def convert_label(label):
            # integer label -> subscript character: 0..25 -> 'A'..'Z',
            # 26..51 -> 'a'..'z', Ellipsis -> '...'
            if label == ...:
                return '...'
            elif label < 26:
                return chr(ord('A') + label)
            else:
                return chr(ord('a') + label - 26)
        def convert_sublist(sublist):
            # sublist of integer labels -> equation substring
            return ''.join(convert_label(label) for label in sublist)
        def test(n=10,
                 n_labels=5,
                 min_ops=1, max_ops=3,
                 min_dims=1, max_dims=3,
                 min_size=1, max_size=8,
                 max_out_dim=3,
                 enable_diagonals=True,
                 ellipsis_prob=0.5,
                 broadcasting_prob=0.1):
            # generates n random equations with given number of
            # (distinct) labels, operands, dimensions and sizes
            all_labels = torch.arange(52)
            assert 0 <= n
            assert 0 <= n_labels < len(all_labels)
            assert 0 < min_ops <= max_ops
            assert 0 <= min_dims <= max_dims
            assert 0 <= min_size <= max_size
            assert 0 <= max_out_dim
            assert enable_diagonals or max_dims <= n_labels
            for _ in range(n):
                # pick the label pool and a fixed size per label so that
                # the same label always has a consistent dimension size
                possible_labels = all_labels[torch.randperm(len(all_labels))[:n_labels]]
                labels_size = torch.randint_like(all_labels, min_size, max_size + 1)
                ellipsis_shape = torch.randint(min_size, max_size + 1, (max_dims - min_dims,))
                operands = []
                sublists = []
                ell_size = 0
                valid_labels = set()
                for _ in range(random.randint(min_ops, max_ops)):
                    # sample this operand's labels; with enable_diagonals,
                    # sampling is with replacement, producing repeats
                    n_dim = random.randint(min_dims, max_dims)
                    labels_idx = torch.ones(len(possible_labels)).multinomial(n_dim, enable_diagonals)
                    labels = possible_labels[labels_idx]
                    valid_labels.update(labels.tolist())
                    shape = labels_size[labels]
                    # randomly set some dims to 1 to exercise broadcasting
                    mask = Binomial(probs=broadcasting_prob).sample((n_dim,))
                    broadcast_labels = torch.unique(labels[mask == 1])
                    shape[(labels[..., None] == broadcast_labels).any(-1)] = 1
                    labels = labels.tolist()
                    shape = shape.tolist()
                    # optionally splice an ellipsis group into this operand
                    if n_dim < max_dims and torch.rand(1) < ellipsis_prob:
                        ell_num_dim = random.randint(1, max_dims - n_dim)
                        ell_size = max(ell_size, ell_num_dim)
                        ell_shape = ellipsis_shape[-ell_num_dim:]
                        # again, randomly set some dimensions to size 1
                        mask = Binomial(probs=broadcasting_prob).sample((ell_num_dim,))
                        ell_shape[mask == 1] = 1
                        ell_index = random.randint(0, n_dim)
                        shape[ell_index:ell_index] = ell_shape
                        labels.insert(ell_index, ...)
                    operands.append(make_tensor(shape, device, dtype))
                    sublists.append(labels)
                # NumPy has a bug with the sublist format so for now we compare PyTorch sublist
                # implementation against the equation format implementation of NumPy
                # https://github.com/numpy/numpy/issues/10926
                np_operands = [op.cpu().numpy() for op in operands]
                # test equation format (implicit output)
                equation = ','.join(convert_sublist(l) for l in sublists)
                self._check_einsum(equation, *operands, np_args=(equation, *np_operands))
                # test sublist format (implicit output)
                args = [*itertools.chain(*zip(operands, sublists))]
                self._check_einsum(*args, np_args=(equation, *np_operands))
                # generate an explicit output made of a random subset of the
                # labels seen so far plus an ellipsis
                out_sublist = []
                num_out_labels = max(0, random.randint(0, min(max_out_dim, len(valid_labels))) - ell_size)
                if num_out_labels > 0:
                    out_labels_idx = torch.ones(len(valid_labels)).multinomial(num_out_labels)
                    out_sublist = torch.tensor(list(valid_labels))[out_labels_idx].tolist()
                out_sublist.insert(random.randint(0, num_out_labels), ...)
                # test equation format with explicit output
                equation += '->' + convert_sublist(out_sublist)
                self._check_einsum(equation, *operands, np_args=(equation, *np_operands))
                # test sublist format with explicit output
                args.append(out_sublist)
                self._check_einsum(*args, np_args=(equation, *np_operands))
        test(100)
    def test_einsum_corner_cases(self, device):
        """Degenerate einsum inputs: scalar operands, whitespace-only
        equations, empty tensors and bare ellipses, each with a hard-coded
        expected output."""
        def check(equation, *operands, expected_output):
            # tuples denote shapes for make_tensor; anything else is literal data
            tensors = [torch.tensor(operand, device=device, dtype=torch.float32) if not isinstance(operand, tuple)
                       else make_tensor(operand, device, torch.float32) for operand in operands]
            output = torch.einsum(equation, tensors)
            self.assertEqual(output, torch.tensor(expected_output, dtype=torch.float32, device=device))
        # scalar operands and whitespace-only equations
        check(' ', 1, expected_output=1)
        check(' -> ', 1, expected_output=1)
        check(' , ', 2, 2, expected_output=4)
        check(' , , ', 2, 2, 2, expected_output=8)
        check(' , -> ', 2, 2, expected_output=4)
        check(' i ', [1], expected_output=[1])
        check(' i -> ', [1], expected_output=1)
        check(' i -> i ', [1], expected_output=[1])
        check(' i , i ', [2], [2], expected_output=4)
        check(' i , i -> i ', [2], [2], expected_output=[4])
        # tensors with zero-sized dimensions
        check('i', [], expected_output=[])
        check(' i j -> j', [[], []], expected_output=[])
        check('ij->i', [[], []], expected_output=[0., 0.])
        check(' i j k  ,  k  -> i j ', (3, 0, 6), (6,), expected_output=[[], [], []])
        # broadcasting without an ellipsis in the equation
        check('i,j', [2], [1, 2], expected_output=[[2, 4]])
        check('i,ij->ij', [1, 2], [[1, 2, 3], [2, 3, 4]], expected_output=[[1, 2, 3], [4, 6, 8]])
        # ellipsis with scalars, vectors and reductions
        check('...', 1, expected_output=1)
        check('...->', 1, expected_output=1)
        check('...->...', 1, expected_output=1)
        check('...', [1], expected_output=[1])
        check('...->', [1], expected_output=1)
        check('z...->z', [1], expected_output=[1])
        check('Z...->...Z', [1], expected_output=[1])
        check('...a->', [[2], [4]], expected_output=6)
        check('a...b->ab', [[[1], [2]], [[3], [4]]], expected_output=[[3], [7]])
    def test_einsum_error_cases(self, device):
        """torch.einsum input validation: malformed equations, operand
        count mismatches, invalid subscripts/sublists and broadcast
        failures, each matched against its exact error message."""
        def check(*args, regex, exception=RuntimeError):
            # every message is prefixed with "einsum():"
            with self.assertRaisesRegex(exception, r'einsum\(\):.*' + regex):
                torch.einsum(*args)
        x = make_tensor((2,), device, torch.float32)
        y = make_tensor((2, 3), device, torch.float32)
        # malformed equation strings
        check('', [], regex=r'at least one operand', exception=ValueError)
        check('. ..', [x], regex=r'found \'.\' for operand 0 that is not part of any ellipsis')
        check('... ...', [x], regex=r'found \'.\' for operand 0 for which an ellipsis was already found')
        check('1', [x], regex=r'invalid subscript given at index 0')
        # operand-count / dimension-count mismatches
        check(',', [x], regex=r'fewer operands were provided than specified in the equation')
        check('', [x, x], regex=r'more operands were provided than specified in the equation')
        check('', [x], regex=r'the number of subscripts in the equation \(0\) does not match the number '
              r'of dimensions \(1\) for operand 0 and no ellipsis was given')
        check('ai', [x], regex=r'the number of subscripts in the equation \(2\) does not match the number '
              r'of dimensions \(1\) for operand 0 and no ellipsis was given')
        check('ai...', [x], regex=r'the number of subscripts in the equation \(2\) is more than the number '
              r'of dimensions \(1\) for operand 0')
        # malformed output subscripts
        check('a->... .', [x], regex=r'found \'.\' for output but an ellipsis \(...\) was already found')
        check('a->..', [x], regex=r'found \'.\' for output that is not part of any ellipsis \(...\)')
        check('a->1', [x], regex=r'invalid subscript given at index 3')
        check('a->aa', [x], regex=r'output subscript a appears more than once in the output')
        check('a->i', [x], regex=r'output subscript i does not appear in the equation for any input operand')
        # size mismatches: repeated subscript and failed broadcast
        check('aa', [y], regex=r'subscript a is repeated for operand 0 but the sizes don\'t match, 3 != 2')
        check('a, ba', [x, y], regex=r'operands do not broadcast with remapped shapes \[original->remapped\]: '
              r'\[2\]->\[1, 2\] \[2, 3\]->\[2, 3\]')
        # sublist-format labels must be in [0, 52)
        check(x, [-1], regex=r'not within the valid range \[0, 52\)', exception=ValueError)
        check(x, [52], regex=r'not within the valid range \[0, 52\)', exception=ValueError)
def triangular_solve_test_helper(self, A_dims, b_dims, upper, unitriangular,
device, dtype):
triangle_function = torch.triu if upper else torch.tril
b = torch.randn(*b_dims, dtype=dtype, device=device)
A = torch.randn(*A_dims, dtype=dtype, device=device)
# create positive definite matrix
A = torch.matmul(A, A.mT)
A_triangular = triangle_function(A)
if unitriangular:
A_triangular.diagonal(dim1=-2, dim2=-1).fill_(1.)
return b, A_triangular
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_triangular_solve(self, device, dtype):
        """Solve A x = b (or A^T x = b when transpose=True) for a single
        triangular A and verify the reconstruction via NumPy matmul, over
        all flag combinations and empty sizes."""
        ks = [0, 1, 3]
        ns = [0, 5]
        for k, n, (upper, unitriangular, transpose) in itertools.product(ks, ns,
                                                                         itertools.product([True, False], repeat=3)):
            b, A = self.triangular_solve_test_helper((n, n), (n, k), upper,
                                                     unitriangular, device, dtype)
            x = torch.triangular_solve(b, A, upper=upper, unitriangular=unitriangular, transpose=transpose)[0]
            if transpose:
                # transpose=True solves A^T x = b, so check b == A^T x
                self.assertEqual(b, np.matmul(A.t().cpu(), x.cpu()))
            else:
                self.assertEqual(b, np.matmul(A.cpu(), x.cpu()))
    @skipCPUIfNoLapack
    @skipCUDAIfNoMagma
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_triangular_solve_batched(self, device, dtype):
        """Batched triangular_solve must agree with solving each matrix in
        the batch individually, and must satisfy A x = b; also covers
        empty and zero-batch inputs."""
        def triangular_solve_batch_helper(A_dims, b_dims, upper, unitriangular, transpose):
            b, A = self.triangular_solve_test_helper(A_dims, b_dims, upper,
                                                     unitriangular, device, dtype)
            # reference: solve each batch element separately and stack
            x_exp_list = []
            for i in range(b_dims[0]):
                x_exp_list.append(torch.triangular_solve(b[i], A[i], upper=upper,
                                                         unitriangular=unitriangular,
                                                         transpose=transpose)[0])
            x_exp = torch.stack(x_exp_list)  # Stacked output
            x_act = torch.triangular_solve(b, A, upper=upper,
                                           unitriangular=unitriangular,
                                           transpose=transpose)[0]  # Actual output
            self.assertEqual(x_act, x_exp)  # Equality check
            if transpose:
                A = A.mT
            # reconstruction check: A x == b
            Ax = np.matmul(A.cpu(), x_act.cpu())
            self.assertEqual(b, Ax)
        def triangular_solve_zero_batch_helper(A_dims, b_dims, upper, unitriangular, transpose):
            # zero-batch inputs must produce an output of matching shape
            b, A = self.triangular_solve_test_helper(A_dims, b_dims, upper,
                                                     unitriangular, device, dtype)
            x = torch.triangular_solve(b, A, upper=upper,
                                       unitriangular=unitriangular,
                                       transpose=transpose)[0]
            self.assertTrue(x.shape == b.shape)
        for upper, unitriangular, transpose in itertools.product([True, False], repeat=3):
            batchsize = 3
            triangular_solve_batch_helper((batchsize, 5, 5), (batchsize, 5, 10),
                                          upper, unitriangular, transpose)
            # test empty input
            triangular_solve_batch_helper((batchsize, 0, 0), (batchsize, 0, 10),
                                          upper, unitriangular, transpose)
            triangular_solve_batch_helper((batchsize, 0, 0), (batchsize, 0, 0),
                                          upper, unitriangular, transpose)
            # test zero batch case
            batchsize = 0
            triangular_solve_zero_batch_helper((batchsize, 5, 5), (batchsize, 5, 10),
                                               upper, unitriangular, transpose)
    @slowTest
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_triangular_solve_batched_many_batches(self, device, dtype):
        """Stress triangular_solve with very large batch counts, batching
        either A (256*256 matrices) or b (512*512 right-hand sides)."""
        for upper, transpose, unitriangular in itertools.product([True, False], repeat=3):
            # test batched A case
            b, A = self.triangular_solve_test_helper((256, 256, 5, 5), (5, 1),
                                                     upper, unitriangular, device, dtype)
            x, _ = torch.triangular_solve(b, A,
                                          upper=upper, transpose=transpose, unitriangular=unitriangular)
            if transpose:
                A = A.mT
            Ax = torch.matmul(A, x)
            # single-precision dtypes need a looser relative tolerance here
            rtol = 1e-2 if dtype in [torch.float32, torch.complex64] else self.precision
            self.assertEqual(Ax, b.expand_as(Ax), atol=self.precision, rtol=rtol)
            # test batched b case
            b, A = self.triangular_solve_test_helper((3, 3), (512, 512, 3, 1),
                                                     upper, unitriangular, device, dtype)
            x, _ = torch.triangular_solve(b, A, upper=upper, transpose=transpose,
                                          unitriangular=unitriangular)
            if transpose:
                A = A.mT
            self.assertEqual(torch.matmul(A, x), b)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @unittest.skipIf(not TEST_SCIPY, "SciPy not found")
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_triangular_solve_batched_broadcasting(self, device, dtype):
        """triangular_solve with broadcasting batch dims of A and/or b,
        verified against scipy.linalg.solve_triangular applied after
        explicit broadcasting."""
        from scipy.linalg import solve_triangular as tri_solve
        def scipy_tri_solve_batched(A, B, upper, trans, diag):
            # broadcast the batch dims of A and B explicitly, then solve
            # each matrix pair with SciPy and restack
            batch_dims_A, batch_dims_B = A.shape[:-2], B.shape[:-2]
            single_dim_A, single_dim_B = A.shape[-2:], B.shape[-2:]
            expand_dims = tuple(torch._C._infer_size(torch.Size(batch_dims_A),
                                                     torch.Size(batch_dims_B)))
            expand_A = np.broadcast_to(A, expand_dims + single_dim_A)
            expand_B = np.broadcast_to(B, expand_dims + single_dim_B)
            flat_A = expand_A.reshape((-1,) + single_dim_A)
            flat_B = expand_B.reshape((-1,) + single_dim_B)
            flat_X = np.vstack([tri_solve(a, b, lower=(not upper), trans=int(trans), unit_diagonal=diag)
                                for a, b in zip(flat_A, flat_B)])
            return flat_X.reshape(expand_B.shape)
        def run_test(A_dims, b_dims, device, upper, transpose, unitriangular):
            b, A = self.triangular_solve_test_helper(A_dims, b_dims, upper,
                                                     unitriangular, device, dtype)
            x_exp = torch.as_tensor(scipy_tri_solve_batched(A.cpu().numpy(), b.cpu().numpy(),
                                                            upper, transpose, unitriangular))
            x = torch.triangular_solve(b, A, upper=upper, transpose=transpose, unitriangular=unitriangular)[0]
            self.assertEqual(x, x_exp.to(device))
        for upper, transpose, unitriangular in itertools.product([True, False], repeat=3):
            # test against scipy.linalg.solve_triangular
            run_test((2, 1, 3, 4, 4), (2, 1, 3, 4, 6), device, upper, transpose, unitriangular)  # no broadcasting
            run_test((2, 1, 3, 4, 4), (4, 6), device, upper, transpose, unitriangular)  # broadcasting b
            run_test((4, 4), (2, 1, 3, 4, 2), device, upper, transpose, unitriangular)  # broadcasting A
            run_test((1, 3, 1, 4, 4), (2, 1, 3, 4, 5), device, upper, transpose, unitriangular)  # broadcasting A & b
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_triangular_solve_out_errors_and_warnings(self, device, dtype):
        """Error and warning behavior of the out= variant of
        torch.triangular_solve: wrong dtype or device for either out tensor
        raises, wrong shape only warns (the tensors are resized)."""
        # dtypes should be safely castable
        a = torch.eye(2, dtype=dtype, device=device)
        b = torch.randn(2, 1, dtype=dtype, device=device)
        # wrong dtype for the solution tensor
        out = torch.empty_like(b).to(torch.int)
        clone_a = torch.empty_like(a)
        with self.assertRaisesRegex(RuntimeError, "Expected out tensor to have dtype"):
            torch.triangular_solve(b, a, out=(out, clone_a))
        # wrong dtype for the cloned-coefficient tensor
        out = torch.empty_like(b)
        clone_a = clone_a.to(torch.int)
        with self.assertRaisesRegex(RuntimeError, "Expected out tensor to have dtype"):
            torch.triangular_solve(b, a, out=(out, clone_a))
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, dtype=dtype, device=wrong_device)
            clone_a = torch.empty_like(a)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.triangular_solve(b, a, out=(out, clone_a))
            out = torch.empty(0, dtype=dtype, device=device)
            clone_a = torch.empty_like(a).to(wrong_device)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.triangular_solve(b, a, out=(out, clone_a))
        # if out tensor with wrong shape is passed a warning is given
        with warnings.catch_warnings(record=True) as w:
            out = torch.empty(1, dtype=dtype, device=device)
            clone_a = torch.empty(1, dtype=dtype, device=device)
            # Trigger warning
            torch.triangular_solve(b, a, out=(out, clone_a))
            # Check warning occurs (one resize warning per out tensor)
            self.assertEqual(len(w), 2)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
            self.assertTrue("An output with one or more elements was resized" in str(w[-2].message))
def check_single_matmul(self, x, y, shape):
a = np.array(x, copy=False)
b = np.array(y, copy=False)
expected = np.matmul(a, b)
ans = torch.matmul(x, y)
self.assertTrue(ans.is_contiguous())
self.assertTrue(np.array_equal(ans, expected))
out = torch.zeros(*shape, dtype=torch.int64).to(x.device)
ans = torch.matmul(x, y, out=out)
self.assertIs(ans, out)
self.assertTrue(ans.is_contiguous())
self.assertTrue(np.array_equal(ans, expected))
# TODO: update to run on CUDA, too
@onlyCPU
def test_matmul_small_brute_force_1d_Nd(self, device):
# Issue #20452: range(0, 10) does not work.
n = 1
for m in range(1, 8):
for p in range(1, 8):
for o in range(1, 5):
# 1d, 3d, inner dimensions C
x = torch.arange(m, device=device)
y = torch.arange(o * m * p, device=device).reshape(o, m, p)
self.check_single_matmul(x, y, (o, n, p))
# 1d, 3d, inner dimensions Fortran
x = torch.arange(m, device=device)
y = torch.arange(o * p * m, device=device).reshape(o, p, m).mT
self.check_single_matmul(x, y, (o, n, p))
# 1d, 3d, inner dimensions non-contiguous
x = torch.arange(2 * m, device=device)[::2]
y = torch.arange(o * m * 2 * p, device=device).reshape(o, m, 2 * p)[:, :, ::2]
self.check_single_matmul(x, y, (o, n, p))
for r in range(1, 5):
# 1d, 4d, inner dimensions C
x = torch.arange(m)
y = torch.arange(r * o * m * p, device=device).reshape(r, o, m, p)
self.check_single_matmul(x, y, (r, o, n, p))
# 1d, 4d, inner dimensions Fortran
x = torch.arange(m)
y = torch.arange(r * o * p * m, device=device).reshape(r, o, p, m).mT
self.check_single_matmul(x, y, (r, o, n, p))
# 1d, 4d, inner dimensions non-contiguous
x = torch.arange(2 * m, device=device)[::2]
y = torch.arange(r * o * m * 2 * p, device=device).reshape(r, o, m, 2 * p)[:, :, :, ::2]
self.check_single_matmul(x, y, (r, o, n, p))
    # TODO: update to run on CUDA, too
    @onlyCPU
    def test_matmul_small_brute_force_2d_Nd(self, device):
        """Brute-force matmul of a 2-D operand against 3-D/4-D operands over
        small sizes, covering C-contiguous, transposed (Fortran-order) and
        non-contiguous inner dimensions."""
        # Issue #20452: range(0, 10) does not work.
        for n in range(1, 5):
            for m in range(1, 5):
                for p in range(1, 5):
                    for o in range(1, 3):
                        # 2d, 3d, inner dimensions C
                        x = torch.arange(n * m, device=device).reshape(n, m)
                        y = torch.arange(o * m * p, device=device).reshape(o, m, p)
                        self.check_single_matmul(x, y, (o, n, p))
                        # 2d, 3d, inner dimensions Fortran
                        x = torch.arange(m * n, device=device).reshape(m, n).mT
                        y = torch.arange(o * p * m, device=device).reshape(o, p, m).mT
                        self.check_single_matmul(x, y, (o, n, p))
                        # 2d, 3d, inner dimensions non-contiguous
                        x = torch.arange(n * 2 * m, device=device).reshape(n, 2 * m)[:, ::2]
                        y = torch.arange(o * m * 2 * p, device=device).reshape(o, m, 2 * p)[:, :, ::2]
                        self.check_single_matmul(x, y, (o, n, p))
                        for r in range(1, 2):
                            # 2d, 4d, inner dimensions C
                            x = torch.arange(n * m, device=device).reshape(n, m)
                            y = torch.arange(r * o * m * p, device=device).reshape(r, o, m, p)
                            self.check_single_matmul(x, y, (r, o, n, p))
                            # 2d, 4d, inner dimensions Fortran
                            x = torch.arange(m * n, device=device).reshape(m, n).mT
                            y = torch.arange(r * o * p * m, device=device).reshape(r, o, p, m).mT
                            self.check_single_matmul(x, y, (r, o, n, p))
                            # 2d, 4d, inner dimensions non-contiguous
                            x = torch.arange(n * 2 * m, device=device).reshape(n, 2 * m)[:, ::2]
                            y = torch.arange(r * o * m * 2 * p, device=device).reshape(r, o, m, 2 * p)[:, :, :, ::2]
                            self.check_single_matmul(x, y, (r, o, n, p))
def test_linear_algebra_scalar_raises(self, device) -> None:
m = torch.randn(5, 5, device=device)
v = torch.randn(5, device=device)
s = torch.tensor(7, device=device)
self.assertRaises(RuntimeError, lambda: torch.mv(m, s))
self.assertRaises(RuntimeError, lambda: torch.addmv(v, m, s))
@dtypes(torch.float32, torch.complex64)
def test_cross(self, device, dtype):
    """The out= variant of torch.cross must match the functional result."""
    a = torch.rand(100, 3, 100, dtype=dtype, device=device)
    b = torch.rand(100, 3, 100, dtype=dtype, device=device)
    expected = torch.cross(a, b)
    out = torch.tensor((), dtype=dtype, device=device)
    torch.cross(a, b, out=out)
    self.assertEqual(expected, out)
@dtypes(torch.float32, torch.complex64)
def test_linalg_cross(self, device, dtype):
    """torch.linalg.cross: out= variant, broadcasting, and non-contiguous inputs.

    Non-contiguous layouts are validated against a NumPy np.cross reference.
    """
    x = torch.rand(100, 3, 100, dtype=dtype, device=device)
    y = torch.rand(100, 3, 100, dtype=dtype, device=device)
    res1 = torch.linalg.cross(x, y, dim=1)
    res2 = torch.tensor((), dtype=dtype, device=device)
    torch.linalg.cross(x, y, dim=1, out=res2)
    self.assertEqual(res1, res2)

    # test for broadcastable inputs
    x = torch.rand(1, 3, 2, dtype=dtype, device=device)
    y = torch.rand(4, 3, 1, dtype=dtype, device=device)
    res1 = torch.linalg.cross(x, y, dim=1)
    res2 = torch.tensor((), dtype=dtype, device=device)
    torch.linalg.cross(x, y, dim=1, out=res2)
    self.assertEqual(res1, res2)

    # non contiguous case 1: channels_last memory format
    x = torch.rand((4, 4, 4, 3), dtype=dtype,
                   device=device).contiguous(memory_format=torch.channels_last)  # non-contiguous
    y = torch.rand((4, 4, 4, 3), dtype=dtype,
                   device=device).contiguous(memory_format=torch.channels_last)  # non-contiguous
    np_expected_ref = np.cross(x.cpu().numpy(), y.cpu().numpy(), axis=-1)
    res = torch.linalg.cross(x, y, dim=-1)
    # numpy reference compared to torch result
    self.assertEqual(res.cpu().numpy(), np_expected_ref)

    # non contiguous case 2: permuted second operand
    x = torch.rand(1, 3, 2, dtype=dtype, device=device)  # contiguous
    y = torch.rand(1, 3, 4, dtype=dtype, device=device).permute(2, 1, 0)  # non-contiguous
    np_expected_ref = np.cross(x.cpu().numpy(), y.cpu().numpy(), axis=1)
    res = torch.linalg.cross(x, y, dim=1)
    # numpy reference compared to torch result
    self.assertEqual(res.cpu().numpy(), np_expected_ref)

    # non contiguous case 3: both operands permuted
    x = torch.rand(2, 3, 1, dtype=dtype, device=device).permute(2, 1, 0)  # non-contiguous
    y = torch.rand(1, 3, 4, dtype=dtype, device=device).permute(2, 1, 0)  # non-contiguous
    np_expected_ref = np.cross(x.cpu().numpy(), y.cpu().numpy(), axis=1)
    res = torch.linalg.cross(x, y, dim=1)
    # numpy reference compared to torch result
    self.assertEqual(res.cpu().numpy(), np_expected_ref)

    # non contiguous case 4: strided row slices
    x = torch.randn(12, 3, device=device, dtype=dtype)[::2, :]  # non-contiguous
    y = torch.randn(18, 3, device=device, dtype=dtype)[::3, :]  # non-contiguous
    np_expected_ref = np.cross(x.cpu().numpy(), y.cpu().numpy(), axis=1)
    res = torch.linalg.cross(x, y, dim=1)
    # numpy reference compared to torch result
    self.assertEqual(res.cpu().numpy(), np_expected_ref)

    # non contiguous case 5: broadcasting a 1-element tensor against a strided one
    x = torch.randn(1, device=device, dtype=dtype)  # contiguous
    y = torch.randn(6, device=device, dtype=dtype)[::2]  # non-contiguous
    np_expected_ref = np.cross(x.expand(3).cpu().numpy(), y.cpu().numpy())
    res = torch.linalg.cross(x, y)
    # numpy reference compared to torch result
    self.assertEqual(res.cpu().numpy(), np_expected_ref)
@dtypes(torch.float32, torch.complex64)
def test_cross_with_and_without_dim(self, device, dtype):
    """dim=1, dim=-1 and the default dim must agree for (100, 3) inputs."""
    a = torch.rand(100, 3, dtype=dtype, device=device)
    b = torch.rand(100, 3, dtype=dtype, device=device)
    with_pos_dim = torch.cross(a, b, dim=1)
    with_neg_dim = torch.cross(a, b, dim=-1)
    with_default_dim = torch.cross(a, b)
    self.assertEqual(with_pos_dim, with_neg_dim)
    self.assertEqual(with_pos_dim, with_default_dim)
@dtypes(torch.float32, torch.complex64)
def test_linalg_cross_with_and_without_dim(self, device, dtype):
    """dim=1, dim=-1 and the default dim must agree for torch.linalg.cross."""
    a = torch.rand(100, 3, dtype=dtype, device=device)
    b = torch.rand(100, 3, dtype=dtype, device=device)
    with_pos_dim = torch.linalg.cross(a, b, dim=1)
    with_neg_dim = torch.linalg.cross(a, b, dim=-1)
    with_default_dim = torch.linalg.cross(a, b)
    self.assertEqual(with_pos_dim, with_neg_dim)
    self.assertEqual(with_pos_dim, with_default_dim)
def test_cross_errors(self, device):
    """torch.cross must reject mismatched shapes and invalid / absent size-3 dims."""
    # mismatched ranks / shapes
    self.assertRaisesRegex(
        RuntimeError, "must match the size of tensor",
        lambda: torch.cross(torch.rand(100, 3, device=device), torch.rand(100, 3, 10, device=device)))
    self.assertRaisesRegex(
        RuntimeError, "must match the size of tensor",
        lambda: torch.cross(torch.rand(5, 3, device=device), torch.rand(3, 5, device=device)))
    # no dim of size 3 to infer the cross dimension from
    self.assertRaisesRegex(
        RuntimeError, "no dimension of size 3 in input",
        lambda: torch.cross(torch.rand(5, 4, device=device), torch.rand(5, 4, device=device)))
    # explicit dim that does not have size 3
    self.assertRaisesRegex(
        RuntimeError, "dimension 0 does not have size 3",
        lambda: torch.cross(torch.rand(5, 4, 3, device=device), torch.rand(5, 4, 3, device=device), dim=0))
    self.assertRaisesRegex(
        RuntimeError, "dimension -1 does not have size 3",
        lambda: torch.cross(torch.rand(5, 3, 4, device=device), torch.rand(5, 3, 4, device=device), dim=-1))
    # dim outside the valid range
    self.assertRaisesRegex(
        IndexError, "Dimension out of range",
        lambda: torch.cross(torch.rand(5, 3, 4, device=device), torch.rand(5, 3, 4, device=device), dim=-5))
def test_linalg_cross_errors(self, device):
    """torch.linalg.cross error cases: default dim=-1 must have size 3; shapes must match."""
    # default dim is -1 (unlike torch.cross, it is never inferred)
    self.assertRaisesRegex(
        RuntimeError, "dimension -1 does not have size 3",
        lambda: torch.linalg.cross(torch.rand(5, 3, 4, device=device), torch.rand(5, 3, 4, device=device)))
    # mismatched shapes
    self.assertRaisesRegex(
        RuntimeError, "must match the size of tensor",
        lambda: torch.linalg.cross(torch.rand(100, 3, device=device), torch.rand(100, 3, 10, device=device)))
    self.assertRaisesRegex(
        RuntimeError, "must match the size of tensor",
        lambda: torch.linalg.cross(torch.rand(5, 3, device=device), torch.rand(3, 5, device=device)))
    # explicit dim that does not have size 3
    self.assertRaisesRegex(
        RuntimeError, "dimension 0 does not have size 3",
        lambda: torch.linalg.cross(torch.rand(5, 4, 3, device=device), torch.rand(5, 4, 3, device=device), dim=0))
    self.assertRaisesRegex(
        RuntimeError, "dimension -1 does not have size 3",
        lambda: torch.linalg.cross(torch.rand(5, 3, 4, device=device), torch.rand(5, 3, 4, device=device), dim=-1))
    # dim outside the valid range
    self.assertRaisesRegex(
        IndexError, "Dimension out of range",
        lambda: torch.linalg.cross(torch.rand(5, 3, 4, device=device), torch.rand(5, 3, 4, device=device), dim=-5))
def test_renorm(self, device):
    """Compare Tensor.renorm_ against a reference built from norm + clamp.

    The reference `renorm` transposes the kept dimension to the front,
    flattens the rest, rescales rows whose norm exceeds max_norm, and
    undoes the transpose.
    """
    # Fix: removed unused local `res1 = torch.tensor((), device=device)` (dead code).
    m1 = torch.randn(20, 20, device=device)  # big enough to exercise vectorized path

    def renorm(matrix, value, dim, max_norm):
        m1 = matrix.transpose(dim, 0).contiguous()
        # collapse non-dim dimensions.
        m2 = m1.clone().resize_(m1.size(0), int(math.floor(m1.nelement() / m1.size(0))))
        norms = m2.norm(value, 1, True)
        # clip
        new_norms = norms.clone()
        new_norms[torch.gt(norms, max_norm)] = max_norm
        # epsilon guards against division by an exactly-zero norm
        new_norms.div_(norms.add_(1e-7))
        # renormalize
        m1.mul_(new_norms.expand_as(m1))
        return m1.transpose(dim, 0)

    # note that the axis fed to torch.renorm is different (2~=1)
    maxnorm = m1.norm(2, 1).mean()
    m2 = renorm(m1, 2, 1, maxnorm)
    m1.renorm_(2, 1, maxnorm)
    self.assertEqual(m1, m2, atol=1e-5, rtol=0)
    self.assertEqual(m1.norm(2, 0), m2.norm(2, 0), atol=1e-5, rtol=0)

    # 3-D case: compare via a flattened view of the transposed tensor
    m1 = torch.randn(3, 4, 5, device=device)
    m2 = m1.transpose(1, 2).contiguous().clone().resize_(15, 4)
    maxnorm = m2.norm(2, 0).mean()
    m2 = renorm(m2, 2, 1, maxnorm)
    m1.renorm_(2, 1, maxnorm)
    m3 = m1.transpose(1, 2).contiguous().clone().resize_(15, 4)
    self.assertEqual(m3, m2)
    self.assertEqual(m3.norm(2, 0), m2.norm(2, 0))
@skipCPUIfNoLapack
@skipCUDAIfNoCusolver
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_ormqr(self, device, dtype):
    """torch.ormqr: applying Q (or Q^H) from geqrf on either side must match explicit Q."""
    def run_test(batch, m, n, fortran_contiguous):
        A = make_tensor((*batch, m, n), dtype=dtype, device=device)
        reflectors, tau = torch.geqrf(A)
        if not fortran_contiguous:
            # geqrf returns Fortran-contiguous reflectors; force C-contiguous here
            self.assertTrue(reflectors.mT.is_contiguous())
            reflectors = reflectors.contiguous()

        # Q is of size m x m
        Q, _ = torch.linalg.qr(A, mode='complete')
        C_right = make_tensor((*batch, m, n), dtype=dtype, device=device)
        C_left = make_tensor((*batch, n, m), dtype=dtype, device=device)

        expected = Q @ C_right
        actual = torch.ormqr(reflectors, tau, C_right, left=True, transpose=False)
        self.assertEqual(expected, actual)

        expected = C_left @ Q
        actual = torch.ormqr(reflectors, tau, C_left, left=False, transpose=False)
        self.assertEqual(expected, actual)

        expected = Q.mH @ C_right
        actual = torch.ormqr(reflectors, tau, C_right, left=True, transpose=True)
        self.assertEqual(expected, actual)

        expected = C_left @ Q.mH
        actual = torch.ormqr(reflectors, tau, C_left, left=False, transpose=True)
        self.assertEqual(expected, actual)

        # if tau is all zeros then the implicit matrix Q is the identity matrix
        # so the actual result should be C_right in this case
        zero_tau = torch.zeros_like(tau)
        actual = torch.ormqr(reflectors, zero_tau, C_right, left=True, transpose=False)
        self.assertEqual(C_right, actual)

    batches = [(), (0, ), (2, ), (2, 1)]
    ns = [5, 2, 0]
    for batch, (m, n), fortran_contiguous in product(batches, product(ns, ns), [True, False]):
        run_test(batch, m, n, fortran_contiguous)
@skipCPUIfNoLapack
@skipCUDAIfNoCusolver
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_ormqr_errors_and_warnings(self, device, dtype):
    """torch.ormqr shape validation: each invalid (input, tau, other) triple must raise."""
    test_cases = [
        # input1 size, input2 size, input3 size, error regex
        ((10,), (2,), (2,), r"input must have at least 2 dimensions"),
        ((2, 2), (2,), (2,), r"other must have at least 2 dimensions"),
        ((10, 6), (20,), (10, 6), r"other.shape\[-2\] must be greater than or equal to tau.shape\[-1\]"),
        ((6, 6), (5,), (5, 5), r"other.shape\[-2\] must be equal to input.shape\[-2\]"),
        ((1, 2, 2), (2, 2), (1, 2, 2), r"batch dimensions of tau to be equal to input.shape\[:-2\]"),
        ((1, 2, 2), (1, 2), (2, 2, 2), r"batch dimensions of other to be equal to input.shape\[:-2\]"),
    ]
    for a_size, tau_size, c_size, error_regex in test_cases:
        a = make_tensor(a_size, dtype=dtype, device=device)
        tau = make_tensor(tau_size, dtype=dtype, device=device)
        c = make_tensor(c_size, dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, error_regex):
            torch.ormqr(a, tau, c)
def test_blas_empty(self, device):
    """BLAS ops (mm/addmm/mv/addmv/bmm/baddbmm/addbmm/matmul/dot/lu) on zero-sized operands.

    Shapes with a zero dimension must produce correctly-shaped (often all-zero)
    results, both functionally and through the out= variant.
    """
    def fn(torchfn, *args, test_out=False, **kwargs):
        # Tuple args become random tensors of that shape; non-tuples pass through.
        def call_torch_fn(*args, **kwargs):
            return torchfn(*tuple(torch.randn(shape, device=device) if isinstance(shape, tuple) else shape
                                  for shape in args), **kwargs)
        result = call_torch_fn(*args, **kwargs)
        if not test_out:
            return result
        else:
            # NaN-filled out= tensor catches kernels that fail to overwrite output
            out = torch.full_like(result, math.nan)
            out1 = call_torch_fn(*args, **kwargs, out=out)
            return out

    # mm, addmm
    self.assertEqual((0, 0), fn(torch.mm, (0, 0), (0, 0)).shape)
    self.assertEqual((0, 5), fn(torch.mm, (0, 0), (0, 5)).shape)
    self.assertEqual((5, 0), fn(torch.mm, (5, 0), (0, 0)).shape)
    self.assertEqual((3, 0), fn(torch.mm, (3, 2), (2, 0)).shape)
    self.assertEqual(torch.zeros((5, 6), device=device), fn(torch.mm, (5, 0), (0, 6)))
    self.assertEqual(torch.zeros((5, 6), device=device), fn(torch.mm, (5, 0), (0, 6), test_out=True))

    self.assertEqual((0, 0), fn(torch.addmm, (0, 0), (0, 0), (0, 0)).shape)
    self.assertEqual((0, 1), fn(torch.addmm, (1, ), (0, 17), (17, 1)).shape)
    t = torch.randn((5, 6), device=device)
    self.assertEqual(t, fn(torch.addmm, t, (5, 0), (0, 6)))
    self.assertEqual(t, fn(torch.addmm, t, (5, 0), (0, 6), test_out=True))

    # mv, addmv
    self.assertEqual((0,), fn(torch.mv, (0, 0), (0,)).shape)
    self.assertEqual((0,), fn(torch.mv, (0, 2), (2,)).shape)
    self.assertEqual(torch.zeros((3,), device=device), fn(torch.mv, (3, 0), (0,)))
    self.assertEqual(torch.zeros((3,), device=device), fn(torch.mv, (3, 0), (0,), test_out=True))

    self.assertEqual((0,), fn(torch.addmv, (0,), (0, 0), (0,)).shape)
    t = torch.randn((3,), device=device)
    self.assertEqual(t, fn(torch.addmv, t, (3, 0), (0,)))
    self.assertEqual(t, fn(torch.addmv, t, (3, 0), (0,), test_out=True))

    # bmm, baddbmm
    self.assertEqual((0, 0, 0), fn(torch.bmm, (0, 0, 0), (0, 0, 0)).shape)
    self.assertEqual((3, 0, 5), fn(torch.bmm, (3, 0, 0), (3, 0, 5)).shape)
    self.assertEqual((0, 5, 6), fn(torch.bmm, (0, 5, 0), (0, 0, 6)).shape)
    self.assertEqual(torch.zeros((3, 5, 6), device=device), fn(torch.bmm, (3, 5, 0), (3, 0, 6)))
    self.assertEqual(torch.zeros((3, 5, 6), device=device), fn(torch.bmm, (3, 5, 0), (3, 0, 6), test_out=True))

    self.assertEqual((0, 0, 0), fn(torch.baddbmm, (0, 0, 0), (0, 0, 0), (0, 0, 0)).shape)
    self.assertEqual((3, 0, 5), fn(torch.baddbmm, (3, 0, 5), (3, 0, 0), (3, 0, 5)).shape)
    self.assertEqual((0, 5, 6), fn(torch.baddbmm, (0, 5, 6), (0, 5, 0), (0, 0, 6)).shape)
    self.assertEqual((3, 5, 6), fn(torch.baddbmm, (3, 5, 6), (3, 5, 0), (3, 0, 6)).shape)
    c = torch.arange(30, dtype=torch.float32, device=device).reshape(3, 2, 5)
    self.assertEqual(-2 * c, fn(torch.baddbmm, c, (3, 2, 0), (3, 0, 5), beta=-2))  # Issue #33467
    self.assertEqual(-2 * c, fn(torch.baddbmm, c, (3, 2, 0), (3, 0, 5), beta=-2, test_out=True))  # Issue #33467

    # addbmm
    self.assertEqual((0, 0), fn(torch.addbmm, (0, 0), (0, 0, 0), (0, 0, 0)).shape)
    self.assertEqual((0, 5), fn(torch.addbmm, (0, 5), (3, 0, 0), (3, 0, 5)).shape)
    t = torch.randn((5, 6), device=device)
    self.assertEqual(t, fn(torch.addbmm, t, (0, 5, 0), (0, 0, 6)))
    self.assertEqual(t, fn(torch.addbmm, t, (0, 5, 0), (0, 0, 6), test_out=True))

    # matmul
    self.assertEqual(torch.tensor(0., device=device), fn(torch.matmul, (0,), (0,)))
    self.assertEqual(torch.tensor(0., device=device), fn(torch.matmul, (0,), (0,), test_out=True))
    self.assertEqual((0, 0), fn(torch.matmul, (0, 0), (0, 0)).shape)
    self.assertEqual((0, 0, 0), fn(torch.matmul, (0, 0, 0), (0, 0, 0)).shape)
    self.assertEqual((5, 0, 0), fn(torch.matmul, (5, 0, 0), (5, 0, 0)).shape)
    self.assertEqual(torch.zeros((5, 3, 4), device=device), fn(torch.matmul, (5, 3, 0), (5, 0, 4)))
    self.assertEqual(torch.zeros((5, 3, 4), device=device), fn(torch.matmul, (5, 3, 0), (5, 0, 4), test_out=True))

    # dot
    self.assertEqual(torch.tensor(0., device=device), fn(torch.dot, (0,), (0,)))
    self.assertEqual(torch.tensor(0., device=device), fn(torch.dot, (0,), (0,), test_out=True))

    if torch._C.has_lapack:
        # lu
        A_LU, pivots = fn(torch.lu, (0, 5, 5))
        self.assertEqual([(0, 5, 5), (0, 5)], [A_LU.shape, pivots.shape])
        A_LU, pivots = fn(torch.lu, (0, 0, 0))
        self.assertEqual([(0, 0, 0), (0, 0)], [A_LU.shape, pivots.shape])
        A_LU, pivots = fn(torch.lu, (2, 0, 0))
        self.assertEqual([(2, 0, 0), (2, 0)], [A_LU.shape, pivots.shape])
@dtypesIfCUDA(torch.cfloat, torch.cdouble,
              *get_all_fp_dtypes(include_half=not CUDA9, include_bfloat16=(CUDA11OrLater and SM53OrLater)))
@dtypes(*(set(get_all_dtypes()) - {torch.half, torch.bool}))
def test_blas_alpha_beta_empty(self, device, dtype):
    """addmv/addmm with an empty reduction dim must still scale the input by beta."""
    # This test is disabled on CUDA 9 due to:
    # See: https://github.com/pytorch/pytorch/issues/31006
    if dtype is torch.bfloat16 and self.device_type == 'xla':
        # TODO (@zasdfgbnm): this causes the following error on test
        # TestTorchDeviceTypeXLA.test_blas_alpha_beta_empty_xla_bfloat16:
        #
        #   RuntimeError: _th_equal not supported on CPUType for BFloat16
        return
    # ensure beta is respected
    value = 11
    input = torch.full((2,), value, dtype=dtype, device=device)
    mat = torch.ones((2, 0), dtype=dtype, device=device)
    vec = torch.ones((0,), dtype=dtype, device=device)
    out = torch.empty((2,), dtype=dtype, device=device)
    if dtype.is_complex:
        alpha = 6 + 7j
        beta = 3 + 4j
    else:
        alpha = 6
        beta = 3
    # alpha * (mat @ vec) contributes nothing (empty reduction) — result is beta * input
    self.assertEqual(torch.full((2,), beta * value, dtype=dtype, device=device),
                     torch.addmv(input=input, mat=mat, vec=vec, alpha=alpha, beta=beta))
    self.assertEqual(torch.full((2,), beta * value, dtype=dtype, device=device),
                     torch.addmv(input=input, mat=mat, vec=vec, alpha=alpha, beta=beta, out=out))

    # torch.addmm
    input = torch.full((2, 3), value, dtype=dtype, device=device)
    mat2 = torch.ones((0, 3), dtype=dtype, device=device)
    out = torch.empty((2, 3), dtype=dtype, device=device)
    self.assertEqual(torch.full((2, 3), beta * value, dtype=dtype, device=device),
                     torch.addmm(input=input, mat1=mat, mat2=mat2, alpha=alpha, beta=beta))
    self.assertEqual(torch.full((2, 3), beta * value, dtype=dtype, device=device),
                     torch.addmm(input=input, mat1=mat, mat2=mat2, alpha=alpha, beta=beta, out=out))
@dtypes(*(get_all_complex_dtypes() + get_all_fp_dtypes()))
def test_blas_nan_out(self, device, dtype):
    """mv/mm/bmm must overwrite NaN-prefilled out= tensors, not blend with them."""
    # These functions should work correctly with NaN filled outputs,
    # but need special handling, see [NOTE: cpu_zero]
    b = 3
    n = 5
    m = 7
    p = 11

    # torch.mv
    nm = torch.randn((m, n), device=device).t()
    _m = torch.randn((), device=device).expand(m)
    _m_out = torch.full((m,), float('nan'), device=device)
    self.assertEqual(torch.mv(nm, _m), torch.mv(nm, _m, out=_m_out))
    self.assertEqual(0, torch.isnan(torch.mv(nm, _m)).sum())

    # torch.mm
    mp = torch.randn((p, m), device=device).t()
    np_out = torch.full((n, p), float('nan'), device=device)
    self.assertEqual(torch.mm(nm, mp), torch.mm(nm, mp, out=np_out))

    # torch.bmm
    bnm = torch.randn((b, m, n), device=device).transpose(1, 2)
    bmp = torch.randn((b, p, m), device=device).transpose(1, 2)
    bnp_out = torch.full((b, n, p), float('nan'), device=device)
    self.assertEqual(torch.bmm(bnm, bmp), torch.bmm(bnm, bmp, out=bnp_out))
@onlyCPU  # not supported by CUBLAS
def test_blas_mv_large_input(self, device):
    """mv with a large input must not be corrupted by pre-existing out= contents.

    This would previously fail if the allocated output had NaNs, see:
    https://github.com/pytorch/pytorch/issues/31663 and [NOTE: cpu_zero]
    """
    cols = 3000
    rows = 200
    matrix = torch.randn((rows, cols), device=device).t()
    vec = torch.randn((), device=device).expand(rows)
    out = torch.full((rows,), 0., device=device)
    self.assertEqual(torch.mv(matrix, vec), torch.mv(matrix, vec, out=out))
@onlyCPU
def test_renorm_ps(self, device):
    """Check Tensor.renorm against a clamp-based reference for several p-norms."""
    # Fix: removed unused local `xn = x.numpy()` (dead code) and pass `device`
    # to randn for consistency with sibling tests (still CPU under @onlyCPU).
    # full reduction
    x = torch.randn(5, 5, device=device)
    for p in [1, 2, 3, 4, inf]:
        res = x.renorm(p, 1, 1)
        # reference: scale down columns whose p-norm over dim 0 exceeds maxnorm=1
        expected = x / x.norm(p, 0, keepdim=True).clamp(min=1)
        self.assertEqual(res, expected, msg="renorm failed for {}-norm".format(p))
@skipCPUIfNoLapack
@skipCUDAIfNoCusolver
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_householder_product(self, device, dtype):
    """torch.linalg.householder_product vs the Q factor of a NumPy raw-mode QR."""
    def generate_reflectors_and_tau(A):
        # Build reflectors/tau on CPU with np.linalg.qr(mode='raw'),
        # batch by batch, then move them back to A's device.
        if A.numel() > 0:
            A_cpu = A.cpu()
            flattened_batch_shape = [-1, *A_cpu.shape[-2:]]
            reflectors = torch.empty_like(A_cpu).view(*flattened_batch_shape)
            tau_shape = [*A_cpu.shape[:-2], A_cpu.shape[-1]]
            tau = torch.empty(tau_shape, dtype=dtype).view(-1, A_cpu.shape[-1])
            for A_i, reflectors_i, tau_i in zip(A_cpu.contiguous().view(*flattened_batch_shape), reflectors, tau):
                reflectors_tmp, tau_i[:] = map(torch.from_numpy, np.linalg.qr(A_i, mode='raw'))
                # raw mode returns the transposed reflector matrix
                reflectors_i[:] = reflectors_tmp.T
            reflectors = reflectors.view(*A_cpu.shape)
            tau = tau.view(tau_shape)
            return reflectors.to(A.device), tau.to(A.device)
        # empty input: return matching empty reflectors/tau
        reflectors = torch.empty_like(A)
        tau = torch.empty(*A.shape[:-2], A.shape[-1], dtype=dtype, device=device)
        return reflectors, tau

    def run_test(shape):
        A = torch.randn(*shape, dtype=dtype, device=device)
        reflectors, tau = generate_reflectors_and_tau(A)
        expected, _ = torch.linalg.qr(A)
        actual = torch.linalg.householder_product(reflectors, tau)
        # torch.linalg.qr does not work correctly for zero batch dimension tensors
        # see https://github.com/pytorch/pytorch/issues/50576
        if (A.numel() > 0):
            self.assertEqual(expected, actual)
        else:
            self.assertTrue(actual.shape == shape)

        # if tau is empty and A is not the result should be a matrix with ones on the diagonal
        if (A.numel() > 0):
            tau_empty = torch.empty(*shape[:-2], 0, dtype=dtype, device=device)
            identity_mat = torch.zeros_like(reflectors)
            identity_mat.diagonal(dim1=-1, dim2=-2)[:] = 1
            actual = torch.linalg.householder_product(reflectors, tau_empty)
            self.assertEqual(actual, identity_mat)

        # out= variant must produce the same result
        out = torch.empty_like(A)
        ans = torch.linalg.householder_product(reflectors, tau, out=out)
        self.assertEqual(ans, out)
        if (A.numel() > 0):
            self.assertEqual(expected, out)

    shapes = [(0, 0), (5, 0),  # Empty matrix
              (5, 5), (5, 3),  # Single matrix
              (0, 0, 0), (0, 5, 5), (0, 5, 3),  # Zero batch dimension tensors
              (2, 5, 5), (2, 5, 3),  # 3-dim tensors
              (2, 1, 5, 5), (2, 1, 5, 3)]  # 4-dim tensors
    for shape in shapes:
        run_test(shape)
@skipCPUIfNoLapack
@skipCUDAIfNoCusolver
def test_householder_product_errors_and_warnings(self, device):
    """householder_product validation: bad shapes, out= resize warning, dtype/device mismatches."""
    test_cases = [
        # input1 size, input2 size, error regex
        ((10,), (2,), r"input must have at least 2 dimensions"),
        ((10, 6), (20,), r"input.shape\[-1\] must be greater than or equal to tau.shape\[-1\]"),
        ((6, 10), (5,), r"input.shape\[-2\] must be greater than or equal to input.shape\[-1\]"),
    ]
    for a_size, tau_size, error_regex in test_cases:
        a = torch.rand(*a_size, device=device)
        tau = torch.rand(*tau_size, device=device)
        with self.assertRaisesRegex(RuntimeError, error_regex):
            torch.linalg.householder_product(a, tau)

    # if out tensor with wrong shape is passed a warning is given
    reflectors = torch.randn(3, 3, device=device)
    tau = torch.randn(3, device=device)
    out = torch.empty(2, 3, device=device)
    with warnings.catch_warnings(record=True) as w:
        # Trigger warning
        torch.linalg.householder_product(reflectors, tau, out=out)
        # Check warning occurs
        self.assertEqual(len(w), 1)
        self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))

    # dtypes should be safely castable
    out = torch.empty_like(reflectors).to(torch.int)
    with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
        torch.linalg.householder_product(reflectors, tau, out=out)

    with self.assertRaisesRegex(RuntimeError, "tau dtype Int does not match input dtype"):
        torch.linalg.householder_product(reflectors, tau.to(torch.int))

    if torch.cuda.is_available():
        # device of out and input should match
        wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
        out = torch.empty_like(reflectors).to(wrong_device)
        with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
            torch.linalg.householder_product(reflectors, tau, out=out)

        # device of tau and input should match
        wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
        tau = tau.to(wrong_device)
        with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
            torch.linalg.householder_product(reflectors, tau)
@precisionOverride({torch.complex64: 5e-6})
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double, torch.cfloat, torch.cdouble)
def test_lu(self, device, dtype):
    """torch.lu / torch.lu_unpack round-trip: P @ L @ U must reconstruct the input.

    Covers square and rectangular sizes, batched inputs, singular matrices,
    and (on CUDA only) the pivot=False variant.
    """
    from torch.testing._internal.common_utils import random_matrix

    def run_test(device, pivot):
        def run_subtest(matrix_size, batches, device, pivot, singular=False, a=None):
            if isinstance(matrix_size, int):
                rows = columns = matrix_size
            else:
                rows, columns = matrix_size
            if a is None:
                a = random_matrix(rows, columns, *batches, **dict(singular=singular, dtype=dtype, device=device))
            a_LU_info, pivots_info, info_ = a.lu(pivot=pivot, get_infos=True)
            self.assertEqual(a_LU_info.size(), torch.Size(batches + (rows, columns)))
            self.assertEqual(pivots_info.size(), torch.Size(batches + (min(rows, columns),)))
            self.assertEqual(info_.size(), torch.Size(batches))
            # If a randomly generated input matrix is singular,
            # then info_ contains indices i such that U[i, i] ==
            # 0. This however conveys that the factorization was
            # successful albeit with a singular input. Therefore,
            # we require info.min() >= 0
            self.assertGreaterEqual(info_.min(), 0)
            a_LU, pivots = a.lu(pivot=pivot)
            self.assertEqual(a_LU, a_LU_info)
            self.assertEqual(pivots_info, pivots)

            # reconstruct and compare against a NumPy matmul reference
            P, L, U = torch.lu_unpack(a_LU, pivots)
            P_ = P.cpu().numpy()
            L_ = L.cpu().numpy()
            U_ = U.cpu().numpy()

            self.assertEqual(np.matmul(P_, np.matmul(L_, U_)), a)

            if self.device_type == 'cuda':
                # lu without pivoting is implemented only for cuda device
                a_LU_info_nopiv, nopiv, info_nopiv = a.lu(pivot=False, get_infos=True)
                P_nopiv, L_nopiv, U_nopiv = torch.lu_unpack(a_LU_info_nopiv, nopiv)
                P_nopiv_ = P_nopiv.cpu().numpy()
                L_nopiv_ = L_nopiv.cpu().numpy()
                U_nopiv_ = U_nopiv.cpu().numpy()

                self.assertEqual(np.matmul(P_nopiv_, np.matmul(L_nopiv_, U_nopiv_)), a)

                # without pivoting, pivots must be the trivial 1..k sequence
                k = min(rows, columns)
                self.assertEqual(nopiv, torch.arange(1, 1 + k, device=device, dtype=torch.int32).expand(a.shape[:-2] + (k, )))
                if not singular:
                    # It is not guaranteed that LU factorization
                    # without pivoting is able to determine if a
                    # matrix is singular while LU factorization
                    # with pivoting is. Therefore, we require the
                    # equality of info-s only for non-singular
                    # matrices.
                    # NOTE: infor_ is reshaped because info_nopiv might have
                    # squashed batch dimensions for complex types on CUDA,
                    # see the TODOs above.
                    self.assertEqual(info_.reshape(info_nopiv.shape), info_nopiv)

        for ms, batch in itertools.product([3, 5, 7, (4, 2), (3, 4)], [(), (2,), (3,), (3, 5)]):
            run_subtest(ms, batch, device, pivot)
            run_subtest(ms, batch, device, pivot, singular=True)

            # Reproducer of a magma bug, see https://bitbucket.org/icl/magma/issues/13/getrf_batched-kernel-produces-nans-on
            a = torch.ones(batch + (ms if isinstance(ms, tuple) else (ms, ms)), dtype=torch.double, device=device)
            run_subtest(ms, batch, device, pivot, singular=True, a=a)

        # Info should be positive for rank deficient matrices
        a = torch.ones(5, 3, 3, device=device)
        self.assertGreater(a.lu(pivot=pivot, get_infos=True)[2][0], 0)

    run_test(device, True)

    if self.device_type == 'cpu':
        # Error checking, no pivoting variant on CPU
        with self.assertRaisesRegex(RuntimeError, 'lu without pivoting is not implemented on the CPU'):
            torch.lu(torch.empty(1, 2, 2), pivot=False)
    else:
        run_test(device, False)
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
@skipCUDAIfRocm
@precisionOverride({torch.float: 1e-3})
def test_lu_unpack(self, device, dtype):
    """torch.lu_unpack must satisfy P @ L @ U == A for square, rectangular and empty inputs."""
    def run_test(pivot):
        # square / batched-square shapes
        for shape in ((3, 3), (5, 3, 3), (7, 3, 5, 5), (7, 5, 3, 3, 3)):
            a = torch.randn(*shape, dtype=dtype, device=device)
            a_lu, p = torch.lu(a, pivot=pivot)
            p_ref, l_ref, u_ref = torch.lu_unpack(a_lu, p)
            self.assertEqual(p_ref.matmul(l_ref.matmul(u_ref)), a)

        # rectangular, batched-rectangular and empty shapes, with entries
        # bounded to keep the reconstruction numerically stable
        for shape in ((3, 3), (5, 3, 3), (7, 3, 5, 5), (7, 5, 3, 3, 3),
                      (3, 5), (5, 3), (3, 3, 5), (3, 5, 3),
                      (7, 5, 3, 5, 3), (7, 5, 3, 3, 5),
                      # empty tensors
                      (0, 0), (0, 0, 0), (0, 3, 3)
                      ):
            a = make_tensor(shape, dtype=dtype, device=device, low=-0.1, high=+0.1)
            a_lu, p = torch.lu(a, pivot=pivot)
            p_ref, l_ref, u_ref = torch.lu_unpack(a_lu, p)
            self.assertEqual(p_ref.matmul(l_ref.matmul(u_ref)), a)

    run_test(True)

    if self.device_type == 'cuda':
        # pivot=False is only implemented on CUDA
        run_test(False)
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(torch.double)
def test_lu_unpack_check_input(self, device, dtype):
    """torch.lu_unpack input validation: pivot dtype, contiguity, and unpack flags."""
    x = torch.rand(5, 5, 5, device=device, dtype=dtype)
    lu_data, lu_pivots = torch.lu(x, pivot=True)

    # pivots must be int32 and contiguous
    with self.assertRaisesRegex(RuntimeError, "torch.int32 dtype"):
        torch.lu_unpack(lu_data, lu_pivots.long())
    with self.assertRaisesRegex(RuntimeError, "contiguous tensor"):
        torch.lu_unpack(lu_data, lu_pivots.mT)

    # check that once the unpack flags are unset, Nones are returned
    p, l, u = torch.lu_unpack(lu_data, lu_pivots, unpack_data=False)
    self.assertTrue((l == u) and l is None)
    p, l, u = torch.lu_unpack(lu_data, lu_pivots, unpack_pivots=False)
    self.assertTrue(p is None)
    p, l, u = torch.lu_unpack(lu_data, lu_pivots, unpack_data=False, unpack_pivots=False)
    self.assertTrue((p == l == u) and p is None)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double)
@skipCUDAIfRocm
def test_lobpcg_basic(self, device, dtype):
    """Exercise LOBPCG with the 'basic' iteration method."""
    self._test_lobpcg_method(device=device, dtype=dtype, method='basic')
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double)
@skipCUDAIfRocm
def test_lobpcg_ortho(self, device, dtype):
    """Exercise LOBPCG with the 'ortho' iteration method."""
    self._test_lobpcg_method(device=device, dtype=dtype, method='ortho')
def _test_lobpcg_method(self, device, dtype, method):
    """Shared LOBPCG driver: dense and sparse inputs, standard and generalized problems.

    `method` selects the LOBPCG iteration scheme ('basic' or 'ortho').
    A tracker callback validates convergence, B-orthogonality and the block
    eigen-equation on every iteration once enough eigenpairs have converged.
    """
    from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
    from torch._linalg_utils import matmul, qform
    from torch._lobpcg import lobpcg

    def test_tracker(worker):
        # Called by lobpcg each iteration with the solver's internal state.
        k = worker.iparams['k']
        nc = worker.ivars['converged_count']
        if k <= nc:
            tol = worker.fparams['tol']
            rerr = worker.tvars['rerr']
            X = worker.X
            E = worker.E
            B = worker.B
            A = worker.A
            dtype = X.dtype
            device = X.device

            # Check convergence
            self.assertLessEqual(rerr[:k].max(), tol)

            # Check B-orthogonality
            I = torch.eye(k, k, dtype=dtype, device=device)
            self.assertEqual(qform(B, X[:, :k]), I)

            # Check block equation
            self.assertEqual(qform(A, X[:, :k]) / E[:k], I, atol=0.2, rtol=0)

    orig_lobpcg = lobpcg

    # Shadow lobpcg with a wrapper that injects the tracker and test parameters.
    def lobpcg(*args, **kwargs):
        kwargs['tracker'] = test_tracker
        kwargs['niter'] = 1000
        kwargs['method'] = method
        kwargs['tol'] = 1e-8
        return orig_lobpcg(*args, **kwargs)

    prec = 5e-4

    # check dense input
    mm = torch.matmul
    for batches in [(), (2,), (2, 3)]:
        for m, n, k in [
                (9, 3, 1),
                (9, 3, 2),
                (9, 2, 2),
                (100, 15, 5),
        ]:
            # skip tests that are known to fail with the basic
            # LOBPCG method due to calling cholesky on singular
            # input
            if method == 'basic' and (m, n, k) in [(9, 2, 2), (100, 15, 5)]:
                continue
            A = random_symmetric_pd_matrix(m, *batches, device=device, dtype=dtype)
            B = random_symmetric_pd_matrix(m, *batches, device=device, dtype=dtype)

            # classical eigenvalue problem, smallest eigenvalues
            E, V = lobpcg(A, k=k, n=n, largest=False)
            self.assertEqual(E.shape, batches + (k,))
            self.assertEqual(V.shape, batches + (m, k))
            self.assertEqual(matmul(A, V), mm(V, E.diag_embed()), atol=prec, rtol=0)
            e = torch.symeig(A)[0]
            e_smallest = e[..., :k]
            self.assertEqual(E, e_smallest)

            # classical eigenvalue problem, largest eigenvalues
            E, V = lobpcg(A, k=k, n=n, largest=True)
            e_largest, _ = torch.sort(e[..., -k:], descending=True)
            self.assertEqual(E, e_largest, atol=prec, rtol=0)
            self.assertEqual(matmul(A, V), mm(V, E.diag_embed()), atol=prec, rtol=0)

            # generalized eigenvalue problem, smallest eigenvalues
            E, V = lobpcg(A, B=B, k=k, n=n, largest=False)
            self.assertEqual(matmul(A, V), mm(matmul(B, V), E.diag_embed()), atol=prec, rtol=0)

            # generalized eigenvalue problem, largest eigenvalues
            E, V = lobpcg(A, B=B, k=k, n=n, largest=True)
            self.assertEqual(matmul(A, V) / E.max(), mm(matmul(B, V), (E / E.max()).diag_embed()),
                             atol=prec, rtol=0)

    # check sparse input
    for m, n, k, density in [
            (5, 1, 1, 0.8),
            (9, 3, 2, 0.5),
            (100, 1, 1, 0.1),
            (1000, 7, 3, 0.01),
    ]:
        # skip tests that are known to fail with the basic LOBCG
        # method due to insufficient accuracy
        if method == 'basic' and (m, n, k, density) in [(1000, 7, 3, 0.01)]:
            continue
        A = random_sparse_pd_matrix(m, density=density, device=device, dtype=dtype)
        B = random_sparse_pd_matrix(m, density=density, device=device, dtype=dtype)
        # random_sparse_pd_matrix has known eigenvalues i/m for i = 1..m
        A_eigenvalues = torch.arange(1, m + 1, dtype=dtype) / m
        e_smallest = A_eigenvalues[..., :k]
        e_largest, _ = torch.sort(A_eigenvalues[..., -k:], descending=True)

        # classical eigenvalue problem, smallest eigenvalues
        E, V = lobpcg(A, k=k, n=n, largest=False)
        self.assertEqual(E, e_smallest)
        self.assertEqual(matmul(A, V), mm(V, E.diag_embed()), atol=prec, rtol=0)

        # classical eigenvalue problem, largest eigenvalues
        E, V = lobpcg(A, k=k, n=n, largest=True)
        self.assertEqual(matmul(A, V), mm(V, E.diag_embed()), atol=prec, rtol=0)
        self.assertEqual(E, e_largest)

        # generalized eigenvalue problem, smallest eigenvalues
        E, V = lobpcg(A, B=B, k=k, n=n, largest=False)
        self.assertEqual(matmul(A, V), matmul(B, mm(V, E.diag_embed())), atol=prec, rtol=0)

        # generalized eigenvalue problem, largest eigenvalues
        E, V = lobpcg(A, B=B, k=k, n=n, largest=True)
        self.assertEqual(matmul(A, V) / E.max(), mm(matmul(B, V), (E / E.max()).diag_embed()),
                         atol=prec, rtol=0)
@skipCPUIfNoLapack
@onlyCPU
@dtypes(torch.double)
def test_lobpcg_torchscript(self, device, dtype):
    """torch.lobpcg must be scriptable and solve A v = e v to high accuracy."""
    from torch.testing._internal.common_utils import random_sparse_pd_matrix
    from torch._linalg_utils import matmul as mm

    scripted_lobpcg = torch.jit.script(torch.lobpcg)

    size = 500
    num_pairs = 5
    A = random_sparse_pd_matrix(size, density=2.0 / size, device=device, dtype=dtype)
    X = torch.randn((size, num_pairs), dtype=dtype, device=device)
    eigvals, eigvecs = scripted_lobpcg(A, X=X)
    # relative residual of the eigen-equation A v = e v
    residual = torch.norm((mm(A, eigvecs) - eigvecs * eigvals), 2) / eigvals.max()
    self.assertLess(residual, 1e-6)
    @unittest.skipIf(not TEST_SCIPY or (TEST_SCIPY and scipy.__version__ < '1.4.1'), "Scipy not found or older than 1.4.1")
    @skipCPUIfNoLapack
    @onlyCPU
    @dtypes(torch.double)
    def test_lobpcg_scipy(self, device, dtype):
        """Compare torch.lobpcg against scipy.sparse.linalg.lobpcg on random
        sparse positive-definite inputs: residuals and iteration counts for the
        standard and generalized eigenproblems, rough per-call timings, and
        graceful handling of a tolerance far below machine precision.
        """
        import time
        from torch.testing._internal.common_utils import random_sparse_pd_matrix
        from torch._linalg_utils import matmul as mm
        from scipy.sparse.linalg import lobpcg as scipy_lobpcg
        import scipy.sparse

        # Convert a torch tensor (dense or sparse COO) to the scipy/numpy
        # equivalent, copying so scipy never aliases torch storage.
        def toscipy(A):
            if A.layout == torch.sparse_coo:
                values = A.coalesce().values().cpu().numpy().copy()
                indices = A.coalesce().indices().cpu().numpy().copy()
                return scipy.sparse.coo_matrix((values, (indices[0], indices[1])), A.shape)
            return A.cpu().numpy().copy()
        niter = 1000
        repeat = 10
        m = 500  # size of the square matrix
        k = 7  # the number of requested eigenpairs
        A1 = random_sparse_pd_matrix(m, density=2.0 / m, device=device, dtype=dtype)
        B1 = random_sparse_pd_matrix(m, density=2.0 / m, device=device, dtype=dtype)
        X1 = torch.randn((m, k), dtype=dtype, device=device)
        A2 = toscipy(A1)
        B2 = toscipy(B1)
        X2 = toscipy(X1)
        lambdas1 = []

        # The tracker records the eigenvalue history so iteration counts can
        # be compared against scipy's retLambdaHistory output.
        def tracker(worker):
            lambdas1.append(worker.E[:])
        tol = 1e-8
        # tol for scipy lobpcg is chosen so that the number of
        # iterations will be equal or very close to pytorch lobpcg
        # (that is around 170-180)

        # Standard eigenvalue problem
        E1, V1 = torch.lobpcg(A1, X=X1, niter=niter, largest=True, tracker=tracker, tol=tol)
        E2, V2, lambdas2 = scipy_lobpcg(A2, X2, maxiter=niter, largest=True, retLambdaHistory=True, tol=1.1 * tol)
        iters1 = len(lambdas1)
        iters2 = len(lambdas2)
        # Iteration counts must agree to within 5%.
        self.assertLess(abs(iters1 - iters2), 0.05 * max(iters1, iters2))

        E2a, V2a = scipy_lobpcg(A2, X2, maxiter=niter, largest=False)

        # Relative residuals of A V = V E for both solvers.
        eq_err = torch.norm((mm(A1, V1) - V1 * E1), 2) / E1.max()
        eq_err_scipy = (abs(A2.dot(V2) - V2 * E2)**2).sum() ** 0.5 / E2.max()
        self.assertLess(eq_err, 1e-6)        # std
        self.assertLess(eq_err_scipy, 1e-6)  # std
        self.assertEqual(E1, torch.from_numpy(E2.copy()))

        # Generalized eigenvalue problem A V = B V E
        lambdas1 = []

        def tracker(worker):
            lambdas1.append(worker.E[:])
        E1, V1 = torch.lobpcg(A1, B=B1, X=X1, niter=niter, largest=True, tracker=tracker, tol=tol)
        E2, V2, lambdas2 = scipy_lobpcg(A2, X2, B=B2, maxiter=niter, largest=True, retLambdaHistory=True, tol=39 * tol)
        E2a, V2a = scipy_lobpcg(A2, X2, B=B2, maxiter=niter, largest=False)
        iters1 = len(lambdas1)
        iters2 = len(lambdas2)
        self.assertLess(abs(iters1 - iters2), 0.05 * max(iters1, iters2))

        eq_err = torch.norm((mm(A1, V1) - mm(B1, V1) * E1), 2) / E1.max()
        eq_err_scipy = (abs(A2.dot(V2) - B2.dot(V2) * E2)**2).sum() ** 0.5 / E2.max()
        self.assertLess(eq_err, 1e-6)        # general
        self.assertLess(eq_err_scipy, 1e-6)  # general
        self.assertEqual(E1, torch.from_numpy(E2.copy()))

        # Timings (informational only -- printed, not asserted)
        elapsed_ortho = 0
        elapsed_ortho_general = 0
        elapsed_scipy = 0
        elapsed_general_scipy = 0
        for i in range(repeat):
            start = time.time()
            torch.lobpcg(A1, X=X1, niter=niter, method='ortho', tol=tol)
            end = time.time()
            elapsed_ortho += end - start
            start = time.time()
            torch.lobpcg(A1, X=X1, B=B1, niter=niter, method='ortho', tol=tol)
            end = time.time()
            elapsed_ortho_general += end - start
            start = time.time()
            scipy_lobpcg(A2, X2, maxiter=niter, tol=1.1 * tol)
            end = time.time()
            elapsed_scipy += end - start
            start = time.time()
            scipy_lobpcg(A2, X2, B=B2, maxiter=niter, tol=39 * tol)
            end = time.time()
            elapsed_general_scipy += end - start
        elapsed_ortho_ms = 1000.0 * elapsed_ortho / repeat
        elapsed_ortho_general_ms = 1000.0 * elapsed_ortho_general / repeat
        elapsed_scipy_ms = 1000.0 * elapsed_scipy / repeat
        elapsed_general_scipy_ms = 1000.0 * elapsed_general_scipy / repeat
        print('''
CPU timings: torch.lobpcg vs scipy.sparse.linalg.lobpcg
-------------------------------------------------------
              | standard  | generalized  | method
torch.lobpcg  | {:10.2f}  | {:10.2f}     | ortho
scipy_lobpcg  | {:10.2f}  | {:10.2f}     | N/A
-(input size: {:4}, eigenpairs:{:2}, units: ms per call)-
        '''.format(elapsed_ortho_ms, elapsed_ortho_general_ms,
                   elapsed_scipy_ms, elapsed_general_scipy_ms,
                   m, k))

        # Handling of very small tolerance: torch must converge cleanly;
        # scipy may fail, in which case the failure is reported but the test
        # still passes (only torch's residual is asserted implicitly above).
        tol = 1e-100

        lambdas1 = []

        def tracker(worker):
            lambdas1.append(worker.E[:])
        E1, V1 = torch.lobpcg(A1, X=X1, niter=niter, largest=True, tracker=tracker, tol=tol)
        iters1 = len(lambdas1)
        eq_err = torch.norm((mm(A1, V1) - V1 * E1), 2) / E1.max()

        try:
            E2, V2, lambdas2 = scipy_lobpcg(A2, X2, maxiter=niter, largest=True, retLambdaHistory=True, tol=tol)
            iters2 = len(lambdas2)
            eq_err_scipy = (abs(A2.dot(V2) - V2 * E2)**2).sum() ** 0.5 / E2.max()
        except Exception as msg:
            # Best-effort comparison: scipy failures are reported, not fatal.
            print('Calling scipy_lobpcg failed [standard]:', msg)
            iters2 = -1
            eq_err_scipy = -1

        lambdas1 = []

        def tracker(worker):
            lambdas1.append(worker.E[:])
        E1, V1 = torch.lobpcg(A1, X=X1, B=B1, niter=niter, largest=True, tracker=tracker, tol=tol)
        iters1_general = len(lambdas1)
        eq_err_general = torch.norm((mm(A1, V1) - mm(B1, V1) * E1), 2) / E1.max()

        try:
            E2, V2, lambdas2 = scipy_lobpcg(A2, X2, B=B2, maxiter=niter, largest=True, retLambdaHistory=True, tol=tol)
            iters2_general = len(lambdas2)
            eq_err_general_scipy = (abs(A2.dot(V2) - B2.dot(V2) * E2)**2).sum() ** 0.5 / E2.max()
        except Exception as msg:
            print('Calling scipy_lobpcg failed [generalized]:', msg)
            iters2_general = -1
            eq_err_general_scipy = -1

        print('''\
Handling of small tol={:6.0e}: torch.lobpcg vs scipy.sparse.linalg.lobpcg
----------------------------------------------------------------------------
              | standard    | generalized |  niter | method
torch.lobpcg  | {:10.2e}  | {:10.2e}  | {:6} | ortho
scipy_lobpcg  | {:10.2e}  | {:10.2e}  | {:6} | N/A
---(input size: {:4}, eigenpairs:{:2}, units: relative error, maxiter={:4})---
'''.format(tol, eq_err, eq_err_general, iters1, eq_err_scipy, eq_err_general_scipy, iters2, m, k, niter))
def _test_addmm_addmv(self, f, t, m, v, *, alpha=None, beta=None, transpose_out=False):
dtype = t.dtype
numpy_dtype = dtype
if dtype in {torch.bfloat16}:
numpy_dtype = torch.float
if dtype.is_complex:
alpha = 0.9 + 0.3j if alpha is None else alpha
beta = 0.5 + 0.6j if beta is None else beta
else:
alpha = 1.2 if alpha is None else alpha
beta = 0.8 if beta is None else beta
res1 = f(t, m, v, alpha=alpha, beta=beta)
res2 = torch.full_like(res1, math.nan)
if transpose_out:
res2 = res2.t().clone(memory_format=torch.contiguous_format).t()
f(t, m, v, alpha=alpha, beta=beta, out=res2)
res3 = alpha * (m.to(numpy_dtype).cpu().numpy() @ v.to(numpy_dtype).cpu().numpy())
if beta != 0:
res3 += (beta * t).to(numpy_dtype).cpu().numpy()
res3 = torch.from_numpy(res3).to(dtype)
self.assertEqual(res1, res2)
self.assertEqual(res1, res3)
    @precisionOverride({torch.bfloat16: 1e-0, torch.half: 5e-4, torch.float: 1e-4, torch.double: 1e-8,
                        torch.cfloat: 1e-4, torch.cdouble: 1e-8})
    @dtypesIfCUDA(*get_all_complex_dtypes(),
                  *get_all_fp_dtypes(include_bfloat16=(TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater)),
                                     include_half=(not TEST_WITH_ROCM)))
    @dtypes(torch.bfloat16, torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_addmv(self, device, dtype):
        """torch.addmv over the cross-product of contiguous, expanded
        (0-strided) and transposed operands, via _test_addmm_addmv; also
        checks that beta=0 suppresses NaNs in the input vector ``t``."""
        # have to use torch.randn(...).to(bfloat16) instead of
        # torch.randn(..., dtype=bfloat16). randn does not support
        # bfloat16 yet.
        # "*0.2" to reduce errors for low precision
        ts = [
            0.2 * torch.randn(50, device=device).to(dtype),
            0.2 * torch.randn(1, device=device).to(dtype).expand(50),
        ]
        vs = [
            0.2 * torch.randn(100, device=device).to(dtype),
            0.2 * torch.ones(1, device=device).to(dtype).expand(100),  # to reduce errors for low precision
        ]
        ms = [
            # 0d
            0.2 * torch.ones((), device=device).to(dtype).expand(50, 100),  # to reduce errors for low precision
            # 1d
            0.2 * torch.randn((1, 100), device=device).to(dtype).expand(50, 100),
            # this initialization reduces errors for low precision for broadcasted matrices
            # by making sure that intermediate and result values are exactly representable
            # in low precision type
            0.2 * torch.randint(3, (50, 1), dtype=torch.float, device=device).to(dtype).expand(50, 100),
            # 2d
            0.2 * torch.randn((50, 100), device=device).to(dtype),
            0.2 * torch.randn((100, 50), device=device).to(dtype).t(),
        ]
        for m, v, t in itertools.product(ms, vs, ts):
            self._test_addmm_addmv(torch.addmv, t, m, v)
        # Test beta=0, t=nan
        t = torch.full((50,), math.nan, device=device).to(dtype)
        for m, v in itertools.product(ms, vs):
            self._test_addmm_addmv(torch.addmv, t, m, v, beta=0)
@dtypesIfCUDA(*get_all_fp_dtypes(include_bfloat16=(TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater))))
@dtypes(torch.float, torch.double)
def test_addmv_rowmajor_colmajor_incx_incy_lda(self, device, dtype):
# tests (o, s)*(s). o is output size, s is summed size.
o = 5
s = 3
a_data = torch.arange(1, o * s + 1, device=device, dtype=dtype).view(o, s)
x_data = torch.arange(1, s + 1, 1, device=device, dtype=dtype)
y_data = torch.ones(o, device=device, dtype=dtype)
control = torch.tensor([15., 33., 51., 69., 87.], device=device, dtype=dtype)
def _test(row_major, incx, incy, lda_tail):
if row_major:
a_storage = torch.full((o, s + lda_tail), float('nan'), device=device, dtype=dtype)
else:
a_storage = torch.full((s, o + lda_tail), float('nan'), device=device, dtype=dtype).permute(1, 0)
a = a_storage[:o, :s].copy_(a_data)
x_storage = torch.full((s, incx), float('nan'), device=device, dtype=dtype)
x = x_storage[:, 0].copy_(x_data)
y_storage = torch.full((o, incy), float('nan'), device=device, dtype=dtype)
y = y_storage[:, 0].copy_(y_data)
self._test_addmm_addmv(torch.addmv, y, a, x)
for row_major, incx, incy, lda_tail in itertools.product((False, True), (1, 2), (1, 2), (0, 1)):
_test(row_major, incx, incy, lda_tail)
@precisionOverride({torch.double: 1e-8, torch.float: 1e-4, torch.bfloat16: 0.6,
torch.half: 1e-1, torch.cfloat: 1e-4, torch.cdouble: 1e-8})
@dtypesIfCUDA(*get_all_complex_dtypes(),
*get_all_fp_dtypes(include_bfloat16=(TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater))))
@dtypes(*get_all_complex_dtypes(), *get_all_fp_dtypes())
@tf32_on_and_off(0.05)
def test_addmm(self, device, dtype):
M = torch.randn(10, 25, device=device).to(dtype)
m1 = torch.randn(10, 50, device=device).to(dtype)
m2 = torch.randn(50, 25, device=device).to(dtype)
self._test_addmm_addmv(torch.addmm, M, m1, m2)
# Test 0-strided
M = torch.randn(10, 1, device=device).to(dtype).expand(10, 25)
m1 = torch.randn(10, 1, device=device).to(dtype).expand(10, 50)
m2 = torch.randn(50, 25, device=device).to(dtype)
self._test_addmm_addmv(torch.addmm, M, m1, m2)
# Test beta=0, M=nan
M = torch.full((10, 25), math.nan, device=device).to(dtype)
m1 = torch.randn(10, 50, device=device).to(dtype)
m2 = torch.randn(50, 25, device=device).to(dtype)
self._test_addmm_addmv(torch.addmm, M, m1, m2, beta=0)
# Test transpose
for t1, t2, t3, t4 in itertools.product([True, False], repeat=4):
def maybe_transpose(cond, m):
if not cond:
return m
return m.t().clone(memory_format=torch.contiguous_format).t()
M = maybe_transpose(t1, torch.randn(10, 25, device=device).to(dtype))
m1 = maybe_transpose(t2, torch.randn(10, 50, device=device).to(dtype))
m2 = maybe_transpose(t3, torch.randn(50, 25, device=device).to(dtype))
self._test_addmm_addmv(torch.addmm, M, m1, m2, transpose_out=t4)
@dtypes(torch.float, torch.double)
@dtypesIfCUDA(*([torch.float, torch.double] + get_all_complex_dtypes()))
@tf32_on_and_off(0.005)
def test_addmm_sizes(self, device, dtype):
for m in [0, 1, 25]:
for n in [0, 1, 10]:
for k in [0, 1, 8]:
M = torch.randn(n, m, device=device).to(dtype)
m1 = torch.randn(n, k, device=device).to(dtype)
m2 = torch.randn(k, m, device=device).to(dtype)
self._test_addmm_addmv(torch.addmm, M, m1, m2)
m1 = torch.randn(n, k + 1, device=device).to(dtype)
m2 = torch.randn(k, m, device=device).to(dtype)
self.assertRaisesRegex(RuntimeError, f"{n}x{k + 1}.*{k}x{m}", lambda: torch.addmm(M, m1, m2))
self.assertRaisesRegex(RuntimeError, f"{n}x{k + 1}.*{k}x{m}", lambda: torch.mm(m1, m2))
@dtypes(torch.half)
@onlyCUDA
def test_addmm_baddbmm_overflow(self, device, dtype):
orig = torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
inp = torch.zeros(128, 128, dtype=torch.half, device=device)
mat1 = torch.ones(128, 1000, dtype=torch.half, device=device) * 100
mat2 = torch.ones(1000, 128, dtype=torch.half, device=device) * 100
out = torch.addmm(inp, mat1, mat2, alpha=0.001, beta=0.)
# just check for no overflow on ROCM
if TEST_WITH_ROCM:
self.assertFalse(out.isinf().any())
else:
self.assertTrue((out == 10000.).all())
inp = torch.zeros(3, 128, 128, dtype=torch.half, device=device)
mat1 = torch.ones(3, 128, 1000, dtype=torch.half, device=device) * 100
mat2 = torch.ones(3, 1000, 128, dtype=torch.half, device=device) * 100
out = torch.baddbmm(inp, mat1, mat2, alpha=0.001, beta=0.)
if TEST_WITH_ROCM:
self.assertFalse(out.isinf().any())
else:
self.assertTrue((out == 10000.).all())
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = orig
@unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "cublas runtime error")
@onlyCUDA
def test_matmul_45724(self, device):
# https://github.com/pytorch/pytorch/issues/45724
a = torch.rand(65537, 22, 64, device=device, dtype=torch.half)
b = torch.rand(65537, 64, 22, device=device, dtype=torch.half)
c = torch.full((65537, 22, 22), math.nan, dtype=torch.half, device=device)
cpu_result = torch.matmul(a.cpu().float(), b.cpu().float()).cuda().half()
torch.matmul(a, b, out=c)
self.assertEqual(c, cpu_result)
    @slowTest
    @onlyNativeDeviceTypes
    @dtypes(torch.float32, torch.float64, torch.bfloat16, torch.int32, torch.int64, torch.cfloat, torch.cdouble)
    @dtypesIfCUDA(torch.float32, torch.float64, torch.cfloat, torch.cdouble)
    @tf32_on_and_off(0.01)
    def test_mm(self, device, dtype):
        """torch.mm against a naive triple-loop reference, covering every
        contiguity combination of the operands (contiguous, transposed,
        zero-strided) and the explicit out= variant."""
        def _test_mm(n, m, p, dtype, genf):
            # helper function: naive O(n*m*p) reference implementation
            def matrixmultiply(mat1, mat2):
                n = mat1.size(0)
                m = mat1.size(1)
                p = mat2.size(1)
                res = torch.zeros(n, p, dtype=dtype, device=device)
                for i, j in iter_indices(res):
                    res[i, j] = sum(mat1[i, k] * mat2[k, j] for k in range(m))
                return res

            # contiguous case
            mat1 = genf(n, m)
            mat2 = genf(m, p)
            res = torch.mm(mat1, mat2)
            res2 = matrixmultiply(mat1, mat2)
            self.assertEqual(res, res2)

            # non contiguous case 1: second operand transposed
            mat1 = genf(n, m)
            mat2 = genf(p, m).t()
            res = torch.mm(mat1, mat2)
            res2 = matrixmultiply(mat1, mat2)
            self.assertEqual(res, res2)

            # non contiguous case 2: first operand transposed
            mat1 = genf(m, n).t()
            mat2 = genf(m, p)
            res = torch.mm(mat1, mat2)
            res2 = matrixmultiply(mat1, mat2)
            self.assertEqual(res, res2)

            # non contiguous case 3: both operands transposed
            mat1 = genf(m, n).t()
            mat2 = genf(p, m).t()
            res = torch.mm(mat1, mat2)
            res2 = matrixmultiply(mat1, mat2)
            self.assertEqual(res, res2)

            # test with zero stride
            mat1 = genf(n, m)
            mat2 = genf(m, 1).expand(m, p)
            res = torch.mm(mat1, mat2)
            res2 = matrixmultiply(mat1, mat2)
            self.assertEqual(res, res2)

            # explicitly exercise the _out variant in torch.mm().
            # contiguous case
            mat1 = genf(n, m)
            mat2 = genf(m, p)
            res = genf(n, p)
            torch.mm(mat1, mat2, out=res)
            res2 = matrixmultiply(mat1, mat2)
            self.assertEqual(res, res2)

            # explicitly exercise the _out variant in torch.mm().
            # non contiguous case 3
            mat1 = genf(m, n).t()
            mat2 = genf(p, m).t()
            res = genf(n, p)
            torch.mm(mat1, mat2, out=res)
            res2 = matrixmultiply(mat1, mat2)
            self.assertEqual(res, res2)

        # Generators per dtype family; "*0.1" keeps bfloat16 errors small.
        def genf_int(x, y):
            return torch.randint(0, 100, (x, y), dtype=dtype, device=device)

        def genf_bfloat(x, y):
            return torch.randn(x, y, dtype=torch.float32, device=device).to(dtype) * 0.1

        def genf_float(x, y):
            return torch.randn(x, y, dtype=dtype, device=device)

        for (n, m, p) in [(20, 10, 15), (15, 20, 10), (25, 18, 10)]:
            if (dtype == torch.int32) or (dtype == torch.int64):
                genf = genf_int
            elif (dtype == torch.bfloat16):
                genf = genf_bfloat
            else:
                genf = genf_float

            _test_mm(n, m, p, dtype, genf)
@onlyNativeDeviceTypes
def test_mm_bmm_non_memory_dense(self, device):
def _slice(tensor, fn):
return fn(tensor)[..., ::2]
A = torch.randn(3, 6, dtype=torch.cfloat, device=device)
B = torch.randn(3, 3, dtype=torch.cfloat, device=device)
out = torch.empty(3, 3, device=device, dtype=torch.complex64).t()
out1 = torch.empty(3, 3, device=device, dtype=torch.complex64).t()
A_conj = _slice(A, torch.conj)
A_conj_physical = _slice(A, torch.conj_physical)
self.assertEqual(torch.mm(A_conj, B, out=out), torch.mm(A_conj_physical, B, out=out))
self.assertEqual(torch.mm(A_conj.t(), B, out=out), torch.mm(A_conj_physical.t(), B, out=out))
Ab = torch.randn(2, 3, 6, dtype=torch.cfloat, device=device)
Bb = torch.randn(2, 3, 3, dtype=torch.cfloat, device=device)
Bb_ = torch.randn(1, 3, 3, dtype=torch.cfloat, device=device).expand(2, 3, 3)
out_b = torch.empty(2, 3, 3, device=device, dtype=torch.complex64).mT
Ab_conj = _slice(Ab, torch.conj)
Ab_conj_physical = _slice(Ab, torch.conj_physical)
def t_b(tensor):
return tensor.mT
self.assertEqual(torch.bmm(Ab_conj, Bb, out=out_b), torch.bmm(Ab_conj_physical, Bb, out=out_b))
self.assertEqual(torch.bmm(t_b(Ab_conj), Bb, out=out_b), torch.bmm(t_b(Ab_conj_physical), Bb, out=out_b))
# test broadcasting
self.assertEqual(torch.bmm(Ab_conj, Bb_, out=out_b), torch.bmm(Ab_conj_physical, Bb_, out=out_b))
self.assertEqual(torch.bmm(t_b(Ab_conj), Bb_, out=out_b), torch.bmm(t_b(Ab_conj_physical), Bb_, out=out_b))
@onlyNativeDeviceTypes
@dtypes(torch.float32, torch.float64)
def test_strided_mm_bmm(self, device, dtype):
# Tests strided view case with stride smaller than corresponding dimension size
x = torch.tensor([[1., 2., 3.], [4., 5., 6.]], dtype=dtype, device=device)
new_shape = [2, 2, 2]
new_stride = [3, 1, 1]
sx = torch.as_strided(x, size=new_shape, stride=new_stride)
torch_fn = lambda x: torch.bmm(x, x) # noqa: E731
np_fn = lambda x: np.matmul(x, x) # noqa: E731
self.compare_with_numpy(torch_fn, np_fn, sx)
torch_fn = lambda x: torch.mm(x, x) # noqa: E731
self.compare_with_numpy(torch_fn, np_fn, sx[0])
    @precisionOverride({torch.half: 0.05, torch.bfloat16: 0.05})
    @skipCUDAIf(torch.version.cuda == "10.1", "flaky on CUDA 10.1")
    @onlyNativeDeviceTypes
    @dtypes(*get_all_fp_dtypes(), *get_all_complex_dtypes())
    @tf32_on_and_off(0.05)
    def test_bmm(self, device, dtype):
        """torch.bmm against the NumPy batched-matmul reference over permuted,
        broadcast-expanded, and zero-sized inputs; also checks out= with every
        output permutation, and mixed-device argument rejection on CUDA."""
        if self.device_type == 'cuda' and dtype is torch.bfloat16 and CUDA11OrLater and not SM53OrLater:
            # cuBLAS does not guarantee BFloat16 support on SM < 53.
            # So on PyTorch, we consider BFloat16 support on SM < 53 as
            # undefined behavior
            return
        batch_sizes = [1, 10]
        M, N, O = 23, 15, 12
        numpy_dtype = dtype if dtype != torch.bfloat16 else torch.float32
        is_supported = True
        if dtype == torch.bfloat16 and self.device_type == 'cuda':
            is_supported = TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater)

        # Unsupported dtype/arch combinations must raise, not crash.
        if not is_supported:
            for num_batches in batch_sizes:
                b1 = torch.randn(num_batches, M, N, device=device).to(dtype)
                b2 = torch.randn(num_batches, N, O, device=device).to(dtype)
                self.assertRaisesRegex(RuntimeError, "type|Type|not implemented|CUBLAS_STATUS_NOT_SUPPORTED",
                                       lambda: torch.bmm(b1, b2))
            return

        def invert_perm(p):
            # Inverse of a length-3 permutation: invert_perm(p)[p[i]] == i.
            d = {x: i for i, x in enumerate(p)}
            return (d[0], d[1], d[2])

        def generate_inputs(num_batches):
            # transposed tensors: contiguous under some permutation, then
            # permuted back so the logical shape is (num_batches, M, N)
            for perm1, perm2 in itertools.product(itertools.permutations((0, 1, 2)), repeat=2):
                b1 = make_tensor((num_batches, M, N), device, dtype, low=-0.1, high=0.1)
                b2 = make_tensor((num_batches, N, O), device, dtype, low=-0.1, high=0.1)
                b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1))
                b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2))
                yield b1, b2
            # broadcasting tensors: size-1 dims expanded to full size
            for b1, b2, b3, b4, b5, b6 in itertools.product((True, False), repeat=6):
                shape1 = (num_batches if b1 else 1, M if b2 else 1, N if b3 else 1)
                shape2 = (num_batches if b4 else 1, N if b5 else 1, O if b6 else 1)
                b1 = make_tensor(shape1, device, dtype, low=-0.1, high=0.1).expand(num_batches, M, N)
                b2 = make_tensor(shape2, device, dtype, low=-0.1, high=0.1).expand(num_batches, N, O)
                yield b1, b2
            # zero-sized tensors (batch and inner dims kept consistent
            # between the two operands via shared z1/z3)
            for z1, z2, z3, z4 in itertools.product((True, False), repeat=4):
                shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0)
                shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0)
                b1 = torch.randn(shape1, dtype=dtype, device=device)
                b2 = torch.randn(shape2, dtype=dtype, device=device)
                yield b1, b2

        for num_batches in batch_sizes:
            for (b1, b2), perm3 in itertools.product(generate_inputs(num_batches), itertools.permutations((0, 1, 2))):
                res1 = torch.bmm(b1, b2)
                # NaN-filled out= buffer, permuted so every output layout is hit.
                res2 = torch.full((num_batches, M, O), math.nan, dtype=dtype, device=device) \
                    .permute(perm3).contiguous().permute(invert_perm(perm3))
                torch.bmm(b1, b2, out=res2)
                expect = torch.from_numpy(
                    b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)
                self.assertEqual(expect, res1)
                self.assertEqual(expect, res2)

                if self.device_type == 'cuda':
                    # check that mixed arguments are rejected
                    self.assertRaises(RuntimeError, lambda: torch.bmm(b1, b2.cpu()))
                    self.assertRaises(RuntimeError, lambda: torch.bmm(b1.cpu(), b2))
                    self.assertRaises(RuntimeError, lambda: torch.bmm(b1, b2, out=res2.cpu()))
def _test_addbmm_baddbmm(self, func, b1, b2, ref, out_tensor):
getattr(out_tensor, func + "_")(b1, b2)
self.assertEqual(out_tensor, ref)
res3 = out_tensor.clone()
with self.assertWarnsOnceRegex(
UserWarning, f"This overload of {func}_ is deprecated"):
getattr(out_tensor, func + "_")(1, b1, b2)
self.assertEqual(out_tensor, ref * 2),
getattr(res3, func + "_")(b1, b2, beta=1)
self.assertEqual(out_tensor, res3)
with self.assertWarnsOnceRegex(
UserWarning, f"This overload of {func}_ is deprecated"):
getattr(out_tensor, func + "_")(1., .5, b1, b2)
self.assertEqual(out_tensor, ref * 2.5)
getattr(res3, func + "_")(b1, b2, beta=1., alpha=.5)
self.assertEqual(out_tensor, res3)
with self.assertWarnsOnceRegex(
UserWarning, f"This overload of {func} is deprecated"):
self.assertEqual(out_tensor, getattr(torch, func)(1, out_tensor, 0, b1, b2))
res4 = getattr(torch, func)(out_tensor, b1, b2, beta=1, alpha=.5)
self.assertEqual(res4, ref * 3),
nan = torch.full_like(out_tensor, math.nan)
res5 = getattr(torch, func)(nan, b1, b2, beta=0, alpha=1)
self.assertEqual(res5, ref)
if b1.is_complex():
res6 = getattr(torch, func)(out_tensor, b1, b2, beta=.1j, alpha=.5j)
self.assertEqual(res6, out_tensor * .1j + .5j * ref)
else:
res6 = getattr(torch, func)(out_tensor, b1, b2, beta=.1, alpha=.5)
self.assertEqual(res6, out_tensor * .1 + .5 * ref)
res7 = torch.full_like(out_tensor, math.nan)
getattr(torch, func)(nan, b1, b2, beta=0, out=res7)
self.assertEqual(res7, ref)
    @precisionOverride({torch.half: 0.05, torch.bfloat16: 0.05})
    @onlyNativeDeviceTypes
    @dtypes(*get_all_fp_dtypes(), *get_all_complex_dtypes())
    @tf32_on_and_off(0.05)
    def test_addbmm(self, device, dtype):
        """torch.addbmm (batched matmul summed over the batch dimension, then
        added) over permuted, broadcast-expanded, and zero-sized inputs, via
        the shared _test_addbmm_baddbmm driver."""
        if self.device_type == 'cuda' and dtype is torch.bfloat16 and CUDA11OrLater and not SM53OrLater:
            # cuBLAS does not guarantee BFloat16 support on SM < 53.
            # So on PyTorch, we consider BFloat16 support on SM < 53 as
            # undefined behavior
            return
        num_batches = 2
        M, N, O = 16, 17, 18
        is_supported = True
        if dtype == torch.bfloat16:
            if self.device_type == 'cpu':
                self.precision = 1  # 43 vs 43.75
            else:
                is_supported = TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater)

        # Unsupported dtype/arch combinations must raise, not crash.
        if not is_supported:
            b1 = make_tensor((num_batches, M, N), device, dtype, low=-1, high=1)
            b2 = make_tensor((num_batches, N, O), device, dtype, low=-1, high=1)
            t = make_tensor((M, O), device, dtype, low=-1, high=1)
            self.assertRaisesRegex(RuntimeError, "type|Type|not implemented|CUBLAS_STATUS_NOT_SUPPORTED",
                                   lambda: torch.addbmm(t, b1, b2))
            return

        def invert_perm(p):
            # Inverse of a length-3 permutation: invert_perm(p)[p[i]] == i.
            d = {x: i for i, x in enumerate(p)}
            return (d[0], d[1], d[2])

        def generate_tensor():
            numpy_dtype = dtype if dtype != torch.bfloat16 else torch.float32
            # transposed tensors: contiguous under some permutation, then
            # permuted back; out_tensor is permuted too ("*0.1" keeps the
            # values small for low-precision dtypes)
            for perm1, perm2 in itertools.product(itertools.permutations((0, 1, 2)), repeat=2):
                for perm3 in itertools.permutations((0, 1)):
                    b1 = make_tensor((num_batches, M, N), device, dtype, low=-1, high=1) * 0.1
                    b2 = make_tensor((num_batches, N, O), device, dtype, low=-1, high=1) * 0.1
                    b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1))
                    b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2))
                    ref = torch.from_numpy(
                        b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()
                    ).to(device=device, dtype=dtype).sum(0)
                    out_tensor = torch.zeros_like(ref).permute(perm3).contiguous().permute(perm3)
                    yield b1, b2, ref, out_tensor
            # broadcasting tensors: size-1 dims expanded to full size
            for s1, s2, s3, s4, s5, s6 in itertools.product((True, False), repeat=6):
                shape1 = (num_batches if s1 else 1, M if s2 else 1, N if s3 else 1)
                shape2 = (num_batches if s4 else 1, N if s5 else 1, O if s6 else 1)
                b1 = make_tensor(shape1, device, dtype, low=-1, high=1).expand(num_batches, M, N) * 0.1
                b2 = make_tensor(shape2, device, dtype, low=-1, high=1).expand(num_batches, N, O) * 0.1
                ref = torch.from_numpy(
                    b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()
                ).to(device=device, dtype=dtype).sum(0)
                out_tensor = torch.zeros_like(ref)
                yield b1, b2, ref, out_tensor
            # zero-sized tensors (batch and inner dims kept consistent
            # between the operands via shared z1/z3)
            for z1, z2, z3, z4 in itertools.product((True, False), repeat=4):
                shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0)
                shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0)
                b1 = make_tensor(shape1, device, dtype, low=-1, high=1) * 0.1
                b2 = make_tensor(shape2, device, dtype, low=-1, high=1) * 0.1
                ref = torch.from_numpy(
                    b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()
                ).to(device=device, dtype=dtype).sum(0)
                out_tensor = torch.zeros_like(ref)
                yield b1, b2, ref, out_tensor

        for b1, b2, ref, out_tensor in generate_tensor():
            self._test_addbmm_baddbmm("addbmm", b1, b2, ref, out_tensor)
    @precisionOverride({torch.half: 0.1, torch.bfloat16: 0.5})
    @onlyNativeDeviceTypes
    @dtypes(*get_all_fp_dtypes(), *get_all_complex_dtypes())
    @tf32_on_and_off(0.05)
    def test_baddbmm(self, device, dtype):
        """torch.baddbmm (batched matmul added to a batched input) over
        permuted, broadcast-expanded, and zero-sized inputs, via the shared
        _test_addbmm_baddbmm driver."""
        if self.device_type == 'cuda' and dtype is torch.bfloat16 and CUDA11OrLater and not SM53OrLater:
            # cuBLAS does not guarantee BFloat16 support on SM < 53.
            # So on PyTorch, we consider BFloat16 support on SM < 53 as
            # undefined behavior
            return
        num_batches = 10
        M, N, O = 12, 8, 50
        is_supported = True
        if dtype == torch.bfloat16 and self.device_type == 'cuda':
            is_supported = TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater)

        # Unsupported dtype/arch combinations must raise, not crash.
        if not is_supported:
            b1 = make_tensor((num_batches, M, N), device, dtype, low=-1, high=1)
            b2 = make_tensor((num_batches, N, O), device, dtype, low=-1, high=1)
            t = make_tensor((num_batches, M, O), device, dtype, low=-1, high=1)
            self.assertRaisesRegex(RuntimeError, "type|Type|not implemented|CUBLAS_STATUS_NOT_SUPPORTED",
                                   lambda: torch.baddbmm(t, b1, b2))
            return

        def invert_perm(p):
            # Inverse of a length-3 permutation: invert_perm(p)[p[i]] == i.
            d = {x: i for i, x in enumerate(p)}
            return (d[0], d[1], d[2])

        def generate_tensor():
            numpy_dtype = dtype if dtype != torch.bfloat16 else torch.float32
            # transposed tensors: operands and out_tensor contiguous under
            # some permutation, then permuted back to the logical shape
            for perm1, perm2, perm3 in itertools.product(itertools.permutations((0, 1, 2)), repeat=3):
                b1 = make_tensor((num_batches, M, N), device, dtype, low=-1, high=1)
                b2 = make_tensor((num_batches, N, O), device, dtype, low=-1, high=1)
                b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1))
                b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2))
                ref = torch.from_numpy(
                    b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)
                out_tensor = torch.zeros_like(ref)
                out_tensor = out_tensor.permute(perm3).contiguous().permute(invert_perm(perm3))
                yield b1, b2, ref, out_tensor
            # broadcasting tensors: size-1 dims expanded to full size
            for s1, s2, s3, s4, s5, s6 in itertools.product((True, False), repeat=6):
                shape1 = (num_batches if s1 else 1, M if s2 else 1, N if s3 else 1)
                shape2 = (num_batches if s4 else 1, N if s5 else 1, O if s6 else 1)
                b1 = make_tensor(shape1, device, dtype, low=-1, high=1).expand(num_batches, M, N)
                b2 = make_tensor(shape2, device, dtype, low=-1, high=1).expand(num_batches, N, O)
                ref = torch.from_numpy(
                    b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)
                out_tensor = torch.zeros_like(ref)
                yield b1, b2, ref, out_tensor
            # zero-sized tensors (batch and inner dims kept consistent
            # between the operands via shared z1/z3)
            for z1, z2, z3, z4 in itertools.product((True, False), repeat=4):
                shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0)
                shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0)
                b1 = make_tensor(shape1, device, dtype, low=-2, high=2)
                b2 = make_tensor(shape2, device, dtype, low=-2, high=2)
                ref = torch.from_numpy(
                    b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)
                out_tensor = torch.zeros_like(ref)
                yield b1, b2, ref, out_tensor

        for b1, b2, ref, out_tensor in generate_tensor():
            self._test_addbmm_baddbmm("baddbmm", b1, b2, ref, out_tensor)
# TODO: update to compare against NumPy
@onlyCUDA
def test_solve_methods_arg_device(self, device):
for b_device, A_device in itertools.product(['cpu', device], repeat=2):
if b_device == A_device:
continue
b = torch.randn(3, 1, device=b_device)
A = torch.randn(3, 3, device=A_device)
# solve and cholesky_solve goes through generic backend dispatch and hit kernel specific device check first
# triangular_solve goes through specific backend dispatch (CPU/CUDA) and hit auto-generated device check first
generic_backend_dispatch_err_str = "Expected b and A to be on the same device"
specific_backend_dispatch_err_str = "Expected all tensors to be on the same device"
with self.assertRaisesRegex(RuntimeError, generic_backend_dispatch_err_str):
torch.solve(b, A)
with self.assertRaisesRegex(RuntimeError, generic_backend_dispatch_err_str):
torch.cholesky_solve(b, A)
with self.assertRaisesRegex(RuntimeError, specific_backend_dispatch_err_str):
torch.triangular_solve(b, A)
# b and A have to be modified to match accepted inputs sizes for lu_solve
b = b.unsqueeze(0)
A = A.unsqueeze(0)
with self.assertRaisesRegex(RuntimeError, specific_backend_dispatch_err_str):
torch.lu_solve(b, A, torch.rand(A.shape[:-1], device=A_device).int())
# This checks if a suitable error message is thrown
# when LU output and pivots are not on the same device
with self.assertRaisesRegex(RuntimeError, specific_backend_dispatch_err_str):
torch.lu_solve(b, A, torch.rand(A.shape[:-1], device=b_device).int())
@precisionOverride({torch.float32: 5e-3, torch.complex64: 1e-3})
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_pinverse(self, device, dtype):
from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value as fullrank
def run_test(M):
# Testing against definition for pseudo-inverses
MPI = torch.pinverse(M)
MPI_ = MPI.cpu().numpy()
M_ = M.cpu().numpy()
if M.numel() > 0:
self.assertEqual(M_, np.matmul(np.matmul(M_, MPI_), M_))
self.assertEqual(MPI_, np.matmul(np.matmul(MPI_, M_), MPI_))
self.assertEqual(np.matmul(M_, MPI_), np.matmul(M_, MPI_).swapaxes(-2, -1).conj())
self.assertEqual(np.matmul(MPI_, M_), np.matmul(MPI_, M_).swapaxes(-2, -1).conj())
else:
self.assertEqual(M.shape, MPI.shape[:-2] + (MPI.shape[-1], MPI.shape[-2]))
for sizes in [(5, 5), (3, 5, 5), (3, 7, 5, 5), # square matrices
(3, 2), (5, 3, 2), (7, 5, 3, 2), # fat matrices
(2, 3), (5, 2, 3), (7, 5, 2, 3), # thin matrices
(0, 0), (0, 2), (2, 0), (3, 0, 0), (0, 3, 0), (0, 0, 3)]: # zero numel matrices
M = torch.randn(*sizes, dtype=dtype, device=device)
run_test(M)
# Test inverse and pseudo-inverse for invertible matrix
for sizes in [(5, 5), (3, 5, 5), (3, 7, 5, 5)]:
matsize = sizes[-1]
batchdims = sizes[:-2]
M = fullrank(matsize, *batchdims, dtype=dtype, device=device)
self.assertEqual(torch.eye(matsize, dtype=dtype, device=device).expand(sizes), M.pinverse().matmul(M),
atol=1e-7, rtol=0, msg='pseudo-inverse for invertible matrix')
@skipCPUIfNoLapack
@skipCUDAIfNoMagmaAndNoCusolver
@dtypes(torch.double, torch.cdouble)
def test_matrix_power_non_negative(self, device, dtype):
def check(*size, noncontiguous=False):
t = make_tensor(size, device, dtype, noncontiguous=noncontiguous)
for n in range(8):
res = torch.linalg.matrix_power(t, n)
ref = np.linalg.matrix_power(t.cpu().numpy(), n)
self.assertEqual(res.cpu(), torch.from_numpy(ref))
check(0, 0)
check(1, 1)
check(5, 5)
check(5, 5, noncontiguous=True)
check(0, 3, 3)
check(2, 3, 3)
check(2, 3, 4, 4, noncontiguous=True)
@skipCPUIfNoLapack
@skipCUDAIfNoMagmaAndNoCusolver
@dtypes(torch.double, torch.cdouble)
def test_matrix_power_negative(self, device, dtype):
from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
def check(*size):
t = random_fullrank_matrix_distinct_singular_value(*size, dtype=dtype, device=device)
for n in range(-7, 0):
res = torch.linalg.matrix_power(t, n)
ref = np.linalg.matrix_power(t.cpu().numpy(), n)
self.assertEqual(res.cpu(), torch.from_numpy(ref))
check(0)
check(5)
check(0, 2)
check(3, 0)
check(3, 2)
check(5, 2, 3)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.complex64)
    def test_linalg_matrix_exp_utils(self, device, dtype):
        """Exercise torch._compute_linear_combination (helper used by
        matrix_exp) against an explicit broadcast-multiply-and-sum reference,
        including the `out=` variant."""
        # test linear combination
        def run_test(coeff_shape, data_shape):
            coeffs = torch.rand(*coeff_shape, device=device, dtype=torch.float)
            x = torch.rand(coeff_shape[1], *data_shape, device=device, dtype=dtype)
            res1 = torch._compute_linear_combination(x, coeffs)
            # Reference: broadcast coeffs over the data dims and reduce dim 1.
            res2 = (x.unsqueeze(0) * coeffs.view(*coeff_shape, *([1] * len(data_shape)))).sum(1)
            self.assertEqual(res1, res2, atol=1e-5, rtol=0.0)
            # check `out=` version
            res3 = torch.zeros(coeff_shape[0], *data_shape, device=device, dtype=dtype)
            torch._compute_linear_combination(x, coeffs, out=res3)
            self.assertEqual(res1, res3, atol=1e-5, rtol=0.0)
            # The assertions below show `out=` accumulates into existing values
            # (result equals res1 plus the pre-existing contents of `out`).
            res4 = torch.ones(coeff_shape[0], *data_shape, device=device, dtype=dtype)
            torch._compute_linear_combination(x, coeffs, out=res4)
            self.assertEqual(res1, res4 - 1.0, atol=1e-5, rtol=0.0)
            res5 = torch.ones(coeff_shape[0], *data_shape, device=device, dtype=dtype)
            res5_clone = res5.clone()
            torch._compute_linear_combination(x, coeffs, out=res5)
            self.assertEqual(res1, res5 - res5_clone, atol=1e-5, rtol=0.0)
        run_test([1, 3], [2, 2])
        run_test([3, 1], [2, 2])
        run_test([1, 10], [10, 10])
        run_test([10, 1], [10, 10])
        run_test([5, 3], [2, 2])
        run_test([5, 3], [100, 100])
        run_test([3, 4], [3, 3, 3])
        run_test([3, 4], [3, 3, 3, 3])
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.double, torch.complex64, torch.complex128)
    def test_linalg_matrix_exp_boundary_cases(self, device, dtype):
        """Check matrix_exp input validation (dtype, rank, squareness) and the
        trivial 1x1 case, where matrix_exp reduces to elementwise exp."""
        expm = torch.linalg.matrix_exp
        with self.assertRaisesRegex(RuntimeError, "Expected a floating point or complex tensor"):
            expm(torch.randn(3, 3).type(torch.int))
        with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
            expm(torch.randn(3))
        with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
            expm(torch.randn(3, 2, 1))
        # check 1x1 matrices
        x = torch.randn(3, 3, 1, 1)
        self.assertEqual(expm(x), x.exp())
    @slowTest
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_linalg_matrix_exp_analytic(self, device, dtype):
        """Compare matrix_exp against the analytic result for diagonalizable
        inputs: for M = Q diag(d) Q^-1, exp(M) = Q diag(exp(d)) Q^-1.

        Inputs are rescaled to a range of operator 1-norms chosen around the
        implementation's degree-selection thresholds (thetas), so every Pade
        approximation degree is exercised."""
        expm = torch.linalg.matrix_exp
        # check zero matrix
        x = torch.zeros(20, 20, dtype=dtype, device=device)
        self.assertTrue((expm(x) == torch.eye(20, 20, dtype=dtype, device=device)).all().item())
        def normalize_to_1_operator_norm(sample, desired_norm):
            # abs().sum(-2).max(-1) is the operator 1-norm (max column sum).
            sample_norm, _ = sample.abs().sum(-2).max(-1)
            sample_to_1_norm = sample / sample_norm.unsqueeze(-1).unsqueeze(-1)
            return sample_to_1_norm * desired_norm
        def gen_good_cond_number_matrices(*n):
            # Diagonally-dominant construction keeps the condition number low,
            # so the inverse used below is numerically reliable.
            identity = torch.eye(n[-2], n[-1], dtype=dtype, device=device).expand(*n)
            x = torch.rand(*n, dtype=dtype, device=device) / (n[-1] ** 2)
            x = (x - x * identity) + identity
            return x
        def run_test(*n):
            if dtype == torch.float:
                thetas = [
                    1.192092800768788e-07,  # deg 1
                    5.978858893805233e-04,  # deg 2
                    5.116619363445086e-02,  # deg 4
                    5.800524627688768e-01,  # deg 8
                    1.461661507209034e+00,  # deg 12
                    3.010066362817634e+00   # deg 18
                ]
            else:  # if torch.double
                thetas = [
                    2.220446049250313e-16,  # deg 1
                    2.580956802971767e-08,  # deg 2
                    3.397168839976962e-04,  # deg 4
                    4.991228871115323e-02,  # deg 8
                    2.996158913811580e-01,  # deg 12
                    1.090863719290036e+00   # deg 18
                ]
            # generate input
            q = gen_good_cond_number_matrices(*n)
            q_ = q.cpu().numpy()
            qinv = torch.inverse(q)
            qinv_ = qinv.cpu().numpy()
            d = torch.randn(n[:-1], dtype=dtype, device=device)
            # x = Q diag(d) Q^-1, so exp(x) has the closed form used below.
            x = torch.from_numpy(
                np.matmul(q_, np.matmul(torch.diag_embed(d).cpu().numpy(), qinv_))).to(device)
            x_norm, _ = x.abs().sum(-2).max(-1)
            # test simple analytic whatever norm generated
            mexp = expm(x)
            mexp_analytic = np.matmul(
                q_,
                np.matmul(
                    torch.diag_embed(d.exp()).cpu().numpy(),
                    qinv_
                )
            )
            self.assertEqual(mexp, mexp_analytic, atol=1e-3, rtol=0.0)
            # generate norms to test different degree expansions
            sample_norms = []
            for i in range(len(thetas) - 1):
                sample_norms.append(0.5 * (thetas[i] + thetas[i + 1]))
            sample_norms = [thetas[0] / 2] + sample_norms + [thetas[-1] * 2]
            # matrices to equal norm
            for sample_norm in sample_norms:
                x_normalized = normalize_to_1_operator_norm(x, sample_norm)
                mexp = expm(x_normalized)
                # Rescaling x rescales its eigenvalues d by the same factor.
                mexp_analytic = np.matmul(
                    q_,
                    np.matmul(
                        torch.diag_embed((d / x_norm.unsqueeze(-1) * sample_norm).exp()).cpu().numpy(),
                        qinv_
                    )
                )
                self.assertEqual(mexp, mexp_analytic, atol=1e-3, rtol=0.0)
        # single matrix
        run_test(2, 2)
        run_test(3, 3)
        run_test(4, 4)
        run_test(5, 5)
        run_test(100, 100)
        run_test(200, 200)
        # small batch of matrices
        run_test(3, 2, 2)
        run_test(3, 3, 3)
        run_test(3, 4, 4)
        run_test(3, 5, 5)
        run_test(3, 100, 100)
        run_test(3, 200, 200)
        # large batch of matrices
        run_test(3, 3, 2, 2)
        run_test(3, 3, 3, 3)
        run_test(3, 3, 4, 4)
        run_test(3, 3, 5, 5)
        run_test(3, 3, 100, 100)
        run_test(3, 3, 200, 200)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.double)
    def test_linalg_matrix_exp_batch(self, device, dtype):
        """Check that a batched matrix_exp equals matrix_exp applied to each
        batch element individually."""
        def run_test(*n):
            tensors_batch = torch.zeros(n, dtype=dtype, device=device)
            tensors_batch = tensors_batch.view(-1, n[-2], n[-1])
            num_matrices = tensors_batch.size(0)
            tensors_list = []
            for i in range(num_matrices):
                tensors_list.append(torch.randn(n[-2], n[-1], dtype=dtype, device=device))
            for i in range(num_matrices):
                tensors_batch[i, ...] = tensors_list[i]
            tensors_exp_map = (torch.linalg.matrix_exp(x) for x in tensors_list)
            tensors_exp_batch = torch.linalg.matrix_exp(tensors_batch)
            for i, tensor_exp in enumerate(tensors_exp_map):
                self.assertEqual(tensors_exp_batch[i, ...], tensor_exp)
        # small batch of matrices
        run_test(3, 2, 2)
        run_test(3, 3, 3)
        run_test(3, 4, 4)
        run_test(3, 5, 5)
        # large batch of matrices
        run_test(3, 3, 2, 2)
        run_test(3, 3, 3, 3)
        run_test(3, 3, 4, 4)
        run_test(3, 3, 5, 5)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_linalg_matrix_exp_compare_with_taylor(self, device, dtype):
        """Compare matrix_exp against a reference Taylor-series implementation
        (with scaling-and-squaring for large norms), at norms chosen around the
        implementation's degree-selection thresholds."""
        def normalize_to_1_operator_norm(sample, desired_norm):
            # abs().sum(-2).max(-1) is the operator 1-norm (max column sum).
            sample_norm, _ = sample.abs().sum(-2).max(-1)
            sample_to_1_norm = sample / sample_norm.unsqueeze(-1).unsqueeze(-1)
            return sample_to_1_norm * desired_norm
        def gen_good_cond_number_matrices(*n):
            # Diagonally-dominant construction keeps the condition number low.
            identity = torch.eye(n[-2], n[-1], dtype=dtype, device=device).expand(*n)
            x = torch.rand(*n, dtype=dtype, device=device) / (n[-1] ** 2)
            x = (x - x * identity) + identity
            return x
        def get_taylor_approximation(a, deg):
            # sum_{i=0..deg} a^i / i!, accumulated term by term in numpy.
            a_ = a.cpu().numpy()
            identity = torch.eye(a.size(-2), a.size(-1), dtype=dtype, device=device).expand_as(a)
            res = identity.cpu().numpy()
            taylor_term = identity.cpu().numpy()
            for i in range(1, deg + 1):
                taylor_term = np.matmul(a_, taylor_term) / i
                res = res + taylor_term
            return res
        def scale_square(a, deg):
            # Scaling-and-squaring: shrink a by 2^s (Frobenius norm < 1),
            # approximate exp, then square s times.
            if a.abs().pow(2).sum().sqrt() < 1.0:
                return get_taylor_approximation(a, 12)
            else:
                s = int(torch.log2(a.abs().pow(2).sum().sqrt()).ceil().item())
                b = a / (2 ** s)
                b = get_taylor_approximation(b, 18)
                for _ in range(s):
                    b = np.matmul(b, b)
                return torch.from_numpy(b).to(a.device)
        def run_test(*n):
            degs = [1, 2, 4, 8, 12, 18]
            if dtype == torch.float:
                thetas = [
                    1.192092800768788e-07,  # deg 1
                    5.978858893805233e-04,  # deg 2
                    5.116619363445086e-02,  # deg 4
                    5.800524627688768e-01,  # deg 8
                    1.461661507209034e+00,  # deg 12
                    3.010066362817634e+00   # deg 18
                ]
            else:  # if torch.double
                thetas = [
                    2.220446049250313e-16,  # deg 1
                    2.580956802971767e-08,  # deg 2
                    3.397168839976962e-04,  # deg 4
                    4.991228871115323e-02,  # deg 8
                    2.996158913811580e-01,  # deg 12
                    1.090863719290036e+00   # deg 18
                ]
            # generate norms to test different degree expansions
            sample_norms = []
            for i in range(len(thetas) - 1):
                sample_norms.append(0.5 * (thetas[i] + thetas[i + 1]))
            sample_norms = [thetas[0] / 2] + sample_norms + [thetas[-1] * 2]
            degs = [degs[0]] + degs
            for sample_norm, deg in zip(sample_norms, degs):
                x = gen_good_cond_number_matrices(*n)
                x = normalize_to_1_operator_norm(x, sample_norm)
                mexp = torch.linalg.matrix_exp(x)
                mexp_taylor = scale_square(x, deg)
                self.assertEqual(mexp, mexp_taylor, atol=1e-2, rtol=0.0)
        # single matrix
        run_test(2, 2)
        run_test(3, 3)
        run_test(4, 4)
        run_test(5, 5)
        # small batch of matrices
        run_test(3, 2, 2)
        run_test(3, 3, 3)
        run_test(3, 4, 4)
        run_test(3, 5, 5)
        # large batch of matrices
        run_test(3, 3, 2, 2)
        run_test(3, 3, 3, 3)
        run_test(3, 3, 4, 4)
        run_test(3, 3, 5, 5)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_slogdet(self, device, dtype):
        """Check torch.linalg.slogdet (values and out= variant) against
        numpy.linalg.slogdet over various matrix classes and batch shapes,
        including empty batches."""
        from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
                                                          random_hermitian_pd_matrix, random_square_matrix_of_rank)
        # mat_chars denotes matrix characteristics
        # possible values are: hermitian, hermitian_psd, hermitian_pd, singular, non_singular
        def run_test(matsize, batchdims, mat_chars):
            num_matrices = np.prod(batchdims)
            list_of_matrices = []
            if num_matrices != 0:
                # Cycle through the requested matrix classes across the batch.
                for idx in range(num_matrices):
                    mat_type = idx % len(mat_chars)
                    if mat_chars[mat_type] == 'hermitian':
                        list_of_matrices.append(random_hermitian_matrix(matsize, dtype=dtype, device=device))
                    elif mat_chars[mat_type] == 'hermitian_psd':
                        list_of_matrices.append(random_hermitian_psd_matrix(matsize, dtype=dtype, device=device))
                    elif mat_chars[mat_type] == 'hermitian_pd':
                        list_of_matrices.append(random_hermitian_pd_matrix(matsize, dtype=dtype, device=device))
                    elif mat_chars[mat_type] == 'singular':
                        list_of_matrices.append(torch.ones(matsize, matsize, dtype=dtype, device=device))
                    elif mat_chars[mat_type] == 'non_singular':
                        list_of_matrices.append(random_square_matrix_of_rank(matsize, matsize, dtype=dtype, device=device))
                full_tensor = torch.stack(list_of_matrices, dim=0).reshape(batchdims + (matsize, matsize))
            else:
                # Empty batch: torch.stack would fail, so build directly.
                full_tensor = torch.randn(*batchdims, matsize, matsize, dtype=dtype, device=device)
            actual_value = torch.linalg.slogdet(full_tensor)
            expected_value = np.linalg.slogdet(full_tensor.cpu().numpy())
            self.assertEqual(expected_value[0], actual_value[0], atol=self.precision, rtol=self.precision)
            self.assertEqual(expected_value[1], actual_value[1], atol=self.precision, rtol=self.precision)
            # test out=variant
            sign_out = torch.empty_like(actual_value[0])
            logabsdet_out = torch.empty_like(actual_value[1])
            ans = torch.linalg.slogdet(full_tensor, out=(sign_out, logabsdet_out))
            self.assertEqual(ans[0], sign_out)
            self.assertEqual(ans[1], logabsdet_out)
            self.assertEqual(sign_out, actual_value[0])
            self.assertEqual(logabsdet_out, actual_value[1])
        for matsize, batchdims in itertools.product([0, 3, 5], [(0,), (3,), (5, 3)]):
            run_test(matsize, batchdims, mat_chars=['hermitian_pd'])
            run_test(matsize, batchdims, mat_chars=['singular'])
            run_test(matsize, batchdims, mat_chars=['non_singular'])
            run_test(matsize, batchdims, mat_chars=['hermitian', 'hermitian_pd', 'hermitian_psd'])
            run_test(matsize, batchdims, mat_chars=['singular', 'non_singular'])
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_slogdet_errors_and_warnings(self, device, dtype):
        """Check slogdet's error messages (shape, rank, dtype), the resize
        warning for wrongly-shaped out= tensors, out= dtype validation, and
        the out= device-mismatch error."""
        # slogdet requires the input to be a square matrix or batch of square matrices
        a = torch.randn(2, 3, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, r'must be batches of square matrices'):
            torch.linalg.slogdet(a)
        # slogdet requires the input to be at least 2 dimensional tensor
        a = torch.randn(2, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, r'must have at least 2 dimensions'):
            torch.linalg.slogdet(a)
        # slogdet requires the input to be of float, double, cfloat or cdouble types
        a = torch.randn(2, 2, device=device, dtype=torch.bfloat16)
        with self.assertRaisesRegex(RuntimeError, r'of float, double, cfloat or cdouble types'):
            torch.linalg.slogdet(a)
        # if non-empty out tensor with wrong shape is passed a warning is given
        a = torch.randn(2, 3, 3, device=device, dtype=dtype)
        sign_out = torch.empty(1, device=device, dtype=dtype)
        # logabsdet is real-valued even for complex inputs.
        real_dtype = a.real.dtype if dtype.is_complex else dtype
        logabsdet_out = torch.empty(1, device=device, dtype=real_dtype)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.slogdet(a, out=(sign_out, logabsdet_out))
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # dtypes should be safely castable
        sign_out = torch.empty_like(a).to(torch.int)
        logabsdet_out = torch.empty_like(a).to(torch.int)
        with self.assertRaisesRegex(RuntimeError, "but got sign with dtype Int"):
            torch.linalg.slogdet(a, out=(sign_out, logabsdet_out))
        # sign_out is now valid, but logabsdet_out is still Int from above.
        sign_out = torch.empty(0, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "but got logabsdet with dtype Int"):
            torch.linalg.slogdet(a, out=(sign_out, logabsdet_out))
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            sign_out = torch.empty(0, device=wrong_device, dtype=dtype)
            logabsdet_out = torch.empty(0, device=wrong_device, dtype=real_dtype)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.slogdet(a, out=(sign_out, logabsdet_out))
@slowTest
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double)
def test_det_logdet_slogdet(self, device, dtype):
def reference_slogdet(M):
sdet, logabsdet = np.linalg.slogdet(M.detach().cpu().numpy())
return M.new_tensor(sdet), M.new_tensor(logabsdet)
def test_single_det(M, target, desc):
target_sdet, target_logabsdet = target
det = M.det()
logdet = M.logdet()
sdet, logabsdet = M.slogdet()
linalg_sdet, linalg_logabsdet = torch.linalg.slogdet(M)
# Test det
self.assertEqual(det, target_sdet * target_logabsdet.exp(),
atol=1e-7, rtol=0, msg='{} (det)'.format(desc))
# Test slogdet
# Compare the overall value rather than individual parts because of
# precision issues when det is near zero.
self.assertEqual(sdet * logabsdet.exp(), target_sdet * target_logabsdet.exp(),
atol=1e-7, rtol=0, msg='{} (slogdet)'.format(desc))
self.assertEqual(linalg_sdet * linalg_logabsdet.exp(), target_sdet * target_logabsdet.exp(),
atol=1e-7, rtol=0, msg='{} (linalg_slogdet)'.format(desc))
# Test logdet
# Compare logdet against our own pytorch slogdet because they should
# be consistent, while it may behave slightly differently with other
# slogdet implementations when det is near zero due to precision
# issues.
if sdet.item() < 0:
self.assertTrue(logdet.item() != logdet.item(), '{} (logdet negative case)'.format(desc))
else:
self.assertEqual(logdet.exp(), target_logabsdet.exp(),
atol=1e-7, rtol=0, msg='{} (logdet non-negative case)'.format(desc))
eye = torch.eye(5, dtype=dtype, device=device)
test_single_det(eye, (torch.ones((), dtype=dtype, device=device), torch.zeros((), dtype=dtype, device=device)), 'identity')
# Testing bug in #34061 (https://github.com/pytorch/pytorch/issues/34061)
for n in range(250, 551, 100):
mat = torch.randn(n, n, dtype=dtype, device=device)
q, _ = torch.qr(mat)
ref_det, ref_logabsdet = reference_slogdet(q)
test_single_det(q, (ref_det, ref_logabsdet), 'orthogonal')
def test(M):
assert M.size(0) >= 5, 'this helper fn assumes M to be at least 5x5'
M = M.to(device)
ref_M_sdet, ref_M_logabsdet = reference_slogdet(M)
test_single_det(M, (ref_M_sdet, ref_M_logabsdet), 'basic')
if ref_M_logabsdet.exp().item() >= 1e-6: # skip singular
M_inv = M.inverse()
test_single_det(M_inv, reference_slogdet(M_inv), 'inverse')
test_single_det(M, (ref_M_sdet, ref_M_logabsdet), 'transpose')
for x in [0, 2, 4]:
for scale in [-2, -0.1, 0, 10]:
if scale > 0:
target = ref_M_sdet, ref_M_logabsdet + math.log(scale)
elif scale == 0:
target = torch.zeros_like(ref_M_sdet), torch.full_like(ref_M_logabsdet, -inf)
else:
target = ref_M_sdet.neg(), ref_M_logabsdet + math.log(-scale)
# dim 0
M_clone = M.clone()
M_clone[:, x] *= scale
test_single_det(M_clone, target, 'scale a row')
# dim 1
M_clone = M.clone()
M_clone[x, :] *= scale
test_single_det(M_clone, target, 'scale a column')
for x1, x2 in [(0, 3), (4, 1), (3, 2)]:
assert x1 != x2, 'x1 and x2 needs to be different for this test'
target = torch.zeros_like(ref_M_sdet), torch.full_like(ref_M_logabsdet, -inf)
# dim 0
M_clone = M.clone()
M_clone[:, x2] = M_clone[:, x1]
test_single_det(M_clone, target, 'two rows are same')
# dim 1
M_clone = M.clone()
M_clone[x2, :] = M_clone[x1, :]
test_single_det(M_clone, target, 'two columns are same')
for scale1, scale2 in [(0.3, -1), (0, 2), (10, 0.1)]:
det_scale = scale1 * scale2 * -1
if det_scale > 0:
target = ref_M_sdet, ref_M_logabsdet + math.log(det_scale)
elif det_scale == 0:
target = torch.zeros_like(ref_M_sdet), torch.full_like(ref_M_logabsdet, -inf)
else:
target = ref_M_sdet.neg(), ref_M_logabsdet + math.log(-det_scale)
# dim 0
M_clone = M.clone()
t = M_clone[:, x1] * scale1
M_clone[:, x1] += M_clone[:, x2] * scale2
M_clone[:, x2] = t
test_single_det(M_clone, target, 'exchanging rows')
# dim 1
M_clone = M.clone()
t = M_clone[x1, :] * scale1
M_clone[x1, :] += M_clone[x2, :] * scale2
M_clone[x2, :] = t
test_single_det(M_clone, target, 'exchanging columns')
def get_random_mat_scale(n):
# For matrices with values i.i.d. with 0 mean, unit variance, and
# subexponential tail, we have:
# E[log det(A^2)] \approx log((n-1)!)
#
# Notice:
# log Var[det(A)] = log E[det(A^2)] >= E[log det(A^2)]
#
# So:
# stddev[det(A)] >= sqrt( (n-1)! )
#
# We use this as an intuitive guideline to scale random generated
# matrices so our closeness tests can work more robustly:
# scale by sqrt( (n-1)! )^(-1/n) = ( (n-1)! )^(-1/(2n))
#
# source: https://arxiv.org/pdf/1112.0752.pdf
# TODO: technically we need subexponential distn for this to hold,
# but we mostly use gaussian entries below. Consider switching
# to Chi-sq if this turns out not stable enough, since Chi-sq
# is easy enough to sample from.
return math.factorial(n - 1) ** (-1.0 / (2 * n))
for n in [5, 10, 25]:
scale = get_random_mat_scale(n)
test(torch.randn(n, n, dtype=dtype, device=device) * scale)
r = torch.randn(n, n, dtype=dtype, device=device) * scale
# symmetric psd
test(r.mm(r.t()))
# symmetric pd
r = torch.randn(n, n, dtype=dtype, device=device) * scale
test(r.mm(r.t()) + torch.eye(n, dtype=dtype, device=device) * 1e-6)
# symmetric
r = torch.randn(n, n, dtype=dtype, device=device) * scale
for i in range(n):
for j in range(i):
r[i, j] = r[j, i]
test(r)
# non-contiguous
test((torch.randn(n, n, n + 1, dtype=dtype, device=device) * scale)[:, 2, 1:])
# det = 0
r = torch.randn(n, n, dtype=dtype, device=device) * scale
u, s, v = r.svd()
if reference_slogdet(u)[0] < 0:
u = -u
if reference_slogdet(v)[0] < 0:
v = -v
s[0] *= -1
s[-1] = 0
test(u.mm(s.diag()).mm(v))
# Small values to test numerical stability. Note that we don't scale
r = torch.randn(512, 512, dtype=dtype, device=device)
u, s, v = r.svd()
s.fill_(1. / (100 * s.numel()))
test(u.mm(s.diag()).mm(v))
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.double)
    def test_det_logdet_slogdet_batched(self, device, dtype):
        """Check that batched det/logdet/slogdet equal the per-matrix results
        computed by looping over every batch index."""
        from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
                                                          random_symmetric_pd_matrix, random_square_matrix_of_rank)
        def run_test(matsize, batchdims, mat_chars):
            num_matrices = reduce(lambda x, y: x * y, batchdims, 1)
            list_of_matrices = []
            # Cycle through the requested matrix classes across the batch.
            for idx in range(num_matrices):
                mat_type = idx % len(mat_chars)
                if mat_chars[mat_type] == 'sym':
                    list_of_matrices.append(random_symmetric_matrix(matsize, dtype=dtype, device=device))
                elif mat_chars[mat_type] == 'sym_psd':
                    list_of_matrices.append(random_symmetric_psd_matrix(matsize, dtype=dtype, device=device))
                elif mat_chars[mat_type] == 'sym_pd':
                    list_of_matrices.append(random_symmetric_pd_matrix(matsize, dtype=dtype, device=device))
                elif mat_chars[mat_type] == 'sing':
                    list_of_matrices.append(torch.ones(matsize, matsize, dtype=dtype, device=device))
                elif mat_chars[mat_type] == 'non_sing':
                    list_of_matrices.append(random_square_matrix_of_rank(matsize, matsize, dtype=dtype, device=device))
            full_tensor = torch.stack(list_of_matrices, dim=0).reshape(batchdims + (matsize, matsize))
            # Scaling factor keeps determinant magnitudes in a testable range.
            full_tensor *= (math.factorial(matsize - 1) ** (-1.0 / (2 * matsize)))
            for fn in [torch.det, torch.logdet, torch.slogdet, torch.linalg.slogdet]:
                expected_value = []
                actual_value = fn(full_tensor)
                for full_idx in itertools.product(*map(lambda x: list(range(x)), batchdims)):
                    expected_value.append(fn(full_tensor[full_idx]))
                if fn == torch.slogdet or fn == torch.linalg.slogdet:
                    # slogdet returns (sign, logabsdet) pairs; stack each part.
                    sign_value = torch.stack([tup[0] for tup in expected_value], dim=0).reshape(batchdims)
                    expected_value = torch.stack([tup[1] for tup in expected_value], dim=0).reshape(batchdims)
                    self.assertEqual(sign_value, actual_value[0])
                    self.assertEqual(expected_value, actual_value[1])
                else:
                    expected_value = torch.stack(expected_value, dim=0).reshape(batchdims)
                    self.assertEqual(actual_value, expected_value)
        for matsize, batchdims in itertools.product([3, 5], [(3,), (5, 3)]):
            run_test(matsize, batchdims, mat_chars=['sym_pd'])
            run_test(matsize, batchdims, mat_chars=['sing'])
            run_test(matsize, batchdims, mat_chars=['non_sing'])
            run_test(matsize, batchdims, mat_chars=['sym', 'sym_pd', 'sym_psd'])
            run_test(matsize, batchdims, mat_chars=['sing', 'non_sing'])
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_cholesky_inverse(self, device, dtype):
        """Check cholesky_inverse(L) == inverse(A) for A = L L^H, over shapes,
        batches, upper/lower factors, and (non-)contiguous inputs; also check
        the out= variant with both contiguous and transposed-layout outputs."""
        from torch.testing._internal.common_utils import random_hermitian_pd_matrix
        def run_test(shape, batch, upper, contiguous):
            A = random_hermitian_pd_matrix(shape, *batch, dtype=dtype, device=device)
            if A.numel() > 0 and not contiguous:
                # A Hermitian matrix equals its conjugate transpose, so mT
                # yields a non-contiguous view of an equivalent-valued matrix.
                A = A.mT
                self.assertFalse(A.is_contiguous())
            L = torch.linalg.cholesky(A)
            expected_inverse = torch.inverse(A)
            L = L.mH if upper else L
            actual_inverse = torch.cholesky_inverse(L, upper)
            self.assertEqual(actual_inverse, expected_inverse)
        shapes = (0, 3, 5)
        batches = ((), (0,), (3, ), (2, 2))
        for shape, batch, upper, contiguous in list(itertools.product(shapes, batches, (True, False), (True, False))):
            run_test(shape, batch, upper, contiguous)
        # check the out= variant
        A = random_hermitian_pd_matrix(3, 2, dtype=dtype, device=device)
        L = torch.linalg.cholesky(A)
        # out= with a transposed (non-default) memory layout
        out = torch.empty_like(A)
        out_t = out.mT.clone(memory_format=torch.contiguous_format)
        out = out_t.mT
        ans = torch.cholesky_inverse(L, out=out)
        self.assertEqual(ans, out)
        expected = torch.inverse(A)
        self.assertEqual(expected, out)
        # out= with the default contiguous layout
        out = torch.empty_like(A)
        ans = torch.cholesky_inverse(L, out=out)
        self.assertEqual(ans, out)
        expected = torch.inverse(A)
        self.assertEqual(expected, out)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_cholesky_inverse_errors_and_warnings(self, device, dtype):
        """Check cholesky_inverse's shape/dtype/device validation, the resize
        warning for wrongly-shaped out=, and the zero-diagonal behavior
        (error on CPU, inf/nan result on CUDA)."""
        # cholesky_inverse requires the input to be at least 2 dimensional tensor
        a = torch.randn(2, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
            torch.cholesky_inverse(a)
        # cholesky_inverse requires a square matrix
        a = torch.randn(2, 3, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
            torch.cholesky_inverse(a)
        # if non-empty out tensor with wrong shape is passed a warning is given
        a = torch.randn(3, 3, device=device, dtype=dtype)
        out = torch.empty(2, 3, device=device, dtype=dtype)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.cholesky_inverse(a, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # dtypes should be safely castable
        out = torch.empty(*a.shape, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
            torch.cholesky_inverse(a, out=out)
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, device=wrong_device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
                torch.cholesky_inverse(a, out=out)
        # cholesky_inverse raises an error for invalid inputs on CPU
        # for example if at least one diagonal element is zero
        a = torch.randn(3, 3, device=device, dtype=dtype)
        a[1, 1] = 0
        if self.device_type == 'cpu':
            with self.assertRaisesRegex(RuntimeError, r"cholesky_inverse: The diagonal element 2 is zero"):
                torch.cholesky_inverse(a)
        # cholesky_inverse on GPU does not raise an error for this case
        elif self.device_type == 'cuda':
            out = torch.cholesky_inverse(a)
            self.assertTrue(out.isinf().any() or out.isnan().any())
def _select_broadcastable_dims(self, dims_full=None):
if dims_full is None:
dims_full = []
ndims = random.randint(1, 4)
dims_full = [random.randint(1, 8) for _ in range(ndims)]
else:
ndims = len(dims_full)
smaller_ndims = random.randint(1, ndims)
dims_small = []
dims_large = []
for i in range(ndims - 1, -1, -1):
j = random.randint(1, 3)
if j == 1:
ds = dims_full[i]
dl = dims_full[i]
elif j == 2:
ds = dims_full[i]
dl = 1 if len(dims_small) < smaller_ndims else dims_full[i]
elif j == 3:
ds = 1
dl = dims_full[i]
dims_large = [dl] + dims_large
if len(dims_small) < smaller_ndims:
dims_small = [ds] + dims_small
return (dims_small, dims_large, dims_full)
    def test_broadcast_fused_matmul(self, device):
        """Check that the fused matmul ops (baddbmm/addbmm/addmm/addmv/addr)
        give the same result for a broadcastable `input` argument as for its
        fully-expanded equivalent."""
        fns = ["baddbmm", "addbmm", "addmm", "addmv", "addr"]
        for fn in fns:
            batch_dim = random.randint(1, 8)
            n_dim = random.randint(1, 8)
            m_dim = random.randint(1, 8)
            p_dim = random.randint(1, 8)
            def dims_full_for_fn():
                # Returns (input_dims, mat1_dims, mat2_dims) for each op.
                if fn == "baddbmm":
                    return ([batch_dim, n_dim, p_dim], [batch_dim, n_dim, m_dim], [batch_dim, m_dim, p_dim])
                elif fn == "addbmm":
                    return ([n_dim, p_dim], [batch_dim, n_dim, m_dim], [batch_dim, m_dim, p_dim])
                elif fn == "addmm":
                    return ([n_dim, p_dim], [n_dim, m_dim], [m_dim, p_dim])
                elif fn == "addmv":
                    return ([n_dim], [n_dim, m_dim], [m_dim])
                elif fn == "addr":
                    return ([n_dim, m_dim], [n_dim], [m_dim])
                else:
                    raise AssertionError("unknown function")
            (t0_dims_full, t1_dims, t2_dims) = dims_full_for_fn()
            (t0_dims_small, _, _) = self._select_broadcastable_dims(t0_dims_full)
            t0_small = torch.randn(*t0_dims_small, device=device).float()
            t1 = torch.randn(*t1_dims, device=device).float()
            t2 = torch.randn(*t2_dims, device=device).float()
            t0_full = t0_small.expand(*t0_dims_full).to(device)
            fntorch = getattr(torch, fn)
            # Broadcast path and expanded path must agree.
            r0 = fntorch(t0_small, t1, t2)
            r1 = fntorch(t0_full, t1, t2)
            self.assertEqual(r0, r1)
    @tf32_on_and_off(0.001)
    def test_broadcast_batched_matmul(self, device):
        """Check matmul batch broadcasting: an un-expanded operand must give
        the same result as its explicitly expanded counterpart, and both must
        match torch.matmul, the out= variant, and an equivalent torch.bmm.

        Covers matrix @ matrix as well as 1-D operands on either side (which
        matmul treats via implicit unsqueeze plus a result squeeze)."""
        n_dim = random.randint(1, 8)
        m_dim = random.randint(1, 8)
        p_dim = random.randint(1, 8)
        full_batch_dims = [random.randint(1, 3) for i in range(random.randint(1, 3))]
        (batch_dims_small, _, _) = self._select_broadcastable_dims(full_batch_dims)
        def verify_batched_matmul(full_lhs, one_dimensional):
            # full_lhs: which side carries the full batch dims;
            # one_dimensional: the non-full side is a 1-D tensor.
            if not one_dimensional:
                lhs_dims = [n_dim, m_dim]
                rhs_dims = [m_dim, p_dim]
                result_dims = [n_dim, p_dim]
            else:
                lhs_dims = [n_dim, m_dim] if full_lhs else [m_dim]
                rhs_dims = [m_dim, p_dim] if not full_lhs else [m_dim]
                result_dims = [n_dim] if full_lhs else [p_dim]
            # Matrix-shaped versions of 1-D operands (unsqueeze semantics).
            lhs_mat_dims = lhs_dims if len(lhs_dims) != 1 else [1, m_dim]
            rhs_mat_dims = rhs_dims if len(rhs_dims) != 1 else [m_dim, 1]
            full_mat_dims = lhs_mat_dims if full_lhs else rhs_mat_dims
            dim0_dims = rhs_dims if full_lhs else lhs_dims
            small_dims = batch_dims_small + (rhs_mat_dims if full_lhs else lhs_mat_dims)
            small = torch.randn(*(small_dims), device=device).float()
            dim0 = torch.randn(*(dim0_dims), device=device).float()
            full = torch.randn(*(full_batch_dims + full_mat_dims), device=device).float()
            if not one_dimensional:
                (lhsTensors, rhsTensors) = ((full,), (small, dim0)) if full_lhs else ((small, dim0), (full,))
            else:
                (lhsTensors, rhsTensors) = ((full,), (dim0,)) if full_lhs else ((dim0,), (full,))
            def maybe_squeeze_result(l, r, result):
                # Undo the implicit unsqueeze matmul applies to 1-D operands.
                if len(lhs_dims) == 1 and l.dim() != 1:
                    return result.squeeze(-2)
                elif len(rhs_dims) == 1 and r.dim() != 1:
                    return result.squeeze(-1)
                else:
                    return result
            for lhs in lhsTensors:
                lhs_expanded = lhs.expand(*(torch.Size(full_batch_dims) + torch.Size(lhs_mat_dims)))
                lhs_expanded_matmul_fn = lhs_expanded.matmul
                for rhs in rhsTensors:
                    rhs_expanded = ((rhs if len(rhs_dims) != 1 else rhs.unsqueeze(-1)).
                                    expand(*(torch.Size(full_batch_dims) + torch.Size(rhs_mat_dims))))
                    truth = maybe_squeeze_result(lhs_expanded, rhs_expanded, lhs_expanded_matmul_fn(rhs_expanded))
                    for l in (lhs, lhs_expanded):
                        for r in (rhs, rhs_expanded):
                            l_matmul_fn = l.matmul
                            result = maybe_squeeze_result(l, r, l_matmul_fn(r))
                            self.assertEqual(truth, result)
                            # test torch.matmul function as well
                            torch_result = maybe_squeeze_result(l, r, torch.matmul(l, r))
                            self.assertEqual(truth, torch_result)
                            # test torch.matmul with out
                            out = torch.zeros_like(torch_result)
                            torch.matmul(l, r, out=out)
                            self.assertEqual(truth, maybe_squeeze_result(l, r, out))
                    # compare to bmm
                    bmm_result = (torch.bmm(lhs_expanded.contiguous().view(-1, *lhs_mat_dims),
                                            rhs_expanded.contiguous().view(-1, *rhs_mat_dims)))
                    self.assertEqual(truth.view(-1, *result_dims), bmm_result.view(-1, *result_dims))
        for indices in itertools.product((True, False), repeat=2):
            verify_batched_matmul(*indices)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_lu_solve_batched_non_contiguous(self, device, dtype):
        """Check lu_solve on non-contiguous (permuted) A and b against
        numpy.linalg.solve."""
        from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
        A = random_fullrank_matrix_distinct_singular_value(2, 2, dtype=dtype, device=device)
        b = torch.randn(2, 2, 2, dtype=dtype, device=device)
        x_exp = np.linalg.solve(A.cpu().permute(0, 2, 1).numpy(), b.cpu().permute(2, 1, 0).numpy())
        # Permutations make both operands non-contiguous views.
        A = A.permute(0, 2, 1)
        b = b.permute(2, 1, 0)
        assert not A.is_contiguous() and not b.is_contiguous(), "contiguous inputs"
        LU_data, LU_pivots = torch.lu(A)
        x = torch.lu_solve(b, LU_data, LU_pivots)
        self.assertEqual(x, x_exp)
def lu_solve_test_helper(self, A_dims, b_dims, pivot, device, dtype):
from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
b = torch.randn(*b_dims, dtype=dtype, device=device)
A = random_fullrank_matrix_distinct_singular_value(*A_dims, dtype=dtype, device=device)
LU_data, LU_pivots, info = torch.lu(A, get_infos=True, pivot=pivot)
self.assertEqual(info, torch.zeros_like(info))
return b, A, LU_data, LU_pivots
    @skipCPUIfNoLapack
    @skipCUDAIfNoMagma
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_lu_solve(self, device, dtype):
        """Check lu_solve by verifying A @ x == b for several system sizes.

        The no-pivot path is only exercised on CUDA (the CPU implementation
        requires pivoting)."""
        def sub_test(pivot):
            for k, n in zip([2, 3, 5], [3, 5, 7]):
                b, A, LU_data, LU_pivots = self.lu_solve_test_helper((n,), (n, k), pivot, device, dtype)
                x = torch.lu_solve(b, LU_data, LU_pivots)
                self.assertEqual(b, np.matmul(A.cpu(), x.cpu()))
        sub_test(True)
        if self.device_type == 'cuda':
            sub_test(False)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_lu_solve_batched(self, device, dtype):
        """Check batched lu_solve against a per-batch-element loop, verify
        A @ x == b, and check the empty-batch edge case."""
        def sub_test(pivot):
            def lu_solve_batch_test_helper(A_dims, b_dims, pivot):
                b, A, LU_data, LU_pivots = self.lu_solve_test_helper(A_dims, b_dims, pivot, device, dtype)
                x_exp_list = []
                # Reference: solve each batch element separately.
                for i in range(b_dims[0]):
                    x_exp_list.append(torch.lu_solve(b[i], LU_data[i], LU_pivots[i]))
                x_exp = torch.stack(x_exp_list)
                x_act = torch.lu_solve(b, LU_data, LU_pivots)
                self.assertEqual(x_exp, x_act)
                Ax = np.matmul(A.cpu(), x_act.cpu())
                self.assertEqual(b, Ax)
            for batchsize in [1, 3, 4]:
                lu_solve_batch_test_helper((5, batchsize), (batchsize, 5, 10), pivot)
        # Tests tensors with 0 elements
        b = torch.randn(3, 0, 3, dtype=dtype, device=device)
        A = torch.randn(3, 0, 0, dtype=dtype, device=device)
        LU_data, LU_pivots = torch.lu(A)
        self.assertEqual(torch.empty_like(b), b.lu_solve(LU_data, LU_pivots))
        sub_test(True)
        if self.device_type == 'cuda':
            sub_test(False)
    @slowTest
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_lu_solve_batched_many_batches(self, device, dtype):
        """Stress lu_solve with very large batch counts (64K and 256K),
        verifying A @ x reproduces b."""
        def run_test(A_dims, b_dims):
            b, A, LU_data, LU_pivots = self.lu_solve_test_helper(A_dims, b_dims, True, device, dtype)
            x = torch.lu_solve(b, LU_data, LU_pivots)
            Ax = torch.matmul(A, x)
            self.assertEqual(Ax, b.expand_as(Ax))
        run_test((5, 65536), (65536, 5, 10))
        run_test((5, 262144), (262144, 5, 10))
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_lu_solve_batched_broadcasting(self, device, dtype):
        """Check lu_solve broadcasting between A's and b's batch dimensions
        against numpy.linalg.solve (which broadcasts the same way)."""
        from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
        def run_test(A_dims, b_dims, pivot=True):
            A_matrix_size = A_dims[-1]
            A_batch_dims = A_dims[:-2]
            A = random_fullrank_matrix_distinct_singular_value(A_matrix_size, *A_batch_dims, dtype=dtype, device=device)
            b = make_tensor(b_dims, dtype=dtype, device=device)
            x_exp = np.linalg.solve(A.cpu(), b.cpu())
            LU_data, LU_pivots = torch.lu(A, pivot=pivot)
            x = torch.lu_solve(b, LU_data, LU_pivots)
            self.assertEqual(x, x_exp)
        # test against numpy.linalg.solve
        run_test((2, 1, 3, 4, 4), (2, 1, 3, 4, 6))  # no broadcasting
        run_test((2, 1, 3, 4, 4), (4, 6))  # broadcasting b
        run_test((4, 4), (2, 1, 3, 4, 2))  # broadcasting A
        run_test((1, 3, 1, 4, 4), (2, 1, 3, 4, 5))  # broadcasting A & b
@onlyCUDA
@skipCUDAIfNoMagma
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_lu_solve_large_matrices(self, device, dtype):
def run_test(A_dims, b_dims):
b, A, LU_data, LU_pivots = self.lu_solve_test_helper(A_dims, b_dims, True, device, dtype)
x = torch.lu_solve(b, LU_data, LU_pivots)
Ax = torch.matmul(A, x)
self.assertEqual(Ax, b.expand_as(Ax))
run_test((1, 1), (1, 1, 1025))
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_lu_solve_out_errors_and_warnings(self, device, dtype):
        """lu_solve with out=: wrong dtype or device must raise; wrong shape warns and resizes."""
        # out dtype has to match the dtype of the input tensors
        a = torch.eye(2, dtype=dtype, device=device)
        LU_data, LU_pivots = torch.lu(a, pivot=True)
        b = torch.randn(2, 1, dtype=dtype, device=device)
        out = torch.empty(0, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
            torch.lu_solve(b, LU_data, LU_pivots, out=out)
        # out device has to match the device of the input tensors
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, dtype=dtype, device=wrong_device)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.lu_solve(b, LU_data, LU_pivots, out=out)
        # A correctly typed but wrongly sized out tensor triggers the resize warning.
        with warnings.catch_warnings(record=True) as w:
            out = torch.empty(1, dtype=dtype, device=device)
            torch.lu_solve(b, LU_data, LU_pivots, out=out)
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
    @precisionOverride({torch.float32: 1e-5, torch.complex64: 1e-5})
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_symeig(self, device, dtype):
        """torch.symeig on (batched) Hermitian matrices: out= variant, method form,
        and non-contiguous inputs, with and without eigenvector computation."""
        from torch.testing._internal.common_utils import random_hermitian_matrix
        def run_test(dims, eigenvectors, upper):
            x = random_hermitian_matrix(*dims, dtype=dtype, device=device)
            if dtype.is_complex:
                real_dtype = torch.float32 if dtype is torch.complex64 else torch.float64
            else:
                real_dtype = dtype
            # Eigenvalues of a Hermitian matrix are real, hence the real dtype for oute.
            oute = torch.empty(dims[1:] + dims[:1], dtype=real_dtype, device=device)
            outv = torch.empty(dims[1:] + dims[:1] * 2, dtype=dtype, device=device)
            torch.symeig(x, eigenvectors=eigenvectors, upper=upper, out=(oute, outv))
            if eigenvectors:
                # Verify the reconstruction x == V @ diag(e) @ V^H.
                outv_ = outv.cpu().numpy()
                x_recon = np.matmul(np.matmul(outv_, torch.diag_embed(oute.to(dtype)).cpu().numpy()),
                                    outv_.swapaxes(-2, -1).conj())
                self.assertEqual(x, x_recon, atol=1e-8, rtol=0, msg='Incorrect reconstruction using V @ diag(e) @ V.T')
            else:
                # Without eigenvectors: eigenvalues must match and V must come back empty.
                eigvals, _ = torch.symeig(x, eigenvectors=True, upper=upper)
                self.assertEqual(eigvals, oute, msg='Eigenvalues mismatch')
                self.assertEqual(torch.empty(0, device=device, dtype=dtype), outv, msg='Eigenvector matrix not empty')
            # The Tensor-method form must agree with the out= form.
            rese, resv = x.symeig(eigenvectors=eigenvectors, upper=upper)
            self.assertEqual(rese, oute, msg="outputs of symeig and symeig with out don't match")
            self.assertEqual(resv, outv, msg="outputs of symeig and symeig with out don't match")
            # Repeat on a deliberately non-contiguous (permuted) input.
            x = random_hermitian_matrix(*dims, dtype=dtype, device=device)
            n_dim = len(dims) + 1
            # Reverse batch dims and transpose the trailing matrix dims.
            x = x.permute(tuple(range(n_dim - 3, -1, -1)) + (n_dim - 1, n_dim - 2))
            assert not x.is_contiguous(), "x is intentionally non-contiguous"
            rese, resv = torch.symeig(x, eigenvectors=eigenvectors, upper=upper)
            if eigenvectors:
                resv_ = resv.cpu().numpy()
                x_recon = np.matmul(np.matmul(resv_, torch.diag_embed(rese.to(dtype)).cpu().numpy()),
                                    resv_.swapaxes(-2, -1).conj())
                self.assertEqual(x, x_recon, atol=1e-8, rtol=0, msg='Incorrect reconstruction using V @ diag(e) @ V.T')
            else:
                eigvals, _ = torch.symeig(x, eigenvectors=True, upper=upper)
                self.assertEqual(eigvals, rese, msg='Eigenvalues mismatch')
                self.assertEqual(torch.empty(0, device=device, dtype=dtype), resv, msg='Eigenvector matrix not empty')
        # Cross product of batch shapes x eigenvectors flag x upper/lower triangle.
        batch_dims_set = [(), (3,), (3, 5), (5, 3, 5)]
        for batch_dims, eigenvectors, upper in itertools.product(batch_dims_set, (True, False), (True, False)):
            run_test((5,) + batch_dims, eigenvectors, upper)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_symeig_out_errors_and_warnings(self, device, dtype):
        """symeig with out=: wrong-shape outputs warn and get resized; wrong dtype
        or wrong device raises."""
        from torch.testing._internal.common_utils import random_hermitian_matrix
        # A non-empty out tensor with the wrong shape triggers one resize warning each.
        a = random_hermitian_matrix(3, dtype=dtype, device=device)
        real_dtype = a.real.dtype if dtype.is_complex else dtype
        out_w = torch.empty(7, 7, dtype=real_dtype, device=device)
        out_v = torch.empty(7, 7, dtype=dtype, device=device)
        with warnings.catch_warnings(record=True) as w:
            torch.symeig(a, out=(out_w, out_v))
            self.assertTrue("An output with one or more elements was resized" in str(w[-2].message))
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # dtypes must be safely castable: integer out tensors are rejected.
        out_w = torch.empty(0, dtype=real_dtype, device=device)
        out_v = torch.empty(0, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got eigenvectors with dtype Int"):
            torch.symeig(a, out=(out_w, out_v))
        out_w = torch.empty(0, dtype=torch.int, device=device)
        out_v = torch.empty(0, dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got eigenvalues with dtype Int"):
            torch.symeig(a, out=(out_w, out_v))
        # Each out tensor must live on the same device as the input.
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out_w = torch.empty(0, device=wrong_device, dtype=dtype)
            out_v = torch.empty(0, device=device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.symeig(a, out=(out_w, out_v))
            out_w = torch.empty(0, device=device, dtype=dtype)
            out_v = torch.empty(0, device=wrong_device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.symeig(a, out=(out_w, out_v))
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    def test_pca_lowrank(self, device):
        """torch.pca_lowrank on dense low-rank and sparse matrices: output shapes,
        the centering identity U @ diag(S) @ V^T == A - mean, rank detection,
        agreement with exact singular values, and TorchScript support."""
        from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
        dtype = torch.double
        def run_subtest(guess_rank, actual_rank, matrix_size, batches, device, pca, **options):
            density = options.pop('density', 1)
            if isinstance(matrix_size, int):
                rows = columns = matrix_size
            else:
                rows, columns = matrix_size
            if density == 1:
                a_input = random_lowrank_matrix(actual_rank, rows, columns, *batches, device=device, dtype=dtype)
                a = a_input
            else:
                # density < 1 exercises the sparse input path; checks use the dense view.
                a_input = random_sparse_matrix(rows, columns, density, device=device, dtype=dtype)
                a = a_input.to_dense()
            u, s, v = pca(a_input, q=guess_rank, **options)
            self.assertEqual(s.shape[-1], guess_rank)
            self.assertEqual(u.shape[-2], rows)
            self.assertEqual(u.shape[-1], guess_rank)
            self.assertEqual(v.shape[-1], guess_rank)
            self.assertEqual(v.shape[-2], columns)
            # PCA factors the *centered* matrix: U @ diag(S) @ V^T must equal
            # a minus its column means.
            A1 = u.matmul(s.diag_embed()).matmul(v.mT)
            ones_m1 = torch.ones(batches + (rows, 1), dtype=a.dtype, device=device)
            c = a.sum(axis=-2) / rows
            c = c.reshape(batches + (1, columns))
            A2 = a - ones_m1.matmul(c)
            self.assertEqual(A1, A2)
            if density == 1:
                # actual_rank is only known for the dense low-rank construction.
                detect_rank = (s.abs() > 1e-5).sum(axis=-1)
                self.assertEqual(actual_rank * torch.ones(batches, device=device, dtype=torch.int64), detect_rank)
                S = torch.linalg.svdvals(A2)
                self.assertEqual(s[..., :actual_rank], S[..., :actual_rank])
        all_batches = [(), (1,), (3,), (2, 3)]
        # NOTE(review): the loop target rebinds 'all_batches' each iteration; this works
        # because the list literal is evaluated before the loop starts, but the
        # shadowing is easy to misread.
        for actual_rank, size, all_batches in [
            (2, (17, 4), all_batches),
            (2, (100, 4), all_batches),
            (6, (100, 40), all_batches),
            (12, (1000, 1000), [()]),
        ]:
            for batches in all_batches:
                for guess_rank in [
                        actual_rank,
                        actual_rank + 2,
                        actual_rank + 6,
                ]:
                    if guess_rank <= min(*size):
                        # Run both orientations (tall and wide) of each size.
                        run_subtest(guess_rank, actual_rank, size, batches, device, torch.pca_lowrank)
                        run_subtest(guess_rank, actual_rank, size[::-1], batches, device, torch.pca_lowrank)
        # Sparse inputs at two densities.
        for guess_rank, size in [
                (4, (17, 4)), (4, (4, 17)), (16, (17, 17)),
                (21, (100, 40)), (20, (40, 100)), (600, (1000, 1000))]:
            for density in [0.005, 0.1]:
                run_subtest(guess_rank, None, size, (), device, torch.pca_lowrank, density=density)
        # pca_lowrank must remain scriptable.
        jitted = torch.jit.script(torch.pca_lowrank)
        guess_rank, actual_rank, size, batches = 2, 2, (17, 4), ()
        run_subtest(guess_rank, actual_rank, size, batches, device, jitted)
@onlyNativeDeviceTypes
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64)
def test_nuclear_norm_out(self, device, dtype):
test_cases = [
# input size, dim
((25, 25), None),
((25, 25), (0, 1)),
((25, 25), (1, 0)),
((25, 25, 25), (2, 0)),
((25, 25, 25), (0, 1)),
]
for keepdim in [False, True]:
for input_size, dim in test_cases:
msg = f'input_size: {input_size}, dim: {dim}, keepdim: {keepdim}'
x = torch.randn(*input_size, device=device, dtype=dtype)
result_out = torch.empty(0, device=device, dtype=dtype)
if dim is None:
result = torch.nuclear_norm(x, keepdim=keepdim)
torch.nuclear_norm(x, keepdim=keepdim, out=result_out)
else:
result = torch.nuclear_norm(x, keepdim=keepdim, dim=dim)
torch.nuclear_norm(x, keepdim=keepdim, dim=dim, out=result_out)
self.assertEqual(result, result_out, msg=msg)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_geqrf(self, device, dtype):
        """torch.geqrf vs numpy.linalg.qr(mode='raw') over batched and empty shapes."""
        def run_test(shape):
            # numpy.linalg.qr with mode = 'raw' computes the same operation as torch.geqrf
            # so this test compares against that function
            A = make_tensor(shape, dtype=dtype, device=device)
            # numpy.linalg.qr doesn't work with batched input
            m, n = A.shape[-2:]
            # tau has min(m, n) elements; pick the matching signature dimension name.
            tau_size = "n" if m > n else "m"
            np_dtype = A.cpu().numpy().dtype
            ot = [np_dtype, np_dtype]
            # np.vectorize maps the 2-D qr across any leading batch dimensions.
            numpy_geqrf_batched = np.vectorize(
                lambda x: np.linalg.qr(x, mode='raw'),
                otypes=ot,
                signature=f'(m,n)->(n,m),({tau_size})')
            expected = numpy_geqrf_batched(A.cpu())
            actual = torch.geqrf(A)
            # numpy's raw result is transposed relative to torch's layout.
            self.assertEqual(expected[0].swapaxes(-2, -1), actual[0])
            self.assertEqual(expected[1], actual[1])
        batches = [(), (0, ), (2, ), (2, 1)]
        ns = [5, 2, 0]
        for batch, (m, n) in product(batches, product(ns, ns)):
            run_test((*batch, m, n))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double)
def test_lstsq(self, device, dtype):
def _test_underdetermined(a, b, expectedNorm):
if self.device_type != 'cpu':
return
m = a.size()[0]
n = a.size()[1]
assert(m <= n)
a_copy = a.clone()
b_copy = b.clone()
res1 = torch.lstsq(b, a)[0]
self.assertEqual(a, a_copy, atol=0, rtol=0)
self.assertEqual(b, b_copy, atol=0, rtol=0)
self.assertEqual((torch.mm(a, res1) - b).norm(), expectedNorm, atol=1e-8, rtol=0)
ta = torch.tensor((), dtype=dtype, device=device)
tb = torch.tensor((), dtype=dtype, device=device)
res2 = torch.lstsq(b, a, out=(tb, ta))[0]
self.assertEqual(a, a_copy, atol=0, rtol=0)
self.assertEqual(b, b_copy, atol=0, rtol=0)
self.assertEqual((torch.mm(a, res1) - b).norm(), expectedNorm, atol=1e-8, rtol=0)
res3 = torch.lstsq(b, a, out=(b, a))[0]
self.assertEqual((torch.mm(a_copy, b) - b_copy).norm(), expectedNorm, atol=1e-8, rtol=0)
self.assertEqual(res1, tb, atol=0, rtol=0)
self.assertEqual(res1, b, atol=0, rtol=0)
self.assertEqual(res1, res2, atol=0, rtol=0)
self.assertEqual(res1, res3, atol=0, rtol=0)
def _test_overdetermined(a, b, expectedNorm):
m = a.size()[0]
n = a.size()[1]
assert(m > n)
def check_norm(a, b, expected_norm, gels_result):
x = gels_result[:n]
resid_info = gels_result[n:]
resid_norm = (torch.mm(a, x) - b).norm()
self.assertEqual(resid_norm, expectedNorm, atol=1e-8, rtol=0)
self.assertEqual(resid_info.norm(), resid_norm, atol=1e-8, rtol=0)
a_copy = a.clone()
b_copy = b.clone()
res1 = torch.lstsq(b, a)[0]
self.assertEqual(a, a_copy, atol=0, rtol=0)
self.assertEqual(b, b_copy, atol=0, rtol=0)
check_norm(a, b, expectedNorm, res1)
ta = torch.tensor((), dtype=dtype, device=device)
tb = torch.tensor((), dtype=dtype, device=device)
res2 = torch.lstsq(b, a, out=(tb, ta))[0]
self.assertEqual(a, a_copy, atol=0, rtol=0)
self.assertEqual(b, b_copy, atol=0, rtol=0)
check_norm(a, b, expectedNorm, res2)
res3 = torch.lstsq(b, a, out=(b, a))[0]
check_norm(a_copy, b_copy, expectedNorm, res3)
self.assertEqual(res1, tb, atol=0, rtol=0)
self.assertEqual(res1, b, atol=0, rtol=0)
self.assertEqual(res1, res2, atol=0, rtol=0)
self.assertEqual(res1, res3, atol=0, rtol=0)
expectedNorm = 0
a = torch.tensor(((1.44, -9.96, -7.55, 8.34),
(-7.84, -0.28, 3.24, 8.09),
(-4.39, -3.24, 6.27, 5.28),
(4.53, 3.83, -6.64, 2.06)), dtype=dtype, device=device).t()
b = torch.tensor(((8.58, 8.26, 8.48, -5.28),
(9.35, -4.43, -0.70, -0.26)), dtype=dtype, device=device).t()
_test_underdetermined(a, b, expectedNorm)
expectedNorm = 17.390200628863
a = torch.tensor(((1.44, -9.96, -7.55, 8.34, 7.08, -5.45),
(-7.84, -0.28, 3.24, 8.09, 2.52, -5.70),
(-4.39, -3.24, 6.27, 5.28, 0.74, -1.19),
(4.53, 3.83, -6.64, 2.06, -2.47, 4.70)), dtype=dtype, device=device).t()
b = torch.tensor(((8.58, 8.26, 8.48, -5.28, 5.72, 8.93),
(9.35, -4.43, -0.70, -0.26, -7.36, -2.52)), dtype=dtype, device=device).t()
_test_overdetermined(a, b, expectedNorm)
expectedNorm = 0
a = torch.tensor(((1.44, -9.96, -7.55),
(-7.84, -0.28, 3.24),
(-4.39, -3.24, 6.27),
(4.53, 3.83, -6.64)), dtype=dtype, device=device).t()
b = torch.tensor(((8.58, 8.26, 8.48),
(9.35, -4.43, -0.70)), dtype=dtype, device=device).t()
_test_underdetermined(a, b, expectedNorm)
expectedNorm = 0
a = torch.tensor(((1.44, -9.96, -7.55, 8.34),
(-7.84, -0.28, 3.24, 8.09),
(-4.39, -3.24, 6.27, 5.28),
(4.53, 3.83, -6.64, 2.06)), dtype=dtype, device=device).t()
b = torch.tensor(((8.58, 8.26, 8.48, -5.28),
(9.35, -4.43, -0.70, -0.26)), dtype=dtype, device=device).t()
ta = torch.tensor((), dtype=dtype, device=device)
tb = torch.tensor((), dtype=dtype, device=device)
torch.lstsq(b, a, out=(tb, ta))
self.assertEqual((torch.mm(a, tb) - b).norm(), expectedNorm, atol=1e-8, rtol=0)
torch.lstsq(b, a, out=(tb, ta))
self.assertEqual((torch.mm(a, tb) - b).norm(), expectedNorm, atol=1e-8, rtol=0)
torch.lstsq(b, a, out=(tb, ta))
self.assertEqual((torch.mm(a, tb) - b).norm(), expectedNorm, atol=1e-8, rtol=0)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    def test_lapack_empty(self, device):
        """LAPACK-backed ops on 0-sized matrices: well-defined results where the
        semantics extend naturally; lstsq rejects empty inputs with RuntimeError."""
        # Helper: tuple args become random tensors of that shape, scalars pass through.
        def fn(torchfn, *args):
            return torchfn(*tuple(torch.randn(shape, device=device) if isinstance(shape, tuple) else shape
                                  for shape in args))
        # inverse / pinverse shapes on empty inputs
        self.assertEqual((0, 0), fn(torch.inverse, (0, 0)).shape)
        self.assertEqual((5, 0), fn(torch.pinverse, (0, 5)).shape)
        self.assertEqual((0, 5), fn(torch.pinverse, (5, 0)).shape)
        self.assertEqual((0, 0), fn(torch.pinverse, (0, 0)).shape)
        # Empty-product convention: det == 1, logdet == 0, slogdet == (1, 0).
        self.assertEqual(torch.tensor(1., device=device), fn(torch.det, (0, 0)))
        self.assertEqual(torch.tensor(0., device=device), fn(torch.logdet, (0, 0)))
        self.assertEqual((torch.tensor(1., device=device), torch.tensor(0., device=device)),
                         fn(torch.slogdet, (0, 0)))
        # eig / symeig output shapes on empty input
        evalues, evectors = fn(torch.eig, (0, 0), True)
        self.assertEqual([(0, 2), (0, 0)], [evalues.shape, evectors.shape])
        evalues, evectors = fn(torch.symeig, (0, 0), True)
        self.assertEqual([(0,), (0, 0)], [evalues.shape, evectors.shape])
        # qr: reduced (some=True) vs complete (some=False) mode shapes
        q, r = fn(torch.qr, (3, 0), True)
        self.assertEqual([(3, 0), (0, 0)], [q.shape, r.shape])
        q, r = fn(torch.qr, (0, 3), True)
        self.assertEqual([(0, 0), (0, 3)], [q.shape, r.shape])
        q, r = fn(torch.qr, (3, 0), False)
        self.assertEqual([(3, 3), (3, 0)], [q.shape, r.shape])
        # lstsq raises on empty inputs instead of returning empty results.
        self.assertRaises(RuntimeError, lambda: torch.lstsq(torch.randn(0, 0), torch.randn(0, 0)))
        self.assertRaises(RuntimeError, lambda: torch.lstsq(torch.randn(0,), torch.randn(0, 0)))
    @tf32_on_and_off(0.005)
    def test_tensordot(self, device):
        """torch.tensordot vs numpy.tensordot: explicit dim lists, integer dims,
        default dims, the out= variant, negative-dims error, and 0-dim inputs."""
        a = torch.arange(60., device=device).reshape(3, 4, 5)
        b = torch.arange(24., device=device).reshape(4, 3, 2)
        c = torch.tensordot(a, b, dims=([1, 0], [0, 1])).cpu()
        cn = torch.from_numpy(np.tensordot(a.cpu().numpy(), b.cpu().numpy(),
                                           axes=([1, 0], [0, 1])))
        self.assertEqual(c, cn)
        # out= variant must produce the same values.
        cout = torch.zeros((5, 2), device=device)
        torch.tensordot(a, b, dims=([1, 0], [0, 1]), out=cout).cpu()
        self.assertEqual(c, cout)
        a = torch.randn(2, 3, 4, 5, device=device)
        b = torch.randn(4, 5, 6, 7, device=device)
        c = torch.tensordot(a, b, dims=2).cpu()
        cn = torch.from_numpy(np.tensordot(a.cpu().numpy(), b.cpu().numpy(),
                                           axes=2))
        # Negative dims are rejected.
        with self.assertRaisesRegex(RuntimeError, "expects dims >= 0"):
            torch.tensordot(a, b, dims=-1)
        self.assertEqual(c, cn)
        # Default dims.
        c = torch.tensordot(a, b).cpu()
        cn = torch.from_numpy(np.tensordot(a.cpu().numpy(), b.cpu().numpy()))
        self.assertEqual(c, cn)
        # 0-dim inputs with dims=0.
        a = torch.tensordot(torch.tensor(0.), torch.tensor(0.), 0)
        an = torch.from_numpy(np.tensordot(np.zeros((), dtype=np.float32), np.zeros((), dtype=np.float32), 0))
        self.assertEqual(a, an)
# Generate concrete per-device variants of the generic TestLinalg template class.
instantiate_device_type_tests(TestLinalg, globals())

if __name__ == '__main__':
    run_tests()
| true | true |
f7205474fe31fcf9ee4d542a8f985061f402912b | 6,074 | py | Python | c7n/actions/autotag.py | chris-angeli-rft/cloud-custodian | 5ff331b114a591dbaf6d672e30ceefb7ae64a5dd | [
"Apache-2.0"
] | 8 | 2021-05-18T02:22:03.000Z | 2021-09-11T02:49:04.000Z | c7n/actions/autotag.py | chris-angeli-rft/cloud-custodian | 5ff331b114a591dbaf6d672e30ceefb7ae64a5dd | [
"Apache-2.0"
] | 1 | 2021-04-26T04:38:35.000Z | 2021-04-26T04:38:35.000Z | c7n/actions/autotag.py | chris-angeli-rft/cloud-custodian | 5ff331b114a591dbaf6d672e30ceefb7ae64a5dd | [
"Apache-2.0"
] | 1 | 2021-11-10T02:28:47.000Z | 2021-11-10T02:28:47.000Z | # Copyright 2017-2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .core import EventAction
from c7n.exceptions import PolicyValidationError
from c7n.manager import resources
from c7n import utils
class AutoTagUser(EventAction):
    """Tag a resource with the user who created/modified it.

    .. code-block:: yaml

      policies:
        - name: ec2-auto-tag-ownercontact
          resource: ec2
          description: |
            Triggered when a new EC2 Instance is launched. Checks to see if
            it's missing the OwnerContact tag. If missing it gets created
            with the value of the ID of whomever called the RunInstances API
          mode:
            type: cloudtrail
            role: arn:aws:iam::123456789000:role/custodian-auto-tagger
            events:
              - RunInstances
          filters:
            - tag:OwnerContact: absent
          actions:
            - type: auto-tag-user
              tag: OwnerContact

    There's a number of caveats to usage. Resources which don't
    include tagging as part of their api may have some delay before
    automation kicks in to create a tag. Real world delay may be several
    minutes, with worst case into hours[0]. This creates a race condition
    between auto tagging and automation.

    In practice this window is on the order of a fraction of a second, as
    we fetch the resource and evaluate the presence of the tag before
    attempting to tag it.

    References

     CloudTrail User
     https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-user-identity.html
    """  # NOQA

    schema_alias = True
    # Policy schema: 'tag' (required) names the tag key to write; 'user-type'
    # restricts which CloudTrail identity types get tagged; 'update' allows
    # overwriting a pre-existing value; 'principal_id_tag' optionally records
    # the caller's principal id under a second tag key.
    schema = utils.type_schema(
        'auto-tag-user',
        required=['tag'],
        **{'user-type': {
            'type': 'array',
            'items': {'type': 'string',
                      'enum': [
                          'IAMUser',
                          'AssumedRole',
                          'FederatedUser'
                      ]}},
           'update': {'type': 'boolean'},
           'tag': {'type': 'string'},
           'principal_id_tag': {'type': 'string'}
           }
    )

    def get_permissions(self):
        # Required IAM permissions are exactly those of the resource's own
        # 'tag' action, which performs the actual write.
        return self.manager.action_registry.get(
            'tag')({}, self.manager).get_permissions()

    def validate(self):
        # Only meaningful in cloudtrail (event-driven) mode, and only for
        # resource types that support tagging; 'tag' is mandatory.
        if self.manager.data.get('mode', {}).get('type') != 'cloudtrail':
            raise PolicyValidationError(
                "Auto tag owner requires an event %s" % (self.manager.data,))
        if self.manager.action_registry.get('tag') is None:
            raise PolicyValidationError(
                "Resource does not support tagging %s" % (self.manager.data,))
        if 'tag' not in self.data:
            raise PolicyValidationError(
                "auto-tag action requires 'tag'")
        return self

    def process(self, resources, event):
        if event is None:
            return
        event = event['detail']
        utype = event['userIdentity']['type']
        if utype not in self.data.get('user-type', ['AssumedRole', 'IAMUser', 'FederatedUser']):
            return

        user = None
        if utype == "IAMUser":
            user = event['userIdentity']['userName']
            principal_id_value = event['userIdentity'].get('principalId', '')
        elif utype == "AssumedRole" or utype == "FederatedUser":
            user = event['userIdentity']['arn']
            # The last ARN path segment is the session/user name.
            prefix, user = user.rsplit('/', 1)
            principal_id_value = event['userIdentity'].get('principalId', '').split(':')[0]
            # instance role
            if user.startswith('i-'):
                return
            # lambda function (old style)
            elif user.startswith('awslambda'):
                return
        if user is None:
            return
        # if the auto-tag-user policy set update to False (or it's unset) then we
        # will skip writing their UserName tag and not overwrite pre-existing values
        if not self.data.get('update', False):
            untagged_resources = []
            # iterating over all the resources the user spun up in this event
            for resource in resources:
                tag_already_set = False
                for tag in resource.get('Tags', ()):
                    if tag['Key'] == self.data['tag']:
                        tag_already_set = True
                        break
                if not tag_already_set:
                    untagged_resources.append(resource)
        # if update is set to True, we will overwrite the userName tag even if
        # the user already set a value
        else:
            untagged_resources = resources

        tag_action = self.manager.action_registry.get('tag')
        new_tags = {
            self.data['tag']: user
        }
        # if principal_id_key is set (and value), we'll set the principalId tag.
        principal_id_key = self.data.get('principal_id_tag', None)
        if principal_id_key and principal_id_value:
            new_tags[principal_id_key] = principal_id_value
        for key, value in new_tags.items():
            tag_action({'key': key, 'value': value}, self.manager).process(untagged_resources)
        return new_tags

    @classmethod
    def register_resource(cls, registry, resource_class):
        # Registry hook: attach this action (once) to every taggable resource type.
        if 'auto-tag-user' in resource_class.action_registry:
            return
        if resource_class.action_registry.get('tag'):
            resource_class.action_registry.register('auto-tag-user', AutoTagUser)


resources.subscribe(AutoTagUser.register_resource)
| 38.687898 | 109 | 0.602898 |
from .core import EventAction
from c7n.exceptions import PolicyValidationError
from c7n.manager import resources
from c7n import utils
class AutoTagUser(EventAction):
    """Tag newly created/modified resources with the CloudTrail caller identity.

    Requires a cloudtrail-mode policy; the tag value is derived from the
    event's ``userIdentity`` section.
    """

    schema_alias = True
    schema = utils.type_schema(
        'auto-tag-user',
        required=['tag'],
        **{'user-type': {
            'type': 'array',
            'items': {'type': 'string',
                      'enum': [
                          'IAMUser',
                          'AssumedRole',
                          'FederatedUser'
                      ]}},
           'update': {'type': 'boolean'},
           'tag': {'type': 'string'},
           'principal_id_tag': {'type': 'string'}
           }
    )

    def get_permissions(self):
        # Delegate to the resource's 'tag' action, which knows its own permissions.
        tag_action = self.manager.action_registry.get('tag')
        return tag_action({}, self.manager).get_permissions()

    def validate(self):
        mode = self.manager.data.get('mode', {})
        if mode.get('type') != 'cloudtrail':
            raise PolicyValidationError(
                "Auto tag owner requires an event %s" % (self.manager.data,))
        if self.manager.action_registry.get('tag') is None:
            raise PolicyValidationError(
                "Resource does not support tagging %s" % (self.manager.data,))
        if 'tag' not in self.data:
            raise PolicyValidationError(
                "auto-tag action requires 'tag'")
        return self

    def process(self, resources, event):
        if event is None:
            return
        identity = event['detail']['userIdentity']
        utype = identity['type']
        allowed = self.data.get('user-type', ['AssumedRole', 'IAMUser', 'FederatedUser'])
        if utype not in allowed:
            return

        user = None
        if utype == "IAMUser":
            user = identity['userName']
            principal_id_value = identity.get('principalId', '')
        elif utype == "AssumedRole" or utype == "FederatedUser":
            user = identity['arn']
            # Keep only the last ARN path segment (session/user name).
            prefix, user = user.rsplit('/', 1)
            principal_id_value = identity.get('principalId', '').split(':')[0]
            # Skip instance roles and legacy lambda identities.
            if user.startswith('i-'):
                return
            elif user.startswith('awslambda'):
                return
        if user is None:
            return

        if not self.data.get('update', False):
            # Default: only tag resources that don't already carry the tag key.
            tag_key = self.data['tag']
            untagged_resources = [
                r for r in resources
                if not any(t['Key'] == tag_key for t in r.get('Tags', ()))
            ]
        else:
            # update=True: overwrite any pre-existing value on every resource.
            untagged_resources = resources

        new_tags = {
            self.data['tag']: user
        }
        # Optionally record the principal id under its own tag key.
        principal_id_key = self.data.get('principal_id_tag', None)
        if principal_id_key and principal_id_value:
            new_tags[principal_id_key] = principal_id_value
        tag_action = self.manager.action_registry.get('tag')
        for key, value in new_tags.items():
            tag_action({'key': key, 'value': value}, self.manager).process(untagged_resources)
        return new_tags

    @classmethod
    def register_resource(cls, registry, resource_class):
        # Register at most once, and only for resource types that can be tagged.
        if 'auto-tag-user' in resource_class.action_registry:
            return
        if resource_class.action_registry.get('tag'):
            resource_class.action_registry.register('auto-tag-user', AutoTagUser)


resources.subscribe(AutoTagUser.register_resource)
| true | true |
f720548ea283334132cb0ded4dadf688758eebc3 | 2,786 | py | Python | tests/integration/long/test_policies.py | clohfink/python-driver | 30a0e27cd1b8999267c146f0a93adf962a50790b | [
"Apache-2.0"
] | 1 | 2020-07-11T11:20:34.000Z | 2020-07-11T11:20:34.000Z | tests/integration/long/test_policies.py | clohfink/python-driver | 30a0e27cd1b8999267c146f0a93adf962a50790b | [
"Apache-2.0"
] | null | null | null | tests/integration/long/test_policies.py | clohfink/python-driver | 30a0e27cd1b8999267c146f0a93adf962a50790b | [
"Apache-2.0"
] | 1 | 2021-07-22T07:20:49.000Z | 2021-07-22T07:20:49.000Z | # Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from cassandra import ConsistencyLevel, Unavailable
from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT
from tests.integration import use_cluster, get_cluster, get_node
def setup_module():
    # Provision (or attach to) a 4-node ccm cluster for all tests in this module.
    use_cluster('test_cluster', [4])
class RetryPolicyTests(unittest.TestCase):

    @classmethod
    def tearDownClass(cls):
        cluster = get_cluster()
        cluster.start()  # make sure other nodes are restarted

    def test_should_rethrow_on_unvailable_with_default_policy_if_cas(self):
        """
        Tests for the default retry policy in combination with lightweight transactions.

        @since 3.17
        @jira_ticket PYTHON-1007
        @expected_result the query is retried with the default CL, not the serial one.

        @test_category policy
        """
        ep = ExecutionProfile(consistency_level=ConsistencyLevel.ALL,
                              serial_consistency_level=ConsistencyLevel.SERIAL)
        cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: ep})
        session = cluster.connect()

        session.execute("CREATE KEYSPACE test_retry_policy_cas WITH replication = {'class':'SimpleStrategy','replication_factor': 3};")
        session.execute("CREATE TABLE test_retry_policy_cas.t (id int PRIMARY KEY, data text);")
        session.execute('INSERT INTO test_retry_policy_cas.t ("id", "data") VALUES (%(0)s, %(1)s)', {'0': 42, '1': 'testing'})

        # Stop two of the four nodes so only 1 of the 3 replicas stays alive,
        # guaranteeing Unavailable for the conditional update below.
        get_node(2).stop()
        get_node(4).stop()

        # before fix: cassandra.InvalidRequest: Error from server: code=2200 [Invalid query] message="SERIAL is not
        # supported as conditional update commit consistency. ....""
        # after fix: cassandra.Unavailable (expected since replicas are down)
        with self.assertRaises(Unavailable) as cm:
            session.execute("update test_retry_policy_cas.t set data = 'staging' where id = 42 if data ='testing'")

        exception = cm.exception
        self.assertEqual(exception.consistency, ConsistencyLevel.SERIAL)
        self.assertEqual(exception.required_replicas, 2)
        self.assertEqual(exception.alive_replicas, 1)
| 39.239437 | 135 | 0.71285 |
try:
import unittest2 as unittest
except ImportError:
import unittest
from cassandra import ConsistencyLevel, Unavailable
from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT
from tests.integration import use_cluster, get_cluster, get_node
def setup_module():
use_cluster('test_cluster', [4])
class RetryPolicyTests(unittest.TestCase):
    """Default retry policy behavior for lightweight transactions (PYTHON-1007)."""

    @classmethod
    def tearDownClass(cls):
        # Restart any nodes that were stopped during the tests.
        get_cluster().start()

    def test_should_rethrow_on_unvailable_with_default_policy_if_cas(self):
        profile = ExecutionProfile(
            consistency_level=ConsistencyLevel.ALL,
            serial_consistency_level=ConsistencyLevel.SERIAL)
        session = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: profile}).connect()
        session.execute("CREATE KEYSPACE test_retry_policy_cas WITH replication = {'class':'SimpleStrategy','replication_factor': 3};")
        session.execute("CREATE TABLE test_retry_policy_cas.t (id int PRIMARY KEY, data text);")
        session.execute('INSERT INTO test_retry_policy_cas.t ("id", "data") VALUES (%(0)s, %(1)s)', {'0': 42, '1': 'testing'})
        # Take down two of the three replicas so the CAS write cannot succeed.
        for node_id in (2, 4):
            get_node(node_id).stop()
        with self.assertRaises(Unavailable) as cm:
            session.execute("update test_retry_policy_cas.t set data = 'staging' where id = 42 if data ='testing'")
        err = cm.exception
        self.assertEqual(err.consistency, ConsistencyLevel.SERIAL)
        self.assertEqual(err.required_replicas, 2)
        self.assertEqual(err.alive_replicas, 1)
| true | true |
f72055de22b933a14a11f041fdc68987d8fc5154 | 337 | py | Python | plotarchive/src/unarchive.py | buswinka/plotarchive | 5393b0140e11551a7c3bbb5c1094d1b5e0fce6bd | [
"MIT"
] | null | null | null | plotarchive/src/unarchive.py | buswinka/plotarchive | 5393b0140e11551a7c3bbb5c1094d1b5e0fce6bd | [
"MIT"
] | null | null | null | plotarchive/src/unarchive.py | buswinka/plotarchive | 5393b0140e11551a7c3bbb5c1094d1b5e0fce6bd | [
"MIT"
] | null | null | null | import dill
from . import files
def expand(filename, folder=None):
    """Replay an archived plot and optionally restore its bundled data files.

    :param filename: path to a dill archive produced by plotarchive; expected
        to contain 'files' (name -> contents), 'func' (the plotting callable)
        and, optionally, 'args' (kwargs for the callable).
    :param folder: if given, the archived files are written under this folder.
    """
    # Context manager closes the archive even if dill raises (the original
    # leaked the open file handle).
    with open(filename, 'rb') as f:
        data = dill.load(f)
    file_dict = data['files']
    plotter = data['func']
    # NOTE(review): the plot is only replayed when 'args' is present; archives
    # of zero-argument plotters are never re-run -- confirm this is intentional.
    if 'args' in data:
        plotter(**data['args'])
    if folder is not None:
        files.write_files_from_dict(file_dict, folder)
| 19.823529 | 54 | 0.617211 | import dill
from . import files
def expand(filename, folder=None):
    """Load a plotarchive pickle, re-invoke the stored plotting function and,
    when *folder* is given, write the archived data files into it."""
    with open(filename, 'rb') as archive:  # close the handle promptly (was leaked)
        data = dill.load(archive)
    file_dict = data['files']
    plotter = data['func']
    # Replay only when stored kwargs exist (mirrors the original control flow;
    # the dead `args` local has been dropped).
    if 'args' in data:
        plotter(**data['args'])
    if folder is not None:
        files.write_files_from_dict(file_dict, folder)
| true | true |
f72056e829f271ea00336ce08152308cdf5bbcfa | 1,409 | py | Python | setup.py | yuhonghong66/onnx-mxnet | 17e8fa4f515e0bb5c2fd01ef733c13bd51f6ff95 | [
"Apache-2.0"
] | 118 | 2017-11-03T05:21:55.000Z | 2020-03-24T14:57:21.000Z | setup.py | yuhonghong66/onnx-mxnet | 17e8fa4f515e0bb5c2fd01ef733c13bd51f6ff95 | [
"Apache-2.0"
] | 43 | 2017-11-20T14:24:30.000Z | 2019-01-10T06:47:18.000Z | setup.py | yuhonghong66/onnx-mxnet | 17e8fa4f515e0bb5c2fd01ef733c13bd51f6ff95 | [
"Apache-2.0"
] | 32 | 2017-11-16T22:33:33.000Z | 2021-03-15T17:09:09.000Z | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
# http://www.apache.org/licenses/LICENSE-2.0
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# pylint: disable=invalid-name, exec-used
"""Setup onnx-mxnet package"""
# To build and upload a new version, follow the steps below.
# Notes:
# - this is a "Universal Wheels" package that is pure Python and supports both Python2 and Python3
# - Twine is a secure PyPi upload package
# $ pip install twine
# $ pip install wheel
# $ python setup.py bdist_wheel --universal
# $ twine upload dist/*
# Packaging configuration for the ONNX -> MXNet model converter.
from setuptools import setup, find_packages

pkgs = find_packages()

# Collect all metadata in one place, then hand it to setuptools.
setup_kwargs = dict(
    name='onnx-mxnet',
    version='0.4.2',
    description='ONNX-MXNet Model converter',
    url='https://github.com/onnx/onnx-mxnet',
    keywords='ONNX MXNet model converter deep learning',
    packages=pkgs,
    install_requires=['mxnet>=0.11.0', 'onnx>=1.0.1'],
    tests_require=['pytest', 'pylint'],
    include_package_data=True,
    license='Apache 2.0',
)

setup(**setup_kwargs)
| 36.128205 | 98 | 0.725337 |
# Packaging configuration for the ONNX -> MXNet model converter.
from setuptools import setup, find_packages
# Automatically discover every package directory to ship.
pkgs = find_packages()
setup(
    name='onnx-mxnet',
    version='0.4.2',
    description='ONNX-MXNet Model converter',
    url='https://github.com/onnx/onnx-mxnet',
    keywords='ONNX MXNet model converter deep learning',
    packages=pkgs,
    install_requires=['mxnet>=0.11.0', 'onnx>=1.0.1'],
    tests_require=['pytest', 'pylint'],
    include_package_data=True,
    license='Apache 2.0'
)
| true | true |
f7205702ffae189a8112330f7eb605fc4ca4cadc | 3,047 | py | Python | examples/minigrid/eval_maze.py | AIDefender/Tianshou-ReMPER | 297ba383fc1e4e19cd52bd89df7d0d3148bd4e68 | [
"MIT"
] | null | null | null | examples/minigrid/eval_maze.py | AIDefender/Tianshou-ReMPER | 297ba383fc1e4e19cd52bd89df7d0d3148bd4e68 | [
"MIT"
] | null | null | null | examples/minigrid/eval_maze.py | AIDefender/Tianshou-ReMPER | 297ba383fc1e4e19cd52bd89df7d0d3148bd4e68 | [
"MIT"
] | null | null | null | import seaborn as sns
import matplotlib.pyplot as plt
import pickle
import argparse
import numpy as np
import os
# Plot styling and command-line interface.
sns.set_context('paper', font_scale=1.5)
parser = argparse.ArgumentParser()
# -n selects which saved checkpoint (Q_tablepickle<n>) to evaluate.
parser.add_argument("-n", type=int)
# Directory containing one sub-directory of saved Q-tables per seed.
parser.add_argument('--resume-path', type=str, default=None)
# Title drawn above the generated heatmap figure.
parser.add_argument('--title', type=str, default='default')
args = parser.parse_args()
def get_mask(mean_V_loss):
    """Integer mask: 1 where the loss exceeds 0.9, 0 elsewhere."""
    return (np.asarray(mean_V_loss) > 0.9).astype(int)
# Aggregate per-seed value-estimation losses, then plot a heatmap of the
# mean loss over the best seeds.  Expected directory layout (inferred from
# the code): <resume_path>/<seed>/Q_tablepickle<n> -- TODO confirm.
all_V_loss = []
for seed in os.listdir(args.resume_path):
    # Skip the output directory that this script itself creates.
    if seed.startswith("heatmap"):
        continue
    with open(os.path.join(args.resume_path, str(seed), "Q_tablepickle%d"%args.n), 'rb') as f:
        Q_table = pickle.load(f)
    print("Loaded Q table ", os.path.join(args.resume_path, "Q_tablepickle%d"%args.n))
    # State value V(s) = max_a Q(s, a).
    V_table = {}
    for key, value in zip(Q_table.keys(), Q_table.values()):
        V_table[key] = np.max(value)
    V_mean = np.average(list(V_table.values()))
    V_loss_table = []
    # Losses grouped by Manhattan distance 14 - i - j (anti-diagonals).
    V_loss_linear = {}
    for i in range(14):
        V_loss_linear[i] = []
    # NOTE(review): assumes a 7x7 interior grid (coordinates 1..7) and
    # discount 0.99 -- confirm against the environment definition.
    for i in range(1, 8):
        this_loss = []
        for j in range(1, 8):
            # TODO: compute correct real_V
            real_V = 0.99 ** ((7-i) + (7-j))
            try:
                loss = abs(V_table[(i,j)] - real_V)
            except KeyError:
                # loss = abs(V_mean - real_V)
                # State never visited: charge the maximum loss of 1.
                loss = 1
            this_loss.append(loss)
            V_loss_linear[14-i-j].append(loss)
        V_loss_table.append(this_loss)
    V_loss_table = np.array(V_loss_table)
    all_V_loss.append(V_loss_table)
all_V_loss = np.array(all_V_loss)
# Average over the two seeds with the lowest overall loss.
V_seed_mean = np.average(all_V_loss, axis=(1,2))
mean_V_loss = np.average(all_V_loss[np.argsort(V_seed_mean)[:2]], axis=0)
# ===========plot=============
fig, ax = plt.subplots()
# frame = sns.heatmap(mean_V_loss, cmap="YlGnBu", vmin=0.1, vmax=0.5)
# frame = sns.heatmap(mean_V_loss, cmap = 'RdBu_r', vmin=0.1, center=0.45, vmax=0.6, mask=get_mask(mean_V_loss), ax=ax, annot=False)
# Annotate a hand-picked set of cells with their rounded loss values.
annot = [["" for _ in range(7)] for _ in range(7)]
for pos in [(2,2), (6,0), (4,2), (4,4), (6,6), (2, 4)]:
    annot[pos[0]][pos[1]] = str(round(mean_V_loss[pos], 2))
frame = sns.heatmap(mean_V_loss, cmap = sns.color_palette("rocket_r", 20), vmin=0.3, vmax=0.6,
                    mask=get_mask(mean_V_loss), ax=ax, annot=annot, fmt="")
frame.axes.get_xaxis().set_visible(False)
frame.axes.get_yaxis().set_visible(False)
# Masked (loss > 0.9) cells show the gray axes background instead.
frame.set_facecolor("gray")
# Overlay the triangle/square glyph images in the figure corners.
triangle = plt.imread('examples/minigrid/fig/triangle.png')
square = plt.imread('examples/minigrid/fig/square.png')
newax = fig.add_axes([0.65, 0.78, 0.1, 0.1])
newax.imshow(square)
newax.set_xticks([])
newax.set_yticks([])
newax2 = fig.add_axes([0.12, 0.78, 0.1, 0.1])
newax2.imshow(triangle)
newax2.set_xticks([])
newax2.set_yticks([])
# =========save fig============
if not os.path.isdir(os.path.join(args.resume_path, "heatmap")):
    os.mkdir(os.path.join(args.resume_path, "heatmap"))
fig.suptitle(args.title)
plt.savefig(os.path.join(args.resume_path, "heatmap", "%d.png"%args.n))
| 35.847059 | 132 | 0.640958 | import seaborn as sns
import matplotlib.pyplot as plt
import pickle
import argparse
import numpy as np
import os
sns.set_context('paper', font_scale=1.5)
parser = argparse.ArgumentParser()
parser.add_argument("-n", type=int)
parser.add_argument('--resume-path', type=str, default=None)
parser.add_argument('--title', type=str, default='default')
args = parser.parse_args()
def get_mask(mean_V_loss):
    """Integer mask: 1 where the loss exceeds 0.9, 0 elsewhere."""
    return (np.asarray(mean_V_loss) > 0.9).astype(int)
all_V_loss = []
for seed in os.listdir(args.resume_path):
if seed.startswith("heatmap"):
continue
with open(os.path.join(args.resume_path, str(seed), "Q_tablepickle%d"%args.n), 'rb') as f:
Q_table = pickle.load(f)
print("Loaded Q table ", os.path.join(args.resume_path, "Q_tablepickle%d"%args.n))
V_table = {}
for key, value in zip(Q_table.keys(), Q_table.values()):
V_table[key] = np.max(value)
V_mean = np.average(list(V_table.values()))
V_loss_table = []
V_loss_linear = {}
for i in range(14):
V_loss_linear[i] = []
for i in range(1, 8):
this_loss = []
for j in range(1, 8):
real_V = 0.99 ** ((7-i) + (7-j))
try:
loss = abs(V_table[(i,j)] - real_V)
except KeyError:
loss = 1
this_loss.append(loss)
V_loss_linear[14-i-j].append(loss)
V_loss_table.append(this_loss)
V_loss_table = np.array(V_loss_table)
all_V_loss.append(V_loss_table)
all_V_loss = np.array(all_V_loss)
V_seed_mean = np.average(all_V_loss, axis=(1,2))
mean_V_loss = np.average(all_V_loss[np.argsort(V_seed_mean)[:2]], axis=0)
fig, ax = plt.subplots()
annot = [["" for _ in range(7)] for _ in range(7)]
for pos in [(2,2), (6,0), (4,2), (4,4), (6,6), (2, 4)]:
annot[pos[0]][pos[1]] = str(round(mean_V_loss[pos], 2))
frame = sns.heatmap(mean_V_loss, cmap = sns.color_palette("rocket_r", 20), vmin=0.3, vmax=0.6,
mask=get_mask(mean_V_loss), ax=ax, annot=annot, fmt="")
frame.axes.get_xaxis().set_visible(False)
frame.axes.get_yaxis().set_visible(False)
frame.set_facecolor("gray")
triangle = plt.imread('examples/minigrid/fig/triangle.png')
square = plt.imread('examples/minigrid/fig/square.png')
newax = fig.add_axes([0.65, 0.78, 0.1, 0.1])
newax.imshow(square)
newax.set_xticks([])
newax.set_yticks([])
newax2 = fig.add_axes([0.12, 0.78, 0.1, 0.1])
newax2.imshow(triangle)
newax2.set_xticks([])
newax2.set_yticks([])
if not os.path.isdir(os.path.join(args.resume_path, "heatmap")):
os.mkdir(os.path.join(args.resume_path, "heatmap"))
fig.suptitle(args.title)
plt.savefig(os.path.join(args.resume_path, "heatmap", "%d.png"%args.n))
| true | true |
f7205781c0c4df8d2891d89d9d280b3ad59b22ec | 699 | py | Python | aiovault/v1/auth/backends/__init__.py | johnnoone/aiovault | 03e1bfb6f0404dcf97ce87a98c539027c4e78a37 | [
"BSD-3-Clause"
] | 1 | 2022-01-31T22:37:57.000Z | 2022-01-31T22:37:57.000Z | aiovault/v1/auth/backends/__init__.py | johnnoone/aiovault | 03e1bfb6f0404dcf97ce87a98c539027c4e78a37 | [
"BSD-3-Clause"
] | null | null | null | aiovault/v1/auth/backends/__init__.py | johnnoone/aiovault | 03e1bfb6f0404dcf97ce87a98c539027c4e78a37 | [
"BSD-3-Clause"
] | null | null | null | """
auth.backends
~~~~~~~~~~~~~
"""
from .app_id import AppIDBackend
from .cert import CertBackend
from .github import GitHubBackend
from .ldap import LDAPBackend
from .userpass import UserPassBackend
from stevedore import DriverManager
__all__ = ['AppIDBackend', 'CertBackend', 'GitHubBackend',
'LDAPBackend', 'UserPassBackend']
def load_backend(type, backend):
    """Load an auth backend driver via stevedore.

    Parameters:
        type (str): The backend type (entry-point name under the
            ``aiovault.auth.backend`` namespace)
        backend: Keyword arguments passed to the backend on instantiation
    """
    manager = DriverManager(
        namespace='aiovault.auth.backend',
        name=type,
        invoke_kwds=backend,
        invoke_on_load=True,
    )
    return manager.driver
| 21.84375 | 58 | 0.662375 |
from .app_id import AppIDBackend
from .cert import CertBackend
from .github import GitHubBackend
from .ldap import LDAPBackend
from .userpass import UserPassBackend
from stevedore import DriverManager
__all__ = ['AppIDBackend', 'CertBackend', 'GitHubBackend',
'LDAPBackend', 'UserPassBackend']
def load_backend(type, backend):
    """Load an auth backend driver via stevedore.

    Parameters:
        type (str): The backend type (entry-point name under the
            ``aiovault.auth.backend`` namespace)
        backend: Keyword arguments passed to the backend on instantiation
    """
    mgr = DriverManager(
        namespace='aiovault.auth.backend',
        name=type,
        invoke_on_load=True,
        invoke_kwds=backend
    )
    return mgr.driver
| true | true |
f7205a8e46810cda3abbd608dafa0f62fae9673c | 8,873 | py | Python | doc/conf.py | duwhop/relate | 568bf6868fbc980e78e74fa29f84d10be2f8c94d | [
"Unlicense"
] | null | null | null | doc/conf.py | duwhop/relate | 568bf6868fbc980e78e74fa29f84d10be2f8c94d | [
"Unlicense"
] | null | null | null | doc/conf.py | duwhop/relate | 568bf6868fbc980e78e74fa29f84d10be2f8c94d | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
#
# relate documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 26 18:41:17 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# Configure and boot Django so autodoc can import the project's modules
# (models, views, ...) while the documentation is being built.
os.environ["DJANGO_SETTINGS_MODULE"] = "relate.settings"
import django
django.setup()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'RELATE'
copyright = u'2014, Andreas Kloeckner'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2015.1'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
try:
    import sphinx_bootstrap_theme
except ImportError:
    # Only a missing package should trigger the fallback; the previous bare
    # ``except:`` swallowed *every* exception (even KeyboardInterrupt).
    from warnings import warn
    warn("I would like to use the sphinx bootstrap theme, but can't find it.\n"
         "'pip install sphinx_bootstrap_theme' to fix.")
else:
    # Activate the theme.
    html_theme = 'bootstrap'
    html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()

    # Theme options are theme-specific and customize the look and feel of a
    # theme further. For a list of options available for each theme, see the
    # documentation.
    html_theme_options = {
        "navbar_fixed_top": "true",
        "navbar_site_name": "Contents",
        'bootstrap_version': '3',
        'source_link_position': 'footer',
    }
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'relatedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'relate.tex', u'RELATE Documentation',
u'Andreas Kloeckner', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'relate', u'RELATE Documentation',
[u'Andreas Kloeckner'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'relate', u'RELATE Documentation',
u'Andreas Kloeckner', 'relate', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 31.242958 | 79 | 0.712949 |
import sys
import os
sys.path.insert(0, os.path.abspath('..'))
os.environ["DJANGO_SETTINGS_MODULE"] = "relate.settings"
import django
django.setup()
extensions = [
'sphinx.ext.autodoc',
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'RELATE'
copyright = u'2014, Andreas Kloeckner'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2015.1'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
try:
    import sphinx_bootstrap_theme
except ImportError:
    # Only a missing package should trigger the fallback; the previous bare
    # ``except:`` swallowed *every* exception (even KeyboardInterrupt).
    from warnings import warn
    warn("I would like to use the sphinx bootstrap theme, but can't find it.\n"
         "'pip install sphinx_bootstrap_theme' to fix.")
else:
    html_theme = 'bootstrap'
    html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
    html_theme_options = {
        "navbar_fixed_top": "true",
        "navbar_site_name": "Contents",
        'bootstrap_version': '3',
        'source_link_position': 'footer',
    }
html_static_path = ['_static']
htmlhelp_basename = 'relatedoc'
latex_elements = {
}
latex_documents = [
('index', 'relate.tex', u'RELATE Documentation',
u'Andreas Kloeckner', 'manual'),
]
man_pages = [
('index', 'relate', u'RELATE Documentation',
[u'Andreas Kloeckner'], 1)
]
texinfo_documents = [
('index', 'relate', u'RELATE Documentation',
u'Andreas Kloeckner', 'relate', 'One line description of project.',
'Miscellaneous'),
]
#texinfo_no_detailmenu = False
| true | true |
f7205e2735b7f7103c0de347dfa88005e04f48db | 7,513 | py | Python | src/pretix/base/models/base.py | tcatm/pretix | a76f74b161e140f4445568b97cb26fc57247e0d2 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/pretix/base/models/base.py | tcatm/pretix | a76f74b161e140f4445568b97cb26fc57247e0d2 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/pretix/base/models/base.py | tcatm/pretix | a76f74b161e140f4445568b97cb26fc57247e0d2 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import json
import uuid
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.signals import post_delete
from django.dispatch import receiver
from django.urls import reverse
from django.utils.crypto import get_random_string
from django.utils.functional import cached_property
from pretix.helpers.json import CustomJSONEncoder
def cachedfile_name(instance, filename: str) -> str:
    """Storage path for a CachedFile: ``cachedfiles/<id>.<secret>.<ext>``."""
    extension = filename.split('.')[-1]
    secret = get_random_string(length=12)
    return 'cachedfiles/{}.{}.{}'.format(instance.id, secret, extension)
class CachedFile(models.Model):
    """
    An uploaded file, with an optional expiry date.
    """
    # Random UUID primary key; also embedded in the storage path by
    # cachedfile_name().
    id = models.UUIDField(primary_key=True, default=uuid.uuid4)
    # NOTE(review): presumably the point after which the file may be cleaned
    # up; None = no expiry -- confirm with the cleanup task.
    expires = models.DateTimeField(null=True, blank=True)
    date = models.DateTimeField(null=True, blank=True)
    # File name presented on download (the storage name is generated).
    filename = models.CharField(max_length=255)
    # NOTE(review): looks like a MIME type -- confirm against writers.
    type = models.CharField(max_length=255)
    # The stored content (optional -- null/blank allowed).
    file = models.FileField(null=True, blank=True, upload_to=cachedfile_name, max_length=255)
@receiver(post_delete, sender=CachedFile)
def cached_file_delete(sender, instance, **kwargs):
    """Remove the stored file after its CachedFile row has been deleted."""
    if not instance.file:
        return
    # save=False: the database row is already gone, so don't write the model.
    instance.file.delete(False)
class LoggingMixin:
    def log_action(self, action, data=None, user=None, api_token=None, auth=None, save=True):
        """
        Create a LogEntry object that is related to this object.
        See the LogEntry documentation for details.

        :param action: The namespaced, dotted action code (e.g. ``a.b.c``)
        :param data: A JSON-serializable dictionary; values of keys containing
                     ``password``, ``secret`` or ``api_key`` are masked
        :param user: The user performing the action (optional)
        :param api_token: A TeamAPIToken, used when ``auth`` is not given
        :param auth: The credential used for the action; stored on the
                     matching LogEntry field depending on its type
        :param save: If False, the LogEntry is returned without being saved
                     (and no notifications or webhooks are triggered)
        :return: The (possibly unsaved) LogEntry instance
        """
        from pretix.api.models import OAuthAccessToken, OAuthApplication
        from pretix.api.webhooks import get_all_webhook_events, notify_webhooks

        from ..notifications import get_all_notification_types
        from ..services.notifications import notify
        from .devices import Device
        from .event import Event
        from .log import LogEntry
        from .organizer import TeamAPIToken

        # Attach the surrounding event, if any.
        event = None
        if isinstance(self, Event):
            event = self
        elif hasattr(self, 'event'):
            event = self.event
        if user and not user.is_authenticated:
            user = None

        kwargs = {}
        if isinstance(auth, OAuthAccessToken):
            kwargs['oauth_application'] = auth.application
        elif isinstance(auth, OAuthApplication):
            kwargs['oauth_application'] = auth
        elif isinstance(auth, TeamAPIToken):
            kwargs['api_token'] = auth
        elif isinstance(auth, Device):
            kwargs['device'] = auth
        elif isinstance(api_token, TeamAPIToken):
            kwargs['api_token'] = api_token

        logentry = LogEntry(content_object=self, user=user, action_type=action, event=event, **kwargs)
        if isinstance(data, dict):
            # Mask sensitive values on a shallow copy so that the caller's
            # dictionary is not mutated (previously it was modified in place).
            sensitivekeys = ('password', 'secret', 'api_key')
            data = {
                k: "********" if v and any(s in k for s in sensitivekeys) else v
                for k, v in data.items()
            }
            logentry.data = json.dumps(data, cls=CustomJSONEncoder, sort_keys=True)
        elif data:
            raise TypeError("You should only supply dictionaries as log data.")
        if save:
            logentry.save()

            no_types = get_all_notification_types()
            wh_types = get_all_webhook_events()
            no_type = None
            wh_type = None
            typepath = logentry.action_type
            # Walk up the dotted action type (``a.b.c`` -> ``a.b`` -> ``a``),
            # matching the exact type first and wildcard ('.*') entries on
            # shorter prefixes.  Bug fix: the condition previously tested
            # ``wh_types`` (the dict of all registered events, almost always
            # truthy) instead of ``wh_type``, so the webhook lookup stopped as
            # soon as a notification type had been found.
            while (not no_type or not wh_type) and '.' in typepath:
                wh_type = wh_type or wh_types.get(typepath + ('.*' if typepath != logentry.action_type else ''))
                no_type = no_type or no_types.get(typepath + ('.*' if typepath != logentry.action_type else ''))
                typepath = typepath.rsplit('.', 1)[0]
            if no_type:
                notify.apply_async(args=(logentry.pk,))
            if wh_type:
                notify_webhooks.apply_async(args=(logentry.pk,))
        return logentry
class LoggedModel(models.Model, LoggingMixin):
    """Abstract base for models whose changes are recorded as LogEntry rows."""
    class Meta:
        abstract = True

    @cached_property
    def logs_content_type(self):
        """The ContentType of this model, cached on the instance."""
        return ContentType.objects.get_for_model(type(self))

    @cached_property
    def all_logentries_link(self):
        """URL of the full, filtered log list in the control panel, or
        ``None`` when the object has no associated event."""
        from pretix.base.models import Event
        if isinstance(self, Event):
            event = self
        elif hasattr(self, 'event'):
            event = self.event
        else:
            return None
        return reverse(
            'control:event.log',
            kwargs={
                'event': event.slug,
                'organizer': event.organizer.slug,
            }
        ) + '?content_type={}&object={}'.format(
            self.logs_content_type.pk,
            self.pk
        )

    def top_logentries(self):
        """Most recent log entries, capped at 25 when a link to the full
        list is available."""
        qs = self.all_logentries()
        if self.all_logentries_link:
            qs = qs[:25]
        return qs

    def top_logentries_has_more(self):
        """Whether more entries exist than top_logentries() would show."""
        return self.all_logentries().count() > 25

    def all_logentries(self):
        """
        Returns all log entries that are attached to this object.

        :return: A QuerySet of LogEntry objects
        """
        from .log import LogEntry
        return LogEntry.objects.filter(
            content_type=self.logs_content_type, object_id=self.pk
        ).select_related('user', 'event', 'oauth_application', 'api_token', 'device')
class LockModel:
    def refresh_for_update(self, fields=None, using=None, **kwargs):
        """
        Like refresh_from_db(), but with select_for_update().
        See also https://code.djangoproject.com/ticket/28344

        Reloads field values from the database inside the current
        transaction while taking a row-level lock
        (``SELECT ... FOR UPDATE``); extra ``**kwargs`` are forwarded to
        ``select_for_update()``.
        """
        if fields is not None:
            if not fields:
                # An explicitly empty field list means nothing to reload.
                return
            if any(LOOKUP_SEP in f for f in fields):
                raise ValueError(
                    'Found "%s" in fields argument. Relations and transforms '
                    'are not allowed in fields.' % LOOKUP_SEP)
        hints = {'instance': self}
        # Locked queryset for this row, honouring the given database alias.
        db_instance_qs = self.__class__._base_manager.db_manager(using, hints=hints).filter(pk=self.pk).select_for_update(**kwargs)
        # Use provided fields, if not set then reload all non-deferred fields.
        deferred_fields = self.get_deferred_fields()
        if fields is not None:
            fields = list(fields)
            db_instance_qs = db_instance_qs.only(*fields)
        elif deferred_fields:
            fields = [f.attname for f in self._meta.concrete_fields
                      if f.attname not in deferred_fields]
            db_instance_qs = db_instance_qs.only(*fields)
        db_instance = db_instance_qs.get()
        non_loaded_fields = db_instance.get_deferred_fields()
        # Copy fresh values onto this instance.
        for field in self._meta.concrete_fields:
            if field.attname in non_loaded_fields:
                # This field wasn't refreshed - skip ahead.
                continue
            setattr(self, field.attname, getattr(db_instance, field.attname))
            # Clear cached foreign keys.
            if field.is_relation and field.is_cached(self):
                field.delete_cached_value(self)
        # Clear cached relations.
        for field in self._meta.related_objects:
            if field.is_cached(self):
                field.delete_cached_value(self)
        self._state.db = db_instance._state.db
| 35.77619 | 131 | 0.626381 | import json
import uuid
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.signals import post_delete
from django.dispatch import receiver
from django.urls import reverse
from django.utils.crypto import get_random_string
from django.utils.functional import cached_property
from pretix.helpers.json import CustomJSONEncoder
def cachedfile_name(instance, filename: str) -> str:
    """Storage path for a CachedFile: ``cachedfiles/<id>.<secret>.<ext>``."""
    extension = filename.split('.')[-1]
    secret = get_random_string(length=12)
    return 'cachedfiles/{}.{}.{}'.format(instance.id, secret, extension)
class CachedFile(models.Model):
    """An uploaded file, with an optional expiry date."""
    # Random UUID primary key; also embedded in the storage path by
    # cachedfile_name().
    id = models.UUIDField(primary_key=True, default=uuid.uuid4)
    # NOTE(review): presumably the point after which the file may be cleaned
    # up; None = no expiry -- confirm with the cleanup task.
    expires = models.DateTimeField(null=True, blank=True)
    date = models.DateTimeField(null=True, blank=True)
    # File name presented on download (the storage name is generated).
    filename = models.CharField(max_length=255)
    # NOTE(review): looks like a MIME type -- confirm against writers.
    type = models.CharField(max_length=255)
    # The stored content (optional -- null/blank allowed).
    file = models.FileField(null=True, blank=True, upload_to=cachedfile_name, max_length=255)
@receiver(post_delete, sender=CachedFile)
def cached_file_delete(sender, instance, **kwargs):
    """Remove the stored file after its CachedFile row has been deleted."""
    if not instance.file:
        return
    # save=False: the database row is already gone, so don't write the model.
    instance.file.delete(False)
class LoggingMixin:
    def log_action(self, action, data=None, user=None, api_token=None, auth=None, save=True):
        """
        Create a LogEntry object that is related to this object.

        :param action: The namespaced, dotted action code (e.g. ``a.b.c``)
        :param data: A JSON-serializable dictionary; values of keys containing
                     ``password``, ``secret`` or ``api_key`` are masked
        :param user: The user performing the action (optional)
        :param api_token: A TeamAPIToken, used when ``auth`` is not given
        :param auth: The credential used for the action; stored on the
                     matching LogEntry field depending on its type
        :param save: If False, the LogEntry is returned without being saved
                     (and no notifications or webhooks are triggered)
        :return: The (possibly unsaved) LogEntry instance
        """
        from pretix.api.models import OAuthAccessToken, OAuthApplication
        from pretix.api.webhooks import get_all_webhook_events, notify_webhooks

        from ..notifications import get_all_notification_types
        from ..services.notifications import notify
        from .devices import Device
        from .event import Event
        from .log import LogEntry
        from .organizer import TeamAPIToken

        # Attach the surrounding event, if any.
        event = None
        if isinstance(self, Event):
            event = self
        elif hasattr(self, 'event'):
            event = self.event
        if user and not user.is_authenticated:
            user = None

        kwargs = {}
        if isinstance(auth, OAuthAccessToken):
            kwargs['oauth_application'] = auth.application
        elif isinstance(auth, OAuthApplication):
            kwargs['oauth_application'] = auth
        elif isinstance(auth, TeamAPIToken):
            kwargs['api_token'] = auth
        elif isinstance(auth, Device):
            kwargs['device'] = auth
        elif isinstance(api_token, TeamAPIToken):
            kwargs['api_token'] = api_token

        logentry = LogEntry(content_object=self, user=user, action_type=action, event=event, **kwargs)
        if isinstance(data, dict):
            # Mask sensitive values on a shallow copy so that the caller's
            # dictionary is not mutated (previously it was modified in place).
            sensitivekeys = ('password', 'secret', 'api_key')
            data = {
                k: "********" if v and any(s in k for s in sensitivekeys) else v
                for k, v in data.items()
            }
            logentry.data = json.dumps(data, cls=CustomJSONEncoder, sort_keys=True)
        elif data:
            raise TypeError("You should only supply dictionaries as log data.")
        if save:
            logentry.save()

            no_types = get_all_notification_types()
            wh_types = get_all_webhook_events()
            no_type = None
            wh_type = None
            typepath = logentry.action_type
            # Walk up the dotted action type (``a.b.c`` -> ``a.b`` -> ``a``),
            # matching the exact type first and wildcard ('.*') entries on
            # shorter prefixes.  Bug fix: the condition previously tested
            # ``wh_types`` (the dict of all registered events, almost always
            # truthy) instead of ``wh_type``, so the webhook lookup stopped as
            # soon as a notification type had been found.
            while (not no_type or not wh_type) and '.' in typepath:
                wh_type = wh_type or wh_types.get(typepath + ('.*' if typepath != logentry.action_type else ''))
                no_type = no_type or no_types.get(typepath + ('.*' if typepath != logentry.action_type else ''))
                typepath = typepath.rsplit('.', 1)[0]
            if no_type:
                notify.apply_async(args=(logentry.pk,))
            if wh_type:
                notify_webhooks.apply_async(args=(logentry.pk,))
        return logentry
class LoggedModel(models.Model, LoggingMixin):
class Meta:
abstract = True
@cached_property
def logs_content_type(self):
return ContentType.objects.get_for_model(type(self))
@cached_property
def all_logentries_link(self):
from pretix.base.models import Event
if isinstance(self, Event):
event = self
elif hasattr(self, 'event'):
event = self.event
else:
return None
return reverse(
'control:event.log',
kwargs={
'event': event.slug,
'organizer': event.organizer.slug,
}
) + '?content_type={}&object={}'.format(
self.logs_content_type.pk,
self.pk
)
def top_logentries(self):
qs = self.all_logentries()
if self.all_logentries_link:
qs = qs[:25]
return qs
def top_logentries_has_more(self):
return self.all_logentries().count() > 25
def all_logentries(self):
from .log import LogEntry
return LogEntry.objects.filter(
content_type=self.logs_content_type, object_id=self.pk
).select_related('user', 'event', 'oauth_application', 'api_token', 'device')
class LockModel:
def refresh_for_update(self, fields=None, using=None, **kwargs):
if fields is not None:
if not fields:
return
if any(LOOKUP_SEP in f for f in fields):
raise ValueError(
'Found "%s" in fields argument. Relations and transforms '
'are not allowed in fields.' % LOOKUP_SEP)
hints = {'instance': self}
db_instance_qs = self.__class__._base_manager.db_manager(using, hints=hints).filter(pk=self.pk).select_for_update(**kwargs)
# Use provided fields, if not set then reload all non-deferred fields.
deferred_fields = self.get_deferred_fields()
if fields is not None:
fields = list(fields)
db_instance_qs = db_instance_qs.only(*fields)
elif deferred_fields:
fields = [f.attname for f in self._meta.concrete_fields
if f.attname not in deferred_fields]
db_instance_qs = db_instance_qs.only(*fields)
db_instance = db_instance_qs.get()
non_loaded_fields = db_instance.get_deferred_fields()
for field in self._meta.concrete_fields:
if field.attname in non_loaded_fields:
# This field wasn't refreshed - skip ahead.
continue
setattr(self, field.attname, getattr(db_instance, field.attname))
if field.is_relation and field.is_cached(self):
field.delete_cached_value(self)
for field in self._meta.related_objects:
if field.is_cached(self):
field.delete_cached_value(self)
self._state.db = db_instance._state.db
| true | true |
f7205e512691d1a027eaa2ec064dc6f8fc60327d | 690 | py | Python | anova.py | Roninkoi/Scicodes | 97eb4dc017ad4cd494b545aecaa9fdd7c501a9b7 | [
"MIT"
] | null | null | null | anova.py | Roninkoi/Scicodes | 97eb4dc017ad4cd494b545aecaa9fdd7c501a9b7 | [
"MIT"
] | null | null | null | anova.py | Roninkoi/Scicodes | 97eb4dc017ad4cd494b545aecaa9fdd7c501a9b7 | [
"MIT"
] | null | null | null | import numpy as np
from scipy.stats import f
# Does analysis of variance for a number of sets x.
# Each set in x is an array containing mean, variance
# and number [mean, var, n].
def anova(x):
    """One-way analysis of variance over pre-aggregated groups.

    Parameters
    ----------
    x : array_like, shape (r, 3)
        One row per group: ``[mean, variance, n]``.

    Returns
    -------
    fs : float
        The F statistic (between-group mean square / within-group mean square).
    p : float
        The p-value ``P(F >= fs)`` for an F distribution with
        ``(r - 1, N - r)`` degrees of freedom.
    """
    x = np.asarray(x, dtype=float)  # also accepts plain nested lists
    n = np.sum(x[:, 2])   # total number of observations N
    r = len(x)            # number of groups
    # BUGFIX: the grand mean must weight each group mean by its group size;
    # the unweighted mean of means is only correct for balanced groups.
    mean = np.sum(x[:, 0] * x[:, 2]) / n

    ssb = 0.
    for i in range(r):  # sum of squares between groups
        ssb += x[i, 2] * (x[i, 0] - mean)**2
    ssw = 0.
    for i in range(r):  # sum of squares within groups
        ssw += (x[i, 2] - 1) * x[i, 1]

    fs = (ssb / (r - 1)) / (ssw / (n - r))
    dfn, dfd = r - 1, n - r  # degrees of freedom
    # BUGFIX: the p-value is the upper tail P(F >= fs); f.cdf gives the lower
    # tail P(F <= fs), so use the survival function instead.
    p = f.sf(fs, dfn, dfd)
    return fs, p
| 27.6 | 57 | 0.550725 | import numpy as np
from scipy.stats import f
def anova(x):
    """One-way ANOVA over groups given as rows of ``[mean, variance, n]``.

    Returns the F statistic and the upper-tail p-value ``P(F >= fs)`` with
    ``(r - 1, N - r)`` degrees of freedom.
    """
    x = np.asarray(x, dtype=float)
    n = np.sum(x[:, 2])  # total observations N
    r = len(x)           # number of groups
    # BUGFIX: weight the grand mean by group size (correct for unbalanced groups).
    mean = np.sum(x[:, 0] * x[:, 2]) / n
    ssb = 0.
    for i in range(r):  # between-group sum of squares
        ssb += x[i, 2] * (x[i, 0] - mean)**2
    ssw = 0.
    for i in range(r):  # within-group sum of squares
        ssw += (x[i, 2] - 1) * x[i, 1]
    fs = (ssb / (r - 1)) / (ssw / (n - r))
    dfn, dfd = r - 1, n - r
    # BUGFIX: use the survival function (upper tail) for the p-value, not f.cdf.
    p = f.sf(fs, dfn, dfd)
    return fs, p
| true | true |
f7205fc62dfb7c7257990f72c13066d2d9797429 | 6,440 | py | Python | ir/text.py | goerz/incremental-reading | 443cc85d1dac6a8200f63b24bd443fb052d222dc | [
"ISC"
] | 1 | 2019-07-22T03:06:14.000Z | 2019-07-22T03:06:14.000Z | ir/text.py | goerz/incremental-reading | 443cc85d1dac6a8200f63b24bd443fb052d222dc | [
"ISC"
] | null | null | null | ir/text.py | goerz/incremental-reading | 443cc85d1dac6a8200f63b24bd443fb052d222dc | [
"ISC"
] | null | null | null | # Copyright 2013 Tiago Barroso
# Copyright 2013 Frank Kmiec
# Copyright 2013-2016 Aleksej
# Copyright 2017 Christian Weiß
# Copyright 2018 Timothée Chauvin
# Copyright 2017-2018 Joseph Lorimer <luoliyan@posteo.net>
#
# Permission to use, copy, modify, and distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright
# notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
from collections import defaultdict
from anki.notes import Note
from aqt import mw
from aqt.addcards import AddCards
from aqt.editcurrent import EditCurrent
from aqt.utils import getText, showInfo, showWarning, tooltip
from .util import fixImages, getField, setField
SCHEDULE_EXTRACT = 0
class TextManager:
    """Manages the text of incremental-reading notes.

    Responsible for highlighting/formatting the current selection, creating
    extract notes, and keeping a per-note undo history of the 'Text' field.

    NOTE(review): ``self.settings`` is read throughout but never assigned
    here -- presumably injected by the owning manager; confirm before reuse.
    """

    def __init__(self):
        # note id -> stack of previous 'Text' field values (for undo()).
        self.history = defaultdict(list)

    def highlight(self, bgColor=None, textColor=None):
        """Highlight the current selection, defaulting to the configured colors."""
        if not bgColor:
            bgColor = self.settings['highlightBgColor']
        if not textColor:
            textColor = self.settings['highlightTextColor']

        script = "highlight('%s', '%s')" % (bgColor, textColor)
        mw.web.eval(script)
        self.save()

    def format(self, style):
        """Apply an inline style to the selection via the page's format() JS helper."""
        mw.web.eval('format("%s")' % style)
        self.save()

    def toggleOverlay(self):
        """Toggle the text overlay in the web view."""
        mw.web.eval('toggleOverlay()')
        self.save()

    def extract(self, settings=None):
        """Extract the selected text into a new note.

        ``settings`` may come from a quick key; otherwise this manager's own
        settings are used. The selection is fetched asynchronously from the
        web view and handed to :meth:`create`.
        """
        if not settings:
            settings = self.settings

        if not mw.web.selectedText() and not settings['editExtract']:
            showInfo('Please select some text to extract.')
            return

        if settings['plainText']:
            mw.web.evalWithCallback(
                'getPlainText()',
                lambda text: self.create(text, settings))
        else:
            mw.web.evalWithCallback(
                'getHtmlText()',
                lambda text: self.create(text, settings))

    def create(self, text, settings):
        """Build the extract note, optionally schedule it, and highlight the source."""
        currentCard = mw.reviewer.card
        currentNote = currentCard.note()
        model = mw.col.models.byName(settings['modelName'])
        newNote = Note(mw.col, model)
        newNote.tags = currentNote.tags
        setField(newNote, settings['textField'], fixImages(text))

        # Resolve destination deck: configured extract deck, else current deck.
        if settings['extractDeck']:
            deck = mw.col.decks.byName(settings['extractDeck'])
            if not deck:
                showWarning('Destination deck no longer exists. '
                            'Please update your settings.')
                return
            did = deck['id']
        else:
            did = currentCard.did

        if settings['isQuickKey']:
            newNote.tags += settings['tags']
            if settings['sourceField']:
                setField(newNote,
                         settings['sourceField'],
                         getField(currentNote, self.settings['sourceField']))
            if settings['editExtract']:
                # The AddCards dialog (not us) adds the note when accepted.
                highlight = self._editExtract(newNote, did, settings)
            else:
                highlight = True
                newNote.model()['did'] = did
                mw.col.addNote(newNote)
        else:
            if settings['copyTitle']:
                title = getField(currentNote, settings['titleField'])
            else:
                title = ''
            setField(newNote,
                     settings['sourceField'],
                     getField(currentNote, settings['sourceField']))
            if settings['prioEnabled']:
                # Inherit the parent's priority.
                setField(newNote,
                         settings['priorityField'],
                         getField(currentNote, settings['priorityField']))
            if settings['editExtract']:
                setField(newNote, settings['titleField'], title)
                highlight = self._editExtract(newNote, did, settings)
            else:
                highlight = self._getTitle(newNote, did, title, settings)
            if settings['scheduleExtract'] and not settings['prioEnabled']:
                cards = newNote.cards()
                if cards:
                    mw.readingManager.scheduler.answer(
                        cards[0], SCHEDULE_EXTRACT)

        # Mark the extracted passage in the source note.
        if highlight:
            self.highlight(settings['extractBgColor'],
                           settings['extractTextColor'])

        if settings['editSource']:
            EditCurrent(mw)

    def _editExtract(self, note, did, settings):
        """Open the AddCards dialog pre-filled with ``note``; returns True."""
        def onAdd():
            # Dialog accepted: closing it must not trigger the undo handler.
            addCards.rejected.disconnect(self.undo)
            addCards.reject()

        addCards = AddCards(mw)
        # Cancelling the dialog undoes the highlight made on the source note.
        addCards.rejected.connect(self.undo)
        addCards.addButton.clicked.connect(onAdd)
        addCards.editor.setNote(note, focusTo=0)
        deckName = mw.col.decks.get(did)['name']
        addCards.deckChooser.setDeckName(deckName)
        addCards.modelChooser.models.setText(settings['modelName'])
        return True

    def _getTitle(self, note, did, title, settings):
        """Prompt for a title and add the note; returns whether the prompt was accepted."""
        title, accepted = getText(
            'Enter title', title='Extract Text', default=title)

        if accepted:
            setField(note, settings['titleField'], title)
            note.model()['did'] = did
            mw.col.addNote(note)

        return accepted

    def remove(self):
        """Remove the selected text from the current note."""
        mw.web.eval('removeText()')
        self.save()

    def undo(self):
        """Restore the previous 'Text' value of the current note, if any."""
        note = mw.reviewer.card.note()

        if note.id not in self.history or not self.history[note.id]:
            showInfo('No undo history for this note.')
            return

        note['Text'] = self.history[note.id].pop()
        note.flush()
        mw.reset()
        tooltip('Undone')

    def save(self):
        """Persist the rendered text back to the note, recording undo history."""
        def callback(text):
            if text:
                note = mw.reviewer.card.note()
                # Push the old value so undo() can restore it.
                self.history[note.id].append(note['Text'])
                note['Text'] = text
                note.flush()

        mw.web.evalWithCallback(
            'document.getElementsByClassName("ir-text")[0].innerHTML;',
            callback)
| 34.074074 | 79 | 0.590373 |
from collections import defaultdict
from anki.notes import Note
from aqt import mw
from aqt.addcards import AddCards
from aqt.editcurrent import EditCurrent
from aqt.utils import getText, showInfo, showWarning, tooltip
from .util import fixImages, getField, setField
SCHEDULE_EXTRACT = 0
class TextManager:
def __init__(self):
self.history = defaultdict(list)
def highlight(self, bgColor=None, textColor=None):
if not bgColor:
bgColor = self.settings['highlightBgColor']
if not textColor:
textColor = self.settings['highlightTextColor']
script = "highlight('%s', '%s')" % (bgColor, textColor)
mw.web.eval(script)
self.save()
def format(self, style):
mw.web.eval('format("%s")' % style)
self.save()
def toggleOverlay(self):
mw.web.eval('toggleOverlay()')
self.save()
def extract(self, settings=None):
if not settings:
settings = self.settings
if not mw.web.selectedText() and not settings['editExtract']:
showInfo('Please select some text to extract.')
return
if settings['plainText']:
mw.web.evalWithCallback(
'getPlainText()',
lambda text: self.create(text, settings))
else:
mw.web.evalWithCallback(
'getHtmlText()',
lambda text: self.create(text, settings))
def create(self, text, settings):
currentCard = mw.reviewer.card
currentNote = currentCard.note()
model = mw.col.models.byName(settings['modelName'])
newNote = Note(mw.col, model)
newNote.tags = currentNote.tags
setField(newNote, settings['textField'], fixImages(text))
if settings['extractDeck']:
deck = mw.col.decks.byName(settings['extractDeck'])
if not deck:
showWarning('Destination deck no longer exists. '
'Please update your settings.')
return
did = deck['id']
else:
did = currentCard.did
if settings['isQuickKey']:
newNote.tags += settings['tags']
if settings['sourceField']:
setField(newNote,
settings['sourceField'],
getField(currentNote, self.settings['sourceField']))
if settings['editExtract']:
highlight = self._editExtract(newNote, did, settings)
else:
highlight = True
newNote.model()['did'] = did
mw.col.addNote(newNote)
else:
if settings['copyTitle']:
title = getField(currentNote, settings['titleField'])
else:
title = ''
setField(newNote,
settings['sourceField'],
getField(currentNote, settings['sourceField']))
if settings['prioEnabled']:
setField(newNote,
settings['priorityField'],
getField(currentNote, settings['priorityField']))
if settings['editExtract']:
setField(newNote, settings['titleField'], title)
highlight = self._editExtract(newNote, did, settings)
else:
highlight = self._getTitle(newNote, did, title, settings)
if settings['scheduleExtract'] and not settings['prioEnabled']:
cards = newNote.cards()
if cards:
mw.readingManager.scheduler.answer(
cards[0], SCHEDULE_EXTRACT)
if highlight:
self.highlight(settings['extractBgColor'],
settings['extractTextColor'])
if settings['editSource']:
EditCurrent(mw)
def _editExtract(self, note, did, settings):
def onAdd():
addCards.rejected.disconnect(self.undo)
addCards.reject()
addCards = AddCards(mw)
addCards.rejected.connect(self.undo)
addCards.addButton.clicked.connect(onAdd)
addCards.editor.setNote(note, focusTo=0)
deckName = mw.col.decks.get(did)['name']
addCards.deckChooser.setDeckName(deckName)
addCards.modelChooser.models.setText(settings['modelName'])
return True
def _getTitle(self, note, did, title, settings):
title, accepted = getText(
'Enter title', title='Extract Text', default=title)
if accepted:
setField(note, settings['titleField'], title)
note.model()['did'] = did
mw.col.addNote(note)
return accepted
def remove(self):
mw.web.eval('removeText()')
self.save()
def undo(self):
note = mw.reviewer.card.note()
if note.id not in self.history or not self.history[note.id]:
showInfo('No undo history for this note.')
return
note['Text'] = self.history[note.id].pop()
note.flush()
mw.reset()
tooltip('Undone')
def save(self):
def callback(text):
if text:
note = mw.reviewer.card.note()
self.history[note.id].append(note['Text'])
note['Text'] = text
note.flush()
mw.web.evalWithCallback(
'document.getElementsByClassName("ir-text")[0].innerHTML;',
callback)
| true | true |
f720608eb7164c7da567f088dbf879d1825a9a14 | 18 | py | Python | drivers/__init__.py | lucasmazz/ackermann-line-follower-robot | fe2914abdfe8bfb0867955b2e8a5fe787d30e0a9 | [
"BSD-2-Clause"
] | 1 | 2022-02-03T22:10:00.000Z | 2022-02-03T22:10:00.000Z | drivers/__init__.py | lucasmazz/ackermann-line-follower-robot | fe2914abdfe8bfb0867955b2e8a5fe787d30e0a9 | [
"BSD-2-Clause"
] | null | null | null | drivers/__init__.py | lucasmazz/ackermann-line-follower-robot | fe2914abdfe8bfb0867955b2e8a5fe787d30e0a9 | [
"BSD-2-Clause"
] | 1 | 2022-02-17T13:10:56.000Z | 2022-02-17T13:10:56.000Z | from .car import * | 18 | 18 | 0.722222 | from .car import * | true | true |
f720616eb7e8ff6c025fb8cda31659df286aedc8 | 1,043 | py | Python | vasp/vaspForcesRunningAverage.py | roryvandervalk/computationalChemistryTools | 9e718433f82e010d127f576bd2bf48af3953e8b1 | [
"MIT"
] | null | null | null | vasp/vaspForcesRunningAverage.py | roryvandervalk/computationalChemistryTools | 9e718433f82e010d127f576bd2bf48af3953e8b1 | [
"MIT"
] | null | null | null | vasp/vaspForcesRunningAverage.py | roryvandervalk/computationalChemistryTools | 9e718433f82e010d127f576bd2bf48af3953e8b1 | [
"MIT"
] | null | null | null | #! /bin/python3
def _window_averages(forces, window):
    """Return running [max, RMS] force averages over ``window`` steps.

    ``forces`` is a list of ``[max_force, rms_force]`` pairs, one per ionic
    step. The first entry averages the first ``min(window, len(forces))``
    samples; each later entry is the trailing ``window``-sample average.
    """
    first = forces[:window]
    # BUGFIX: divide the first average by the actual number of samples, not
    # by ``window`` -- runs shorter than the window were averaged wrongly.
    m = len(first)
    av_max = sum(x[0] for x in first)
    av_rms = sum(x[1] for x in first)
    averages = [[av_max / m, av_rms / m]]
    # Sliding window: add the newest sample, drop the oldest.
    for i in range(window, len(forces)):
        av_max += forces[i][0] - forces[i - window][0]
        av_rms += forces[i][1] - forces[i - window][1]
        averages.append([av_max / window, av_rms / window])
    return averages


if __name__ == "__main__":
    # Collect the trailing [max, RMS] force pair from each "FORCES:" line
    # of a VASP OUTCAR file.
    forces = []
    with open('OUTCAR', 'r') as outcar:  # `with` guarantees the file closes
        for line in outcar:
            if "FORCES:" in line:
                forces.append([float(v) for v in line.split()[-2:]])

    # Early exit to avoid indexing into an empty list.
    if len(forces) == 0:
        print("No 'FORCES:' entries found")
    else:
        print(forces[-1])
        window = 5
        print("Running Average (window size " + str(window) + ")")
        print("Max, RMS")
        for av_max, av_rms in _window_averages(forces, window):
            print("{0:>8.4f}{1:>8.4f}".format(av_max, av_rms))
| 28.972222 | 59 | 0.567593 |
def _window_averages(forces, window):
    """Return running [max, RMS] force averages over ``window`` steps."""
    first = forces[:window]
    # BUGFIX: divide by the actual sample count so short runs average correctly.
    m = len(first)
    av_max = sum(x[0] for x in first)
    av_rms = sum(x[1] for x in first)
    averages = [[av_max / m, av_rms / m]]
    for i in range(window, len(forces)):
        av_max += forces[i][0] - forces[i - window][0]
        av_rms += forces[i][1] - forces[i - window][1]
        averages.append([av_max / window, av_rms / window])
    return averages


if __name__ == "__main__":
    forces = []
    with open('OUTCAR', 'r') as outcar:
        for line in outcar:
            if "FORCES:" in line:
                forces.append([float(v) for v in line.split()[-2:]])

    if len(forces) == 0:
        print("No 'FORCES:' entries found")
    else:
        print(forces[-1])
        window = 5
        print("Running Average (window size " + str(window) + ")")
        print("Max, RMS")
        for av_max, av_rms in _window_averages(forces, window):
            print("{0:>8.4f}{1:>8.4f}".format(av_max, av_rms))
| true | true |
f72062661e10fbf546f7901cb621778d0a733be7 | 178 | py | Python | bg_helper/tools/__init__.py | kenjyco/bg-helper | a0760009864f0e677d29cd522e6c95350c75c2c7 | [
"MIT"
] | null | null | null | bg_helper/tools/__init__.py | kenjyco/bg-helper | a0760009864f0e677d29cd522e6c95350c75c2c7 | [
"MIT"
] | null | null | null | bg_helper/tools/__init__.py | kenjyco/bg-helper | a0760009864f0e677d29cd522e6c95350c75c2c7 | [
"MIT"
] | null | null | null | from bg_helper.tools._docker import *
from bg_helper.tools._git import *
from bg_helper.tools._grep import *
from bg_helper.tools._ps import *
from bg_helper.tools._ssh import *
| 29.666667 | 37 | 0.803371 | from bg_helper.tools._docker import *
from bg_helper.tools._git import *
from bg_helper.tools._grep import *
from bg_helper.tools._ps import *
from bg_helper.tools._ssh import *
| true | true |
f72062c7ad5e58574682ee8059b838a63b9a0352 | 5,296 | py | Python | flask/app.py | vincentlaucsb/us_commutes_ii | e3f32d7baddbf7fec483b95bd2835d58befc3cad | [
"MIT"
] | 1 | 2020-10-17T18:40:20.000Z | 2020-10-17T18:40:20.000Z | flask/app.py | vincentlaucsb/us-commutes-2 | e3f32d7baddbf7fec483b95bd2835d58befc3cad | [
"MIT"
] | null | null | null | flask/app.py | vincentlaucsb/us-commutes-2 | e3f32d7baddbf7fec483b95bd2835d58befc3cad | [
"MIT"
] | null | null | null | from flask import Flask, jsonify
from flask_cors import CORS, cross_origin
from os import getenv
import psycopg2
import pickle
from secret import PG_PASSWORD
class Queries(object):
@staticmethod
def query(sql: str):
try:
with open('cache-{}'.format(hash(sql)), mode='rb') as infile:
return pickle.load(infile)
except FileNotFoundError:
with psycopg2.connect("host=localhost dbname=us-commutes user=postgres password={pwd}".format(pwd=PG_PASSWORD)) as conn:
cur = conn.cursor()
cur.execute(sql);
result = cur.fetchall()
if len(result) == 1:
result = result[0]
if len(result) == 1:
result = result[0]
# Cache the response
data = jsonify(result)
with open('cache-{}'.format(hash(sql)), mode='wb') as outfile:
pickle.dump(data, outfile)
return data
def init_db():
    """(Re)create the ``commute_times`` view over the raw ACS S0801 table.

    The view narrows the census table to the columns the API serves: worker
    counts, travel-time buckets, a derived LONG_COMMUTES total, work location
    and mode of transport.
    """
    with psycopg2.connect("host=localhost dbname=us-commutes user=postgres password={pwd}".format(pwd=PG_PASSWORD)) as conn:
        cur = conn.cursor()
        cur.execute('''
        DROP VIEW IF EXISTS commute_times;
        CREATE VIEW commute_times AS
        SELECT
            "GEO.id",
            /* Workers 16 years and over */
            "HC01_EST_VC01", "HC02_EST_VC01", "HC03_EST_VC01",
            /* Travel Time Categories */
            "HC01_EST_VC46", "HC02_EST_VC46", "HC03_EST_VC46",
            "HC01_EST_VC47", "HC02_EST_VC47", "HC03_EST_VC47",
            "HC01_EST_VC48", "HC02_EST_VC48", "HC03_EST_VC48",
            "HC01_EST_VC49", "HC02_EST_VC49", "HC03_EST_VC49",
            "HC01_EST_VC50", "HC02_EST_VC50", "HC03_EST_VC50",
            "HC01_EST_VC51", "HC02_EST_VC51", "HC03_EST_VC51",
            "HC01_EST_VC52", "HC02_EST_VC52", "HC03_EST_VC52",
            "HC01_EST_VC53", "HC02_EST_VC53", "HC03_EST_VC53", /* 45-59 minutes */
            "HC01_EST_VC54", "HC02_EST_VC54", "HC03_EST_VC54", /* 60+ minutes */
            "HC01_EST_VC55", "HC02_EST_VC55", "HC03_EST_VC55", /* Mean Travel Time */
            ("HC01_EST_VC53" + "HC02_EST_VC53" + "HC03_EST_VC53") as "LONG_COMMUTES",
            /* Location of Work */
            "HC01_EST_VC19", /* Outside county */
            "HC01_EST_VC20", /* Outside state */
            /* Mode of Transport */
            "HC01_EST_VC03", "HC02_EST_VC03", "HC03_EST_VC03",
            "HC01_EST_VC04", "HC02_EST_VC04", "HC03_EST_VC04",
            "HC01_EST_VC05", "HC02_EST_VC05", "HC03_EST_VC05",
            "HC01_EST_VC10", "HC02_EST_VC10", "HC03_EST_VC10",
            "HC01_EST_VC11", "HC02_EST_VC11", "HC03_EST_VC11",
            "HC01_EST_VC12", "HC02_EST_VC12", "HC03_EST_VC12",
            "HC01_EST_VC13", "HC02_EST_VC13", "HC03_EST_VC13",
            "HC01_EST_VC14", "HC02_EST_VC14", "HC03_EST_VC14" /* Work at home */
        FROM "ACS_16_5YR_S0801_with_ann.csv";
        ''')
def init_app():
    """Ensure the database view exists, then build the Flask application."""
    init_db()
    return Flask(__name__)
# Application + CORS wiring: the API is consumed cross-origin by the frontend.
app = init_app()
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
@app.route("/")
@cross_origin()
def hello():
    """Trivial health-check endpoint."""
    return jsonify("Hello, World!")
@app.route("/map")
@cross_origin()
def map():
    """Return county GeoJSON merged with commute statistics.

    Builds a single FeatureCollection where each county feature's properties
    are augmented with every commute_times column, national/state ranks for
    mean travel time, and the state name.
    """
    query = '''
    SELECT
        jsonb_build_object('type', 'FeatureCollection')
        || jsonb_build_object('features', array_agg(features))
    FROM (
        WITH _features AS (
            SELECT jsonb_array_elements(data->'features') AS features
            FROM counties
        )
        SELECT
            F.features
            || jsonb_build_object(
                'properties', F.features->'properties'
                || jsonb_build_object(
                    /* Mean Travel Time */
                    'HC01_EST_VC55_RANK', rank() OVER (ORDER BY D1."HC01_EST_VC55" ASC),
                    'HC01_EST_VC55_STATE_RANK', rank() OVER (PARTITION BY S."STATE" ORDER BY D1."HC01_EST_VC55" ASC),
                    /* State Name */
                    'STATE_NAME',S."STATE_NAME"
                )
                || to_jsonb(row_to_json(D1))
            ) AS features
        FROM
            _features F,
            commute_times D1,
            "state.txt" S
        WHERE
            D1."GEO.id" = (F.features->'properties'->>'GEO_ID') AND
            S."STATE" = (F.features->'properties'->>'STATE')::bigint
    ) as subquery
    '''
    return Queries.query(query)
@app.route("/percentiles/<column>")
@cross_origin()
def percentiles(column):
    """Return the 12.5%..87.5% percentiles of ``column`` from commute_times.

    ``column`` comes straight from the URL and is interpolated into the SQL
    text (identifiers cannot be bound as query parameters), so it is first
    validated as a plain identifier.
    """
    # SECURITY: reject anything outside [A-Za-z0-9_.] to block SQL injection
    # through the path parameter (the name is spliced inside double quotes
    # below, so '"' in particular must never get through).
    if not column or not all(ch.isalnum() or ch in '_.' for ch in column):
        return jsonify({'error': 'invalid column name'}), 400

    query = '''SELECT jsonb_build_object(
        0.125, percentile_cont(0.125) WITHIN GROUP(ORDER BY "{col}"),
        0.25, percentile_cont(0.25) WITHIN GROUP(ORDER BY "{col}"),
        0.375, percentile_cont(0.375) WITHIN GROUP(ORDER BY "{col}"),
        0.5, percentile_cont(0.5) WITHIN GROUP(ORDER BY "{col}"),
        0.625, percentile_cont(0.625) WITHIN GROUP(ORDER BY "{col}"),
        0.75, percentile_cont(0.75) WITHIN GROUP(ORDER BY "{col}"),
        0.875, percentile_cont(0.875) WITHIN GROUP(ORDER BY "{col}")
    ) FROM commute_times
    '''.format(col=column)
    return Queries.query(query)
if __name__ == "__main__":
    # Development entry point: bind on all interfaces so the API is reachable
    # from other hosts.
    app.run(host="0.0.0.0")
from flask_cors import CORS, cross_origin
from os import getenv
import psycopg2
import pickle
from secret import PG_PASSWORD
class Queries(object):
@staticmethod
def query(sql: str):
try:
with open('cache-{}'.format(hash(sql)), mode='rb') as infile:
return pickle.load(infile)
except FileNotFoundError:
with psycopg2.connect("host=localhost dbname=us-commutes user=postgres password={pwd}".format(pwd=PG_PASSWORD)) as conn:
cur = conn.cursor()
cur.execute(sql);
result = cur.fetchall()
if len(result) == 1:
result = result[0]
if len(result) == 1:
result = result[0]
data = jsonify(result)
with open('cache-{}'.format(hash(sql)), mode='wb') as outfile:
pickle.dump(data, outfile)
return data
def init_db():
with psycopg2.connect("host=localhost dbname=us-commutes user=postgres password={pwd}".format(pwd=PG_PASSWORD)) as conn:
cur = conn.cursor()
cur.execute('''
DROP VIEW IF EXISTS commute_times;
CREATE VIEW commute_times AS
SELECT
"GEO.id",
/* Workers 16 years and over */
"HC01_EST_VC01", "HC02_EST_VC01", "HC03_EST_VC01",
/* Travel Time Categories */
"HC01_EST_VC46", "HC02_EST_VC46", "HC03_EST_VC46",
"HC01_EST_VC47", "HC02_EST_VC47", "HC03_EST_VC47",
"HC01_EST_VC48", "HC02_EST_VC48", "HC03_EST_VC48",
"HC01_EST_VC49", "HC02_EST_VC49", "HC03_EST_VC49",
"HC01_EST_VC50", "HC02_EST_VC50", "HC03_EST_VC50",
"HC01_EST_VC51", "HC02_EST_VC51", "HC03_EST_VC51",
"HC01_EST_VC52", "HC02_EST_VC52", "HC03_EST_VC52",
"HC01_EST_VC53", "HC02_EST_VC53", "HC03_EST_VC53", /* 45-59 minutes */
"HC01_EST_VC54", "HC02_EST_VC54", "HC03_EST_VC54", /* 60+ minutes */
"HC01_EST_VC55", "HC02_EST_VC55", "HC03_EST_VC55", /* Mean Travel Time */
("HC01_EST_VC53" + "HC02_EST_VC53" + "HC03_EST_VC53") as "LONG_COMMUTES",
/* Location of Work */
"HC01_EST_VC19", /* Outside county */
"HC01_EST_VC20", /* Outside state */
/* Mode of Transport */
"HC01_EST_VC03", "HC02_EST_VC03", "HC03_EST_VC03",
"HC01_EST_VC04", "HC02_EST_VC04", "HC03_EST_VC04",
"HC01_EST_VC05", "HC02_EST_VC05", "HC03_EST_VC05",
"HC01_EST_VC10", "HC02_EST_VC10", "HC03_EST_VC10",
"HC01_EST_VC11", "HC02_EST_VC11", "HC03_EST_VC11",
"HC01_EST_VC12", "HC02_EST_VC12", "HC03_EST_VC12",
"HC01_EST_VC13", "HC02_EST_VC13", "HC03_EST_VC13",
"HC01_EST_VC14", "HC02_EST_VC14", "HC03_EST_VC14" /* Work at home */
FROM "ACS_16_5YR_S0801_with_ann.csv";
''')
def init_app():
init_db()
app = Flask(__name__)
return app
app = init_app()
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
@app.route("/")
@cross_origin()
def hello():
return jsonify("Hello, World!")
@app.route("/map")
@cross_origin()
def map():
query = '''
SELECT
jsonb_build_object('type', 'FeatureCollection')
|| jsonb_build_object('features', array_agg(features))
FROM (
WITH _features AS (
SELECT jsonb_array_elements(data->'features') AS features
FROM counties
)
SELECT
F.features
|| jsonb_build_object(
'properties', F.features->'properties'
|| jsonb_build_object(
/* Mean Travel Time */
'HC01_EST_VC55_RANK', rank() OVER (ORDER BY D1."HC01_EST_VC55" ASC),
'HC01_EST_VC55_STATE_RANK', rank() OVER (PARTITION BY S."STATE" ORDER BY D1."HC01_EST_VC55" ASC),
/* State Name */
'STATE_NAME',S."STATE_NAME"
)
|| to_jsonb(row_to_json(D1))
) AS features
FROM
_features F,
commute_times D1,
"state.txt" S
WHERE
D1."GEO.id" = (F.features->'properties'->>'GEO_ID') AND
S."STATE" = (F.features->'properties'->>'STATE')::bigint
) as subquery
'''
return Queries.query(query)
@app.route("/percentiles/<column>")
@cross_origin()
def percentiles(column):
query = '''SELECT jsonb_build_object(
0.125, percentile_cont(0.125) WITHIN GROUP(ORDER BY "{col}"),
0.25, percentile_cont(0.25) WITHIN GROUP(ORDER BY "{col}"),
0.375, percentile_cont(0.375) WITHIN GROUP(ORDER BY "{col}"),
0.5, percentile_cont(0.5) WITHIN GROUP(ORDER BY "{col}"),
0.625, percentile_cont(0.625) WITHIN GROUP(ORDER BY "{col}"),
0.75, percentile_cont(0.75) WITHIN GROUP(ORDER BY "{col}"),
0.875, percentile_cont(0.875) WITHIN GROUP(ORDER BY "{col}")
) FROM commute_times
'''.format(col=column)
return Queries.query(query)
if __name__ == "__main__":
app.run(host="0.0.0.0") | true | true |
f72062e75ab889b21768dc24ee456870d015edac | 3,932 | py | Python | flash/image/data.py | dudeperf3ct/lightning-flash | a855cd14cf1cd0301b4a2f82c0c95e4d8d986650 | [
"Apache-2.0"
] | 1 | 2022-03-09T22:40:05.000Z | 2022-03-09T22:40:05.000Z | flash/image/data.py | dudeperf3ct/lightning-flash | a855cd14cf1cd0301b4a2f82c0c95e4d8d986650 | [
"Apache-2.0"
] | null | null | null | flash/image/data.py | dudeperf3ct/lightning-flash | a855cd14cf1cd0301b4a2f82c0c95e4d8d986650 | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
from io import BytesIO
from pathlib import Path
from typing import Any, Dict, List
import numpy as np
import torch
import flash
from flash.core.data.io.input import DataKeys, Input, ServeInput
from flash.core.data.utilities.paths import filter_valid_files, has_file_allowed_extension, PATH_TYPE
from flash.core.data.utilities.samples import to_samples
from flash.core.data.utils import image_default_loader
from flash.core.utilities.imports import _TORCHVISION_AVAILABLE, Image, requires
# torchvision supplies the canonical image-extension list and the
# tensor -> PIL conversion; fall back to a hard-coded copy of the extension
# list when torchvision is not installed.
if _TORCHVISION_AVAILABLE:
    from torchvision.datasets.folder import IMG_EXTENSIONS
    from torchvision.transforms.functional import to_pil_image
else:
    IMG_EXTENSIONS = (".jpg", ".jpeg", ".png", ".ppm", ".bmp", ".pgm", ".tif", ".tiff", ".webp")

# NumPy array files are also accepted as image inputs (loaded as RGB uint8).
NP_EXTENSIONS = (".npy",)
def image_loader(filepath: str):
    """Load ``filepath`` as a PIL image; supports image files and ``.npy`` arrays."""
    if has_file_allowed_extension(filepath, IMG_EXTENSIONS):
        return image_default_loader(filepath)
    if has_file_allowed_extension(filepath, NP_EXTENSIONS):
        # Saved arrays are interpreted as uint8 RGB data.
        return Image.fromarray(np.load(filepath).astype("uint8"), "RGB")
    raise ValueError(
        f"File: {filepath} has an unsupported extension. Supported extensions: "
        f"{list(IMG_EXTENSIONS + NP_EXTENSIONS)}."
    )
class ImageDeserializer(ServeInput):
    """Serve input that decodes a base64-encoded image payload."""

    @requires("image")
    def serve_load_sample(self, data: str) -> Dict:
        # Pad the payload before decoding; b64decode tolerates extra '='.
        raw = base64.b64decode((data + "===").encode("ascii"))
        image = Image.open(BytesIO(raw), mode="r")
        return {DataKeys.INPUT: image}

    @property
    def example_input(self) -> str:
        """A base64-encoded sample image for API documentation."""
        asset = Path(flash.ASSETS_ROOT) / "fish.jpg"
        with asset.open("rb") as handle:
            return base64.b64encode(handle.read()).decode("UTF-8")
class ImageInput(Input):
    """Base input that records the image size in each sample's metadata."""

    @requires("image")
    def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:
        width, height = sample[DataKeys.INPUT].size  # PIL reports W x H
        metadata = sample.setdefault(DataKeys.METADATA, {})
        metadata["size"] = (height, width)
        return sample
class ImageFilesInput(ImageInput):
    """Input backed by a list of image (or ``.npy``) file paths."""

    def load_data(self, files: List[PATH_TYPE]) -> List[Dict[str, Any]]:
        # Drop anything without a supported extension before wrapping.
        valid = filter_valid_files(files, valid_extensions=IMG_EXTENSIONS + NP_EXTENSIONS)
        return to_samples(valid)

    def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:
        path = sample[DataKeys.INPUT]
        sample[DataKeys.INPUT] = image_loader(path)
        sample = super().load_sample(sample)
        # Keep the originating path available downstream.
        sample[DataKeys.METADATA]["filepath"] = path
        return sample
class ImageTensorInput(ImageInput):
    """Input backed by image tensors."""

    def load_data(self, tensor: Any) -> List[Dict[str, Any]]:
        return to_samples(tensor)

    def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:
        # Convert to PIL before the base class records the image size.
        sample[DataKeys.INPUT] = to_pil_image(sample[DataKeys.INPUT])
        return super().load_sample(sample)
class ImageNumpyInput(ImageInput):
    """Input backed by NumPy image arrays."""

    def load_data(self, array: Any) -> List[Dict[str, Any]]:
        return to_samples(array)

    def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:
        # Round-trip through a torch tensor, then to PIL, before base handling.
        as_tensor = torch.from_numpy(sample[DataKeys.INPUT])
        sample[DataKeys.INPUT] = to_pil_image(as_tensor)
        return super().load_sample(sample)
| 36.073394 | 101 | 0.689471 |
import base64
from io import BytesIO
from pathlib import Path
from typing import Any, Dict, List
import numpy as np
import torch
import flash
from flash.core.data.io.input import DataKeys, Input, ServeInput
from flash.core.data.utilities.paths import filter_valid_files, has_file_allowed_extension, PATH_TYPE
from flash.core.data.utilities.samples import to_samples
from flash.core.data.utils import image_default_loader
from flash.core.utilities.imports import _TORCHVISION_AVAILABLE, Image, requires
if _TORCHVISION_AVAILABLE:
from torchvision.datasets.folder import IMG_EXTENSIONS
from torchvision.transforms.functional import to_pil_image
else:
IMG_EXTENSIONS = (".jpg", ".jpeg", ".png", ".ppm", ".bmp", ".pgm", ".tif", ".tiff", ".webp")
NP_EXTENSIONS = (".npy",)
def image_loader(filepath: str):
if has_file_allowed_extension(filepath, IMG_EXTENSIONS):
img = image_default_loader(filepath)
elif has_file_allowed_extension(filepath, NP_EXTENSIONS):
img = Image.fromarray(np.load(filepath).astype("uint8"), "RGB")
else:
raise ValueError(
f"File: {filepath} has an unsupported extension. Supported extensions: "
f"{list(IMG_EXTENSIONS + NP_EXTENSIONS)}."
)
return img
class ImageDeserializer(ServeInput):
@requires("image")
def serve_load_sample(self, data: str) -> Dict:
encoded_with_padding = (data + "===").encode("ascii")
img = base64.b64decode(encoded_with_padding)
buffer = BytesIO(img)
img = Image.open(buffer, mode="r")
return {
DataKeys.INPUT: img,
}
@property
def example_input(self) -> str:
with (Path(flash.ASSETS_ROOT) / "fish.jpg").open("rb") as f:
return base64.b64encode(f.read()).decode("UTF-8")
class ImageInput(Input):
@requires("image")
def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:
w, h = sample[DataKeys.INPUT].size
if DataKeys.METADATA not in sample:
sample[DataKeys.METADATA] = {}
sample[DataKeys.METADATA]["size"] = (h, w)
return sample
class ImageFilesInput(ImageInput):
def load_data(self, files: List[PATH_TYPE]) -> List[Dict[str, Any]]:
files = filter_valid_files(files, valid_extensions=IMG_EXTENSIONS + NP_EXTENSIONS)
return to_samples(files)
def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:
filepath = sample[DataKeys.INPUT]
sample[DataKeys.INPUT] = image_loader(filepath)
sample = super().load_sample(sample)
sample[DataKeys.METADATA]["filepath"] = filepath
return sample
class ImageTensorInput(ImageInput):
def load_data(self, tensor: Any) -> List[Dict[str, Any]]:
return to_samples(tensor)
def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:
img = to_pil_image(sample[DataKeys.INPUT])
sample[DataKeys.INPUT] = img
return super().load_sample(sample)
class ImageNumpyInput(ImageInput):
def load_data(self, array: Any) -> List[Dict[str, Any]]:
return to_samples(array)
def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:
img = to_pil_image(torch.from_numpy(sample[DataKeys.INPUT]))
sample[DataKeys.INPUT] = img
return super().load_sample(sample)
| true | true |
f72062f74e3d658e22fe2cb10addd4d8ad13e2b0 | 399 | py | Python | thefuck/rules/brew_uninstall.py | pybenchmark/thefuck | 993a661c6048063e84645015cc832602b6ec32df | [
"MIT"
] | 1 | 2021-12-13T18:41:46.000Z | 2021-12-13T18:41:46.000Z | thefuck/rules/brew_uninstall.py | pybenchmark/thefuck | 993a661c6048063e84645015cc832602b6ec32df | [
"MIT"
] | 4 | 2020-12-23T15:44:08.000Z | 2020-12-23T16:48:59.000Z | thefuck/rules/brew_uninstall.py | pybenchmark/thefuck | 993a661c6048063e84645015cc832602b6ec32df | [
"MIT"
] | 1 | 2020-12-23T14:46:54.000Z | 2020-12-23T14:46:54.000Z | from thefuck.utils import for_app
@for_app('brew', at_least=2)
def match(command):
    """Match failed brew removals where brew itself suggested --force."""
    subcommand = command.script_parts[1]
    return (subcommand in ('uninstall', 'rm', 'remove')
            and "brew uninstall --force" in command.stdout)
def get_new_command(command):
    """Rewrite the failed command as `brew uninstall --force <formula...>`."""
    parts = list(command.script_parts)
    parts[1] = 'uninstall'
    parts.insert(2, '--force')
    return ' '.join(parts)
| 26.6 | 68 | 0.684211 | from thefuck.utils import for_app
@for_app('brew', at_least=2)
def match(command):
return (command.script_parts[1] in ['uninstall', 'rm', 'remove']
and "brew uninstall --force" in command.stdout)
def get_new_command(command):
command_parts = command.script_parts[:]
command_parts[1] = 'uninstall'
command_parts.insert(2, '--force')
return ' '.join(command_parts)
| true | true |
f720630f134225e35a47baee79036aa6afb6bbf5 | 2,354 | py | Python | aliyun-python-sdk-sddp/aliyunsdksddp/request/v20190103/DescribeDataAssetsRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-sddp/aliyunsdksddp/request/v20190103/DescribeDataAssetsRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-sddp/aliyunsdksddp/request/v20190103/DescribeDataAssetsRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksddp.endpoint import endpoint_data
class DescribeDataAssetsRequest(RpcRequest):
    """Request object for the Sddp ``DescribeDataAssets`` API (version 2019-01-03).

    Each property below maps directly onto a query parameter of the API.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Sddp', '2019-01-03', 'DescribeDataAssets')
        self.set_method('POST')
        # Endpoint data attributes are optional on older core SDKs, hence the guards.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_RiskLevels(self):
        return self.get_query_params().get('RiskLevels')

    def set_RiskLevels(self, RiskLevels):
        self.add_query_param('RiskLevels', RiskLevels)

    def get_RangeId(self):
        return self.get_query_params().get('RangeId')

    def set_RangeId(self, RangeId):
        self.add_query_param('RangeId', RangeId)

    def get_PageSize(self):
        return self.get_query_params().get('PageSize')

    def set_PageSize(self, PageSize):
        self.add_query_param('PageSize', PageSize)

    def get_Lang(self):
        return self.get_query_params().get('Lang')

    def set_Lang(self, Lang):
        self.add_query_param('Lang', Lang)

    def get_CurrentPage(self):
        return self.get_query_params().get('CurrentPage')

    def set_CurrentPage(self, CurrentPage):
        self.add_query_param('CurrentPage', CurrentPage)

    def get_Name(self):
        return self.get_query_params().get('Name')

    def set_Name(self, Name):
        self.add_query_param('Name', Name)

    def get_RuleId(self):
        return self.get_query_params().get('RuleId')

    def set_RuleId(self, RuleId):
        self.add_query_param('RuleId', RuleId)
from aliyunsdkcore.request import RpcRequest
from aliyunsdksddp.endpoint import endpoint_data
class DescribeDataAssetsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Sddp', '2019-01-03', 'DescribeDataAssets')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_RiskLevels(self):
return self.get_query_params().get('RiskLevels')
def set_RiskLevels(self,RiskLevels):
self.add_query_param('RiskLevels',RiskLevels)
def get_RangeId(self):
return self.get_query_params().get('RangeId')
def set_RangeId(self,RangeId):
self.add_query_param('RangeId',RangeId)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_Lang(self):
return self.get_query_params().get('Lang')
def set_Lang(self,Lang):
self.add_query_param('Lang',Lang)
def get_CurrentPage(self):
return self.get_query_params().get('CurrentPage')
def set_CurrentPage(self,CurrentPage):
self.add_query_param('CurrentPage',CurrentPage)
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
self.add_query_param('Name',Name)
def get_RuleId(self):
return self.get_query_params().get('RuleId')
def set_RuleId(self,RuleId):
self.add_query_param('RuleId',RuleId) | true | true |
f720645e9f330d987e8a854268bf2c20aad7c1fa | 13,423 | py | Python | JChipSetup.py | mbuckaway/CrossMgr | 4c64e429eb3215fda1b685c5e684c56f5d0c02cf | [
"MIT"
] | 1 | 2020-02-05T11:22:03.000Z | 2020-02-05T11:22:03.000Z | JChipSetup.py | mbuckaway/CrossMgr | 4c64e429eb3215fda1b685c5e684c56f5d0c02cf | [
"MIT"
] | null | null | null | JChipSetup.py | mbuckaway/CrossMgr | 4c64e429eb3215fda1b685c5e684c56f5d0c02cf | [
"MIT"
] | null | null | null | import wx
import wx.lib.intctrl
import wx.lib.rcsizer as rcs
import socket
import sys
import re
import six
import datetime
import Model
import Utils
import JChip
import ChipReader
from JChip import EVT_CHIP_READER
import RaceResult
import Ultra
import HelpSearch
from ReadSignOnSheet import GetTagNums
# Default JChip reader endpoint.  PORT seeds the dialog's port IntCtrl;
# HOST appears unused within this module -- confirm before removing.
HOST, PORT = JChip.DEFAULT_HOST, JChip.DEFAULT_PORT
def CheckExcelLink():
	"""Check that the current race has a usable Excel link with a 'Tag' column.

	Returns a (success, message) tuple describing the result.
	"""
	race = Model.race
	if not race:
		return (False, 'No active race.')
	try:
		fields = race.excelLink.getFields()
	except (ValueError, AttributeError):
		# No link configured, or the link object is missing entirely.
		return (False, 'Unconfigured.')
	if 'Tag' not in fields:
		return (False, '"Tag" column not specified.')
	return (True, 'Excel Link OK')
#------------------------------------------------------------------------------------------------
# Dotted-quad filter: keeps IPv4 addresses, drops IPv6 (contains ':') and names.
reIP = re.compile( '^[0-9.]+$' )

def GetAllIps():
	"""Return the sorted list of this host's IPv4 addresses.

	Resolves the local hostname via getaddrinfo and keeps only entries that
	look like dotted-quad IPv4 addresses.
	"""
	addrInfo = socket.getaddrinfo( socket.gethostname(), None )
	ips = set()
	for a in addrInfo:
		try:
			ip = a[4][0]	# a[4] is the sockaddr tuple; [0] is the address
		except Exception:	# was a bare except; narrowed so Ctrl-C etc. still propagate
			continue
		if reIP.search(ip):
			ips.add( ip )
	return sorted( ips )
class JChipSetupDialog( wx.Dialog ):
	def __init__( self, parent, id = wx.ID_ANY ):
		"""Build the chip reader setup dialog: checklist text, reader
		configuration controls, the RFID test toggle and the message log."""
		wx.Dialog.__init__( self, parent, id, _("Chip Reader Setup"),
							style=wx.DEFAULT_DIALOG_STYLE|wx.TAB_TRAVERSAL )

		# Test state: timer polls the reader; receivedCount numbers the reads.
		self.timer = None
		self.receivedCount = 0
		self.refTime = None		# NOTE(review): appears unused in this class -- confirm.

		self.enableJChipCheckBox = wx.CheckBox( self, label = _('Accept RFID Reader Data During Race') )
		if Model.race:
			self.enableJChipCheckBox.SetValue( getattr(Model.race, 'enableJChipIntegration', False) )
		else:
			# No race loaded: integration cannot be toggled.
			self.enableJChipCheckBox.Enable( False )

		# Toggle button that starts/stops the RFID test (see testJChipToggle).
		self.testJChip = wx.ToggleButton( self, label = _('Start RFID Test') )
		self.testJChip.SetFont( wx.Font( (0,24), wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL ) )
		self.Bind(wx.EVT_TOGGLEBUTTON, self.testJChipToggle, self.testJChip)

		# Read-only log of reader messages; right-clicks are swallowed by skip().
		self.testList = wx.TextCtrl( self, style=wx.TE_READONLY|wx.TE_MULTILINE, size=(-1,200) )
		self.testList.Bind( wx.EVT_RIGHT_DOWN, self.skip )

		self.okBtn = wx.Button( self, wx.ID_OK )
		self.Bind( wx.EVT_BUTTON, self.onOK, self.okBtn )

		self.cancelBtn = wx.Button( self, wx.ID_CANCEL )
		self.Bind( wx.EVT_BUTTON, self.onCancel, self.cancelBtn )

		self.helpBtn = wx.Button( self, wx.ID_HELP )
		self.Bind( wx.EVT_BUTTON, lambda evt: HelpSearch.showHelp('Menu-ChipReader.html#chip-reader-setup'), self.helpBtn )

		# Chip reads arrive as events while the test is running.
		self.Bind(EVT_CHIP_READER, self.handleChipReaderEvent)

		bs = wx.BoxSizer( wx.VERTICAL )

		# Numbered pre-race checklist shown at the top of the dialog.
		todoList = u'\n'.join( '%d) %s' % (i + 1, s) for i, s in enumerate( [
			_('Make sure the RFID receiver is plugged into the network.'),
			_('If you are using Impinj/Alien, make sure the CrossMgrImpinj or CrossMgrAlien bridge programs are running.'),
			_('You must have the Sign-On Excel sheet ready and linked before your race.'),
			_('You must configure a "Tag" field in your Sign-On Excel Sheet.'),
			_('Run this test before each race.'),
		]) )
		intro = (u'\n'.join( [
			_('CrossMgr supports the JChip, RaceResult, Ultra, Impinj and Alien RFID readers.'),
			_('For more details, consult the documentation for your reader.'),
		] ) + u'\n' + _('Checklist:') + u'\n\n{}\n').format( todoList )

		border = 4
		bs.Add( wx.StaticText(self, label = intro), 0, wx.EXPAND|wx.ALL, border )
		bs.Add( self.enableJChipCheckBox, 0, wx.EXPAND|wx.ALL|wx.ALIGN_LEFT, border )

		#-------------------------------------------------------------------
		bs.AddSpacer( border )
		bs.Add( wx.StaticText( self, label = _('Reader Configuration:') ), 0, wx.EXPAND|wx.ALL, border )

		#-------------------------------------------------------------------
		# Grid of reader-type / IP-address / port controls.
		rowColSizer = rcs.RowColSizer()
		bs.Add( rowColSizer, 0, wx.EXPAND|wx.ALL, border )

		row = 0
		rowColSizer.Add( wx.StaticText( self, label=u'{}:'.format(_('Reader Type')) ), row=row, col=0, border=border,
			flag=wx.TOP|wx.LEFT|wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL )
		self.chipReaderType = wx.Choice( self, choices=[_('JChip/Impinj/Alien'), _('RaceResult'), _('Ultra')] )
		self.chipReaderType.SetSelection( 0 )
		self.chipReaderType.Bind( wx.EVT_CHOICE, self.changechipReaderType )
		rowColSizer.Add( self.chipReaderType,
			row=row, col=1, border=border, flag=wx.EXPAND|wx.TOP|wx.RIGHT|wx.ALIGN_LEFT )

		row += 1
		# Pre-fill with all local IPs so the user can tell the reader where to connect.
		sep = u' -' + _('or') + u'- '
		ips = sep.join( GetAllIps() )
		self.ipaddr = wx.TextCtrl( self, value = ips, style = wx.TE_READONLY, size=(240,-1) )
		self.autoDetect = wx.Button( self, label=_('AutoDetect') )
		self.autoDetect.Show( False )	# only shown for RaceResult/Ultra (see changechipReaderType)
		self.autoDetect.Bind( wx.EVT_BUTTON, self.doAutoDetect )
		iphs = wx.BoxSizer( wx.HORIZONTAL )
		iphs.Add( self.ipaddr, 1, flag=wx.EXPAND )
		iphs.Add( self.autoDetect, 0, flag=wx.LEFT, border=4 )
		rowColSizer.Add( wx.StaticText( self, label=_('Remote IP Address:') ),
			row=row, col=0, flag=wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL )
		rowColSizer.Add( iphs, row=row, col=1, border=border, flag=wx.EXPAND|wx.RIGHT|wx.ALIGN_LEFT )

		row += 1
		self.port = wx.lib.intctrl.IntCtrl( self, -1, min=1, max=65535, value=PORT,
			limited=True, style = wx.TE_READONLY )
		rowColSizer.Add( wx.StaticText(self, label = _('Remote Port:')), row=row, col=0,
			flag=wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL )
		rowColSizer.Add( self.port, row=row, col=1, border=border, flag=wx.EXPAND|wx.RIGHT|wx.ALIGN_LEFT )

		bs.Add( wx.StaticText( self, label = _('If using JChip, see "7 Setting of Connections" in JChip "Control Panel Soft Manual" for more details.') ),
			border=border, flag = wx.GROW|wx.ALL )

		#-------------------------------------------------------------------
		bs.Add( self.testJChip, 0, wx.ALIGN_CENTER|wx.ALL, border )
		bs.Add( wx.StaticText(self, label = _('Messages:')), 0, wx.EXPAND|wx.ALL, border=border )
		bs.Add( self.testList, 1, wx.EXPAND|wx.ALL, border )

		buttonBox = wx.BoxSizer( wx.HORIZONTAL )
		buttonBox.AddStretchSpacer()
		buttonBox.Add( self.okBtn, flag = wx.RIGHT, border=border )
		self.okBtn.SetDefault()
		buttonBox.Add( self.cancelBtn )
		buttonBox.Add( self.helpBtn )
		bs.Add( buttonBox, 0, wx.EXPAND | wx.ALL, border )

		self.stopTest()		# initialize controls to the "no test running" state

		self.SetSizerAndFit(bs)
		bs.Fit( self )

		self.update()		# populate controls from the current race

		self.CentreOnParent(wx.BOTH)
		wx.CallAfter( self.SetFocus )
def skip(self, evt):
return
	def commit( self ):
		"""Write the dialog's settings back to the current race and reset the reader."""
		race = Model.race
		if not race:
			return
		race.chipReaderType = max( 0, self.chipReaderType.GetSelection() )	# clamp "no selection" (-1) to JChip
		race.chipReaderIpAddr = self.ipaddr.GetValue()
		# Persist the host per reader type so it can be recalled later.
		# NOTE(review): changechipReaderType() reads key 'RfidReaderHost', not
		# these keys -- confirm which key is intended.
		if race.chipReaderType == 1:	# RaceResult
			Utils.writeConfig( 'RaceResultHost', race.chipReaderIpAddr )
		elif race.chipReaderType == 2:	# Ultra
			Utils.writeConfig( 'UltraHost', race.chipReaderIpAddr )
		race.chipReaderPort = self.port.GetValue()
		race.enableJChipIntegration = bool(self.enableJChipCheckBox.GetValue())
		# Re-create the reader object for the (possibly new) reader type.
		ChipReader.chipReaderCur.reset( race.chipReaderType )
	def update( self ):
		"""Populate the dialog controls from the current race's reader settings."""
		race = Model.race
		if not race:
			return
		self.enableJChipCheckBox.SetValue( race.enableJChipIntegration )
		self.chipReaderType.SetSelection( max(0, race.chipReaderType) )	# clamp -1 to JChip
		self.ipaddr.SetValue( race.chipReaderIpAddr )
		self.port.SetValue( race.chipReaderPort )
		self.changechipReaderType()	# sync field editability/visibility with the type
def changechipReaderType( self, event=None ):
selection = self.chipReaderType.GetSelection()
if selection == 0: # JChip/CrossMgrImpinj/CrossMgrAlien
self.port.SetValue( JChip.DEFAULT_PORT )
self.port.SetEditable( False )
self.ipaddr.SetValue( Utils.GetDefaultHost() )
self.ipaddr.SetEditable( False )
self.autoDetect.Show( False )
elif selection == 1: # RaceResult
self.port.SetValue( RaceResult.DEFAULT_PORT )
self.port.SetEditable( True )
self.ipaddr.SetEditable( True )
rfidReaderHost = Utils.readConfig( 'RfidReaderHost', None )
if rfidReaderHost:
try:
self.ipaddr.SetValue( rfidReaderHost )
except Exception as e:
self.ipaddr.SetValue( Utils.GetDefaultHost() )
self.autoDetect.Show( True )
elif selection == 2: # Ultra
self.port.SetValue( Ultra.DEFAULT_PORT )
self.port.SetEditable( True )
self.ipaddr.SetEditable( True )
rfidReaderHost = Utils.readConfig( 'RfidReaderHost', None )
if rfidReaderHost:
try:
self.ipaddr.SetValue( rfidReaderHost )
except Exception as e:
self.ipaddr.SetValue( Utils.GetDefaultHost() )
self.autoDetect.Show( True )
self.Layout()
self.Refresh()
	def doAutoDetect( self, event ):
		"""Probe the network for a RaceResult/Ultra reader and fill in its address."""
		selection = self.chipReaderType.GetSelection()
		# selection 1 -> RaceResult, 2 -> Ultra (selection-1 indexes this list).
		autoDetect = [RaceResult.AutoDetect, Ultra.AutoDetect][selection-1]

		def getHost():
			# Returns (error, host); exactly one of the two is None.
			wait = wx.BusyCursor()	# busy cursor for the duration of the probe
			try:
				return None, autoDetect(self.port.GetValue())
			except Exception as e:
				return e, None

		error, readerHost = getHost()
		if error:
			Utils.MessageOK(
				self,
				u'{}:\n\n{}'.format(_("AutoDetect Error"), error),
				_("AutoDetect Error"),
				wx.ICON_ERROR
			)
			return
		if not readerHost:
			Utils.MessageOK(
				self, u'{}:\n\n{}'.format(_("AutoDetect Failure"), _('Reader not found.')),
				_("AutoDetect Failure"),
				wx.ICON_ERROR
			)
			return

		self.ipaddr.SetValue( readerHost )
	def handleChipReaderEvent( self, event ):
		"""Handle a chip-read event delivered while the dialog is the reader's event window."""
		if not event.tagTimes:
			return
		tagNums = {}
		race = Model.race
		if race:
			if not race.enableUSBCamera:
				return	# presumably only needed for USB-camera triggering -- confirm
			tagNums = GetTagNums()
		tag, dt = event.tagTimes[-1]	# most recent (tag, timestamp) pair
		# NOTE(review): num (and dt) are computed but never used below --
		# looks like vestigial camera-trigger code.
		num = tagNums.get(tag, None)
	def testJChipToggle( self, event ):
		"""Start or stop the RFID test depending on the toggle button state.

		Saves the current settings first, refuses to run without a race or
		while a race is running, warns about Excel link problems, then starts
		the reader listener and a 1-second polling timer.
		"""
		self.commit()	# make sure the reader is configured with the dialog's settings
		if not Model.race:
			self.stopTest()
			Utils.MessageOK( self, _('No active race. Cannot perform RFID test. "New" or "Open" a race first.'), _('Cannot Perform RFID Test') )
			return

		if Model.race.isRunning():
			self.stopTest()
			Utils.MessageOK( self, _('Cannot perform RFID test while race is running.'), _('Cannot Perform RFID Test') )
			return

		if self.testJChip.GetValue():
			# Warn (but allow continuing) if the Excel link cannot map tags to bibs.
			correct, reason = CheckExcelLink()
			explain = _('CrossMgr will not be able to associate chip Tags with Bib numbers.') + u'\n' + \
				_('You may proceed with the test, but you need to fix the Excel sheet.') + u'\n\n' + \
				_('See documentation for details.')
			if not correct:
				if not Utils.MessageOKCancel( self, (_('Problems with Excel sheet.') + u'\n\n ' + _('Reason:') + u' {}\n\n{}').format(reason, explain),
						title = _('Excel Link Problem'), iconMask = wx.ICON_WARNING ):
					self.testJChip.SetValue( False )
					return
			tagNums = GetTagNums( True )
			if correct and not tagNums:
				if not Utils.MessageOKCancel( self, (_('All Tag entries in the Excel sheet are blank.') + u'\n\n{}').format(explain),
						title = _('Excel Link Problem'), iconMask = wx.ICON_WARNING ):
					self.testJChip.SetValue( False )
					return

			# Route reader events to this dialog and switch the UI to "testing".
			ChipReader.chipReaderCur.readerEventWindow = self
			self.testList.Clear()
			self.testJChip.SetLabel( 'Stop RFID Test' )
			self.testJChip.SetBackgroundColour( wx.Colour(255,128,128) )
			self.testJChip.SetValue( True )
			ChipReader.chipReaderCur.StartListener()
			self.appendMsg( 'listening for RFID connection...' )

			# Start a timer to monitor the receiver.
			self.receivedCount = 0
			self.timer = wx.CallLater( 1000, self.onTimerCallback, 'started' )
		else:
			self.stopTest()
def appendMsg( self, s ):
self.testList.AppendText( s + '\n' )
	def onTimerCallback( self, stat ):
		"""Poll the chip reader for queued messages, log them, and re-arm the timer."""
		data = ChipReader.chipReaderCur.GetData()
		lastTag = None
		for d in data:
			if d[0] == 'data':	# a tag read: d = ('data', tag, timestamp)
				self.receivedCount += 1
				ts = d[2].isoformat(' ')
				if len(ts) == 8:	# no fractional seconds: pad with '.00'
					ts += '.00'
				else:				# normalize the fraction-of-second display
					ts = ts[:-2]
				try:
					num = '{}'.format(Model.race.tagNums[d[1]])
				except (AttributeError, ValueError, KeyError):
					num = 'not found'	# tag not present in the Excel link
				lastTag = d[1]
				self.appendMsg( '{}: tag={}, time={}, Bib={}'.format(self.receivedCount, d[1], ts, num) )
			elif d[0] == 'connected':
				self.appendMsg( '*******************************************' )
				self.appendMsg( '{}: {}'.format(d[0], ', '.join('{}'.format(s) for s in d[1:]) ) )
			elif d[0] == 'disconnected':
				self.appendMsg( d[0] )
				self.appendMsg( '' )
				self.appendMsg( _('listening for RFID connection...') )
			elif d[0] == 'name':
				self.appendMsg( u'{}: {}'.format(_('receiver name'), d[1]) )
			else:	# unknown message type: dump the raw fields for diagnosis
				self.appendMsg( '{}: {}'.format(d[0], ', '.join('<<{}>>'.format(s) for s in d[1:]) ) )
		if data:
			self.testList.SetInsertionPointEnd()	# keep the latest messages visible
		self.timer.Restart( 1000, 'restarted' )		# poll again in one second

		# If the Find dialog is open, refresh it with the last tag seen.
		if lastTag and Utils.mainWin and getattr(Utils.mainWin, 'findDialog', None):
			if Utils.mainWin.findDialog.IsShown():
				Utils.mainWin.findDialog.refresh( lastTag )
	def stopTest( self ):
		"""Stop the RFID test (if any) and restore the dialog to its idle state."""
		ChipReader.chipReaderCur.StopListener()
		if self.timer:
			self.timer.Stop()
			self.timer = None
		self.testList.Clear()
		self.appendMsg( _('No test running.') )
		# Stop routing reader events to this dialog and reset the toggle button.
		ChipReader.chipReaderCur.readerEventWindow = None
		self.testJChip.SetLabel( _('Start RFID Test') )
		self.testJChip.SetBackgroundColour( wx.NullColour )
		self.testJChip.SetValue( False )
	def onOK( self, event ):
		"""Stop any running test, save settings to the race, and close the dialog."""
		self.stopTest()
		self.commit()
		wx.CallAfter( Utils.refresh )	# refresh the main window after the dialog closes
		self.EndModal( wx.ID_OK )
	def onCancel( self, event ):
		"""Stop any running test and close the dialog without saving."""
		self.stopTest()
		self.EndModal( wx.ID_CANCEL )
if __name__ == '__main__':
	# Manual test harness: build a populated, finished demo race and show the dialog.
	six.print_( GetAllIps() )
	#sys.exit()
	app = wx.App(False)
	mainWin = wx.Frame(None,title="CrossMan", size=(600,400))	# NOTE(review): "CrossMan" looks like a typo for "CrossMgr" -- confirm.
	Model.setRace( Model.Race() )
	Model.race._populate()
	Model.race.finishRaceNow()
	Model.race.enableUSBCamera = True	# so handleChipReaderEvent does not bail out early
	mainWin.Show()
	dlg = JChipSetupDialog( mainWin )
	dlg.ShowModal()
	dlg.Destroy()
| 33.896465 | 149 | 0.662147 | import wx
import wx.lib.intctrl
import wx.lib.rcsizer as rcs
import socket
import sys
import re
import six
import datetime
import Model
import Utils
import JChip
import ChipReader
from JChip import EVT_CHIP_READER
import RaceResult
import Ultra
import HelpSearch
from ReadSignOnSheet import GetTagNums
HOST, PORT = JChip.DEFAULT_HOST, JChip.DEFAULT_PORT
def CheckExcelLink():
race = Model.race
if not race:
return (False, 'No active race.')
try:
externalFields = race.excelLink.getFields()
except (ValueError, AttributeError):
return (False, 'Unconfigured.')
if 'Tag' not in externalFields:
return (False, '"Tag" column not specified.')
return (True, 'Excel Link OK')
reIP = re.compile( '^[0-9.]+$' )
def GetAllIps():
addrInfo = socket.getaddrinfo( socket.gethostname(), None )
ips = set()
for a in addrInfo:
try:
ip = a[4][0]
except:
continue
if reIP.search(ip):
ips.add( ip )
return sorted( ips )
class JChipSetupDialog( wx.Dialog ):
def __init__( self, parent, id = wx.ID_ANY ):
wx.Dialog.__init__( self, parent, id, _("Chip Reader Setup"),
style=wx.DEFAULT_DIALOG_STYLE|wx.TAB_TRAVERSAL )
self.timer = None
self.receivedCount = 0
self.refTime = None
self.enableJChipCheckBox = wx.CheckBox( self, label = _('Accept RFID Reader Data During Race') )
if Model.race:
self.enableJChipCheckBox.SetValue( getattr(Model.race, 'enableJChipIntegration', False) )
else:
self.enableJChipCheckBox.Enable( False )
self.testJChip = wx.ToggleButton( self, label = _('Start RFID Test') )
self.testJChip.SetFont( wx.Font( (0,24), wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL ) )
self.Bind(wx.EVT_TOGGLEBUTTON, self.testJChipToggle, self.testJChip)
self.testList = wx.TextCtrl( self, style=wx.TE_READONLY|wx.TE_MULTILINE, size=(-1,200) )
self.testList.Bind( wx.EVT_RIGHT_DOWN, self.skip )
self.okBtn = wx.Button( self, wx.ID_OK )
self.Bind( wx.EVT_BUTTON, self.onOK, self.okBtn )
self.cancelBtn = wx.Button( self, wx.ID_CANCEL )
self.Bind( wx.EVT_BUTTON, self.onCancel, self.cancelBtn )
self.helpBtn = wx.Button( self, wx.ID_HELP )
self.Bind( wx.EVT_BUTTON, lambda evt: HelpSearch.showHelp('Menu-ChipReader.html#chip-reader-setup'), self.helpBtn )
self.Bind(EVT_CHIP_READER, self.handleChipReaderEvent)
bs = wx.BoxSizer( wx.VERTICAL )
todoList = u'\n'.join( '%d) %s' % (i + 1, s) for i, s in enumerate( [
_('Make sure the RFID receiver is plugged into the network.'),
_('If you are using Impinj/Alien, make sure the CrossMgrImpinj or CrossMgrAlien bridge programs are running.'),
_('You must have the Sign-On Excel sheet ready and linked before your race.'),
_('You must configure a "Tag" field in your Sign-On Excel Sheet.'),
_('Run this test before each race.'),
]) )
intro = (u'\n'.join( [
_('CrossMgr supports the JChip, RaceResult, Ultra, Impinj and Alien RFID readers.'),
_('For more details, consult the documentation for your reader.'),
] ) + u'\n' + _('Checklist:') + u'\n\n{}\n').format( todoList )
border = 4
bs.Add( wx.StaticText(self, label = intro), 0, wx.EXPAND|wx.ALL, border )
bs.Add( self.enableJChipCheckBox, 0, wx.EXPAND|wx.ALL|wx.ALIGN_LEFT, border )
bs.AddSpacer( border )
bs.Add( wx.StaticText( self, label = _('Reader Configuration:') ), 0, wx.EXPAND|wx.ALL, border )
rowColSizer = rcs.RowColSizer()
bs.Add( rowColSizer, 0, wx.EXPAND|wx.ALL, border )
row = 0
rowColSizer.Add( wx.StaticText( self, label=u'{}:'.format(_('Reader Type')) ), row=row, col=0, border=border,
flag=wx.TOP|wx.LEFT|wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL )
self.chipReaderType = wx.Choice( self, choices=[_('JChip/Impinj/Alien'), _('RaceResult'), _('Ultra')] )
self.chipReaderType.SetSelection( 0 )
self.chipReaderType.Bind( wx.EVT_CHOICE, self.changechipReaderType )
rowColSizer.Add( self.chipReaderType,
row=row, col=1, border=border, flag=wx.EXPAND|wx.TOP|wx.RIGHT|wx.ALIGN_LEFT )
row += 1
sep = u' -' + _('or') + u'- '
ips = sep.join( GetAllIps() )
self.ipaddr = wx.TextCtrl( self, value = ips, style = wx.TE_READONLY, size=(240,-1) )
self.autoDetect = wx.Button( self, label=_('AutoDetect') )
self.autoDetect.Show( False )
self.autoDetect.Bind( wx.EVT_BUTTON, self.doAutoDetect )
iphs = wx.BoxSizer( wx.HORIZONTAL )
iphs.Add( self.ipaddr, 1, flag=wx.EXPAND )
iphs.Add( self.autoDetect, 0, flag=wx.LEFT, border=4 )
rowColSizer.Add( wx.StaticText( self, label=_('Remote IP Address:') ),
row=row, col=0, flag=wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL )
rowColSizer.Add( iphs, row=row, col=1, border=border, flag=wx.EXPAND|wx.RIGHT|wx.ALIGN_LEFT )
row += 1
self.port = wx.lib.intctrl.IntCtrl( self, -1, min=1, max=65535, value=PORT,
limited=True, style = wx.TE_READONLY )
rowColSizer.Add( wx.StaticText(self, label = _('Remote Port:')), row=row, col=0,
flag=wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL )
rowColSizer.Add( self.port, row=row, col=1, border=border, flag=wx.EXPAND|wx.RIGHT|wx.ALIGN_LEFT )
bs.Add( wx.StaticText( self, label = _('If using JChip, see "7 Setting of Connections" in JChip "Control Panel Soft Manual" for more details.') ),
border=border, flag = wx.GROW|wx.ALL )
bs.Add( self.testJChip, 0, wx.ALIGN_CENTER|wx.ALL, border )
bs.Add( wx.StaticText(self, label = _('Messages:')), 0, wx.EXPAND|wx.ALL, border=border )
bs.Add( self.testList, 1, wx.EXPAND|wx.ALL, border )
buttonBox = wx.BoxSizer( wx.HORIZONTAL )
buttonBox.AddStretchSpacer()
buttonBox.Add( self.okBtn, flag = wx.RIGHT, border=border )
self.okBtn.SetDefault()
buttonBox.Add( self.cancelBtn )
buttonBox.Add( self.helpBtn )
bs.Add( buttonBox, 0, wx.EXPAND | wx.ALL, border )
self.stopTest()
self.SetSizerAndFit(bs)
bs.Fit( self )
self.update()
self.CentreOnParent(wx.BOTH)
wx.CallAfter( self.SetFocus )
def skip(self, evt):
return
def commit( self ):
race = Model.race
if not race:
return
race.chipReaderType = max( 0, self.chipReaderType.GetSelection() )
race.chipReaderIpAddr = self.ipaddr.GetValue()
if race.chipReaderType == 1:
Utils.writeConfig( 'RaceResultHost', race.chipReaderIpAddr )
elif race.chipReaderType == 2:
Utils.writeConfig( 'UltraHost', race.chipReaderIpAddr )
race.chipReaderPort = self.port.GetValue()
race.enableJChipIntegration = bool(self.enableJChipCheckBox.GetValue())
ChipReader.chipReaderCur.reset( race.chipReaderType )
def update( self ):
race = Model.race
if not race:
return
self.enableJChipCheckBox.SetValue( race.enableJChipIntegration )
self.chipReaderType.SetSelection( max(0, race.chipReaderType) )
self.ipaddr.SetValue( race.chipReaderIpAddr )
self.port.SetValue( race.chipReaderPort )
self.changechipReaderType()
def changechipReaderType( self, event=None ):
selection = self.chipReaderType.GetSelection()
if selection == 0:
self.port.SetValue( JChip.DEFAULT_PORT )
self.port.SetEditable( False )
self.ipaddr.SetValue( Utils.GetDefaultHost() )
self.ipaddr.SetEditable( False )
self.autoDetect.Show( False )
elif selection == 1:
self.port.SetValue( RaceResult.DEFAULT_PORT )
self.port.SetEditable( True )
self.ipaddr.SetEditable( True )
rfidReaderHost = Utils.readConfig( 'RfidReaderHost', None )
if rfidReaderHost:
try:
self.ipaddr.SetValue( rfidReaderHost )
except Exception as e:
self.ipaddr.SetValue( Utils.GetDefaultHost() )
self.autoDetect.Show( True )
elif selection == 2:
self.port.SetValue( Ultra.DEFAULT_PORT )
self.port.SetEditable( True )
self.ipaddr.SetEditable( True )
rfidReaderHost = Utils.readConfig( 'RfidReaderHost', None )
if rfidReaderHost:
try:
self.ipaddr.SetValue( rfidReaderHost )
except Exception as e:
self.ipaddr.SetValue( Utils.GetDefaultHost() )
self.autoDetect.Show( True )
self.Layout()
self.Refresh()
def doAutoDetect( self, event ):
selection = self.chipReaderType.GetSelection()
autoDetect = [RaceResult.AutoDetect, Ultra.AutoDetect][selection-1]
def getHost():
wait = wx.BusyCursor()
try:
return None, autoDetect(self.port.GetValue())
except Exception as e:
return e, None
error, readerHost = getHost()
if error:
Utils.MessageOK(
self,
u'{}:\n\n{}'.format(_("AutoDetect Error"), error),
_("AutoDetect Error"),
wx.ICON_ERROR
)
return
if not readerHost:
Utils.MessageOK(
self, u'{}:\n\n{}'.format(_("AutoDetect Failure"), _('Reader not found.')),
_("AutoDetect Failure"),
wx.ICON_ERROR
)
return
self.ipaddr.SetValue( readerHost )
def handleChipReaderEvent( self, event ):
if not event.tagTimes:
return
tagNums = {}
race = Model.race
if race:
if not race.enableUSBCamera:
return
tagNums = GetTagNums()
tag, dt = event.tagTimes[-1]
num = tagNums.get(tag, None)
def testJChipToggle( self, event ):
self.commit()
if not Model.race:
self.stopTest()
Utils.MessageOK( self, _('No active race. Cannot perform RFID test. "New" or "Open" a race first.'), _('Cannot Perform RFID Test') )
return
if Model.race.isRunning():
self.stopTest()
Utils.MessageOK( self, _('Cannot perform RFID test while race is running.'), _('Cannot Perform RFID Test') )
return
if self.testJChip.GetValue():
correct, reason = CheckExcelLink()
explain = _('CrossMgr will not be able to associate chip Tags with Bib numbers.') + u'\n' + \
_('You may proceed with the test, but you need to fix the Excel sheet.') + u'\n\n' + \
_('See documentation for details.')
if not correct:
if not Utils.MessageOKCancel( self, (_('Problems with Excel sheet.') + u'\n\n ' + _('Reason:') + u' {}\n\n{}').format(reason, explain),
title = _('Excel Link Problem'), iconMask = wx.ICON_WARNING ):
self.testJChip.SetValue( False )
return
tagNums = GetTagNums( True )
if correct and not tagNums:
if not Utils.MessageOKCancel( self, (_('All Tag entries in the Excel sheet are blank.') + u'\n\n{}').format(explain),
title = _('Excel Link Problem'), iconMask = wx.ICON_WARNING ):
self.testJChip.SetValue( False )
return
ChipReader.chipReaderCur.readerEventWindow = self
self.testList.Clear()
self.testJChip.SetLabel( 'Stop RFID Test' )
self.testJChip.SetBackgroundColour( wx.Colour(255,128,128) )
self.testJChip.SetValue( True )
ChipReader.chipReaderCur.StartListener()
self.appendMsg( 'listening for RFID connection...' )
self.receivedCount = 0
self.timer = wx.CallLater( 1000, self.onTimerCallback, 'started' )
else:
self.stopTest()
def appendMsg( self, s ):
self.testList.AppendText( s + '\n' )
def onTimerCallback( self, stat ):
data = ChipReader.chipReaderCur.GetData()
lastTag = None
for d in data:
if d[0] == 'data':
self.receivedCount += 1
ts = d[2].isoformat(' ')
if len(ts) == 8:
ts += '.00'
else:
ts = ts[:-2]
try:
num = '{}'.format(Model.race.tagNums[d[1]])
except (AttributeError, ValueError, KeyError):
num = 'not found'
lastTag = d[1]
self.appendMsg( '{}: tag={}, time={}, Bib={}'.format(self.receivedCount, d[1], ts, num) )
elif d[0] == 'connected':
self.appendMsg( '*******************************************' )
self.appendMsg( '{}: {}'.format(d[0], ', '.join('{}'.format(s) for s in d[1:]) ) )
elif d[0] == 'disconnected':
self.appendMsg( d[0] )
self.appendMsg( '' )
self.appendMsg( _('listening for RFID connection...') )
elif d[0] == 'name':
self.appendMsg( u'{}: {}'.format(_('receiver name'), d[1]) )
else:
self.appendMsg( '{}: {}'.format(d[0], ', '.join('<<{}>>'.format(s) for s in d[1:]) ) )
if data:
self.testList.SetInsertionPointEnd()
self.timer.Restart( 1000, 'restarted' )
if lastTag and Utils.mainWin and getattr(Utils.mainWin, 'findDialog', None):
if Utils.mainWin.findDialog.IsShown():
Utils.mainWin.findDialog.refresh( lastTag )
def stopTest( self ):
ChipReader.chipReaderCur.StopListener()
if self.timer:
self.timer.Stop()
self.timer = None
self.testList.Clear()
self.appendMsg( _('No test running.') )
ChipReader.chipReaderCur.readerEventWindow = None
self.testJChip.SetLabel( _('Start RFID Test') )
self.testJChip.SetBackgroundColour( wx.NullColour )
self.testJChip.SetValue( False )
def onOK( self, event ):
self.stopTest()
self.commit()
wx.CallAfter( Utils.refresh )
self.EndModal( wx.ID_OK )
def onCancel( self, event ):
self.stopTest()
self.EndModal( wx.ID_CANCEL )
if __name__ == '__main__':
six.print_( GetAllIps() )
app = wx.App(False)
mainWin = wx.Frame(None,title="CrossMan", size=(600,400))
Model.setRace( Model.Race() )
Model.race._populate()
Model.race.finishRaceNow()
Model.race.enableUSBCamera = True
mainWin.Show()
dlg = JChipSetupDialog( mainWin )
dlg.ShowModal()
dlg.Destroy()
| true | true |
f72064b70b67fb3499b7878938b50a98033a491b | 845 | py | Python | examples/upload_video.py | nullwriter/ig-actor | a089107657ccdf11ba213160c4cc5d3690cecd76 | [
"MIT"
] | null | null | null | examples/upload_video.py | nullwriter/ig-actor | a089107657ccdf11ba213160c4cc5d3690cecd76 | [
"MIT"
] | null | null | null | examples/upload_video.py | nullwriter/ig-actor | a089107657ccdf11ba213160c4cc5d3690cecd76 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Use text editor to edit the script and type in valid Instagram username/password
import urllib
from bot.lib.InstagramAPI import InstagramAPI
# NOTE: this is a Python 2 script -- ``urllib.urlretrieve`` moved to
# ``urllib.request.urlretrieve`` in Python 3.
# Download a sample video and its thumbnail next to the script, then upload it.
video_url = 'https://instagram.fmad3-2.fna.fbcdn.net/t50.2886-16/17157217_1660580944235536_866261046376005632_n.mp4' #a valid instagram video
video_local_path = video_url.split("/")[-1]  # save under the original file name
thumbnail_url = "https://instagram.fmad3-2.fna.fbcdn.net/t51.2885-15/e15/17075853_1759410394387536_3927726791665385472_n.jpg"
thumbnail_local_path = thumbnail_url.split("/")[-1]
urllib.urlretrieve(video_url, video_local_path)
urllib.urlretrieve(thumbnail_url, thumbnail_local_path)

# Use a text editor to fill in a valid Instagram username/password.
user, pwd = 'user', 'password'
# Bind the client to a new name instead of shadowing the imported class.
api = InstagramAPI(user, pwd)
api.login()  # login
api.uploadVideo(video_local_path, thumbnail_local_path, caption="Tortuguero")
| 36.73913 | 141 | 0.802367 |
import urllib
from bot.lib.InstagramAPI import InstagramAPI
video_url = 'https://instagram.fmad3-2.fna.fbcdn.net/t50.2886-16/17157217_1660580944235536_866261046376005632_n.mp4'
video_local_path = video_url.split("/")[-1]
thumbnail_url = "https://instagram.fmad3-2.fna.fbcdn.net/t51.2885-15/e15/17075853_1759410394387536_3927726791665385472_n.jpg"
thumbnail_local_path = thumbnail_url.split("/")[-1]
urllib.urlretrieve(video_url,video_local_path)
urllib.urlretrieve(thumbnail_url,thumbnail_local_path)
user,pwd = 'user', 'password'
InstagramAPI = InstagramAPI(user,pwd)
InstagramAPI.login()
InstagramAPI.uploadVideo(video_local_path,thumbnail_local_path,caption="Tortuguero")
| true | true |
f72065bfefeeb5caf657c46e57376538c3455609 | 4,517 | py | Python | lib/3rdparty/common/pyqode/core/modes/autocomplete.py | tommo/gii | 03624a57cf74a07e38bfdc7f53c50bd926b7b5a7 | [
"MIT"
] | 7 | 2016-02-13T18:47:23.000Z | 2020-07-03T13:47:49.000Z | lib/3rdparty/common/pyqode/core/modes/autocomplete.py | tommo/gii | 03624a57cf74a07e38bfdc7f53c50bd926b7b5a7 | [
"MIT"
] | 1 | 2018-06-13T04:55:27.000Z | 2021-11-05T05:52:51.000Z | lib/3rdparty/common/pyqode/core/modes/autocomplete.py | tommo/gii | 03624a57cf74a07e38bfdc7f53c50bd926b7b5a7 | [
"MIT"
] | 4 | 2016-02-15T13:32:46.000Z | 2019-12-12T17:22:31.000Z | # -*- coding: utf-8 -*-
""" Contains the AutoCompleteMode """
import logging
from pyqode.qt import QtCore, QtGui
from pyqode.core.api import TextHelper
from pyqode.core.api.mode import Mode
class AutoCompleteMode(Mode):
    """ Automatically complete quotes and parentheses

    Generic auto complete mode that automatically completes the following
    symbols:

        - " -> "
        - ' -> '
        - ( -> )
        - [ -> ]
        - { -> }
    """
    #: Auto complete mapping, maps input key with completion text.
    MAPPING = {'"': '"', "'": "'", "(": ")", "{": "}", "[": "]"}
    #: The format to use for each symbol in mapping when there is a selection
    SELECTED_QUOTES_FORMATS = {key: '%s%s%s' for key in MAPPING.keys()}
    #: The format to use for each symbol in mapping when there is no selection
    QUOTES_FORMATS = {key: '%s' for key in MAPPING.keys()}

    def __init__(self):
        super(AutoCompleteMode, self).__init__()
        self.logger = logging.getLogger(__name__)
        #: When True, _on_post_key_pressed skips auto-insertion once (set when
        #: a key press with an active selection was already fully handled).
        self._ignore_post = False

    def on_state_changed(self, state):
        """Connect/disconnect the editor key signals when the mode is toggled."""
        if state:
            self.editor.post_key_pressed.connect(self._on_post_key_pressed)
            self.editor.key_pressed.connect(self._on_key_pressed)
        else:
            self.editor.post_key_pressed.disconnect(self._on_post_key_pressed)
            self.editor.key_pressed.disconnect(self._on_key_pressed)

    def _on_post_key_pressed(self, event):
        """Insert the matching closing symbol after an opening one was typed."""
        if not event.isAccepted() and not self._ignore_post:
            txt = event.text()
            trav = self.editor.textCursor()
            assert isinstance(trav, QtGui.QTextCursor)
            trav.movePosition(trav.Left, trav.MoveAnchor, 2)
            # Never auto-complete inside comments or string literals.
            literal = TextHelper(self.editor).is_comment_or_string(trav)
            if not literal:
                next_char = TextHelper(self.editor).get_right_character()
                if txt in self.MAPPING:
                    to_insert = self.MAPPING[txt]
                    # Only insert when the cursor is not glued to a word:
                    # end of line, whitespace or another bracket/quote.
                    if (not next_char or next_char in self.MAPPING.keys() or
                            next_char in self.MAPPING.values() or
                            next_char.isspace()):
                        TextHelper(self.editor).insert_text(
                            self.QUOTES_FORMATS[txt] % to_insert)
        self._ignore_post = False

    def _on_key_pressed(self, event):
        """Handle quoting of selections, pair deletion on backspace and
        skipping over an already present closing symbol.
        """
        # Fix: removed a redundant local ``from pyqode.qt import QtGui`` that
        # shadowed the module-level import; reuse ``txt`` instead of calling
        # event.text() repeatedly (the value is constant within the handler).
        txt = event.text()
        cursor = self.editor.textCursor()
        assert isinstance(cursor, QtGui.QTextCursor)
        if cursor.hasSelection():
            # quoting of selected text
            if txt in self.MAPPING.keys():
                first = txt
                last = self.MAPPING[txt]
                cursor.insertText(
                    self.SELECTED_QUOTES_FORMATS[txt] % (
                        first, cursor.selectedText(), last))
                self.editor.setTextCursor(cursor)
                event.accept()
            else:
                self._ignore_post = True
            return
        next_char = TextHelper(self.editor).get_right_character()
        self.logger.debug('next char: %s', next_char)
        ignore = False
        if event.key() == QtCore.Qt.Key_Backspace:
            # get the character that will get deleted
            tc = self.editor.textCursor()
            pos = tc.position()
            tc.movePosition(tc.Left)
            tc.movePosition(tc.Right, tc.KeepAnchor)
            del_char = tc.selectedText()
            if del_char in self.MAPPING and \
                    self.MAPPING[del_char] == next_char:
                # Deleting an opening symbol also removes its auto-inserted
                # counterpart to its right.
                tc.beginEditBlock()
                tc.movePosition(tc.Right, tc.KeepAnchor)
                tc.insertText('')
                tc.setPosition(pos - 2)
                tc.endEditBlock()
                self.editor.setTextCursor(tc)
                ignore = True
        elif txt and next_char == txt and next_char in self.MAPPING:
            ignore = True
        elif txt in (')', ']', '}'):
            # if typing the same symbol twice, the symbol should not be written
            # and the cursor moved just after the char
            # e.g. if you type ) just before ), the cursor will just move after
            # the existing )
            if next_char == txt:
                ignore = True
        if ignore:
            event.accept()
            TextHelper(self.editor).clear_selection()
            TextHelper(self.editor).move_right()
| 41.440367 | 79 | 0.570511 |
import logging
from pyqode.qt import QtCore, QtGui
from pyqode.core.api import TextHelper
from pyqode.core.api.mode import Mode
class AutoCompleteMode(Mode):
MAPPING = {'"': '"', "'": "'", "(": ")", "{": "}", "[": "]"}
SELECTED_QUOTES_FORMATS = {key: '%s%s%s' for key in MAPPING.keys()}
QUOTES_FORMATS = {key: '%s' for key in MAPPING.keys()}
def __init__(self):
super(AutoCompleteMode, self).__init__()
self.logger = logging.getLogger(__name__)
self._ignore_post = False
def on_state_changed(self, state):
if state:
self.editor.post_key_pressed.connect(self._on_post_key_pressed)
self.editor.key_pressed.connect(self._on_key_pressed)
else:
self.editor.post_key_pressed.disconnect(self._on_post_key_pressed)
self.editor.key_pressed.disconnect(self._on_key_pressed)
def _on_post_key_pressed(self, event):
if not event.isAccepted() and not self._ignore_post:
txt = event.text()
trav = self.editor.textCursor()
assert isinstance(trav, QtGui.QTextCursor)
trav.movePosition(trav.Left, trav.MoveAnchor, 2)
literal = TextHelper(self.editor).is_comment_or_string(trav)
if not literal:
next_char = TextHelper(self.editor).get_right_character()
if txt in self.MAPPING:
to_insert = self.MAPPING[txt]
if (not next_char or next_char in self.MAPPING.keys() or
next_char in self.MAPPING.values() or
next_char.isspace()):
TextHelper(self.editor).insert_text(
self.QUOTES_FORMATS[txt] % to_insert)
self._ignore_post = False
def _on_key_pressed(self, event):
txt = event.text()
cursor = self.editor.textCursor()
from pyqode.qt import QtGui
assert isinstance(cursor, QtGui.QTextCursor)
if cursor.hasSelection():
if event.text() in self.MAPPING.keys():
first = event.text()
last = self.MAPPING[event.text()]
cursor.insertText(
self.SELECTED_QUOTES_FORMATS[event.text()] % (
first, cursor.selectedText(), last))
self.editor.setTextCursor(cursor)
event.accept()
else:
self._ignore_post = True
return
next_char = TextHelper(self.editor).get_right_character()
self.logger.debug('next char: %s', next_char)
ignore = False
if event.key() == QtCore.Qt.Key_Backspace:
tc = self.editor.textCursor()
pos = tc.position()
tc.movePosition(tc.Left)
tc.movePosition(tc.Right, tc.KeepAnchor)
del_char = tc.selectedText()
if del_char in self.MAPPING and \
self.MAPPING[del_char] == next_char:
tc.beginEditBlock()
tc.movePosition(tc.Right, tc.KeepAnchor)
tc.insertText('')
tc.setPosition(pos - 2)
tc.endEditBlock()
self.editor.setTextCursor(tc)
ignore = True
elif txt and next_char == txt and next_char in self.MAPPING:
ignore = True
elif event.text() == ')' or event.text() == ']' or event.text() == '}':
if next_char == event.text():
ignore = True
if ignore:
event.accept()
TextHelper(self.editor).clear_selection()
TextHelper(self.editor).move_right()
| true | true |
f720669f2683fff61a73382464913841475adbc5 | 18,762 | py | Python | mslib/mswms/dataaccess.py | iamansoni/MSS | 69bc8fc61ab277697ca691119f911382a63860c0 | [
"Apache-2.0"
] | null | null | null | mslib/mswms/dataaccess.py | iamansoni/MSS | 69bc8fc61ab277697ca691119f911382a63860c0 | [
"Apache-2.0"
] | null | null | null | mslib/mswms/dataaccess.py | iamansoni/MSS | 69bc8fc61ab277697ca691119f911382a63860c0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
mslib.mswms.dataaccess
~~~~~~~~~~~~~~~~~~~~~~
This module provides functions to access data
This file is part of mss.
:copyright: Copyright 2008-2014 Deutsches Zentrum fuer Luft- und Raumfahrt e.V.
:copyright: Copyright 2011-2014 Marc Rautenhaus (mr)
:copyright: Copyright 2016-2020 by the mss team, see AUTHORS.
:license: APACHE-2.0, see LICENSE for details.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import ABCMeta, abstractmethod
import itertools
import os
import logging
import netCDF4
import numpy as np
import pint
from mslib import netCDF4tools
from mslib.utils import UR
class NWPDataAccess(metaclass=ABCMeta):
    """Abstract superclass providing a framework to let the user query
    in which data file a given variable at a given time can be found.

    The class provides the method get_filename(). It derives filenames from
    CF variable names, initialisation and valid times.

    The method get_datapath() provides the root path where the data
    can be found.

    In subclasses, the protected method _determine_filename() must be
    implemented.
    """

    def __init__(self, rootpath, uses_init_time=True, uses_valid_time=True):
        """Constructor takes the path of the data directory and determines whether
        this class employs different init_times or valid_times.
        """
        self._root_path = rootpath
        self._modelname = ""
        self._use_init_time = uses_init_time
        self._use_valid_time = uses_valid_time

    @abstractmethod
    def setup(self):
        """Checks for existing files etc. and sets up the class. Called by
        server whenever a client requests a current capability document.
        """
        pass

    def have_data(self, variable, vartype, init_time, valid_time):
        """Checks whether a file with data for the specified variable,
        type and times is known. This does not trigger a search for
        updated data files on disk.
        """
        try:
            self._determine_filename(
                variable, vartype, init_time, valid_time, reload=False)
        except ValueError:
            return False
        else:
            return True

    def get_filename(self, variable, vartype, init_time, valid_time,
                     fullpath=False):
        """Get the filename of the file in which a given variable at
        a given time can be found.

        In case no file is available, the disk is searched for updated
        data before failing.

        Arguments:
        variable -- string with CF name of variable
        vartype -- string specifying the type of the variable (model specific).
                   For example, can be ml (model level), pl (pressure level),
                   or sfc (surface) for, e.g., ECMWF data.
        init_time -- datetime object with initialisation time of forecast run
        valid_time -- datetime object with valid time of forecast
        fullpath -- if True, the complete path to the file will be returned.
                    Default is False, only the filename will be returned.
        """
        filename = self._determine_filename(variable, vartype,
                                            init_time, valid_time)
        if fullpath:
            return os.path.join(self._root_path, filename)
        else:
            return filename

    @abstractmethod
    def _determine_filename(self, variable, vartype, init_time, valid_time):
        """Must be overwritten in subclass. Determines the filename
        (without path) of the variable <variable> at the forecast
        timestep specified by init_time and valid_time.
        """
        pass

    def get_datapath(self):
        """Return the path to the data directory.
        """
        return self._root_path

    def uses_inittime_dimension(self):
        """ Return whether this data set supports multiple init times
        """
        return self._use_init_time

    def uses_validtime_dimension(self):
        """ Return whether this data set supports multiple valid times
        """
        return self._use_valid_time

    @abstractmethod
    def get_all_datafiles(self):
        """Return a list of all available data files.
        """
        pass

    @abstractmethod
    def get_init_times(self):
        """Return a list of available forecast init times (base times).
        """
        pass

    @abstractmethod
    def get_valid_times(self):
        """Return a list of available forecast times.
        """
        pass

    @abstractmethod
    def get_elevations(self, vert_type):
        """Return a list of available elevations for a vertical level type.
        """
        pass

    @abstractmethod
    def get_elevation_units(self, vert_type):
        """Returns units of supplied vertical type.
        """
        pass

    #: Additional keyword arguments for the MFDatasetCommonDims instance
    #: handling this dataset's input data; subclasses may override it.
    _mfDatasetArgsDict = {}

    def mfDatasetArgs(self):
        """Returns additional keyword for the MFDatasetCommonDims instance that
        handles the input data of this dataset. See the MFDatasetCommonDims
        documentation for further details.

        Mainly provided as a workaround for numerical inaccuracies introduced
        to the NetCDF files by netcdf-java 4.3.
        (mr, 16Oct2012)
        """
        return self._mfDatasetArgsDict
class DefaultDataAccess(NWPDataAccess):
    """
    Subclass to NWPDataAccess for accessing properly constructed NetCDF files

    Constructor needs information on domain ID.
    """

    # Workaround for the numerical issue concering the lon dimension in
    # NetCDF files produced by netcdf-java 4.3..
    def __init__(self, rootpath, domain_id, skip_dim_check=None, **kwargs):
        """Constructor takes the path of the data directory and determines whether
        this class employs different init_times or valid_times.

        Arguments:
        rootpath -- path of the data directory
        domain_id -- substring that filenames of this domain must contain
        skip_dim_check -- optional list of dimensions to exclude from the
                          dimension consistency check (forwarded via
                          mfDatasetArgs). Defaults to an empty list.
        """
        NWPDataAccess.__init__(self, rootpath, **kwargs)
        self._domain_id = domain_id
        self._available_files = None
        self._filetree = None
        # Fix: avoid a mutable default argument; the empty list is created
        # per call instead of being shared between instances.
        if skip_dim_check is None:
            skip_dim_check = []
        self._mfDatasetArgsDict = {"skip_dim_check": skip_dim_check}

    def _determine_filename(self, variable, vartype, init_time, valid_time, reload=True):
        """Determines the name of the data file that contains
        the variable <variable> with type <vartype> of the forecast specified
        by <init_time> and <valid_time>.
        """
        assert self._filetree is not None, "filetree is None. Forgot to call setup()?"
        try:
            return self._filetree[vartype][init_time][variable][valid_time]
        except KeyError:
            if reload:
                # Rescan the disk once before giving up; new files may have
                # appeared since the last setup() call.
                self.setup()
            try:
                return self._filetree[vartype][init_time][variable][valid_time]
            except KeyError as ex:
                logging.error("Could not identify filename. %s %s %s %s %s %s",
                              variable, vartype, init_time, valid_time, type(ex), ex)
                raise ValueError("variable type {} not available for variable {}"
                                 .format(vartype, variable))

    def _parse_file(self, filename):
        """Open one NetCDF file and extract vertical type, elevations,
        init/valid times and the CF standard_names of usable variables.

        Raises IOError when the file is inconsistent with previously parsed
        files or malformed.
        """
        elevations = {"levels": [], "units": None}
        with netCDF4.Dataset(os.path.join(self._root_path, filename)) as dataset:
            time_name, time_var = netCDF4tools.identify_CF_time(dataset)
            # The reference date of the time axis serves as init time.
            init_time = netCDF4tools.num2date(0, time_var.units)
            if not self.uses_inittime_dimension():
                init_time = None
            valid_times = netCDF4tools.num2date(time_var[:], time_var.units)
            if not self.uses_validtime_dimension():
                if len(valid_times) > 0:
                    raise IOError("Skipping file '{}: no support for valid time, but multiple "
                                  "time steps present".format(filename))
                valid_times = [None]
            lat_name, lat_var, lon_name, lon_var = netCDF4tools.identify_CF_lonlat(dataset)
            vert_name, vert_var, _, _, vert_type = netCDF4tools.identify_vertical_axis(dataset)

            if len(time_var.dimensions) != 1 or time_var.dimensions[0] != time_name:
                raise IOError("Problem with time coordinate variable")
            if len(lat_var.dimensions) != 1 or lat_var.dimensions[0] != lat_name:
                raise IOError("Problem with latitude coordinate variable")
            if len(lon_var.dimensions) != 1 or lon_var.dimensions[0] != lon_name:
                raise IOError("Problem with longitude coordinate variable")

            if vert_type != "sfc":
                elevations = {"levels": vert_var[:], "units": vert_var.units}
                if vert_type in self._elevations:
                    # Levels and units must agree with previously seen files
                    # of the same vertical type.
                    if len(vert_var[:]) != len(self._elevations[vert_type]["levels"]):
                        raise IOError("Number of vertical levels does not fit to levels of "
                                      "previous file '{}'.".format(self._elevations[vert_type]["filename"]))
                    if not np.allclose(vert_var[:], self._elevations[vert_type]["levels"]):
                        raise IOError("vertical levels do not fit to levels of previous "
                                      "file '{}'.".format(self._elevations[vert_type]["filename"]))
                    if elevations["units"] != self._elevations[vert_type]["units"]:
                        raise IOError("vertical level units do not match previous file '{}'".format(
                            self._elevations[vert_type]["filename"]))

            standard_names = []
            for ncvarname, ncvar in dataset.variables.items():
                if hasattr(ncvar, "standard_name"):
                    if (len(ncvar.dimensions) >= 3 and (
                            ncvar.dimensions[0] != time_name or
                            ncvar.dimensions[-2] != lat_name or
                            ncvar.dimensions[-1] != lon_name)):
                        logging.error("Skipping variable '%s' in file '%s': Incorrect order of dimensions",
                                      ncvarname, filename)
                        continue
                    if not hasattr(ncvar, "units"):
                        logging.error("Skipping variable '%s' in file '%s': No units attribute",
                                      ncvarname, filename)
                        continue
                    if ncvar.standard_name != "time":
                        # Reject variables whose units pint cannot parse.
                        try:
                            UR(ncvar.units)
                        except (ValueError, pint.UndefinedUnitError):
                            logging.error("Skipping variable '%s' in file '%s': unparseable units attribute '%s'",
                                          ncvarname, filename, ncvar.units)
                            continue
                    if len(ncvar.shape) == 4 and vert_name in ncvar.dimensions:
                        standard_names.append(ncvar.standard_name)
                    elif len(ncvar.shape) == 3 and vert_type == "sfc":
                        standard_names.append(ncvar.standard_name)
        return {
            "vert_type": vert_type,
            "elevations": elevations,
            "init_time": init_time,
            "valid_times": valid_times,
            "standard_names": standard_names
        }

    def _add_to_filetree(self, filename, content):
        """Insert a parsed file into the lookup tree
        _filetree[vert_type][init_time][standard_name][valid_time] -> filename.
        """
        logging.info("File '%s' identified as '%s' type", filename, content["vert_type"])
        logging.info("Found init time '%s', %s valid_times and %s standard_names",
                     content["init_time"], len(content["valid_times"]), len(content["standard_names"]))
        if len(content["valid_times"]) == 0 or len(content["standard_names"]) == 0:
            logging.error(
                "Something is wrong with this file... valid_times='%s' standard_names='%s'",
                content["valid_times"], content["standard_names"])
        else:
            logging.debug("valid_times='%s' standard_names='%s'",
                          content["valid_times"], content["standard_names"])
        leaf = self._filetree.setdefault(content["vert_type"], {}).setdefault(content["init_time"], {})
        for standard_name in content["standard_names"]:
            var_leaf = leaf.setdefault(standard_name, {})
            for valid_time in content["valid_times"]:
                if valid_time in var_leaf:
                    # Keep the first file found; duplicates are only reported.
                    logging.warning(
                        "some data was found twice! vartype='%s' init_time='%s' standard_name='%s' "
                        "valid_time='%s' first_file='%s' second_file='%s'",
                        content["vert_type"], content["init_time"], standard_name,
                        valid_time, var_leaf[valid_time], filename)
                else:
                    var_leaf[valid_time] = filename

    def setup(self):
        """Scan the data directory and rebuild the file lookup tree."""
        # Get a list of the available data files.
        self._available_files = [
            _filename for _filename in sorted(os.listdir(self._root_path)) if self._domain_id in _filename]
        logging.info("Files identified for domain '%s': %s",
                     self._domain_id, self._available_files)

        self._filetree = {}
        self._elevations = {"sfc": {"filename": None, "levels": [], "units": None}}

        # Build the tree structure.
        for filename in self._available_files:
            logging.info("Opening candidate '%s'", filename)
            try:
                content = self._parse_file(filename)
            except IOError as ex:
                logging.error("Skipping file '%s' (%s: %s)", filename, type(ex), ex)
                continue
            if content["vert_type"] not in self._elevations:
                self._elevations[content["vert_type"]] = content["elevations"]
            self._add_to_filetree(filename, content)

    def get_init_times(self):
        """Returns a list of available forecast init times (base times).
        """
        init_times = set(itertools.chain.from_iterable(
            self._filetree[_x].keys() for _x in self._filetree))
        return sorted(init_times)

    def get_valid_times(self, variable, vartype, init_time):
        """Returns a list of available valid times for the specified
        variable at the specified init time.
        """
        try:
            return sorted(self._filetree[vartype][init_time][variable])
        except KeyError as ex:
            logging.error("Could not find times! %s %s", type(ex), ex)
            return []

    def get_elevations(self, vert_type):
        """Return a list of available elevations for a vertical level type.
        """
        logging.debug("%s", self._elevations)
        return self._elevations[vert_type]["levels"]

    def get_elevation_units(self, vert_type):
        """Return the units string of the elevations for a vertical level type.
        """
        # Fix: previous docstring wrongly claimed this returns elevations.
        logging.debug("%s", self._elevations)
        return self._elevations[vert_type]["units"]

    def get_all_valid_times(self, variable, vartype):
        """Similar to get_valid_times(), but returns the combined valid times
        of all available init times.
        """
        all_valid_times = []
        if vartype not in self._filetree:
            return []
        for init_time in self._filetree[vartype]:
            if variable in self._filetree[vartype][init_time]:
                all_valid_times.extend(list(self._filetree[vartype][init_time][variable]))
        return sorted(set(all_valid_times))

    def get_all_datafiles(self):
        """Return a list of all available data files.
        """
        return self._available_files
class CachedDataAccess(DefaultDataAccess):
    """
    Subclass to NWPDataAccess for accessing properly constructed NetCDF files

    Constructor needs information on domain ID.

    Uses file name and modification date to reduce setup time by caching directory
    content in a dictionary.
    """

    def __init__(self, rootpath, domain_id, **kwargs):
        """Constructor takes the path of the data directory and determines whether
        this class employs different init_times or valid_times.
        """
        DefaultDataAccess.__init__(self, rootpath, domain_id, **kwargs)
        # Maps filename -> (mtime, parsed content); entries are reused as long
        # as the file's modification time is unchanged.
        self._file_cache = {}

    def setup(self):
        """Scan the data directory, reusing cached parse results where the
        file modification time has not changed.
        """
        # Get a list of the available data files.
        self._available_files = [
            _filename for _filename in os.listdir(self._root_path) if self._domain_id in _filename]
        logging.info("Files identified for domain '%s': %s",
                     self._domain_id, self._available_files)

        # Drop cache entries for files that disappeared from disk.
        for filename in list(self._file_cache):
            if filename not in self._available_files:
                del self._file_cache[filename]

        self._filetree = {}
        # Fix: include the "units" key so the structure is consistent with
        # DefaultDataAccess.setup() and with the comparisons in _parse_file().
        self._elevations = {"sfc": {"filename": None, "levels": [], "units": None}}

        # Build the tree structure.
        for filename in self._available_files:
            mtime = os.path.getmtime(os.path.join(self._root_path, filename))
            if filename in self._file_cache and mtime == self._file_cache[filename][0]:
                logging.info("Using cached candidate '%s'", filename)
                content = self._file_cache[filename][1]
                if content["vert_type"] != "sfc":
                    if content["vert_type"] not in self._elevations:
                        self._elevations[content["vert_type"]] = content["elevations"]
                    elif not np.allclose(
                            self._elevations[content["vert_type"]]["levels"],
                            content["elevations"]["levels"]):
                        logging.error("Skipping file '%s' due to elevation mismatch", filename)
                        continue
            else:
                # Stale or missing cache entry: re-parse the file from disk.
                if filename in self._file_cache:
                    del self._file_cache[filename]
                logging.info("Opening candidate '%s'", filename)
                try:
                    content = self._parse_file(filename)
                except IOError as ex:
                    logging.error("Skipping file '%s' (%s: %s)", filename, type(ex), ex)
                    continue
                self._file_cache[filename] = (mtime, content)

            self._add_to_filetree(filename, content)
| 43.531323 | 114 | 0.600149 |
from abc import ABCMeta, abstractmethod
import itertools
import os
import logging
import netCDF4
import numpy as np
import pint
from mslib import netCDF4tools
from mslib.utils import UR
class NWPDataAccess(metaclass=ABCMeta):
def __init__(self, rootpath, uses_init_time=True, uses_valid_time=True):
self._root_path = rootpath
self._modelname = ""
self._use_init_time = uses_init_time
self._use_valid_time = uses_valid_time
@abstractmethod
def setup(self):
pass
def have_data(self, variable, vartype, init_time, valid_time):
try:
self._determine_filename(
variable, vartype, init_time, valid_time, reload=False)
except ValueError:
return False
else:
return True
def get_filename(self, variable, vartype, init_time, valid_time,
fullpath=False):
filename = self._determine_filename(variable, vartype,
init_time, valid_time)
if fullpath:
return os.path.join(self._root_path, filename)
else:
return filename
@abstractmethod
def _determine_filename(self, variable, vartype, init_time, valid_time):
pass
def get_datapath(self):
return self._root_path
def uses_inittime_dimension(self):
return self._use_init_time
def uses_validtime_dimension(self):
return self._use_valid_time
@abstractmethod
def get_all_datafiles(self):
pass
@abstractmethod
def get_init_times(self):
pass
@abstractmethod
def get_valid_times(self):
pass
@abstractmethod
def get_elevations(self, vert_type):
pass
@abstractmethod
def get_elevation_units(self, vert_type):
pass
_mfDatasetArgsDict = {}
def mfDatasetArgs(self):
return self._mfDatasetArgsDict
class DefaultDataAccess(NWPDataAccess):
def __init__(self, rootpath, domain_id, skip_dim_check=[], **kwargs):
NWPDataAccess.__init__(self, rootpath, **kwargs)
self._domain_id = domain_id
self._available_files = None
self._filetree = None
self._mfDatasetArgsDict = {"skip_dim_check": skip_dim_check}
def _determine_filename(self, variable, vartype, init_time, valid_time, reload=True):
assert self._filetree is not None, "filetree is None. Forgot to call setup()?"
try:
return self._filetree[vartype][init_time][variable][valid_time]
except KeyError:
if reload:
self.setup()
try:
return self._filetree[vartype][init_time][variable][valid_time]
except KeyError as ex:
logging.error("Could not identify filename. %s %s %s %s %s %s",
variable, vartype, init_time, valid_time, type(ex), ex)
raise ValueError("variable type {} not available for variable {}"
.format(vartype, variable))
def _parse_file(self, filename):
elevations = {"levels": [], "units": None}
with netCDF4.Dataset(os.path.join(self._root_path, filename)) as dataset:
time_name, time_var = netCDF4tools.identify_CF_time(dataset)
init_time = netCDF4tools.num2date(0, time_var.units)
if not self.uses_inittime_dimension():
init_time = None
valid_times = netCDF4tools.num2date(time_var[:], time_var.units)
if not self.uses_validtime_dimension():
if len(valid_times) > 0:
raise IOError("Skipping file '{}: no support for valid time, but multiple "
"time steps present".format(filename))
valid_times = [None]
lat_name, lat_var, lon_name, lon_var = netCDF4tools.identify_CF_lonlat(dataset)
vert_name, vert_var, _, _, vert_type = netCDF4tools.identify_vertical_axis(dataset)
if len(time_var.dimensions) != 1 or time_var.dimensions[0] != time_name:
raise IOError("Problem with time coordinate variable")
if len(lat_var.dimensions) != 1 or lat_var.dimensions[0] != lat_name:
raise IOError("Problem with latitude coordinate variable")
if len(lon_var.dimensions) != 1 or lon_var.dimensions[0] != lon_name:
raise IOError("Problem with longitude coordinate variable")
if vert_type != "sfc":
elevations = {"levels": vert_var[:], "units": vert_var.units}
if vert_type in self._elevations:
if len(vert_var[:]) != len(self._elevations[vert_type]["levels"]):
raise IOError("Number of vertical levels does not fit to levels of "
"previous file '{}'.".format(self._elevations[vert_type]["filename"]))
if not np.allclose(vert_var[:], self._elevations[vert_type]["levels"]):
raise IOError("vertical levels do not fit to levels of previous "
"file '{}'.".format(self._elevations[vert_type]["filename"]))
if elevations["units"] != self._elevations[vert_type]["units"]:
raise IOError("vertical level units do not match previous file '{}'".format(
self._elevations[vert_type]["filename"]))
standard_names = []
for ncvarname, ncvar in dataset.variables.items():
if hasattr(ncvar, "standard_name"):
if (len(ncvar.dimensions) >= 3 and (
ncvar.dimensions[0] != time_name or
ncvar.dimensions[-2] != lat_name or
ncvar.dimensions[-1] != lon_name)):
logging.error("Skipping variable '%s' in file '%s': Incorrect order of dimensions",
ncvarname, filename)
continue
if not hasattr(ncvar, "units"):
logging.error("Skipping variable '%s' in file '%s': No units attribute",
ncvarname, filename)
continue
if ncvar.standard_name != "time":
try:
UR(ncvar.units)
except (ValueError, pint.UndefinedUnitError):
logging.error("Skipping variable '%s' in file '%s': unparseable units attribute '%s'",
ncvarname, filename, ncvar.units)
continue
if len(ncvar.shape) == 4 and vert_name in ncvar.dimensions:
standard_names.append(ncvar.standard_name)
elif len(ncvar.shape) == 3 and vert_type == "sfc":
standard_names.append(ncvar.standard_name)
return {
"vert_type": vert_type,
"elevations": elevations,
"init_time": init_time,
"valid_times": valid_times,
"standard_names": standard_names
}
def _add_to_filetree(self, filename, content):
logging.info("File '%s' identified as '%s' type", filename, content["vert_type"])
logging.info("Found init time '%s', %s valid_times and %s standard_names",
content["init_time"], len(content["valid_times"]), len(content["standard_names"]))
if len(content["valid_times"]) == 0 or len(content["standard_names"]) == 0:
logging.error(
"Something is wrong with this file... valid_times='%s' standard_names='%s'",
content["valid_times"], content["standard_names"])
else:
logging.debug("valid_times='%s' standard_names='%s'",
content["valid_times"], content["standard_names"])
leaf = self._filetree.setdefault(content["vert_type"], {}).setdefault(content["init_time"], {})
for standard_name in content["standard_names"]:
var_leaf = leaf.setdefault(standard_name, {})
for valid_time in content["valid_times"]:
if valid_time in var_leaf:
logging.warning(
"some data was found twice! vartype='%s' init_time='%s' standard_name='%s' "
"valid_time='%s' first_file='%s' second_file='%s'",
content["vert_type"], content["init_time"], standard_name,
valid_time, var_leaf[valid_time], filename)
else:
var_leaf[valid_time] = filename
def setup(self):
# Get a list of the available data files.
self._available_files = [
_filename for _filename in sorted(os.listdir(self._root_path)) if self._domain_id in _filename]
logging.info("Files identified for domain '%s': %s",
self._domain_id, self._available_files)
self._filetree = {}
self._elevations = {"sfc": {"filename": None, "levels": [], "units": None}}
# Build the tree structure.
for filename in self._available_files:
logging.info("Opening candidate '%s'", filename)
try:
content = self._parse_file(filename)
except IOError as ex:
logging.error("Skipping file '%s' (%s: %s)", filename, type(ex), ex)
continue
if content["vert_type"] not in self._elevations:
self._elevations[content["vert_type"]] = content["elevations"]
self._add_to_filetree(filename, content)
def get_init_times(self):
init_times = set(itertools.chain.from_iterable(
self._filetree[_x].keys() for _x in self._filetree))
return sorted(init_times)
def get_valid_times(self, variable, vartype, init_time):
try:
return sorted(self._filetree[vartype][init_time][variable])
except KeyError as ex:
logging.error("Could not find times! %s %s", type(ex), ex)
return []
def get_elevations(self, vert_type):
logging.debug("%s", self._elevations)
return self._elevations[vert_type]["levels"]
def get_elevation_units(self, vert_type):
logging.debug("%s", self._elevations)
return self._elevations[vert_type]["units"]
def get_all_valid_times(self, variable, vartype):
all_valid_times = []
if vartype not in self._filetree:
return []
for init_time in self._filetree[vartype]:
if variable in self._filetree[vartype][init_time]:
all_valid_times.extend(list(self._filetree[vartype][init_time][variable]))
return sorted(set(all_valid_times))
def get_all_datafiles(self):
return self._available_files
class CachedDataAccess(DefaultDataAccess):
def __init__(self, rootpath, domain_id, **kwargs):
DefaultDataAccess.__init__(self, rootpath, domain_id, **kwargs)
self._file_cache = {}
def setup(self):
# Get a list of the available data files.
self._available_files = [
_filename for _filename in os.listdir(self._root_path) if self._domain_id in _filename]
logging.info("Files identified for domain '%s': %s",
self._domain_id, self._available_files)
for filename in list(self._file_cache):
if filename not in self._available_files:
del self._file_cache[filename]
self._filetree = {}
self._elevations = {"sfc": {"filename": None, "levels": []}}
# Build the tree structure.
for filename in self._available_files:
mtime = os.path.getmtime(os.path.join(self._root_path, filename))
if filename in self._file_cache and mtime == self._file_cache[filename][0]:
logging.info("Using cached candidate '%s'", filename)
content = self._file_cache[filename][1]
if content["vert_type"] != "sfc":
if content["vert_type"] not in self._elevations:
self._elevations[content["vert_type"]] = content["elevations"]
elif not np.allclose(
self._elevations[content["vert_type"]]["levels"],
content["elevations"]["levels"]):
logging.error("Skipping file '%s' due to elevation mismatch", filename)
continue
else:
if filename in self._file_cache:
del self._file_cache[filename]
logging.info("Opening candidate '%s'", filename)
try:
content = self._parse_file(filename)
except IOError as ex:
logging.error("Skipping file '%s' (%s: %s)", filename, type(ex), ex)
continue
self._file_cache[filename] = (mtime, content)
self._add_to_filetree(filename, content)
| true | true |
f7206716b828e3c1c1ab325cae68cae705a72727 | 9,010 | py | Python | scalability/experiments/run_mainnet.py | Deland-Labs/ic | 047172b01e0afc0e61448669d4ec98b2425c6853 | [
"Apache-2.0"
] | 1 | 2021-12-01T03:48:42.000Z | 2021-12-01T03:48:42.000Z | scalability/experiments/run_mainnet.py | Deland-Labs/ic | 047172b01e0afc0e61448669d4ec98b2425c6853 | [
"Apache-2.0"
] | null | null | null | scalability/experiments/run_mainnet.py | Deland-Labs/ic | 047172b01e0afc0e61448669d4ec98b2425c6853 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import subprocess
import sys
import time
from typing import List
import gflags
FLAGS = gflags.FLAGS
gflags.DEFINE_bool("use_updates", False, "Issue update calls instead of query calls")
class Mainnet:
    """Wrapper to run against subnetworks in mainnet concurrently.

    Pairs each mainnet load target (subnet ID -> counter canister) with a
    workload-generator slot on one of the booked testnets, then runs one
    benchmark per pair, each in its own tmux window.
    """
    def __init__(self):
        """Initialize and verify there is enough testnet capacity."""
        if FLAGS.testnet == "mercury" and FLAGS.target_subnet_id is None:
            raise Exception("--target_subnet_id has to be set when running against mainnet")

        # Testnets you have booked and the number of subnetworks each (including NNS)
        self.testnets = {
            "large01": 5,
            # "large02": 5,
            "large03": 5,
            "large04": 5,
            "large05": 5,
            # "medium01": 2,
            # "medium03": 2,
            "medium04": 2,
            # "medium06": 2,
            # "medium07": 2,
            # "medium08": 2,
            # "medium09": 2,
        }

        # All subnets with the ID of the counter canister.
        # Uncomment if you want to run against that subnetwork.
        # sum(self.testnets.items()) has to be larger than the number of subnets uncommented here.
        self.load_targets = {
            # --- pjljw has a lot of traffic, so perhaps avoid
            # "pjljw-kztyl-46ud4-ofrj6-nzkhm-3n4nt-wi3jt-ypmav-ijqkt-gjf66-uae": "ifkln-viaaa-aaaah-qccva-cai",
            "ejbmu-grnam-gk6ol-6irwa-htwoj-7ihfl-goimw-hlnvh-abms4-47v2e-zqe": "nffi3-byaaa-aaaae-qaava-cai",
            # # 404 - [MessageId(...)]: Update returned non-202: 404
            "gmq5v-hbozq-uui6y-o55wc-ihop3-562wb-3qspg-nnijg-npqp5-he3cj-3ae": "phin2-eyaaa-aaaak-qaaca-cai",
            "opn46-zyspe-hhmyp-4zu6u-7sbrh-dok77-m7dch-im62f-vyimr-a3n2c-4ae": "psp4x-fqaaa-aaaak-qaabq-cai",
            # # normal
            "w4asl-4nmyj-qnr7c-6cqq4-tkwmt-o26di-iupkq-vx4kt-asbrx-jzuxh-4ae": "wrd4y-xiaaa-aaaac-qaaaq-cai",
            "lspz2-jx4pu-k3e7p-znm7j-q4yum-ork6e-6w4q6-pijwq-znehu-4jabe-kqe": "m4dvk-faaaa-aaaag-aaaba-cai",
            "k44fs-gm4pv-afozh-rs7zw-cg32n-u7xov-xqyx3-2pw5q-eucnu-cosd4-uqe": "cst46-ryaaa-aaaak-aaaha-cai",
            "lhg73-sax6z-2zank-6oer2-575lz-zgbxx-ptudx-5korm-fy7we-kh4hl-pqe": "anvl4-jaaaa-aaaag-qaaca-cai",
            "brlsh-zidhj-3yy3e-6vqbz-7xnih-xeq2l-as5oc-g32c4-i5pdn-2wwof-oae": "qnlji-3yaaa-aaaai-aa2aq-cai",
            "mpubz-g52jc-grhjo-5oze5-qcj74-sex34-omprz-ivnsm-qvvhr-rfzpv-vae": "2zwmb-wyaaa-aaaai-qa2vq-cai",
            "qdvhd-os4o2-zzrdw-xrcv4-gljou-eztdp-bj326-e6jgr-tkhuc-ql6v2-yqe": "ivomh-taaaa-aaaaj-aac2a-cai",
            "jtdsg-3h6gi-hs7o5-z2soi-43w3z-soyl3-ajnp3-ekni5-sw553-5kw67-nqe": "tiezx-5yaaa-aaaaj-qagya-cai",
            "io67a-2jmkw-zup3h-snbwi-g6a5n-rm5dn-b6png-lvdpl-nqnto-yih6l-gqe": "ftuvq-daaaa-aaaad-aaaqa-cai",
            "5kdm2-62fc6-fwnja-hutkz-ycsnm-4z33i-woh43-4cenu-ev7mi-gii6t-4ae": "iqjyz-jqaaa-aaaad-qayoa-cai",
            "4zbus-z2bmt-ilreg-xakz4-6tyre-hsqj4-slb4g-zjwqo-snjcc-iqphi-3qe": "2oxpg-ayaaa-aaaac-aaacq-cai",
            "qxesv-zoxpm-vc64m-zxguk-5sj74-35vrb-tbgwg-pcird-5gr26-62oxl-cae": "htg4w-ziaaa-aaaab-aabaa-cai",
            "shefu-t3kr5-t5q3w-mqmdq-jabyv-vyvtf-cyyey-3kmo4-toyln-emubw-4qe": "2vzmb-ayaaa-aaaae-aaf3q-cai",
            "csyj4-zmann-ys6ge-3kzi6-onexi-obayx-2fvak-zersm-euci4-6pslt-lae": "cozrd-caaaa-aaaaf-qaeua-cai",
            "eq6en-6jqla-fbu5s-daskr-h6hx2-376n5-iqabl-qgrng-gfqmv-n3yjr-mqe": "34i5c-taaaa-aaaaf-aaa2q-cai",
            "snjp4-xlbw4-mnbog-ddwy6-6ckfd-2w5a2-eipqo-7l436-pxqkh-l6fuv-vae": "3muos-6yaaa-aaaaa-qaaua-cai",
            "pae4o-o6dxf-xki7q-ezclx-znyd6-fnk6w-vkv5z-5lfwh-xym2i-otrrw-fqe": "r7fsz-diaaa-aaaab-qadxa-cai",
        }

        # Next subnetwork to use for workload generators, per testnet.
        self.next_subnet = {key: 0 for key in self.testnets.keys()}

        total_subnetworks = sum(self.testnets.values())
        total_targets = len(self.load_targets)
        missing = total_targets - total_subnetworks
        if total_targets > total_subnetworks:
            print(
                (
                    f"Insufficient testnets for load generation (have {total_subnetworks}, "
                    f"but {total_targets} load targets, {missing} more missing"
                )
            )
            # sys.exit instead of the site-provided exit() builtin, which is
            # not guaranteed to exist (e.g. python -S or frozen interpreters).
            sys.exit(1)

        # Shared timestamp so all benchmarks write into one output directory.
        self.start_time = int(time.time())

    def get_window_name(self, subnet):
        """Get tmux window name from a subnet ID (its first dash group)."""
        return subnet.split("-")[0]

    def get_query_command(self, canister, subnet, wg_testnet, wg_subnet, subnet_prefix):
        """Return the query-call benchmark command for the given target."""
        return [
            "./max_capacity_system_baseline.py",
            "--testnet",
            "mercury",
            "--canister",
            canister,
            "--target_subnet_id",
            subnet,
            "--wg_testnet",
            wg_testnet,
            "--wg_subnet",
            str(wg_subnet),
            "--no_instrument=True",
            "--top_level_out_dir",
            "mainnet-{}".format(self.start_time),
            "--second_level_out_dir",
            subnet_prefix,
            "--num_workload_generators",
            str(4),
            "--query_initial_rps",
            str(500),
            "--max_query_load",
            str(500),
            "--skip_generate_report=True",
            "--target_query_load",
            str(440),
            "--query_rps_increment",
            str(40),
            "--target_all=True",
        ]

    def get_update_command(self, canister, subnet, wg_testnet, wg_subnet, subnet_prefix):
        """Return the update-call benchmark command for the given target."""
        # Note: the original listed "--target_update_load 600" twice; the
        # duplicate pair has been removed.
        return [
            "./max_capacity_system_baseline.py",
            "--testnet",
            "mercury",
            "--canister",
            canister,
            "--target_subnet_id",
            subnet,
            "--wg_testnet",
            wg_testnet,
            "--wg_subnet",
            str(wg_subnet),
            "--no_instrument=True",
            "--max_update_load",
            str(600),
            "--top_level_out_dir",
            "mainnet-{}".format(self.start_time),
            "--second_level_out_dir",
            subnet_prefix,
            "--num_workload_generators",
            str(4),
            "--target_update_load",
            str(600),
            "--update_rps_increment",
            str(4),
            "--update_initial_rps",
            str(600),
            "--skip_generate_report=True",
            "--use_updates=True",
            "--iter_duration={}".format(300),
        ]

    def get_commands(self, do_updates=True):
        """Get (window_name, command) pairs for all load targets.

        Consumes testnet capacity (self.testnets counters), so this is meant
        to be called once per Mainnet instance.
        """
        r = []
        for subnet, canister in self.load_targets.items():
            # Pick the first testnet that still has a free subnetwork slot;
            # __init__ guarantees total capacity >= number of targets.
            wg_testnet = None
            for testnet, num_subnets in self.testnets.items():
                if num_subnets > 0:
                    wg_testnet = testnet
                    break
            self.testnets[wg_testnet] -= 1
            wg_subnet = self.next_subnet[wg_testnet]
            self.next_subnet[wg_testnet] += 1

            subnet_prefix = self.get_window_name(subnet)
            r.append(
                (
                    subnet_prefix,
                    self.get_update_command(canister, subnet, wg_testnet, wg_subnet, subnet_prefix)
                    if do_updates
                    else self.get_query_command(canister, subnet, wg_testnet, wg_subnet, subnet_prefix),
                )
            )
        return r

    def run_in_session(self, name: str, command: List[str]):
        """Run the given command in a new tmux window named `name`."""
        assert len(name) > 0
        subprocess.run(
            [
                "tmux",
                "new-window",
                "-n",
                name,
                # Keep the window open after the benchmark so the operator can
                # inspect the failure rate before dismissing it.
                " ".join(command) + '; echo "Check failure rate + hit enter to terminate"; read',
            ],
            check=True,
        )

    def start(self, do_updates):
        """Start the benchmark: one tmux window per load target."""
        print(f"Starting workload with do_updates={do_updates}")
        for name, command in self.get_commands(do_updates):
            self.run_in_session(name, command)

    def tmux_window_list(self) -> List[str]:
        """Get the current tmux window names."""
        r = []
        for line in subprocess.check_output(["tmux", "list-windows"], encoding="utf-8").split("\n"):
            e = line.split(" ")
            if len(e) > 1:
                r.append(e[1])
        return r

    def wait(self):
        """Wait for all benchmark tmux windows to terminate."""
        # Give all windows time to come up before polling for their absence.
        time.sleep(30)
        for name in self.load_targets.keys():
            print(f"Waiting for {name}")
            while self.get_window_name(name) in self.tmux_window_list():
                time.sleep(10)
def main() -> None:
    """Parse flags, launch all per-subnet benchmarks and wait for them."""
    FLAGS(sys.argv)
    mainnet = Mainnet()
    mainnet.start(FLAGS.use_updates)
    # Need to sleep a bit in order to ensure that all windows are coming up
    # (the sleep happens inside wait()).
    mainnet.wait()
    print("All terminated, done")


if __name__ == "__main__":
    # Guarded so importing this module no longer starts the benchmark as a
    # side effect; running the script behaves exactly as before.
    main()
| 38.836207 | 111 | 0.568036 |
import subprocess
import sys
import time
from typing import List
import gflags
FLAGS = gflags.FLAGS
gflags.DEFINE_bool("use_updates", False, "Issue update calls instead of query calls")
class Mainnet:
def __init__(self):
if FLAGS.testnet == "mercury" and FLAGS.target_subnet_id is None:
raise Exception("--target_subnet_id has to be set when running against mainnet")
self.testnets = {
"large01": 5,
"large03": 5,
"large04": 5,
"large05": 5,
"medium04": 2,
}
self.load_targets = {
"ejbmu-grnam-gk6ol-6irwa-htwoj-7ihfl-goimw-hlnvh-abms4-47v2e-zqe": "nffi3-byaaa-aaaae-qaava-cai",
            "gmq5v-hbozq-uui6y-o55wc-ihop3-562wb-3qspg-nnijg-npqp5-he3cj-3ae": "phin2-eyaaa-aaaak-qaaca-cai",
"opn46-zyspe-hhmyp-4zu6u-7sbrh-dok77-m7dch-im62f-vyimr-a3n2c-4ae": "psp4x-fqaaa-aaaak-qaabq-cai",
"w4asl-4nmyj-qnr7c-6cqq4-tkwmt-o26di-iupkq-vx4kt-asbrx-jzuxh-4ae": "wrd4y-xiaaa-aaaac-qaaaq-cai",
"lspz2-jx4pu-k3e7p-znm7j-q4yum-ork6e-6w4q6-pijwq-znehu-4jabe-kqe": "m4dvk-faaaa-aaaag-aaaba-cai",
"k44fs-gm4pv-afozh-rs7zw-cg32n-u7xov-xqyx3-2pw5q-eucnu-cosd4-uqe": "cst46-ryaaa-aaaak-aaaha-cai",
"lhg73-sax6z-2zank-6oer2-575lz-zgbxx-ptudx-5korm-fy7we-kh4hl-pqe": "anvl4-jaaaa-aaaag-qaaca-cai",
"brlsh-zidhj-3yy3e-6vqbz-7xnih-xeq2l-as5oc-g32c4-i5pdn-2wwof-oae": "qnlji-3yaaa-aaaai-aa2aq-cai",
"mpubz-g52jc-grhjo-5oze5-qcj74-sex34-omprz-ivnsm-qvvhr-rfzpv-vae": "2zwmb-wyaaa-aaaai-qa2vq-cai",
"qdvhd-os4o2-zzrdw-xrcv4-gljou-eztdp-bj326-e6jgr-tkhuc-ql6v2-yqe": "ivomh-taaaa-aaaaj-aac2a-cai",
"jtdsg-3h6gi-hs7o5-z2soi-43w3z-soyl3-ajnp3-ekni5-sw553-5kw67-nqe": "tiezx-5yaaa-aaaaj-qagya-cai",
"io67a-2jmkw-zup3h-snbwi-g6a5n-rm5dn-b6png-lvdpl-nqnto-yih6l-gqe": "ftuvq-daaaa-aaaad-aaaqa-cai",
"5kdm2-62fc6-fwnja-hutkz-ycsnm-4z33i-woh43-4cenu-ev7mi-gii6t-4ae": "iqjyz-jqaaa-aaaad-qayoa-cai",
"4zbus-z2bmt-ilreg-xakz4-6tyre-hsqj4-slb4g-zjwqo-snjcc-iqphi-3qe": "2oxpg-ayaaa-aaaac-aaacq-cai",
"qxesv-zoxpm-vc64m-zxguk-5sj74-35vrb-tbgwg-pcird-5gr26-62oxl-cae": "htg4w-ziaaa-aaaab-aabaa-cai",
"shefu-t3kr5-t5q3w-mqmdq-jabyv-vyvtf-cyyey-3kmo4-toyln-emubw-4qe": "2vzmb-ayaaa-aaaae-aaf3q-cai",
"csyj4-zmann-ys6ge-3kzi6-onexi-obayx-2fvak-zersm-euci4-6pslt-lae": "cozrd-caaaa-aaaaf-qaeua-cai",
"eq6en-6jqla-fbu5s-daskr-h6hx2-376n5-iqabl-qgrng-gfqmv-n3yjr-mqe": "34i5c-taaaa-aaaaf-aaa2q-cai",
"snjp4-xlbw4-mnbog-ddwy6-6ckfd-2w5a2-eipqo-7l436-pxqkh-l6fuv-vae": "3muos-6yaaa-aaaaa-qaaua-cai",
"pae4o-o6dxf-xki7q-ezclx-znyd6-fnk6w-vkv5z-5lfwh-xym2i-otrrw-fqe": "r7fsz-diaaa-aaaab-qadxa-cai",
}
self.next_subnet = {key: 0 for key in self.testnets.keys()}
total_subnetworks = sum(self.testnets.values())
total_targets = len(self.load_targets)
missing = total_targets - total_subnetworks
if total_targets > total_subnetworks:
print(
(
f"Insufficient testnets for load generation (have {total_subnetworks}, "
f"but {total_targets} load targets, {missing} more missing"
)
)
exit(1)
self.start_time = int(time.time())
def get_window_name(self, subnet):
return subnet.split("-")[0]
def get_query_command(self, canister, subnet, wg_testnet, wg_subnet, subnet_prefix):
return [
"./max_capacity_system_baseline.py",
"--testnet",
"mercury",
"--canister",
canister,
"--target_subnet_id",
subnet,
"--wg_testnet",
wg_testnet,
"--wg_subnet",
str(wg_subnet),
"--no_instrument=True",
"--top_level_out_dir",
"mainnet-{}".format(self.start_time),
"--second_level_out_dir",
subnet_prefix,
"--num_workload_generators",
str(4),
"--query_initial_rps",
str(500),
"--max_query_load",
str(500),
"--skip_generate_report=True",
"--target_query_load",
str(440),
"--query_rps_increment",
str(40),
"--target_all=True",
]
def get_update_command(self, canister, subnet, wg_testnet, wg_subnet, subnet_prefix):
return [
"./max_capacity_system_baseline.py",
"--testnet",
"mercury",
"--canister",
canister,
"--target_subnet_id",
subnet,
"--wg_testnet",
wg_testnet,
"--wg_subnet",
str(wg_subnet),
"--no_instrument=True",
"--max_update_load",
str(600),
"--top_level_out_dir",
"mainnet-{}".format(self.start_time),
"--second_level_out_dir",
subnet_prefix,
"--num_workload_generators",
str(4),
"--target_update_load",
str(600),
"--update_rps_increment",
str(4),
"--update_initial_rps",
str(600),
"--skip_generate_report=True",
"--target_update_load",
str(600),
"--use_updates=True",
"--iter_duration={}".format(300),
]
def get_commands(self, do_updates=True):
r = []
for subnet, canister in self.load_targets.items():
wg_testnet = None
for testnet, num_subnets in self.testnets.items():
if num_subnets > 0:
wg_testnet = testnet
break
self.testnets[wg_testnet] -= 1
wg_subnet = self.next_subnet[wg_testnet]
self.next_subnet[wg_testnet] += 1
subnet_prefix = self.get_window_name(subnet)
r.append(
(
subnet_prefix,
self.get_update_command(canister, subnet, wg_testnet, wg_subnet, subnet_prefix)
if do_updates
else self.get_query_command(canister, subnet, wg_testnet, wg_subnet, subnet_prefix),
)
)
return r
def run_in_session(self, name: str, command: List[str]):
assert len(name) > 0
subprocess.run(
[
"tmux",
"new-window",
"-n",
name,
" ".join(command) + '; echo "Check failure rate + hit enter to terminate"; read',
],
check=True,
)
def start(self, do_updates):
print(f"Starting workload with do_updates={do_updates}")
for name, command in self.get_commands(do_updates):
self.run_in_session(name, command)
def tmux_window_list(self) -> List[str]:
r = []
for line in subprocess.check_output(["tmux", "list-windows"], encoding="utf-8").split("\n"):
e = line.split(" ")
if len(e) > 1:
r.append(e[1])
return r
def wait(self):
time.sleep(30)
for name in self.load_targets.keys():
print(f"Waiting for {name}")
while self.get_window_name(name) in self.tmux_window_list():
time.sleep(10)
FLAGS(sys.argv)
mainnet = Mainnet()
mainnet.start(FLAGS.use_updates)
mainnet.wait()
print("All terminated, done")
| true | true |
f72067b03fcce0e8fbe2579787be36a9398c361c | 627 | py | Python | transform_file.py | BXuan694/universalAdversarialPerturbation | ebca90f76b5d45715c98a1ff0b6f11df753b51c6 | [
"BSD-2-Clause"
] | 38 | 2019-01-19T09:43:13.000Z | 2022-01-05T09:47:02.000Z | transform_file.py | BXuan694/universalAdversarialPerturbation | ebca90f76b5d45715c98a1ff0b6f11df753b51c6 | [
"BSD-2-Clause"
] | 3 | 2020-02-24T05:56:35.000Z | 2022-01-07T12:08:33.000Z | transform_file.py | BXuan694/universalAdversarialPerturbation | ebca90f76b5d45715c98a1ff0b6f11df753b51c6 | [
"BSD-2-Clause"
] | 10 | 2019-02-19T10:05:57.000Z | 2021-06-07T08:02:36.000Z | from torchvision import transforms
transform1 = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean = [0.485,0.456,0.406], std = [0.229,0.224,0.225]),
])
cut = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
])
convert = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
transform = transforms.Compose([
cut.transforms[0],
cut.transforms[1],
convert.transforms[0],
convert.transforms[1]
])
| 24.115385 | 80 | 0.657097 | from torchvision import transforms
transform1 = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean = [0.485,0.456,0.406], std = [0.229,0.224,0.225]),
])
cut = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
])
convert = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
transform = transforms.Compose([
cut.transforms[0],
cut.transforms[1],
convert.transforms[0],
convert.transforms[1]
])
| true | true |
f7206874eea5462451a2197d6ce51ff97b87f248 | 7,039 | py | Python | pypi_tools/main.py | xnuinside/pypi_tools_bot | 2ae408e510dcc30c39475af1f9cba8af866c54ee | [
"MIT"
] | 2 | 2021-03-04T07:37:38.000Z | 2021-04-01T16:57:10.000Z | pypi_tools/main.py | xnuinside/pypi_tools_bot | 2ae408e510dcc30c39475af1f9cba8af866c54ee | [
"MIT"
] | null | null | null | pypi_tools/main.py | xnuinside/pypi_tools_bot | 2ae408e510dcc30c39475af1f9cba8af866c54ee | [
"MIT"
] | null | null | null | import os
import logging
import sentry_sdk
from aiogram import Bot, Dispatcher, executor, types
from datetime import datetime, timedelta
from pypi_tools.logic import remove_track_for_package
import pypi_tools.data as d
from pypi_tools.helpers import validate_input
import pypi_tools.vizualizer as v
import pypi_tools.readme as r
import asyncio
import aioredis
logging.basicConfig(level=logging.INFO)
# Redis connection URL for the release-tracking storage (see track_command).
redis_host = f"redis://{os.environ.get('REDIS_HOST')}"
#sentry_sdk.init(os.environ["SENTRY_PATH"])
# parse_mode="html" lets handlers reply with <b>…</b> markup.
bot = Bot(token=os.environ["BOT_API_KEY"], parse_mode="html")
dp = Dispatcher(bot)
@dp.message_handler(commands=['start'])
async def send_welcome(message):
    """Handle /start: greet the user and show the help text."""
    greeting = (
        f"Hello, {message.chat.first_name} {message.chat.last_name}! \n"
        f"Welcome to <b>PyPi Tools Bot.</b>\n\n"
        "This Bot created special to obtain information from Official Python PyPi Server\n"
    )
    await message.answer(greeting + r.help_text + r.current_version)
@dp.message_handler(commands=['help'])
async def send_help(message):
    """Handle /help: send the static help text.

    Renamed from ``send_welcome``: the original redefined (shadowed) the
    /start handler's function name at module level.
    """
    await message.answer(r.help_text)
@dp.message_handler(lambda message: message.text and (
        '/stats' in message.text.lower() or 'stats:' in message.text.lower()))
@validate_input(command='stats',
                known_sub_commands={'@any_number': lambda num: num})
async def send_package_stats(message):
    """Handle /stats <package> [days]: reply with download statistics.

    message.output / message.sub_command are filled in by @validate_input;
    a multi-word output is passed through unchanged (presumably a validation
    error message — confirm against validate_input).
    """
    reply = message.output
    days = message.sub_command or 5  # default window: 5 days
    if len(reply.split()) == 1:
        package_name = reply
        today = datetime.now().date()
        downloads = await d.cached_package_downloads_stats(package_name, days, today)
        reply = d.stats_text(downloads, package_name, days)
    await message.answer(reply)
@dp.message_handler(lambda message: message.text and (
        '/plot' in message.text.lower() or 'plot:' in message.text.lower()))
@validate_input(command='plot',
                known_sub_commands={'@any_number': lambda num: num})
async def send_package_stats_with_graph(message):
    """Handle /plot <package> [days]: send download stats text plus a PNG graph.

    message.output / message.sub_command are filled in by @validate_input;
    a multi-word output is passed through unchanged (presumably a validation
    error message — confirm against validate_input).
    """
    output = message.output
    sub_command = message.sub_command or 5  # default window: 5 days
    if len(output.split()) == 1:
        days = sub_command
        package_name = output
        current_date = datetime.now().date()
        output = d.stats_text(data_, package_name, days)
        temp = 'temp/'
        os.makedirs(temp, exist_ok=True)
        # for pandas range
        # NOTE(review): start_date is always 2 days back regardless of `days`
        # — looks like a seed for the plot's x-axis; TODO confirm intent.
        start_date = current_date - timedelta(days=2)
        # Graphs are cached on disk, keyed by package / yesterday's date / window.
        file_name = f'{temp}/{package_name}:{current_date - timedelta(days=1)}:{days}.png'
        if not os.path.isfile(file_name):
            file_name = v.generate_graph(start_date, [item for _, item in data_.items()][::-1], file_name)
        file_ = types.InputFile(file_name)
        await message.answer(output)
        await message.answer_photo(file_)
@dp.message_handler(commands=['random'])
async def command(message):
    """Handle /random: reply with a randomly chosen PyPI package."""
    package_info = await d.get_random_package()
    await message.answer(package_info)
@dp.message_handler(commands=['search', 'search:detailed'])
@validate_input(command='search',
                known_sub_commands={'detailed': lambda _package_name: d.request_package_info_from_pypi(
                    _package_name, detailed=True)},
                additional_error="Or use with sub-command to get detailed information:"
                                 "/search:detailed aiohttp")
async def search_command(message):
    """Handle /search[:detailed] <package>: reply with package info from PyPI."""
    reply = message.output
    detailed_fetcher = message.sub_command  # callable for :detailed, else falsy
    if len(reply.split()) == 1:
        package_name = reply
        if detailed_fetcher:
            reply = await detailed_fetcher(package_name)
        else:
            reply = await d.request_package_info_from_pypi(package_name)
    await message.answer(reply)
@dp.message_handler(commands=['releases', 'releases:full'])
@validate_input(command='releases',
                known_sub_commands={'full': 'full'},
                additional_error="Or use with sub-command to get full list of releases:"
                                 "/releases:full aiohttp")
async def releases_command(message):
    """Handle /releases[:full] <package>: list release versions with dates.

    /releases shows the last 7 releases; /releases:full shows all of them.
    """
    output = message.output
    sub_command = message.sub_command
    if len(output.split()) == 1:
        package_name = output
        releases = await d.get_release_list(package_name=package_name)
        if releases is None:
            # get_release_list returns None for unknown packages (see
            # track_command); previously this crashed on releases.items().
            output = f'Package {package_name} does not exists'
        elif sub_command and sub_command == 'full':
            output = f"Full Releases list for Package {package_name}\n\n"
            for version, v_date in releases.items():
                output += f"<b>{version}</b>: {v_date}\n"
        else:
            output = f"Last 7 Releases for Package {package_name}\n\n"
            for num, items in enumerate(list(releases.items())):
                # enumerate starts at 0, so stop once 7 entries were emitted
                # (the original `num > 7` check printed 8).
                if num >= 7:
                    break
                version, v_date = items
                output += f"<b>{version}</b>: {v_date}\n"
    await message.answer(output)
# Maps /track sub-commands to their behavior: 'stop' removes the tracking
# entry for a chat/package key; 'nodev' is a marker string handled inline in
# track_command (skips dev/pre-release versions).
track_sub_commands = {'stop': lambda key: remove_track_for_package(key),
                      'nodev': 'nodev'}
@dp.message_handler(commands=['track', 'track:stop', 'track:nodev'])
@validate_input(command='track',
                known_sub_commands=track_sub_commands,
                additional_error="Or use with sub-command to stop track a package releases"
                                 "/track:stop aiohttp")
async def track_command(message):
    """Handle /track and its sub-commands (:stop, :nodev).

    Stores the currently-known version in redis under "<chat_id>:<package>";
    a separate checker (elsewhere) announces new releases.
    """
    # NOTE(review): a new redis pool is created per command and never closed —
    # consider a shared pool created at startup.
    pool = await aioredis.create_redis_pool(redis_host)
    with await pool as redis:
        output = message.output
        sub_command = message.sub_command
        if len(output.split()) == 1:
            package_name = output
            # Key is "<chat_id>:<package>" so each chat tracks independently.
            chat_id = str(message.chat.id)
            key = chat_id + ":" + package_name
            if sub_command and sub_command != 'nodev':
                # 'stop': sub_command is the removal callable from
                # track_sub_commands.
                output = await sub_command(key)
            else:
                nodev = False
                if sub_command:
                    # 'nodev': presumably skip dev/pre-release versions when
                    # listing releases — confirm in d.get_release_list.
                    nodev = True
                versions = await d.get_release_list(package_name, nodev)
                if versions is None:
                    output = f'Package {package_name} does not exists'
                else:
                    current_version = d.get_last_release_version(versions)
                    output = f"Current {package_name} version is {current_version} \n" \
                             "You will be announced with new version release"
                    version = current_version[0]
                    if nodev:
                        # ":nodev" suffix marks the stored value so the release
                        # checker keeps skipping dev versions for this entry.
                        version = version + ':nodev'
                    await redis.set(key, version)
        await message.answer(output)
@dp.message_handler()
async def echo_all(message: types.Message):
    """Fallback handler: echo any unrecognized message back verbatim."""
    incoming_text = message.text
    await message.answer(incoming_text)
if __name__ == '__main__':
    try:
        # Blocking long-polling loop; dispatches updates to the handlers above.
        executor.start_polling(dp, skip_updates=True)
    except Exception as e:
        # NOTE(review): sentry_sdk.init above is commented out, so this is a
        # no-op until it is re-enabled.
        sentry_sdk.capture_exception(e)
| 40.687861 | 107 | 0.624378 | import os
import logging
import sentry_sdk
from aiogram import Bot, Dispatcher, executor, types
from datetime import datetime, timedelta
from pypi_tools.logic import remove_track_for_package
import pypi_tools.data as d
from pypi_tools.helpers import validate_input
import pypi_tools.vizualizer as v
import pypi_tools.readme as r
import asyncio
import aioredis
logging.basicConfig(level=logging.INFO)
redis_host = f"redis://{os.environ.get('REDIS_HOST')}"
bot = Bot(token=os.environ["BOT_API_KEY"], parse_mode="html")
dp = Dispatcher(bot)
@dp.message_handler(commands=['start'])
async def send_welcome(message):
text = f"Hello, {message.chat.first_name} {message.chat.last_name}! \n" \
f"Welcome to <b>PyPi Tools Bot.</b>\n\n" \
"This Bot created special to obtain information from Official Python PyPi Server\n" \
+ r.help_text + r.current_version
await message.answer(text)
@dp.message_handler(commands=['help'])
async def send_welcome(message):
await message.answer(r.help_text)
@dp.message_handler(lambda message: message.text and (
'/stats' in message.text.lower() or 'stats:' in message.text.lower()))
@validate_input(command='stats',
known_sub_commands={'@any_number': lambda num: num})
async def send_package_stats(message):
output = message.output
sub_command = message.sub_command or 5
if len(output.split()) == 1:
days = sub_command
package_name = output
current_date = datetime.now().date()
data_ = await d.cached_package_downloads_stats(package_name, days, current_date)
output = d.stats_text(data_, package_name, days)
await message.answer(output)
@dp.message_handler(lambda message: message.text and (
'/plot' in message.text.lower() or 'plot:' in message.text.lower()))
@validate_input(command='plot',
known_sub_commands={'@any_number': lambda num: num})
async def send_package_stats_with_graph(message):
output = message.output
sub_command = message.sub_command or 5
if len(output.split()) == 1:
days = sub_command
package_name = output
current_date = datetime.now().date()
data_ = await d.cached_package_downloads_stats(package_name, days, current_date)
output = d.stats_text(data_, package_name, days)
temp = 'temp/'
os.makedirs(temp, exist_ok=True)
start_date = current_date - timedelta(days=2)
file_name = f'{temp}/{package_name}:{current_date - timedelta(days=1)}:{days}.png'
if not os.path.isfile(file_name):
file_name = v.generate_graph(start_date, [item for _, item in data_.items()][::-1], file_name)
file_ = types.InputFile(file_name)
await message.answer(output)
await message.answer_photo(file_)
@dp.message_handler(commands=['random'])
async def command(message):
output = await d.get_random_package()
await message.answer(output)
@dp.message_handler(commands=['search', 'search:detailed'])
@validate_input(command='search',
known_sub_commands={'detailed': lambda _package_name: d.request_package_info_from_pypi(
_package_name, detailed=True)},
additional_error="Or use with sub-command to get detailed information:"
"/search:detailed aiohttp")
async def search_command(message):
output = message.output
sub_command = message.sub_command
if len(output.split()) == 1:
package_name = output
if sub_command:
output = await sub_command(package_name)
else:
output = await d.request_package_info_from_pypi(package_name)
await message.answer(output)
@dp.message_handler(commands=['releases', 'releases:full'])
@validate_input(command='releases',
known_sub_commands={'full': 'full'},
additional_error="Or use with sub-command to get full list of releases:"
"/releases:full aiohttp")
async def releases_command(message):
output = message.output
sub_command = message.sub_command
if len(output.split()) == 1:
package_name = output
releases = await d.get_release_list(package_name=package_name)
if sub_command and sub_command == 'full':
output = f"Full Releases list for Package {package_name}\n\n"
for version, v_date in releases.items():
output += f"<b>{version}</b>: {v_date}\n"
else:
output = f"Last 7 Releases for Package {package_name}\n\n"
for num, items in enumerate(list(releases.items())):
if num > 7:
break
version, v_date = items
output += f"<b>{version}</b>: {v_date}\n"
await message.answer(output)
track_sub_commands = {'stop': lambda key: remove_track_for_package(key),
'nodev': 'nodev'}
@dp.message_handler(commands=['track', 'track:stop', 'track:nodev'])
@validate_input(command='track',
known_sub_commands=track_sub_commands,
additional_error="Or use with sub-command to stop track a package releases"
"/track:stop aiohttp")
async def track_command(message):
pool = await aioredis.create_redis_pool(redis_host)
with await pool as redis:
output = message.output
sub_command = message.sub_command
if len(output.split()) == 1:
package_name = output
chat_id = str(message.chat.id)
key = chat_id + ":" + package_name
if sub_command and sub_command != 'nodev':
output = await sub_command(key)
else:
nodev = False
if sub_command:
nodev = True
versions = await d.get_release_list(package_name, nodev)
if versions is None:
output = f'Package {package_name} does not exists'
else:
current_version = d.get_last_release_version(versions)
output = f"Current {package_name} version is {current_version} \n" \
"You will be announced with new version release"
version = current_version[0]
if nodev:
version = version + ':nodev'
await redis.set(key, version)
await message.answer(output)
@dp.message_handler()
async def echo_all(message: types.Message):
await message.answer(message.text)
if __name__ == '__main__':
try:
executor.start_polling(dp, skip_updates=True)
except Exception as e:
sentry_sdk.capture_exception(e)
| true | true |
f720698516b3281db66e46d699cb433d69aa86c5 | 473 | py | Python | 4_factory/simple_factory/veggie_pizza.py | hypersport/Head-First-Design-Patterns-Python | 0c8b831ae89ebbbef8b203b96508deb7e3063590 | [
"MIT"
] | null | null | null | 4_factory/simple_factory/veggie_pizza.py | hypersport/Head-First-Design-Patterns-Python | 0c8b831ae89ebbbef8b203b96508deb7e3063590 | [
"MIT"
] | null | null | null | 4_factory/simple_factory/veggie_pizza.py | hypersport/Head-First-Design-Patterns-Python | 0c8b831ae89ebbbef8b203b96508deb7e3063590 | [
"MIT"
] | null | null | null | from pizza import Pizza
class VeggiePizza(Pizza):
    """Concrete Pizza: crust dough, marinara sauce, vegetable toppings."""

    def __init__(self):
        self.name = 'Veggie Pizza'
        self.dough = 'Crust'
        self.sauce = 'Marinara sauce'
        # Assign a fresh instance-level list instead of appending to the
        # inherited self.toppings: since super().__init__() is never called,
        # the appends targeted the list inherited from Pizza (which must be a
        # class attribute for them to work), so toppings accumulated and were
        # shared across every instantiation.
        self.toppings = [
            'Shredded mozzarella',
            'Grated parmesan',
            'Diced onion',
            'Sliced mushrooms',
            'Sliced red pepper',
            'Sliced black olives',
        ]
| 31.533333 | 51 | 0.651163 | from pizza import Pizza
class VeggiePizza(Pizza):
def __init__(self):
self.name = 'Veggie Pizza'
self.dough = 'Crust'
self.sauce = 'Marinara sauce'
self.toppings.append('Shredded mozzarella')
self.toppings.append('Grated parmesan')
self.toppings.append('Diced onion')
self.toppings.append('Sliced mushrooms')
self.toppings.append('Sliced red pepper')
self.toppings.append('Sliced black olives')
| true | true |
f7206acbc129c68b6043e8dd105bdfbbd6738ace | 240 | py | Python | python/homeflux/utils/timer.py | david2777/homeflux | 4f7c4b855bd69b0b132d480cac133582445cacf5 | [
"MIT"
] | null | null | null | python/homeflux/utils/timer.py | david2777/homeflux | 4f7c4b855bd69b0b132d480cac133582445cacf5 | [
"MIT"
] | null | null | null | python/homeflux/utils/timer.py | david2777/homeflux | 4f7c4b855bd69b0b132d480cac133582445cacf5 | [
"MIT"
] | null | null | null | import time
class Timer:
    """Measure elapsed wall-clock time from construction via time.perf_counter."""

    def __init__(self):
        # Capture the starting timestamp immediately on construction.
        self.start = time.perf_counter()

    def end(self, precision: int = 3) -> str:
        """Return the elapsed seconds formatted with `precision` decimals."""
        elapsed = time.perf_counter() - self.start
        return f'{elapsed:.{precision}f}'
| 21.818182 | 77 | 0.6 | import time
class Timer:
def __init__(self):
self.start = time.perf_counter()
def end(self, precision: int = 3) -> str:
return '%.{}f'.format(precision) % (time.perf_counter() - self.start)
| true | true |
f7206b1325541721ae9f9158f32fc2ad213a5369 | 16,774 | py | Python | pw_tokenizer/py/pw_tokenizer/tokens.py | LuDuda/pigweed | dcd7230895a234156bc7b6e5061e6936627c5fbb | [
"Apache-2.0"
] | null | null | null | pw_tokenizer/py/pw_tokenizer/tokens.py | LuDuda/pigweed | dcd7230895a234156bc7b6e5061e6936627c5fbb | [
"Apache-2.0"
] | null | null | null | pw_tokenizer/py/pw_tokenizer/tokens.py | LuDuda/pigweed | dcd7230895a234156bc7b6e5061e6936627c5fbb | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Builds and manages databases of tokenized strings."""
import collections
import csv
from dataclasses import dataclass
from datetime import datetime
import io
import logging
from pathlib import Path
import re
import struct
from typing import (BinaryIO, Callable, Dict, Iterable, Iterator, List,
NamedTuple, Optional, Pattern, Tuple, Union, ValuesView)
DATE_FORMAT = '%Y-%m-%d'
DEFAULT_DOMAIN = ''
# The default hash length to use. This value only applies when hashing strings
# from a legacy-style ELF with plain strings. New tokenized string entries
# include the token alongside the string.
#
# This MUST match the default value of PW_TOKENIZER_CFG_C_HASH_LENGTH in
# pw_tokenizer/public/pw_tokenizer/config.h.
DEFAULT_C_HASH_LENGTH = 128
TOKENIZER_HASH_CONSTANT = 65599
_LOG = logging.getLogger('pw_tokenizer')
def _value(char: Union[int, str]) -> int:
return char if isinstance(char, int) else ord(char)
def pw_tokenizer_65599_fixed_length_hash(string: Union[str, bytes],
                                         hash_length: int) -> int:
    """Hashes the provided string.

    This hash function is only used when adding tokens from legacy-style
    tokenized strings in an ELF, which do not include the token.

    Only the first hash_length characters contribute terms, but the full
    string length seeds the initial value.
    """
    result = len(string)
    multiplier = TOKENIZER_HASH_CONSTANT
    for char in string[:hash_length]:
        # All arithmetic is reduced modulo 2**32 to mirror uint32_t overflow
        # in the matching C implementation.
        result = (result + multiplier * _value(char)) % 2**32
        multiplier = (multiplier * TOKENIZER_HASH_CONSTANT) % 2**32
    return result
def default_hash(string: Union[str, bytes]) -> int:
    """Hashes the string with the default length (DEFAULT_C_HASH_LENGTH).

    DEFAULT_C_HASH_LENGTH must match PW_TOKENIZER_CFG_C_HASH_LENGTH so tokens
    computed here agree with legacy C-preprocessor-hashed strings.
    """
    return pw_tokenizer_65599_fixed_length_hash(string, DEFAULT_C_HASH_LENGTH)
class _EntryKey(NamedTuple):
    """Uniquely refers to an entry.

    Entries with the same token and string are the same entry regardless of
    domain or removal date (see TokenizedStringEntry.key and Database).
    """
    token: int  # The entry's token value.
    string: str  # The original (detokenized) string.
@dataclass(eq=True, order=False)
class TokenizedStringEntry:
    """A tokenized string with its metadata.

    date_removed is None while the string is still present in the source;
    None is treated as the newest possible removal date throughout.
    Ordering (order=False) is provided only via the custom __lt__ below.
    """
    token: int
    string: str
    domain: str = DEFAULT_DOMAIN
    date_removed: Optional[datetime] = None
    def key(self) -> _EntryKey:
        """The key determines uniqueness for a tokenized string."""
        return _EntryKey(self.token, self.string)
    def update_date_removed(self,
                            new_date_removed: Optional[datetime]) -> None:
        """Sets self.date_removed if the other date is newer."""
        # No removal date (None) is treated as the newest date.
        if self.date_removed is None:
            return
        # None (still present) or a later date both supersede the current one.
        if new_date_removed is None or new_date_removed > self.date_removed:
            self.date_removed = new_date_removed
    def __lt__(self, other) -> bool:
        """Sorts the entry by token, date removed, then string."""
        if self.token != other.token:
            return self.token < other.token
        # Sort removal dates in reverse, so the most recently removed (or still
        # present) entry appears first. None coalesces to datetime.max here.
        if self.date_removed != other.date_removed:
            return (other.date_removed or datetime.max) < (self.date_removed
                                                           or datetime.max)
        return self.string < other.string
    def __str__(self) -> str:
        # Entries print as their underlying string (used when dumping DBs).
        return self.string
class Database:
    """Database of tokenized strings stored as TokenizedStringEntry objects."""
    def __init__(self, entries: Iterable[TokenizedStringEntry] = ()):
        """Creates a token database from an iterable of entries."""
        # The database dict stores each unique (token, string) entry.
        self._database: Dict[_EntryKey, TokenizedStringEntry] = {
            entry.key(): entry
            for entry in entries
        }
        # Lazily-built token -> entries cache; every mutating method
        # invalidates it by setting it back to None.
        self._cache: Optional[Dict[int, List[TokenizedStringEntry]]] = None

    @classmethod
    def from_strings(
            cls,
            strings: Iterable[str],
            domain: str = DEFAULT_DOMAIN,
            tokenize: Callable[[str], int] = default_hash) -> 'Database':
        """Creates a Database from an iterable of strings."""
        return cls((TokenizedStringEntry(tokenize(string), string, domain)
                    for string in strings))

    @classmethod
    def merged(cls, *databases: 'Database') -> 'Database':
        """Creates a TokenDatabase from one or more other databases."""
        db = cls()
        db.merge(*databases)
        return db

    @property
    def token_to_entries(self) -> Dict[int, List[TokenizedStringEntry]]:
        """Returns a dict that maps tokens to a list of TokenizedStringEntry."""
        if self._cache is None:  # build the token -> entries cache on demand
            self._cache = collections.defaultdict(list)
            for entry in self._database.values():
                self._cache[entry.token].append(entry)

        return self._cache

    def entries(self) -> ValuesView[TokenizedStringEntry]:
        """Returns iterable over all TokenizedStringEntries in the database."""
        return self._database.values()

    def collisions(self) -> Iterator[Tuple[int, List[TokenizedStringEntry]]]:
        """Returns tuple of (token, entries_list)) for all colliding tokens."""
        for token, entries in self.token_to_entries.items():
            if len(entries) > 1:
                yield token, entries

    def mark_removals(
            self,
            all_entries: Iterable[TokenizedStringEntry],
            removal_date: Optional[datetime] = None
    ) -> List[TokenizedStringEntry]:
        """Marks entries missing from all_entries as having been removed.

        The entries are assumed to represent the complete set of entries for
        the database. Entries currently in the database not present in the
        provided entries are marked with a removal date but remain in the
        database. Entries in all_entries missing from the database are NOT
        added; call the add function to add these.

        Args:
          all_entries: the complete set of strings present in the database
          removal_date: the datetime for removed entries; today by default

        Returns:
          A list of entries marked as removed.
        """
        self._cache = None

        if removal_date is None:
            removal_date = datetime.now()

        all_keys = frozenset(entry.key() for entry in all_entries)

        removed = []
        for entry in self._database.values():
            if (entry.key() not in all_keys
                    and (entry.date_removed is None
                         or removal_date < entry.date_removed)):
                # Add a removal date, or update it to the oldest date.
                entry.date_removed = removal_date
                removed.append(entry)

        return removed

    def add(self, entries: Iterable[TokenizedStringEntry]) -> None:
        """Adds new entries and updates date_removed for existing entries."""
        self._cache = None

        for new_entry in entries:
            # Update an existing entry or create a new one.
            try:
                entry = self._database[new_entry.key()]
                entry.domain = new_entry.domain
                # Re-adding an entry reactivates it: clear any removal date.
                entry.date_removed = None
            except KeyError:
                # New entries are created without a removal date, even if
                # new_entry carries one: added strings are considered present.
                self._database[new_entry.key()] = TokenizedStringEntry(
                    new_entry.token, new_entry.string, new_entry.domain)

    def purge(
            self,
            date_removed_cutoff: Optional[datetime] = None
    ) -> List[TokenizedStringEntry]:
        """Removes and returns entries removed on/before date_removed_cutoff."""
        self._cache = None

        if date_removed_cutoff is None:
            date_removed_cutoff = datetime.max

        # Iterate over the values directly; the keys used for deletion are
        # recomputed from the entries, so items() is unnecessary here.
        to_delete = [
            entry for entry in self._database.values()
            if entry.date_removed and entry.date_removed <= date_removed_cutoff
        ]

        for entry in to_delete:
            del self._database[entry.key()]

        return to_delete

    def merge(self, *databases: 'Database') -> None:
        """Merges two or more databases together, keeping the newest dates."""
        self._cache = None

        for other_db in databases:
            for entry in other_db.entries():
                key = entry.key()

                if key in self._database:
                    self._database[key].update_date_removed(entry.date_removed)
                else:
                    self._database[key] = entry

    def filter(
            self,
            include: Iterable[Union[str, Pattern[str]]] = (),
            exclude: Iterable[Union[str, Pattern[str]]] = (),
            replace: Iterable[Tuple[Union[str, Pattern[str]], str]] = ()
    ) -> None:
        """Filters the database using regular expressions (strings or compiled).

        Args:
          include: regexes; only entries matching at least one are kept
          exclude: regexes; entries matching any of these are removed
          replace: (regex, str) tuples; replaces matching terms in all entries
        """
        self._cache = None

        to_delete: List[_EntryKey] = []

        if include:
            include_re = [re.compile(pattern) for pattern in include]
            to_delete.extend(
                key for key, val in self._database.items()
                if not any(rgx.search(val.string) for rgx in include_re))

        if exclude:
            exclude_re = [re.compile(pattern) for pattern in exclude]
            to_delete.extend(key for key, val in self._database.items() if any(
                rgx.search(val.string) for rgx in exclude_re))

        for key in to_delete:
            del self._database[key]

        # Replacements apply after include/exclude filtering, to surviving
        # entries only. NOTE(review): the substitution mutates entry.string in
        # place without updating the dict key, so keys may become stale.
        for search, replacement in replace:
            search = re.compile(search)

            for value in self._database.values():
                value.string = search.sub(replacement, value.string)

    def __len__(self) -> int:
        """Returns the number of entries in the database."""
        return len(self.entries())

    def __str__(self) -> str:
        """Outputs the database as CSV."""
        csv_output = io.BytesIO()
        write_csv(self, csv_output)
        return csv_output.getvalue().decode()
def parse_csv(fd) -> Iterable[TokenizedStringEntry]:
    """Parses TokenizedStringEntries from a CSV token database file."""
    for row in csv.reader(fd):
        try:
            token_text, removal_text, text = row
            token = int(token_text, 16)
            if removal_text.strip():
                removal_date = datetime.strptime(removal_text, DATE_FORMAT)
            else:
                removal_date = None
            yield TokenizedStringEntry(token, text, DEFAULT_DOMAIN,
                                       removal_date)
        except (ValueError, UnicodeDecodeError) as err:
            # Skip malformed rows but keep parsing the rest of the file.
            _LOG.error('Failed to parse tokenized string entry %s: %s', row,
                       err)
def write_csv(database: Database, fd: BinaryIO) -> None:
    """Writes the database as CSV to the provided binary file."""
    for entry in sorted(database.entries()):
        # Pad the date to a 10-character column for readability, and use \n
        # line endings rather than RFC 4180's \r\n.
        if entry.date_removed:
            date_text = entry.date_removed.strftime(DATE_FORMAT)
        else:
            date_text = ''
        escaped = entry.string.replace('"', '""')  # escape " as ""
        fd.write(f'{entry.token:08x},{date_text:10},"{escaped}"\n'.encode())
class _BinaryFileFormat(NamedTuple):
    """Attributes of the binary token database file format."""
    # 8-byte magic string at the start of every binary database file.
    magic: bytes = b'TOKENS\0\0'
    # Header: magic string, uint32 entry count, 4 padding bytes
    # (little-endian).
    header: struct.Struct = struct.Struct('<8sI4x')
    # Entry record: uint32 token, then removal day (uint8), month (uint8),
    # year (uint16); 0xff/0xff/0xffff means "not removed".
    entry: struct.Struct = struct.Struct('<IBBH')


# Shared singleton describing the binary file layout.
BINARY_FORMAT = _BinaryFileFormat()
class DatabaseFormatError(Exception):
    """Failed to parse a token database file (bad magic, truncated, or not a
    valid CSV/binary layout)."""
def file_is_binary_database(fd: BinaryIO) -> bool:
    """True if the file starts with the binary token database magic string."""
    try:
        # Peek at the first bytes, then rewind so callers can parse normally.
        fd.seek(0)
        prefix = fd.read(len(BINARY_FORMAT.magic))
        fd.seek(0)
    except IOError:
        return False
    return prefix == BINARY_FORMAT.magic
def _check_that_file_is_csv_database(path: Path) -> None:
    """Raises an error unless the path appears to be a CSV token database."""
    try:
        with path.open('rb') as fd:
            first_bytes = fd.read(8)  # The first 8 bytes should be a token.

        if not first_bytes:
            return  # An empty file is a valid (empty) CSV database.

        if len(first_bytes) != 8:
            raise DatabaseFormatError(
                f'Attempted to read {path} as a CSV token database, but the '
                f'file is too short ({len(first_bytes)} B)')

        # The leading 8 characters must parse as a hexadecimal token.
        int(first_bytes.decode(), 16)
    except (IOError, UnicodeDecodeError, ValueError) as err:
        raise DatabaseFormatError(
            f'Encountered error while reading {path} as a CSV token database'
        ) from err
def parse_binary(fd: BinaryIO) -> Iterable[TokenizedStringEntry]:
    """Parses TokenizedStringEntries from a binary token database file.

    Raises:
      DatabaseFormatError: the file does not start with the expected magic.
    """
    magic, entry_count = BINARY_FORMAT.header.unpack(
        fd.read(BINARY_FORMAT.header.size))

    if magic != BINARY_FORMAT.magic:
        raise DatabaseFormatError(
            f'Binary token database magic number mismatch (found {magic!r}, '
            f'expected {BINARY_FORMAT.magic!r}) while reading from {fd}')

    entries = []

    for _ in range(entry_count):
        token, day, month, year = BINARY_FORMAT.entry.unpack(
            fd.read(BINARY_FORMAT.entry.size))

        try:
            date_removed: Optional[datetime] = datetime(year, month, day)
        except ValueError:
            # 0xff/0xff/0xffff (or any other invalid date) marks an entry
            # that has not been removed.
            date_removed = None

        entries.append((token, date_removed))

    # Read the entire string table and define a function for looking up strings.
    string_table = fd.read()

    def read_string(start):
        # Strings are null-terminated; find the terminator once and reuse it
        # for both the slice and the next offset (previously the table was
        # scanned twice per string).
        end = string_table.find(b'\0', start)
        return string_table[start:end].decode(), end + 1

    offset = 0
    for token, removed in entries:
        string, offset = read_string(offset)
        yield TokenizedStringEntry(token, string, DEFAULT_DOMAIN, removed)
def write_binary(database: Database, fd: BinaryIO) -> None:
    """Writes the database as packed binary to the provided binary file."""
    entries = sorted(database.entries())
    # Header: magic string followed by the number of entry records.
    fd.write(BINARY_FORMAT.header.pack(BINARY_FORMAT.magic, len(entries)))
    # Strings are accumulated here and written after all entry records, in
    # entry order, each terminated by a null byte.
    string_table = bytearray()
    for entry in entries:
        if entry.date_removed:
            removed_day = entry.date_removed.day
            removed_month = entry.date_removed.month
            removed_year = entry.date_removed.year
        else:
            # If there is no removal date, use the special value 0xffffffff for
            # the day/month/year. That ensures that still-present tokens appear
            # as the newest tokens when sorted by removal date.
            removed_day = 0xff
            removed_month = 0xff
            removed_year = 0xffff
        string_table += entry.string.encode()
        string_table.append(0)
        fd.write(
            BINARY_FORMAT.entry.pack(entry.token, removed_day, removed_month,
                                     removed_year))
    fd.write(string_table)
class DatabaseFile(Database):
    """A token database that is associated with a particular file.

    This class adds the write_to_file() method that writes to file from which it
    was created in the correct format (CSV or binary).
    """
    def __init__(self, path: Union[Path, str]):
        """Loads the file at path, auto-detecting binary vs. CSV format."""
        self.path = Path(path)
        # Read the path as a packed binary file.
        with self.path.open('rb') as fd:
            if file_is_binary_database(fd):
                super().__init__(parse_binary(fd))
                # Remember which serializer matches the on-disk format.
                self._export = write_binary
                return
        # Read the path as a CSV file.
        _check_that_file_is_csv_database(self.path)
        with self.path.open('r', newline='') as file:
            super().__init__(parse_csv(file))
            self._export = write_csv

    def write_to_file(self, path: Optional[Union[Path, str]] = None) -> None:
        """Exports in the original format to the original or provided path."""
        with open(self.path if path is None else path, 'wb') as fd:
            self._export(self, fd)
| 35.84188 | 80 | 0.63205 |
import collections
import csv
from dataclasses import dataclass
from datetime import datetime
import io
import logging
from pathlib import Path
import re
import struct
from typing import (BinaryIO, Callable, Dict, Iterable, Iterator, List,
NamedTuple, Optional, Pattern, Tuple, Union, ValuesView)
DATE_FORMAT = '%Y-%m-%d'
DEFAULT_DOMAIN = ''
DEFAULT_C_HASH_LENGTH = 128
TOKENIZER_HASH_CONSTANT = 65599
_LOG = logging.getLogger('pw_tokenizer')
def _value(char: Union[int, str]) -> int:
return char if isinstance(char, int) else ord(char)
def pw_tokenizer_65599_fixed_length_hash(string: Union[str, bytes],
hash_length: int) -> int:
hash_value = len(string)
coefficient = TOKENIZER_HASH_CONSTANT
for char in string[:hash_length]:
hash_value = (hash_value + coefficient * _value(char)) % 2**32
coefficient = (coefficient * TOKENIZER_HASH_CONSTANT) % 2**32
return hash_value
def default_hash(string: Union[str, bytes]) -> int:
return pw_tokenizer_65599_fixed_length_hash(string, DEFAULT_C_HASH_LENGTH)
class _EntryKey(NamedTuple):
token: int
string: str
@dataclass(eq=True, order=False)
class TokenizedStringEntry:
token: int
string: str
domain: str = DEFAULT_DOMAIN
date_removed: Optional[datetime] = None
def key(self) -> _EntryKey:
return _EntryKey(self.token, self.string)
def update_date_removed(self,
new_date_removed: Optional[datetime]) -> None:
if self.date_removed is None:
return
if new_date_removed is None or new_date_removed > self.date_removed:
self.date_removed = new_date_removed
def __lt__(self, other) -> bool:
if self.token != other.token:
return self.token < other.token
if self.date_removed != other.date_removed:
return (other.date_removed or datetime.max) < (self.date_removed
or datetime.max)
return self.string < other.string
def __str__(self) -> str:
return self.string
class Database:
def __init__(self, entries: Iterable[TokenizedStringEntry] = ()):
self._database: Dict[_EntryKey, TokenizedStringEntry] = {
entry.key(): entry
for entry in entries
}
self._cache: Optional[Dict[int, List[TokenizedStringEntry]]] = None
@classmethod
def from_strings(
cls,
strings: Iterable[str],
domain: str = DEFAULT_DOMAIN,
tokenize: Callable[[str], int] = default_hash) -> 'Database':
return cls((TokenizedStringEntry(tokenize(string), string, domain)
for string in strings))
@classmethod
def merged(cls, *databases: 'Database') -> 'Database':
db = cls()
db.merge(*databases)
return db
@property
def token_to_entries(self) -> Dict[int, List[TokenizedStringEntry]]:
if self._cache is None:
self._cache = collections.defaultdict(list)
for entry in self._database.values():
self._cache[entry.token].append(entry)
return self._cache
def entries(self) -> ValuesView[TokenizedStringEntry]:
return self._database.values()
def collisions(self) -> Iterator[Tuple[int, List[TokenizedStringEntry]]]:
for token, entries in self.token_to_entries.items():
if len(entries) > 1:
yield token, entries
def mark_removals(
self,
all_entries: Iterable[TokenizedStringEntry],
removal_date: Optional[datetime] = None
) -> List[TokenizedStringEntry]:
self._cache = None
if removal_date is None:
removal_date = datetime.now()
all_keys = frozenset(entry.key() for entry in all_entries)
removed = []
for entry in self._database.values():
if (entry.key() not in all_keys
and (entry.date_removed is None
or removal_date < entry.date_removed)):
entry.date_removed = removal_date
removed.append(entry)
return removed
def add(self, entries: Iterable[TokenizedStringEntry]) -> None:
self._cache = None
for new_entry in entries:
try:
entry = self._database[new_entry.key()]
entry.domain = new_entry.domain
entry.date_removed = None
except KeyError:
self._database[new_entry.key()] = TokenizedStringEntry(
new_entry.token, new_entry.string, new_entry.domain)
def purge(
self,
date_removed_cutoff: Optional[datetime] = None
) -> List[TokenizedStringEntry]:
self._cache = None
if date_removed_cutoff is None:
date_removed_cutoff = datetime.max
to_delete = [
entry for _, entry in self._database.items()
if entry.date_removed and entry.date_removed <= date_removed_cutoff
]
for entry in to_delete:
del self._database[entry.key()]
return to_delete
def merge(self, *databases: 'Database') -> None:
self._cache = None
for other_db in databases:
for entry in other_db.entries():
key = entry.key()
if key in self._database:
self._database[key].update_date_removed(entry.date_removed)
else:
self._database[key] = entry
def filter(
self,
include: Iterable[Union[str, Pattern[str]]] = (),
exclude: Iterable[Union[str, Pattern[str]]] = (),
replace: Iterable[Tuple[Union[str, Pattern[str]], str]] = ()
) -> None:
self._cache = None
to_delete: List[_EntryKey] = []
if include:
include_re = [re.compile(pattern) for pattern in include]
to_delete.extend(
key for key, val in self._database.items()
if not any(rgx.search(val.string) for rgx in include_re))
if exclude:
exclude_re = [re.compile(pattern) for pattern in exclude]
to_delete.extend(key for key, val in self._database.items() if any(
rgx.search(val.string) for rgx in exclude_re))
for key in to_delete:
del self._database[key]
for search, replacement in replace:
search = re.compile(search)
for value in self._database.values():
value.string = search.sub(replacement, value.string)
def __len__(self) -> int:
return len(self.entries())
def __str__(self) -> str:
csv_output = io.BytesIO()
write_csv(self, csv_output)
return csv_output.getvalue().decode()
def parse_csv(fd) -> Iterable[TokenizedStringEntry]:
for line in csv.reader(fd):
try:
token_str, date_str, string_literal = line
token = int(token_str, 16)
date = (datetime.strptime(date_str, DATE_FORMAT)
if date_str.strip() else None)
yield TokenizedStringEntry(token, string_literal, DEFAULT_DOMAIN,
date)
except (ValueError, UnicodeDecodeError) as err:
_LOG.error('Failed to parse tokenized string entry %s: %s', line,
err)
def write_csv(database: Database, fd: BinaryIO) -> None:
for entry in sorted(database.entries()):
fd.write('{:08x},{:10},"{}"\n'.format(
entry.token,
entry.date_removed.strftime(DATE_FORMAT) if entry.date_removed else
'', entry.string.replace('"', '""')).encode()) # escape " as ""
class _BinaryFileFormat(NamedTuple):
magic: bytes = b'TOKENS\0\0'
header: struct.Struct = struct.Struct('<8sI4x')
entry: struct.Struct = struct.Struct('<IBBH')
BINARY_FORMAT = _BinaryFileFormat()
class DatabaseFormatError(Exception):
def file_is_binary_database(fd: BinaryIO) -> bool:
try:
fd.seek(0)
magic = fd.read(len(BINARY_FORMAT.magic))
fd.seek(0)
return BINARY_FORMAT.magic == magic
except IOError:
return False
def _check_that_file_is_csv_database(path: Path) -> None:
try:
with path.open('rb') as fd:
data = fd.read(8) # Read 8 bytes, which should be the first token.
if not data:
return # File is empty, which is valid CSV.
if len(data) != 8:
raise DatabaseFormatError(
f'Attempted to read {path} as a CSV token database, but the '
f'file is too short ({len(data)} B)')
# Make sure the first 8 chars are a valid hexadecimal number.
_ = int(data.decode(), 16)
except (IOError, UnicodeDecodeError, ValueError) as err:
raise DatabaseFormatError(
f'Encountered error while reading {path} as a CSV token database'
) from err
def parse_binary(fd: BinaryIO) -> Iterable[TokenizedStringEntry]:
magic, entry_count = BINARY_FORMAT.header.unpack(
fd.read(BINARY_FORMAT.header.size))
if magic != BINARY_FORMAT.magic:
raise DatabaseFormatError(
f'Binary token database magic number mismatch (found {magic!r}, '
f'expected {BINARY_FORMAT.magic!r}) while reading from {fd}')
entries = []
for _ in range(entry_count):
token, day, month, year = BINARY_FORMAT.entry.unpack(
fd.read(BINARY_FORMAT.entry.size))
try:
date_removed: Optional[datetime] = datetime(year, month, day)
except ValueError:
date_removed = None
entries.append((token, date_removed))
# Read the entire string table and define a function for looking up strings.
string_table = fd.read()
def read_string(start):
end = string_table.find(b'\0', start)
return string_table[start:string_table.find(b'\0', start)].decode(
), end + 1
offset = 0
for token, removed in entries:
string, offset = read_string(offset)
yield TokenizedStringEntry(token, string, DEFAULT_DOMAIN, removed)
def write_binary(database: Database, fd: BinaryIO) -> None:
entries = sorted(database.entries())
fd.write(BINARY_FORMAT.header.pack(BINARY_FORMAT.magic, len(entries)))
string_table = bytearray()
for entry in entries:
if entry.date_removed:
removed_day = entry.date_removed.day
removed_month = entry.date_removed.month
removed_year = entry.date_removed.year
else:
# If there is no removal date, use the special value 0xffffffff for
# the day/month/year. That ensures that still-present tokens appear
# as the newest tokens when sorted by removal date.
removed_day = 0xff
removed_month = 0xff
removed_year = 0xffff
string_table += entry.string.encode()
string_table.append(0)
fd.write(
BINARY_FORMAT.entry.pack(entry.token, removed_day, removed_month,
removed_year))
fd.write(string_table)
class DatabaseFile(Database):
def __init__(self, path: Union[Path, str]):
self.path = Path(path)
# Read the path as a packed binary file.
with self.path.open('rb') as fd:
if file_is_binary_database(fd):
super().__init__(parse_binary(fd))
self._export = write_binary
return
# Read the path as a CSV file.
_check_that_file_is_csv_database(self.path)
with self.path.open('r', newline='') as file:
super().__init__(parse_csv(file))
self._export = write_csv
def write_to_file(self, path: Optional[Union[Path, str]] = None) -> None:
with open(self.path if path is None else path, 'wb') as fd:
self._export(self, fd)
| true | true |
f7206b36e5a22338c09290307a5bbcd5356c269a | 366 | py | Python | chatterbot/ext/django_chatterbot/migrations/0016_statement_stemmed_text.py | dieterwarson/ChatterBot | 69c674218be274bca1f47c105b09995373e09f47 | [
"BSD-3-Clause"
] | null | null | null | chatterbot/ext/django_chatterbot/migrations/0016_statement_stemmed_text.py | dieterwarson/ChatterBot | 69c674218be274bca1f47c105b09995373e09f47 | [
"BSD-3-Clause"
] | null | null | null | chatterbot/ext/django_chatterbot/migrations/0016_statement_stemmed_text.py | dieterwarson/ChatterBot | 69c674218be274bca1f47c105b09995373e09f47 | [
"BSD-3-Clause"
] | null | null | null | from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the ``stemmed_text`` column to the Statement model."""

    dependencies = [
        ('django_chatterbot', '0015_statement_persona'),
    ]

    operations = [
        migrations.AddField(
            model_name='statement',
            name='stemmed_text',
            # blank=True lets existing rows stay valid without a default.
            field=models.CharField(blank=True, max_length=400),
        ),
    ]
]
| 21.529412 | 63 | 0.60929 | from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_chatterbot', '0015_statement_persona'),
]
operations = [
migrations.AddField(
model_name='statement',
name='stemmed_text',
field=models.CharField(blank=True, max_length=400),
),
]
| true | true |
f7206bcb4173f335d34eb68cdae8a22c3c3c3a67 | 1,928 | py | Python | docs_src/sql_databases/sql_app/alt_main.py | Aryabhata-Rootspring/fastapi | f6237ad05a8468ac19c591181adad38d75372c46 | [
"MIT"
] | 53,007 | 2018-12-08T10:05:29.000Z | 2022-03-31T23:30:02.000Z | docs_src/sql_databases/sql_app/alt_main.py | Aryabhata-Rootspring/fastapi | f6237ad05a8468ac19c591181adad38d75372c46 | [
"MIT"
] | 4,155 | 2019-01-05T05:07:49.000Z | 2022-03-31T21:25:38.000Z | docs_src/sql_databases/sql_app/alt_main.py | Aryabhata-Rootspring/fastapi | f6237ad05a8468ac19c591181adad38d75372c46 | [
"MIT"
] | 4,092 | 2018-12-09T16:21:00.000Z | 2022-03-31T07:59:45.000Z | from typing import List
from fastapi import Depends, FastAPI, HTTPException, Request, Response
from sqlalchemy.orm import Session
from . import crud, models, schemas
from .database import SessionLocal, engine
models.Base.metadata.create_all(bind=engine)
app = FastAPI()
@app.middleware("http")
async def db_session_middleware(request: Request, call_next):
    """Open a SQLAlchemy session per request and close it when done.

    The 500 placeholder response is only returned if ``call_next`` raises;
    the ``finally`` block guarantees the session is always closed.
    """
    response = Response("Internal server error", status_code=500)
    try:
        request.state.db = SessionLocal()
        response = await call_next(request)
    finally:
        request.state.db.close()
    return response
# Dependency
def get_db(request: Request):
    """Return the per-request database session opened by the middleware."""
    return request.state.db
@app.post("/users/", response_model=schemas.User)
def create_user(user: schemas.UserCreate, db: Session = Depends(get_db)):
    """Register a new user, rejecting duplicate email addresses with 400."""
    existing = crud.get_user_by_email(db, email=user.email)
    if existing:
        raise HTTPException(status_code=400, detail="Email already registered")
    return crud.create_user(db=db, user=user)
@app.get("/users/", response_model=List[schemas.User])
def read_users(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
    """List users with offset/limit pagination."""
    users = crud.get_users(db, skip=skip, limit=limit)
    return users
@app.get("/users/{user_id}", response_model=schemas.User)
def read_user(user_id: int, db: Session = Depends(get_db)):
    """Fetch a single user by id, returning 404 if it does not exist."""
    db_user = crud.get_user(db, user_id=user_id)
    if db_user is None:
        raise HTTPException(status_code=404, detail="User not found")
    return db_user
@app.post("/users/{user_id}/items/", response_model=schemas.Item)
def create_item_for_user(
    user_id: int, item: schemas.ItemCreate, db: Session = Depends(get_db)
):
    """Create an item owned by the given user."""
    return crud.create_user_item(db=db, item=item, user_id=user_id)
@app.get("/items/", response_model=List[schemas.Item])
def read_items(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
    """List items with offset/limit pagination."""
    items = crud.get_items(db, skip=skip, limit=limit)
    return items
| 30.603175 | 79 | 0.721992 | from typing import List
from fastapi import Depends, FastAPI, HTTPException, Request, Response
from sqlalchemy.orm import Session
from . import crud, models, schemas
from .database import SessionLocal, engine
models.Base.metadata.create_all(bind=engine)
app = FastAPI()
@app.middleware("http")
async def db_session_middleware(request: Request, call_next):
response = Response("Internal server error", status_code=500)
try:
request.state.db = SessionLocal()
response = await call_next(request)
finally:
request.state.db.close()
return response
def get_db(request: Request):
return request.state.db
@app.post("/users/", response_model=schemas.User)
def create_user(user: schemas.UserCreate, db: Session = Depends(get_db)):
db_user = crud.get_user_by_email(db, email=user.email)
if db_user:
raise HTTPException(status_code=400, detail="Email already registered")
return crud.create_user(db=db, user=user)
@app.get("/users/", response_model=List[schemas.User])
def read_users(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
users = crud.get_users(db, skip=skip, limit=limit)
return users
@app.get("/users/{user_id}", response_model=schemas.User)
def read_user(user_id: int, db: Session = Depends(get_db)):
db_user = crud.get_user(db, user_id=user_id)
if db_user is None:
raise HTTPException(status_code=404, detail="User not found")
return db_user
@app.post("/users/{user_id}/items/", response_model=schemas.Item)
def create_item_for_user(
user_id: int, item: schemas.ItemCreate, db: Session = Depends(get_db)
):
return crud.create_user_item(db=db, item=item, user_id=user_id)
@app.get("/items/", response_model=List[schemas.Item])
def read_items(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
items = crud.get_items(db, skip=skip, limit=limit)
return items
| true | true |
f7206d083d469a643b9a783b3a819077f502c23e | 450 | py | Python | Lib/fontTools/ttLib/tables/T_S_I__2.py | anntzer/fonttools | 726cd67549956b985bbbe83e26fb0af9da59ddf7 | [
"MIT",
"BSD-3-Clause"
] | 2 | 2021-04-07T16:47:04.000Z | 2022-01-15T04:01:01.000Z | Lib/fontTools/ttLib/tables/T_S_I__2.py | anntzer/fonttools | 726cd67549956b985bbbe83e26fb0af9da59ddf7 | [
"MIT",
"BSD-3-Clause"
] | 74 | 2020-01-30T07:27:54.000Z | 2021-08-03T05:47:17.000Z | Lib/fontTools/ttLib/tables/T_S_I__2.py | anntzer/fonttools | 726cd67549956b985bbbe83e26fb0af9da59ddf7 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2020-01-22T20:06:09.000Z | 2020-01-22T20:06:09.000Z | """ TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT)
tool to store its hinting source data.
TSI2 is the index table containing the lengths and offsets for the glyph
programs that are contained in the TSI3 table. It uses the same format as
the TSI0 table.
"""
from fontTools.misc.py23 import *
from fontTools import ttLib
superclass = ttLib.getTableClass("TSI0")
class table_T_S_I__2(superclass):
    # Compile/decompile behavior is inherited from the TSI0 table class; only
    # the companion data table differs: TSI3 holds the glyph programs that
    # this index table points into.
    dependencies = ["TSI3"]
| 28.125 | 77 | 0.768889 | from fontTools.misc.py23 import *
from fontTools import ttLib
superclass = ttLib.getTableClass("TSI0")
class table_T_S_I__2(superclass):
dependencies = ["TSI3"]
| true | true |
f7206d8f4f866ba92ed05ffaf316f027c8f23f04 | 405 | py | Python | psqlflow/printers/graph_printer.py | liuhenry/psqlflow | a4d39794d437a16fbf89582d3f7b8e7425bdfca5 | [
"MIT"
] | 3 | 2016-12-11T19:50:44.000Z | 2018-05-24T13:52:09.000Z | psqlflow/printers/graph_printer.py | liuhenry/psqlflow | a4d39794d437a16fbf89582d3f7b8e7425bdfca5 | [
"MIT"
] | null | null | null | psqlflow/printers/graph_printer.py | liuhenry/psqlflow | a4d39794d437a16fbf89582d3f7b8e7425bdfca5 | [
"MIT"
] | null | null | null | import pygraphviz as pgv
from .printer import Printer
class GraphPrinter(Printer):
    """
    Exports flows to graphviz
    """
    def __init__(self, *args, **kwargs):
        super(GraphPrinter, self).__init__(*args, **kwargs)

    def new_obj(self):
        # Non-strict so parallel edges are preserved; rankdir='LR' lays the
        # flow out left-to-right.
        return pgv.AGraph(strict=False, directed=True, rankdir='LR')

    @staticmethod
    def add_edge(graph, a, b):
        # Draw a directed edge from node a to node b.
        graph.add_edge(a, b)
| 21.315789 | 68 | 0.644444 | import pygraphviz as pgv
from .printer import Printer
class GraphPrinter(Printer):
def __init__(self, *args, **kwargs):
super(GraphPrinter, self).__init__(*args, **kwargs)
def new_obj(self):
return pgv.AGraph(strict=False, directed=True, rankdir='LR')
@staticmethod
def add_edge(graph, a, b):
graph.add_edge(a, b)
| true | true |
f7206e62311f2ae8bf498980b4772f3ea02b5efc | 6,337 | py | Python | apps/alerts/main.py | Cal-CS-61A-Staff/examtool-web | c31b8596fde75c54fe6436400bb6d2889d7b1283 | [
"MIT"
] | 1 | 2020-05-06T22:34:44.000Z | 2020-05-06T22:34:44.000Z | apps/alerts/main.py | Cal-CS-61A-Staff/examtool-web | c31b8596fde75c54fe6436400bb6d2889d7b1283 | [
"MIT"
] | 5 | 2020-07-13T09:29:01.000Z | 2020-10-18T06:33:30.000Z | apps/alerts/main.py | Cal-CS-61A-Staff/examtool-web | c31b8596fde75c54fe6436400bb6d2889d7b1283 | [
"MIT"
] | 5 | 2020-05-13T16:10:24.000Z | 2020-09-23T18:41:06.000Z | import time
from os import getenv
from flask import jsonify, abort
from google.cloud import firestore
from google.oauth2 import id_token
from google.auth.transport import requests as g_requests
from api import (
process_ok_exam_upload,
is_admin,
clear_collection,
get_announcements,
get_email_from_secret,
generate_audio,
)
# this can be public
CLIENT_ID = "713452892775-59gliacuhbfho8qvn4ctngtp3858fgf9.apps.googleusercontent.com"
DEV_EMAIL = getenv("DEV_EMAIL", "exam-test@berkeley.edu")
def update_cache():
    """(Re)load the static frontend assets into the module-level caches.

    Called once at import time, and again per request in the dev environment
    so local edits are picked up without a restart.
    """
    global main_html, main_js
    # Specify UTF-8 explicitly; the platform default encoding is
    # locale-dependent and could corrupt non-ASCII characters in the assets.
    with open("static/index.html", encoding="utf-8") as f:
        main_html = f.read()
    with open("static/main.js", encoding="utf-8") as f:
        main_js = f.read()
update_cache()
def get_email(request):
    """Return the verified email address from the request's Google ID token.

    In the dev environment authentication is bypassed and DEV_EMAIL is
    returned. Raises ValueError if the token's issuer is not Google.
    """
    if getenv("ENV") == "dev":
        return DEV_EMAIL
    token = request.json["token"]
    # validate token
    id_info = id_token.verify_oauth2_token(token, g_requests.Request(), CLIENT_ID)
    if id_info["iss"] not in ["accounts.google.com", "https://accounts.google.com"]:
        raise ValueError("Wrong issuer.")
    return id_info["email"]
def index(request):
    """Single Cloud-Function-style entry point dispatching on request.path.

    Serves the static frontend, the student data endpoint, and the staff
    announcement-management endpoints (which require admin authorization).
    Outside the dev environment, unexpected errors are logged and reported
    as {"success": False} rather than raised.
    """
    try:
        if getenv("ENV") == "dev":
            update_cache()  # pick up local edits to the static assets
        db = firestore.Client()
        # Static assets and unauthenticated endpoints first.
        if request.path.endswith("main.js"):
            return main_js
        if request.path.endswith("list_exams"):
            return jsonify(
                db.collection("exam-alerts")
                .document("all")
                .get()
                .to_dict()["exam-list"]
            )
        if request.path == "/" or request.json is None:
            return main_html
        if request.path.endswith("upload_ok_exam"):
            # Authorization for uploads is handled inside
            # process_ok_exam_upload via the provided secret.
            process_ok_exam_upload(db, request.json["data"], request.json["secret"])
            return jsonify({"success": True})
        exam = request.json["exam"]
        course = exam.split("-")[0]
        # Student-facing endpoint: returns the student's exam window plus any
        # announcements (with audio) they have not yet received.
        if request.path.endswith("fetch_data"):
            received_audio = request.json.get("receivedAudio")
            email = get_email(request)
            student_data = (
                db.collection("exam-alerts")
                .document(exam)
                .collection("students")
                .document(email)
                .get()
                .to_dict()
            )
            announcements = list(
                db.collection("exam-alerts")
                .document(exam)
                .collection("announcements")
                .stream()
            )
            return jsonify(
                {
                    "success": True,
                    "exam_type": "ok-exam",
                    "questions": [],
                    "startTime": student_data["start_time"],
                    "endTime": student_data["end_time"],
                    # "questions": [
                    #     {
                    #         "questionName": question["student_question_name"],
                    #         "startTime": question["start_time"],
                    #         "endTime": question["end_time"],
                    #     }
                    #     for question in student_data["questions"]
                    # ],
                    # The lambda fetches announcement audio lazily, only for
                    # announcements the client has not yet received.
                    "announcements": get_announcements(
                        student_data,
                        announcements,
                        received_audio,
                        lambda x: (
                            db.collection("exam-alerts")
                            .document(exam)
                            .collection("announcement_audio")
                            .document(x)
                            .get()
                            .to_dict()
                            or {}
                        ).get("audio"),
                    ),
                }
            )
        # only staff endpoints from here onwards
        email = (
            get_email_from_secret(request.json["secret"])
            if "secret" in request.json
            else get_email(request)
        )
        if not is_admin(email, course):
            abort(401)
        if request.path.endswith("fetch_staff_data"):
            # No action needed: falls through to the common state response.
            pass
        elif request.path.endswith("add_announcement"):
            announcement = request.json["announcement"]
            announcement["timestamp"] = time.time()
            ref = (
                db.collection("exam-alerts")
                .document(exam)
                .collection("announcements")
                .document()
            )
            ref.set(announcement)
            # Synthesize audio for the announcement unless the (possibly
            # overridden) spoken message is empty.
            spoken_message = announcement.get("spoken_message", announcement["message"])
            if spoken_message:
                audio = generate_audio(spoken_message)
                db.collection("exam-alerts").document(exam).collection(
                    "announcement_audio"
                ).document(ref.id).set({"audio": audio})
        elif request.path.endswith("clear_announcements"):
            clear_collection(
                db,
                db.collection("exam-alerts").document(exam).collection("announcements"),
            )
            clear_collection(
                db,
                db.collection("exam-alerts")
                .document(exam)
                .collection("announcement_audio"),
            )
        elif request.path.endswith("delete_announcement"):
            target = request.json["id"]
            # NOTE(review): the corresponding announcement_audio document is
            # left in place; presumably it is orphaned harmlessly — confirm.
            db.collection("exam-alerts").document(exam).collection(
                "announcements"
            ).document(target).delete()
        else:
            abort(404)
        # all staff endpoints return an updated state
        exam_data = db.collection("exam-alerts").document(exam).get().to_dict()
        announcements = sorted(
            (
                {"id": announcement.id, **announcement.to_dict()}
                for announcement in db.collection("exam-alerts")
                .document(exam)
                .collection("announcements")
                .stream()
            ),
            key=lambda announcement: announcement["timestamp"],
            reverse=True,
        )
        return jsonify(
            {"success": True, "exam": exam_data, "announcements": announcements}
        )
    except Exception as e:
        if getenv("ENV") == "dev":
            raise
        print(e)
        print(dict(request.json))
        return jsonify({"success": False})
| 31.844221 | 88 | 0.504024 | import time
from os import getenv
from flask import jsonify, abort
from google.cloud import firestore
from google.oauth2 import id_token
from google.auth.transport import requests as g_requests
from api import (
process_ok_exam_upload,
is_admin,
clear_collection,
get_announcements,
get_email_from_secret,
generate_audio,
)
# OAuth 2.0 client ID used to verify Google Sign-In ID tokens in get_email().
CLIENT_ID = "713452892775-59gliacuhbfho8qvn4ctngtp3858fgf9.apps.googleusercontent.com"
# Email impersonated when ENV == "dev" and token verification is skipped.
DEV_EMAIL = getenv("DEV_EMAIL", "exam-test@berkeley.edu")
def update_cache():
    """Reload the static front-end assets into the module-level caches.

    Reads ``static/index.html`` and ``static/main.js`` from disk and stores
    their contents in the globals ``main_html`` and ``main_js``, which the
    request handler serves directly.
    """
    global main_html, main_js
    with open("static/index.html") as html_file:
        main_html = html_file.read()
    with open("static/main.js") as js_file:
        main_js = js_file.read()
# Prime the caches once at import time so requests can be served immediately.
update_cache()
def get_email(request):
    """Return the verified email address of the requesting Google user.

    In the dev environment verification is skipped and ``DEV_EMAIL`` is
    returned. Otherwise the ``token`` field of the JSON body is validated
    as a Google ID token for ``CLIENT_ID`` and its email claim returned.

    Raises:
        ValueError: if the token was not issued by Google.
    """
    if getenv("ENV") == "dev":
        return DEV_EMAIL
    claims = id_token.verify_oauth2_token(
        request.json["token"], g_requests.Request(), CLIENT_ID
    )
    valid_issuers = ["accounts.google.com", "https://accounts.google.com"]
    if claims["iss"] not in valid_issuers:
        raise ValueError("Wrong issuer.")
    return claims["email"]
def index(request):
    """Cloud Function entry point for the exam-alerts service.

    Dispatches on ``request.path``: serves the cached static front end,
    student-facing exam/announcement fetches, and staff-only announcement
    management backed by the Firestore "exam-alerts" collections. Outside
    dev, every unexpected error is logged and reported to the client as
    ``{"success": False}``.
    """
    try:
        if getenv("ENV") == "dev":
            # Re-read static assets on every request so local edits show up
            # without restarting the function.
            update_cache()
        db = firestore.Client()
        # --- endpoints that need no authentication ---
        if request.path.endswith("main.js"):
            return main_js
        if request.path.endswith("list_exams"):
            return jsonify(
                db.collection("exam-alerts")
                .document("all")
                .get()
                .to_dict()["exam-list"]
            )
        if request.path == "/" or request.json is None:
            return main_html
        if request.path.endswith("upload_ok_exam"):
            # Upload authenticates with a shared secret instead of a user token.
            process_ok_exam_upload(db, request.json["data"], request.json["secret"])
            return jsonify({"success": True})
        exam = request.json["exam"]
        # The exam id's prefix before the first "-" is used as the course key
        # for the admin check below.
        course = exam.split("-")[0]
        if request.path.endswith("fetch_data"):
            # Student-facing fetch of exam timing plus pending announcements.
            received_audio = request.json.get("receivedAudio")
            email = get_email(request)
            student_data = (
                db.collection("exam-alerts")
                .document(exam)
                .collection("students")
                .document(email)
                .get()
                .to_dict()
            )
            announcements = list(
                db.collection("exam-alerts")
                .document(exam)
                .collection("announcements")
                .stream()
            )
            return jsonify(
                {
                    "success": True,
                    "exam_type": "ok-exam",
                    "questions": [],
                    "startTime": student_data["start_time"],
                    "endTime": student_data["end_time"],
                    "announcements": get_announcements(
                        student_data,
                        announcements,
                        received_audio,
                        # Look up the stored audio clip for an announcement id;
                        # a missing document resolves to None.
                        lambda x: (
                            db.collection("exam-alerts")
                            .document(exam)
                            .collection("announcement_audio")
                            .document(x)
                            .get()
                            .to_dict()
                            or {}
                        ).get("audio"),
                    ),
                }
            )
        # only staff endpoints from here onwards
        email = (
            get_email_from_secret(request.json["secret"])
            if "secret" in request.json
            else get_email(request)
        )
        if not is_admin(email, course):
            abort(401)
        if request.path.endswith("fetch_staff_data"):
            # No mutation needed; falls through to the shared state dump below.
            pass
        elif request.path.endswith("add_announcement"):
            announcement = request.json["announcement"]
            announcement["timestamp"] = time.time()
            ref = (
                db.collection("exam-alerts")
                .document(exam)
                .collection("announcements")
                .document()
            )
            ref.set(announcement)
            # Generate audio unless the spoken text is explicitly empty.
            spoken_message = announcement.get("spoken_message", announcement["message"])
            if spoken_message:
                audio = generate_audio(spoken_message)
                db.collection("exam-alerts").document(exam).collection(
                    "announcement_audio"
                ).document(ref.id).set({"audio": audio})
        elif request.path.endswith("clear_announcements"):
            clear_collection(
                db,
                db.collection("exam-alerts").document(exam).collection("announcements"),
            )
            clear_collection(
                db,
                db.collection("exam-alerts")
                .document(exam)
                .collection("announcement_audio"),
            )
        elif request.path.endswith("delete_announcement"):
            target = request.json["id"]
            db.collection("exam-alerts").document(exam).collection(
                "announcements"
            ).document(target).delete()
        else:
            abort(404)
        # all staff endpoints return an updated state
        exam_data = db.collection("exam-alerts").document(exam).get().to_dict()
        announcements = sorted(
            (
                {"id": announcement.id, **announcement.to_dict()}
                for announcement in db.collection("exam-alerts")
                .document(exam)
                .collection("announcements")
                .stream()
            ),
            key=lambda announcement: announcement["timestamp"],
            reverse=True,
        )
        return jsonify(
            {"success": True, "exam": exam_data, "announcements": announcements}
        )
    except Exception as e:
        # In dev, surface the full traceback; in prod, log and degrade.
        if getenv("ENV") == "dev":
            raise
        print(e)
        print(dict(request.json))
        return jsonify({"success": False})
| true | true |
f7206ebe8fad56aa6688ff5f36b4dc7a3840b16f | 413 | py | Python | backend/app/models/user.py | Infam852/IoT-project | 673d8a96676e046331550b9c16c0610de5733f73 | [
"MIT"
] | null | null | null | backend/app/models/user.py | Infam852/IoT-project | 673d8a96676e046331550b9c16c0610de5733f73 | [
"MIT"
] | null | null | null | backend/app/models/user.py | Infam852/IoT-project | 673d8a96676e046331550b9c16c0610de5733f73 | [
"MIT"
] | 1 | 2021-12-18T19:33:01.000Z | 2021-12-18T19:33:01.000Z | from sqlalchemy import Column, Integer, ForeignKey, DateTime
from sqlalchemy.sql.sqltypes import Boolean, String
from app.db.database import Base
class UserModel(Base):
    """SQLAlchemy ORM model for an application user account."""
    __tablename__ = "users"
    # Surrogate primary key.
    id = Column(Integer, primary_key=True, index=True)
    # Login name; indexed for lookups. Uniqueness is not enforced at the DB
    # level here — presumably handled by the application layer (TODO confirm).
    username = Column(String, nullable=False, index=True)
    # Soft-disable flag; presumably blocks authentication when True — confirm
    # against the auth layer.
    disabled = Column(Boolean, default=False)
    # Password hash (never the plaintext password).
    hashed_password = Column(String, nullable=False)
| 29.5 | 60 | 0.755448 | from sqlalchemy import Column, Integer, ForeignKey, DateTime
from sqlalchemy.sql.sqltypes import Boolean, String
from app.db.database import Base
class UserModel(Base):
    """SQLAlchemy ORM model for an application user account."""
    __tablename__ = "users"
    # Surrogate primary key.
    id = Column(Integer, primary_key=True, index=True)
    # Login name; indexed for lookups. Uniqueness is not enforced at the DB
    # level here — presumably handled by the application layer (TODO confirm).
    username = Column(String, nullable=False, index=True)
    # Soft-disable flag; presumably blocks authentication when True — confirm
    # against the auth layer.
    disabled = Column(Boolean, default=False)
    # Password hash (never the plaintext password).
    hashed_password = Column(String, nullable=False)
| true | true |
f7206ec0c3d11ce24235924a1f5bf4631efd543f | 1,715 | py | Python | tools/generate_taint_models/tests/get_exit_nodes_test.py | rvantonder/pyre-check-1 | 600ec9656ece5fff21598f4248c55089714bf590 | [
"MIT"
] | null | null | null | tools/generate_taint_models/tests/get_exit_nodes_test.py | rvantonder/pyre-check-1 | 600ec9656ece5fff21598f4248c55089714bf590 | [
"MIT"
] | null | null | null | tools/generate_taint_models/tests/get_exit_nodes_test.py | rvantonder/pyre-check-1 | 600ec9656ece5fff21598f4248c55089714bf590 | [
"MIT"
] | null | null | null | # Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from typing import Callable
from ..get_exit_nodes import ExitNodeGenerator
from ..model_generator import Configuration
from .test_functions import __name__ as qualifier, all_functions
class GetExitNodesTest(unittest.TestCase):
    """Tests for ExitNodeGenerator's exit-node taint model generation."""
    def test_compute_models(self):
        """Every fixture function yields a ReturnedToUser sink model, and
        whitelisted views are omitted from the output."""
        sink = "TaintSink[ReturnedToUser]"
        self.assertEqual(
            list(ExitNodeGenerator().compute_models(all_functions)),
            [
                f"def {qualifier}.TestClass.methodA(self, x) -> {sink}: ...",
                f"def {qualifier}.TestClass.methodB(self, *args) -> {sink}: ...",
                f"def {qualifier}.testA() -> {sink}: ...",
                f"def {qualifier}.testB(x) -> {sink}: ...",
                f"def {qualifier}.testC(x) -> {sink}: ...",
                f"def {qualifier}.testD(x, *args) -> {sink}: ...",
                f"def {qualifier}.testE(x, **kwargs) -> {sink}: ...",
            ],
        )
        # FIX: Configuration.whitelisted_views is process-global class state;
        # restore it afterwards so the whitelist cannot leak into other tests.
        previous_whitelist = Configuration.whitelisted_views
        Configuration.whitelisted_views = [f"{qualifier}.TestClass.methodA"]
        try:
            self.assertEqual(
                list(ExitNodeGenerator().compute_models(all_functions)),
                [
                    f"def {qualifier}.TestClass.methodB(self, *args) -> {sink}: ...",
                    f"def {qualifier}.testA() -> {sink}: ...",
                    f"def {qualifier}.testB(x) -> {sink}: ...",
                    f"def {qualifier}.testC(x) -> {sink}: ...",
                    f"def {qualifier}.testD(x, *args) -> {sink}: ...",
                    f"def {qualifier}.testE(x, **kwargs) -> {sink}: ...",
                ],
            )
        finally:
            Configuration.whitelisted_views = previous_whitelist
| 41.829268 | 81 | 0.549271 |
import unittest
from typing import Callable
from ..get_exit_nodes import ExitNodeGenerator
from ..model_generator import Configuration
from .test_functions import __name__ as qualifier, all_functions
class GetExitNodesTest(unittest.TestCase):
    """Exercises ExitNodeGenerator.compute_models on the shared fixture
    functions, with and without a whitelist configured."""
    def test_compute_models(self):
        sink = "TaintSink[ReturnedToUser]"
        # Signatures of the fixture functions, in generator output order.
        signatures = [
            "TestClass.methodA(self, x)",
            "TestClass.methodB(self, *args)",
            "testA()",
            "testB(x)",
            "testC(x)",
            "testD(x, *args)",
            "testE(x, **kwargs)",
        ]
        expected = [
            f"def {qualifier}.{signature} -> {sink}: ..."
            for signature in signatures
        ]
        self.assertEqual(
            list(ExitNodeGenerator().compute_models(all_functions)), expected
        )
        # Whitelisting methodA removes exactly its model from the output.
        Configuration.whitelisted_views = [f"{qualifier}.TestClass.methodA"]
        self.assertEqual(
            list(ExitNodeGenerator().compute_models(all_functions)), expected[1:]
        )
| true | true |
f7206f20aeb3af06e52189f7a1589c4376b4e9c4 | 1,677 | py | Python | 09.py | Michanix/Algorithms-Intro-Course | c81fa38b05199a42eaeb48567447ee3f6b1e535e | [
"MIT"
] | null | null | null | 09.py | Michanix/Algorithms-Intro-Course | c81fa38b05199a42eaeb48567447ee3f6b1e535e | [
"MIT"
] | null | null | null | 09.py | Michanix/Algorithms-Intro-Course | c81fa38b05199a42eaeb48567447ee3f6b1e535e | [
"MIT"
] | null | null | null | from random import randrange
from time import time
def bubble_sort(arr):
    """Sort ``arr`` in place with a classic bubble sort and return it.

    Each pass sweeps from the back of the list toward ``pass_end``, letting
    the smallest remaining element bubble to the front. O(n^2) time.
    """
    n = len(arr)
    for pass_end in range(n):
        for idx in range(n - 1, pass_end, -1):
            if arr[idx] < arr[idx - 1]:
                # Swap adjacent out-of-order elements.
                arr[idx], arr[idx - 1] = arr[idx - 1], arr[idx]
    return arr
def opt_bubble_sort(arr):
    """Sort ``arr`` in place with an early-exit bubble sort and return it.

    Repeatedly sweeps the list forward, swapping out-of-order neighbours,
    and stops as soon as a full pass makes no swap — O(n) on already
    sorted input, O(n^2) worst case.
    """
    while True:
        swapped = False
        for i in range(len(arr) - 1):
            if arr[i] > arr[i + 1]:
                # Swap adjacent out-of-order elements.
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
                swapped = True
        if not swapped:
            # A full pass made no swap, so the list is sorted.
            break
    # BUG FIX: the original trailing loop used `range(len(arr)-1, 0)`, which
    # is always empty (start > stop with the default positive step), so it
    # was dead code; removed without changing behaviour.
    return arr
# измерить время работы алгоритма в случайом массиве
def check_time_in_random_arr(f):
    """Return the wall-clock seconds ``f`` takes on a random 1100-element list
    of integers in [0, 100)."""
    data = [randrange(100) for _ in range(1100)]
    started_at = time()
    f(data)
    return time() - started_at
# время работы алгоритма в сортированном массиве
def check_time(f):
    """Return the wall-clock seconds ``f`` takes on the already-sorted list
    [0, 1, ..., 1099] — the best case for the early-exit sort."""
    data = list(range(1100))
    started_at = time()
    f(data)
    return time() - started_at
# Benchmark both sorts on an already-sorted input (best case for the
# optimized version) and on a random input, then print a comparison
# report (message text is in Russian).
bubble_sort_time = check_time(bubble_sort)
opt_bubble_sort_time = check_time(opt_bubble_sort)
bubble_sort_time2 = check_time_in_random_arr(bubble_sort)
opt_bubble_sort_time2 = check_time_in_random_arr(opt_bubble_sort)
print('''
Время работы в уже отсортированном массиве:\n
Обычный пузырёк: {}\n
Модифицированный {}\n
Время работы в случайном массиве: \n
Обычный пузырёк: {}\n
Модифицированный: {}'''.format(bubble_sort_time, opt_bubble_sort_time, bubble_sort_time2, opt_bubble_sort_time2))
| 26.203125 | 117 | 0.603459 | from random import randrange
from time import time
def bubble_sort(arr):
    """Sort ``arr`` in place with a classic bubble sort and return it.

    Each pass sweeps from the back of the list toward index ``i``; the
    smallest remaining element bubbles to the front. O(n^2) time.
    """
    for i in range(len(arr)):
        for j in range(len(arr)-1, i, -1):
            if arr[j] < arr[j-1]:
                # Swap adjacent out-of-order elements.
                arr[j], arr[j-1] = arr[j-1], arr[j]
    return arr
def opt_bubble_sort(arr):
    """Sort ``arr`` in place with an early-exit bubble sort and return it.

    Repeatedly sweeps the list forward, swapping out-of-order neighbours,
    and stops as soon as a full pass makes no swap — O(n) on already
    sorted input, O(n^2) worst case.
    """
    while True:
        swapped = False
        for i in range(len(arr) - 1):
            if arr[i] > arr[i + 1]:
                # Swap adjacent out-of-order elements.
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
                swapped = True
        if not swapped:
            # A full pass made no swap, so the list is sorted.
            break
    # BUG FIX: the original trailing loop used `range(len(arr)-1, 0)`, which
    # is always empty (start > stop with the default positive step), so it
    # was dead code; removed without changing behaviour.
    return arr
def check_time_in_random_arr(f):
    """Return the wall-clock seconds ``f`` takes on a random 1100-element
    list of integers in [0, 100)."""
    arr = [randrange(100) for i in range(1100)]
    start = time()
    f(arr)
    end = time()
    return end - start
def check_time(f):
    """Return the wall-clock seconds ``f`` takes on the already-sorted list
    [0, 1, ..., 1099] — the best case for the early-exit sort."""
    arr = [i for i in range(1100)]
    start = time()
    f(arr)
    end = time()
    return end - start
# Benchmark both sorts on an already-sorted input (best case for the
# optimized version) and on a random input, then print a comparison
# report (message text is in Russian).
bubble_sort_time = check_time(bubble_sort)
opt_bubble_sort_time = check_time(opt_bubble_sort)
bubble_sort_time2 = check_time_in_random_arr(bubble_sort)
opt_bubble_sort_time2 = check_time_in_random_arr(opt_bubble_sort)
print('''
Время работы в уже отсортированном массиве:\n
Обычный пузырёк: {}\n
Модифицированный {}\n
Время работы в случайном массиве: \n
Обычный пузырёк: {}\n
Модифицированный: {}'''.format(bubble_sort_time, opt_bubble_sort_time, bubble_sort_time2, opt_bubble_sort_time2))
| true | true |
f72071c15f22bdfc2b6d13e6fb45864f32d756e1 | 5,175 | py | Python | nay/scrape_nominate_movie.py | kondounagi/japanese_movies_dataset | 349f217cd04e07fd44a401ecb2f2dcaea7bc2e5e | [
"MIT"
] | 1 | 2019-08-05T21:43:09.000Z | 2019-08-05T21:43:09.000Z | nay/scrape_nominate_movie.py | kondounagi/japanese_movies_dataset | 349f217cd04e07fd44a401ecb2f2dcaea7bc2e5e | [
"MIT"
] | 3 | 2020-03-31T05:53:37.000Z | 2021-12-13T20:07:39.000Z | nay/scrape_nominate_movie.py | kondounagi/japanese_movies_dataset | 349f217cd04e07fd44a401ecb2f2dcaea7bc2e5e | [
"MIT"
] | null | null | null | import re
import sys
import json
import requests
from bs4 import BeautifulSoup
def scrape_nominate_movie(year):
    """Scrape eiga.com metadata for every film nominated in ``year``.

    Film ids are read from ``nominate_id/<year>.txt`` (one per line). Each
    film's page is fetched and its title, release date, screen time,
    production studio, director(s), scriptwriter(s) and cast are extracted.
    ``prize`` is set to 1 when the title appears in ``best_prize_title``.

    Returns a list of per-film metadata dicts.
    """
    film_index = "https://eiga.com/movie/"
    # Regexes keyed to eiga.com's Japanese markup: screen time ("/NN分/"),
    # distributor ("配給:..."), title (映画「...」), date (YYYY年MM月DD日).
    re_time = re.compile(r"/\d*分/")
    re_production_studio = re.compile(r"配給:[^<]*")
    re_title = re.compile(r"映画「[^」]*」")
    re_date = re.compile(r"\d*年\d*月\d*日")
    year_film_data = []
    # Known prize-winning titles; spellings align with eiga.com.
    best_prize_title = [
        '万引き家族',
        '三度目の殺人',
        'シン・ゴジラ',
        '海街diary',
        '永遠の0',
        '舟を編む',
        '桐島、部活やめるってよ',
        '八日目の蟬',
        '告白',
        '沈まぬ太陽',
        'おくりびと',
        '東京タワー オカンとボクと、時々、オトン',
        'フラガール',
        'ALWAYS 三丁目の夕日',
        '半落ち',
        '壬生義士伝',
        'たそがれ清兵衛',
        '千と千尋の神隠し',
        '雨あがる',
        '鉄道員(ぽっぽや)',
        '愛を乞うひと',
        'もののけ姫',
        'Shall we ダンス?',
        '午後の遺言状',
        '忠臣蔵外伝 四谷怪談',
        '学校',
        'シコふんじゃった。',
        '息子',
        '少年時代',
        '黒い雨',
        '敦煌',
        'マルサの女',
        '火宅の人',
        '花いちもんめ',
        'お葬式',
        '楢山節考',
        '蒲田行進曲',
        '駅 STATION',
        'ツィゴイネルワイゼン',
        '復讐するは我にあり',
        '事件',
        '幸福の黄色いハンカチ',
    ]
    with open("nominate_id/" + str(year) + ".txt", "r") as f:
        for line in f.readlines():
            film_id = line.strip()
            # Per-film record with defaults for fields that may be missing.
            film_data = {}
            film_data["director"] = []
            film_data["scriptwriter"] = []
            film_data["performers"] = []
            film_data["screen_time"] = -1
            film_data["production_studio"] = ""
            film_data["title"] = ""
            film_data["year"] = year
            # The first id in the file is marked as the winner (presumably
            # the winner is listed first — TODO confirm); 2020 is skipped,
            # and best_prize_title below also sets the flag once the title
            # is known.
            if len(year_film_data) == 0 and year != 2020:
                film_data["prize"] = 1
            else:
                film_data["prize"] = 0
            # fetch top-1 movie result information
            content = requests.get(film_index + film_id).content
            soup = BeautifulSoup(content, features="lxml")
            # filter out screen time and production studio
            html_text = soup.prettify()
            production_studio = re_production_studio.search(html_text)
            screen_time = re_time.search(html_text)
            title = re_title.search(html_text)
            date = re_date.search(html_text)
            if production_studio:
                # Drop the "配給:" prefix.
                film_data["production_studio"] = (
                    production_studio.group(0)[3:].strip())
            if screen_time:
                # Strip the leading "/" and trailing "分/".
                film_data["screen_time"] = int(screen_time.group(0)[1:-2])
            if title:
                # Strip the surrounding 映画「 ... 」 brackets.
                film_data["title"] = title.group(0)[3:-1]
                if film_data["title"] in best_prize_title:
                    film_data["prize"] = 1
            else:
                # Log ids whose title could not be parsed.
                print(film_id)
            if date:
                # Split "YYYY年MM月DD日" on its kanji separators.
                date_str = date.group(0)
                film_data["year"] = date_str[0:date_str.find("年")]
                film_data["month"] = (
                    date_str[date_str.find("年") + 1:date_str.find("月")])
                film_data["day"] = (
                    date_str[date_str.find("月") + 1:date_str.find("日")])
            # filter out informative data
            staff_cast = soup.find(id="staff-cast")
            if staff_cast is not None:
                for div in staff_cast.find_all():
                    # When calling div["class"], return type is list[string]
                    if div.name == "dl" and div.has_attr("class") and div["class"][0] == "movie-staff":
                        # movie staff column: <dt> names the role, the
                        # following <dd>s carry the people for that role.
                        data_type = ""
                        for p in div.find_all():
                            if p.name == "dt":
                                if p.get_text().find("監督") != -1:
                                    data_type = "director"
                                elif p.get_text().find("脚本") != -1:
                                    data_type = "scriptwriter"
                                else:
                                    data_type = ""
                                # new meta data type can be added here
                            elif p.name == "dd" and len(data_type) > 0:
                                film_data[data_type].append(p.get_text().strip())
                    elif div.name == "ul" and div.has_attr("class") and div["class"][0] == "movie-cast":
                        # movie cast column: cast names live in <span>s.
                        for p in div.find_all():
                            if p.name == "span":
                                film_data["performers"].append(p.get_text().strip())
            year_film_data.append(film_data)
            sys.stdout.flush()
    return year_film_data
def main():
    """Scrape nominee metadata for every award year (1978–2020) and dump it
    to ``nominate_movie_meta_data.json`` as a single JSON object keyed by
    year, assigning each film a globally unique integer id starting at 10."""
    first_year, last_year = 1978, 2020
    movies_by_year = {}
    next_id = 10
    for year in range(first_year, last_year + 1):
        movies_by_year[year] = scrape_nominate_movie(year)
        # Number every film in scrape order across all years.
        for film in movies_by_year[year]:
            film["id"] = next_id
            next_id += 1
    with open("nominate_movie_meta_data.json", "w") as out:
        out.write(json.dumps(movies_by_year, ensure_ascii=False))
        out.write("\n")
if __name__ == "__main__":
main()
| 32.753165 | 104 | 0.465507 | import re
import sys
import json
import requests
from bs4 import BeautifulSoup
def scrape_nominate_movie(year):
    """Scrape eiga.com metadata for every film nominated in ``year``.

    Film ids are read from ``nominate_id/<year>.txt`` (one per line). Each
    film's page is fetched and its title, release date, screen time,
    production studio, director(s), scriptwriter(s) and cast are extracted.
    ``prize`` is set to 1 when the title appears in ``best_prize_title``.

    Returns a list of per-film metadata dicts.
    """
    film_index = "https://eiga.com/movie/"
    # Regexes keyed to eiga.com's Japanese markup: screen time ("/NN分/"),
    # distributor ("配給:..."), title (映画「...」), date (YYYY年MM月DD日).
    re_time = re.compile(r"/\d*分/")
    re_production_studio = re.compile(r"配給:[^<]*")
    re_title = re.compile(r"映画「[^」]*」")
    re_date = re.compile(r"\d*年\d*月\d*日")
    year_film_data = []
    # Known prize-winning titles; spellings align with eiga.com.
    best_prize_title = [
        '万引き家族',
        '三度目の殺人',
        'シン・ゴジラ',
        '海街diary',
        '永遠の0',
        '舟を編む',
        '桐島、部活やめるってよ',
        '八日目の蟬',
        '告白',
        '沈まぬ太陽',
        'おくりびと',
        '東京タワー オカンとボクと、時々、オトン',
        'フラガール',
        'ALWAYS 三丁目の夕日',
        '半落ち',
        '壬生義士伝',
        'たそがれ清兵衛',
        '千と千尋の神隠し',
        '雨あがる',
        '鉄道員(ぽっぽや)',
        '愛を乞うひと',
        'もののけ姫',
        'Shall we ダンス?',
        '午後の遺言状',
        '忠臣蔵外伝 四谷怪談',
        '学校',
        'シコふんじゃった。',
        '息子',
        '少年時代',
        '黒い雨',
        '敦煌',
        'マルサの女',
        '火宅の人',
        '花いちもんめ',
        'お葬式',
        '楢山節考',
        '蒲田行進曲',
        '駅 STATION',
        'ツィゴイネルワイゼン',
        '復讐するは我にあり',
        '事件',
        '幸福の黄色いハンカチ',
    ]
    with open("nominate_id/" + str(year) + ".txt", "r") as f:
        for line in f.readlines():
            film_id = line.strip()
            # Per-film record with defaults for fields that may be missing.
            film_data = {}
            film_data["director"] = []
            film_data["scriptwriter"] = []
            film_data["performers"] = []
            film_data["screen_time"] = -1
            film_data["production_studio"] = ""
            film_data["title"] = ""
            film_data["year"] = year
            # The first id in the file is marked as the winner (presumably
            # the winner is listed first — TODO confirm); 2020 is skipped,
            # and best_prize_title below also sets the flag once the title
            # is known.
            if len(year_film_data) == 0 and year != 2020:
                film_data["prize"] = 1
            else:
                film_data["prize"] = 0
            # Fetch and parse the film's page.
            content = requests.get(film_index + film_id).content
            soup = BeautifulSoup(content, features="lxml")
            # Pull the simple fields out of the prettified HTML via regex.
            html_text = soup.prettify()
            production_studio = re_production_studio.search(html_text)
            screen_time = re_time.search(html_text)
            title = re_title.search(html_text)
            date = re_date.search(html_text)
            if production_studio:
                # Drop the "配給:" prefix.
                film_data["production_studio"] = (
                    production_studio.group(0)[3:].strip())
            if screen_time:
                # Strip the leading "/" and trailing "分/".
                film_data["screen_time"] = int(screen_time.group(0)[1:-2])
            if title:
                # Strip the surrounding 映画「 ... 」 brackets.
                film_data["title"] = title.group(0)[3:-1]
                if film_data["title"] in best_prize_title:
                    film_data["prize"] = 1
            else:
                # Log ids whose title could not be parsed.
                print(film_id)
            if date:
                # Split "YYYY年MM月DD日" on its kanji separators.
                date_str = date.group(0)
                film_data["year"] = date_str[0:date_str.find("年")]
                film_data["month"] = (
                    date_str[date_str.find("年") + 1:date_str.find("月")])
                film_data["day"] = (
                    date_str[date_str.find("月") + 1:date_str.find("日")])
            # Staff (director/scriptwriter) and cast live under #staff-cast.
            staff_cast = soup.find(id="staff-cast")
            if staff_cast is not None:
                for div in staff_cast.find_all():
                    # div["class"] is a list of class-name strings.
                    if div.name == "dl" and div.has_attr("class") and div["class"][0] == "movie-staff":
                        # Staff column: <dt> names the role, the following
                        # <dd>s carry the people for that role.
                        data_type = ""
                        for p in div.find_all():
                            if p.name == "dt":
                                if p.get_text().find("監督") != -1:
                                    data_type = "director"
                                elif p.get_text().find("脚本") != -1:
                                    data_type = "scriptwriter"
                                else:
                                    data_type = ""
                            elif p.name == "dd" and len(data_type) > 0:
                                film_data[data_type].append(p.get_text().strip())
                    elif div.name == "ul" and div.has_attr("class") and div["class"][0] == "movie-cast":
                        # Cast column: cast names live in <span>s.
                        for p in div.find_all():
                            if p.name == "span":
                                film_data["performers"].append(p.get_text().strip())
            year_film_data.append(film_data)
            sys.stdout.flush()
    return year_film_data
def main():
    """Scrape nominee metadata for every award year (1978–2020) and dump it
    to ``nominate_movie_meta_data.json`` as a single JSON object keyed by
    year, assigning each film a globally unique integer id starting at 10."""
    start_year = 1978
    end_year = 2020
    years_dict = {}
    unique_id = 10
    for i in range(start_year, end_year + 1):
        years_dict[i] = scrape_nominate_movie(i)
        # Number every film in scrape order across all years.
        for j in range(len(years_dict[i])):
            years_dict[i][j]["id"] = unique_id
            unique_id += 1
    with open("nominate_movie_meta_data.json", "w") as f:
        f.write(json.dumps(years_dict, ensure_ascii=False))
        f.write("\n")
if __name__ == "__main__":
main()
| true | true |
f72073fa7cc1e6f079942989602618bed6ed1f0a | 9,216 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/aio/operations/_available_private_endpoint_types_operations.py | praveenkuttappan/azure-sdk-for-python | 4b79413667b7539750a6c7dde15737013a3d4bd5 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/aio/operations/_available_private_endpoint_types_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/aio/operations/_available_private_endpoint_types_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AvailablePrivateEndpointTypesOperations:
    """AvailablePrivateEndpointTypesOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2019_07_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        location: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.AvailablePrivateEndpointTypesResult"]:
        """Returns all of the resource types that can be linked to a Private Endpoint in this subscription
        in this region.
        :param location: The location of the domain name.
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either AvailablePrivateEndpointTypesResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_07_01.models.AvailablePrivateEndpointTypesResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.AvailablePrivateEndpointTypesResult"]
        # Default status-code -> exception mapping; callers may extend or
        # override it via the 'error_map' keyword argument.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            # The first page expands the templated URL; subsequent pages reuse
            # the service-provided next_link verbatim (no query parameters).
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'location': self._serialize.url("location", location, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page into (continuation token, items).
            deserialized = self._deserialize('AvailablePrivateEndpointTypesResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch a single page; any status other than 200 raises.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/availablePrivateEndpointTypes'}  # type: ignore
    def list_by_resource_group(
        self,
        location: str,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.AvailablePrivateEndpointTypesResult"]:
        """Returns all of the resource types that can be linked to a Private Endpoint in this subscription
        in this region.
        :param location: The location of the domain name.
        :type location: str
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either AvailablePrivateEndpointTypesResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_07_01.models.AvailablePrivateEndpointTypesResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.AvailablePrivateEndpointTypesResult"]
        # Default status-code -> exception mapping; callers may extend or
        # override it via the 'error_map' keyword argument.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            # The first page expands the templated URL; subsequent pages reuse
            # the service-provided next_link verbatim (no query parameters).
            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url']  # type: ignore
                path_format_arguments = {
                    'location': self._serialize.url("location", location, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page into (continuation token, items).
            deserialized = self._deserialize('AvailablePrivateEndpointTypesResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch a single page; any status other than 200 raises.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/locations/{location}/availablePrivateEndpointTypes'}  # type: ignore
| 48.761905 | 210 | 0.659397 |
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AvailablePrivateEndpointTypesOperations:
    """Async operations for listing available private endpoint types.
    Do not instantiate this class directly; create a client instance and
    access it as an attribute (AutoRest-generated operation group).
    :ivar models: Alias to model classes used in this operation group.
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        location: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.AvailablePrivateEndpointTypesResult"]:
        """Returns all of the resource types that can be linked to a Private
        Endpoint in this subscription in this region.
        :param location: The location of the domain name.
        :keyword callable cls: A custom type or function passed the direct response.
        :return: An async iterator of AvailablePrivateEndpointTypesResult pages.
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)
        # Default status-code -> exception mapping; callers may extend or
        # override it via the 'error_map' keyword argument.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request. The first page expands the templated URL;
            # subsequent pages reuse the service-provided next_link verbatim.
            header_parameters = {}
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                url = self.list.metadata['url']
                path_format_arguments = {
                    'location': self._serialize.url("location", location, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page into (continuation token, items).
            deserialized = self._deserialize('AvailablePrivateEndpointTypesResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch a single page; any status other than 200 raises.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/availablePrivateEndpointTypes'}
    def list_by_resource_group(
        self,
        location: str,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.AvailablePrivateEndpointTypesResult"]:
        """Returns all of the resource types that can be linked to a Private
        Endpoint in this subscription in this region, scoped to a resource group.
        :param location: The location of the domain name.
        :param resource_group_name: The name of the resource group.
        :keyword callable cls: A custom type or function passed the direct response.
        :return: An async iterator of AvailablePrivateEndpointTypesResult pages.
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)
        # Default status-code -> exception mapping; callers may extend or
        # override it via the 'error_map' keyword argument.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request. The first page expands the templated URL;
            # subsequent pages reuse the service-provided next_link verbatim.
            header_parameters = {}
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                url = self.list_by_resource_group.metadata['url']
                path_format_arguments = {
                    'location': self._serialize.url("location", location, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page into (continuation token, items).
            deserialized = self._deserialize('AvailablePrivateEndpointTypesResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch a single page; any status other than 200 raises.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/locations/{location}/availablePrivateEndpointTypes'}
| true | true |
f72076a759856b30a8e2638c441c193c5f2894fe | 3,023 | py | Python | rally_openstack/scenarios/gnocchi/archive_policy.py | RSE-Cambridge/rally-openstack | 32bbc091bbce1db625a2fc22da28b32718befa13 | [
"Apache-2.0"
] | null | null | null | rally_openstack/scenarios/gnocchi/archive_policy.py | RSE-Cambridge/rally-openstack | 32bbc091bbce1db625a2fc22da28b32718befa13 | [
"Apache-2.0"
] | null | null | null | rally_openstack/scenarios/gnocchi/archive_policy.py | RSE-Cambridge/rally-openstack | 32bbc091bbce1db625a2fc22da28b32718befa13 | [
"Apache-2.0"
] | 1 | 2018-12-10T12:31:27.000Z | 2018-12-10T12:31:27.000Z | # Copyright 2017 Red Hat, Inc. <http://www.redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.task import validation
from rally_openstack import consts
from rally_openstack import scenario
from rally_openstack.scenarios.gnocchi import utils as gnocchiutils
"""Scenarios for Gnocchi archive policy."""
@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="GnocchiArchivePolicy.list_archive_policy")
class ListArchivePolicy(gnocchiutils.GnocchiBase):
    """Benchmark scenario that lists Gnocchi archive policies as a user."""
    def run(self):
        """List archive policies."""
        self.gnocchi.list_archive_policy()
@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(
    context={"admin_cleanup@openstack": ["gnocchi.archive_policy"]},
    name="GnocchiArchivePolicy.create_archive_policy")
class CreateArchivePolicy(gnocchiutils.GnocchiBase):
    """Benchmark scenario that creates a Gnocchi archive policy as admin."""
    def run(self, definition=None, aggregation_methods=None):
        """Create archive policy.

        :param definition: List of definitions
        :param aggregation_methods: List of aggregation methods
        """
        # Only substitute the default when nothing was supplied at all;
        # an explicitly-passed empty list must be forwarded unchanged.
        if definition is None:
            definition = [{"granularity": "0:00:01", "timespan": "1:00:00"}]
        policy_name = self.generate_random_name()
        self.admin_gnocchi.create_archive_policy(
            policy_name,
            definition=definition,
            aggregation_methods=aggregation_methods)
@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(
    context={"admin_cleanup@openstack": ["gnocchi.archive_policy"]},
    name="GnocchiArchivePolicy.create_delete_archive_policy")
class CreateDeleteArchivePolicy(gnocchiutils.GnocchiBase):
    """Benchmark scenario that creates, then deletes, an archive policy."""
    def run(self, definition=None, aggregation_methods=None):
        """Create archive policy and then delete it.

        :param definition: List of definitions
        :param aggregation_methods: List of aggregation methods
        """
        # Default: one-second granularity retained for one hour. The check is
        # deliberately `is None` so an explicit empty list passes through.
        effective_definition = (
            [{"granularity": "0:00:01", "timespan": "1:00:00"}]
            if definition is None else definition)
        policy_name = self.generate_random_name()
        self.admin_gnocchi.create_archive_policy(
            policy_name,
            definition=effective_definition,
            aggregation_methods=aggregation_methods)
        self.admin_gnocchi.delete_archive_policy(policy_name)
| 38.75641 | 78 | 0.7261 |
from rally.task import validation
from rally_openstack import consts
from rally_openstack import scenario
from rally_openstack.scenarios.gnocchi import utils as gnocchiutils
@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="GnocchiArchivePolicy.list_archive_policy")
class ListArchivePolicy(gnocchiutils.GnocchiBase):
    """Benchmark scenario that lists Gnocchi archive policies as a user."""
    def run(self):
        """List archive policies."""
        self.gnocchi.list_archive_policy()
@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(
    context={"admin_cleanup@openstack": ["gnocchi.archive_policy"]},
    name="GnocchiArchivePolicy.create_archive_policy")
class CreateArchivePolicy(gnocchiutils.GnocchiBase):
    """Benchmark scenario that creates a Gnocchi archive policy as admin."""
    def run(self, definition=None, aggregation_methods=None):
        """Create archive policy.

        :param definition: List of definitions
        :param aggregation_methods: List of aggregation methods
        """
        # `is None` (not truthiness) so an explicit empty list is honored.
        if definition is None:
            definition = [{"granularity": "0:00:01", "timespan": "1:00:00"}]
        name = self.generate_random_name()
        self.admin_gnocchi.create_archive_policy(
            name, definition=definition,
            aggregation_methods=aggregation_methods)
@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(
    context={"admin_cleanup@openstack": ["gnocchi.archive_policy"]},
    name="GnocchiArchivePolicy.create_delete_archive_policy")
class CreateDeleteArchivePolicy(gnocchiutils.GnocchiBase):
    """Benchmark scenario that creates, then deletes, an archive policy."""
    def run(self, definition=None, aggregation_methods=None):
        """Create archive policy and then delete it.

        :param definition: List of definitions
        :param aggregation_methods: List of aggregation methods
        """
        # `is None` (not truthiness) so an explicit empty list is honored.
        if definition is None:
            definition = [{"granularity": "0:00:01", "timespan": "1:00:00"}]
        name = self.generate_random_name()
        self.admin_gnocchi.create_archive_policy(
            name, definition=definition,
            aggregation_methods=aggregation_methods)
        self.admin_gnocchi.delete_archive_policy(name)
| true | true |
f720771fafd95ebfe23bca7f24afc2f571e9f07b | 51,158 | py | Python | tests/test_dataset.py | jenhaoyang/datumaro | add81ddb59502362fa65fa07e5bc4d8c9f61afde | [
"MIT"
] | null | null | null | tests/test_dataset.py | jenhaoyang/datumaro | add81ddb59502362fa65fa07e5bc4d8c9f61afde | [
"MIT"
] | null | null | null | tests/test_dataset.py | jenhaoyang/datumaro | add81ddb59502362fa65fa07e5bc4d8c9f61afde | [
"MIT"
] | 1 | 2021-12-15T22:15:59.000Z | 2021-12-15T22:15:59.000Z | from unittest import TestCase
import os
import os.path as osp
import numpy as np
from datumaro.components.annotation import (
AnnotationType, Bbox, Caption, Label, LabelCategories, Mask, Points,
Polygon, PolyLine,
)
from datumaro.components.converter import Converter
from datumaro.components.dataset import (
DEFAULT_FORMAT, Dataset, ItemStatus, eager_mode,
)
from datumaro.components.dataset_filter import (
DatasetItemEncoder, XPathAnnotationsFilter, XPathDatasetFilter,
)
from datumaro.components.environment import Environment
from datumaro.components.errors import (
ConflictingCategoriesError, DatasetNotFoundError, MultipleFormatsMatchError,
NoMatchingFormatsError, RepeatedItemError, UnknownFormatError,
)
from datumaro.components.extractor import (
DEFAULT_SUBSET_NAME, DatasetItem, Extractor, ItemTransform, Transform,
)
from datumaro.components.launcher import Launcher
from datumaro.components.media import Image
from datumaro.util.test_utils import TestDir, compare_datasets
from .requirements import Requirements, mark_requirement
class DatasetTest(TestCase):
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_create_from_extractors(self):
class SrcExtractor1(Extractor):
def __iter__(self):
return iter([
DatasetItem(id=1, subset='train', annotations=[
Bbox(1, 2, 3, 4),
Label(4),
]),
DatasetItem(id=1, subset='val', annotations=[
Label(4),
]),
])
class SrcExtractor2(Extractor):
def __iter__(self):
return iter([
DatasetItem(id=1, subset='val', annotations=[
Label(5),
]),
])
class DstExtractor(Extractor):
def __iter__(self):
return iter([
DatasetItem(id=1, subset='train', annotations=[
Bbox(1, 2, 3, 4),
Label(4),
]),
DatasetItem(id=1, subset='val', annotations=[
Label(4),
Label(5),
]),
])
dataset = Dataset.from_extractors(SrcExtractor1(), SrcExtractor2())
compare_datasets(self, DstExtractor(), dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_create_from_iterable(self):
class TestExtractor(Extractor):
def __iter__(self):
return iter([
DatasetItem(id=1, subset='train', annotations=[
Bbox(1, 2, 3, 4, label=2),
Label(4),
]),
DatasetItem(id=1, subset='val', annotations=[
Label(3),
]),
])
def categories(self):
return { AnnotationType.label: LabelCategories.from_iterable(
['a', 'b', 'c', 'd', 'e'])
}
actual = Dataset.from_iterable([
DatasetItem(id=1, subset='train', annotations=[
Bbox(1, 2, 3, 4, label=2),
Label(4),
]),
DatasetItem(id=1, subset='val', annotations=[
Label(3),
]),
], categories=['a', 'b', 'c', 'd', 'e'])
compare_datasets(self, TestExtractor(), actual)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_join_datasets_with_empty_categories(self):
expected = Dataset.from_iterable([
DatasetItem(1, annotations=[
Label(0),
Bbox(1, 2, 3, 4),
Caption('hello world'),
])
], categories=['a'])
src1 = Dataset.from_iterable([
DatasetItem(1, annotations=[ Bbox(1, 2, 3, 4, label=None) ])
], categories=[])
src2 = Dataset.from_iterable([
DatasetItem(1, annotations=[ Label(0) ])
], categories=['a'])
src3 = Dataset.from_iterable([
DatasetItem(1, annotations=[ Caption('hello world') ])
])
actual = Dataset.from_extractors(src1, src2, src3)
compare_datasets(self, expected, actual)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Label(2) ]),
], categories=['a', 'b', 'c'])
with TestDir() as test_dir:
source_dataset.save(test_dir)
loaded_dataset = Dataset.load(test_dir)
compare_datasets(self, source_dataset, loaded_dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_detect(self):
env = Environment()
env.importers.items = {DEFAULT_FORMAT: env.importers[DEFAULT_FORMAT]}
env.extractors.items = {DEFAULT_FORMAT: env.extractors[DEFAULT_FORMAT]}
dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Label(2) ]),
], categories=['a', 'b', 'c'])
with TestDir() as test_dir:
dataset.save(test_dir)
detected_format = Dataset.detect(test_dir, env=env)
self.assertEqual(DEFAULT_FORMAT, detected_format)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_detect_and_import(self):
env = Environment()
env.importers.items = {DEFAULT_FORMAT: env.importers[DEFAULT_FORMAT]}
env.extractors.items = {DEFAULT_FORMAT: env.extractors[DEFAULT_FORMAT]}
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Label(2) ]),
], categories=['a', 'b', 'c'])
with TestDir() as test_dir:
source_dataset.save(test_dir)
imported_dataset = Dataset.import_from(test_dir, env=env)
self.assertEqual(imported_dataset.data_path, test_dir)
self.assertEqual(imported_dataset.format, DEFAULT_FORMAT)
compare_datasets(self, source_dataset, imported_dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_report_no_dataset_found(self):
env = Environment()
env.importers.items = {
DEFAULT_FORMAT: env.importers[DEFAULT_FORMAT],
}
env.extractors.items = {
DEFAULT_FORMAT: env.extractors[DEFAULT_FORMAT],
}
with TestDir() as test_dir, self.assertRaises(DatasetNotFoundError):
Dataset.import_from(test_dir, DEFAULT_FORMAT, env=env)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_report_multiple_formats_match(self):
env = Environment()
env.importers.items = {
'a': env.importers[DEFAULT_FORMAT],
'b': env.importers[DEFAULT_FORMAT],
}
env.extractors.items = {
'a': env.extractors[DEFAULT_FORMAT],
'b': env.extractors[DEFAULT_FORMAT],
}
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Label(2) ]),
], categories=['a', 'b', 'c'])
with TestDir() as test_dir:
source_dataset.save(test_dir)
with self.assertRaises(MultipleFormatsMatchError):
Dataset.import_from(test_dir, env=env)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_report_no_matching_formats(self):
env = Environment()
env.importers.items = {}
env.extractors.items = {}
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Label(2) ]),
], categories=['a', 'b', 'c'])
with TestDir() as test_dir:
source_dataset.save(test_dir)
with self.assertRaises(NoMatchingFormatsError):
Dataset.import_from(test_dir, env=env)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_report_unknown_format_requested(self):
env = Environment()
env.importers.items = {}
env.extractors.items = {}
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Label(2) ]),
], categories=['a', 'b', 'c'])
with TestDir() as test_dir:
source_dataset.save(test_dir)
with self.assertRaises(UnknownFormatError):
Dataset.import_from(test_dir, format='custom', env=env)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_export_by_string_format_name(self):
env = Environment()
env.converters.items = {'qq': env.converters[DEFAULT_FORMAT]}
dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Label(2) ]),
], categories=['a', 'b', 'c'], env=env)
with TestDir() as test_dir:
dataset.export(format='qq', save_dir=test_dir)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_remember_export_options(self):
dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.ones((1, 2, 3))),
], categories=['a'])
with TestDir() as test_dir:
dataset.save(test_dir, save_images=True)
dataset.put(dataset.get(1)) # mark the item modified for patching
image_path = osp.join(test_dir, 'images', 'default', '1.jpg')
os.remove(image_path)
dataset.save(test_dir)
self.assertEqual({'save_images': True}, dataset.options)
self.assertTrue(osp.isfile(image_path))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_compute_length_when_created_from_scratch(self):
dataset = Dataset()
dataset.put(DatasetItem(1))
dataset.put(DatasetItem(2))
dataset.put(DatasetItem(3))
dataset.remove(1)
self.assertEqual(2, len(dataset))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_compute_length_when_created_from_extractor(self):
class TestExtractor(Extractor):
def __iter__(self):
yield from [
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
]
dataset = Dataset.from_extractors(TestExtractor())
self.assertEqual(3, len(dataset))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_compute_length_when_created_from_sequence(self):
dataset = Dataset.from_iterable([
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
])
self.assertEqual(3, len(dataset))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_transform_by_string_name(self):
expected = Dataset.from_iterable([
DatasetItem(id=1, attributes={'qq': 1}),
])
class TestTransform(ItemTransform):
def transform_item(self, item):
return self.wrap_item(item, attributes={'qq': 1})
env = Environment()
env.transforms.register('qq', TestTransform)
dataset = Dataset.from_iterable([ DatasetItem(id=1) ], env=env)
actual = dataset.transform('qq')
compare_datasets(self, expected, actual)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_transform(self):
expected = Dataset.from_iterable([
DatasetItem(id=1, attributes={'qq': 1}),
])
class TestTransform(ItemTransform):
def transform_item(self, item):
return self.wrap_item(item, attributes={'qq': 1})
dataset = Dataset.from_iterable([ DatasetItem(id=1) ])
actual = dataset.transform(TestTransform)
compare_datasets(self, expected, actual)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_join_annotations(self):
a = Dataset.from_iterable([
DatasetItem(id=1, subset='train', annotations=[
Label(1, id=3),
Label(2, attributes={ 'x': 1 }),
])
], categories=['a', 'b', 'c', 'd'])
b = Dataset.from_iterable([
DatasetItem(id=1, subset='train', annotations=[
Label(2, attributes={ 'x': 1 }),
Label(3, id=4),
])
], categories=['a', 'b', 'c', 'd'])
expected = Dataset.from_iterable([
DatasetItem(id=1, subset='train', annotations=[
Label(1, id=3),
Label(2, attributes={ 'x': 1 }),
Label(3, id=4),
])
], categories=['a', 'b', 'c', 'd'])
merged = Dataset.from_extractors(a, b)
compare_datasets(self, expected, merged)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_cant_join_different_categories(self):
s1 = Dataset.from_iterable([], categories=['a', 'b'])
s2 = Dataset.from_iterable([], categories=['b', 'a'])
with self.assertRaises(ConflictingCategoriesError):
Dataset.from_extractors(s1, s2)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_join_datasets(self):
s1 = Dataset.from_iterable([ DatasetItem(0), DatasetItem(1) ])
s2 = Dataset.from_iterable([ DatasetItem(1), DatasetItem(2) ])
expected = Dataset.from_iterable([
DatasetItem(0), DatasetItem(1), DatasetItem(2)
])
actual = Dataset.from_extractors(s1, s2)
compare_datasets(self, expected, actual)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_track_modifications_on_addition(self):
dataset = Dataset.from_iterable([
DatasetItem(1),
DatasetItem(2),
])
self.assertFalse(dataset.is_modified)
dataset.put(DatasetItem(3, subset='a'))
self.assertTrue(dataset.is_modified)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_track_modifications_on_removal(self):
dataset = Dataset.from_iterable([
DatasetItem(1),
DatasetItem(2),
])
self.assertFalse(dataset.is_modified)
dataset.remove(1)
self.assertTrue(dataset.is_modified)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_create_patch(self):
expected = Dataset.from_iterable([
DatasetItem(2),
DatasetItem(3, subset='a')
])
dataset = Dataset.from_iterable([
DatasetItem(1),
DatasetItem(2),
])
dataset.put(DatasetItem(2))
dataset.put(DatasetItem(3, subset='a'))
dataset.remove(1)
patch = dataset.get_patch()
self.assertEqual({
('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,
('2', DEFAULT_SUBSET_NAME): ItemStatus.added,
('3', 'a'): ItemStatus.added,
}, patch.updated_items)
self.assertEqual({
'default': ItemStatus.modified,
'a': ItemStatus.modified,
}, patch.updated_subsets)
self.assertEqual(2, len(patch.data))
self.assertEqual(None, patch.data.get(1))
self.assertEqual(dataset.get(2), patch.data.get(2))
self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))
compare_datasets(self, expected, dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_create_patch_when_cached(self):
expected = Dataset.from_iterable([
DatasetItem(2),
DatasetItem(3, subset='a')
])
dataset = Dataset.from_iterable([
DatasetItem(1),
DatasetItem(2),
])
dataset.init_cache()
dataset.put(DatasetItem(2))
dataset.put(DatasetItem(3, subset='a'))
dataset.remove(1)
patch = dataset.get_patch()
self.assertEqual({
('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,
# Item was not changed from the original one.
# TODO: add item comparison and remove this line
('2', DEFAULT_SUBSET_NAME): ItemStatus.modified,
('3', 'a'): ItemStatus.added,
}, patch.updated_items)
self.assertEqual({
'default': ItemStatus.modified,
'a': ItemStatus.modified,
}, patch.updated_subsets)
self.assertEqual(2, len(patch.data))
self.assertEqual(None, patch.data.get(1))
self.assertEqual(dataset.get(2), patch.data.get(2))
self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))
compare_datasets(self, expected, dataset)
@mark_requirement(Requirements.DATUM_BUG_257)
def test_can_create_patch_when_transforms_mixed(self):
expected = Dataset.from_iterable([
DatasetItem(2),
DatasetItem(3, subset='a')
])
dataset = Dataset.from_iterable([
DatasetItem(1),
DatasetItem(2),
])
class Remove1(Transform):
def __iter__(self):
for item in self._extractor:
if item.id != '1':
yield item
class Add3(Transform):
def __iter__(self):
for item in self._extractor:
if item.id == '2':
yield item
yield DatasetItem(3, subset='a')
dataset.transform(Remove1)
dataset.transform(Add3)
patch = dataset.get_patch()
self.assertEqual({
('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,
('2', DEFAULT_SUBSET_NAME): ItemStatus.modified,
('3', 'a'): ItemStatus.added,
}, patch.updated_items)
self.assertEqual({
'default': ItemStatus.modified,
'a': ItemStatus.modified,
}, patch.updated_subsets)
self.assertEqual(2, len(patch.data))
self.assertEqual(None, patch.data.get(1))
self.assertEqual(dataset.get(2), patch.data.get(2))
self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))
compare_datasets(self, expected, dataset)
@mark_requirement(Requirements.DATUM_BUG_257)
def test_can_create_patch_when_transforms_chained(self):
expected = Dataset.from_iterable([
DatasetItem(2),
DatasetItem(3, subset='a')
])
class TestExtractor(Extractor):
iter_called = 0
def __iter__(self):
yield from [
DatasetItem(1),
DatasetItem(2),
]
__class__.iter_called += 1
class Remove1(Transform):
iter_called = 0
def __iter__(self):
for item in self._extractor:
if item.id != '1':
yield item
__class__.iter_called += 1
class Add3(Transform):
iter_called = 0
def __iter__(self):
yield from self._extractor
yield DatasetItem(3, subset='a')
__class__.iter_called += 1
dataset = Dataset.from_extractors(TestExtractor())
dataset.transform(Remove1)
dataset.transform(Add3)
patch = dataset.get_patch()
self.assertEqual({
('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,
('2', DEFAULT_SUBSET_NAME): ItemStatus.modified,
('3', 'a'): ItemStatus.added,
}, patch.updated_items)
self.assertEqual({
'default': ItemStatus.modified,
'a': ItemStatus.modified,
}, patch.updated_subsets)
self.assertEqual(2, len(patch.data))
self.assertEqual(None, patch.data.get(1))
self.assertEqual(dataset.get(2), patch.data.get(2))
self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))
self.assertEqual(TestExtractor.iter_called, 2) # 1 for items, 1 for list
self.assertEqual(Remove1.iter_called, 1)
self.assertEqual(Add3.iter_called, 1)
compare_datasets(self, expected, dataset)
@mark_requirement(Requirements.DATUM_BUG_257)
def test_can_create_patch_when_transforms_intermixed_with_direct_ops(self):
expected = Dataset.from_iterable([
DatasetItem(3, subset='a'),
DatasetItem(4),
DatasetItem(5),
])
class TestExtractor(Extractor):
iter_called = 0
def __iter__(self):
yield from [
DatasetItem(1),
DatasetItem(2),
]
__class__.iter_called += 1
class Remove1(Transform):
iter_called = 0
def __iter__(self):
for item in self._extractor:
if item.id != '1':
yield item
__class__.iter_called += 1
class Add3(Transform):
iter_called = 0
def __iter__(self):
yield from self._extractor
yield DatasetItem(3, subset='a')
__class__.iter_called += 1
dataset = Dataset.from_extractors(TestExtractor())
dataset.init_cache()
dataset.put(DatasetItem(4))
dataset.transform(Remove1)
dataset.put(DatasetItem(5))
dataset.remove(2)
dataset.transform(Add3)
patch = dataset.get_patch()
self.assertEqual({
('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,
('2', DEFAULT_SUBSET_NAME): ItemStatus.removed,
('3', 'a'): ItemStatus.added,
('4', DEFAULT_SUBSET_NAME): ItemStatus.added,
('5', DEFAULT_SUBSET_NAME): ItemStatus.added,
}, patch.updated_items)
self.assertEqual({
'default': ItemStatus.modified,
'a': ItemStatus.modified,
}, patch.updated_subsets)
self.assertEqual(3, len(patch.data))
self.assertEqual(None, patch.data.get(1))
self.assertEqual(None, patch.data.get(2))
self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))
self.assertEqual(dataset.get(4), patch.data.get(4))
self.assertEqual(dataset.get(5), patch.data.get(5))
self.assertEqual(TestExtractor.iter_called, 1)
self.assertEqual(Remove1.iter_called, 1)
self.assertEqual(Add3.iter_called, 1)
compare_datasets(self, expected, dataset)
@mark_requirement(Requirements.DATUM_BUG_257)
def test_can_create_patch_when_local_transforms_stacked(self):
expected = Dataset.from_iterable([
DatasetItem(4),
DatasetItem(5),
])
class TestExtractor(Extractor):
iter_called = 0
def __iter__(self):
yield from [
DatasetItem(1),
DatasetItem(2),
]
__class__.iter_called += 1
class ShiftIds(ItemTransform):
def transform_item(self, item):
return item.wrap(id=int(item.id) + 1)
dataset = Dataset.from_extractors(TestExtractor())
dataset.remove(2)
dataset.transform(ShiftIds)
dataset.transform(ShiftIds)
dataset.transform(ShiftIds)
dataset.put(DatasetItem(5))
patch = dataset.get_patch()
self.assertEqual({
('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,
('2', DEFAULT_SUBSET_NAME): ItemStatus.removed,
('4', DEFAULT_SUBSET_NAME): ItemStatus.added,
('5', DEFAULT_SUBSET_NAME): ItemStatus.added,
}, patch.updated_items)
self.assertEqual({
'default': ItemStatus.modified,
}, patch.updated_subsets)
self.assertEqual(2, len(patch.data))
self.assertEqual(None, patch.data.get(1))
self.assertEqual(None, patch.data.get(2))
self.assertEqual(None, patch.data.get(3))
self.assertEqual(dataset.get(4), patch.data.get(4))
self.assertEqual(dataset.get(5), patch.data.get(5))
self.assertEqual(TestExtractor.iter_called, 1)
compare_datasets(self, expected, dataset)
@mark_requirement(Requirements.DATUM_BUG_257)
def test_can_create_patch_when_transforms_chained_and_source_cached(self):
expected = Dataset.from_iterable([
DatasetItem(2),
DatasetItem(3, subset='a')
])
class TestExtractor(Extractor):
iter_called = 0
def __iter__(self):
yield from [
DatasetItem(1),
DatasetItem(2),
]
__class__.iter_called += 1
class Remove1(Transform):
iter_called = 0
def __iter__(self):
for item in self._extractor:
if item.id != '1':
yield item
__class__.iter_called += 1
class Add3(Transform):
iter_called = 0
def __iter__(self):
yield from self._extractor
yield DatasetItem(3, subset='a')
__class__.iter_called += 1
dataset = Dataset.from_extractors(TestExtractor())
dataset.init_cache()
dataset.transform(Remove1)
dataset.transform(Add3)
patch = dataset.get_patch()
self.assertEqual({
('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,
('2', DEFAULT_SUBSET_NAME): ItemStatus.modified, # TODO: remove this
('3', 'a'): ItemStatus.added,
}, patch.updated_items)
self.assertEqual({
'default': ItemStatus.modified,
'a': ItemStatus.modified,
}, patch.updated_subsets)
self.assertEqual(2, len(patch.data))
self.assertEqual(None, patch.data.get(1))
self.assertEqual(dataset.get(2), patch.data.get(2))
self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))
self.assertEqual(TestExtractor.iter_called, 1) # 1 for items and list
self.assertEqual(Remove1.iter_called, 1)
self.assertEqual(Add3.iter_called, 1)
compare_datasets(self, expected, dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_do_lazy_put_and_remove(self):
iter_called = False
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called = True
return iter([
DatasetItem(1),
DatasetItem(2),
])
dataset = Dataset.from_extractors(TestExtractor())
self.assertFalse(dataset.is_cache_initialized)
dataset.put(DatasetItem(3))
dataset.remove(DatasetItem(1))
self.assertFalse(dataset.is_cache_initialized)
self.assertFalse(iter_called)
dataset.init_cache()
self.assertTrue(dataset.is_cache_initialized)
self.assertTrue(iter_called)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_put(self):
dataset = Dataset()
dataset.put(DatasetItem(1))
self.assertTrue((1, '') in dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_do_lazy_get_on_updated_item(self):
iter_called = False
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called = True
return iter([
DatasetItem(1),
DatasetItem(2),
])
dataset = Dataset.from_extractors(TestExtractor())
dataset.put(DatasetItem(2))
self.assertTrue((2, '') in dataset)
self.assertFalse(iter_called)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_switch_eager_and_lazy_with_cm_global(self):
iter_called = False
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called = True
return iter([
DatasetItem(1),
DatasetItem(2),
])
with eager_mode():
Dataset.from_extractors(TestExtractor())
self.assertTrue(iter_called)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_switch_eager_and_lazy_with_cm_local(self):
iter_called = False
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called = True
yield from [
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
DatasetItem(4),
]
dataset = Dataset.from_extractors(TestExtractor())
with eager_mode(dataset=dataset):
dataset.select(lambda item: int(item.id) < 3)
dataset.select(lambda item: int(item.id) < 2)
self.assertTrue(iter_called)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_do_lazy_select(self):
iter_called = 0
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called += 1
yield from [
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
DatasetItem(4),
]
dataset = Dataset.from_extractors(TestExtractor())
dataset.select(lambda item: int(item.id) < 3)
dataset.select(lambda item: int(item.id) < 2)
self.assertEqual(iter_called, 0)
self.assertEqual(1, len(dataset))
self.assertEqual(iter_called, 1)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_chain_lazy_transforms(self):
iter_called = 0
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called += 1
yield from [
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
DatasetItem(4),
]
dataset = Dataset.from_extractors(TestExtractor())
class TestTransform(ItemTransform):
def transform_item(self, item):
return self.wrap_item(item, id=int(item.id) + 1)
dataset.transform(TestTransform)
dataset.transform(TestTransform)
self.assertEqual(iter_called, 0)
self.assertEqual(4, len(dataset))
self.assertEqual(3, int(min(int(item.id) for item in dataset)))
self.assertEqual(iter_called, 1)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_get_len_after_local_transforms(self):
iter_called = 0
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called += 1
yield from [
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
DatasetItem(4),
]
dataset = Dataset.from_extractors(TestExtractor())
class TestTransform(ItemTransform):
def transform_item(self, item):
return self.wrap_item(item, id=int(item.id) + 1)
dataset.transform(TestTransform)
dataset.transform(TestTransform)
self.assertEqual(iter_called, 0)
self.assertEqual(4, len(dataset))
self.assertEqual(iter_called, 1)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_get_len_after_nonlocal_transforms(self):
iter_called = 0
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called += 1
yield from [
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
DatasetItem(4),
]
dataset = Dataset.from_extractors(TestExtractor())
class TestTransform(Transform):
def __iter__(self):
for item in self._extractor:
yield self.wrap_item(item, id=int(item.id) + 1)
dataset.transform(TestTransform)
dataset.transform(TestTransform)
self.assertEqual(iter_called, 0)
self.assertEqual(4, len(dataset))
self.assertEqual(iter_called, 2)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_get_subsets_after_local_transforms(self):
iter_called = 0
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called += 1
yield from [
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
DatasetItem(4),
]
dataset = Dataset.from_extractors(TestExtractor())
class TestTransform(ItemTransform):
def transform_item(self, item):
return self.wrap_item(item, id=int(item.id) + 1, subset='a')
dataset.transform(TestTransform)
dataset.transform(TestTransform)
self.assertEqual(iter_called, 0)
self.assertEqual({'a'}, set(dataset.subsets()))
self.assertEqual(iter_called, 1)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_get_subsets_after_nonlocal_transforms(self):
iter_called = 0
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called += 1
yield from [
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
DatasetItem(4),
]
dataset = Dataset.from_extractors(TestExtractor())
class TestTransform(Transform):
def __iter__(self):
for item in self._extractor:
yield self.wrap_item(item, id=int(item.id) + 1, subset='a')
dataset.transform(TestTransform)
dataset.transform(TestTransform)
self.assertEqual(iter_called, 0)
self.assertEqual({'a'}, set(dataset.subsets()))
self.assertEqual(iter_called, 2)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_raises_when_repeated_items_in_source(self):
dataset = Dataset.from_iterable([DatasetItem(0), DatasetItem(0)])
with self.assertRaises(RepeatedItemError):
dataset.init_cache()
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_check_item_existence(self):
dataset = Dataset.from_iterable([
DatasetItem(0, subset='a'), DatasetItem(1)
])
self.assertTrue(DatasetItem(0, subset='a') in dataset)
self.assertFalse(DatasetItem(0, subset='b') in dataset)
self.assertTrue((0, 'a') in dataset)
self.assertFalse((0, 'b') in dataset)
self.assertTrue(1 in dataset)
self.assertFalse(0 in dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_put_with_id_override(self):
dataset = Dataset.from_iterable([])
dataset.put(DatasetItem(0, subset='a'), id=2, subset='b')
self.assertTrue((2, 'b') in dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_compute_cache_with_empty_source(self):
dataset = Dataset.from_iterable([])
dataset.put(DatasetItem(2))
dataset.init_cache()
self.assertTrue(2 in dataset)
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_cant_do_partial_caching_in_get_when_default(self):
        # Without a custom get() on the extractor there is no random
        # access to the source, so the first get() builds the whole
        # cache in one pass and the second get() is served from it.
        iter_called = 0
        class TestExtractor(Extractor):
            def __iter__(self):
                nonlocal iter_called
                iter_called += 1
                return iter([
                    DatasetItem(1),
                    DatasetItem(2),
                    DatasetItem(3),
                    DatasetItem(4),
                ])
        dataset = Dataset.from_extractors(TestExtractor())
        dataset.get(3)
        dataset.get(4)
        self.assertEqual(1, iter_called)
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_can_do_partial_caching_in_get_when_redefined(self):
        # When the extractor provides its own get(), items can be
        # fetched individually (partial caching) and the source is
        # never iterated as a whole.
        iter_called = 0
        get_called = 0
        class TestExtractor(Extractor):
            def __iter__(self):
                nonlocal iter_called
                iter_called += 1
                return iter([
                    DatasetItem(1),
                    DatasetItem(2),
                    DatasetItem(3),
                    DatasetItem(4),
                ])
            def get(self, id, subset=None):
                nonlocal get_called
                get_called += 1
                return DatasetItem(id, subset=subset)
        dataset = Dataset.from_extractors(TestExtractor())
        dataset.get(3)
        dataset.get(4)
        self.assertEqual(0, iter_called)
        self.assertEqual(2, get_called)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_binds_on_save(self):
dataset = Dataset.from_iterable([DatasetItem(1)])
self.assertFalse(dataset.is_bound)
with TestDir() as test_dir:
dataset.save(test_dir)
self.assertTrue(dataset.is_bound)
self.assertEqual(dataset.data_path, test_dir)
self.assertEqual(dataset.format, DEFAULT_FORMAT)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_flushes_changes_on_save(self):
dataset = Dataset.from_iterable([])
dataset.put(DatasetItem(1))
self.assertTrue(dataset.is_modified)
with TestDir() as test_dir:
dataset.save(test_dir)
self.assertFalse(dataset.is_modified)
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_does_not_load_images_on_saving(self):
        # Issue https://github.com/openvinotoolkit/datumaro/issues/177
        # Missing image metadata (size etc.) can lead to image loading on
        # dataset save without image saving
        #
        # The image is supplied as a lazy loader callable; saving the
        # annotations must not trigger it.
        called = False
        def test_loader():
            nonlocal called
            called = True
        dataset = Dataset.from_iterable([
            DatasetItem(1, image=test_loader)
        ])
        with TestDir() as test_dir:
            dataset.save(test_dir)
        self.assertFalse(called)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_transform_labels(self):
expected = Dataset.from_iterable([], categories=['c', 'b'])
dataset = Dataset.from_iterable([], categories=['a', 'b'])
actual = dataset.transform('remap_labels', {'a': 'c'})
compare_datasets(self, expected, actual)
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_can_run_model(self):
        # run_model() feeds items to the launcher in batches: 5 items
        # with batch_size=3 means 2 launch() calls (asserted below).
        # 'idx' records each item's position inside its batch.
        dataset = Dataset.from_iterable([
            DatasetItem(i, image=np.array([i]))
            for i in range(5)
        ], categories=['label'])
        batch_size = 3
        expected = Dataset.from_iterable([
            DatasetItem(i, image=np.array([i]), annotations=[
                Label(0, attributes={ 'idx': i % batch_size, 'data': i })
            ])
            for i in range(5)
        ], categories=['label'])
        calls = 0
        class TestLauncher(Launcher):
            def launch(self, inputs):
                nonlocal calls
                calls += 1
                for i, inp in enumerate(inputs):
                    yield [ Label(0, attributes={'idx': i, 'data': inp.item()}) ]
        model = TestLauncher()
        actual = dataset.run_model(model, batch_size=batch_size)
        compare_datasets(self, expected, actual, require_images=True)
        self.assertEqual(2, calls)
@mark_requirement(Requirements.DATUM_BUG_259)
def test_can_filter_items(self):
dataset = Dataset.from_iterable([
DatasetItem(id=0, subset='train'),
DatasetItem(id=1, subset='test'),
])
dataset.filter('/item[id > 0]')
self.assertEqual(1, len(dataset))
    @mark_requirement(Requirements.DATUM_BUG_257)
    def test_filter_registers_changes(self):
        # Filtering must be reflected in the patch: the dropped item is
        # reported as removed. The kept item is currently reported as
        # modified even though it is unchanged (known issue, see TODO).
        dataset = Dataset.from_iterable([
            DatasetItem(id=0, subset='train'),
            DatasetItem(id=1, subset='test'),
        ])
        dataset.filter('/item[id > 0]')
        self.assertEqual({
            ('0', 'train'): ItemStatus.removed,
            ('1', 'test'): ItemStatus.modified, # TODO: remove this line
        }, dataset.get_patch().updated_items)
@mark_requirement(Requirements.DATUM_BUG_259)
def test_can_filter_annotations(self):
dataset = Dataset.from_iterable([
DatasetItem(id=0, subset='train', annotations=[Label(0), Label(1)]),
DatasetItem(id=1, subset='val', annotations=[Label(2)]),
DatasetItem(id=2, subset='test', annotations=[Label(0), Label(2)]),
], categories=['a', 'b', 'c'])
dataset.filter('/item/annotation[label = "c"]',
filter_annotations=True, remove_empty=True)
self.assertEqual(2, len(dataset))
@mark_requirement(Requirements.DATUM_BUG_259)
def test_can_filter_items_in_merged_dataset(self):
dataset = Dataset.from_extractors(
Dataset.from_iterable([ DatasetItem(id=0, subset='train') ]),
Dataset.from_iterable([ DatasetItem(id=1, subset='test') ]),
)
dataset.filter('/item[id > 0]')
self.assertEqual(1, len(dataset))
@mark_requirement(Requirements.DATUM_BUG_259)
def test_can_filter_annotations_in_merged_dataset(self):
dataset = Dataset.from_extractors(
Dataset.from_iterable([
DatasetItem(id=0, subset='train', annotations=[Label(0)]),
], categories=['a', 'b', 'c']),
Dataset.from_iterable([
DatasetItem(id=1, subset='val', annotations=[Label(1)]),
], categories=['a', 'b', 'c']),
Dataset.from_iterable([
DatasetItem(id=2, subset='test', annotations=[Label(2)]),
], categories=['a', 'b', 'c']),
)
dataset.filter('/item/annotation[label = "c"]',
filter_annotations=True, remove_empty=True)
self.assertEqual(1, len(dataset))
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_inplace_save_writes_only_updated_data(self):
        # An in-place save() after put()/remove() must leave only the
        # files of the surviving items on disk: the removed 'valid'
        # subset files are gone, the 'train' files are (re)written.
        class CustomConverter(Converter):
            DEFAULT_IMAGE_EXT = '.jpg'
            def apply(self):
                assert osp.isdir(self._save_dir)
                for item in self._extractor:
                    name = f'{item.subset}_{item.id}'
                    with open(osp.join(
                            self._save_dir, name + '.txt'), 'w') as f:
                        f.write('\n')
                    if self._save_images and \
                            item.has_image and item.image.has_data:
                        self._save_image(item, name=name)
        env = Environment()
        env.converters.items = { 'test': CustomConverter }
        with TestDir() as path:
            dataset = Dataset.from_iterable([
                DatasetItem(1, subset='train', image=np.ones((2, 4, 3))),
                DatasetItem(2, subset='train',
                    image=Image(path='2.jpg', size=(3, 2))),
                DatasetItem(3, subset='valid', image=np.ones((2, 2, 3))),
            ], categories=[], env=env)
            dataset.export(path, 'test', save_images=True)
            dataset.put(DatasetItem(2, subset='train', image=np.ones((3, 2, 3))))
            dataset.remove(3, 'valid')
            dataset.save(save_images=True)
            self.assertEqual({
                'train_1.txt', 'train_1.jpg',
                'train_2.txt', 'train_2.jpg'
            },
                set(os.listdir(path)))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_update_overwrites_matching_items(self):
patch = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Bbox(1, 2, 3, 4, label=1) ])
], categories=['a', 'b'])
dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Bbox(2, 2, 1, 1, label=0) ]),
DatasetItem(id=2, annotations=[ Bbox(1, 1, 1, 1, label=1) ]),
], categories=['a', 'b'])
expected = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Bbox(1, 2, 3, 4, label=1) ]),
DatasetItem(id=2, annotations=[ Bbox(1, 1, 1, 1, label=1) ]),
], categories=['a', 'b'])
dataset.update(patch)
compare_datasets(self, expected, dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_update_can_reorder_labels(self):
patch = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Bbox(1, 2, 3, 4, label=1) ])
], categories=['b', 'a'])
dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Bbox(2, 2, 1, 1, label=0) ])
], categories=['a', 'b'])
# Note that label id and categories are changed
expected = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Bbox(1, 2, 3, 4, label=0) ])
], categories=['a', 'b'])
dataset.update(patch)
compare_datasets(self, expected, dataset)
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_update_can_project_labels(self):
        # update() projects the patch's label ids into the destination
        # label space; annotations whose label is absent from the
        # destination categories are dropped.
        dataset = Dataset.from_iterable([
            # Must be overridden
            DatasetItem(id=100, annotations=[
                Bbox(1, 2, 3, 3, label=0),
            ]),
            # Must be kept
            DatasetItem(id=1, annotations=[
                Bbox(1, 2, 3, 4, label=1)
            ]),
        ], categories=['a', 'b'])
        patch = Dataset.from_iterable([
            # Must override
            DatasetItem(id=100, annotations=[
                Bbox(1, 2, 3, 4, label=0), # Label must be remapped
                Bbox(5, 6, 2, 3, label=1), # Label must be remapped
                Bbox(2, 2, 2, 3, label=2), # Will be dropped due to label
            ]),
            # Must be added
            DatasetItem(id=2, annotations=[
                Bbox(1, 2, 3, 2, label=1) # Label must be remapped
            ]),
        ], categories=['b', 'a', 'c'])
        expected = Dataset.from_iterable([
            DatasetItem(id=100, annotations=[
                Bbox(1, 2, 3, 4, label=1),
                Bbox(5, 6, 2, 3, label=0),
            ]),
            DatasetItem(id=1, annotations=[
                Bbox(1, 2, 3, 4, label=1)
            ]),
            DatasetItem(id=2, annotations=[
                Bbox(1, 2, 3, 2, label=0)
            ]),
        ], categories=['a', 'b'])
        dataset.update(patch)
        compare_datasets(self, expected, dataset, ignored_attrs='*')
class DatasetItemTest(TestCase):
    """Construction contract of DatasetItem."""

    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_ctor_requires_id(self):
        # 'id' has no default, so construction without it must fail.
        with self.assertRaises(Exception):
            # pylint: disable=no-value-for-parameter
            DatasetItem()
            # pylint: enable=no-value-for-parameter

    @staticmethod
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_ctors_with_image():
        # Every supported kind of the 'image' argument must be accepted.
        image_variants = (
            None,
            'path.jpg',
            np.array([1, 2, 3]),
            lambda f: np.array([1, 2, 3]),
            Image(data=np.array([1, 2, 3])),
        )
        for image in image_variants:
            DatasetItem(id=0, image=image)
class DatasetFilterTest(TestCase):
    @staticmethod
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_item_representations():
        # Smoke test: an item carrying every annotation type must be
        # encodable and convertible to a string without errors.
        item = DatasetItem(id=1, subset='subset',
            image=np.ones((5, 4, 3)),
            annotations=[
                Label(0, attributes={'a1': 1, 'a2': '2'}, id=1, group=2),
                Caption('hello', id=1),
                Caption('world', group=5),
                Label(2, id=3, attributes={ 'x': 1, 'y': '2' }),
                Bbox(1, 2, 3, 4, label=4, id=4, attributes={ 'a': 1.0 }),
                Bbox(5, 6, 7, 8, id=5, group=5),
                Points([1, 2, 2, 0, 1, 1], label=0, id=5),
                Mask(id=5, image=np.ones((3, 2))),
                Mask(label=3, id=5, image=np.ones((2, 3))),
                PolyLine([1, 2, 3, 4, 5, 6, 7, 8], id=11),
                Polygon([1, 2, 3, 4, 5, 6, 7, 8]),
            ]
        )
        encoded = DatasetItemEncoder.encode(item)
        DatasetItemEncoder.to_string(encoded)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_item_filter_can_be_applied(self):
class TestExtractor(Extractor):
def __iter__(self):
for i in range(4):
yield DatasetItem(id=i, subset='train')
extractor = TestExtractor()
filtered = XPathDatasetFilter(extractor, '/item[id > 1]')
self.assertEqual(2, len(filtered))
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_annotations_filter_can_be_applied(self):
        # Annotation-level filtering keeps every item but removes the
        # annotations that do not match; items may end up empty.
        class SrcExtractor(Extractor):
            def __iter__(self):
                return iter([
                    DatasetItem(id=0),
                    DatasetItem(id=1, annotations=[
                        Label(0),
                        Label(1),
                    ]),
                    DatasetItem(id=2, annotations=[
                        Label(0),
                        Label(2),
                    ]),
                ])
        class DstExtractor(Extractor):
            def __iter__(self):
                return iter([
                    DatasetItem(id=0),
                    DatasetItem(id=1, annotations=[
                        Label(0),
                    ]),
                    DatasetItem(id=2, annotations=[
                        Label(0),
                    ]),
                ])
        extractor = SrcExtractor()
        filtered = XPathAnnotationsFilter(extractor,
            '/item/annotation[label_id = 0]')
        self.assertListEqual(list(filtered), list(DstExtractor()))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_annotations_filter_can_remove_empty_items(self):
source = Dataset.from_iterable([
DatasetItem(id=0),
DatasetItem(id=1, annotations=[
Label(0),
Label(1),
]),
DatasetItem(id=2, annotations=[
Label(0),
Label(2),
]),
], categories=['a', 'b', 'c'])
expected = Dataset.from_iterable([
DatasetItem(id=2, annotations=[Label(2)]),
], categories=['a', 'b', 'c'])
filtered = XPathAnnotationsFilter(source,
'/item/annotation[label_id = 2]', remove_empty=True)
compare_datasets(self, expected, filtered)
| 33.371168 | 81 | 0.572403 | from unittest import TestCase
import os
import os.path as osp
import numpy as np
from datumaro.components.annotation import (
AnnotationType, Bbox, Caption, Label, LabelCategories, Mask, Points,
Polygon, PolyLine,
)
from datumaro.components.converter import Converter
from datumaro.components.dataset import (
DEFAULT_FORMAT, Dataset, ItemStatus, eager_mode,
)
from datumaro.components.dataset_filter import (
DatasetItemEncoder, XPathAnnotationsFilter, XPathDatasetFilter,
)
from datumaro.components.environment import Environment
from datumaro.components.errors import (
ConflictingCategoriesError, DatasetNotFoundError, MultipleFormatsMatchError,
NoMatchingFormatsError, RepeatedItemError, UnknownFormatError,
)
from datumaro.components.extractor import (
DEFAULT_SUBSET_NAME, DatasetItem, Extractor, ItemTransform, Transform,
)
from datumaro.components.launcher import Launcher
from datumaro.components.media import Image
from datumaro.util.test_utils import TestDir, compare_datasets
from .requirements import Requirements, mark_requirement
class DatasetTest(TestCase):
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_create_from_extractors(self):
        # Items with the same (id, subset) coming from different
        # sources must be merged, including their annotation lists.
        class SrcExtractor1(Extractor):
            def __iter__(self):
                return iter([
                    DatasetItem(id=1, subset='train', annotations=[
                        Bbox(1, 2, 3, 4),
                        Label(4),
                    ]),
                    DatasetItem(id=1, subset='val', annotations=[
                        Label(4),
                    ]),
                ])
        class SrcExtractor2(Extractor):
            def __iter__(self):
                return iter([
                    DatasetItem(id=1, subset='val', annotations=[
                        Label(5),
                    ]),
                ])
        class DstExtractor(Extractor):
            def __iter__(self):
                return iter([
                    DatasetItem(id=1, subset='train', annotations=[
                        Bbox(1, 2, 3, 4),
                        Label(4),
                    ]),
                    DatasetItem(id=1, subset='val', annotations=[
                        Label(4),
                        Label(5),
                    ]),
                ])
        dataset = Dataset.from_extractors(SrcExtractor1(), SrcExtractor2())
        compare_datasets(self, DstExtractor(), dataset)
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_can_create_from_iterable(self):
        # from_iterable() with a plain label-name list must be
        # equivalent to an extractor declaring LabelCategories.
        class TestExtractor(Extractor):
            def __iter__(self):
                return iter([
                    DatasetItem(id=1, subset='train', annotations=[
                        Bbox(1, 2, 3, 4, label=2),
                        Label(4),
                    ]),
                    DatasetItem(id=1, subset='val', annotations=[
                        Label(3),
                    ]),
                ])
            def categories(self):
                return { AnnotationType.label: LabelCategories.from_iterable(
                    ['a', 'b', 'c', 'd', 'e'])
                }
        actual = Dataset.from_iterable([
            DatasetItem(id=1, subset='train', annotations=[
                Bbox(1, 2, 3, 4, label=2),
                Label(4),
            ]),
            DatasetItem(id=1, subset='val', annotations=[
                Label(3),
            ]),
        ], categories=['a', 'b', 'c', 'd', 'e'])
        compare_datasets(self, TestExtractor(), actual)
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_can_join_datasets_with_empty_categories(self):
        # Sources with empty or absent categories must merge cleanly
        # with a source that actually declares labels.
        expected = Dataset.from_iterable([
            DatasetItem(1, annotations=[
                Label(0),
                Bbox(1, 2, 3, 4),
                Caption('hello world'),
            ])
        ], categories=['a'])
        src1 = Dataset.from_iterable([
            DatasetItem(1, annotations=[ Bbox(1, 2, 3, 4, label=None) ])
        ], categories=[])
        src2 = Dataset.from_iterable([
            DatasetItem(1, annotations=[ Label(0) ])
        ], categories=['a'])
        src3 = Dataset.from_iterable([
            DatasetItem(1, annotations=[ Caption('hello world') ])
        ])
        actual = Dataset.from_extractors(src1, src2, src3)
        compare_datasets(self, expected, actual)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Label(2) ]),
], categories=['a', 'b', 'c'])
with TestDir() as test_dir:
source_dataset.save(test_dir)
loaded_dataset = Dataset.load(test_dir)
compare_datasets(self, source_dataset, loaded_dataset)
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_can_detect(self):
        # detect() must recognize a directory saved in the default
        # format when that format is registered in the environment.
        env = Environment()
        env.importers.items = {DEFAULT_FORMAT: env.importers[DEFAULT_FORMAT]}
        env.extractors.items = {DEFAULT_FORMAT: env.extractors[DEFAULT_FORMAT]}
        dataset = Dataset.from_iterable([
            DatasetItem(id=1, annotations=[ Label(2) ]),
        ], categories=['a', 'b', 'c'])
        with TestDir() as test_dir:
            dataset.save(test_dir)
            detected_format = Dataset.detect(test_dir, env=env)
            self.assertEqual(DEFAULT_FORMAT, detected_format)
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_can_detect_and_import(self):
        # import_from() without an explicit format must auto-detect it
        # and bind the resulting dataset to the source directory.
        env = Environment()
        env.importers.items = {DEFAULT_FORMAT: env.importers[DEFAULT_FORMAT]}
        env.extractors.items = {DEFAULT_FORMAT: env.extractors[DEFAULT_FORMAT]}
        source_dataset = Dataset.from_iterable([
            DatasetItem(id=1, annotations=[ Label(2) ]),
        ], categories=['a', 'b', 'c'])
        with TestDir() as test_dir:
            source_dataset.save(test_dir)
            imported_dataset = Dataset.import_from(test_dir, env=env)
            self.assertEqual(imported_dataset.data_path, test_dir)
            self.assertEqual(imported_dataset.format, DEFAULT_FORMAT)
            compare_datasets(self, source_dataset, imported_dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_report_no_dataset_found(self):
env = Environment()
env.importers.items = {
DEFAULT_FORMAT: env.importers[DEFAULT_FORMAT],
}
env.extractors.items = {
DEFAULT_FORMAT: env.extractors[DEFAULT_FORMAT],
}
with TestDir() as test_dir, self.assertRaises(DatasetNotFoundError):
Dataset.import_from(test_dir, DEFAULT_FORMAT, env=env)
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_can_report_multiple_formats_match(self):
        # Two registered formats matching the same directory must be
        # reported as an ambiguity instead of picking one silently.
        env = Environment()
        env.importers.items = {
            'a': env.importers[DEFAULT_FORMAT],
            'b': env.importers[DEFAULT_FORMAT],
        }
        env.extractors.items = {
            'a': env.extractors[DEFAULT_FORMAT],
            'b': env.extractors[DEFAULT_FORMAT],
        }
        source_dataset = Dataset.from_iterable([
            DatasetItem(id=1, annotations=[ Label(2) ]),
        ], categories=['a', 'b', 'c'])
        with TestDir() as test_dir:
            source_dataset.save(test_dir)
            with self.assertRaises(MultipleFormatsMatchError):
                Dataset.import_from(test_dir, env=env)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_report_no_matching_formats(self):
env = Environment()
env.importers.items = {}
env.extractors.items = {}
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Label(2) ]),
], categories=['a', 'b', 'c'])
with TestDir() as test_dir:
source_dataset.save(test_dir)
with self.assertRaises(NoMatchingFormatsError):
Dataset.import_from(test_dir, env=env)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_report_unknown_format_requested(self):
env = Environment()
env.importers.items = {}
env.extractors.items = {}
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Label(2) ]),
], categories=['a', 'b', 'c'])
with TestDir() as test_dir:
source_dataset.save(test_dir)
with self.assertRaises(UnknownFormatError):
Dataset.import_from(test_dir, format='custom', env=env)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_export_by_string_format_name(self):
env = Environment()
env.converters.items = {'qq': env.converters[DEFAULT_FORMAT]}
dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Label(2) ]),
], categories=['a', 'b', 'c'], env=env)
with TestDir() as test_dir:
dataset.export(format='qq', save_dir=test_dir)
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_can_remember_export_options(self):
        # Options passed to save() are remembered on the bound dataset
        # and reused by later patch saves, so the deleted image file
        # is written again even though save_images is not repeated.
        dataset = Dataset.from_iterable([
            DatasetItem(id=1, image=np.ones((1, 2, 3))),
        ], categories=['a'])
        with TestDir() as test_dir:
            dataset.save(test_dir, save_images=True)
            dataset.put(dataset.get(1))  # mark the item as updated
            image_path = osp.join(test_dir, 'images', 'default', '1.jpg')
            os.remove(image_path)
            dataset.save(test_dir)
            self.assertEqual({'save_images': True}, dataset.options)
            self.assertTrue(osp.isfile(image_path))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_compute_length_when_created_from_scratch(self):
dataset = Dataset()
dataset.put(DatasetItem(1))
dataset.put(DatasetItem(2))
dataset.put(DatasetItem(3))
dataset.remove(1)
self.assertEqual(2, len(dataset))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_compute_length_when_created_from_extractor(self):
class TestExtractor(Extractor):
def __iter__(self):
yield from [
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
]
dataset = Dataset.from_extractors(TestExtractor())
self.assertEqual(3, len(dataset))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_compute_length_when_created_from_sequence(self):
dataset = Dataset.from_iterable([
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
])
self.assertEqual(3, len(dataset))
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_can_transform_by_string_name(self):
        # transform() must accept the name a transform was registered
        # under in the environment.
        expected = Dataset.from_iterable([
            DatasetItem(id=1, attributes={'qq': 1}),
        ])
        class TestTransform(ItemTransform):
            def transform_item(self, item):
                return self.wrap_item(item, attributes={'qq': 1})
        env = Environment()
        env.transforms.register('qq', TestTransform)
        dataset = Dataset.from_iterable([ DatasetItem(id=1) ], env=env)
        actual = dataset.transform('qq')
        compare_datasets(self, expected, actual)
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_can_transform(self):
        # transform() must also accept a transform class directly.
        expected = Dataset.from_iterable([
            DatasetItem(id=1, attributes={'qq': 1}),
        ])
        class TestTransform(ItemTransform):
            def transform_item(self, item):
                return self.wrap_item(item, attributes={'qq': 1})
        dataset = Dataset.from_iterable([ DatasetItem(id=1) ])
        actual = dataset.transform(TestTransform)
        compare_datasets(self, expected, actual)
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_can_join_annotations(self):
        # When merging sources, identical annotations are deduplicated
        # and distinct ones are concatenated.
        a = Dataset.from_iterable([
            DatasetItem(id=1, subset='train', annotations=[
                Label(1, id=3),
                Label(2, attributes={ 'x': 1 }),
            ])
        ], categories=['a', 'b', 'c', 'd'])
        b = Dataset.from_iterable([
            DatasetItem(id=1, subset='train', annotations=[
                Label(2, attributes={ 'x': 1 }),
                Label(3, id=4),
            ])
        ], categories=['a', 'b', 'c', 'd'])
        expected = Dataset.from_iterable([
            DatasetItem(id=1, subset='train', annotations=[
                Label(1, id=3),
                Label(2, attributes={ 'x': 1 }),
                Label(3, id=4),
            ])
        ], categories=['a', 'b', 'c', 'd'])
        merged = Dataset.from_extractors(a, b)
        compare_datasets(self, expected, merged)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_cant_join_different_categories(self):
s1 = Dataset.from_iterable([], categories=['a', 'b'])
s2 = Dataset.from_iterable([], categories=['b', 'a'])
with self.assertRaises(ConflictingCategoriesError):
Dataset.from_extractors(s1, s2)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_join_datasets(self):
s1 = Dataset.from_iterable([ DatasetItem(0), DatasetItem(1) ])
s2 = Dataset.from_iterable([ DatasetItem(1), DatasetItem(2) ])
expected = Dataset.from_iterable([
DatasetItem(0), DatasetItem(1), DatasetItem(2)
])
actual = Dataset.from_extractors(s1, s2)
compare_datasets(self, expected, actual)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_track_modifications_on_addition(self):
dataset = Dataset.from_iterable([
DatasetItem(1),
DatasetItem(2),
])
self.assertFalse(dataset.is_modified)
dataset.put(DatasetItem(3, subset='a'))
self.assertTrue(dataset.is_modified)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_track_modifications_on_removal(self):
dataset = Dataset.from_iterable([
DatasetItem(1),
DatasetItem(2),
])
self.assertFalse(dataset.is_modified)
dataset.remove(1)
self.assertTrue(dataset.is_modified)
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_can_create_patch(self):
        # get_patch() must report puts as "added" and removals as
        # "removed", and carry the data of the surviving items only.
        expected = Dataset.from_iterable([
            DatasetItem(2),
            DatasetItem(3, subset='a')
        ])
        dataset = Dataset.from_iterable([
            DatasetItem(1),
            DatasetItem(2),
        ])
        dataset.put(DatasetItem(2))
        dataset.put(DatasetItem(3, subset='a'))
        dataset.remove(1)
        patch = dataset.get_patch()
        self.assertEqual({
            ('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,
            ('2', DEFAULT_SUBSET_NAME): ItemStatus.added,
            ('3', 'a'): ItemStatus.added,
        }, patch.updated_items)
        self.assertEqual({
            'default': ItemStatus.modified,
            'a': ItemStatus.modified,
        }, patch.updated_subsets)
        self.assertEqual(2, len(patch.data))
        self.assertEqual(None, patch.data.get(1))
        self.assertEqual(dataset.get(2), patch.data.get(2))
        self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))
        compare_datasets(self, expected, dataset)
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_can_create_patch_when_cached(self):
        # Same scenario as test_can_create_patch, but with the cache
        # built first: a put() over an already-known item is then
        # reported as "modified" instead of "added".
        expected = Dataset.from_iterable([
            DatasetItem(2),
            DatasetItem(3, subset='a')
        ])
        dataset = Dataset.from_iterable([
            DatasetItem(1),
            DatasetItem(2),
        ])
        dataset.init_cache()
        dataset.put(DatasetItem(2))
        dataset.put(DatasetItem(3, subset='a'))
        dataset.remove(1)
        patch = dataset.get_patch()
        self.assertEqual({
            ('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,
            ('2', DEFAULT_SUBSET_NAME): ItemStatus.modified,
            ('3', 'a'): ItemStatus.added,
        }, patch.updated_items)
        self.assertEqual({
            'default': ItemStatus.modified,
            'a': ItemStatus.modified,
        }, patch.updated_subsets)
        self.assertEqual(2, len(patch.data))
        self.assertEqual(None, patch.data.get(1))
        self.assertEqual(dataset.get(2), patch.data.get(2))
        self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))
        compare_datasets(self, expected, dataset)
    @mark_requirement(Requirements.DATUM_BUG_257)
    def test_can_create_patch_when_transforms_mixed(self):
        # Statuses must be computed correctly when the changes come
        # from a mix of non-local transforms (one removing, one adding
        # an item) rather than direct put()/remove() calls.
        expected = Dataset.from_iterable([
            DatasetItem(2),
            DatasetItem(3, subset='a')
        ])
        dataset = Dataset.from_iterable([
            DatasetItem(1),
            DatasetItem(2),
        ])
        class Remove1(Transform):
            def __iter__(self):
                for item in self._extractor:
                    if item.id != '1':
                        yield item
        class Add3(Transform):
            def __iter__(self):
                for item in self._extractor:
                    if item.id == '2':
                        yield item
                yield DatasetItem(3, subset='a')
        dataset.transform(Remove1)
        dataset.transform(Add3)
        patch = dataset.get_patch()
        self.assertEqual({
            ('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,
            ('2', DEFAULT_SUBSET_NAME): ItemStatus.modified,
            ('3', 'a'): ItemStatus.added,
        }, patch.updated_items)
        self.assertEqual({
            'default': ItemStatus.modified,
            'a': ItemStatus.modified,
        }, patch.updated_subsets)
        self.assertEqual(2, len(patch.data))
        self.assertEqual(None, patch.data.get(1))
        self.assertEqual(dataset.get(2), patch.data.get(2))
        self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))
        compare_datasets(self, expected, dataset)
    @mark_requirement(Requirements.DATUM_BUG_257)
    def test_can_create_patch_when_transforms_chained(self):
        # Class-level counters track the evaluation cost of building
        # the patch: the source is iterated twice and each chained
        # transform exactly once (asserted below).
        expected = Dataset.from_iterable([
            DatasetItem(2),
            DatasetItem(3, subset='a')
        ])
        class TestExtractor(Extractor):
            iter_called = 0
            def __iter__(self):
                yield from [
                    DatasetItem(1),
                    DatasetItem(2),
                ]
                __class__.iter_called += 1
        class Remove1(Transform):
            iter_called = 0
            def __iter__(self):
                for item in self._extractor:
                    if item.id != '1':
                        yield item
                __class__.iter_called += 1
        class Add3(Transform):
            iter_called = 0
            def __iter__(self):
                yield from self._extractor
                yield DatasetItem(3, subset='a')
                __class__.iter_called += 1
        dataset = Dataset.from_extractors(TestExtractor())
        dataset.transform(Remove1)
        dataset.transform(Add3)
        patch = dataset.get_patch()
        self.assertEqual({
            ('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,
            ('2', DEFAULT_SUBSET_NAME): ItemStatus.modified,
            ('3', 'a'): ItemStatus.added,
        }, patch.updated_items)
        self.assertEqual({
            'default': ItemStatus.modified,
            'a': ItemStatus.modified,
        }, patch.updated_subsets)
        self.assertEqual(2, len(patch.data))
        self.assertEqual(None, patch.data.get(1))
        self.assertEqual(dataset.get(2), patch.data.get(2))
        self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))
        self.assertEqual(TestExtractor.iter_called, 2)
        self.assertEqual(Remove1.iter_called, 1)
        self.assertEqual(Add3.iter_called, 1)
        compare_datasets(self, expected, dataset)
    @mark_requirement(Requirements.DATUM_BUG_257)
    def test_can_create_patch_when_transforms_intermixed_with_direct_ops(self):
        # Direct put()/remove() calls interleaved with transforms must
        # all be reflected in the patch. Thanks to the up-front cache,
        # the source is only iterated once (asserted below).
        expected = Dataset.from_iterable([
            DatasetItem(3, subset='a'),
            DatasetItem(4),
            DatasetItem(5),
        ])
        class TestExtractor(Extractor):
            iter_called = 0
            def __iter__(self):
                yield from [
                    DatasetItem(1),
                    DatasetItem(2),
                ]
                __class__.iter_called += 1
        class Remove1(Transform):
            iter_called = 0
            def __iter__(self):
                for item in self._extractor:
                    if item.id != '1':
                        yield item
                __class__.iter_called += 1
        class Add3(Transform):
            iter_called = 0
            def __iter__(self):
                yield from self._extractor
                yield DatasetItem(3, subset='a')
                __class__.iter_called += 1
        dataset = Dataset.from_extractors(TestExtractor())
        dataset.init_cache()
        dataset.put(DatasetItem(4))
        dataset.transform(Remove1)
        dataset.put(DatasetItem(5))
        dataset.remove(2)
        dataset.transform(Add3)
        patch = dataset.get_patch()
        self.assertEqual({
            ('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,
            ('2', DEFAULT_SUBSET_NAME): ItemStatus.removed,
            ('3', 'a'): ItemStatus.added,
            ('4', DEFAULT_SUBSET_NAME): ItemStatus.added,
            ('5', DEFAULT_SUBSET_NAME): ItemStatus.added,
        }, patch.updated_items)
        self.assertEqual({
            'default': ItemStatus.modified,
            'a': ItemStatus.modified,
        }, patch.updated_subsets)
        self.assertEqual(3, len(patch.data))
        self.assertEqual(None, patch.data.get(1))
        self.assertEqual(None, patch.data.get(2))
        self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))
        self.assertEqual(dataset.get(4), patch.data.get(4))
        self.assertEqual(dataset.get(5), patch.data.get(5))
        self.assertEqual(TestExtractor.iter_called, 1)
        self.assertEqual(Remove1.iter_called, 1)
        self.assertEqual(Add3.iter_called, 1)
        compare_datasets(self, expected, dataset)
    @mark_requirement(Requirements.DATUM_BUG_257)
    def test_can_create_patch_when_local_transforms_stacked(self):
        # Three stacked local (per-item) transforms must be evaluated
        # in a single pass over the source (asserted below), and the
        # id shifts must be reflected in the patch statuses.
        expected = Dataset.from_iterable([
            DatasetItem(4),
            DatasetItem(5),
        ])
        class TestExtractor(Extractor):
            iter_called = 0
            def __iter__(self):
                yield from [
                    DatasetItem(1),
                    DatasetItem(2),
                ]
                __class__.iter_called += 1
        class ShiftIds(ItemTransform):
            def transform_item(self, item):
                return item.wrap(id=int(item.id) + 1)
        dataset = Dataset.from_extractors(TestExtractor())
        dataset.remove(2)
        dataset.transform(ShiftIds)
        dataset.transform(ShiftIds)
        dataset.transform(ShiftIds)
        dataset.put(DatasetItem(5))
        patch = dataset.get_patch()
        self.assertEqual({
            ('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,
            ('2', DEFAULT_SUBSET_NAME): ItemStatus.removed,
            ('4', DEFAULT_SUBSET_NAME): ItemStatus.added,
            ('5', DEFAULT_SUBSET_NAME): ItemStatus.added,
        }, patch.updated_items)
        self.assertEqual({
            'default': ItemStatus.modified,
        }, patch.updated_subsets)
        self.assertEqual(2, len(patch.data))
        self.assertEqual(None, patch.data.get(1))
        self.assertEqual(None, patch.data.get(2))
        self.assertEqual(None, patch.data.get(3))
        self.assertEqual(dataset.get(4), patch.data.get(4))
        self.assertEqual(dataset.get(5), patch.data.get(5))
        self.assertEqual(TestExtractor.iter_called, 1)
        compare_datasets(self, expected, dataset)
    @mark_requirement(Requirements.DATUM_BUG_257)
    def test_can_create_patch_when_transforms_chained_and_source_cached(self):
        # Like test_can_create_patch_when_transforms_chained, but the
        # source is cached up front, so it is iterated only once
        # (asserted below); each transform still runs exactly once.
        expected = Dataset.from_iterable([
            DatasetItem(2),
            DatasetItem(3, subset='a')
        ])
        class TestExtractor(Extractor):
            iter_called = 0
            def __iter__(self):
                yield from [
                    DatasetItem(1),
                    DatasetItem(2),
                ]
                __class__.iter_called += 1
        class Remove1(Transform):
            iter_called = 0
            def __iter__(self):
                for item in self._extractor:
                    if item.id != '1':
                        yield item
                __class__.iter_called += 1
        class Add3(Transform):
            iter_called = 0
            def __iter__(self):
                yield from self._extractor
                yield DatasetItem(3, subset='a')
                __class__.iter_called += 1
        dataset = Dataset.from_extractors(TestExtractor())
        dataset.init_cache()
        dataset.transform(Remove1)
        dataset.transform(Add3)
        patch = dataset.get_patch()
        self.assertEqual({
            ('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,
            ('2', DEFAULT_SUBSET_NAME): ItemStatus.modified,
            ('3', 'a'): ItemStatus.added,
        }, patch.updated_items)
        self.assertEqual({
            'default': ItemStatus.modified,
            'a': ItemStatus.modified,
        }, patch.updated_subsets)
        self.assertEqual(2, len(patch.data))
        self.assertEqual(None, patch.data.get(1))
        self.assertEqual(dataset.get(2), patch.data.get(2))
        self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))
        self.assertEqual(TestExtractor.iter_called, 1)
        self.assertEqual(Remove1.iter_called, 1)
        self.assertEqual(Add3.iter_called, 1)
        compare_datasets(self, expected, dataset)
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_can_do_lazy_put_and_remove(self):
        """put()/remove() must be deferred: neither may touch the source
        extractor until the cache is explicitly initialized."""
        iter_called = False
        class TestExtractor(Extractor):
            def __iter__(self):
                nonlocal iter_called
                iter_called = True  # records that the source was enumerated
                return iter([
                    DatasetItem(1),
                    DatasetItem(2),
                ])
        dataset = Dataset.from_extractors(TestExtractor())
        self.assertFalse(dataset.is_cache_initialized)
        dataset.put(DatasetItem(3))
        dataset.remove(DatasetItem(1))
        # Mutations alone must not trigger iteration of the source.
        self.assertFalse(dataset.is_cache_initialized)
        self.assertFalse(iter_called)
        dataset.init_cache()
        # Only the explicit init_cache() call enumerates the source.
        self.assertTrue(dataset.is_cache_initialized)
        self.assertTrue(iter_called)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_put(self):
dataset = Dataset()
dataset.put(DatasetItem(1))
self.assertTrue((1, '') in dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_do_lazy_get_on_updated_item(self):
iter_called = False
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called = True
return iter([
DatasetItem(1),
DatasetItem(2),
])
dataset = Dataset.from_extractors(TestExtractor())
dataset.put(DatasetItem(2))
self.assertTrue((2, '') in dataset)
self.assertFalse(iter_called)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_switch_eager_and_lazy_with_cm_global(self):
iter_called = False
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called = True
return iter([
DatasetItem(1),
DatasetItem(2),
])
with eager_mode():
Dataset.from_extractors(TestExtractor())
self.assertTrue(iter_called)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_switch_eager_and_lazy_with_cm_local(self):
iter_called = False
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called = True
yield from [
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
DatasetItem(4),
]
dataset = Dataset.from_extractors(TestExtractor())
with eager_mode(dataset=dataset):
dataset.select(lambda item: int(item.id) < 3)
dataset.select(lambda item: int(item.id) < 2)
self.assertTrue(iter_called)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_do_lazy_select(self):
iter_called = 0
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called += 1
yield from [
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
DatasetItem(4),
]
dataset = Dataset.from_extractors(TestExtractor())
dataset.select(lambda item: int(item.id) < 3)
dataset.select(lambda item: int(item.id) < 2)
self.assertEqual(iter_called, 0)
self.assertEqual(1, len(dataset))
self.assertEqual(iter_called, 1)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_chain_lazy_transforms(self):
iter_called = 0
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called += 1
yield from [
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
DatasetItem(4),
]
dataset = Dataset.from_extractors(TestExtractor())
class TestTransform(ItemTransform):
def transform_item(self, item):
return self.wrap_item(item, id=int(item.id) + 1)
dataset.transform(TestTransform)
dataset.transform(TestTransform)
self.assertEqual(iter_called, 0)
self.assertEqual(4, len(dataset))
self.assertEqual(3, int(min(int(item.id) for item in dataset)))
self.assertEqual(iter_called, 1)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_get_len_after_local_transforms(self):
iter_called = 0
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called += 1
yield from [
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
DatasetItem(4),
]
dataset = Dataset.from_extractors(TestExtractor())
class TestTransform(ItemTransform):
def transform_item(self, item):
return self.wrap_item(item, id=int(item.id) + 1)
dataset.transform(TestTransform)
dataset.transform(TestTransform)
self.assertEqual(iter_called, 0)
self.assertEqual(4, len(dataset))
self.assertEqual(iter_called, 1)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_get_len_after_nonlocal_transforms(self):
iter_called = 0
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called += 1
yield from [
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
DatasetItem(4),
]
dataset = Dataset.from_extractors(TestExtractor())
class TestTransform(Transform):
def __iter__(self):
for item in self._extractor:
yield self.wrap_item(item, id=int(item.id) + 1)
dataset.transform(TestTransform)
dataset.transform(TestTransform)
self.assertEqual(iter_called, 0)
self.assertEqual(4, len(dataset))
self.assertEqual(iter_called, 2)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_get_subsets_after_local_transforms(self):
iter_called = 0
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called += 1
yield from [
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
DatasetItem(4),
]
dataset = Dataset.from_extractors(TestExtractor())
class TestTransform(ItemTransform):
def transform_item(self, item):
return self.wrap_item(item, id=int(item.id) + 1, subset='a')
dataset.transform(TestTransform)
dataset.transform(TestTransform)
self.assertEqual(iter_called, 0)
self.assertEqual({'a'}, set(dataset.subsets()))
self.assertEqual(iter_called, 1)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_get_subsets_after_nonlocal_transforms(self):
iter_called = 0
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called += 1
yield from [
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
DatasetItem(4),
]
dataset = Dataset.from_extractors(TestExtractor())
class TestTransform(Transform):
def __iter__(self):
for item in self._extractor:
yield self.wrap_item(item, id=int(item.id) + 1, subset='a')
dataset.transform(TestTransform)
dataset.transform(TestTransform)
self.assertEqual(iter_called, 0)
self.assertEqual({'a'}, set(dataset.subsets()))
self.assertEqual(iter_called, 2)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_raises_when_repeated_items_in_source(self):
dataset = Dataset.from_iterable([DatasetItem(0), DatasetItem(0)])
with self.assertRaises(RepeatedItemError):
dataset.init_cache()
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_can_check_item_existence(self):
        """The `in` operator accepts a DatasetItem, an (id, subset) tuple,
        or a bare id (which implies the default subset)."""
        dataset = Dataset.from_iterable([
            DatasetItem(0, subset='a'), DatasetItem(1)
        ])
        self.assertTrue(DatasetItem(0, subset='a') in dataset)
        self.assertFalse(DatasetItem(0, subset='b') in dataset)
        self.assertTrue((0, 'a') in dataset)
        self.assertFalse((0, 'b') in dataset)
        # A bare id refers to the default subset: item 1 lives there,
        # while item 0 exists only in subset 'a'.
        self.assertTrue(1 in dataset)
        self.assertFalse(0 in dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_put_with_id_override(self):
dataset = Dataset.from_iterable([])
dataset.put(DatasetItem(0, subset='a'), id=2, subset='b')
self.assertTrue((2, 'b') in dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_compute_cache_with_empty_source(self):
dataset = Dataset.from_iterable([])
dataset.put(DatasetItem(2))
dataset.init_cache()
self.assertTrue(2 in dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_cant_do_partial_caching_in_get_when_default(self):
iter_called = 0
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called += 1
return iter([
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
DatasetItem(4),
])
dataset = Dataset.from_extractors(TestExtractor())
dataset.get(3)
dataset.get(4)
self.assertEqual(1, iter_called)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_do_partial_caching_in_get_when_redefined(self):
iter_called = 0
get_called = 0
class TestExtractor(Extractor):
def __iter__(self):
nonlocal iter_called
iter_called += 1
return iter([
DatasetItem(1),
DatasetItem(2),
DatasetItem(3),
DatasetItem(4),
])
def get(self, id, subset=None):
nonlocal get_called
get_called += 1
return DatasetItem(id, subset=subset)
dataset = Dataset.from_extractors(TestExtractor())
dataset.get(3)
dataset.get(4)
self.assertEqual(0, iter_called)
self.assertEqual(2, get_called)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_binds_on_save(self):
dataset = Dataset.from_iterable([DatasetItem(1)])
self.assertFalse(dataset.is_bound)
with TestDir() as test_dir:
dataset.save(test_dir)
self.assertTrue(dataset.is_bound)
self.assertEqual(dataset.data_path, test_dir)
self.assertEqual(dataset.format, DEFAULT_FORMAT)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_flushes_changes_on_save(self):
dataset = Dataset.from_iterable([])
dataset.put(DatasetItem(1))
self.assertTrue(dataset.is_modified)
with TestDir() as test_dir:
dataset.save(test_dir)
self.assertFalse(dataset.is_modified)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_does_not_load_images_on_saving(self):
called = False
def test_loader():
nonlocal called
called = True
dataset = Dataset.from_iterable([
DatasetItem(1, image=test_loader)
])
with TestDir() as test_dir:
dataset.save(test_dir)
self.assertFalse(called)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_transform_labels(self):
expected = Dataset.from_iterable([], categories=['c', 'b'])
dataset = Dataset.from_iterable([], categories=['a', 'b'])
actual = dataset.transform('remap_labels', {'a': 'c'})
compare_datasets(self, expected, actual)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_run_model(self):
dataset = Dataset.from_iterable([
DatasetItem(i, image=np.array([i]))
for i in range(5)
], categories=['label'])
batch_size = 3
expected = Dataset.from_iterable([
DatasetItem(i, image=np.array([i]), annotations=[
Label(0, attributes={ 'idx': i % batch_size, 'data': i })
])
for i in range(5)
], categories=['label'])
calls = 0
class TestLauncher(Launcher):
def launch(self, inputs):
nonlocal calls
calls += 1
for i, inp in enumerate(inputs):
yield [ Label(0, attributes={'idx': i, 'data': inp.item()}) ]
model = TestLauncher()
actual = dataset.run_model(model, batch_size=batch_size)
compare_datasets(self, expected, actual, require_images=True)
self.assertEqual(2, calls)
@mark_requirement(Requirements.DATUM_BUG_259)
def test_can_filter_items(self):
dataset = Dataset.from_iterable([
DatasetItem(id=0, subset='train'),
DatasetItem(id=1, subset='test'),
])
dataset.filter('/item[id > 0]')
self.assertEqual(1, len(dataset))
@mark_requirement(Requirements.DATUM_BUG_257)
def test_filter_registers_changes(self):
dataset = Dataset.from_iterable([
DatasetItem(id=0, subset='train'),
DatasetItem(id=1, subset='test'),
])
dataset.filter('/item[id > 0]')
self.assertEqual({
('0', 'train'): ItemStatus.removed,
('1', 'test'): ItemStatus.modified,
}, dataset.get_patch().updated_items)
@mark_requirement(Requirements.DATUM_BUG_259)
def test_can_filter_annotations(self):
dataset = Dataset.from_iterable([
DatasetItem(id=0, subset='train', annotations=[Label(0), Label(1)]),
DatasetItem(id=1, subset='val', annotations=[Label(2)]),
DatasetItem(id=2, subset='test', annotations=[Label(0), Label(2)]),
], categories=['a', 'b', 'c'])
dataset.filter('/item/annotation[label = "c"]',
filter_annotations=True, remove_empty=True)
self.assertEqual(2, len(dataset))
@mark_requirement(Requirements.DATUM_BUG_259)
def test_can_filter_items_in_merged_dataset(self):
dataset = Dataset.from_extractors(
Dataset.from_iterable([ DatasetItem(id=0, subset='train') ]),
Dataset.from_iterable([ DatasetItem(id=1, subset='test') ]),
)
dataset.filter('/item[id > 0]')
self.assertEqual(1, len(dataset))
@mark_requirement(Requirements.DATUM_BUG_259)
def test_can_filter_annotations_in_merged_dataset(self):
dataset = Dataset.from_extractors(
Dataset.from_iterable([
DatasetItem(id=0, subset='train', annotations=[Label(0)]),
], categories=['a', 'b', 'c']),
Dataset.from_iterable([
DatasetItem(id=1, subset='val', annotations=[Label(1)]),
], categories=['a', 'b', 'c']),
Dataset.from_iterable([
DatasetItem(id=2, subset='test', annotations=[Label(2)]),
], categories=['a', 'b', 'c']),
)
dataset.filter('/item/annotation[label = "c"]',
filter_annotations=True, remove_empty=True)
self.assertEqual(1, len(dataset))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_inplace_save_writes_only_updated_data(self):
class CustomConverter(Converter):
DEFAULT_IMAGE_EXT = '.jpg'
def apply(self):
assert osp.isdir(self._save_dir)
for item in self._extractor:
name = f'{item.subset}_{item.id}'
with open(osp.join(
self._save_dir, name + '.txt'), 'w') as f:
f.write('\n')
if self._save_images and \
item.has_image and item.image.has_data:
self._save_image(item, name=name)
env = Environment()
env.converters.items = { 'test': CustomConverter }
with TestDir() as path:
dataset = Dataset.from_iterable([
DatasetItem(1, subset='train', image=np.ones((2, 4, 3))),
DatasetItem(2, subset='train',
image=Image(path='2.jpg', size=(3, 2))),
DatasetItem(3, subset='valid', image=np.ones((2, 2, 3))),
], categories=[], env=env)
dataset.export(path, 'test', save_images=True)
dataset.put(DatasetItem(2, subset='train', image=np.ones((3, 2, 3))))
dataset.remove(3, 'valid')
dataset.save(save_images=True)
self.assertEqual({
'train_1.txt', 'train_1.jpg',
'train_2.txt', 'train_2.jpg'
},
set(os.listdir(path)))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_update_overwrites_matching_items(self):
patch = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Bbox(1, 2, 3, 4, label=1) ])
], categories=['a', 'b'])
dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Bbox(2, 2, 1, 1, label=0) ]),
DatasetItem(id=2, annotations=[ Bbox(1, 1, 1, 1, label=1) ]),
], categories=['a', 'b'])
expected = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Bbox(1, 2, 3, 4, label=1) ]),
DatasetItem(id=2, annotations=[ Bbox(1, 1, 1, 1, label=1) ]),
], categories=['a', 'b'])
dataset.update(patch)
compare_datasets(self, expected, dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_update_can_reorder_labels(self):
patch = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Bbox(1, 2, 3, 4, label=1) ])
], categories=['b', 'a'])
dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Bbox(2, 2, 1, 1, label=0) ])
], categories=['a', 'b'])
expected = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Bbox(1, 2, 3, 4, label=0) ])
], categories=['a', 'b'])
dataset.update(patch)
compare_datasets(self, expected, dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_update_can_project_labels(self):
dataset = Dataset.from_iterable([
DatasetItem(id=100, annotations=[
Bbox(1, 2, 3, 3, label=0),
]),
DatasetItem(id=1, annotations=[
Bbox(1, 2, 3, 4, label=1)
]),
], categories=['a', 'b'])
patch = Dataset.from_iterable([
DatasetItem(id=100, annotations=[
Bbox(1, 2, 3, 4, label=0),
Bbox(5, 6, 2, 3, label=1),
Bbox(2, 2, 2, 3, label=2),
]),
DatasetItem(id=2, annotations=[
Bbox(1, 2, 3, 2, label=1)
]),
], categories=['b', 'a', 'c'])
expected = Dataset.from_iterable([
DatasetItem(id=100, annotations=[
Bbox(1, 2, 3, 4, label=1),
Bbox(5, 6, 2, 3, label=0),
]),
DatasetItem(id=1, annotations=[
Bbox(1, 2, 3, 4, label=1)
]),
DatasetItem(id=2, annotations=[
Bbox(1, 2, 3, 2, label=0)
]),
], categories=['a', 'b'])
dataset.update(patch)
compare_datasets(self, expected, dataset, ignored_attrs='*')
class DatasetItemTest(TestCase):
    """Checks of the DatasetItem constructor contract."""
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_ctor_requires_id(self):
        # An id is mandatory - constructing without one must fail.
        with self.assertRaises(Exception):
            DatasetItem()
    @staticmethod
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_ctors_with_image():
        # Every supported form of the 'image' argument must be accepted:
        # absent, path string, ndarray, lazy loader callable, Image object.
        for args in [
            { 'id': 0, 'image': None },
            { 'id': 0, 'image': 'path.jpg' },
            { 'id': 0, 'image': np.array([1, 2, 3]) },
            { 'id': 0, 'image': lambda f: np.array([1, 2, 3]) },
            { 'id': 0, 'image': Image(data=np.array([1, 2, 3])) },
        ]:
            DatasetItem(**args)
class DatasetFilterTest(TestCase):
    """Tests for XPath-based filtering of items and annotations."""
    @staticmethod
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_item_representations():
        # Smoke test: every annotation kind must survive encoding and
        # stringification without raising.
        item = DatasetItem(id=1, subset='subset',
            image=np.ones((5, 4, 3)),
            annotations=[
                Label(0, attributes={'a1': 1, 'a2': '2'}, id=1, group=2),
                Caption('hello', id=1),
                Caption('world', group=5),
                Label(2, id=3, attributes={ 'x': 1, 'y': '2' }),
                Bbox(1, 2, 3, 4, label=4, id=4, attributes={ 'a': 1.0 }),
                Bbox(5, 6, 7, 8, id=5, group=5),
                Points([1, 2, 2, 0, 1, 1], label=0, id=5),
                Mask(id=5, image=np.ones((3, 2))),
                Mask(label=3, id=5, image=np.ones((2, 3))),
                PolyLine([1, 2, 3, 4, 5, 6, 7, 8], id=11),
                Polygon([1, 2, 3, 4, 5, 6, 7, 8]),
            ]
        )
        encoded = DatasetItemEncoder.encode(item)
        DatasetItemEncoder.to_string(encoded)
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_item_filter_can_be_applied(self):
        # Item-level filter: of ids 0..3, only 2 and 3 match 'id > 1'.
        class TestExtractor(Extractor):
            def __iter__(self):
                for i in range(4):
                    yield DatasetItem(id=i, subset='train')
        extractor = TestExtractor()
        filtered = XPathDatasetFilter(extractor, '/item[id > 1]')
        self.assertEqual(2, len(filtered))
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_annotations_filter_can_be_applied(self):
        # Annotation-level filter: items are kept, but only annotations
        # with label_id == 0 remain attached to them.
        class SrcExtractor(Extractor):
            def __iter__(self):
                return iter([
                    DatasetItem(id=0),
                    DatasetItem(id=1, annotations=[
                        Label(0),
                        Label(1),
                    ]),
                    DatasetItem(id=2, annotations=[
                        Label(0),
                        Label(2),
                    ]),
                ])
        class DstExtractor(Extractor):
            def __iter__(self):
                return iter([
                    DatasetItem(id=0),
                    DatasetItem(id=1, annotations=[
                        Label(0),
                    ]),
                    DatasetItem(id=2, annotations=[
                        Label(0),
                    ]),
                ])
        extractor = SrcExtractor()
        filtered = XPathAnnotationsFilter(extractor,
            '/item/annotation[label_id = 0]')
        self.assertListEqual(list(filtered), list(DstExtractor()))
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_annotations_filter_can_remove_empty_items(self):
        # With remove_empty=True, items left without any annotations after
        # filtering are dropped from the result entirely.
        source = Dataset.from_iterable([
            DatasetItem(id=0),
            DatasetItem(id=1, annotations=[
                Label(0),
                Label(1),
            ]),
            DatasetItem(id=2, annotations=[
                Label(0),
                Label(2),
            ]),
        ], categories=['a', 'b', 'c'])
        expected = Dataset.from_iterable([
            DatasetItem(id=2, annotations=[Label(2)]),
        ], categories=['a', 'b', 'c'])
        filtered = XPathAnnotationsFilter(source,
            '/item/annotation[label_id = 2]', remove_empty=True)
        compare_datasets(self, expected, filtered)
| true | true |
f720773382e2af71b9b530986df7d022a800c635 | 4,874 | py | Python | protoseg/report.py | chriamue/protoseg | 4ddc7d613aadcb9d25b5773eff688214349ab23f | [
"MIT"
] | null | null | null | protoseg/report.py | chriamue/protoseg | 4ddc7d613aadcb9d25b5773eff688214349ab23f | [
"MIT"
] | null | null | null | protoseg/report.py | chriamue/protoseg | 4ddc7d613aadcb9d25b5773eff688214349ab23f | [
"MIT"
] | 1 | 2020-03-30T07:10:54.000Z | 2020-03-30T07:10:54.000Z |
import os
import numpy as np
import cv2
import json
import pandas as pd
import tensorflow as tf
from tensorboard.backend.event_processing import event_accumulator as ea
from matplotlib import pyplot as plt
from matplotlib import colors as colors
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import seaborn as sns
sns.set(style="darkgrid")
sns.set_context("paper")
from matplotlib.backends.backend_pdf import PdfPages
class Report():
    """Renders training results stored as TensorBoard event files into
    matplotlib plots, PNG images and combined PDF reports.

    Parameters
    ----------
    configs:
        Run configuration object; iterating it yields run names, and it
        also exposes ``get()`` and ``filename`` (used by ``generate()``).
    resultspath: str
        Root directory containing one sub-directory of event files per run.
    """

    def __init__(self, configs, resultspath='results/'):
        self.configs = configs
        self.resultspath = resultspath
        assert(configs)

    # source: https://github.com/JamesChuanggg/Tensorboard2Seaborn/blob/master/beautify.py
    def plot(self, acc, tag='loss', smooth_space=100, color_code='#4169E1'):
        """Plot a scalar series from an EventAccumulator.

        Draws the raw curve semi-transparently and a window-averaged
        ("smoothed") curve on top of it.

        Parameters
        ----------
        acc:
            A loaded ``EventAccumulator`` providing ``Scalars(tag)``.
        tag: str
            Name of the scalar series to plot (also used as the title).
        smooth_space: int
            Window size, in samples, of the smoothing average.
        color_code: str
            Matplotlib color for the curves.

        Returns
        -------
        (fig, img):
            The matplotlib figure and its rendered pixels as a numpy array.
        """
        x_list = []
        y_list = []
        x_list_raw = []
        y_list_raw = []
        try:
            x = [int(s.step) for s in acc.Scalars(tag=tag)]
            y = [s.value for s in acc.Scalars(tag=tag)]
            # smooth curve: average consecutive windows of `smooth_space` points
            x_ = []
            y_ = []
            for i in range(0, len(x), smooth_space):
                chunk = y[i:i + smooth_space]
                x_.append(x[i])
                # Divide by the actual window length so the trailing,
                # possibly shorter window yields a true mean (previously the
                # divisor was always smooth_space, underestimating the tail).
                y_.append(sum(chunk) / float(len(chunk)))
            x_.append(x[-1])
            y_.append(y[-1])
            x_list = x_
            y_list = y_
            # raw curve
            x_list_raw = x
            y_list_raw = y
        except Exception as e:
            # Missing tags / empty series are reported but still produce an
            # (empty) figure, so callers need no special-casing.
            print(e)
        fig, ax = plt.subplots()
        plt.title(tag)
        plt.plot(x_list_raw, y_list_raw,
                 color=colors.to_rgba(color_code, alpha=0.4))
        plt.plot(x_list, y_list, color=color_code, linewidth=1.5)
        fig.canvas.draw()  # force a render so the pixel buffer is populated
        return fig, np.array(fig.canvas.renderer._renderer)

    def image(self, acc, tag='loss'):
        """Decode and return the most recent image logged under `tag`
        as a numpy array (uses a temporary TensorFlow session)."""
        image_list = acc.Images(tag=tag)
        with tf.Session() as sess:
            img = tf.image.decode_image(image_list[-1].encoded_image_string)
            npimg = img.eval(session=sess)
        return npimg

    def generate(self):
        """Write one PDF (named after the config file) with a loss page and
        one page per configured metric for every run; also dump each plot
        as a PNG next to the run's event files."""
        pp = PdfPages(os.path.join(self.resultspath,
                                   os.path.basename(self.configs.filename) + '.pdf'))
        for run in self.configs:
            resultpath = os.path.join(self.resultspath, run)
            event_acc = ea.EventAccumulator(resultpath)
            event_acc.Reload()
            fig, img = self.plot(event_acc, tag="loss")
            plt.text(0.05, 0.95, run, transform=fig.transFigure, size=24)
            pp.savefig(fig)
            cv2.imwrite(resultpath + '/loss.png', img)
            config = self.configs.get()
            # Each metric is configured as a single-key mapping; the key is
            # the scalar tag to plot.
            for metric in config['metrices']:
                name = list(metric.keys())[0]
                fig, img = self.plot(event_acc, tag=name)
                pp.savefig(fig)
                cv2.imwrite(resultpath + '/' + name + '.png', img)
        pp.close()

    def hyperparamopt(self, config, hyperparamoptimizer, resultpath):
        """Summarize a hyperparameter search: dump all trials to
        ``trials.csv`` (indexed by loss) and render one PDF page per trial
        with its loss curve, validation images and metric plots."""
        filename = os.path.join(resultpath, 'trials.csv')
        df = pd.DataFrame(data=hyperparamoptimizer.trials.results)
        df = df.set_index('loss')
        df.to_csv(filename)
        pp = PdfPages(os.path.join(resultpath, 'paramopt.pdf'))
        event_acc = ea.EventAccumulator(resultpath)
        event_acc.Reload()
        for result in hyperparamoptimizer.trials.results:
            trial = result['trial']
            l = result['loss']
            _, loss = self.plot(event_acc, tag='trial' + str(trial) + '_loss')
            val_image = self.image(
                event_acc, tag='trial' + str(trial) + '_val_image')
            val_mask = self.image(
                event_acc, tag='trial' + str(trial) + '_val_mask')
            val_predicted = self.image(
                event_acc, tag='trial' + str(trial) + '_val_predicted')
            # Top row: loss curve plus the three validation images.
            fig = plt.figure()
            fig.add_subplot(2, 4, 1)
            plt.axis('on')
            plt.imshow(loss)
            fig.add_subplot(2, 4, 2)
            plt.axis('off')
            plt.imshow(val_image)
            fig.add_subplot(2, 4, 3)
            plt.axis('off')
            plt.imshow(val_mask)
            fig.add_subplot(2, 4, 4)
            plt.axis('off')
            plt.imshow(val_predicted)
            plt.text(0.05, 0.95, 'trial ' + str(trial) + " loss: " +
                     str(l), transform=fig.transFigure, size=24)
            # Bottom row: one rendered plot per configured metric.
            for i, m in enumerate(config['metrices']):
                name = list(m.keys())[0]
                tag = 'trial' + str(trial) + '_' + name
                _, metric = self.plot(event_acc, tag=tag)
                fig.add_subplot(2, len(config['metrices']), len(
                    config['metrices']) + i + 1)
                plt.imshow(metric)
            pp.attach_note(result['params'])
            pp.savefig(fig)
            plt.close(fig)
        pp.close()
| 35.318841 | 90 | 0.560936 |
import os
import numpy as np
import cv2
import json
import pandas as pd
import tensorflow as tf
from tensorboard.backend.event_processing import event_accumulator as ea
from matplotlib import pyplot as plt
from matplotlib import colors as colors
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import seaborn as sns
sns.set(style="darkgrid")
sns.set_context("paper")
from matplotlib.backends.backend_pdf import PdfPages
class Report():
    """Renders training results stored as TensorBoard event files into
    matplotlib plots, PNG images and combined PDF reports."""
    def __init__(self, configs, resultspath='results/'):
        # configs: run configurations (iterable of run names, with get()
        # and filename attributes); resultspath: root dir of event files.
        self.configs = configs
        self.resultspath = resultspath
        assert(configs)
    def plot(self, acc, tag='loss', smooth_space=100, color_code='#4169E1'):
        """Plot the scalar series `tag` from EventAccumulator `acc`: the
        raw curve semi-transparent, a window-averaged curve on top.
        Returns the figure and its rendered pixels as a numpy array."""
        x_list = []
        y_list = []
        x_list_raw = []
        y_list_raw = []
        try:
            x = [int(s.step) for s in acc.Scalars(tag=tag)]
            y = [s.value for s in acc.Scalars(tag=tag)]
            # Smoothed curve: mean of each smooth_space-sized window.
            x_ = []
            y_ = []
            for i in range(0, len(x), smooth_space):
                x_.append(x[i])
                # NOTE(review): the last window is divided by smooth_space
                # even when the slice is shorter, so the tail point is
                # underestimated - confirm whether this is intended.
                y_.append(sum(y[i:i+smooth_space]) / float(smooth_space))
            x_.append(x[-1])
            y_.append(y[-1])
            x_list = x_
            y_list = y_
            # Raw (unsmoothed) curve, drawn semi-transparently below.
            x_list_raw = x
            y_list_raw = y
        except Exception as e:
            # Missing tags still yield an (empty) figure; error is printed.
            print(e)
        fig, ax = plt.subplots()
        plt.title(tag)
        plt.plot(x_list_raw, y_list_raw,
                 color=colors.to_rgba(color_code, alpha=0.4))
        plt.plot(x_list, y_list, color=color_code, linewidth=1.5)
        fig.canvas.draw()  # force a render so the pixel buffer exists
        return fig, np.array(fig.canvas.renderer._renderer)
    def image(self, acc, tag='loss'):
        """Decode and return the latest image logged under `tag` as a
        numpy array (uses a temporary TensorFlow session)."""
        image_list = acc.Images(tag=tag)
        with tf.Session() as sess:
            img = tf.image.decode_image(image_list[-1].encoded_image_string)
            npimg = img.eval(session=sess)
        return npimg
    def generate(self):
        """Write one PDF (named after the config file) with a loss page and
        one page per metric for every run; also dump PNGs per run."""
        pp = PdfPages(os.path.join(self.resultspath,
                                   os.path.basename(self.configs.filename) + '.pdf'))
        for run in self.configs:
            resultpath = os.path.join(self.resultspath, run)
            event_acc = ea.EventAccumulator(resultpath)
            event_acc.Reload()
            fig, img = self.plot(event_acc, tag="loss")
            plt.text(0.05, 0.95, run, transform=fig.transFigure, size=24)
            pp.savefig(fig)
            cv2.imwrite(resultpath+'/loss.png', img)
            config = self.configs.get()
            # Each metric is a single-key mapping; the key is the scalar tag.
            for metric in config['metrices']:
                name = list(metric.keys())[0]
                fig, img = self.plot(event_acc, tag=name)
                pp.savefig(fig)
                cv2.imwrite(resultpath+'/'+name+'.png', img)
        pp.close()
    def hyperparamopt(self, config, hyperparamoptimizer, resultpath):
        """Summarize a hyperparameter search: dump trials to trials.csv
        (indexed by loss) and render one PDF page per trial with its loss
        curve, validation images and metric plots."""
        filename = os.path.join(resultpath, 'trials.csv')
        df = pd.DataFrame(data=hyperparamoptimizer.trials.results)
        df = df.set_index('loss')
        df.to_csv(filename)
        pp = PdfPages(os.path.join(resultpath, 'paramopt.pdf'))
        event_acc = ea.EventAccumulator(resultpath)
        event_acc.Reload()
        for result in hyperparamoptimizer.trials.results:
            trial = result['trial']
            l = result['loss']
            _, loss = self.plot(event_acc, tag='trial'+str(trial)+'_loss')
            val_image = self.image(
                event_acc, tag='trial'+str(trial)+'_val_image')
            val_mask = self.image(
                event_acc, tag='trial'+str(trial)+'_val_mask')
            val_predicted = self.image(
                event_acc, tag='trial'+str(trial)+'_val_predicted')
            # Top row: loss curve plus the three validation images.
            fig = plt.figure()
            fig.add_subplot(2, 4, 1)
            plt.axis('on')
            plt.imshow(loss)
            fig.add_subplot(2, 4, 2)
            plt.axis('off')
            plt.imshow(val_image)
            fig.add_subplot(2, 4, 3)
            plt.axis('off')
            plt.imshow(val_mask)
            fig.add_subplot(2, 4, 4)
            plt.axis('off')
            plt.imshow(val_predicted)
            plt.text(0.05, 0.95, 'trial ' + str(trial) + " loss: " +
                     str(l), transform=fig.transFigure, size=24)
            # Bottom row: one plot per configured metric.
            for i, m in enumerate(config['metrices']):
                name = list(m.keys())[0]
                tag = 'trial'+str(trial)+'_'+name
                _, metric = self.plot(event_acc, tag=tag)
                fig.add_subplot(2, len(config['metrices']), len(
                    config['metrices']) + i+1)
                plt.imshow(metric)
            pp.attach_note(result['params'])
            pp.savefig(fig)
            plt.close(fig)
        pp.close()
| true | true |
f72077cdf636f62c3c764ed25b75858a0cc4d91d | 434 | py | Python | data/scripts/templates/object/mobile/shared_giant_veermok.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/mobile/shared_giant_veermok.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/mobile/shared_giant_veermok.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build and return the giant veermok creature template."""
    creature = Creature()
    creature.template = "object/mobile/shared_giant_veermok.iff"
    creature.attribute_template_id = 9
    creature.stfName("monster_name","veermok")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return creature
f72077d78671f48577f3268c91b0668f5686d755 | 214 | py | Python | Linguagens/Python/Exercicios/cursos_em_video/aulas-01_a_21/012.py | rafaelvizu/Estudos | eef5e3e3706ff99959226c51b9907b6af4377bfe | [
"MIT"
] | null | null | null | Linguagens/Python/Exercicios/cursos_em_video/aulas-01_a_21/012.py | rafaelvizu/Estudos | eef5e3e3706ff99959226c51b9907b6af4377bfe | [
"MIT"
] | null | null | null | Linguagens/Python/Exercicios/cursos_em_video/aulas-01_a_21/012.py | rafaelvizu/Estudos | eef5e3e3706ff99959226c51b9907b6af4377bfe | [
"MIT"
] | null | null | null | print('Exercício Python #012 - Calculando Descontos')
a6 = float(input('Preço: '))
b6 = int(input('Desconto:'))
c6 = a6 * b6 / 100
d6 = a6 - c6
print(' O valor com o desconto de {} % é de {} Reais '.format(b6, d6)) | 35.666667 | 70 | 0.630841 | print('Exercício Python #012 - Calculando Descontos')
a6 = float(input('Preço: '))
b6 = int(input('Desconto:'))
c6 = a6 * b6 / 100
d6 = a6 - c6
print(' O valor com o desconto de {} % é de {} Reais '.format(b6, d6)) | true | true |
f720787fc556e48c6de48c95c2046dbcd33827a9 | 26,887 | py | Python | dask_kubernetes/classic/kubecluster.py | Matt711/dask-kubernetes | 8190529fc140b6ea2c345bde02aa1c647272eb98 | [
"BSD-3-Clause"
] | 1 | 2022-01-20T12:38:27.000Z | 2022-01-20T12:38:27.000Z | dask_kubernetes/classic/kubecluster.py | Matt711/dask-kubernetes | 8190529fc140b6ea2c345bde02aa1c647272eb98 | [
"BSD-3-Clause"
] | null | null | null | dask_kubernetes/classic/kubecluster.py | Matt711/dask-kubernetes | 8190529fc140b6ea2c345bde02aa1c647272eb98 | [
"BSD-3-Clause"
] | null | null | null | import asyncio
import copy
import getpass
import logging
import os
import time
import uuid
import warnings
import yaml
import dask
import dask.distributed
import distributed.security
from distributed.deploy import SpecCluster, ProcessInterface
from distributed.utils import Log, Logs
import kubernetes_asyncio as kubernetes
from kubernetes_asyncio.client.rest import ApiException
from ..constants import KUBECLUSTER_WORKER_CONTAINER_NAME
from ..common.objects import (
make_pod_from_dict,
make_service_from_dict,
make_pdb_from_dict,
clean_pod_template,
clean_service_template,
clean_pdb_template,
)
from ..common.auth import ClusterAuth
from ..common.utils import (
namespace_default,
escape,
)
from ..common.networking import get_external_address_for_scheduler_service
logger = logging.getLogger(__name__)
SCHEDULER_PORT = 8786
class Pod(ProcessInterface):
    """A superclass for Kubernetes Pods

    Wraps creation, deletion, log retrieval and status inspection of a
    single pod through the asynchronous Kubernetes client.

    See Also
    --------
    Worker
    Scheduler
    """

    def __init__(
        self,
        cluster,
        core_api,
        policy_api,
        pod_template,
        namespace,
        loop=None,
        **kwargs
    ):
        self._pod = None  # V1Pod returned by the API; set in start()
        self.cluster = cluster
        self.core_api = core_api
        self.policy_api = policy_api
        # Deep-copy so per-pod mutations never leak into the shared template.
        self.pod_template = copy.deepcopy(pod_template)
        self.base_labels = self.pod_template.metadata.labels
        self.namespace = namespace
        self.name = None
        self.loop = loop
        self.kwargs = kwargs
        super().__init__()

    @property
    def cluster_name(self):
        """Name of the Dask cluster this pod belongs to, from its labels."""
        return self.pod_template.metadata.labels["dask.org/cluster-name"]

    async def start(self, **kwargs):
        """Create the pod in Kubernetes, retrying transient API errors.

        Retries up to 10 times with a 100 ms pause between attempts; the
        final failure is re-raised to the caller.
        """
        retry_count = 0  # Retry 10 times
        while True:
            try:
                self._pod = await self.core_api.create_namespaced_pod(
                    self.namespace, self.pod_template
                )
                return await super().start(**kwargs)
            except ApiException as e:
                if retry_count < 10:
                    logger.debug("Error when creating pod, retrying... - %s", str(e))
                    await asyncio.sleep(0.1)
                    retry_count += 1
                else:
                    raise e

    async def close(self, **kwargs):
        """Delete the pod; a pod that is already gone is not an error."""
        if self._pod:
            name, namespace = self._pod.metadata.name, self.namespace
            try:
                await self.core_api.delete_namespaced_pod(name, namespace)
            except ApiException as e:
                if e.reason == "Not Found":
                    logger.debug(
                        "Pod %s in namespace %s has been deleted already.",
                        name,
                        namespace,
                    )
                else:
                    raise
        await super().close(**kwargs)

    async def logs(self):
        """Return the worker container's logs as a ``distributed`` Log.

        A container that has not started yet has no logs; the API reports
        that as an error containing "waiting to start", which is mapped to
        an empty log instead of raising.
        """
        try:
            log = await self.core_api.read_namespaced_pod_log(
                self._pod.metadata.name,
                self.namespace,
                container=KUBECLUSTER_WORKER_CONTAINER_NAME,
            )
        except ApiException as e:
            if "waiting to start" in str(e):
                log = ""
            else:
                raise e
        return Log(log)

    async def describe_pod(self):
        """Refresh and return the pod's current state from the API server."""
        self._pod = await self.core_api.read_namespaced_pod(
            self._pod.metadata.name, self.namespace
        )
        return self._pod

    def __repr__(self):
        return "<Pod %s: status=%s>" % (type(self).__name__, self.status)
class Worker(Pod):
    """A Remote Dask Worker controlled by Kubernetes
    Parameters
    ----------
    scheduler: str
        The address of the scheduler
    name (optional):
        The name passed to the dask-worker CLI at creation time.
    """
    def __init__(self, scheduler: str, name=None, **kwargs):
        super().__init__(**kwargs)
        self.scheduler = scheduler
        # Label the pod so selectors can tell workers from the scheduler.
        self.pod_template.metadata.labels["dask.org/component"] = "worker"
        # The dask-worker process discovers the scheduler via this env var.
        self.pod_template.spec.containers[0].env.append(
            kubernetes.client.V1EnvVar(
                name="DASK_SCHEDULER_ADDRESS", value=self.scheduler
            )
        )
        if name is not None:
            # Forward an explicit worker name to the dask-worker CLI.
            worker_name_args = ["--name", str(name)]
            self.pod_template.spec.containers[0].args += worker_name_args
class Scheduler(Pod):
    """A Remote Dask Scheduler controlled by Kubernetes

    Parameters
    ----------
    idle_timeout: str, optional
        The scheduler task will exit after this amount of time
        if there are no requests from the client. Default is to
        never timeout.
    service_wait_timeout_s: int (optional)
        Timeout, in seconds, to wait for the remote scheduler service to be ready.
        Defaults to 30 seconds.
        Set to 0 to disable the timeout (not recommended).
    """

    def __init__(
        self,
        idle_timeout: str,
        service_wait_timeout_s: int = None,
        service_name_retries: int = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.cluster._log("Creating scheduler pod on cluster. This may take some time.")
        self.service = None
        self._idle_timeout = idle_timeout
        self._service_wait_timeout_s = service_wait_timeout_s
        self._service_name_retries = service_name_retries
        if self._idle_timeout is not None:
            # Forward the idle timeout to the dask-scheduler CLI.
            self.pod_template.spec.containers[0].args += [
                "--idle-timeout",
                self._idle_timeout,
            ]
        self.pdb = None

    async def start(self, **kwargs):
        """Create the scheduler pod, wait until it runs, then expose it.

        Blocks until the pod leaves the ``Pending`` phase, scrapes the pod
        logs for the "Scheduler at:" banner to learn the in-cluster address,
        then creates the Service and PodDisruptionBudget for it.
        """
        await super().start(**kwargs)
        # Wait for the pod to be scheduled onto a node.
        while (await self.describe_pod()).status.phase == "Pending":
            await asyncio.sleep(0.1)
        # Poll the pod logs until the scheduler announces its address.
        while self.address is None:
            logs = await self.logs()
            for line in logs.splitlines():
                if "Scheduler at:" in line:
                    self.address = line.split("Scheduler at:")[1].strip()
            await asyncio.sleep(0.1)
        self.service = await self._create_service()
        # Replace the pod-local address with the stable service DNS name.
        self.address = "tcp://{name}.{namespace}:{port}".format(
            name=self.service.metadata.name,
            namespace=self.namespace,
            port=SCHEDULER_PORT,
        )
        self.external_address = await get_external_address_for_scheduler_service(
            self.core_api,
            self.service,
            service_name_resolution_retries=self._service_name_retries,
        )
        self.pdb = await self._create_pdb()

    async def close(self, **kwargs):
        """Delete the scheduler's Service and PodDisruptionBudget, then the pod."""
        if self.service:
            await self.core_api.delete_namespaced_service(
                self.cluster_name, self.namespace
            )
        if self.pdb:
            await self.policy_api.delete_namespaced_pod_disruption_budget(
                self.cluster_name, self.namespace
            )
        await super().close(**kwargs)

    async def _create_service(self):
        """Create and return the Kubernetes Service fronting the scheduler."""
        service_template_dict = dask.config.get("kubernetes.scheduler-service-template")
        self.service_template = clean_service_template(
            make_service_from_dict(service_template_dict)
        )
        self.service_template.metadata.name = self.cluster_name
        self.service_template.metadata.labels = copy.deepcopy(self.base_labels)
        self.service_template.spec.selector["dask.org/cluster-name"] = self.cluster_name
        if self.service_template.spec.type is None:
            self.service_template.spec.type = dask.config.get(
                "kubernetes.scheduler-service-type"
            )
        await self.core_api.create_namespaced_service(
            self.namespace, self.service_template
        )
        service = await self.core_api.read_namespaced_service(
            self.cluster_name, self.namespace
        )
        if service.spec.type == "LoadBalancer":
            # Wait for load balancer to be assigned
            # NOTE(review): assumes _service_wait_timeout_s is an int here;
            # a None value would raise TypeError on the `> 0` comparison —
            # confirm the config always supplies a numeric default.
            start = time.time()
            while service.status.load_balancer.ingress is None:
                if (
                    self._service_wait_timeout_s > 0
                    and time.time() > start + self._service_wait_timeout_s
                ):
                    raise asyncio.TimeoutError(
                        "Timed out waiting for Load Balancer to be provisioned."
                    )
                service = await self.core_api.read_namespaced_service(
                    self.cluster_name, self.namespace
                )
                await asyncio.sleep(0.2)
        return service

    async def _create_pdb(self):
        """Create and return a PodDisruptionBudget protecting the scheduler."""
        pdb_template_dict = dask.config.get("kubernetes.scheduler-pdb-template")
        self.pdb_template = clean_pdb_template(make_pdb_from_dict(pdb_template_dict))
        self.pdb_template.metadata.name = self.cluster_name
        self.pdb_template.metadata.labels = copy.deepcopy(self.base_labels)
        self.pdb_template.spec.selector.match_labels[
            "dask.org/cluster-name"
        ] = self.cluster_name
        await self.policy_api.create_namespaced_pod_disruption_budget(
            self.namespace, self.pdb_template
        )
        return await self.policy_api.read_namespaced_pod_disruption_budget(
            self.cluster_name, self.namespace
        )
class KubeCluster(SpecCluster):
    """Launch a Dask cluster on Kubernetes

    This starts a local Dask scheduler and then dynamically launches
    Dask workers on a Kubernetes cluster. The Kubernetes cluster is taken
    to be either the current one on which this code is running, or as a
    fallback, the default one configured in a kubeconfig file.

    **Environments**
    Your worker pod image should have a similar environment to your local
    environment, including versions of Python, dask, cloudpickle, and any
    libraries that you may wish to use (like NumPy, Pandas, or Scikit-Learn).
    See examples below for suggestions on how to manage and check for this.

    **Network**
    Since the Dask scheduler is launched locally, for it to work, we need to
    be able to open network connections between this local node and all the
    workers nodes on the Kubernetes cluster. If the current process is not
    already on a Kubernetes node, some network configuration will likely be
    required to make this work.

    **Resources**
    Your Kubernetes resource limits and requests should match the
    ``--memory-limit`` and ``--nthreads`` parameters given to the
    ``dask-worker`` command.

    Parameters
    ----------
    pod_template: (kubernetes.client.V1Pod, dict, str)
        A Kubernetes specification for a Pod for a dask worker. Can be either a
        ``V1Pod``, a dict representation of a pod, or a path to a yaml file
        containing a pod specification.
    scheduler_pod_template: kubernetes.client.V1Pod (optional)
        A Kubernetes specification for a Pod for a dask scheduler.
        Defaults to the pod_template.
    name: str (optional)
        Name given to the pods. Defaults to ``dask-$USER-random``
    namespace: str (optional)
        Namespace in which to launch the workers.
        Defaults to current namespace if available or "default"
    n_workers: int
        Number of workers on initial launch.
        Use ``scale`` to change this number in the future
    env: Dict[str, str]
        Dictionary of environment variables to pass to worker pod
    host: str
        Listen address for local scheduler. Defaults to 0.0.0.0
    port: int
        Port of local scheduler
    auth: List[ClusterAuth] (optional)
        Configuration methods to attempt in order. Defaults to
        ``[InCluster(), KubeConfig()]``.
    idle_timeout: str (optional)
        The scheduler task will exit after this amount of time
        if there are no requests from the client. Default is to
        never timeout.
    scheduler_service_wait_timeout: int (optional)
        Timeout, in seconds, to wait for the remote scheduler service to be ready.
        Defaults to 30 seconds.
        Set to 0 to disable the timeout (not recommended).
    scheduler_service_name_resolution_retries: int (optional)
        Number of retries to resolve scheduler service name when running
        from within the Kubernetes cluster.
        Defaults to 20.
        Must be set to 1 or greater.
    deploy_mode: str (optional)
        Run the scheduler as "local" or "remote".
        Defaults to ``"remote"``.
    **kwargs: dict
        Additional keyword arguments to pass to LocalCluster

    Examples
    --------
    >>> from dask_kubernetes import KubeCluster, make_pod_spec
    >>> pod_spec = make_pod_spec(image='ghcr.io/dask/dask:latest',
    ...                          memory_limit='4G', memory_request='4G',
    ...                          cpu_limit=1, cpu_request=1,
    ...                          env={'EXTRA_PIP_PACKAGES': 'fastparquet git+https://github.com/dask/distributed'})
    >>> cluster = KubeCluster(pod_spec)
    >>> cluster.scale(10)

    You can also create clusters with worker pod specifications as dictionaries
    or stored in YAML files

    >>> cluster = KubeCluster('worker-template.yml')
    >>> cluster = KubeCluster({...})

    Rather than explicitly setting a number of workers you can also ask the
    cluster to allocate workers dynamically based on current workload

    >>> cluster.adapt()

    You can pass this cluster directly to a Dask client

    >>> from dask.distributed import Client
    >>> client = Client(cluster)

    You can verify that your local environment matches your worker environments
    by calling ``client.get_versions(check=True)``. This will raise an
    informative error if versions do not match.

    >>> client.get_versions(check=True)

    The ``ghcr.io/dask/dask`` docker images support ``EXTRA_PIP_PACKAGES``,
    ``EXTRA_APT_PACKAGES`` and ``EXTRA_CONDA_PACKAGES`` environment variables
    to help with small adjustments to the worker environments. We recommend
    the use of pip over conda in this case due to a much shorter startup time.
    These environment variables can be modified directly from the KubeCluster
    constructor methods using the ``env=`` keyword. You may list as many
    packages as you like in a single string like the following:

    >>> pip = 'pyarrow gcsfs git+https://github.com/dask/distributed'
    >>> conda = '-c conda-forge scikit-learn'
    >>> KubeCluster(..., env={'EXTRA_PIP_PACKAGES': pip,
    ...                       'EXTRA_CONDA_PACKAGES': conda})

    You can also start a KubeCluster with no arguments *if* the worker template
    is specified in the Dask config files, either as a full template in
    ``kubernetes.worker-template`` or a path to a YAML file in
    ``kubernetes.worker-template-path``.
    See https://docs.dask.org/en/latest/configuration.html for more
    information about setting configuration values.::

        $ export DASK_KUBERNETES__WORKER_TEMPLATE_PATH=worker_template.yaml

    >>> cluster = KubeCluster() # automatically finds 'worker_template.yaml'

    See Also
    --------
    KubeCluster.adapt
    """

    def __init__(
        self,
        pod_template=None,
        name=None,
        namespace=None,
        n_workers=None,
        host=None,
        port=None,
        env=None,
        auth=ClusterAuth.DEFAULT,
        idle_timeout=None,
        deploy_mode=None,
        interface=None,
        protocol=None,
        dashboard_address=None,
        security=None,
        scheduler_service_wait_timeout=None,
        scheduler_service_name_resolution_retries=None,
        scheduler_pod_template=None,
        **kwargs
    ):
        # Pod templates may be given as a YAML file path, a dict, or a V1Pod;
        # normalize the first two forms into V1Pod objects.
        if isinstance(pod_template, str):
            with open(pod_template) as f:
                pod_template = dask.config.expand_environment_variables(
                    yaml.safe_load(f)
                )
        if isinstance(pod_template, dict):
            pod_template = make_pod_from_dict(pod_template)
        if isinstance(scheduler_pod_template, str):
            with open(scheduler_pod_template) as f:
                scheduler_pod_template = dask.config.expand_environment_variables(
                    yaml.safe_load(f)
                )
        if isinstance(scheduler_pod_template, dict):
            scheduler_pod_template = make_pod_from_dict(scheduler_pod_template)
        self.pod_template = pod_template
        self.scheduler_pod_template = scheduler_pod_template
        # For every option: an explicit argument wins, otherwise the value
        # comes from the dask config ("kubernetes.*" keys).
        self._generate_name = dask.config.get("kubernetes.name", override_with=name)
        self.namespace = dask.config.get(
            "kubernetes.namespace", override_with=namespace
        )
        self._n_workers = dask.config.get(
            "kubernetes.count.start", override_with=n_workers
        )
        self._idle_timeout = dask.config.get(
            "kubernetes.idle-timeout", override_with=idle_timeout
        )
        self._deploy_mode = dask.config.get(
            "kubernetes.deploy-mode", override_with=deploy_mode
        )
        self._protocol = dask.config.get("kubernetes.protocol", override_with=protocol)
        self._interface = dask.config.get(
            "kubernetes.interface", override_with=interface
        )
        self._dashboard_address = dask.config.get(
            "kubernetes.dashboard_address", override_with=dashboard_address
        )
        self._scheduler_service_wait_timeout = dask.config.get(
            "kubernetes.scheduler-service-wait-timeout",
            override_with=scheduler_service_wait_timeout,
        )
        self._scheduler_service_name_resolution_retries = dask.config.get(
            "kubernetes.scheduler-service-name-resolution-retries",
            override_with=scheduler_service_name_resolution_retries,
        )
        self.security = security
        if self.security and not isinstance(
            self.security, distributed.security.Security
        ):
            raise RuntimeError(
                "Security object is not a valid distributed.security.Security object"
            )
        self.host = dask.config.get("kubernetes.host", override_with=host)
        self.port = dask.config.get("kubernetes.port", override_with=port)
        self.env = dask.config.get("kubernetes.env", override_with=env)
        self.auth = auth
        self.kwargs = kwargs
        super().__init__(**self.kwargs)

    def _get_pod_template(self, pod_template, pod_type):
        """Resolve a pod template from the dask config when none was given.

        Checks ``kubernetes.<pod_type>-template`` (inline dict) first, then
        ``kubernetes.<pod_type>-template-path`` (YAML file path). Returns the
        template unchanged if one was already provided.
        """
        if not pod_template and dask.config.get(
            "kubernetes.{}-template".format(pod_type), None
        ):
            d = dask.config.get("kubernetes.{}-template".format(pod_type))
            d = dask.config.expand_environment_variables(d)
            pod_template = make_pod_from_dict(d)
        if not pod_template and dask.config.get(
            "kubernetes.{}-template-path".format(pod_type), None
        ):
            import yaml

            fn = dask.config.get("kubernetes.{}-template-path".format(pod_type))
            fn = fn.format(**os.environ)
            with open(fn) as f:
                d = yaml.safe_load(f)
                d = dask.config.expand_environment_variables(d)
                pod_template = make_pod_from_dict(d)
        return pod_template

    def _fill_pod_templates(self, pod_template, pod_type):
        """Return a copy of *pod_template* stamped with cluster metadata.

        Adds the mandatory dask.org labels, the namespace, any user-supplied
        environment variables and the generated pod name prefix.
        """
        pod_template = copy.deepcopy(pod_template)
        # Default labels that can't be overwritten
        pod_template.metadata.labels["dask.org/cluster-name"] = self._generate_name
        pod_template.metadata.labels["dask.org/component"] = pod_type
        pod_template.metadata.labels["user"] = escape(getpass.getuser())
        pod_template.metadata.labels["app"] = "dask"
        pod_template.metadata.namespace = self.namespace
        if self.env:
            pod_template.spec.containers[0].env.extend(
                [
                    kubernetes.client.V1EnvVar(name=k, value=str(v))
                    for k, v in self.env.items()
                ]
            )
        pod_template.metadata.generate_name = self._generate_name
        return pod_template

    async def _start(self):
        """Async startup: resolve pod templates, authenticate against the
        Kubernetes API, then hand scheduler/worker specs to SpecCluster.
        """
        self.pod_template = self._get_pod_template(self.pod_template, pod_type="worker")
        self.scheduler_pod_template = self._get_pod_template(
            self.scheduler_pod_template, pod_type="scheduler"
        )
        if not self.pod_template:
            msg = (
                "Worker pod specification not provided. See KubeCluster "
                "docstring for ways to specify workers"
            )
            raise ValueError(msg)
        base_pod_template = self.pod_template
        self.pod_template = clean_pod_template(self.pod_template, pod_type="worker")
        if not self.scheduler_pod_template:
            # Fall back to the worker template, swapping in the scheduler CLI.
            self.scheduler_pod_template = base_pod_template
            self.scheduler_pod_template.spec.containers[0].args = ["dask-scheduler"]
        self.scheduler_pod_template = clean_pod_template(
            self.scheduler_pod_template, pod_type="scheduler"
        )
        await ClusterAuth.load_first(self.auth)
        self.core_api = kubernetes.client.CoreV1Api()
        self.policy_api = kubernetes.client.PolicyV1beta1Api()
        if self.namespace is None:
            self.namespace = namespace_default()
        # Expand "{user}", "{uuid}" and environment-variable placeholders in
        # the generated name, then sanitize it for Kubernetes.
        environ = {k: v for k, v in os.environ.items() if k not in ["user", "uuid"]}
        self._generate_name = self._generate_name.format(
            user=getpass.getuser(), uuid=str(uuid.uuid4())[:10], **environ
        )
        self._generate_name = escape(self._generate_name)
        self.pod_template = self._fill_pod_templates(
            self.pod_template, pod_type="worker"
        )
        self.scheduler_pod_template = self._fill_pod_templates(
            self.scheduler_pod_template, pod_type="scheduler"
        )
        common_options = {
            "cluster": self,
            "core_api": self.core_api,
            "policy_api": self.policy_api,
            "namespace": self.namespace,
            "loop": self.loop,
        }
        if self._deploy_mode == "local":
            # Scheduler runs in this process; workers run as pods.
            self.scheduler_spec = {
                "cls": dask.distributed.Scheduler,
                "options": {
                    "protocol": self._protocol,
                    "interface": self._interface,
                    "host": self.host,
                    "port": self.port,
                    "dashboard_address": self._dashboard_address,
                    "security": self.security,
                },
            }
        elif self._deploy_mode == "remote":
            # Scheduler also runs as a pod inside the cluster.
            self.scheduler_spec = {
                "cls": Scheduler,
                "options": {
                    "idle_timeout": self._idle_timeout,
                    "service_wait_timeout_s": self._scheduler_service_wait_timeout,
                    "service_name_retries": self._scheduler_service_name_resolution_retries,
                    "pod_template": self.scheduler_pod_template,
                    **common_options,
                },
            }
        else:
            raise RuntimeError("Unknown deploy mode %s" % self._deploy_mode)
        self.new_spec = {
            "cls": Worker,
            "options": {"pod_template": self.pod_template, **common_options},
        }
        self.worker_spec = {i: self.new_spec for i in range(self._n_workers)}
        self.name = self.pod_template.metadata.generate_name
        await super()._start()

    @classmethod
    def from_dict(cls, pod_spec, **kwargs):
        """Create cluster with worker pod spec defined by Python dictionary

        Deprecated, please use the `KubeCluster` constructor directly.

        Examples
        --------
        >>> spec = {
        ...     'metadata': {},
        ...     'spec': {
        ...         'containers': [{
        ...             'args': ['dask-worker', '$(DASK_SCHEDULER_ADDRESS)',
        ...                      '--nthreads', '1',
        ...                      '--death-timeout', '60'],
        ...             'command': None,
        ...             'image': 'ghcr.io/dask/dask:latest',
        ...             'name': 'dask-worker',
        ...         }],
        ...     'restartPolicy': 'Never',
        ...     }
        ... }
        >>> cluster = KubeCluster.from_dict(spec, namespace='my-ns')  # doctest: +SKIP

        See Also
        --------
        KubeCluster.from_yaml
        """
        warnings.warn(
            "KubeCluster.from_dict is deprecated, use the constructor directly"
        )
        return cls(pod_spec, **kwargs)

    @classmethod
    def from_yaml(cls, yaml_path, **kwargs):
        """Create cluster with worker pod spec defined by a YAML file

        Deprecated, please use the `KubeCluster` constructor directly.

        We can start a cluster with pods defined in an accompanying YAML file
        like the following:

        .. code-block:: yaml

            kind: Pod
            metadata:
              labels:
                foo: bar
                baz: quux
            spec:
              containers:
              - image: ghcr.io/dask/dask:latest
                name: dask-worker
                args: [dask-worker, $(DASK_SCHEDULER_ADDRESS), --nthreads, '2', --memory-limit, 8GB]
              restartPolicy: Never

        Examples
        --------
        >>> cluster = KubeCluster.from_yaml('pod.yaml', namespace='my-ns')  # doctest: +SKIP

        See Also
        --------
        KubeCluster.from_dict
        """
        warnings.warn(
            "KubeCluster.from_yaml is deprecated, use the constructor directly"
        )
        return cls(yaml_path, **kwargs)

    def scale(self, n):
        """Scale the cluster to ``n`` workers, capped at ``kubernetes.count.max``."""
        # A shim to maintain backward compatibility
        # https://github.com/dask/distributed/issues/3054
        maximum = dask.config.get("kubernetes.count.max")
        if maximum is not None and maximum < n:
            logger.info(
                "Tried to scale beyond maximum number of workers %d > %d", n, maximum
            )
            n = maximum
        return super().scale(n)

    async def _logs(self, scheduler=True, workers=True):
        """Return logs for the scheduler and workers

        Parameters
        ----------
        scheduler : boolean
            Whether or not to collect logs for the scheduler
        workers : boolean or Iterable[str], optional
            A list of worker addresses to select.
            Defaults to all workers if `True` or no workers if `False`

        Returns
        -------
        logs: Dict[str]
            A dictionary of logs, with one item for the scheduler and one for
            each worker
        """
        logs = Logs()
        if scheduler:
            logs["Scheduler"] = await self.scheduler.logs()
        if workers:
            worker_logs = await asyncio.gather(
                *[w.logs() for w in self.workers.values()]
            )
            for key, log in zip(self.workers, worker_logs):
                logs[key] = log
        return logs
| 36.481682 | 115 | 0.609886 | import asyncio
import copy
import getpass
import logging
import os
import time
import uuid
import warnings
import yaml
import dask
import dask.distributed
import distributed.security
from distributed.deploy import SpecCluster, ProcessInterface
from distributed.utils import Log, Logs
import kubernetes_asyncio as kubernetes
from kubernetes_asyncio.client.rest import ApiException
from ..constants import KUBECLUSTER_WORKER_CONTAINER_NAME
from ..common.objects import (
make_pod_from_dict,
make_service_from_dict,
make_pdb_from_dict,
clean_pod_template,
clean_service_template,
clean_pdb_template,
)
from ..common.auth import ClusterAuth
from ..common.utils import (
namespace_default,
escape,
)
from ..common.networking import get_external_address_for_scheduler_service
logger = logging.getLogger(__name__)
SCHEDULER_PORT = 8786
class Pod(ProcessInterface):
def __init__(
self,
cluster,
core_api,
policy_api,
pod_template,
namespace,
loop=None,
**kwargs
):
self._pod = None
self.cluster = cluster
self.core_api = core_api
self.policy_api = policy_api
self.pod_template = copy.deepcopy(pod_template)
self.base_labels = self.pod_template.metadata.labels
self.namespace = namespace
self.name = None
self.loop = loop
self.kwargs = kwargs
super().__init__()
@property
def cluster_name(self):
return self.pod_template.metadata.labels["dask.org/cluster-name"]
async def start(self, **kwargs):
retry_count = 0
while True:
try:
self._pod = await self.core_api.create_namespaced_pod(
self.namespace, self.pod_template
)
return await super().start(**kwargs)
except ApiException as e:
if retry_count < 10:
logger.debug("Error when creating pod, retrying... - %s", str(e))
await asyncio.sleep(0.1)
retry_count += 1
else:
raise e
async def close(self, **kwargs):
if self._pod:
name, namespace = self._pod.metadata.name, self.namespace
try:
await self.core_api.delete_namespaced_pod(name, namespace)
except ApiException as e:
if e.reason == "Not Found":
logger.debug(
"Pod %s in namespace %s has been deleated already.",
name,
namespace,
)
else:
raise
await super().close(**kwargs)
async def logs(self):
try:
log = await self.core_api.read_namespaced_pod_log(
self._pod.metadata.name,
self.namespace,
container=KUBECLUSTER_WORKER_CONTAINER_NAME,
)
except ApiException as e:
if "waiting to start" in str(e):
log = ""
else:
raise e
return Log(log)
async def describe_pod(self):
self._pod = await self.core_api.read_namespaced_pod(
self._pod.metadata.name, self.namespace
)
return self._pod
def __repr__(self):
return "<Pod %s: status=%s>" % (type(self).__name__, self.status)
class Worker(Pod):
def __init__(self, scheduler: str, name=None, **kwargs):
super().__init__(**kwargs)
self.scheduler = scheduler
self.pod_template.metadata.labels["dask.org/component"] = "worker"
self.pod_template.spec.containers[0].env.append(
kubernetes.client.V1EnvVar(
name="DASK_SCHEDULER_ADDRESS", value=self.scheduler
)
)
if name is not None:
worker_name_args = ["--name", str(name)]
self.pod_template.spec.containers[0].args += worker_name_args
class Scheduler(Pod):
def __init__(
self,
idle_timeout: str,
service_wait_timeout_s: int = None,
service_name_retries: int = None,
**kwargs
):
super().__init__(**kwargs)
self.cluster._log("Creating scheduler pod on cluster. This may take some time.")
self.service = None
self._idle_timeout = idle_timeout
self._service_wait_timeout_s = service_wait_timeout_s
self._service_name_retries = service_name_retries
if self._idle_timeout is not None:
self.pod_template.spec.containers[0].args += [
"--idle-timeout",
self._idle_timeout,
]
self.pdb = None
async def start(self, **kwargs):
await super().start(**kwargs)
while (await self.describe_pod()).status.phase == "Pending":
await asyncio.sleep(0.1)
while self.address is None:
logs = await self.logs()
for line in logs.splitlines():
if "Scheduler at:" in line:
self.address = line.split("Scheduler at:")[1].strip()
await asyncio.sleep(0.1)
self.service = await self._create_service()
self.address = "tcp://{name}.{namespace}:{port}".format(
name=self.service.metadata.name,
namespace=self.namespace,
port=SCHEDULER_PORT,
)
self.external_address = await get_external_address_for_scheduler_service(
self.core_api,
self.service,
service_name_resolution_retries=self._service_name_retries,
)
self.pdb = await self._create_pdb()
async def close(self, **kwargs):
if self.service:
await self.core_api.delete_namespaced_service(
self.cluster_name, self.namespace
)
if self.pdb:
await self.policy_api.delete_namespaced_pod_disruption_budget(
self.cluster_name, self.namespace
)
await super().close(**kwargs)
async def _create_service(self):
service_template_dict = dask.config.get("kubernetes.scheduler-service-template")
self.service_template = clean_service_template(
make_service_from_dict(service_template_dict)
)
self.service_template.metadata.name = self.cluster_name
self.service_template.metadata.labels = copy.deepcopy(self.base_labels)
self.service_template.spec.selector["dask.org/cluster-name"] = self.cluster_name
if self.service_template.spec.type is None:
self.service_template.spec.type = dask.config.get(
"kubernetes.scheduler-service-type"
)
await self.core_api.create_namespaced_service(
self.namespace, self.service_template
)
service = await self.core_api.read_namespaced_service(
self.cluster_name, self.namespace
)
if service.spec.type == "LoadBalancer":
start = time.time()
while service.status.load_balancer.ingress is None:
if (
self._service_wait_timeout_s > 0
and time.time() > start + self._service_wait_timeout_s
):
raise asyncio.TimeoutError(
"Timed out waiting for Load Balancer to be provisioned."
)
service = await self.core_api.read_namespaced_service(
self.cluster_name, self.namespace
)
await asyncio.sleep(0.2)
return service
async def _create_pdb(self):
pdb_template_dict = dask.config.get("kubernetes.scheduler-pdb-template")
self.pdb_template = clean_pdb_template(make_pdb_from_dict(pdb_template_dict))
self.pdb_template.metadata.name = self.cluster_name
self.pdb_template.metadata.labels = copy.deepcopy(self.base_labels)
self.pdb_template.spec.selector.match_labels[
"dask.org/cluster-name"
] = self.cluster_name
await self.policy_api.create_namespaced_pod_disruption_budget(
self.namespace, self.pdb_template
)
return await self.policy_api.read_namespaced_pod_disruption_budget(
self.cluster_name, self.namespace
)
class KubeCluster(SpecCluster):
def __init__(
self,
pod_template=None,
name=None,
namespace=None,
n_workers=None,
host=None,
port=None,
env=None,
auth=ClusterAuth.DEFAULT,
idle_timeout=None,
deploy_mode=None,
interface=None,
protocol=None,
dashboard_address=None,
security=None,
scheduler_service_wait_timeout=None,
scheduler_service_name_resolution_retries=None,
scheduler_pod_template=None,
**kwargs
):
if isinstance(pod_template, str):
with open(pod_template) as f:
pod_template = dask.config.expand_environment_variables(
yaml.safe_load(f)
)
if isinstance(pod_template, dict):
pod_template = make_pod_from_dict(pod_template)
if isinstance(scheduler_pod_template, str):
with open(scheduler_pod_template) as f:
scheduler_pod_template = dask.config.expand_environment_variables(
yaml.safe_load(f)
)
if isinstance(scheduler_pod_template, dict):
scheduler_pod_template = make_pod_from_dict(scheduler_pod_template)
self.pod_template = pod_template
self.scheduler_pod_template = scheduler_pod_template
self._generate_name = dask.config.get("kubernetes.name", override_with=name)
self.namespace = dask.config.get(
"kubernetes.namespace", override_with=namespace
)
self._n_workers = dask.config.get(
"kubernetes.count.start", override_with=n_workers
)
self._idle_timeout = dask.config.get(
"kubernetes.idle-timeout", override_with=idle_timeout
)
self._deploy_mode = dask.config.get(
"kubernetes.deploy-mode", override_with=deploy_mode
)
self._protocol = dask.config.get("kubernetes.protocol", override_with=protocol)
self._interface = dask.config.get(
"kubernetes.interface", override_with=interface
)
self._dashboard_address = dask.config.get(
"kubernetes.dashboard_address", override_with=dashboard_address
)
self._scheduler_service_wait_timeout = dask.config.get(
"kubernetes.scheduler-service-wait-timeout",
override_with=scheduler_service_wait_timeout,
)
self._scheduler_service_name_resolution_retries = dask.config.get(
"kubernetes.scheduler-service-name-resolution-retries",
override_with=scheduler_service_name_resolution_retries,
)
self.security = security
if self.security and not isinstance(
self.security, distributed.security.Security
):
raise RuntimeError(
"Security object is not a valid distributed.security.Security object"
)
self.host = dask.config.get("kubernetes.host", override_with=host)
self.port = dask.config.get("kubernetes.port", override_with=port)
self.env = dask.config.get("kubernetes.env", override_with=env)
self.auth = auth
self.kwargs = kwargs
super().__init__(**self.kwargs)
def _get_pod_template(self, pod_template, pod_type):
if not pod_template and dask.config.get(
"kubernetes.{}-template".format(pod_type), None
):
d = dask.config.get("kubernetes.{}-template".format(pod_type))
d = dask.config.expand_environment_variables(d)
pod_template = make_pod_from_dict(d)
if not pod_template and dask.config.get(
"kubernetes.{}-template-path".format(pod_type), None
):
import yaml
fn = dask.config.get("kubernetes.{}-template-path".format(pod_type))
fn = fn.format(**os.environ)
with open(fn) as f:
d = yaml.safe_load(f)
d = dask.config.expand_environment_variables(d)
pod_template = make_pod_from_dict(d)
return pod_template
def _fill_pod_templates(self, pod_template, pod_type):
pod_template = copy.deepcopy(pod_template)
pod_template.metadata.labels["dask.org/cluster-name"] = self._generate_name
pod_template.metadata.labels["dask.org/component"] = pod_type
pod_template.metadata.labels["user"] = escape(getpass.getuser())
pod_template.metadata.labels["app"] = "dask"
pod_template.metadata.namespace = self.namespace
if self.env:
pod_template.spec.containers[0].env.extend(
[
kubernetes.client.V1EnvVar(name=k, value=str(v))
for k, v in self.env.items()
]
)
pod_template.metadata.generate_name = self._generate_name
return pod_template
async def _start(self):
self.pod_template = self._get_pod_template(self.pod_template, pod_type="worker")
self.scheduler_pod_template = self._get_pod_template(
self.scheduler_pod_template, pod_type="scheduler"
)
if not self.pod_template:
msg = (
"Worker pod specification not provided. See KubeCluster "
"docstring for ways to specify workers"
)
raise ValueError(msg)
base_pod_template = self.pod_template
self.pod_template = clean_pod_template(self.pod_template, pod_type="worker")
if not self.scheduler_pod_template:
self.scheduler_pod_template = base_pod_template
self.scheduler_pod_template.spec.containers[0].args = ["dask-scheduler"]
self.scheduler_pod_template = clean_pod_template(
self.scheduler_pod_template, pod_type="scheduler"
)
await ClusterAuth.load_first(self.auth)
self.core_api = kubernetes.client.CoreV1Api()
self.policy_api = kubernetes.client.PolicyV1beta1Api()
if self.namespace is None:
self.namespace = namespace_default()
environ = {k: v for k, v in os.environ.items() if k not in ["user", "uuid"]}
self._generate_name = self._generate_name.format(
user=getpass.getuser(), uuid=str(uuid.uuid4())[:10], **environ
)
self._generate_name = escape(self._generate_name)
self.pod_template = self._fill_pod_templates(
self.pod_template, pod_type="worker"
)
self.scheduler_pod_template = self._fill_pod_templates(
self.scheduler_pod_template, pod_type="scheduler"
)
common_options = {
"cluster": self,
"core_api": self.core_api,
"policy_api": self.policy_api,
"namespace": self.namespace,
"loop": self.loop,
}
if self._deploy_mode == "local":
self.scheduler_spec = {
"cls": dask.distributed.Scheduler,
"options": {
"protocol": self._protocol,
"interface": self._interface,
"host": self.host,
"port": self.port,
"dashboard_address": self._dashboard_address,
"security": self.security,
},
}
elif self._deploy_mode == "remote":
self.scheduler_spec = {
"cls": Scheduler,
"options": {
"idle_timeout": self._idle_timeout,
"service_wait_timeout_s": self._scheduler_service_wait_timeout,
"service_name_retries": self._scheduler_service_name_resolution_retries,
"pod_template": self.scheduler_pod_template,
**common_options,
},
}
else:
raise RuntimeError("Unknown deploy mode %s" % self._deploy_mode)
self.new_spec = {
"cls": Worker,
"options": {"pod_template": self.pod_template, **common_options},
}
self.worker_spec = {i: self.new_spec for i in range(self._n_workers)}
self.name = self.pod_template.metadata.generate_name
await super()._start()
@classmethod
def from_dict(cls, pod_spec, **kwargs):
warnings.warn(
"KubeCluster.from_dict is deprecated, use the constructor directly"
)
return cls(pod_spec, **kwargs)
@classmethod
def from_yaml(cls, yaml_path, **kwargs):
warnings.warn(
"KubeCluster.from_yaml is deprecated, use the constructor directly"
)
return cls(yaml_path, **kwargs)
def scale(self, n):
# A shim to maintain backward compatibility
# https://github.com/dask/distributed/issues/3054
maximum = dask.config.get("kubernetes.count.max")
if maximum is not None and maximum < n:
logger.info(
"Tried to scale beyond maximum number of workers %d > %d", n, maximum
)
n = maximum
return super().scale(n)
async def _logs(self, scheduler=True, workers=True):
logs = Logs()
if scheduler:
logs["Scheduler"] = await self.scheduler.logs()
if workers:
worker_logs = await asyncio.gather(
*[w.logs() for w in self.workers.values()]
)
for key, log in zip(self.workers, worker_logs):
logs[key] = log
return logs
| true | true |
f720789c8c15c24400e6e5290d6e30a895646242 | 7,694 | py | Python | rfmizer/tests_views.py | JunglistMNSQ/rfm | f42fa1424edbc9c57c9cd27d8183574f72acbf5c | [
"BSD-3-Clause"
] | null | null | null | rfmizer/tests_views.py | JunglistMNSQ/rfm | f42fa1424edbc9c57c9cd27d8183574f72acbf5c | [
"BSD-3-Clause"
] | 4 | 2021-06-04T23:58:19.000Z | 2021-09-22T19:38:00.000Z | rfmizer/tests_views.py | JunglistMNSQ/rfm | f42fa1424edbc9c57c9cd27d8183574f72acbf5c | [
"BSD-3-Clause"
] | null | null | null | from django.test import TestCase
from django.urls import reverse
from unittest import mock
from .fixtures import FixturesMixin
from .models import Person, Tab, User
import hashlib
# Create your tests here.
class TestRegister(FixturesMixin, TestCase):
    """Registration flow: account creation, login, and password mismatch."""

    def test_create_and_login(self):
        """A newly registered user exists and can log in, landing on /profile/."""
        self.client.post('/register/',
                         {'username': 'TestUser1',
                          'email': 'test@test.com',
                          'password': 'password',
                          'password2': 'password'})
        session = self.client.session
        session.save()
        # Registration must have created the account.
        user = User.objects.get_by_natural_key('TestUser1')
        self.assertEqual(user.get_username(), 'TestUser1')
        response = self.client.post('/login/',
                                    {'username': 'TestUser1',
                                     'password': 'password'},
                                    follow=True)
        self.assertEqual(response.redirect_chain, [('/profile/', 302)])

    def test_create_with_different_passwords(self):
        """Mismatched passwords must re-render the form with the error text.

        Fixed: the original ``assertRaisesMessage(response, ...)`` misused the
        API (it expects an exception class plus a callable); invoked that way
        it returned an unused context manager and asserted nothing.
        """
        response = self.client.post('/register/',
                                    {'username': 'TestUser1',
                                     'email': 'test@test.com',
                                     'password': 'password1',
                                     'password2': 'password2'})
        self.assertContains(response, 'Пароли не совпадают')
class TestLogin(FixturesMixin, TestCase):
    """Login view: valid fixture credentials redirect to the profile page."""

    def test_login(self):
        response = self.client.post('/login/',
                                    {'username': 'TestUser',
                                     'password': 'password'},
                                    follow=True)
        self.assertEqual(response.redirect_chain, [('/profile/', 302)])
class TestUploadToParse(FixturesMixin, TestCase):
    """Upload view and the follow-up column-parsing step."""

    def test_get(self):
        response = self.client.get('/upload/', )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Tab.objects.filter(owner=self.user)[0],
                         self.tab_exist)

    def test_create_and_parse_corrupt_file(self):
        # Uploading under a new name creates a tab and redirects to /parse/;
        # a bad column mapping then lands on /corrupt_data/.
        with open(self.file) as f:
            response = self.client.post(
                '/upload/',
                {'name': 'test1',
                 'file': f},
                follow=True
            )
        session = self.client.session
        session.save()
        tab = Tab.objects.get(pk=session['tab'])
        self.assertTrue(response.context['lines'])
        self.assertEqual(tab.name, 'test1')
        self.assertTrue(session['tab_is_new'])
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.redirect_chain,
                         [('/parse/', 302)])
        response = self.client.post('/parse/',
                                    {'col4': 'date',
                                     'col3': 'name',
                                     'col2': 'phone',
                                     'col1': 'good',
                                     'col0': 'pay'},
                                    follow=True
                                    )
        self.assertEqual(response.redirect_chain,
                         [('/corrupt_data/',
                           302)])
        self.assertEqual(response.status_code, 200)

    def test_update_and_parse(self):
        # Re-uploading into an existing tab keeps its name; with a valid
        # column order the flow ends on that tab's page.
        with open(self.file) as f:
            response = self.client.post(
                '/upload/',
                {'choice_exist_tab': self.tab_exist.id,
                 'file': f},
                follow=True
            )
        session = self.client.session
        session.save()
        tab = Tab.objects.get(pk=session['tab'])
        self.assertTrue(response.context['lines'])
        self.assertEqual(session['tab_is_new'], False)
        self.assertEqual(tab.name, self.tab_exist.name)
        self.assertEqual(response.redirect_chain,
                         [('/parse/', 302)])
        response = self.client.post('/parse/',
                                    self.column_order,
                                    follow=True
                                    )
        tab = Tab.objects.get(pk=session['tab'])
        self.assertEqual(
            response.redirect_chain,
            [('/my_tables/' + tab.slug, 302)]
        )
class TestMyTables(FixturesMixin, TestCase):
    """Table list view: shows all tables owned by the logged-in user."""

    def test_get(self):
        response = self.client.get('/my_tables/')
        qs = response.context['list_tab']
        self.assertSetEqual(
            set(qs),
            {self.tab_exist,
             Tab.objects.get(pk=2),
             Tab.objects.get(pk=3)}
        )
        self.assertEqual(response.status_code, 200)
class TestManageTab(FixturesMixin, TestCase):
    """Tab management view, addressed by the tab's slug."""

    def setUp(self):
        super(TestManageTab, self).setUp()
        self.url = reverse('manage_tab', args=(self.tab_exist.slug, ))

    def test_get_post(self):
        response = self.client.get(self.url)
        session = self.client.session
        session.save()
        self.assertEqual(response.status_code, 200)
class TestDeleteTab(FixturesMixin, TestCase):
    """Tab deletion: POST removes the tab and redirects to the list."""

    def test_post(self):
        test_tab = Tab(owner=self.user, name='test_tab_del')
        test_tab.save()
        url = reverse('delete', args=(test_tab.slug, ))
        response = self.client.post(url,
                                    follow=True)
        self.assertEqual(response.redirect_chain,
                         [('/my_tables', 302), ('/my_tables/', 301)])
class TestLog(FixturesMixin, TestCase):
    """Log view renders for an authenticated user."""

    def test_log(self):
        response = self.client.get('/log/')
        self.assertEqual(response.status_code, 200)
class TestClientList(FixturesMixin, TestCase):
    """Client list view for a given tab slug."""

    def test_get(self):
        url = reverse('client_list',
                      kwargs={'slug': self.tab_exist.slug, })
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
class TestClientCard(FixturesMixin, TestCase):
    """Client card view: renders the detail page for a newly added client."""

    def test_get(self):
        new_client = Person.get_new_line(self.data)
        url = reverse('client_card',
                      kwargs={'slug_tab': self.tab_exist.slug,
                              'slug': new_client.slug})
        response = self.client.get(url)
        session = self.client.session
        session.save()
        self.assertEqual(response.status_code, 200)
class TestRulesList(FixturesMixin, TestCase):
    """Rules list view for a given tab slug."""

    def test_get(self):
        url = reverse('rules', kwargs={'slug': self.tab_exist.slug})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
class TestProfile(FixturesMixin, TestCase):
    """Profile view: GET renders; POST stores hashed SMS credentials."""

    def test_get(self):
        response = self.client.get('/profile/')
        self.assertEqual(response.status_code, 200)

    @mock.patch('rfmizer.sms.RocketSMS.check_balance',
                return_value=[True, 25, None])
    def test_post(self, balance_mock):
        # The SMS gateway is mocked; the view must persist the login,
        # an md5 of the password, and the balance reported by the mock.
        password = 'test_sms_pass'
        login = 'test_sms_login'
        response = self.client.post('/profile/',
                                    {'sms_login': login,
                                     'sms_pass': password},
                                    follow=True)
        hash_pass = hashlib.md5(password.encode('utf-8')).hexdigest()
        user = User.objects.get(pk=self.user.pk)
        self.assertEqual(user.profile.sms_login, login)
        self.assertEqual(user.profile.sms_pass, hash_pass)
        self.assertEqual(user.profile.balance, 25)
        self.assertEqual(response.status_code, 200)
        balance_mock.assert_called_once()
| 38.089109 | 71 | 0.525214 | from django.test import TestCase
from django.urls import reverse
from unittest import mock
from .fixtures import FixturesMixin
from .models import Person, Tab, User
import hashlib
class TestRegister(FixturesMixin, TestCase):
    """Registration view: account creation + auto-login, password mismatch."""

    def test_create_and_login(self):
        # Register, then verify the new credentials by logging in and
        # following the redirect to the profile page.
        self.client.post('/register/',
                         {'username': 'TestUser1',
                          'email': 'test@test.com',
                          'password': 'password',
                          'password2': 'password'})
        session = self.client.session
        session.save()
        user = User.objects.get_by_natural_key('TestUser1')
        self.assertEqual(user.get_username(), 'TestUser1')
        response = self.client.post('/login/',
                                    {'username': 'TestUser1',
                                     'password': 'password'},
                                    follow=True)
        self.assertEqual(response.redirect_chain, [('/profile/', 302)])

    def test_create_with_different_passwords(self):
        # Mismatched passwords must surface the validation message.
        response = self.client.post('/register/',
                                    {'username': 'TestUser1',
                                     'email': 'test@test.com',
                                     'password': 'password1',
                                     'password2': 'password2'})
        self.assertRaisesMessage(response, 'Пароли не совпадают')
class TestLogin(FixturesMixin, TestCase):
    """Login view: valid fixture credentials redirect to the profile page."""

    def test_login(self):
        response = self.client.post('/login/',
                                    {'username': 'TestUser',
                                     'password': 'password'},
                                    follow=True)
        self.assertEqual(response.redirect_chain, [('/profile/', 302)])
class TestUploadToParse(FixturesMixin, TestCase):
    """Upload view and the follow-up column-parsing step."""

    def test_get(self):
        response = self.client.get('/upload/', )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Tab.objects.filter(owner=self.user)[0],
                         self.tab_exist)

    def test_create_and_parse_corrupt_file(self):
        # Uploading under a new name creates a tab and redirects to /parse/;
        # a bad column mapping then lands on /corrupt_data/.
        with open(self.file) as f:
            response = self.client.post(
                '/upload/',
                {'name': 'test1',
                 'file': f},
                follow=True
            )
        session = self.client.session
        session.save()
        tab = Tab.objects.get(pk=session['tab'])
        self.assertTrue(response.context['lines'])
        self.assertEqual(tab.name, 'test1')
        self.assertTrue(session['tab_is_new'])
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.redirect_chain,
                         [('/parse/', 302)])
        response = self.client.post('/parse/',
                                    {'col4': 'date',
                                     'col3': 'name',
                                     'col2': 'phone',
                                     'col1': 'good',
                                     'col0': 'pay'},
                                    follow=True
                                    )
        self.assertEqual(response.redirect_chain,
                         [('/corrupt_data/',
                           302)])
        self.assertEqual(response.status_code, 200)

    def test_update_and_parse(self):
        # Re-uploading into an existing tab keeps its name; with a valid
        # column order the flow ends on that tab's page.
        with open(self.file) as f:
            response = self.client.post(
                '/upload/',
                {'choice_exist_tab': self.tab_exist.id,
                 'file': f},
                follow=True
            )
        session = self.client.session
        session.save()
        tab = Tab.objects.get(pk=session['tab'])
        self.assertTrue(response.context['lines'])
        self.assertEqual(session['tab_is_new'], False)
        self.assertEqual(tab.name, self.tab_exist.name)
        self.assertEqual(response.redirect_chain,
                         [('/parse/', 302)])
        response = self.client.post('/parse/',
                                    self.column_order,
                                    follow=True
                                    )
        tab = Tab.objects.get(pk=session['tab'])
        self.assertEqual(
            response.redirect_chain,
            [('/my_tables/' + tab.slug, 302)]
        )
class TestMyTables(FixturesMixin, TestCase):
    """Table list view: shows all tables owned by the logged-in user."""

    def test_get(self):
        response = self.client.get('/my_tables/')
        qs = response.context['list_tab']
        self.assertSetEqual(
            set(qs),
            {self.tab_exist,
             Tab.objects.get(pk=2),
             Tab.objects.get(pk=3)}
        )
        self.assertEqual(response.status_code, 200)
class TestManageTab(FixturesMixin, TestCase):
    """Tab management view, addressed by the tab's slug."""

    def setUp(self):
        super(TestManageTab, self).setUp()
        self.url = reverse('manage_tab', args=(self.tab_exist.slug, ))

    def test_get_post(self):
        response = self.client.get(self.url)
        session = self.client.session
        session.save()
        self.assertEqual(response.status_code, 200)
class TestDeleteTab(FixturesMixin, TestCase):
    """Tab deletion: POST removes the tab and redirects to the list."""

    def test_post(self):
        test_tab = Tab(owner=self.user, name='test_tab_del')
        test_tab.save()
        url = reverse('delete', args=(test_tab.slug, ))
        response = self.client.post(url,
                                    follow=True)
        self.assertEqual(response.redirect_chain,
                         [('/my_tables', 302), ('/my_tables/', 301)])
class TestLog(FixturesMixin, TestCase):
    """Log view renders for an authenticated user."""

    def test_log(self):
        response = self.client.get('/log/')
        self.assertEqual(response.status_code, 200)
class TestClientList(FixturesMixin, TestCase):
    """Client list view for a given tab slug."""

    def test_get(self):
        url = reverse('client_list',
                      kwargs={'slug': self.tab_exist.slug, })
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
class TestClientCard(FixturesMixin, TestCase):
    """Client card view: renders the detail page for a newly added client."""

    def test_get(self):
        new_client = Person.get_new_line(self.data)
        url = reverse('client_card',
                      kwargs={'slug_tab': self.tab_exist.slug,
                              'slug': new_client.slug})
        response = self.client.get(url)
        session = self.client.session
        session.save()
        self.assertEqual(response.status_code, 200)
class TestRulesList(FixturesMixin, TestCase):
    """Rules list view for a given tab slug."""

    def test_get(self):
        url = reverse('rules', kwargs={'slug': self.tab_exist.slug})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
class TestProfile(FixturesMixin, TestCase):
    """Profile view: GET renders; POST stores hashed SMS credentials."""

    def test_get(self):
        response = self.client.get('/profile/')
        self.assertEqual(response.status_code, 200)

    @mock.patch('rfmizer.sms.RocketSMS.check_balance',
                return_value=[True, 25, None])
    def test_post(self, balance_mock):
        # The SMS gateway is mocked; the view must persist the login,
        # an md5 of the password, and the balance reported by the mock.
        password = 'test_sms_pass'
        login = 'test_sms_login'
        response = self.client.post('/profile/',
                                    {'sms_login': login,
                                     'sms_pass': password},
                                    follow=True)
        hash_pass = hashlib.md5(password.encode('utf-8')).hexdigest()
        user = User.objects.get(pk=self.user.pk)
        self.assertEqual(user.profile.sms_login, login)
        self.assertEqual(user.profile.sms_pass, hash_pass)
        self.assertEqual(user.profile.balance, 25)
        self.assertEqual(response.status_code, 200)
        balance_mock.assert_called_once()
| true | true |
f72078f20b5cc5766f8c851d62a5d4fcbc04c993 | 13 | py | Python | python/testData/inspections/PyUnresolvedReferencesInspection/ImportToContainingFileInPackage/p1/__init__.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/inspections/PyUnresolvedReferencesInspection/ImportToContainingFileInPackage/p1/__init__.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/inspections/PyUnresolvedReferencesInspection/ImportToContainingFileInPackage/p1/__init__.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | import p1.m1
| 6.5 | 12 | 0.769231 | import p1.m1
| true | true |
f72079009d9d6b4d0c1dd53f8c56f7204272da9b | 1,527 | py | Python | firmware/adafruit-circuitpython-bundle-5.x-mpy-20200915/examples/st7735r_minitft_simpletest.py | freeglow/microcontroller-cpy | 5adfda49da6eefaece81be2a2f26122d68736355 | [
"MIT"
] | null | null | null | firmware/adafruit-circuitpython-bundle-5.x-mpy-20200915/examples/st7735r_minitft_simpletest.py | freeglow/microcontroller-cpy | 5adfda49da6eefaece81be2a2f26122d68736355 | [
"MIT"
] | null | null | null | firmware/adafruit-circuitpython-bundle-5.x-mpy-20200915/examples/st7735r_minitft_simpletest.py | freeglow/microcontroller-cpy | 5adfda49da6eefaece81be2a2f26122d68736355 | [
"MIT"
] | null | null | null | """
This test will initialize the display using displayio and draw a solid green
background, a smaller purple rectangle, and some yellow text.
"""
import board
import terminalio
import displayio
from adafruit_display_text import label
from adafruit_st7735r import ST7735R
# Release any resources currently in use for the displays
displayio.release_displays()
spi = board.SPI()
tft_cs = board.D5
tft_dc = board.D6
display_bus = displayio.FourWire(
spi, command=tft_dc, chip_select=tft_cs, reset=board.D9
)
display = ST7735R(
display_bus, width=160, height=80, colstart=24, rotation=270, bgr=True
)
# Make the display context
splash = displayio.Group(max_size=10)
display.show(splash)
color_bitmap = displayio.Bitmap(160, 80, 1)
color_palette = displayio.Palette(1)
color_palette[0] = 0x00FF00 # Bright Green
bg_sprite = displayio.TileGrid(color_bitmap, pixel_shader=color_palette, x=0, y=0)
splash.append(bg_sprite)
# Draw a smaller inner rectangle
inner_bitmap = displayio.Bitmap(150, 70, 1)
inner_palette = displayio.Palette(1)
inner_palette[0] = 0xAA0088 # Purple
inner_sprite = displayio.TileGrid(inner_bitmap, pixel_shader=inner_palette, x=5, y=5)
splash.append(inner_sprite)
# Draw a label
text_group = displayio.Group(max_size=10, scale=2, x=11, y=40)
text = "Hello World!"
text_area = label.Label(terminalio.FONT, text=text, color=0xFFFF00)
text_group.append(text_area) # Subgroup for text scaling
splash.append(text_group)
while True:
pass
| 28.277778 | 86 | 0.749836 |
"""Initialize an ST7735R mini TFT with displayio and draw a green background,
a purple inner rectangle, and yellow "Hello World!" text."""
import board
import terminalio
import displayio
from adafruit_display_text import label
from adafruit_st7735r import ST7735R

# Release any resources currently in use for the displays.
displayio.release_displays()

spi = board.SPI()
tft_cs = board.D5
tft_dc = board.D6
# Mini TFT wiring: chip-select on D5, data/command on D6, reset on D9.
display_bus = displayio.FourWire(
    spi, command=tft_dc, chip_select=tft_cs, reset=board.D9
)

# 160x80 ST7735R panel; colstart/rotation/bgr match the mini TFT module.
display = ST7735R(
    display_bus, width=160, height=80, colstart=24, rotation=270, bgr=True
)

# Root display group.
splash = displayio.Group(max_size=10)
display.show(splash)

# Full-screen background sprite: 1-color bitmap + single-entry palette.
color_bitmap = displayio.Bitmap(160, 80, 1)
color_palette = displayio.Palette(1)
color_palette[0] = 0x00FF00  # bright green

bg_sprite = displayio.TileGrid(color_bitmap, pixel_shader=color_palette, x=0, y=0)
splash.append(bg_sprite)

# Smaller inner rectangle.
inner_bitmap = displayio.Bitmap(150, 70, 1)
inner_palette = displayio.Palette(1)
inner_palette[0] = 0xAA0088  # purple
inner_sprite = displayio.TileGrid(inner_bitmap, pixel_shader=inner_palette, x=5, y=5)
splash.append(inner_sprite)

# Text label, scaled 2x.
text_group = displayio.Group(max_size=10, scale=2, x=11, y=40)
text = "Hello World!"
text_area = label.Label(terminalio.FONT, text=text, color=0xFFFF00)
text_group.append(text_area)
splash.append(text_group)

# Keep the program alive so the display stays on.
while True:
    pass
| true | true |
f7207935c2b023e55a08f2a1a9a84c47dc130d71 | 16,613 | py | Python | model/model.py | sahara2001/editsql | d4325ac996d1ed0069def6d349e43e2a1914e761 | [
"MIT"
] | null | null | null | model/model.py | sahara2001/editsql | d4325ac996d1ed0069def6d349e43e2a1914e761 | [
"MIT"
] | null | null | null | model/model.py | sahara2001/editsql | d4325ac996d1ed0069def6d349e43e2a1914e761 | [
"MIT"
] | null | null | null | """ Class for the Sequence to sequence model for ATIS."""
import os
import torch
import torch.nn.functional as F
from . import torch_utils
from . import utils_bert
from data_util.vocabulary import DEL_TOK, UNK_TOK
from .encoder import Encoder, Encoder_Gnn
from .embedder import Embedder
from .token_predictor import construct_token_predictor
import numpy as np
from data_util.atis_vocab import ATISVocabulary
from .gated_graph_conv import GatedGraphConv
def get_token_indices(token, index_to_token):
    """ Maps from a gold token (string) to a list of indices.

    Inputs:
        token (string): String to look up.
        index_to_token (list of tokens): Ordered list of tokens.

    Returns:
        list of int: every position at which the token occurs; falls back to
        the position of UNK_TOK when the token is out of vocabulary.
    """
    if token not in index_to_token:
        return [index_to_token.index(UNK_TOK)]
    # Collect every occurrence in one pass.  When the vocabulary has no
    # duplicates this is simply the single canonical index.
    return [position for position, candidate in enumerate(index_to_token)
            if candidate == token]
def flatten_utterances(utterances):
    """ Gets a flat sequence from a sequence of utterances.

    Inputs:
        utterances (list of list of str): Utterances to concatenate.

    Returns:
        list of str, representing the flattened sequence with separating
        delimiter tokens.
    """
    flat = []
    for position, utterance in enumerate(utterances):
        # Insert the delimiter between utterances (never leading/trailing).
        if position > 0:
            flat.append(DEL_TOK)
        flat.extend(utterance)
    return flat
def encode_snippets_with_states(snippets, states):
    """ Encodes snippets by using previous query states instead.

    Inputs:
        snippets (list of Snippet): Input snippets.
        states (list of dy.Expression): Previous hidden states to use.
        TODO: should this by dy.Expression or vector values?

    Each snippet's embedding becomes the concatenation of the hidden states
    at its start and end positions.  The list is returned for chaining.
    """
    for snippet in snippets:
        start_state = states[snippet.startpos]
        end_state = states[snippet.endpos]
        snippet.set_embedding(torch.cat([start_state, end_state], dim=0))
    return snippets
def load_word_embeddings(input_vocabulary, output_vocabulary, output_vocabulary_schema, params):
    """Build GloVe-initialized embedding matrices for the input, output, and
    (optional) schema vocabularies.

    Returns (input_embeddings, output_embeddings, schema_embeddings_or_None,
    input_embedding_size).  Tokens absent from GloVe keep all-zero rows.
    """
    def read_glove_embedding(embedding_filename, embedding_size):
        # Parse a text GloVe file: each line is "<word...> <300 floats>".
        # Words may themselves contain spaces, hence the split arithmetic.
        glove_embeddings = {}
        with open(embedding_filename) as f:
            cnt = 1
            for line in f:
                cnt += 1
                if params.debug or not params.train:
                    # Debug/inference shortcut: stop after ~1000 lines.
                    # NOTE(review): cnt starts at 1 and is bumped before the
                    # check, so this actually reads 998 lines, not 1000.
                    if cnt == 1000:
                        print('Read 1000 word embeddings')
                        break
                l_split = line.split()
                word = " ".join(l_split[0:len(l_split) - embedding_size])
                embedding = np.array([float(val) for val in l_split[-embedding_size:]])
                glove_embeddings[word] = embedding
        return glove_embeddings

    print('Loading Glove Embedding from', params.embedding_filename)
    glove_embedding_size = 300
    glove_embeddings = read_glove_embedding(params.embedding_filename, glove_embedding_size)
    print('Done')
    input_embedding_size = glove_embedding_size

    def create_word_embeddings(vocab):
        # One row per vocabulary id; copy the GloVe vector when available.
        vocabulary_embeddings = np.zeros((len(vocab), glove_embedding_size), dtype=np.float32)
        vocabulary_tokens = vocab.inorder_tokens

        glove_oov = 0
        para_oov = 0  # NOTE(review): never incremented; kept only for the log line
        for token in vocabulary_tokens:
            token_id = vocab.token_to_id(token)
            if token in glove_embeddings:
                vocabulary_embeddings[token_id][:glove_embedding_size] = glove_embeddings[token]
            else:
                glove_oov += 1
        print('Glove OOV:', glove_oov, 'Para OOV', para_oov, 'Total', len(vocab))
        return vocabulary_embeddings

    input_vocabulary_embeddings = create_word_embeddings(input_vocabulary)
    output_vocabulary_embeddings = create_word_embeddings(output_vocabulary)
    output_vocabulary_schema_embeddings = None
    if output_vocabulary_schema:
        output_vocabulary_schema_embeddings = create_word_embeddings(output_vocabulary_schema)
    return input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size
class ATISModel(torch.nn.Module):
    """ Sequence-to-sequence model for predicting a SQL query given an utterance
    and an interaction prefix.
    """

    def __init__(
            self,
            params,
            input_vocabulary,
            output_vocabulary,
            output_vocabulary_schema,
            anonymizer):
        super().__init__()

        self.params = params

        if params.use_bert:
            self.model_bert, self.tokenizer, self.bert_config = utils_bert.get_bert(params)

        self.gnn=None
        # Non-ATIS datasets (e.g. Spider/SParC) use GloVe (and optionally
        # BERT/GNN) initialization; ATIS uses randomly-initialized embedders.
        if 'atis' not in params.data_directory:
            if params.use_bert:
                if params.use_gnn:
                    encoder_input_size = self.bert_config.hidden_size
                    encoder_output_size = params.encoder_state_size
                    self.gnn = GatedGraphConv(encoder_output_size, 2, 3) #input_dim, num_timesteps, num_edge_types,
                input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size = load_word_embeddings(input_vocabulary, output_vocabulary, output_vocabulary_schema, params)

                # Create the output embeddings
                self.output_embedder = Embedder(params.output_embedding_size,
                                                name="output-embedding",
                                                initializer=output_vocabulary_embeddings,
                                                vocabulary=output_vocabulary,
                                                anonymizer=anonymizer,
                                                freeze=False)
                self.column_name_token_embedder = None
            else:
                input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size = load_word_embeddings(input_vocabulary, output_vocabulary, output_vocabulary_schema, params)

                # GloVe fixes the input embedding size; propagate it so the
                # encoder below is sized consistently.
                params.input_embedding_size = input_embedding_size
                self.params.input_embedding_size = input_embedding_size

                # Create the input embeddings
                self.input_embedder = Embedder(params.input_embedding_size,
                                               name="input-embedding",
                                               initializer=input_vocabulary_embeddings,
                                               vocabulary=input_vocabulary,
                                               anonymizer=anonymizer,
                                               freeze=params.freeze)

                # Create the output embeddings
                self.output_embedder = Embedder(params.output_embedding_size,
                                                name="output-embedding",
                                                initializer=output_vocabulary_embeddings,
                                                vocabulary=output_vocabulary,
                                                anonymizer=anonymizer,
                                                freeze=False)

                self.column_name_token_embedder = Embedder(params.input_embedding_size,
                                                           name="schema-embedding",
                                                           initializer=output_vocabulary_schema_embeddings,
                                                           vocabulary=output_vocabulary_schema,
                                                           anonymizer=anonymizer,
                                                           freeze=params.freeze)
        else:
            # Create the input embeddings
            self.input_embedder = Embedder(params.input_embedding_size,
                                           name="input-embedding",
                                           vocabulary=input_vocabulary,
                                           anonymizer=anonymizer,
                                           freeze=False)

            # Create the output embeddings
            self.output_embedder = Embedder(params.output_embedding_size,
                                            name="output-embedding",
                                            vocabulary=output_vocabulary,
                                            anonymizer=anonymizer,
                                            freeze=False)

            self.column_name_token_embedder = None

        # Create the encoder
        encoder_input_size = params.input_embedding_size
        encoder_output_size = params.encoder_state_size
        if params.use_bert:
            encoder_input_size = self.bert_config.hidden_size
        if params.discourse_level_lstm:
            # NOTE(review): integer params would give a float here under
            # Python 3 (true division) — presumably handled downstream.
            encoder_input_size += params.encoder_state_size / 2

        self.utterance_encoder = Encoder(params.encoder_num_layers, encoder_input_size, encoder_output_size)

        # Positional embedder for utterances
        attention_key_size = params.encoder_state_size
        self.schema_attention_key_size = attention_key_size
        if params.state_positional_embeddings:
            attention_key_size += params.positional_embedding_size
            self.positional_embedder = Embedder(
                params.positional_embedding_size,
                name="positional-embedding",
                num_tokens=params.maximum_utterances)

        self.utterance_attention_key_size = attention_key_size

        # Create the discourse-level LSTM parameters
        if params.discourse_level_lstm:
            self.discourse_lstms = torch_utils.create_multilayer_lstm_params(1, params.encoder_state_size, params.encoder_state_size / 2, "LSTM-t")
            self.initial_discourse_state = torch_utils.add_params(tuple([params.encoder_state_size / 2]), "V-turn-state-0")

        # Snippet encoder
        final_snippet_size = 0
        if params.use_snippets and not params.previous_decoder_snippet_encoding:
            snippet_encoding_size = int(params.encoder_state_size / 2)
            final_snippet_size = params.encoder_state_size
            if params.snippet_age_embedding:
                snippet_encoding_size -= int(
                    params.snippet_age_embedding_size / 4)
                self.snippet_age_embedder = Embedder(
                    params.snippet_age_embedding_size,
                    name="snippet-age-embedding",
                    num_tokens=params.max_snippet_age_embedding)
                final_snippet_size = params.encoder_state_size + params.snippet_age_embedding_size / 2

            self.snippet_encoder = Encoder(params.snippet_num_layers,
                                           params.output_embedding_size,
                                           snippet_encoding_size)

        # Previous query Encoder
        if params.use_previous_query:
            self.query_encoder = Encoder(params.encoder_num_layers, params.output_embedding_size, params.encoder_state_size)

        self.final_snippet_size = final_snippet_size
        # Dropout amount; updated per-epoch via set_dropout().
        self.dropout = 0.

    def _encode_snippets(self, previous_query, snippets, input_schema):
        """ Computes a single vector representation for each snippet.

        Inputs:
            previous_query (list of str): Previous query in the interaction.
            snippets (list of Snippet): Snippets extracted from the previous

        Returns:
            list of Snippets, where the embedding is set to a vector.
        """
        startpoints = [snippet.startpos for snippet in snippets]
        endpoints = [snippet.endpos for snippet in snippets]
        assert len(startpoints) == 0 or min(startpoints) >= 0
        # With a schema, endpos is an exclusive bound; without, inclusive.
        if input_schema:
            assert len(endpoints) == 0 or max(endpoints) <= len(previous_query)
        else:
            assert len(endpoints) == 0 or max(endpoints) < len(previous_query)

        snippet_embedder = lambda query_token: self.get_query_token_embedding(query_token, input_schema)
        if previous_query and snippets:
            _, previous_outputs = self.snippet_encoder(
                previous_query, snippet_embedder, dropout_amount=self.dropout)
            assert len(previous_outputs) == len(previous_query)

            for snippet in snippets:
                if input_schema:
                    embedding = torch.cat([previous_outputs[snippet.startpos],previous_outputs[snippet.endpos-1]], dim=0)
                else:
                    embedding = torch.cat([previous_outputs[snippet.startpos],previous_outputs[snippet.endpos]], dim=0)
                if self.params.snippet_age_embedding:
                    embedding = torch.cat([embedding, self.snippet_age_embedder(min(snippet.age, self.params.max_snippet_age_embedding - 1))], dim=0)
                snippet.set_embedding(embedding)

        return snippets

    def _initialize_discourse_states(self):
        """Return the initial discourse vector and zeroed (h, c) pairs for
        each discourse-level LSTM layer (on GPU when the LSTM weights are)."""
        discourse_state = self.initial_discourse_state

        discourse_lstm_states = []
        for lstm in self.discourse_lstms:
            hidden_size = lstm.weight_hh.size()[1]
            if lstm.weight_hh.is_cuda:
                h_0 = torch.cuda.FloatTensor(1,hidden_size).fill_(0)
                c_0 = torch.cuda.FloatTensor(1,hidden_size).fill_(0)
            else:
                h_0 = torch.zeros(1,hidden_size)
                c_0 = torch.zeros(1,hidden_size)
            discourse_lstm_states.append((h_0, c_0))

        return discourse_state, discourse_lstm_states

    def _add_positional_embeddings(self, hidden_states, utterances, group=False):
        """Concatenate a turn-distance positional embedding onto each hidden
        state, keeping only the most recent ``params.maximum_utterances``
        turns.  Returns (states, flattened token sequence)."""
        grouped_states = []

        # Re-split the flat hidden-state list back into per-utterance runs.
        start_index = 0
        for utterance in utterances:
            grouped_states.append(hidden_states[start_index:start_index + len(utterance)])
            start_index += len(utterance)
        assert len(hidden_states) == sum([len(seq) for seq in grouped_states]) == sum([len(utterance) for utterance in utterances])

        new_states = []
        flat_sequence = []

        num_utterances_to_keep = min(self.params.maximum_utterances, len(utterances))
        for i, (states, utterance) in enumerate(zip(
                grouped_states[-num_utterances_to_keep:], utterances[-num_utterances_to_keep:])):
            positional_sequence = []
            # index = how many turns back this utterance is (0 = most recent).
            index = num_utterances_to_keep - i - 1

            for state in states:
                positional_sequence.append(torch.cat([state, self.positional_embedder(index)], dim=0))

            assert len(positional_sequence) == len(utterance), \
                "Expected utterance and state sequence length to be the same, " \
                + "but they were " + str(len(utterance)) \
                + " and " + str(len(positional_sequence))

            if group:
                new_states.append(positional_sequence)
            else:
                new_states.extend(positional_sequence)
            flat_sequence.extend(utterance)

        return new_states, flat_sequence

    def build_optim(self):
        """Create the Adam optimizer(s): one for the main model and, when
        fine-tuning, a separate one (own LR) for the BERT parameters."""
        params_trainer = []
        params_bert_trainer = []
        for name, param in self.named_parameters():
            if param.requires_grad:
                if 'model_bert' in name:
                    params_bert_trainer.append(param)
                else:
                    params_trainer.append(param)
        self.trainer = torch.optim.Adam(params_trainer, lr=self.params.initial_learning_rate)
        if self.params.fine_tune_bert:
            self.bert_trainer = torch.optim.Adam(params_bert_trainer, lr=self.params.lr_bert)

    def set_dropout(self, value):
        """ Sets the dropout to a specified value.

        Inputs:
            value (float): Value to set dropout to.
        """
        self.dropout = value

    def set_learning_rate(self, value):
        """ Sets the learning rate for the trainer.

        Inputs:
            value (float): The new learning rate.
        """
        for param_group in self.trainer.param_groups:
            param_group['lr'] = value

    def save(self, filename):
        """ Saves the model to the specified filename.

        Inputs:
            filename (str): The filename to save to.
        """
        torch.save(self.state_dict(), filename)

    def load(self, filename):
        """ Loads saved parameters into the parameter collection.

        Inputs:
            filename (str): Name of file containing parameters.
        """
        self.load_state_dict(torch.load(filename))
        print("Loaded model from file " + filename)
| 41.325871 | 226 | 0.618612 |
import os
import torch
import torch.nn.functional as F
from . import torch_utils
from . import utils_bert
from data_util.vocabulary import DEL_TOK, UNK_TOK
from .encoder import Encoder, Encoder_Gnn
from .embedder import Embedder
from .token_predictor import construct_token_predictor
import numpy as np
from data_util.atis_vocab import ATISVocabulary
from .gated_graph_conv import GatedGraphConv
def get_token_indices(token, index_to_token):
    """Map a gold token to the list of all its indices in the vocabulary,
    falling back to UNK_TOK's position when it is out of vocabulary."""
    if token not in index_to_token:
        return [index_to_token.index(UNK_TOK)]
    # One pass collects every occurrence; with a duplicate-free vocabulary
    # this is just the single canonical index.
    return [position for position, candidate in enumerate(index_to_token)
            if candidate == token]
def flatten_utterances(utterances):
    """Concatenate utterances into one flat token list, inserting DEL_TOK
    between consecutive utterances (never leading or trailing)."""
    flat = []
    for position, utterance in enumerate(utterances):
        if position > 0:
            flat.append(DEL_TOK)
        flat.extend(utterance)
    return flat
def encode_snippets_with_states(snippets, states):
    """Set each snippet's embedding to the concatenation of the hidden
    states at its start and end positions; returns the list for chaining."""
    for snippet in snippets:
        start_state = states[snippet.startpos]
        end_state = states[snippet.endpos]
        snippet.set_embedding(torch.cat([start_state, end_state], dim=0))
    return snippets
def load_word_embeddings(input_vocabulary, output_vocabulary, output_vocabulary_schema, params):
    """Build GloVe-initialized embedding matrices for the input, output, and
    (optional) schema vocabularies.

    Returns (input_embeddings, output_embeddings, schema_embeddings_or_None,
    input_embedding_size).  Tokens absent from GloVe keep all-zero rows.
    """
    def read_glove_embedding(embedding_filename, embedding_size):
        # Parse a text GloVe file: each line is "<word...> <300 floats>".
        # Words may themselves contain spaces, hence the split arithmetic.
        glove_embeddings = {}
        with open(embedding_filename) as f:
            cnt = 1
            for line in f:
                cnt += 1
                if params.debug or not params.train:
                    # Debug/inference shortcut: stop after ~1000 lines.
                    # NOTE(review): cnt starts at 1 and is bumped before the
                    # check, so this actually reads 998 lines, not 1000.
                    if cnt == 1000:
                        print('Read 1000 word embeddings')
                        break
                l_split = line.split()
                word = " ".join(l_split[0:len(l_split) - embedding_size])
                embedding = np.array([float(val) for val in l_split[-embedding_size:]])
                glove_embeddings[word] = embedding
        return glove_embeddings

    print('Loading Glove Embedding from', params.embedding_filename)
    glove_embedding_size = 300
    glove_embeddings = read_glove_embedding(params.embedding_filename, glove_embedding_size)
    print('Done')
    input_embedding_size = glove_embedding_size

    def create_word_embeddings(vocab):
        # One row per vocabulary id; copy the GloVe vector when available.
        vocabulary_embeddings = np.zeros((len(vocab), glove_embedding_size), dtype=np.float32)
        vocabulary_tokens = vocab.inorder_tokens

        glove_oov = 0
        para_oov = 0  # NOTE(review): never incremented; kept only for the log line
        for token in vocabulary_tokens:
            token_id = vocab.token_to_id(token)
            if token in glove_embeddings:
                vocabulary_embeddings[token_id][:glove_embedding_size] = glove_embeddings[token]
            else:
                glove_oov += 1
        print('Glove OOV:', glove_oov, 'Para OOV', para_oov, 'Total', len(vocab))
        return vocabulary_embeddings

    input_vocabulary_embeddings = create_word_embeddings(input_vocabulary)
    output_vocabulary_embeddings = create_word_embeddings(output_vocabulary)
    output_vocabulary_schema_embeddings = None
    if output_vocabulary_schema:
        output_vocabulary_schema_embeddings = create_word_embeddings(output_vocabulary_schema)
    return input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size
class ATISModel(torch.nn.Module):
def __init__(
self,
params,
input_vocabulary,
output_vocabulary,
output_vocabulary_schema,
anonymizer):
super().__init__()
self.params = params
if params.use_bert:
self.model_bert, self.tokenizer, self.bert_config = utils_bert.get_bert(params)
self.gnn=None
if 'atis' not in params.data_directory:
if params.use_bert:
if params.use_gnn:
encoder_input_size = self.bert_config.hidden_size
encoder_output_size = params.encoder_state_size
self.gnn = GatedGraphConv(encoder_output_size, 2, 3)
input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size = load_word_embeddings(input_vocabulary, output_vocabulary, output_vocabulary_schema, params)
self.output_embedder = Embedder(params.output_embedding_size,
name="output-embedding",
initializer=output_vocabulary_embeddings,
vocabulary=output_vocabulary,
anonymizer=anonymizer,
freeze=False)
self.column_name_token_embedder = None
else:
input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size = load_word_embeddings(input_vocabulary, output_vocabulary, output_vocabulary_schema, params)
params.input_embedding_size = input_embedding_size
self.params.input_embedding_size = input_embedding_size
self.input_embedder = Embedder(params.input_embedding_size,
name="input-embedding",
initializer=input_vocabulary_embeddings,
vocabulary=input_vocabulary,
anonymizer=anonymizer,
freeze=params.freeze)
self.output_embedder = Embedder(params.output_embedding_size,
name="output-embedding",
initializer=output_vocabulary_embeddings,
vocabulary=output_vocabulary,
anonymizer=anonymizer,
freeze=False)
self.column_name_token_embedder = Embedder(params.input_embedding_size,
name="schema-embedding",
initializer=output_vocabulary_schema_embeddings,
vocabulary=output_vocabulary_schema,
anonymizer=anonymizer,
freeze=params.freeze)
else:
self.input_embedder = Embedder(params.input_embedding_size,
name="input-embedding",
vocabulary=input_vocabulary,
anonymizer=anonymizer,
freeze=False)
self.output_embedder = Embedder(params.output_embedding_size,
name="output-embedding",
vocabulary=output_vocabulary,
anonymizer=anonymizer,
freeze=False)
self.column_name_token_embedder = None
encoder_input_size = params.input_embedding_size
encoder_output_size = params.encoder_state_size
if params.use_bert:
encoder_input_size = self.bert_config.hidden_size
if params.discourse_level_lstm:
encoder_input_size += params.encoder_state_size / 2
self.utterance_encoder = Encoder(params.encoder_num_layers, encoder_input_size, encoder_output_size)
attention_key_size = params.encoder_state_size
self.schema_attention_key_size = attention_key_size
if params.state_positional_embeddings:
attention_key_size += params.positional_embedding_size
self.positional_embedder = Embedder(
params.positional_embedding_size,
name="positional-embedding",
num_tokens=params.maximum_utterances)
self.utterance_attention_key_size = attention_key_size
if params.discourse_level_lstm:
self.discourse_lstms = torch_utils.create_multilayer_lstm_params(1, params.encoder_state_size, params.encoder_state_size / 2, "LSTM-t")
self.initial_discourse_state = torch_utils.add_params(tuple([params.encoder_state_size / 2]), "V-turn-state-0")
final_snippet_size = 0
if params.use_snippets and not params.previous_decoder_snippet_encoding:
snippet_encoding_size = int(params.encoder_state_size / 2)
final_snippet_size = params.encoder_state_size
if params.snippet_age_embedding:
snippet_encoding_size -= int(
params.snippet_age_embedding_size / 4)
self.snippet_age_embedder = Embedder(
params.snippet_age_embedding_size,
name="snippet-age-embedding",
num_tokens=params.max_snippet_age_embedding)
final_snippet_size = params.encoder_state_size + params.snippet_age_embedding_size / 2
self.snippet_encoder = Encoder(params.snippet_num_layers,
params.output_embedding_size,
snippet_encoding_size)
if params.use_previous_query:
self.query_encoder = Encoder(params.encoder_num_layers, params.output_embedding_size, params.encoder_state_size)
self.final_snippet_size = final_snippet_size
self.dropout = 0.
def _encode_snippets(self, previous_query, snippets, input_schema):
    """Compute and attach embeddings for snippets of the previous query.

    Runs the snippet encoder over ``previous_query`` and, for each snippet,
    concatenates the encoder outputs at the snippet's start and end
    positions (optionally appending a snippet-age embedding). The result is
    stored on the snippet object itself via ``set_embedding``.

    Inputs:
        previous_query: sequence of query tokens from the previous turn.
        snippets: snippet objects with startpos/endpos/age attributes.
        input_schema: schema object (or falsy); its presence switches the
            endpoint convention — see the asserts below.

    Returns the same ``snippets`` list, with embeddings set in place.
    """
    startpoints = [snippet.startpos for snippet in snippets]
    endpoints = [snippet.endpos for snippet in snippets]
    assert len(startpoints) == 0 or min(startpoints) >= 0
    if input_schema:
        # With a schema, endpos is treated as exclusive (may equal len).
        assert len(endpoints) == 0 or max(endpoints) <= len(previous_query)
    else:
        # Without a schema, endpos is treated as inclusive (strictly < len).
        assert len(endpoints) == 0 or max(endpoints) < len(previous_query)

    snippet_embedder = lambda query_token: self.get_query_token_embedding(query_token, input_schema)
    if previous_query and snippets:
        _, previous_outputs = self.snippet_encoder(
            previous_query, snippet_embedder, dropout_amount=self.dropout)
        assert len(previous_outputs) == len(previous_query)

        for snippet in snippets:
            # Snippet embedding = [state at start ; state at end].
            if input_schema:
                embedding = torch.cat([previous_outputs[snippet.startpos],previous_outputs[snippet.endpos-1]], dim=0)
            else:
                embedding = torch.cat([previous_outputs[snippet.startpos],previous_outputs[snippet.endpos]], dim=0)
            if self.params.snippet_age_embedding:
                # Clamp the age so it stays inside the embedding table.
                embedding = torch.cat([embedding, self.snippet_age_embedder(min(snippet.age, self.params.max_snippet_age_embedding - 1))], dim=0)
            snippet.set_embedding(embedding)

    return snippets
def _initialize_discourse_states(self):
    """Create initial states for the discourse-level LSTM.

    Returns a tuple of:
        - the learned initial discourse state vector, and
        - a list of zero-initialized (h_0, c_0) pairs, one per LSTM cell
          in ``self.discourse_lstms``.
    """
    discourse_state = self.initial_discourse_state

    discourse_lstm_states = []
    for lstm in self.discourse_lstms:
        # Recover the hidden size from the recurrent weight matrix.
        hidden_size = lstm.weight_hh.size()[1]
        # Allocate the zero states on the same device as the weights.
        if lstm.weight_hh.is_cuda:
            h_0 = torch.cuda.FloatTensor(1,hidden_size).fill_(0)
            c_0 = torch.cuda.FloatTensor(1,hidden_size).fill_(0)
        else:
            h_0 = torch.zeros(1,hidden_size)
            c_0 = torch.zeros(1,hidden_size)
        discourse_lstm_states.append((h_0, c_0))

    return discourse_state, discourse_lstm_states
def _add_positional_embeddings(self, hidden_states, utterances, group=False):
    """Append turn-position embeddings to each utterance's hidden states.

    ``hidden_states`` is the flat concatenation of per-utterance encoder
    states; it is first re-grouped by utterance, then each state gets an
    embedding of that utterance's distance from the most recent turn
    concatenated to it (index 0 = current turn). Only the last
    ``params.maximum_utterances`` turns are kept.

    Inputs:
        hidden_states: flat list of state vectors covering all utterances.
        utterances: list of token lists aligned with ``hidden_states``.
        group: if True, return states grouped per utterance; otherwise a
            single flat list.

    Returns (new_states, flat_sequence), where ``flat_sequence`` is the
    concatenation of the kept utterances' tokens.
    """
    grouped_states = []

    # Re-group the flat state list by utterance length.
    start_index = 0
    for utterance in utterances:
        grouped_states.append(hidden_states[start_index:start_index + len(utterance)])
        start_index += len(utterance)
    assert len(hidden_states) == sum([len(seq) for seq in grouped_states]) == sum([len(utterance) for utterance in utterances])

    new_states = []
    flat_sequence = []

    num_utterances_to_keep = min(self.params.maximum_utterances, len(utterances))
    for i, (states, utterance) in enumerate(zip(
            grouped_states[-num_utterances_to_keep:], utterances[-num_utterances_to_keep:])):
        positional_sequence = []
        # Index counts backwards: the most recent utterance gets 0.
        index = num_utterances_to_keep - i - 1

        for state in states:
            positional_sequence.append(torch.cat([state, self.positional_embedder(index)], dim=0))

        assert len(positional_sequence) == len(utterance), \
            "Expected utterance and state sequence length to be the same, " \
            + "but they were " + str(len(utterance)) \
            + " and " + str(len(positional_sequence))

        if group:
            new_states.append(positional_sequence)
        else:
            new_states.extend(positional_sequence)
        flat_sequence.extend(utterance)

    return new_states, flat_sequence
def build_optim(self):
    """Build the Adam optimizer(s) over all trainable parameters.

    Parameters whose name contains 'model_bert' are collected separately;
    when ``params.fine_tune_bert`` is set they get their own optimizer
    (``self.bert_trainer``) with a dedicated learning rate. Everything
    else is handled by ``self.trainer``.
    """
    params_trainer = []
    params_bert_trainer = []
    for name, param in self.named_parameters():
        if param.requires_grad:
            if 'model_bert' in name:
                params_bert_trainer.append(param)
            else:
                params_trainer.append(param)
    self.trainer = torch.optim.Adam(params_trainer, lr=self.params.initial_learning_rate)
    if self.params.fine_tune_bert:
        self.bert_trainer = torch.optim.Adam(params_bert_trainer, lr=self.params.lr_bert)
def set_dropout(self, value):
    """Set the dropout amount used by the encoders (0. disables dropout)."""
    self.dropout = value
def set_learning_rate(self, value):
    """Update the learning rate of the main (non-BERT) optimizer in place."""
    for param_group in self.trainer.param_groups:
        param_group['lr'] = value
def save(self, filename):
    """Serialize the model parameters (state_dict) to *filename*."""
    torch.save(self.state_dict(), filename)
def load(self, filename):
    """Restore model parameters from a state_dict file written by save()."""
    self.load_state_dict(torch.load(filename))
    print("Loaded model from file " + filename)
| true | true |
f7207971f79f86b58e5d4a4b9ee3f1c8c602689e | 1,890 | py | Python | MNISTT.py | ankit9437/MNIST | bf620e7779a5383c2ad87cf89cd11651963bd7c5 | [
"MIT"
] | null | null | null | MNISTT.py | ankit9437/MNIST | bf620e7779a5383c2ad87cf89cd11651963bd7c5 | [
"MIT"
] | null | null | null | MNISTT.py | ankit9437/MNIST | bf620e7779a5383c2ad87cf89cd11651963bd7c5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 15 10:58:44 2019
@author: DELL
"""
from __future__ import print_function, division
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def d(u, v):
    """Return the squared Euclidean distance between vectors *u* and *v*."""
    delta = u - v
    return delta.dot(delta)
def get_data(limit=None):
    """Load the MNIST training set from 'train.csv' (Kaggle CSV format).

    Pixel values are scaled to [0, 1] and the rows are shuffled. When
    *limit* is given, only the first *limit* shuffled examples are kept.

    Returns (X, Y): the feature matrix and the label vector.
    """
    print("Reading in and transforming data...")
    df = pd.read_csv('train.csv')
    data = df.values
    np.random.shuffle(data)
    X = data[:, 1:] / 255.0  # raw pixel values are 0..255
    Y = data[:, 0]
    if limit is not None:
        X, Y = X[:limit], Y[:limit]
    return X, Y
def plot_k_means(X, K, max_iter=5, beta=3.0, show_plots=False):
    """Run soft (fuzzy) k-means on the rows of X.

    Responsibilities are r_nk ∝ exp(-beta * ||x_n - m_k||^2); means are
    recomputed as responsibility-weighted averages of the data.

    Fixes over the previous version: removed the dead, shadowed loop
    counter ``k`` and leftover commented-out code, and replaced the
    O(N*K) pure-Python distance loop with vectorized NumPy broadcasting.

    Inputs:
        X: (N, D) data matrix.
        K: number of clusters (means are initialized to K distinct rows).
        max_iter: number of EM-style update rounds.
        beta: inverse-temperature controlling assignment softness.
        show_plots: unused; kept for backward interface compatibility.

    Returns (M, R): the (K, D) means and the (N, K) responsibility matrix.
    """
    N, D = X.shape

    # Initialize means to K distinct random data points.
    initial_centers = np.random.choice(N, K, replace=False)
    M = X[initial_centers]

    for _ in range(max_iter):
        # Step 1: soft responsibilities from squared distances (N, K).
        sq_dists = ((X[:, None, :] - M[None, :, :]) ** 2).sum(axis=2)
        exponents = np.exp(-beta * sq_dists)
        R = exponents / exponents.sum(axis=1, keepdims=True)

        # Step 2: means are responsibility-weighted averages of the data.
        M = R.T.dot(X) / R.sum(axis=0)[:, None]

    return M, R
def main():
    """Cluster 1000 MNIST digits with soft k-means and display the means.

    K is set to the number of distinct labels present; each learned mean
    is reshaped to 28x28 and shown — the means should resemble digits.
    """
    X, Y = get_data(1000)

    print("Number of data points:", len(Y))
    M, R = plot_k_means(X, len(set(Y)))
    # Exercise: try different values of K and compare the results —
    # the cluster means should look like digits.
    for k in range(len(M)):
        im = M[k].reshape(28, 28)
        plt.imshow(im, cmap='Blues')
        plt.show()
# Allow importing this module without running the clustering demo.
if __name__ == "__main__":
    main()
from __future__ import print_function, division
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def d(u, v):
diff = u - v
return diff.dot(diff)
def get_data(limit=None):
print("Reading in and transforming data...")
df = pd.read_csv('train.csv')
data = df.values
np.random.shuffle(data)
X = data[:, 1:] / 255.0
Y = data[:, 0]
if limit is not None:
X, Y = X[:limit], Y[:limit]
return X, Y
def plot_k_means(X, K, max_iter=5, beta=3.0, show_plots=False):
N, D = X.shape
exponents = np.empty((N, K))
initial_centers = np.random.choice(N, K, replace=False)
M = X[initial_centers]
k = 0
for i in range(max_iter):
k += 1
for k in range(K):
for n in range(N):
exponents[n,k] = np.exp(-beta*d(M[k], X[n]))
R = exponents / exponents.sum(axis=1, keepdims=True)
for k in range(K):
M[k] = R[:,k].dot(X) / R[:,k].sum()
return M, R
def main():
X, Y = get_data(1000)
print("Number of data points:", len(Y))
M, R = plot_k_means(X, len(set(Y)))
for k in range(len(M)):
im = M[k].reshape(28, 28)
plt.imshow(im, cmap='Blues')
plt.show()
if __name__ == "__main__":
main() | true | true |
f72079f7da23aae91f56118b998102076ab9cb85 | 5,205 | py | Python | email-finder.py | shivangraikar/Twitter-Data-Mining-For-Targeted-Marketing | d12fe807187d438041b4497cbb82ad9ef14d4dbf | [
"MIT"
] | 5 | 2021-02-25T12:10:02.000Z | 2021-11-13T04:03:42.000Z | email-finder.py | shivangraikar/Twitter-Data-Mining-For-Targeted-Marketing | d12fe807187d438041b4497cbb82ad9ef14d4dbf | [
"MIT"
] | null | null | null | email-finder.py | shivangraikar/Twitter-Data-Mining-For-Targeted-Marketing | d12fe807187d438041b4497cbb82ad9ef14d4dbf | [
"MIT"
] | 3 | 2021-02-25T12:10:06.000Z | 2021-03-21T20:26:15.000Z | import string
import time
import threading
import urllib
import re
import io
import sys
from time import sleep
import pickle
import pandas as pd
import psycopg2
def formats(first, middle, last, domain):
    """Generate candidate email addresses for a person at *domain*.

    Combines first/middle/last names (full names and initials) with the
    common delimiters ('', '.', '_', '-') into the usual corporate email
    patterns, e.g. first.last@domain, flast@domain, last_f@domain, ...

    Fixes over the previous version: no longer shadows the ``list``
    builtin, and duplicate addresses (which arise whenever *middle* is
    empty, since e.g. f+middle+last collapses onto f+last) are removed
    while preserving generation order. *first* must be non-empty;
    *middle* and *last* may be empty strings.

    Returns a list of unique candidate addresses.
    """
    f, m, l = first, middle, last
    if len(l) == 0:
        # No last name: only first@domain makes sense.
        local_parts = [f]
    else:
        # Same generation order as before: first/last combos, then
        # first/middle/last combos, then last-first combos.
        local_parts = [
            f[0] + l, f[0] + '.' + l, f[0] + '_' + l,
            f,
            f + l, f + '.' + l, f + '_' + l, f + '-' + l,
            f + l[0], f + '.' + l[0], f + '_' + l[0],
            f[0] + m + l, f[0] + '.' + m + l, f[0] + m + '.' + l,
            f[0] + '_' + m + l, f[0] + m + '_' + l,
            f + m + l, f + m + '.' + l, f + '.' + m + l,
            f + '_' + m + l, f + m + '_' + l,
            f + m + l[0], f + '.' + m + l[0], f + m + '.' + l[0],
            f + '_' + m + l[0], f + m + '_' + l[0],
            l, l + f, l + '.' + f, l + '_' + f,
            l[0] + '.' + f, l[0] + f,
            l + f[0], l + '.' + f[0], l + '_' + f[0],
        ]
    # dict.fromkeys() dedupes while preserving first-seen order.
    return [part + '@' + domain for part in dict.fromkeys(local_parts)]
# ---------------------------------------------------------------------------
# Script body: read scraped names from Postgres and generate candidate
# email addresses for them.
# ---------------------------------------------------------------------------

# SQL for the distinct display names collected by the scraper.
val="select distinct name from keywords"
try:
    # NOTE(review): credentials are hard-coded here; move them to
    # environment variables or a config file before sharing this script.
    conn = psycopg2.connect(database='Hiranandani', user = "postgres", password = "parth123n@#*", host = "127.0.0.1", port = "5432")
except:
    # NOTE(review): bare except hides the real error, and after a failed
    # connect `conn` is undefined, so read_sql below raises NameError.
    print("Create database first")

df=pd.read_sql(val,conn)

# Strip all punctuation from every name.
uname=list()
for i in df['name']:
    uname.append(i.translate(str.maketrans('', '', string.punctuation)))

# Drop names containing professional prefixes (Dr / CA / Er).
# NOTE(review): substring matching also drops any name merely containing
# "dr"/"ca"/"er" anywhere (e.g. "Sandra", "Oscar") — confirm intent.
a=['dr','ca','er']
notdrca=list()
for i in uname:
    if any(x in i.lower() for x in a):
        continue
    else:
        notdrca.append(i)

# Buckets by number of name tokens (kept only for inspection/debugging).
len2=list()
l1=list()
l3=list()
ln=list()
email_list=list()

# Sanity check: nothing with a prefix should remain at this point.
for i in notdrca:
    if any(x in i.lower() for x in a):
        print(i)

# Generate candidate addresses per name, bucketed by token count.
for i in notdrca:
    try:
        i=i.lower()
        s=i.split()
        if len(s)==2:
            # NOTE(review): for two-token names the second token is passed
            # as *middle* with an empty *last*, so formats() effectively
            # ignores it — likely should be formats(s[0], '', s[1], ...).
            email_list.extend(formats(s[0],s[1],'','gmail.com'))
            len2.append(i)
        elif len(s)==1:
            email_list.extend(formats(s[0],'','','gmail.com'))
            l1.append(i)
        elif len(s)==3:
            email_list.extend(formats(s[0],s[1],s[2],'gmail.com'))
            l3.append(i)
        elif len(s)>3:
            # Names with more than three tokens are skipped entirely.
            ln.append(i)
            continue
    except:
        # NOTE(review): bare except silently drops names (e.g. empty
        # strings hitting first[0]); consider logging the failure.
        continue

# Persist the generated addresses.
try:
    h=open('emails.pickle','wb')
except Exception as e:
    print(e)
pickle.dump(email_list,h)

# NOTE(review): leftover debugging code — validates one hard-coded
# address and prints None when it doesn't match. Also note the pickle
# file handle `h` above is never closed; prefer a `with` block.
regex = '^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$'
match=re.match(regex,'harsha_nihar@yahoon')
if match==None:
    print(match)
| 36.65493 | 136 | 0.529491 | import string
import time
import threading
import urllib
import re
import io
import sys
from time import sleep
import pickle
import pandas as pd
import psycopg2
def formats(first, middle, last, domain):
list = []
if len(last)==0:
list.append(first + '@' + domain)
else:
list.append(first[0] + last + '@' + domain)
list.append(first[0] + '.' + last + '@' + domain)
list.append(first[0] + '_' + last + '@' + domain)
list.append(first + '@' + domain)
list.append(first + last + '@' + domain)
list.append(first + '.' + last + '@' + domain)
list.append(first + '_' + last + '@' + domain)
list.append(first + '-' + last + '@' + domain)
list.append(first + last[0] + '@' + domain)
list.append(first + '.' + last[0] + '@' + domain)
list.append(first + '_' + last[0] + '@' + domain)
list.append(first[0] + middle + last + '@' + domain)
list.append(first[0] + '.' + middle + last + '@' + domain)
list.append(first[0] + middle + '.' + last + '@' + domain)
list.append(first[0] + '_' + middle+ last + '@' + domain)
list.append(first[0] + middle +'_' + last + '@' + domain)
list.append(first + middle+ last + '@' + domain)
list.append(first + middle + '.' + last + '@' + domain)
list.append(first + '.' + middle + last + '@' + domain)
list.append(first + '_' + middle + last + '@' + domain)
list.append(first + middle + '_' + last + '@' + domain)
list.append(first + middle+ last[0] + '@' + domain)
list.append(first + '.' + middle +last[0] + '@' + domain)
list.append(first + middle + '.' +last[0] + '@' + domain)
list.append(first + '_' + middle +last[0] + '@' + domain)
list.append(first + middle +'_' + last[0] + '@' + domain)
list.append(last + '@' + domain)
list.append(last + first+ '@' + domain)
list.append(last + '.' + first + '@' + domain)
list.append(last + '_' + first + '@' + domain)
list.append(last[0] + '.' + first + '@' + domain)
list.append(last[0] + first + '@' + domain)
list.append(last + first[0] + '@' + domain)
list.append(last + '.' + first[0] + '@' + domain)
list.append(last + '_' + first[0] + '@' + domain)
return(list)
val="select distinct name from keywords"
try:
conn = psycopg2.connect(database='Hiranandani', user = "postgres", password = "parth123n@#*", host = "127.0.0.1", port = "5432")
except:
print("Create database first")
df=pd.read_sql(val,conn)
uname=list()
for i in df['name']:
uname.append(i.translate(str.maketrans('', '', string.punctuation)))
a=['dr','ca','er']
notdrca=list()
for i in uname:
if any(x in i.lower() for x in a):
continue
else:
notdrca.append(i)
len2=list()
l1=list()
l3=list()
ln=list()
email_list=list()
for i in notdrca:
if any(x in i.lower() for x in a):
print(i)
for i in notdrca:
try:
i=i.lower()
s=i.split()
if len(s)==2:
email_list.extend(formats(s[0],s[1],'','gmail.com'))
len2.append(i)
elif len(s)==1:
email_list.extend(formats(s[0],'','','gmail.com'))
l1.append(i)
elif len(s)==3:
email_list.extend(formats(s[0],s[1],s[2],'gmail.com'))
l3.append(i)
elif len(s)>3:
ln.append(i)
continue
except:
continue
try:
h=open('emails.pickle','wb')
except Exception as e:
print(e)
pickle.dump(email_list,h)
regex = '^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$'
match=re.match(regex,'harsha_nihar@yahoon')
if match==None:
print(match)
| true | true |
f7207a2b951aa4caf689bd23c876ef2c79f64116 | 22,958 | py | Python | lib/python3.7/site-packages/django/db/backends/sqlite3/base.py | Boring-Mind/DjangoGirls1 | 54ac8f01d12785470fd5a4ece759206639997122 | [
"Apache-2.0"
] | 304 | 2015-01-06T18:02:49.000Z | 2021-12-11T18:08:37.000Z | lib/python3.7/site-packages/django/db/backends/sqlite3/base.py | Boring-Mind/DjangoGirls1 | 54ac8f01d12785470fd5a4ece759206639997122 | [
"Apache-2.0"
] | 123 | 2019-09-10T14:48:01.000Z | 2019-11-28T21:24:06.000Z | virtual/lib/python3.6/site-packages/django/db/backends/sqlite3/base.py | Krasivaya/Tracks | c18d1c9222dff39e4678d44495a8a7d9434339ff | [
"MIT"
] | 41 | 2015-04-11T14:58:02.000Z | 2021-11-13T20:47:58.000Z | """
SQLite backend for the sqlite3 module in the standard library.
"""
import datetime
import decimal
import functools
import math
import operator
import re
import statistics
import warnings
from itertools import chain
from sqlite3 import dbapi2 as Database
import pytz
from django.core.exceptions import ImproperlyConfigured
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.utils import timezone
from django.utils.dateparse import parse_datetime, parse_time
from django.utils.duration import duration_microseconds
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
def decoder(conv_func):
    """Return a converter that decodes a bytestring before applying conv_func.

    The sqlite3 module hands registered converters raw bytes; this adapts
    text-based parsers (e.g. parse_datetime) to that interface.
    """
    def convert(value):
        return conv_func(value.decode())
    return convert
def none_guard(func):
    """Decorator that short-circuits to None when any positional arg is None.

    Mirrors SQL semantics, where most functions yield NULL for NULL input;
    keeps the custom SQLite functions registered below simple.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if None in args:
            return None
        return func(*args, **kwargs)
    return wrapper
def list_aggregate(function):
"""
Return an aggregate class that accumulates values in a list and applies
the provided function to the data.
"""
return type('ListAggregate', (list,), {'finalize': function, 'step': list.append})
def check_sqlite_version():
    """Raise ImproperlyConfigured if the runtime SQLite is older than 3.8.3."""
    if Database.sqlite_version_info < (3, 8, 3):
        raise ImproperlyConfigured('SQLite 3.8.3 or later is required (found %s).' % Database.sqlite_version)
check_sqlite_version()

# Register converters so values coming back from SQLite are turned into the
# proper Python types (the string converters must decode bytes first).
Database.register_converter("bool", b'1'.__eq__)
Database.register_converter("time", decoder(parse_time))
Database.register_converter("datetime", decoder(parse_datetime))
Database.register_converter("timestamp", decoder(parse_datetime))
Database.register_converter("TIMESTAMP", decoder(parse_datetime))

# Store Decimal values as their string representation on the way in.
Database.register_adapter(decimal.Decimal, str)
class DatabaseWrapper(BaseDatabaseWrapper):
    """Django database backend wrapper for SQLite (stdlib sqlite3 module)."""
    vendor = 'sqlite'
    display_name = 'SQLite'
    # SQLite doesn't actually support most of these types, but it "does the right
    # thing" given more verbose field definitions, so leave them as is so that
    # schema inspection is more useful.
    data_types = {
        'AutoField': 'integer',
        'BigAutoField': 'integer',
        'BinaryField': 'BLOB',
        'BooleanField': 'bool',
        'CharField': 'varchar(%(max_length)s)',
        'DateField': 'date',
        'DateTimeField': 'datetime',
        'DecimalField': 'decimal',
        'DurationField': 'bigint',
        'FileField': 'varchar(%(max_length)s)',
        'FilePathField': 'varchar(%(max_length)s)',
        'FloatField': 'real',
        'IntegerField': 'integer',
        'BigIntegerField': 'bigint',
        'IPAddressField': 'char(15)',
        'GenericIPAddressField': 'char(39)',
        'NullBooleanField': 'bool',
        'OneToOneField': 'integer',
        'PositiveIntegerField': 'integer unsigned',
        'PositiveSmallIntegerField': 'smallint unsigned',
        'SlugField': 'varchar(%(max_length)s)',
        'SmallIntegerField': 'smallint',
        'TextField': 'text',
        'TimeField': 'time',
        'UUIDField': 'char(32)',
    }
    data_type_check_constraints = {
        'PositiveIntegerField': '"%(column)s" >= 0',
        'PositiveSmallIntegerField': '"%(column)s" >= 0',
    }
    data_types_suffix = {
        'AutoField': 'AUTOINCREMENT',
        'BigAutoField': 'AUTOINCREMENT',
    }
    # SQLite requires LIKE statements to include an ESCAPE clause if the value
    # being escaped has a percent or underscore in it.
    # See https://www.sqlite.org/lang_expr.html for an explanation.
    operators = {
        'exact': '= %s',
        'iexact': "LIKE %s ESCAPE '\\'",
        'contains': "LIKE %s ESCAPE '\\'",
        'icontains': "LIKE %s ESCAPE '\\'",
        'regex': 'REGEXP %s',
        'iregex': "REGEXP '(?i)' || %s",
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': "LIKE %s ESCAPE '\\'",
        'endswith': "LIKE %s ESCAPE '\\'",
        'istartswith': "LIKE %s ESCAPE '\\'",
        'iendswith': "LIKE %s ESCAPE '\\'",
    }

    # The patterns below are used to generate SQL pattern lookup clauses when
    # the right-hand side of the lookup isn't a raw string (it might be an expression
    # or the result of a bilateral transformation).
    # In those cases, special characters for LIKE operators (e.g. \, *, _) should be
    # escaped on database side.
    #
    # Note: we use str.format() here for readability as '%' is used as a wildcard for
    # the LIKE operator.
    pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
    pattern_ops = {
        'contains': r"LIKE '%%' || {} || '%%' ESCAPE '\'",
        'icontains': r"LIKE '%%' || UPPER({}) || '%%' ESCAPE '\'",
        'startswith': r"LIKE {} || '%%' ESCAPE '\'",
        'istartswith': r"LIKE UPPER({}) || '%%' ESCAPE '\'",
        'endswith': r"LIKE '%%' || {} ESCAPE '\'",
        'iendswith': r"LIKE '%%' || UPPER({}) ESCAPE '\'",
    }

    Database = Database
    SchemaEditorClass = DatabaseSchemaEditor
    # Classes instantiated in __init__().
    client_class = DatabaseClient
    creation_class = DatabaseCreation
    features_class = DatabaseFeatures
    introspection_class = DatabaseIntrospection
    ops_class = DatabaseOperations

    def get_connection_params(self):
        """Build the kwargs dict passed to sqlite3.connect() from settings."""
        settings_dict = self.settings_dict
        if not settings_dict['NAME']:
            raise ImproperlyConfigured(
                "settings.DATABASES is improperly configured. "
                "Please supply the NAME value.")
        kwargs = {
            'database': settings_dict['NAME'],
            'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
            **settings_dict['OPTIONS'],
        }
        # Always allow the underlying SQLite connection to be shareable
        # between multiple threads. The safe-guarding will be handled at a
        # higher level by the `BaseDatabaseWrapper.allow_thread_sharing`
        # property. This is necessary as the shareability is disabled by
        # default in pysqlite and it cannot be changed once a connection is
        # opened.
        if 'check_same_thread' in kwargs and kwargs['check_same_thread']:
            warnings.warn(
                'The `check_same_thread` option was provided and set to '
                'True. It will be overridden with False. Use the '
                '`DatabaseWrapper.allow_thread_sharing` property instead '
                'for controlling thread shareability.',
                RuntimeWarning
            )
        kwargs.update({'check_same_thread': False, 'uri': True})
        return kwargs

    def get_new_connection(self, conn_params):
        """Open a sqlite3 connection and register Django's custom SQL
        functions and aggregates on it."""
        conn = Database.connect(**conn_params)
        conn.create_function("django_date_extract", 2, _sqlite_datetime_extract)
        conn.create_function("django_date_trunc", 2, _sqlite_date_trunc)
        conn.create_function("django_datetime_cast_date", 2, _sqlite_datetime_cast_date)
        conn.create_function("django_datetime_cast_time", 2, _sqlite_datetime_cast_time)
        conn.create_function("django_datetime_extract", 3, _sqlite_datetime_extract)
        conn.create_function("django_datetime_trunc", 3, _sqlite_datetime_trunc)
        conn.create_function("django_time_extract", 2, _sqlite_time_extract)
        conn.create_function("django_time_trunc", 2, _sqlite_time_trunc)
        conn.create_function("django_time_diff", 2, _sqlite_time_diff)
        conn.create_function("django_timestamp_diff", 2, _sqlite_timestamp_diff)
        conn.create_function("django_format_dtdelta", 3, _sqlite_format_dtdelta)
        conn.create_function('regexp', 2, _sqlite_regexp)
        conn.create_function('ACOS', 1, none_guard(math.acos))
        conn.create_function('ASIN', 1, none_guard(math.asin))
        conn.create_function('ATAN', 1, none_guard(math.atan))
        conn.create_function('ATAN2', 2, none_guard(math.atan2))
        conn.create_function('CEILING', 1, none_guard(math.ceil))
        conn.create_function('COS', 1, none_guard(math.cos))
        conn.create_function('COT', 1, none_guard(lambda x: 1 / math.tan(x)))
        conn.create_function('DEGREES', 1, none_guard(math.degrees))
        conn.create_function('EXP', 1, none_guard(math.exp))
        conn.create_function('FLOOR', 1, none_guard(math.floor))
        conn.create_function('LN', 1, none_guard(math.log))
        conn.create_function('LOG', 2, none_guard(lambda x, y: math.log(y, x)))
        conn.create_function('LPAD', 3, _sqlite_lpad)
        conn.create_function('MOD', 2, none_guard(math.fmod))
        conn.create_function('PI', 0, lambda: math.pi)
        conn.create_function('POWER', 2, none_guard(operator.pow))
        conn.create_function('RADIANS', 1, none_guard(math.radians))
        conn.create_function('REPEAT', 2, none_guard(operator.mul))
        conn.create_function('REVERSE', 1, none_guard(lambda x: x[::-1]))
        conn.create_function('RPAD', 3, _sqlite_rpad)
        conn.create_function('SIN', 1, none_guard(math.sin))
        conn.create_function('SQRT', 1, none_guard(math.sqrt))
        conn.create_function('TAN', 1, none_guard(math.tan))
        conn.create_aggregate('STDDEV_POP', 1, list_aggregate(statistics.pstdev))
        conn.create_aggregate('STDDEV_SAMP', 1, list_aggregate(statistics.stdev))
        conn.create_aggregate('VAR_POP', 1, list_aggregate(statistics.pvariance))
        conn.create_aggregate('VAR_SAMP', 1, list_aggregate(statistics.variance))
        conn.execute('PRAGMA foreign_keys = ON')
        return conn

    def init_connection_state(self):
        """SQLite needs no per-connection state initialization."""
        pass

    def create_cursor(self, name=None):
        """Return a cursor that rewrites %s placeholders to qmark style."""
        return self.connection.cursor(factory=SQLiteCursorWrapper)

    def close(self):
        self.validate_thread_sharing()
        # If database is in memory, closing the connection destroys the
        # database. To prevent accidental data loss, ignore close requests on
        # an in-memory db.
        if not self.is_in_memory_db():
            BaseDatabaseWrapper.close(self)

    def _savepoint_allowed(self):
        # When 'isolation_level' is not None, sqlite3 commits before each
        # savepoint; it's a bug. When it is None, savepoints don't make sense
        # because autocommit is enabled. The only exception is inside 'atomic'
        # blocks. To work around that bug, on SQLite, 'atomic' starts a
        # transaction explicitly rather than simply disable autocommit.
        return self.in_atomic_block

    def _set_autocommit(self, autocommit):
        if autocommit:
            level = None
        else:
            # sqlite3's internal default is ''. It's different from None.
            # See Modules/_sqlite/connection.c.
            level = ''
        # 'isolation_level' is a misleading API.
        # SQLite always runs at the SERIALIZABLE isolation level.
        with self.wrap_database_errors:
            self.connection.isolation_level = level

    def disable_constraint_checking(self):
        """Turn foreign key enforcement off. Return True only if the pragma
        actually took effect (it can't inside a multi-statement
        transaction)."""
        with self.cursor() as cursor:
            cursor.execute('PRAGMA foreign_keys = OFF')
            # Foreign key constraints cannot be turned off while in a multi-
            # statement transaction. Fetch the current state of the pragma
            # to determine if constraints are effectively disabled.
            enabled = cursor.execute('PRAGMA foreign_keys').fetchone()[0]
        return not bool(enabled)

    def enable_constraint_checking(self):
        """Re-enable foreign key enforcement on this connection."""
        self.cursor().execute('PRAGMA foreign_keys = ON')

    def check_constraints(self, table_names=None):
        """
        Check each table name in `table_names` for rows with invalid foreign
        key references. This method is intended to be used in conjunction with
        `disable_constraint_checking()` and `enable_constraint_checking()`, to
        determine if rows with invalid references were entered while constraint
        checks were off.
        """
        if self.features.supports_pragma_foreign_key_check:
            with self.cursor() as cursor:
                if table_names is None:
                    # NOTE(review): looks like this should use `cursor` from
                    # the context manager rather than opening a second cursor
                    # via self.cursor() — confirm against upstream.
                    violations = self.cursor().execute('PRAGMA foreign_key_check').fetchall()
                else:
                    violations = chain.from_iterable(
                        cursor.execute('PRAGMA foreign_key_check(%s)' % table_name).fetchall()
                        for table_name in table_names
                    )
                # See https://www.sqlite.org/pragma.html#pragma_foreign_key_check
                for table_name, rowid, referenced_table_name, foreign_key_index in violations:
                    foreign_key = cursor.execute(
                        'PRAGMA foreign_key_list(%s)' % table_name
                    ).fetchall()[foreign_key_index]
                    column_name, referenced_column_name = foreign_key[3:5]
                    primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
                    primary_key_value, bad_value = cursor.execute(
                        'SELECT %s, %s FROM %s WHERE rowid = %%s' % (
                            primary_key_column_name, column_name, table_name
                        ),
                        (rowid,),
                    ).fetchone()
                    raise utils.IntegrityError(
                        "The row in table '%s' with primary key '%s' has an "
                        "invalid foreign key: %s.%s contains a value '%s' that "
                        "does not have a corresponding value in %s.%s." % (
                            table_name, primary_key_value, table_name, column_name,
                            bad_value, referenced_table_name, referenced_column_name
                        )
                    )
        else:
            # Fallback for SQLite versions without PRAGMA foreign_key_check:
            # LEFT JOIN each referencing table against its target and look
            # for references that resolve to NULL.
            with self.cursor() as cursor:
                if table_names is None:
                    table_names = self.introspection.table_names(cursor)
                for table_name in table_names:
                    primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
                    if not primary_key_column_name:
                        continue
                    key_columns = self.introspection.get_key_columns(cursor, table_name)
                    for column_name, referenced_table_name, referenced_column_name in key_columns:
                        cursor.execute(
                            """
                            SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
                            LEFT JOIN `%s` as REFERRED
                            ON (REFERRING.`%s` = REFERRED.`%s`)
                            WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL
                            """
                            % (
                                primary_key_column_name, column_name, table_name,
                                referenced_table_name, column_name, referenced_column_name,
                                column_name, referenced_column_name,
                            )
                        )
                        for bad_row in cursor.fetchall():
                            raise utils.IntegrityError(
                                "The row in table '%s' with primary key '%s' has an "
                                "invalid foreign key: %s.%s contains a value '%s' that "
                                "does not have a corresponding value in %s.%s." % (
                                    table_name, bad_row[0], table_name, column_name,
                                    bad_row[1], referenced_table_name, referenced_column_name,
                                )
                            )

    def is_usable(self):
        """Always True: there is no server connection that could be lost."""
        return True

    def _start_transaction_under_autocommit(self):
        """
        Start a transaction explicitly in autocommit mode.

        Staying in autocommit mode works around a bug of sqlite3 that breaks
        savepoints when autocommit is disabled.
        """
        self.cursor().execute("BEGIN")

    def is_in_memory_db(self):
        """Return True if the configured database lives only in memory."""
        return self.creation.is_in_memory_db(self.settings_dict['NAME'])
# Matches "%s" placeholders that aren't escaped as "%%s".
FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')


class SQLiteCursorWrapper(Database.Cursor):
    """
    Django uses "format" style placeholders, but pysqlite2 uses "qmark" style.
    This fixes it -- but note that if you want to use a literal "%s" in a query,
    you'll need to use "%%s".
    """
    def execute(self, query, params=None):
        if params is None:
            # No params: pass the query through untouched so literal '%'
            # characters survive.
            return Database.Cursor.execute(self, query)
        query = self.convert_query(query)
        return Database.Cursor.execute(self, query, params)

    def executemany(self, query, param_list):
        query = self.convert_query(query)
        return Database.Cursor.executemany(self, query, param_list)

    def convert_query(self, query):
        """Rewrite "%s" placeholders to "?" and unescape "%%" to "%"."""
        return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%')
def _sqlite_datetime_parse(dt, tzname=None):
    """Parse an SQLite datetime string into a datetime object.

    Returns None for NULL or unparsable input. When *tzname* is given,
    the result is converted to that timezone via pytz.
    """
    if dt is None:
        return None
    try:
        dt = backend_utils.typecast_timestamp(dt)
    except (TypeError, ValueError):
        return None
    if tzname is not None:
        dt = timezone.localtime(dt, pytz.timezone(tzname))
    return dt
def _sqlite_date_trunc(lookup_type, dt):
    """Implement django_date_trunc: truncate a date string to a precision.

    Returns an ISO date string, or None for invalid input or an unknown
    lookup type (implicit fall-through).
    """
    dt = _sqlite_datetime_parse(dt)
    if dt is None:
        return None
    if lookup_type == 'year':
        return "%i-01-01" % dt.year
    elif lookup_type == 'quarter':
        # First month of the quarter containing dt.
        month_in_quarter = dt.month - (dt.month - 1) % 3
        return '%i-%02i-01' % (dt.year, month_in_quarter)
    elif lookup_type == 'month':
        return "%i-%02i-01" % (dt.year, dt.month)
    elif lookup_type == 'week':
        # Monday of dt's week.
        dt = dt - datetime.timedelta(days=dt.weekday())
        return "%i-%02i-%02i" % (dt.year, dt.month, dt.day)
    elif lookup_type == 'day':
        return "%i-%02i-%02i" % (dt.year, dt.month, dt.day)
def _sqlite_time_trunc(lookup_type, dt):
    """Implement django_time_trunc: truncate a time string to a precision.

    Returns an HH:MM:SS string, or None for invalid input or an unknown
    lookup type (implicit fall-through).
    """
    if dt is None:
        return None
    try:
        dt = backend_utils.typecast_time(dt)
    except (ValueError, TypeError):
        return None
    if lookup_type == 'hour':
        return "%02i:00:00" % dt.hour
    elif lookup_type == 'minute':
        return "%02i:%02i:00" % (dt.hour, dt.minute)
    elif lookup_type == 'second':
        return "%02i:%02i:%02i" % (dt.hour, dt.minute, dt.second)
def _sqlite_datetime_cast_date(dt, tzname):
    """Cast a datetime string to an ISO date string in timezone *tzname*."""
    dt = _sqlite_datetime_parse(dt, tzname)
    if dt is None:
        return None
    return dt.date().isoformat()
def _sqlite_datetime_cast_time(dt, tzname):
    """Cast a datetime string to an ISO time string in timezone *tzname*."""
    dt = _sqlite_datetime_parse(dt, tzname)
    if dt is None:
        return None
    return dt.time().isoformat()
def _sqlite_datetime_extract(lookup_type, dt, tzname=None):
    """Extract a datetime component (year, week, quarter, hour, ...).

    Backs both django_date_extract and django_datetime_extract. For
    lookup types without a dedicated branch, the attribute of the same
    name is read off the datetime object.
    """
    dt = _sqlite_datetime_parse(dt, tzname)
    if dt is None:
        return None
    if lookup_type == 'week_day':
        # Django's week_day: Sunday=1 .. Saturday=7.
        return (dt.isoweekday() % 7) + 1
    elif lookup_type == 'week':
        return dt.isocalendar()[1]
    elif lookup_type == 'quarter':
        return math.ceil(dt.month / 3)
    elif lookup_type == 'iso_year':
        return dt.isocalendar()[0]
    else:
        return getattr(dt, lookup_type)
def _sqlite_datetime_trunc(lookup_type, dt, tzname):
    """Implement django_datetime_trunc: truncate a datetime string.

    Returns a "YYYY-MM-DD HH:MM:SS" string truncated to the requested
    precision, or None for invalid input or an unknown lookup type.
    """
    dt = _sqlite_datetime_parse(dt, tzname)
    if dt is None:
        return None
    if lookup_type == 'year':
        return "%i-01-01 00:00:00" % dt.year
    elif lookup_type == 'quarter':
        # First month of the quarter containing dt.
        month_in_quarter = dt.month - (dt.month - 1) % 3
        return '%i-%02i-01 00:00:00' % (dt.year, month_in_quarter)
    elif lookup_type == 'month':
        return "%i-%02i-01 00:00:00" % (dt.year, dt.month)
    elif lookup_type == 'week':
        # Monday of dt's week.
        dt = dt - datetime.timedelta(days=dt.weekday())
        return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
    elif lookup_type == 'day':
        return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
    elif lookup_type == 'hour':
        return "%i-%02i-%02i %02i:00:00" % (dt.year, dt.month, dt.day, dt.hour)
    elif lookup_type == 'minute':
        return "%i-%02i-%02i %02i:%02i:00" % (dt.year, dt.month, dt.day, dt.hour, dt.minute)
    elif lookup_type == 'second':
        return "%i-%02i-%02i %02i:%02i:%02i" % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
def _sqlite_time_extract(lookup_type, dt):
    """Implement django_time_extract(): read one attribute (hour, minute,
    second, ...) off a time string, or None when parsing fails."""
    if dt is None:
        return None
    try:
        parsed = backend_utils.typecast_time(dt)
    except (TypeError, ValueError):
        return None
    return getattr(parsed, lookup_type)
@none_guard
def _sqlite_format_dtdelta(conn, lhs, rhs):
    """Add or subtract two datetime/duration operands.

    Each of LHS and RHS is either an integer number of microseconds or a
    string representing a datetime; ``conn`` carries the operator ('+' adds,
    anything else subtracts). Returns the result as a string, or None if an
    operand cannot be converted. typecast_timestamp yields a naive date or
    datetime, formatted "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]".
    """
    def _coerce(value):
        # Integers are microsecond counts; everything else is parsed as a timestamp.
        if isinstance(value, int):
            return datetime.timedelta(0, 0, value)
        return backend_utils.typecast_timestamp(value)

    try:
        left = _coerce(lhs)
        right = _coerce(rhs)
        result = left + right if conn.strip() == '+' else left - right
    except (ValueError, TypeError):
        return None
    return str(result)
@none_guard
def _sqlite_time_diff(lhs, rhs):
    """Return lhs - rhs for two time strings, expressed in microseconds."""
    def _to_microseconds(value):
        t = backend_utils.typecast_time(value)
        return ((t.hour * 60 + t.minute) * 60 + t.second) * 1000000 + t.microsecond

    return _to_microseconds(lhs) - _to_microseconds(rhs)
@none_guard
def _sqlite_timestamp_diff(lhs, rhs):
    """Return lhs - rhs for two timestamp strings, in microseconds."""
    delta = backend_utils.typecast_timestamp(lhs) - backend_utils.typecast_timestamp(rhs)
    return duration_microseconds(delta)
@none_guard
def _sqlite_regexp(re_pattern, re_string):
    """Implement the REGEXP operator: True when the pattern matches anywhere."""
    return re.search(re_pattern, str(re_string)) is not None
@none_guard
def _sqlite_lpad(text, length, fill_text):
    """Left-pad (or truncate) text to exactly ``length`` characters."""
    shortfall = length - len(text)
    if shortfall <= 0:
        # Already long enough: clip from the right.
        return text[:length]
    return (fill_text * length)[:shortfall] + text
@none_guard
def _sqlite_rpad(text, length, fill_text):
    """Right-pad text with fill_text, then clip to exactly ``length`` characters."""
    padded = text + fill_text * length
    return padded[:length]
| 40.923351 | 115 | 0.619828 | import datetime
import decimal
import functools
import math
import operator
import re
import statistics
import warnings
from itertools import chain
from sqlite3 import dbapi2 as Database
import pytz
from django.core.exceptions import ImproperlyConfigured
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.utils import timezone
from django.utils.dateparse import parse_datetime, parse_time
from django.utils.duration import duration_microseconds
from .client import DatabaseClient
from .creation import DatabaseCreation
from .features import DatabaseFeatures
from .introspection import DatabaseIntrospection
from .operations import DatabaseOperations
from .schema import DatabaseSchemaEditor
def decoder(conv_func):
return lambda s: conv_func(s.decode())
def none_guard(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
return None if None in args else func(*args, **kwargs)
return wrapper
def list_aggregate(function):
return type('ListAggregate', (list,), {'finalize': function, 'step': list.append})
def check_sqlite_version():
if Database.sqlite_version_info < (3, 8, 3):
raise ImproperlyConfigured('SQLite 3.8.3 or later is required (found %s).' % Database.sqlite_version)
check_sqlite_version()
Database.register_converter("bool", b'1'.__eq__)
Database.register_converter("time", decoder(parse_time))
Database.register_converter("datetime", decoder(parse_datetime))
Database.register_converter("timestamp", decoder(parse_datetime))
Database.register_converter("TIMESTAMP", decoder(parse_datetime))
Database.register_adapter(decimal.Decimal, str)
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'sqlite'
display_name = 'SQLite'
# thing" given more verbose field definitions, so leave them as is so that
# schema inspection is more useful.
data_types = {
'AutoField': 'integer',
'BigAutoField': 'integer',
'BinaryField': 'BLOB',
'BooleanField': 'bool',
'CharField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'datetime',
'DecimalField': 'decimal',
'DurationField': 'bigint',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'real',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'char(15)',
'GenericIPAddressField': 'char(39)',
'NullBooleanField': 'bool',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer unsigned',
'PositiveSmallIntegerField': 'smallint unsigned',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
'UUIDField': 'char(32)',
}
data_type_check_constraints = {
'PositiveIntegerField': '"%(column)s" >= 0',
'PositiveSmallIntegerField': '"%(column)s" >= 0',
}
data_types_suffix = {
'AutoField': 'AUTOINCREMENT',
'BigAutoField': 'AUTOINCREMENT',
}
# SQLite requires LIKE statements to include an ESCAPE clause if the value
# being escaped has a percent or underscore in it.
# See https://www.sqlite.org/lang_expr.html for an explanation.
operators = {
'exact': '= %s',
'iexact': "LIKE %s ESCAPE '\\'",
'contains': "LIKE %s ESCAPE '\\'",
'icontains': "LIKE %s ESCAPE '\\'",
'regex': 'REGEXP %s',
'iregex': "REGEXP '(?i)' || %s",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE %s ESCAPE '\\'",
'endswith': "LIKE %s ESCAPE '\\'",
'istartswith': "LIKE %s ESCAPE '\\'",
'iendswith': "LIKE %s ESCAPE '\\'",
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
pattern_ops = {
'contains': r"LIKE '%%' || {} || '%%' ESCAPE '\'",
'icontains': r"LIKE '%%' || UPPER({}) || '%%' ESCAPE '\'",
'startswith': r"LIKE {} || '%%' ESCAPE '\'",
'istartswith': r"LIKE UPPER({}) || '%%' ESCAPE '\'",
'endswith': r"LIKE '%%' || {} ESCAPE '\'",
'iendswith': r"LIKE '%%' || UPPER({}) ESCAPE '\'",
}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
client_class = DatabaseClient
creation_class = DatabaseCreation
features_class = DatabaseFeatures
introspection_class = DatabaseIntrospection
ops_class = DatabaseOperations
def get_connection_params(self):
settings_dict = self.settings_dict
if not settings_dict['NAME']:
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
kwargs = {
'database': settings_dict['NAME'],
'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
**settings_dict['OPTIONS'],
}
if 'check_same_thread' in kwargs and kwargs['check_same_thread']:
warnings.warn(
'The `check_same_thread` option was provided and set to '
'True. It will be overridden with False. Use the '
'`DatabaseWrapper.allow_thread_sharing` property instead '
'for controlling thread shareability.',
RuntimeWarning
)
kwargs.update({'check_same_thread': False, 'uri': True})
return kwargs
def get_new_connection(self, conn_params):
conn = Database.connect(**conn_params)
conn.create_function("django_date_extract", 2, _sqlite_datetime_extract)
conn.create_function("django_date_trunc", 2, _sqlite_date_trunc)
conn.create_function("django_datetime_cast_date", 2, _sqlite_datetime_cast_date)
conn.create_function("django_datetime_cast_time", 2, _sqlite_datetime_cast_time)
conn.create_function("django_datetime_extract", 3, _sqlite_datetime_extract)
conn.create_function("django_datetime_trunc", 3, _sqlite_datetime_trunc)
conn.create_function("django_time_extract", 2, _sqlite_time_extract)
conn.create_function("django_time_trunc", 2, _sqlite_time_trunc)
conn.create_function("django_time_diff", 2, _sqlite_time_diff)
conn.create_function("django_timestamp_diff", 2, _sqlite_timestamp_diff)
conn.create_function("django_format_dtdelta", 3, _sqlite_format_dtdelta)
conn.create_function('regexp', 2, _sqlite_regexp)
conn.create_function('ACOS', 1, none_guard(math.acos))
conn.create_function('ASIN', 1, none_guard(math.asin))
conn.create_function('ATAN', 1, none_guard(math.atan))
conn.create_function('ATAN2', 2, none_guard(math.atan2))
conn.create_function('CEILING', 1, none_guard(math.ceil))
conn.create_function('COS', 1, none_guard(math.cos))
conn.create_function('COT', 1, none_guard(lambda x: 1 / math.tan(x)))
conn.create_function('DEGREES', 1, none_guard(math.degrees))
conn.create_function('EXP', 1, none_guard(math.exp))
conn.create_function('FLOOR', 1, none_guard(math.floor))
conn.create_function('LN', 1, none_guard(math.log))
conn.create_function('LOG', 2, none_guard(lambda x, y: math.log(y, x)))
conn.create_function('LPAD', 3, _sqlite_lpad)
conn.create_function('MOD', 2, none_guard(math.fmod))
conn.create_function('PI', 0, lambda: math.pi)
conn.create_function('POWER', 2, none_guard(operator.pow))
conn.create_function('RADIANS', 1, none_guard(math.radians))
conn.create_function('REPEAT', 2, none_guard(operator.mul))
conn.create_function('REVERSE', 1, none_guard(lambda x: x[::-1]))
conn.create_function('RPAD', 3, _sqlite_rpad)
conn.create_function('SIN', 1, none_guard(math.sin))
conn.create_function('SQRT', 1, none_guard(math.sqrt))
conn.create_function('TAN', 1, none_guard(math.tan))
conn.create_aggregate('STDDEV_POP', 1, list_aggregate(statistics.pstdev))
conn.create_aggregate('STDDEV_SAMP', 1, list_aggregate(statistics.stdev))
conn.create_aggregate('VAR_POP', 1, list_aggregate(statistics.pvariance))
conn.create_aggregate('VAR_SAMP', 1, list_aggregate(statistics.variance))
conn.execute('PRAGMA foreign_keys = ON')
return conn
def init_connection_state(self):
pass
def create_cursor(self, name=None):
return self.connection.cursor(factory=SQLiteCursorWrapper)
def close(self):
self.validate_thread_sharing()
if not self.is_in_memory_db():
BaseDatabaseWrapper.close(self)
def _savepoint_allowed(self):
return self.in_atomic_block
def _set_autocommit(self, autocommit):
if autocommit:
level = None
else:
level = ''
with self.wrap_database_errors:
self.connection.isolation_level = level
def disable_constraint_checking(self):
with self.cursor() as cursor:
cursor.execute('PRAGMA foreign_keys = OFF')
enabled = cursor.execute('PRAGMA foreign_keys').fetchone()[0]
return not bool(enabled)
def enable_constraint_checking(self):
self.cursor().execute('PRAGMA foreign_keys = ON')
def check_constraints(self, table_names=None):
if self.features.supports_pragma_foreign_key_check:
with self.cursor() as cursor:
if table_names is None:
violations = self.cursor().execute('PRAGMA foreign_key_check').fetchall()
else:
violations = chain.from_iterable(
cursor.execute('PRAGMA foreign_key_check(%s)' % table_name).fetchall()
for table_name in table_names
)
e_name, rowid, referenced_table_name, foreign_key_index in violations:
foreign_key = cursor.execute(
'PRAGMA foreign_key_list(%s)' % table_name
).fetchall()[foreign_key_index]
column_name, referenced_column_name = foreign_key[3:5]
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
primary_key_value, bad_value = cursor.execute(
'SELECT %s, %s FROM %s WHERE rowid = %%s' % (
primary_key_column_name, column_name, table_name
),
(rowid,),
).fetchone()
raise utils.IntegrityError(
"The row in table '%s' with primary key '%s' has an "
"invalid foreign key: %s.%s contains a value '%s' that "
"does not have a corresponding value in %s.%s." % (
table_name, primary_key_value, table_name, column_name,
bad_value, referenced_table_name, referenced_column_name
)
)
else:
with self.cursor() as cursor:
if table_names is None:
table_names = self.introspection.table_names(cursor)
for table_name in table_names:
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
if not primary_key_column_name:
continue
key_columns = self.introspection.get_key_columns(cursor, table_name)
for column_name, referenced_table_name, referenced_column_name in key_columns:
cursor.execute(
"""
SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
LEFT JOIN `%s` as REFERRED
ON (REFERRING.`%s` = REFERRED.`%s`)
WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL
"""
% (
primary_key_column_name, column_name, table_name,
referenced_table_name, column_name, referenced_column_name,
column_name, referenced_column_name,
)
)
for bad_row in cursor.fetchall():
raise utils.IntegrityError(
"The row in table '%s' with primary key '%s' has an "
"invalid foreign key: %s.%s contains a value '%s' that "
"does not have a corresponding value in %s.%s." % (
table_name, bad_row[0], table_name, column_name,
bad_row[1], referenced_table_name, referenced_column_name,
)
)
def is_usable(self):
return True
def _start_transaction_under_autocommit(self):
self.cursor().execute("BEGIN")
def is_in_memory_db(self):
return self.creation.is_in_memory_db(self.settings_dict['NAME'])
FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')
class SQLiteCursorWrapper(Database.Cursor):
def execute(self, query, params=None):
if params is None:
return Database.Cursor.execute(self, query)
query = self.convert_query(query)
return Database.Cursor.execute(self, query, params)
def executemany(self, query, param_list):
query = self.convert_query(query)
return Database.Cursor.executemany(self, query, param_list)
def convert_query(self, query):
return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%')
def _sqlite_datetime_parse(dt, tzname=None):
if dt is None:
return None
try:
dt = backend_utils.typecast_timestamp(dt)
except (TypeError, ValueError):
return None
if tzname is not None:
dt = timezone.localtime(dt, pytz.timezone(tzname))
return dt
def _sqlite_date_trunc(lookup_type, dt):
dt = _sqlite_datetime_parse(dt)
if dt is None:
return None
if lookup_type == 'year':
return "%i-01-01" % dt.year
elif lookup_type == 'quarter':
month_in_quarter = dt.month - (dt.month - 1) % 3
return '%i-%02i-01' % (dt.year, month_in_quarter)
elif lookup_type == 'month':
return "%i-%02i-01" % (dt.year, dt.month)
elif lookup_type == 'week':
dt = dt - datetime.timedelta(days=dt.weekday())
return "%i-%02i-%02i" % (dt.year, dt.month, dt.day)
elif lookup_type == 'day':
return "%i-%02i-%02i" % (dt.year, dt.month, dt.day)
def _sqlite_time_trunc(lookup_type, dt):
if dt is None:
return None
try:
dt = backend_utils.typecast_time(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'hour':
return "%02i:00:00" % dt.hour
elif lookup_type == 'minute':
return "%02i:%02i:00" % (dt.hour, dt.minute)
elif lookup_type == 'second':
return "%02i:%02i:%02i" % (dt.hour, dt.minute, dt.second)
def _sqlite_datetime_cast_date(dt, tzname):
dt = _sqlite_datetime_parse(dt, tzname)
if dt is None:
return None
return dt.date().isoformat()
def _sqlite_datetime_cast_time(dt, tzname):
dt = _sqlite_datetime_parse(dt, tzname)
if dt is None:
return None
return dt.time().isoformat()
def _sqlite_datetime_extract(lookup_type, dt, tzname=None):
dt = _sqlite_datetime_parse(dt, tzname)
if dt is None:
return None
if lookup_type == 'week_day':
return (dt.isoweekday() % 7) + 1
elif lookup_type == 'week':
return dt.isocalendar()[1]
elif lookup_type == 'quarter':
return math.ceil(dt.month / 3)
elif lookup_type == 'iso_year':
return dt.isocalendar()[0]
else:
return getattr(dt, lookup_type)
def _sqlite_datetime_trunc(lookup_type, dt, tzname):
dt = _sqlite_datetime_parse(dt, tzname)
if dt is None:
return None
if lookup_type == 'year':
return "%i-01-01 00:00:00" % dt.year
elif lookup_type == 'quarter':
month_in_quarter = dt.month - (dt.month - 1) % 3
return '%i-%02i-01 00:00:00' % (dt.year, month_in_quarter)
elif lookup_type == 'month':
return "%i-%02i-01 00:00:00" % (dt.year, dt.month)
elif lookup_type == 'week':
dt = dt - datetime.timedelta(days=dt.weekday())
return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
elif lookup_type == 'day':
return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
elif lookup_type == 'hour':
return "%i-%02i-%02i %02i:00:00" % (dt.year, dt.month, dt.day, dt.hour)
elif lookup_type == 'minute':
return "%i-%02i-%02i %02i:%02i:00" % (dt.year, dt.month, dt.day, dt.hour, dt.minute)
elif lookup_type == 'second':
return "%i-%02i-%02i %02i:%02i:%02i" % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
def _sqlite_time_extract(lookup_type, dt):
if dt is None:
return None
try:
dt = backend_utils.typecast_time(dt)
except (ValueError, TypeError):
return None
return getattr(dt, lookup_type)
@none_guard
def _sqlite_format_dtdelta(conn, lhs, rhs):
try:
real_lhs = datetime.timedelta(0, 0, lhs) if isinstance(lhs, int) else backend_utils.typecast_timestamp(lhs)
real_rhs = datetime.timedelta(0, 0, rhs) if isinstance(rhs, int) else backend_utils.typecast_timestamp(rhs)
if conn.strip() == '+':
out = real_lhs + real_rhs
else:
out = real_lhs - real_rhs
except (ValueError, TypeError):
return None
return str(out)
@none_guard
def _sqlite_time_diff(lhs, rhs):
left = backend_utils.typecast_time(lhs)
right = backend_utils.typecast_time(rhs)
return (
(left.hour * 60 * 60 * 1000000) +
(left.minute * 60 * 1000000) +
(left.second * 1000000) +
(left.microsecond) -
(right.hour * 60 * 60 * 1000000) -
(right.minute * 60 * 1000000) -
(right.second * 1000000) -
(right.microsecond)
)
@none_guard
def _sqlite_timestamp_diff(lhs, rhs):
left = backend_utils.typecast_timestamp(lhs)
right = backend_utils.typecast_timestamp(rhs)
return duration_microseconds(left - right)
@none_guard
def _sqlite_regexp(re_pattern, re_string):
return bool(re.search(re_pattern, str(re_string)))
@none_guard
def _sqlite_lpad(text, length, fill_text):
if len(text) >= length:
return text[:length]
return (fill_text * length)[:length - len(text)] + text
@none_guard
def _sqlite_rpad(text, length, fill_text):
return (text + fill_text * length)[:length]
| true | true |
f7207bb8de11f352cd6af90099062c5d0ac72db3 | 2,345 | py | Python | tests/ccapi/test__attr__.py | achillesrasquinha/CCPy | 7a5b4fc008a9a0c90caee5d2d6a8c67393dcb822 | [
"MIT"
] | 9 | 2020-05-12T08:16:35.000Z | 2022-01-06T03:22:18.000Z | tests/ccapi/test__attr__.py | achillesrasquinha/CCPy | 7a5b4fc008a9a0c90caee5d2d6a8c67393dcb822 | [
"MIT"
] | 3 | 2020-10-14T16:29:24.000Z | 2021-10-04T07:24:34.000Z | tests/ccapi/test__attr__.py | achillesrasquinha/CCPy | 7a5b4fc008a9a0c90caee5d2d6a8c67393dcb822 | [
"MIT"
] | 1 | 2019-12-30T23:13:46.000Z | 2019-12-30T23:13:46.000Z | # imports - standard imports
import os.path as osp
import subprocess
# imports - test imports
import pytest
# imports - module imports
from ccapi.__attr__ import (
read,
pardir,
strip,
safe_decode,
sequence_filter,
get_revision
)
def call(*args, **kwargs):
    """Run a command: each positional argument is one token of the command line."""
    subprocess.call(list(args), **kwargs)
def test_read(tmpdir):
    """read() must return file contents verbatim, including blank lines."""
    workdir = tmpdir.mkdir("tmp")
    simple = workdir.join("foobar.txt")
    simple.write("foobar")
    assert read(str(simple)) == simple.read()
    multiline = workdir.join("barfoo.txt")
    multiline.write(
        """
        foobar
        \n
        barfoo
        """
    )
    assert read(str(multiline)) == multiline.read()
def test_pardir():
    """pardir() walks up one parent directory per level requested."""
    here = __file__
    assert pardir(here) == osp.dirname(here)
    assert pardir(here, 2) == osp.dirname(osp.dirname(here))
def test_strip():
    """strip() trims surrounding whitespace but keeps interior newlines."""
    cases = (
        ("foobar", "foobar"),
        ("\n foobar\nfoobar \n ", "foobar\nfoobar"),
        ("\n\n\n", ""),
        ("\r\nfoobar\nfoobar\n", "foobar\nfoobar"),
    )
    for raw, expected in cases:
        assert strip(raw) == expected
def test_safe_decode():
    """safe_decode() decodes bytes, and passes str and non-text through."""
    for value, expected in (
        (b"foobar", "foobar"),
        ("foobar", "foobar"),
        (123456789, 123456789),
    ):
        assert safe_decode(value) == expected
def test_sequence_filter():
    """sequence_filter() applies the predicate and casts to the requested type."""
    data = [0, 1, 2, 3, 4, 5]
    assert sequence_filter(data, filter_ = lambda n: n % 2 == 0) == [0, 2, 4]
    assert sequence_filter(data, filter_ = lambda n: n % 2 != 0, type_ = tuple) == (1, 3, 5)
def test_get_revision(tmpdir):
    """get_revision() raises outside a repo / before any commit, and
    returns a 40-character SHA-1 once a commit exists."""
    directory = tmpdir.mkdir("tmp")
    path = str(directory)
    # Not a git repository yet: the underlying git call must fail ...
    with pytest.raises(subprocess.CalledProcessError):
        get_revision(path)
    # ... unless the caller opts out of raising, in which case None is returned.
    assert get_revision(path, raise_err = False) == None
    # Initialize the git repository
    call("git","init",path)
    call("git","config","user.email","foobar@foobar.com", cwd = path)
    call("git","config","user.name" ,"Foo Bar", cwd = path)
    # An initialized repo with zero commits still has no HEAD revision.
    with pytest.raises(subprocess.CalledProcessError):
        get_revision(path)
    tempfile = directory.join("foobar.txt")
    tempfile.write("foobar")
    call("git","add",".", cwd = path)
    call("git","commit","-m","'Test Commit'", cwd = path)
    # A full SHA-1 revision string is 40 hexadecimal characters.
    assert len(get_revision(path)) == 40
assert len(get_revision(path, short = True)) == 7 | 26.055556 | 99 | 0.620469 |
import os.path as osp
import subprocess
import pytest
from ccapi.__attr__ import (
read,
pardir,
strip,
safe_decode,
sequence_filter,
get_revision
)
def call(*args, **kwargs):
subprocess.call(args, **kwargs)
def test_read(tmpdir):
directory = tmpdir.mkdir("tmp")
tempfile = directory.join("foobar.txt")
tempfile.write("foobar")
assert tempfile.read() == read(str(tempfile))
tempfile = directory.join("barfoo.txt")
tempfile.write(\
"""
foobar
\n
barfoo
"""
)
assert tempfile.read() == read(str(tempfile))
def test_pardir():
assert pardir(__file__) == osp.dirname(__file__)
assert pardir(__file__, 2) == osp.dirname(osp.dirname(__file__))
def test_strip():
string = "foobar"
assert strip(string) == string
string = "\n foobar\nfoobar \n "
assert strip(string) == "foobar\nfoobar"
string = "\n\n\n"
assert strip(string) == ""
string = "\r\nfoobar\nfoobar\n"
assert strip(string) == "foobar\nfoobar"
def test_safe_decode():
assert safe_decode(b"foobar") == "foobar"
assert safe_decode( "foobar") == "foobar"
assert safe_decode(123456789) == 123456789
def test_sequence_filter():
assert sequence_filter([0,1,2,3,4,5], filter_ = lambda x: x % 2 == 0) == [0,2,4]
assert sequence_filter([0,1,2,3,4,5], filter_ = lambda x: x % 2 != 0, type_ = tuple) == (1,3,5)
def test_get_revision(tmpdir):
directory = tmpdir.mkdir("tmp")
path = str(directory)
with pytest.raises(subprocess.CalledProcessError):
get_revision(path)
assert get_revision(path, raise_err = False) == None
call("git","init",path)
call("git","config","user.email","foobar@foobar.com", cwd = path)
call("git","config","user.name" ,"Foo Bar", cwd = path)
with pytest.raises(subprocess.CalledProcessError):
get_revision(path)
tempfile = directory.join("foobar.txt")
tempfile.write("foobar")
call("git","add",".", cwd = path)
call("git","commit","-m","'Test Commit'", cwd = path)
assert len(get_revision(path)) == 40
assert len(get_revision(path, short = True)) == 7 | true | true |
f7207db9c00de82d099854acdd08b0a2728247b5 | 1,679 | py | Python | genderPredictScript.py | ganesh2583/Python-Data_Science | 233586491d3863176a008b938b0946c472940a6d | [
"MIT"
] | null | null | null | genderPredictScript.py | ganesh2583/Python-Data_Science | 233586491d3863176a008b938b0946c472940a6d | [
"MIT"
] | null | null | null | genderPredictScript.py | ganesh2583/Python-Data_Science | 233586491d3863176a008b938b0946c472940a6d | [
"MIT"
] | null | null | null | from sklearn import tree
from sklearn import neighbors
from sklearn import gaussian_process

# Training samples: [height (cm), weight (kg), shoe size]
X = [[181,80,10],[161,70,6],[171,66,7],[176,88,7],[189,100,8],[141,80,5],[156,78,6],[161,50,6],[171,60,7],[151,78,7],[171,40,7]]
# Gender label for each training sample above
Y = ['male','male','male','male','male','female','female','female','female','female','female']
# The single sample every classifier predicts below
SAMPLE = [[161, 60, 9]]

def fit_predict_report(classifier):
    """Fit the classifier on (X, Y), print it and its prediction for SAMPLE.

    Replaces three near-identical copy/paste blocks (one per classifier)
    with a single parameterized routine; output order is unchanged:
    the fitted classifier repr, then its prediction.
    """
    fitted = classifier.fit(X, Y)
    prediction = fitted.predict(SAMPLE)
    print(fitted)
    print(prediction)
    return prediction

# `tree` is imported at the top of the original script.
fit_predict_report(tree.DecisionTreeClassifier())
fit_predict_report(neighbors.KNeighborsClassifier())
fit_predict_report(gaussian_process.GaussianProcessClassifier())
print(gaussianProcessClassifierPrediction) | 29.45614 | 128 | 0.805837 | from sklearn import tree
from sklearn import neighbors
from sklearn import gaussian_process
X = [[181,80,10],[161,70,6],[171,66,7],[176,88,7],[189,100,8],[141,80,5],[156,78,6],[161,50,6],[171,60,7],[151,78,7],[171,40,7]]
Y = ['male','male','male','male','male','female','female','female','female','female','female']
decisionTreeclassifier = tree.DecisionTreeClassifier()
decisionTreeclassifier = decisionTreeclassifier.fit(X,Y)
decisionTreeclassifierPrediction = decisionTreeclassifier.predict([[161,60,9]])
print(decisionTreeclassifier)
print(decisionTreeclassifierPrediction)
kNeighborsClassifier = neighbors.KNeighborsClassifier()
kNeighborsClassifier = kNeighborsClassifier.fit(X,Y)
kNeighborsClassifierPrediction = kNeighborsClassifier.predict([[161,60,9]])
print(kNeighborsClassifier)
print(kNeighborsClassifierPrediction)
gaussianProcessClassifier = gaussian_process.GaussianProcessClassifier()
gaussianProcessClassifier = gaussianProcessClassifier.fit(X,Y)
gaussianProcessClassifierPrediction = gaussianProcessClassifier.predict([[161,60,9]])
print(gaussianProcessClassifier)
print(gaussianProcessClassifierPrediction) | true | true |
f7207ef7a8d650d8bd9ead304e0f30c6f37038f4 | 366 | py | Python | main.py | zruss11/Nike-InboxCheck | 5e3be689797a8d861e4894b1df4bd6ed8f1066e8 | [
"MIT"
] | 11 | 2017-09-09T03:47:18.000Z | 2019-11-15T14:12:51.000Z | main.py | zruss11/Nike-InboxCheck | 5e3be689797a8d861e4894b1df4bd6ed8f1066e8 | [
"MIT"
] | null | null | null | main.py | zruss11/Nike-InboxCheck | 5e3be689797a8d861e4894b1df4bd6ed8f1066e8 | [
"MIT"
] | 5 | 2017-09-09T03:48:07.000Z | 2020-07-04T00:59:01.000Z | import requests
from classes.login import Login
from classes.logger import logger
log = logger().log
with open('config/accounts.txt') as accounts_file:
accounts = accounts_file.read().splitlines()
def run(x):
    """Attempt a Nike login for one account line.

    Args:
        x: one line from config/accounts.txt; the part before the first
           ':' is used as the display name for logging — presumably the
           full line is "user:password" (verify against Login.login).
    """
    req = requests.Session()
    log("{} Attempting Login".format(x.split(':')[0]))
    l = Login(req)
    l.login(x)
for x in accounts:
run(x) | 19.263158 | 54 | 0.674863 | import requests
from classes.login import Login
from classes.logger import logger
log = logger().log
with open('config/accounts.txt') as accounts_file:
accounts = accounts_file.read().splitlines()
def run(x):
req = requests.Session()
log("{} Attempting Login".format(x.split(':')[0]))
l = Login(req)
l.login(x)
for x in accounts:
run(x) | true | true |
f7207f32e150c7bc0ac42fe89ad3f6575a482a24 | 2,502 | py | Python | google/cloud/bigquery/_http.py | msuozzo/python-bigquery | dcb8728c12f5ab0d7809a1b6cf72755dff973772 | [
"Apache-2.0"
] | 384 | 2020-02-07T06:39:26.000Z | 2022-03-30T18:25:50.000Z | google/cloud/bigquery/_http.py | msuozzo/python-bigquery | dcb8728c12f5ab0d7809a1b6cf72755dff973772 | [
"Apache-2.0"
] | 770 | 2020-02-04T10:46:40.000Z | 2022-03-31T15:12:19.000Z | google/cloud/bigquery/_http.py | msuozzo/python-bigquery | dcb8728c12f5ab0d7809a1b6cf72755dff973772 | [
"Apache-2.0"
] | 177 | 2020-02-06T05:24:31.000Z | 2022-03-25T18:51:36.000Z | # Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create / interact with Google BigQuery connections."""
import os
import pkg_resources
from google.cloud import _http # type: ignore # pytype: disable=import-error
from google.cloud.bigquery import __version__
# TODO: Increase the minimum version of google-cloud-core to 1.6.0
# and remove this logic. See:
# https://github.com/googleapis/python-bigquery/issues/509
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE") == "true": # pragma: NO COVER
release = pkg_resources.get_distribution("google-cloud-core").parsed_version
if release < pkg_resources.parse_version("1.6.0"):
raise ImportError("google-cloud-core >= 1.6.0 is required to use mTLS feature")
class Connection(_http.JSONConnection):
    """JSON-over-REST connection to the Google BigQuery API.

    Args:
        client (google.cloud.bigquery.client.Client): The client that owns
            this connection.
        client_info (Optional[google.api_core.client_info.ClientInfo]):
            Used when generating the user agent.
        api_endpoint (str): Explicit endpoint to use; when ``None`` the
            library decides (and may auto-switch to the mTLS endpoint).
    """

    DEFAULT_API_ENDPOINT = "https://bigquery.googleapis.com"
    DEFAULT_API_MTLS_ENDPOINT = "https://bigquery.mtls.googleapis.com"

    # Version segment and template used when composing API call URLs.
    API_VERSION = "v2"
    API_URL_TEMPLATE = "{api_base_url}/bigquery/{api_version}{path}"

    def __init__(self, client, client_info=None, api_endpoint=None):
        super(Connection, self).__init__(client, client_info)
        # Auto-switching to the mTLS endpoint is only permitted when the
        # caller did not pin an explicit endpoint.
        self.ALLOW_AUTO_SWITCH_TO_MTLS_URL = api_endpoint is None
        self.API_BASE_URL = api_endpoint or self.DEFAULT_API_ENDPOINT
        self.API_BASE_MTLS_URL = self.DEFAULT_API_MTLS_ENDPOINT
        self._client_info.gapic_version = __version__
        self._client_info.client_library_version = __version__
| 41.7 | 109 | 0.742606 |
import os
import pkg_resources
from google.cloud import _http port __version__
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE") == "true":
release = pkg_resources.get_distribution("google-cloud-core").parsed_version
if release < pkg_resources.parse_version("1.6.0"):
raise ImportError("google-cloud-core >= 1.6.0 is required to use mTLS feature")
class Connection(_http.JSONConnection):
DEFAULT_API_ENDPOINT = "https://bigquery.googleapis.com"
DEFAULT_API_MTLS_ENDPOINT = "https://bigquery.mtls.googleapis.com"
def __init__(self, client, client_info=None, api_endpoint=None):
super(Connection, self).__init__(client, client_info)
self.API_BASE_URL = api_endpoint or self.DEFAULT_API_ENDPOINT
self.API_BASE_MTLS_URL = self.DEFAULT_API_MTLS_ENDPOINT
self.ALLOW_AUTO_SWITCH_TO_MTLS_URL = api_endpoint is None
self._client_info.gapic_version = __version__
self._client_info.client_library_version = __version__
API_VERSION = "v2"
API_URL_TEMPLATE = "{api_base_url}/bigquery/{api_version}{path}"
| true | true |
f720804843b0cf052334ea5fd31c198aa77f5bcf | 51,935 | py | Python | tests/test_catalina_10_15_7.py | RhetTbull/osxphotos | 0e9b9d625190b94c1dd68276e3b0e5367002d87c | [
"MIT"
] | 656 | 2019-08-14T14:10:44.000Z | 2022-03-28T15:25:42.000Z | tests/test_catalina_10_15_7.py | RhetTbull/osxphotos | 0e9b9d625190b94c1dd68276e3b0e5367002d87c | [
"MIT"
] | 557 | 2019-10-14T19:00:02.000Z | 2022-03-28T00:48:30.000Z | tests/test_catalina_10_15_7.py | RhetTbull/osxphotos | 0e9b9d625190b94c1dd68276e3b0e5367002d87c | [
"MIT"
] | 58 | 2019-12-27T01:39:33.000Z | 2022-02-26T22:18:49.000Z | """ Basic tests for Photos 5 on MacOS 10.15.7 """
import datetime
import os
import os.path
import pathlib
import sqlite3
import tempfile
import time
from collections import Counter, namedtuple
import pytest
import osxphotos
from osxphotos._constants import _UNKNOWN_PERSON
from osxphotos.utils import _get_os_version
# --- Test-library fixtures and expected values -------------------------------
# Some tests can only run on the author's machine (local Photos library on
# macOS 10.15); SKIP_TEST gates those.
OS_VERSION = _get_os_version()
SKIP_TEST = "OSXPHOTOS_TEST_EXPORT" not in os.environ or OS_VERSION[1] != "15"

# Library locations: the author's local library and the bundled test library.
PHOTOS_DB_LOCAL = os.path.expanduser("~/Pictures/Photos Library.photoslibrary")
PHOTOS_DB = "tests/Test-10.15.7.photoslibrary/database/photos.db"
PHOTOS_DB_PATH = "/Test-10.15.7.photoslibrary/database/photos.db"
PHOTOS_LIBRARY_PATH = "/Test-10.15.7.photoslibrary"

# Expected photo counts in the bundled test library.
PHOTOS_DB_LEN = 25
PHOTOS_NOT_IN_TRASH_LEN = 23
PHOTOS_IN_TRASH_LEN = 2
PHOTOS_DB_IMPORT_SESSIONS = 17

# All keywords present in the test library.
KEYWORDS = [
    "Kids",
    "wedding",
    "flowers",
    "England",
    "London",
    "London 2018",
    "St. James's Park",
    "UK",
    "United Kingdom",
    "foo/bar",
    "Travel",
    "Maria",
    "Drink",
    "Val d'Isère",
    "Wine",
    "Wine Bottle",
    "Food",
    "Furniture",
    "Pizza",
    "Table",
    "Cloudy",
    "Cord",
    "Outdoor",
    "Sky",
    "Sunset Sunrise",
]

# Photos 5 includes blank person for detected face
PERSONS = ["Katie", "Suzy", "Maria", _UNKNOWN_PERSON]

# All album names present in the test library.
ALBUMS = [
    "2018-10 - Sponsion, Museum, Frühstück, Römermuseum",
    "2019-10/11 Paris Clermont",
    "AlbumInFolder",
    "EmptyAlbum",
    "I have a deleted twin",  # there's an empty album with same name that has been deleted
    "Multi Keyword",
    "Pumpkin Farm",
    "Raw",
    "Sorted Manual",
    "Sorted Newest First",
    "Sorted Oldest First",
    "Sorted Title",
    "Test Album",  # there are 2 albums named "Test Album" for testing duplicate album names
]

# Expected keyword -> photo-count mapping.
KEYWORDS_DICT = {
    "Drink": 2,
    "England": 1,
    "Kids": 4,
    "London 2018": 1,
    "London": 1,
    "Maria": 1,
    "St. James's Park": 1,
    "Travel": 2,
    "UK": 1,
    "United Kingdom": 1,
    "Val d'Isère": 2,
    "Wine Bottle": 2,
    "Wine": 2,
    "flowers": 1,
    "foo/bar": 1,
    "wedding": 3,
    "Food": 2,
    "Furniture": 2,
    "Pizza": 2,
    "Table": 2,
    "Cloudy": 2,
    "Cord": 2,
    "Outdoor": 2,
    "Sky": 2,
    "Sunset Sunrise": 2,
}

# Expected person -> photo-count mapping.
PERSONS_DICT = {"Katie": 3, "Suzy": 2, "Maria": 2, _UNKNOWN_PERSON: 1}

# Expected album -> photo-count mapping.
ALBUM_DICT = {
    "2018-10 - Sponsion, Museum, Frühstück, Römermuseum": 1,
    "2019-10/11 Paris Clermont": 1,
    "AlbumInFolder": 2,
    "EmptyAlbum": 0,
    "I have a deleted twin": 1,
    "Multi Keyword": 2,
    "Pumpkin Farm": 3,
    "Raw": 4,
    "Sorted Manual": 3,
    "Sorted Newest First": 3,
    "Sorted Oldest First": 3,
    "Sorted Title": 3,
    "Test Album": 2,
}  # Note: there are 2 albums named "Test Album" for testing duplicate album names

# UUIDs of specific photos in the bundled test library, keyed by the property
# each photo exercises.
UUID_DICT = {
    "missing": "A1DD1F98-2ECD-431F-9AC9-5AFEFE2D3A5C",
    "favorite": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
    "not_favorite": "A1DD1F98-2ECD-431F-9AC9-5AFEFE2D3A5C",
    "hidden": "A1DD1F98-2ECD-431F-9AC9-5AFEFE2D3A5C",
    "not_hidden": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
    "has_adjustments": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
    "adjustments_info": "7783E8E6-9CAC-40F3-BE22-81FB7051C266",
    "no_adjustments": "D05A5FE3-15FB-49A1-A15D-AB3DA6F8B068",
    "location": "DC99FBDD-7A52-4100-A5BB-344131646C30",
    "no_location": "6191423D-8DB8-4D4C-92BE-9BBBA308AAC4",
    "external_edit": "DC99FBDD-7A52-4100-A5BB-344131646C30",
    "no_external_edit": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
    "export": "D79B8D77-BFFC-460B-9312-034F2877D35B",  # "Pumkins2.jpg"
    "export_tif": "8846E3E6-8AC8-4857-8448-E3D025784410",
    "in_album": "D79B8D77-BFFC-460B-9312-034F2877D35B",  # "Pumkins2.jpg"
    "date_invalid": "8846E3E6-8AC8-4857-8448-E3D025784410",
    "intrash": "71E3E212-00EB-430D-8A63-5E294B268554",
    "not_intrash": "DC99FBDD-7A52-4100-A5BB-344131646C30",
    "intrash_person_keywords": "6FD38366-3BF2-407D-81FE-7153EB6125B6",
    "import_session": "8846E3E6-8AC8-4857-8448-E3D025784410",
    "movie": "D1359D09-1373-4F3B-B0E3-1A4DE573E4A3",
    "description_newlines": "7F74DD34-5920-4DA3-B284-479887A34F66",
    "no_duplicates": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
    "multi_query_1": "D79B8D77-BFFC-460B-9312-034F2877D35B",
    "multi_query_2": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
}

# UUIDs of photos in the author's local library (for SKIP_TEST-gated tests).
UUID_DICT_LOCAL = {
    "not_visible": "4A836160-51B2-4E32-907D-ECDDB2CEC657",  # IMG_9815.JPG
    "burst": "9A5B4CE6-6A9F-4917-95D4-1C98D14FCE4F",  # IMG_9812.JPG
    "burst_key": "9A5B4CE6-6A9F-4917-95D4-1C98D14FCE4F",  # IMG_9812.JPG
    "burst_not_key": "4A836160-51B2-4E32-907D-ECDDB2CEC657",  # IMG_9815.JPG
    "burst_selected": "75154738-83AA-4DCD-A913-632D5D1C0FEE",  # IMG_9814.JPG
    "burst_not_selected": "89E235DD-B9AC-4E8D-BDA2-986981CA7582",  # IMG_9813.JPG
    "burst_default": "F5E6BD24-B493-44E9-BDA2-7AD9D2CC8C9D",  # IMG_9816.JPG
    "burst_not_default": "75154738-83AA-4DCD-A913-632D5D1C0FEE",  # IMG_9814.JPG
    "live_edited": "54A01B04-16D7-4FDE-8860-19F2A641E433",  # IMG_3203.HEIC
    "live": "8EC216A2-0032-4934-BD3F-04C6259B3304",  # IMG_3259.HEIC
}

# Photos in the "Pumpkin Farm" album and their expected (manual) sort order.
UUID_PUMPKIN_FARM = [
    "F12384F6-CD17-4151-ACBA-AE0E3688539E",
    "D79B8D77-BFFC-460B-9312-034F2877D35B",
    "1EB2B765-0765-43BA-A90C-0D0580E6172C",
]

ALBUM_SORT_ORDER = [
    "1EB2B765-0765-43BA-A90C-0D0580E6172C",
    "F12384F6-CD17-4151-ACBA-AE0E3688539E",
    "D79B8D77-BFFC-460B-9312-034F2877D35B",
]

ALBUM_KEY_PHOTO = "D79B8D77-BFFC-460B-9312-034F2877D35B"

# Expected current and original UTIs for selected photos.
UTI_DICT = {
    "8846E3E6-8AC8-4857-8448-E3D025784410": "public.tiff",
    "7783E8E6-9CAC-40F3-BE22-81FB7051C266": "public.jpeg",
    "1EB2B765-0765-43BA-A90C-0D0580E6172C": "public.jpeg",
}

UTI_ORIGINAL_DICT = {
    "8846E3E6-8AC8-4857-8448-E3D025784410": "public.tiff",
    "7783E8E6-9CAC-40F3-BE22-81FB7051C266": "public.heic",
    "1EB2B765-0765-43BA-A90C-0D0580E6172C": "public.jpeg",
}

# Expected RAW-related attribute values per photo (see test_raw).
RawInfo = namedtuple(
    "RawInfo",
    [
        "comment",
        "original_filename",
        "has_raw",
        "israw",
        "raw_original",
        "uti",
        "uti_original",
        "uti_raw",
    ],
)

RAW_DICT = {
    "D05A5FE3-15FB-49A1-A15D-AB3DA6F8B068": RawInfo(
        "raw image, no jpeg pair",
        "DSC03584.dng",
        False,
        True,
        False,
        "com.adobe.raw-image",
        "com.adobe.raw-image",
        None,
    ),
    "A92D9C26-3A50-4197-9388-CB5F7DB9FA91": RawInfo(
        "raw+jpeg, jpeg original",
        "IMG_1994.JPG",
        True,
        False,
        False,
        "public.jpeg",
        "public.jpeg",
        "com.canon.cr2-raw-image",
    ),
    "4D521201-92AC-43E5-8F7C-59BC41C37A96": RawInfo(
        "raw+jpeg, raw original",
        "IMG_1997.JPG",
        True,
        False,
        True,
        "public.jpeg",
        "public.jpeg",
        "com.canon.cr2-raw-image",
    ),
    "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51": RawInfo(
        "jpeg, no raw",
        "wedding.jpg",
        False,
        False,
        False,
        "public.jpeg",
        "public.jpeg",
        None,
    ),
}

# Expected filename vs. original_filename for one photo.
ORIGINAL_FILENAME_DICT = {
    "uuid": "D79B8D77-BFFC-460B-9312-034F2877D35B",
    "filename": "D79B8D77-BFFC-460B-9312-034F2877D35B.jpeg",
    "original_filename": "Pumkins2.jpg",
}

# Referenced (not copied into the library) vs. managed photo.
UUID_IS_REFERENCE = "A1DD1F98-2ECD-431F-9AC9-5AFEFE2D3A5C"
UUID_NOT_REFERENCE = "F12384F6-CD17-4151-ACBA-AE0E3688539E"

UUID_DUPLICATE = ""

# Expected detected-text substrings (None = no text detected).
UUID_DETECTED_TEXT = {
    "E2078879-A29C-4D6F-BACB-E3BBE6C3EB91": "osxphotos",
    "A92D9C26-3A50-4197-9388-CB5F7DB9FA91": None,
}
@pytest.fixture(scope="module")
def photosdb():
    """Module-scoped PhotosDB opened on the bundled test library."""
    return osxphotos.PhotosDB(dbfile=PHOTOS_DB)


@pytest.fixture(scope="module")
def photosdb_local():
    """Module-scoped PhotosDB opened on the author's local system library."""
    return osxphotos.PhotosDB(dbfile=PHOTOS_DB_LOCAL)
def test_init1():
    """PhotosDB can be constructed with the dbfile keyword argument."""
    # test named argument
    photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
    assert isinstance(photosdb, osxphotos.PhotosDB)


def test_init2():
    """PhotosDB can be constructed with a positional dbfile argument."""
    # test positional argument
    photosdb = osxphotos.PhotosDB(PHOTOS_DB)
    assert isinstance(photosdb, osxphotos.PhotosDB)


def test_init3():
    """Passing both positional and keyword dbfile raises."""
    # test positional and named argument (raises exception)
    with pytest.raises(Exception):
        assert osxphotos.PhotosDB(PHOTOS_DB, dbfile=PHOTOS_DB)


def test_init4():
    """Opening an invalid (empty) database file raises."""
    # test invalid db
    (bad_db, bad_db_name) = tempfile.mkstemp(suffix=".db", prefix="osxphotos-")
    os.close(bad_db)

    with pytest.raises(Exception):
        assert osxphotos.PhotosDB(bad_db_name)

    with pytest.raises(Exception):
        assert osxphotos.PhotosDB(dbfile=bad_db_name)

    try:
        os.remove(bad_db_name)
    except:
        pass


def test_init5(mocker):
    """PhotosDB() with no arguments raises when the last library can't be found."""
    # test failed get_last_library_path
    def bad_library():
        return None

    # get_last_library actually in utils but need to patch it in photosdb because it's imported into photosdb
    # because of the layout of photosdb/ need to patch it this way...don't really understand why, but it works
    mocker.patch("osxphotos.photosdb.photosdb.get_last_library_path", new=bad_library)

    with pytest.raises(Exception):
        assert osxphotos.PhotosDB()
def test_db_len(photosdb):
    """len(PhotosDB) returns the total photo count including trash."""
    # assert photosdb.db_version in osxphotos._TESTED_DB_VERSIONS
    assert len(photosdb) == PHOTOS_DB_LEN


def test_db_version(photosdb):
    """db_version is the Photos 5 schema version string."""
    # assert photosdb.db_version in osxphotos._TESTED_DB_VERSIONS
    assert photosdb.db_version == "6000"


def test_persons(photosdb):
    """persons lists every detected person, including the unknown placeholder."""
    assert "Katie" in photosdb.persons
    assert Counter(PERSONS) == Counter(photosdb.persons)


def test_keywords(photosdb):
    """keywords lists every keyword used in the library."""
    assert "wedding" in photosdb.keywords
    assert Counter(KEYWORDS) == Counter(photosdb.keywords)


def test_album_names(photosdb):
    """albums lists every album title (duplicates appear once)."""
    assert "Pumpkin Farm" in photosdb.albums
    assert Counter(ALBUMS) == Counter(photosdb.albums)


def test_keywords_dict(photosdb):
    """keywords_as_dict maps keyword -> photo count."""
    keywords = photosdb.keywords_as_dict
    assert keywords["wedding"] == 3
    assert keywords == KEYWORDS_DICT


def test_persons_as_dict(photosdb):
    """persons_as_dict maps person -> photo count."""
    persons = photosdb.persons_as_dict
    assert persons["Maria"] == 2
    assert persons == PERSONS_DICT


def test_albums_as_dict(photosdb):
    """albums_as_dict maps album title -> photo count."""
    albums = photosdb.albums_as_dict
    assert albums["Pumpkin Farm"] == 3
    assert albums == ALBUM_DICT
def test_album_sort_order(photosdb):
    """AlbumInfo.photos preserves the album's manual sort order."""
    album = [a for a in photosdb.album_info if a.title == "Pumpkin Farm"][0]
    photos = album.photos

    uuids = [p.uuid for p in photos]
    assert uuids == ALBUM_SORT_ORDER


def test_album_empty_album(photosdb):
    """An album with no photos returns an empty photo list."""
    album = [a for a in photosdb.album_info if a.title == "EmptyAlbum"][0]
    photos = album.photos
    assert photos == []
def test_attributes(photosdb):
    """Check the basic PhotoInfo attributes for a known photo (Pumkins2.jpg)."""
    photos = photosdb.photos(uuid=["D79B8D77-BFFC-460B-9312-034F2877D35B"])
    assert len(photos) == 1
    p = photos[0]
    assert p.keywords == ["Kids"]
    assert p.original_filename == "Pumkins2.jpg"
    assert p.filename == "D79B8D77-BFFC-460B-9312-034F2877D35B.jpeg"
    # date is timezone-aware (UTC-4)
    assert p.date == datetime.datetime(
        2018, 9, 28, 16, 7, 7, 0, datetime.timezone(datetime.timedelta(seconds=-14400))
    )
    assert p.date_added == datetime.datetime(
        2019,
        7,
        27,
        9,
        16,
        49,
        778432,
        tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=72000)),
    )
    assert p.description == "Girl holding pumpkin"
    assert p.title == "I found one!"
    assert sorted(p.albums) == ["Multi Keyword", "Pumpkin Farm", "Test Album"]
    assert p.persons == ["Katie"]
    assert p.path.endswith(
        "tests/Test-10.15.7.photoslibrary/originals/D/D79B8D77-BFFC-460B-9312-034F2877D35B.jpeg"
    )
    # FIX: use truthiness instead of "== False" (PEP 8 / E712)
    assert not p.ismissing
def test_attributes_2(photosdb):
    """Test attributes including height, width, etc"""
    photos = photosdb.photos(uuid=[UUID_DICT["has_adjustments"]])
    assert len(photos) == 1
    p = photos[0]
    assert sorted(p.keywords) == ["Maria", "wedding"]
    assert p.original_filename == "wedding.jpg"
    assert p.filename == "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51.jpeg"
    assert p.date == datetime.datetime(
        2019,
        4,
        15,
        14,
        40,
        24,
        86000,
        datetime.timezone(datetime.timedelta(seconds=-14400)),
    )
    assert p.description == "Bride Wedding day"
    assert p.title is None
    assert sorted(p.albums) == [
        "AlbumInFolder",
        "I have a deleted twin",
        "Multi Keyword",
    ]
    assert p.persons == ["Maria"]
    assert p.path.endswith(
        "tests/Test-10.15.7.photoslibrary/originals/E/E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51.jpeg"
    )
    assert not p.ismissing
    assert p.hasadjustments
    # edited dimensions differ from the original dimensions
    assert p.height == 1325
    assert p.width == 1526
    assert p.original_height == 1367
    assert p.original_width == 2048
    assert p.orientation == 1
    assert p.original_orientation == 1
    assert p.original_filesize == 460483
def test_missing(photosdb):
    """A missing photo has no path and ismissing set."""
    photos = photosdb.photos(uuid=[UUID_DICT["missing"]])
    assert len(photos) == 1
    p = photos[0]
    assert p.path is None
    # FIX: use truthiness instead of "== True" / "== False" (PEP 8 / E712)
    assert p.ismissing


def test_favorite(photosdb):
    """favorite is True for a favorited photo."""
    photos = photosdb.photos(uuid=[UUID_DICT["favorite"]])
    assert len(photos) == 1
    p = photos[0]
    assert p.favorite


def test_not_favorite(photosdb):
    """favorite is False for a non-favorited photo."""
    photos = photosdb.photos(uuid=[UUID_DICT["not_favorite"]])
    assert len(photos) == 1
    p = photos[0]
    assert not p.favorite


def test_hidden(photosdb):
    """hidden is True for a hidden photo."""
    photos = photosdb.photos(uuid=[UUID_DICT["hidden"]])
    assert len(photos) == 1
    p = photos[0]
    assert p.hidden


def test_not_hidden(photosdb):
    """hidden is False for a visible photo."""
    photos = photosdb.photos(uuid=[UUID_DICT["not_hidden"]])
    assert len(photos) == 1
    p = photos[0]
    assert not p.hidden


def test_visible(photosdb):
    """test visible"""
    photos = photosdb.photos(uuid=[UUID_DICT["not_hidden"]])
    assert len(photos) == 1
    p = photos[0]
    assert p.visible


def test_not_burst(photosdb):
    """test not burst"""
    photos = photosdb.photos(uuid=[UUID_DICT["not_hidden"]])
    assert len(photos) == 1
    p = photos[0]
    assert not p.burst
def test_location_1(photosdb):
    """A photo with GPS data returns a (latitude, longitude) tuple."""
    # test photo with lat/lon info
    photos = photosdb.photos(uuid=[UUID_DICT["location"]])
    assert len(photos) == 1
    p = photos[0]
    lat, lon = p.location
    assert lat == pytest.approx(51.50357167)
    assert lon == pytest.approx(-0.1318055)


def test_location_2(photosdb):
    """A photo without GPS data returns (None, None)."""
    # test photo with no location info
    photos = photosdb.photos(uuid=[UUID_DICT["no_location"]])
    assert len(photos) == 1
    p = photos[0]
    lat, lon = p.location
    assert lat is None
    assert lon is None
def test_hasadjustments1(photosdb):
    """hasadjustments is True for an edited photo."""
    # FIX throughout: use truthiness instead of "== True" / "== False" (PEP 8 / E712)
    photos = photosdb.photos(uuid=[UUID_DICT["has_adjustments"]])
    assert len(photos) == 1
    p = photos[0]
    assert p.hasadjustments


def test_hasadjustments2(photosdb):
    """hasadjustments is False for an unedited photo."""
    photos = photosdb.photos(uuid=[UUID_DICT["no_adjustments"]])
    assert len(photos) == 1
    p = photos[0]
    assert not p.hasadjustments


def test_external_edit1(photosdb):
    """external_edit is True for a photo edited in an external editor."""
    photos = photosdb.photos(uuid=[UUID_DICT["external_edit"]])
    assert len(photos) == 1
    p = photos[0]
    assert p.external_edit


def test_external_edit2(photosdb):
    """external_edit is False for a photo not edited externally."""
    photos = photosdb.photos(uuid=[UUID_DICT["no_external_edit"]])
    assert len(photos) == 1
    p = photos[0]
    assert not p.external_edit
def test_path_edited1(photosdb):
    """path_edited points at an existing render for an edited photo."""
    # test a valid edited path
    photos = photosdb.photos(uuid=["E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51"])
    assert len(photos) == 1
    p = photos[0]
    path = p.path_edited
    assert path.endswith(
        "resources/renders/E/E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51_1_201_a.jpeg"
    )
    assert os.path.exists(path)


def test_path_edited2(photosdb):
    """path_edited is None for an unedited photo."""
    # test an invalid edited path
    photos = photosdb.photos(uuid=[UUID_DICT["no_adjustments"]])
    assert len(photos) == 1
    p = photos[0]
    path = p.path_edited
    assert path is None


def test_path_derivatives(photosdb):
    """path_derivatives lists the derivative images for a photo."""
    # test an path_derivatives
    photos = photosdb.photos(uuid=[UUID_DICT["no_adjustments"]])
    assert len(photos) == 1
    p = photos[0]
    path = p.path_derivatives
    derivs = [
        "D05A5FE3-15FB-49A1-A15D-AB3DA6F8B068_1_100_o.jpeg",
        "D05A5FE3-15FB-49A1-A15D-AB3DA6F8B068_1_105_c.jpeg",
    ]
    # NOTE: the loop variable p shadows the PhotoInfo p above (harmless here)
    for i, p in enumerate(path):
        assert p.endswith(derivs[i])


def test_ismovie(photosdb):
    """ismovie is True for a movie asset."""
    # test ismovie == True
    photos = photosdb.photos(uuid=[UUID_DICT["movie"]])
    p = photos[0]
    assert p.ismovie


def test_not_ismovie(photosdb):
    """ismovie is False for a still image."""
    # test ismovie == False
    photos = photosdb.photos(uuid=[UUID_DICT["no_adjustments"]])
    p = photos[0]
    assert not p.ismovie
def test_count(photosdb):
    """photos() excludes trashed photos by default."""
    photos = photosdb.photos()
    assert len(photos) == PHOTOS_NOT_IN_TRASH_LEN


def test_photos_intrash_1(photosdb):
    """test PhotosDB.photos(intrash=True)"""
    photos = photosdb.photos(intrash=True)
    assert len(photos) == PHOTOS_IN_TRASH_LEN


def test_photos_intrash_2(photosdb):
    """test PhotosDB.photos(intrash=True)"""
    photos = photosdb.photos(intrash=True)
    for p in photos:
        assert p.intrash


def test_photos_intrash_3(photosdb):
    """test PhotosDB.photos(intrash=False)"""
    photos = photosdb.photos(intrash=False)
    for p in photos:
        assert not p.intrash


def test_photoinfo_intrash_1(photosdb):
    """Test PhotoInfo.intrash"""
    p = photosdb.photos(uuid=[UUID_DICT["intrash"]], intrash=True)[0]
    assert p.intrash
    assert p.date_trashed.isoformat() == "2120-06-10T11:24:47.685857-05:00"


def test_photoinfo_intrash_2(photosdb):
    """Test PhotoInfo.intrash and intrash=default"""
    # trashed photos are not returned unless intrash=True is passed
    p = photosdb.photos(uuid=[UUID_DICT["intrash"]])
    assert not p


def test_photoinfo_intrash_3(photosdb):
    """Test PhotoInfo.intrash and photo has keyword and person"""
    p = photosdb.photos(uuid=[UUID_DICT["intrash_person_keywords"]], intrash=True)[0]
    assert p.intrash
    assert "Maria" in p.persons
    assert "wedding" in p.keywords


def test_photoinfo_intrash_4(photosdb):
    """Test PhotoInfo.intrash and photo has keyword and person"""
    p = photosdb.photos(persons=["Maria"], intrash=True)[0]
    assert p.intrash
    assert "Maria" in p.persons
    assert "wedding" in p.keywords


def test_photoinfo_intrash_5(photosdb):
    """Test PhotoInfo.intrash and photo has keyword and person"""
    p = photosdb.photos(keywords=["wedding"], intrash=True)[0]
    assert p.intrash
    assert "Maria" in p.persons
    assert "wedding" in p.keywords


def test_photoinfo_not_intrash(photosdb):
    """Test PhotoInfo.intrash"""
    p = photosdb.photos(uuid=[UUID_DICT["not_intrash"]])[0]
    assert not p.intrash
    assert p.date_trashed is None
def test_keyword_2(photosdb):
    """Keyword query excludes trashed photos."""
    photos = photosdb.photos(keywords=["wedding"])
    assert len(photos) == 2  # won't show the one in the trash


def test_keyword_not_in_album(photosdb):
    """Set difference of two queries."""
    # find all photos with keyword "Kids" not in the album "Pumpkin Farm"
    photos1 = photosdb.photos(albums=["Pumpkin Farm"])
    photos2 = photosdb.photos(keywords=["Kids"])
    photos3 = [p for p in photos2 if p not in photos1]
    assert len(photos3) == 1
    assert photos3[0].uuid == "A1DD1F98-2ECD-431F-9AC9-5AFEFE2D3A5C"


def test_album_folder_name(photosdb):
    """Test query with album name same as a folder name"""
    photos = photosdb.photos(albums=["Pumpkin Farm"])
    assert sorted(p.uuid for p in photos) == sorted(UUID_PUMPKIN_FARM)


def test_multi_person(photosdb):
    """Querying multiple persons returns photos matching any of them."""
    photos = photosdb.photos(persons=["Katie", "Suzy"])
    assert len(photos) == 3


def test_get_db_path(photosdb):
    """db_path is the path of the opened database file."""
    db_path = photosdb.db_path
    assert db_path.endswith(PHOTOS_DB_PATH)


def test_get_library_path(photosdb):
    """library_path is the path of the .photoslibrary bundle."""
    lib_path = photosdb.library_path
    assert lib_path.endswith(PHOTOS_LIBRARY_PATH)
def test_get_db_connection(photosdb):
    """Test PhotosDB.get_db_connection"""
    conn, cursor = photosdb.get_db_connection()
    assert isinstance(conn, sqlite3.Connection)
    assert isinstance(cursor, sqlite3.Cursor)

    # the connection is usable for raw SQL against the Photos schema
    results = conn.execute(
        "SELECT ZUUID FROM ZGENERICASSET WHERE ZFAVORITE = 1;"
    ).fetchall()
    assert len(results) == 1
    assert results[0][0] == "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51"  # uuid

    conn.close()
def test_export_1(photosdb):
    """Export with default filename lands at dest/original_filename."""
    # test basic export
    # get an unedited image and export it using default filename
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["export"]])

    filename = photos[0].original_filename
    expected_dest = os.path.join(dest, filename)
    got_dest = photos[0].export(dest)[0]

    assert got_dest == expected_dest
    assert os.path.isfile(got_dest)


def test_export_2(photosdb):
    """Export honors a caller-supplied filename."""
    # test export with user provided filename
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["export"]])

    timestamp = time.time()
    filename = f"osxphotos-export-2-test-{timestamp}.jpg"
    expected_dest = os.path.join(dest, filename)
    got_dest = photos[0].export(dest, filename)[0]

    assert got_dest == expected_dest
    assert os.path.isfile(got_dest)


def test_export_3(photosdb):
    """Second export of the same photo increments the filename: "name (1).ext"."""
    # test file already exists and test increment=True (default)
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["export"]])

    filename = photos[0].original_filename
    filename2 = pathlib.Path(filename)
    filename2 = f"{filename2.stem} (1){filename2.suffix}"
    expected_dest_2 = os.path.join(dest, filename2)

    got_dest = photos[0].export(dest)[0]
    got_dest_2 = photos[0].export(dest)[0]

    assert got_dest_2 == expected_dest_2
    assert os.path.isfile(got_dest_2)


def test_export_4(photosdb):
    """Increment also applies to a caller-supplied filename."""
    # test user supplied file already exists and test increment=True (default)
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["export"]])

    timestamp = time.time()
    filename = f"osxphotos-export-2-test-{timestamp}.jpg"
    filename2 = f"osxphotos-export-2-test-{timestamp} (1).jpg"
    expected_dest_2 = os.path.join(dest, filename2)

    got_dest = photos[0].export(dest, filename)[0]
    got_dest_2 = photos[0].export(dest, filename)[0]

    assert got_dest_2 == expected_dest_2
    assert os.path.isfile(got_dest_2)


def test_export_5(photosdb):
    """overwrite=True re-uses the same destination instead of incrementing."""
    # test file already exists and test increment=True (default)
    # and overwrite = True
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["export"]])

    filename = photos[0].original_filename
    expected_dest = os.path.join(dest, filename)

    got_dest = photos[0].export(dest)[0]
    got_dest_2 = photos[0].export(dest, overwrite=True)[0]

    assert got_dest_2 == got_dest
    assert got_dest_2 == expected_dest
    assert os.path.isfile(got_dest_2)


def test_export_6(photosdb):
    """overwrite=True with a caller-supplied filename re-uses the destination."""
    # test user supplied file already exists and test increment=True (default)
    # and overwrite = True
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["export"]])

    timestamp = time.time()
    filename = f"osxphotos-export-test-{timestamp}.jpg"
    expected_dest = os.path.join(dest, filename)

    got_dest = photos[0].export(dest, filename)[0]
    got_dest_2 = photos[0].export(dest, filename, overwrite=True)[0]

    assert got_dest_2 == got_dest
    assert got_dest_2 == expected_dest
    assert os.path.isfile(got_dest_2)
def test_export_7(photosdb):
    """Exporting over an existing file with increment=False and
    overwrite=False (default) raises FileExistsError."""
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["export"]])

    # first export succeeds and creates the file
    got_dest = photos[0].export(dest)[0]

    # FIX: raise expectation narrowed from pytest.raises(Exception) +
    # "e.type == type(FileExistsError())" to the direct, idiomatic form;
    # also removed the unused `filename` local.
    with pytest.raises(FileExistsError):
        # try to export again with increment = False
        photos[0].export(dest, increment=False)
def test_export_8(photosdb):
    """Exporting a missing photo returns an empty list."""
    # try to export missing file
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["missing"]])

    assert photos[0].export(dest) == []


def test_export_9(photosdb):
    """edited=True on an unedited photo raises ValueError."""
    # try to export edited file that's not edited
    # should raise exception
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["no_adjustments"]])

    filename = photos[0].filename

    with pytest.raises(Exception) as e:
        assert photos[0].export(dest, edited=True)
    assert e.type == ValueError


def test_export_10(photosdb):
    """edited=True on an unedited photo raises even with a filename given."""
    # try to export edited file that's not edited and name provided
    # should raise exception
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["no_adjustments"]])

    timestamp = time.time()
    filename = f"osxphotos-export-test-{timestamp}.jpg"

    with pytest.raises(Exception) as e:
        assert photos[0].export(dest, filename, edited=True)
    assert e.type == ValueError


def test_export_11(photosdb):
    """Export the edited version of a photo with a caller-supplied filename."""
    # export edited file with name provided
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["has_adjustments"]])

    timestamp = time.time()
    filename = f"osxphotos-export-test-{timestamp}.jpg"
    expected_dest = os.path.join(dest, filename)
    got_dest = photos[0].export(dest, filename, edited=True)[0]

    assert got_dest == expected_dest


def test_export_12(photosdb):
    """Export the edited version with the default "<stem>_edited.<ext>" name."""
    # export edited file with default name
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["has_adjustments"]])

    # the edited image may have a different suffix than the original
    edited_name = pathlib.Path(photos[0].path_edited).name
    edited_suffix = pathlib.Path(edited_name).suffix
    filename = (
        pathlib.Path(photos[0].original_filename).stem + "_edited" + edited_suffix
    )
    expected_dest = os.path.join(dest, filename)
    got_dest = photos[0].export(dest, edited=True)[0]

    assert got_dest == expected_dest
def test_export_13(photosdb):
    """Exporting to a destination directory that does not exist raises
    FileNotFoundError."""
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name

    # build a path to a folder that doesn't exist
    i = 0
    while os.path.isdir(dest):
        dest = os.path.join(dest, str(i))
        i += 1

    photos = photosdb.photos(uuid=[UUID_DICT["export"]])

    # FIX: raise expectation narrowed from pytest.raises(Exception) +
    # "e.type == type(FileNotFoundError())" to the direct, idiomatic form;
    # also removed the unused `filename` local.
    with pytest.raises(FileNotFoundError):
        photos[0].export(dest)
def test_export_14(photosdb, caplog):
    """Exporting with a different-but-valid extension does not log a warning."""
    # test export with user provided filename with different (but valid) extension than source
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["export_tif"]])

    timestamp = time.time()
    filename = f"osxphotos-export-2-test-{timestamp}.tif"
    expected_dest = os.path.join(dest, filename)
    got_dest = photos[0].export(dest, filename)[0]

    assert got_dest == expected_dest
    assert os.path.isfile(got_dest)
    assert "Invalid destination suffix" not in caplog.text


def test_export_no_original_filename(photosdb):
    """Export falls back to "<uuid>.<ext>" when original filename is null."""
    # test export OK if original filename is null
    # issue #267
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["export"]])

    # monkey patch original_filename for testing
    original_filename = photos[0]._info["originalFilename"]
    photos[0]._info["originalFilename"] = None
    filename = f"{photos[0].uuid}.jpeg"
    expected_dest = os.path.join(dest, filename)
    got_dest = photos[0].export(dest)[0]

    assert got_dest == expected_dest
    assert os.path.isfile(got_dest)

    # restore the patched value so other tests see the real filename
    photos[0]._info["originalFilename"] = original_filename
def test_eq():
    """Test equality of two PhotoInfo objects"""
    photosdb1 = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
    photosdb2 = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
    photos1 = photosdb1.photos(uuid=[UUID_DICT["export"]])
    photos2 = photosdb2.photos(uuid=[UUID_DICT["export"]])
    assert photos1[0] == photos2[0]


def test_eq_2():
    """Test equality of two PhotoInfo objects when one has memoized property"""
    photosdb1 = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
    photosdb2 = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
    photos1 = photosdb1.photos(uuid=[UUID_DICT["in_album"]])
    photos2 = photosdb2.photos(uuid=[UUID_DICT["in_album"]])

    # memoize a value
    albums = photos1[0].albums
    assert albums

    assert photos1[0] == photos2[0]


def test_not_eq(photosdb):
    """Two different photos are not equal."""
    photos1 = photosdb.photos(uuid=[UUID_DICT["export"]])
    photos2 = photosdb.photos(uuid=[UUID_DICT["missing"]])
    assert photos1[0] != photos2[0]


def test_photosdb_repr():
    """repr(PhotosDB) round-trips via eval (ignoring transient state)."""
    photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
    photosdb2 = eval(repr(photosdb))

    # these keys hold per-instance temp-file/connection state that won't match
    ignore_keys = ["_tmp_db", "_tempdir", "_tempdir_name", "_db_connection"]
    assert {k: v for k, v in photosdb.__dict__.items() if k not in ignore_keys} == {
        k: v for k, v in photosdb2.__dict__.items() if k not in ignore_keys
    }


def test_photosinfo_repr(photosdb):
    """repr(PhotoInfo) round-trips via eval."""
    photos = photosdb.photos(uuid=[UUID_DICT["favorite"]])
    photo = photos[0]
    photo2 = eval(repr(photo))

    assert {k: str(v).encode("utf-8") for k, v in photo.__dict__.items()} == {
        k: str(v).encode("utf-8") for k, v in photo2.__dict__.items()
    }
def test_from_to_date(photosdb):
    """test from_date / to_date"""
    # pin the timezone so expected counts are stable
    os.environ["TZ"] = "US/Pacific"
    time.tzset()

    photos = photosdb.photos(from_date=datetime.datetime(2018, 10, 28))
    assert len(photos) == 16

    photos = photosdb.photos(to_date=datetime.datetime(2018, 10, 28))
    assert len(photos) == 7

    photos = photosdb.photos(
        from_date=datetime.datetime(2018, 9, 28), to_date=datetime.datetime(2018, 9, 29)
    )
    assert len(photos) == 4


def test_from_to_date_tz(photosdb):
    """Test from_date / to_date with and without timezone"""
    os.environ["TZ"] = "US/Pacific"
    time.tzset()

    photos = photosdb.photos(
        from_date=datetime.datetime(2018, 9, 28, 13, 7, 0),
        to_date=datetime.datetime(2018, 9, 28, 13, 9, 0),
    )
    assert len(photos) == 1
    assert photos[0].uuid == "D79B8D77-BFFC-460B-9312-034F2877D35B"

    photos = photosdb.photos(
        from_date=datetime.datetime(
            2018,
            9,
            28,
            16,
            7,
            0,
            tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=72000)),
        ),
        to_date=datetime.datetime(
            2018,
            9,
            28,
            16,
            9,
            0,
            tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=72000)),
        ),
    )
    assert len(photos) == 1
    assert photos[0].uuid == "D79B8D77-BFFC-460B-9312-034F2877D35B"


def test_date_invalid():
    """Test date is invalid"""
    # doesn't run correctly with the module-level fixture
    from datetime import datetime, timedelta, timezone

    import osxphotos

    photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
    photos = photosdb.photos(uuid=[UUID_DICT["date_invalid"]])
    assert len(photos) == 1
    p = photos[0]

    # invalid dates fall back to the Unix epoch in the photo's timezone
    delta = timedelta(seconds=p.tzoffset)
    tz = timezone(delta)
    assert p.date == datetime(1970, 1, 1).astimezone(tz=tz)


def test_date_modified_invalid(photosdb):
    """Test date modified is invalid"""
    photos = photosdb.photos(uuid=[UUID_DICT["date_invalid"]])
    assert len(photos) == 1
    p = photos[0]
    assert p.date_modified is None
def test_import_session_count(photosdb):
    """Test PhotosDB.import_session"""
    import_sessions = photosdb.import_info
    assert len(import_sessions) == PHOTOS_DB_IMPORT_SESSIONS


def test_import_session_photo(photosdb):
    """Test photo.import_session"""
    photo = photosdb.get_photo(UUID_DICT["import_session"])
    import_session = photo.import_info

    # creation/start/end timestamps of the import session are all tz-aware
    assert import_session.creation_date == datetime.datetime(
        2020,
        6,
        6,
        7,
        15,
        24,
        729811,
        tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=61200), "PDT"),
    )
    assert import_session.start_date == datetime.datetime(
        2020,
        6,
        6,
        7,
        15,
        24,
        725564,
        tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=61200), "PDT"),
    )
    assert import_session.end_date == datetime.datetime(
        2020,
        6,
        6,
        7,
        15,
        24,
        725564,
        tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=61200), "PDT"),
    )
    assert len(import_session.photos) == 1
def test_uti(photosdb):
    """test uti"""
    for uuid, uti in UTI_DICT.items():
        photo = photosdb.get_photo(uuid)
        assert photo.uti == uti
        assert photo.uti_original == UTI_ORIGINAL_DICT[uuid]


def test_raw(photosdb):
    """Test various raw properties"""
    for uuid, rawinfo in RAW_DICT.items():
        photo = photosdb.get_photo(uuid)
        assert photo.original_filename == rawinfo.original_filename
        assert photo.has_raw == rawinfo.has_raw
        assert photo.israw == rawinfo.israw
        assert photo.uti == rawinfo.uti
        assert photo.uti_original == rawinfo.uti_original
        assert photo.uti_raw == rawinfo.uti_raw


def test_verbose(capsys):
    """test verbose output in PhotosDB()"""
    photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB, verbose=print)
    captured = capsys.readouterr()
    assert "Processing database" in captured.out


def test_original_filename(photosdb):
    """test original filename"""
    uuid = ORIGINAL_FILENAME_DICT["uuid"]
    photo = photosdb.get_photo(uuid)
    assert photo.original_filename == ORIGINAL_FILENAME_DICT["original_filename"]
    assert photo.filename == ORIGINAL_FILENAME_DICT["filename"]

    # monkey patch
    original_filename = photo._info["originalFilename"]
    photo._info["originalFilename"] = None
    # falls back to the current filename when no original is recorded
    assert photo.original_filename == ORIGINAL_FILENAME_DICT["filename"]
    photo._info["originalFilename"] = original_filename
# The following tests only run on the author's personal library
# They test things difficult to test in the test libraries
@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_not_visible_burst(photosdb_local):
"""test not visible and burst (needs image from local library)"""
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["not_visible"])
assert not photo.visible
assert photo.burst
@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_visible_burst(photosdb_local):
"""test not visible and burst (needs image from local library)"""
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["burst"])
assert photo.visible
assert photo.burst
assert len(photo.burst_photos) == 4
@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_burst_key(photosdb_local):
"""test burst_key"""
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["burst_key"])
assert photo.burst_key
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["burst_not_key"])
assert not photo.burst_key
@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_burst_selected(photosdb_local):
"""test burst_selected"""
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["burst_selected"])
assert photo.burst_selected
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["burst_not_selected"])
assert not photo.burst_selected
@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_burst_default_pic(photosdb_local):
    """burst_default_pick flags the photo Photos chose as the burst default."""
    default_pick = photosdb_local.get_photo(UUID_DICT_LOCAL["burst_default"])
    other_pick = photosdb_local.get_photo(UUID_DICT_LOCAL["burst_not_default"])
    assert default_pick.burst_default_pick
    assert not other_pick.burst_default_pick
@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_path_edited_live_photo(photosdb_local):
    """An edited Live Photo has a non-None edited video component path."""
    live_photo = photosdb_local.get_photo(UUID_DICT_LOCAL["live_edited"])
    assert live_photo.path_edited_live_photo is not None
@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_path_edited_live_photo_not_edited(photosdb_local):
    """An unedited Live Photo has no edited video component path."""
    live_photo = photosdb_local.get_photo(UUID_DICT_LOCAL["live"])
    assert live_photo.path_edited_live_photo is None
def test_is_reference(photosdb):
    """isreference is True for a referenced file, False for a managed one."""
    referenced = photosdb.get_photo(UUID_IS_REFERENCE)
    managed = photosdb.get_photo(UUID_NOT_REFERENCE)
    assert referenced.isreference
    assert not managed.isreference
def test_adjustments(photosdb):
"""test adjustments/AdjustmentsInfo"""
from osxphotos.adjustmentsinfo import AdjustmentsInfo
photo = photosdb.get_photo(UUID_DICT["adjustments_info"])
adjustments = photo.adjustments
assert isinstance(adjustments, AdjustmentsInfo)
assert adjustments.asdict() == {
"data": b"mW[\xb7\xa2:\xb3\xfd/\xbe\xda\xa3\x17((\xf4\x18\xdf\x03H\xc2E\xb9%\\\xc4\xb3\xce\x03\x02\x12.\x82\n\x1at\x8f\xfd\xdf\xbf\xb8\xba\xfb\xec\xdec\x1c\xde\x92\xaa\xcc\x9aU\t\xa9\x99\xbff\x8f\xe26T}gv\xa7~\xf6\xe3\xaf\xd9\xf1^\xb5\xb9s?\x1f\x8b\xdb\xec\xc7\x8c\x97\xf5\xf5r\xf6m\x96^.\xd1O\xbf\xf7\xe4\x8a\xff\xce}\xe7\x17\x1c3\x0c\x19)\xce)*\x1e\xd5O#\xffmvi\xd3\xf1\xd4\xdf\xce\xcc\xd3\xc5\xfb\xd9\xdf\xdff\xe7bL\xf3tL\xdf\xf8\xe7t\x18\x8b[\\\xe5#\x99\xfdXr\x0b\x81-\xa8.E[u\xc5?\x11\xd8\xba\xef\x02C\xff\xe9l\x14UI\xc6\xd9\x0f\x81[.\xbe\xcd\xfa[Utc:\xfe\x0c\xc7\xd0\xdf\xb1\xd2\xf1\xff\x163\x06i^\xdf\x87\xf1\xcc\xdc\x86\xd9\x8f\xff\xf9\xeb\xff\xf1(\xba\xf4\xd8\x16\xf9\xec\xc7x\xbb\x17,\x8bb\x1c\xab\xae\x1c\xde\x04\xfb\xd3\x89\rw}\x96\xb6\xbb\x9fq\xb9o\xbf&\r6n\xdfs\xc3\xd7d\xd5]\xee\xe3o\x9f\xefKn\xbd\x14\xc4\xe5\x8a\x93\x16\xc2ZX,\xe4\xdf\xab\xc0t\xe9\x87\xfb\xad\xf8\x03Hm\xd3\xac\xf1\xfa\xaa\xfb\x13]\xbd\xbd\xa1\xbab\xf8\x89>\xbcs\x1c\xc6*\xfbbu\xe1\x16\xef \x1c\xb7\x96\x84%\xbf\\/DA\xe6xy\xc5\xadY\xfdD\xee\xcb&K\xdcR^\xf0\xe2JZ-\xd6\x82\xc8I\xac\x12\xf7\xb1\x8f\xd2\xf6\xfe\x0e\xfe}!\x89+\xee\x8f\x8f\x15\xf3\xf8'\x11\x86\xbe\xe4\xe5\xf5J\xe4Y\xa5EYZ\xf0k\xf1\xdbl\xec\xbb\xb4EiW\x16\xbf\x82\x08\xe2j\xcd\t\xb2\xb4\\\x8bk\xf1\xbd}\x0b\xf1\xcb\xb2\x14\x17\xb2\xc0\xf3\xeb\x95\xb0\xe6DIZ,\x99I\x96\xde&Q\xfe\xf7\xc7\x88}\x95\xd1N/l\xb3at\xd9\xe6\xdc\xe5\x88\xa3\xc6\x8f\x15q\x8f\xf8\xc6\x89U'\x860\xb9\xda\x1b\xf7b\xc1\xf2\x18\xab\xe7;\xe4\x13Ro\x82\xb5%\x83\xaa\xe1\x0e\xc4\x8c-\xd8\xf2\x9e\x19\xe9m\x9c\xf2\xf9\x18\xc7r\x9a\xb5\xfcb\xbfl\xb5\xcf\x0fbQ\xad\r\xbd\xa8\xc9\x13\x0bf^\x84\x94\t\xaa\x073\x06$\xd1#\x07\xc4\xaa\xb5\x07m\x92\xc4\x1b\xdd\xb4\xd2\xd6I\xa6G\t\x97Jy\x0co4\xcc\xc5\x88\x8f\x0eC\xb4\xe0\x0fG\xfe2\xed\x8d\xe8T\xa8gM\xc3\x8d\x13Q1fD\xa2H\x831\xe2s#\xe2\xc8\x1e\xc3\x9c\xe1\xb6\x0c\xb7\t\xe2\xe6fz\xe9\xf0\xf8\xfc\x08\xd7\xa2\xc6\x0f\xdeAEcx>\x84)\x8c\xae\xd1\x83\x1b\x86Mm\xc5\xa7)k[Q\x80Op\xc0\xaa\xca\x80\x92c\xa46\x19\x08\x84\xd0\x00\xf9\x1eG\xc4b\x80\x07\xdc
\xb6\xdb\x98\x1b\xb3\x00\xf2\xf6\xbe\x8aJt\x02\xce\xa6\x94[\xb7C\xf8\x14\xa1>\xd2/Q\xf3,??\xb6\\\x98!\xd2p\xa1\xd7\xbb\xa6j\x9d\xd0\x9c1\xa3\x9c\xa3\xbd\xec\xd4P\xe5\x04\xc3\xdf\x80\x97m\xdc\x8c\xc7/\xc0F,\x83\x05\xf4\x92\x92\xd3\xb5\xd8\xe7\x1fZ\xf4\xf9\x11\x19\xf6\xa2\xdc\xc0!\x12\xac\r?\xc5%L\xa5\x90\x12\x13C\xd5\x0c\xa3\t\xed\xdd\xb8\xc7\x11\xaa\xb6x\xab\x9aI\xf3\x8ba\xc3\xf6\x8e\x9f\x18 \x7f\xfa\x02$\xacV~\xe8\xc4\xad\xb5rt;\xcc\x91\xca;\xb2\xb2\xa7\x93\xdb\x81\xa7\x1f\x00b#\xad\xc9\xf6\x08e!\x8c\xca\x18?\xbd\xc2J\xb3\xea\x10^\xaa/\x82\xdc\x9b \xc3\x0b\x7f\xe1\xb5\xb0\xd1\xe2\xc4QK\xf1\x1ey\x02r\xc9\xd6\x02HA\x00\x99\x18t~\x98\xf3\xa2\x94$!\x8a&'\x82\x93\xbf\xe7P\xbe\x87\xe7\xb2\xfd\xfch\x96\x9f\x1f\xf8!\xff\xc30\xe4\x8b\xdf\x88\xe1\xdevsU\x1c\xbdk\xc96\x8b\xce\xe5mB\xaf=l\xb9\xb8s\x8e7^\\\xb2cD\xae\xefc\xd9\xf6\xfb\x18E7k\xa4\x97X\x9b\x9f\xf0]Y\xed\xc1\xa5\xfb\xaa!\xf7\xab\x86<l\xbde\xdf\x1fp\x1e\x9a\xb1\x99\x14jG\xf4s\x9f\x132\xef\x8d.\xa9m\x1c\x1fL\xbd\xd9?T\xb0\xc3\x9f\x1f\xd6\x96\x01\x1c\xf5\xa6\x8coj\xb1E)\xb1W\xcd\xeb\x10\xe4\xb2\xcbq\x9f\x1fy0w|\x9e7\x82p'\x04\xe5\xa4\x10\xedI\x91\x8b@\x0c\xe2\x81\xac'\xbf5_\xc3\x0b\x05H\xb79\xfb\xee\xa1q\x05\xfa\x88\xa56\x15\x10R\x0f(\x92\xab\xbd|\x84\xc8\x0e\x82\x81\xe2;\xd9J\xc6\xc5?f\x13}\xc0'\xf5\xfcR8i1\x87_\xca<\xd5(\xf5\x81\x1a>\xb5)\xb9x5\xef\xfaP\x91\x02\xed\x00\x1c\xa7\xbf6\xe1\x93B\xc8!\x8d2<\x02|\x80\x8c\x1e\xc4\nN\xc8Xou\xfb\xe2W\xc9\xc2|\xf9\xc7\xb4\x94oo\x1c\x9d\nX#\xbd\xa3Q\x0eCl\x16\xce\xb3a\xd9\xc8\x9b0\x18\xed\xddR\xb4\x1f\xaf+\x82j\x883\x04\xcf\xf0\x98\xc5t\xf2}\xfd\xe4xm\xab\xd6a\x1c\xde\x0e\xf8\xd0\x99\xe7KtT\xa31\xea\x14'\xf3\xb9\x9d\x86\xedt\x8b\xc1`\xe2\xbe\xb6kE\xb2_bV@Q4\xba\xa6|Vk\xdf\x16{O#\xd3\x11l\xa8g\xa2tm\xb8M\xb8\xa6\x82\xa9\xf9\x99WD\x8el\xb8y\x9c\xc1v\x02\x9d\xe2\xea>54\xc4\x9d\xed']\xee\xb4\xecfW\r\xb55n(\xf4\x8d\x9d\xec\xe9\xe3\xa4\xae6\xd66\xaa\x16j\x04\xe1\xa8`\xaa|~\x9c\xb4K\xef\x18>\x97\xb3\x04=\xb1\\\x9c4?q6H\xe6\xad\x8b\xe9\xe5\x94_j\x88\x01\xe3Ar\xb8\x90\xf3kG\xd9\xd5\xc3\x
dd\xc5D\xda\xdf\x9d\xbal\nEOh\xd9U\xaf\xb3\xc1\x9b\x87\x0b\xe9pp:\xf7s\xfa\xf9!k~co\xc9\xee\xbc=\xd9\xaeD\x17\x08t\t\xceU\x93U\x88\xc3\xa6B\x91\xa5\r\x12\xae\xc7\xad\x0b\x92\x97\xaf\xeb\xca\xc1TV\xb5\x9en\"\xc1\xce\xab\xca\x9ao\xe5vs\xf3\xe5\xd1\x08\xedC\x80^km\x0e\x1c\x80\xfc\x00\x9at\x7fUwW\xb0\xf5#\x1d5\xa5\xb1\xf1s\x0bq\x9d\x86\x04g\xfbl\xc16,/h\xe3K\x9a\x00\xcf\x04^\xdd\x83\xec\xd4\x15\xfb[\xf5CHe\xd8yZ*\xf9W\xb5s\\;C\x13\xa2\x9d^\xdby\x82\xe8IG}\xa8W`\xb0j\xe5\xe6\xe0\x86\xb74\xff\xb4+\xb9-$\xb4\xddm\x86\xa7\xf6R<XJN\xd8\xb7\xe7J\xbf\xdb\xbb\x8bTw\x9bMnm\xedC\xab\x82\x01\xa8\x12\xf6\xc8\xba6p\xc6\x9aj\xf2\xb04\xb3\xde=\xc1k\xfb\xa2/\xa49\xd0\x0e\xfd\t\xa9\xe0\xc5\xae\x86\xbdNh\xb7\x05\x19\x06\x08\xc8 \xc8p\xcd\xeb^jEq3U\xae\xd1\xd3\xa2\x9f\x9a\x0b\xab\x93\xab\x95,\xaf\xa7];XX\xdb5\xf7\xf4jen\x06!\xf1\x83\x8b\xebE@\xc4\x94\xdf\x00\x9f\xdb\x9b\x1b\xfbaa\xe1\x9a\x92\xc8\xb1Z*\xe4H>oa\xd6\x1c\x9e\x88\xd7\x0f\\\xe0=]b\xc0\xc4\x06T:\x00\xd5\xce-l\x9e\x8d\xba'^\xe5(\xb6&\r\xdef\xe0vA\xd38%w\xd4\xd4\xcc\x86\xa8<\x1b\xb8\x19\xdc\xe7+\xb7l\xa5H7\x9f\x1f\x9e)\x84\xdd\x15G\x9e\xb1\x14B\xa2:\x1bm\x11z\x16\x95\xaf`\x1a\x12\xf3iwf\x15\x12\x0b\xfbw\xebE\x9f\xbe\x16iv\xc0\xdd]FL#\x99m\x12?d'\xa9\xf3\x02K\xd8\tM\xfd\xa8\xf2\x87\xed\xf4\xf7\xb6zB\xeb<\x90+\x19\x1f\xe0U\x1e\xdb\xa9-\xad\x8e\xbb\xd4\x15\xb8\x9aUYoqx\xb3\x96\xc3<\xa8y\xc7i\xc2\x97_\x8d\x0b\xad51+\x8c\x03\xf7\x8a\xbd\xa1R\xae\x83\xe1\xd4\xd4\x05\xeb\x10FY\x9dqT\xeen\xef\x8bw\x15\x80[\xe6e\xd3\xb8\x84:%5Y,\xe1\xb6\xef\xec*\xa7\x10daG\xa5\x07\xd8J\xfe\x86\xa8\x9e\x9e\xf5\x8e:\xd9Xk@\x98*B\xc8\xda\\\xecM25Rp~ME\x0ey\xe5\x18\xa1\xf6\xa2\x9f\x95\xb4F\xb06\xac&\xca\xa6'6;.\xa8H\xfe\x04\xad\x8dw\xea\x1e[n\x92\xac\x91\x12\x03\x7f@\x83\xcf\x19\x10%\xaeG\xec\x03\x14\xc2C\xa9\xa6\x8a\xde\xd2r\xc2\x81\x06\xd3&&\x9b\xb8\x85\x87d\x9f\x93C\xa3\t\xa6\xb3\xf7\xe5J[\x8c\xf9\x92\x8a\xaca\xf6N\xe4\x7f~\xa0\x9d\x9c\xe1\xfbt2!l\xfcM)\xed\xd9\x11\x0fu\x94\xabz$\x9c\x86\x89\xdca\x96\x8cu\xa5%\x86I\x8f\x15\xa9\x00\x10}tDQ\x0b\r\x13\x87>\x1f
\x00Xz\xa9\xb2\xc84A\xc1\x13\x95\x1b\xd8\xd3KG\x9e;C\xe7\xc8\xb1\x94\x13\x8d\x96\xac\xd7r\x9e\x1e\xf5\xa4\xc4\xee\x1a\x8a\xc2\xbe$\x0f\x15\xf6\xe1\xfeL\x12Y7)k\xe3\x0e\x01K\xc1\xb3\xd1\x96\x80\xa2q'*\xde\xb5'\x13\t\x04\xae\xa04\xdc\xb8MLv\x17\x9f\xff\xfcx\xee\xe6\xc6\xb5t7\ngh\xe1p\x1d\xab\xfb\xd3b=kD\x16\x81\xfb>H'\xa7\xd78\x01\x17\xaa\xab\x02\xd1\x0e\x11\x02s\x80\x05\x8f\xdd\xa6;v\xabF\x90\xca>\xb8\x98~J\x9e\x0bm! \x7f\x82\x0b\xe0\x0c~\xad\x08\xecW\x0c]\xaf2\xac\xad\xe9G)\x95\xae\xe0\x9c\xb0}\x96(\xe8B/\xa4\xbc\x08\xf6\xe10 H@\x04\xfc\x145Gv\xd7\xd8\x9a2?\x82\xbd\x106\xc8\xe2uI\xc9\xee\xbe|\xd2T!H\xe9<c\xb7\xa7\xa3\"G\xd5G;{a\xd70\x85$\x08\x118\x81\xa8\xd97\xea$\x81\xde\x0f:\xe4\xdc\xb5\xaew\xacR\xa0\xa0\x1d\x9c\x04\xc55\x90l\x9c<\xbd (\xa0uW\x16\xa5\xa6\x84N\xed\xcfc\xed98*\xe5,\xa3m\x10xv\x08\xae\x92\x82\xado\xc0A\xf1v\xbe\xbc\xd5\xf7\xc0c\xdd\x12k\xcb\xd2;\x95\\\xa9-\xfb\xff0\xe9\xdf\xbe\x05\xb8\xf2\xa7|]\xfeK\xbcr\x1c\x93\x9e\x94Tc\xf1K\xbe\xf2o\xf9\xfa\x87\xfc}\xbfD\xf8\x9f\xc2\xf8\x1fI\xfcK\"\x7f\x9b\x11\xa6?\xb7\xc5\xf3m\x96\xb8\xd5R`\xb2\x9d\xe9vQ^I\xd2\xfa\xef\xdf\x8a|\xd3w\xe3\x8d=A\xfe\x10\xe9\x98\xa4yO\xdf\n\x9dyU9{bT\xa7\xea\xeb\xa9\x84\xcf\xe9m\x0c\xfa\xae\x98\xfd\xfd\xbf\x7f\xff\x17",
"editor": "com.apple.Photos",
"format_id": "com.apple.photo",
"base_version": 0,
"format_version": "1.5",
"adjustments": [
{
"formatVersion": 1,
"enabled": True,
"settings": {
"offsetLocalLight": 0,
"offsetHighlights": 0,
"inputLight": 0.3073453608247423,
"offsetExposure": 0,
"offsetBlackPoint": 0,
"offsetBrightness": 0,
"statistics": {
"p02": 0.00784313725490196,
"p50": 0.09803921568627451,
"autoValue": 0.2856,
"blackPoint": 0.0031976514035982175,
"tonalRange": 0.09845670498375754,
"p25": 0.03529411764705882,
"p98": 0.6,
"lightMap": "FVpKd0pbSVkQWA5XR1kNWBNWFFYqMCOpJFgbWBmuF1YhjCT7J9Eik0ZhIWJFl1PIVGlWa1dtWW9acl12X3lD/hJwDlUPVkdYJFcPVRAxFBZIWEhYGVNEWBJXElYYWCGIJalNYxvgF3AgbUrwUd5V1VZsV21Zb1pxXHVfeBmDDSkNVw5WF1YVVDFWR1dHV0hXSFdIWElYGVkTWkrIPasv/U75D1sPZBRtUmZUaFVqVv0ssCjJWfxcll54FyEZSBBWR1YbVBkcET4UHEdXSVhJWElZSllKW0tcTF1MXiVgRfENCg9lOnRSfVRoVGpVkyg/K0UcRhk0UPoOIBJfR+dHVw0NDzMaHB9YSFhJWElZSlpKWktbTF1MXk5gT2FPYg0GDWQ1vDV/VHM2gCFsV4JC1xWgFa8UwhISFBIUVxRXOWoSVRiKSKBIYklZSllKWkpbS1xMXk1fT2FPYhBmDQUNWlJ6NGMUdRB1N9AXwxOnEyQTEhMRDkcXRRcUFVgWSyPeJaciZUpiSlpKW0tbTFxMXU1fT2FPYlFkDWYNBg5uVP4ROhKJERARERISEnQUd158YYURVxNVFxQX0RdXFmgl/k3/Sv9KWkpbS1xMXU1eT2FPYlFkUXMOdB5tPqgv/w+9KYwqoFl0WnNbr153X3lhq0pbSloXWRVrJtwpWD+fSuA6XEpnTF1MX05gT2FPY1FlP3ooZSdUIWIYeBnhGmodhh+oHnYjMSWZIGkXvBELS/JKXEpbGkgWrBeKRahM6kzZTd9O00/dT+NQ11HTUL4TgxAhDywROREWEWsh7xQlIzszRTRGM0MuPRt6EoVMXUxeFFwPEA8ODzQRRhLFEswSuhK8HpQbcxwvFywPQg4fDW0SzA+aDwwQEBUyDxYpPj1OQFA8TDZENNoqkUywFF0RDw8ODhEQERHoEWASYhtjKGMpQiY2IzQbag9rDwwQGw4SDhoNDw0SFSIeNyk9O09CUTtML35MvzqRFBUScRFmFbcWwxQQGfNPllBjUWUrZSZnImpVbBVtVnANcQ0LDSMaKSEsISojMjA8Mz5ceF55Hnkgyi7QM5oPDhxbECwPIRa7HOkU7w4IDQcPeVN9HOdWcFlxEnAOGQwHDR0mMyw3LDcrMikwMD0seGCMYXwvfB6CJKVi2BVFFtASwA/fDpoNHQ0dDwwP5g2fDQYNCR91JpIPdw13DRAOGSs8N0U0QjNALjsuOSh8XuNjgkeAGYwgnizmH1IZphnSTfmo+w/9DQkMKhLmKfMO8w2REnYSdBIRFB0SIAwRJDs8SjtKOEYzQGGAZIA6jGaCV4MdiiJ+K9lCrQ9tHUMZTRz7D+ENERQTFIwXqBLqEKQVGRkgHCQdJR0nDR4NKylEKUgpRCQ8D4FmhFqOZ9NjiBmDGZUw9FnPDa8QqBnNOMcRxRwnGjMdYRwfGRoUGiEsJjArNSk1JDQfLg0KFhwlLCsyDzAPFg8NUolmiGuMLp8jnCCdJKMZlBEsEB8SPh7jHSclLiYvJDIjLyEzKzwzRDNFMUQxRBEzEhMXGhwnKEcSERE9ETcSj1GPaJVWkxiOHoweoxkpFB0ODg0nDyMjNS47Mj0yPjA+ITUhQTpOPVE5Sw1CEQ0XICMvJS4qahVNJlw4dR9mKFckZyR1GZ0TPyOhHFYMEw8ZEBMdJSImHjohPiNAMD8sPCs0LTkkNg0bDBcMFRgmHSksOyzdJMAeaC/PI4UnqSVPH34UhBNCD2UPJw9qExsYIyMnIiUhJSQuJzwyQDVDMT0uOCMvDhcMIhQUDRAnPTJ4L4kjvidvMNouliyFJmshqhtvEzgblxlgJn0pjiEqIigjKSUrJ3s+Tj1NNkUzQit2DlISDg0NFXAMCw8dGEsfkje/KHgimSVgLrcXRR6TErcPcxt3FGwhjh23FKonMidwFEcUnw8vEK8QChBPGcoNBxMSDkEUaA4UElYWPx9wHaEmzxedF1AbVRlpGmAajRFjHJk
VcxySIn0TihdyElMSLBXSJOYY7RAWEQsRsQ0HFRYOPhMZF4UZgBaAGlwgxSTDFakWhCWlFZYXdhZkD4INXQ9iD2td3w5yEZoNVQ/RL9cSuxfIFFkQCg8XDR4UGRdBGV4fsxhuFcYtjiDYHIwbihiEE5QRbRVlFHISUQ1TEFgPaA2cD4ASxw9kFowpnhyLHG0hbg9YDwgNCg0PGVohgSO7F54XghvBFoUXmhY9GIwWfxNhE34PMRKhEekOxw5uDykNVhF6F8sr0CWhLpQ1/yL+HqgOCA0HDUsqtiuyJYYUtRJhFXoTaxNoD04SeBOBE5MURRE+ES4PDw0LDhoVFw9QEpIQahy2D24RQxF2ENsQjA4JDQUOPiHJKIQVaw8qEmYSVg8wEnUPUw15EXUssRFhEVEQaRkbEnYMDA+bEX4UkRJ1G8AcuQ9fDB4Taw+cDQcNBRNBGtMczSOHI4YTUREfEVkXkBx8EoQTnRNuDnoNJg4wElsNYRWjE8MSYyPTTeFJuA2gDAUNjQ+WDysNBw0JHlkREynRF6YenRNkEZAPLQ9KGXEPnhGSD3gPfg0gD3o=",
"localAutoValue": 0.36000000000000004,
"whitePoint": 1.003921568627451,
"p10": 0.01568627450980392,
"highKey": 0.8063460882459689,
},
"offsetContrast": 0,
"offsetShadows": 0,
},
"identifier": "SmartTone",
}
],
"metadata": {
"masterWidth": 3024,
"pipelineVersion": "OSX.4",
"masterHeight": 4032,
"orientation": 1,
},
"orientation": 1,
"adjustment_format_version": 1,
"version_info": {
"buildNumber": "19G73",
"appVersion": "161.0.120",
"schemaRevision": 1,
"platform": "OSX",
},
"timestamp": "2020-10-03T22:54:20+00:00",
}
def test_no_adjustments(photosdb):
    """A photo without edits has no AdjustmentsInfo."""
    unedited = photosdb.get_photo(UUID_DICT["no_adjustments"])
    assert unedited.adjustments is None
def test_exiftool_newlines_in_description(photosdb):
    """Verify the exiftool dict preserves newlines in description (issue #393)."""
    photo = photosdb.get_photo(UUID_DICT["description_newlines"])
    exif_dict = photo._exiftool_dict()
    # the embedded newline must appear past the first character in both places
    assert photo.description.find("\n") > 0
    assert exif_dict["EXIF:ImageDescription"].find("\n") > 0
@pytest.mark.skip(reason="Not yet implemented")
def test_duplicates_1(photosdb):
    """Test that a photo with a duplicate reports it via PhotoInfo.duplicates.

    NOTE: pytest.mark.skip accepts only a reason; the original passed SKIP_TEST
    positionally AND reason= as a keyword, which raises at collection time
    (skipif would be the conditional form). The intent is to always skip.
    """
    photo = photosdb.get_photo(uuid=UUID_DICT["duplicates"])
    assert len(photo.duplicates) == 1
    assert photo.duplicates[0].uuid == UUID_DUPLICATE
def test_duplicates_2(photosdb):
    """A photo without duplicates reports an empty duplicates list."""
    unique_photo = photosdb.get_photo(uuid=UUID_DICT["no_duplicates"])
    assert not unique_photo.duplicates
def test_compound_query(photosdb):
    """photos() with both persons and albums returns the expected two photos."""
    matches = photosdb.photos(persons=["Katie", "Maria"], albums=["Multi Keyword"])
    found_uuids = {photo.uuid for photo in matches}
    assert len(matches) == 2
    assert UUID_DICT["multi_query_1"] in found_uuids
    assert UUID_DICT["multi_query_2"] in found_uuids
def test_multi_keyword(photosdb):
    """photos() with multiple keywords returns the expected count."""
    matches = photosdb.photos(keywords=["Kids", "wedding"])
    assert len(matches) == 6
def test_multi_album(photosdb):
    """photos() with multiple albums returns the expected count."""
    matches = photosdb.photos(albums=["Pumpkin Farm", "Test Album"])
    assert len(matches) == 3
def test_multi_uuid(photosdb):
    """photos() accepts a list of several UUIDs."""
    matches = photosdb.photos(uuid=[UUID_DICT["favorite"], UUID_DICT["not_favorite"]])
    assert len(matches) == 2
def test_detected_text(photosdb):
    """PhotoInfo.detected_text finds the expected text (or none at all)."""
    for uuid, expected_text in UUID_DETECTED_TEXT.items():
        photo = photosdb.get_photo(uuid=uuid)
        # concatenate every detected candidate, ignoring confidence values
        all_text = " ".join(candidate for candidate, _ in photo.detected_text())
        if expected_text is None:
            assert not all_text
        else:
            assert expected_text in all_text
| 35.596299 | 7,228 | 0.694772 |
import datetime
import os
import os.path
import pathlib
import sqlite3
import tempfile
import time
from collections import Counter, namedtuple
import pytest
import osxphotos
from osxphotos._constants import _UNKNOWN_PERSON
from osxphotos.utils import _get_os_version
# OS version of the machine running the tests (used to gate local-only tests)
OS_VERSION = _get_os_version()
# skip author-machine-only tests unless explicitly enabled AND on macOS 10.15
SKIP_TEST = "OSXPHOTOS_TEST_EXPORT" not in os.environ or OS_VERSION[1] != "15"
# author's personal library (used only by the SKIP_TEST-gated tests)
PHOTOS_DB_LOCAL = os.path.expanduser("~/Pictures/Photos Library.photoslibrary")
# 10.15.7 test library that ships with the repo
PHOTOS_DB = "tests/Test-10.15.7.photoslibrary/database/photos.db"
PHOTOS_DB_PATH = "/Test-10.15.7.photoslibrary/database/photos.db"
PHOTOS_LIBRARY_PATH = "/Test-10.15.7.photoslibrary"
# expected photo counts in the test library
PHOTOS_DB_LEN = 25
PHOTOS_NOT_IN_TRASH_LEN = 23
PHOTOS_IN_TRASH_LEN = 2
PHOTOS_DB_IMPORT_SESSIONS = 17
# every keyword expected in the test library
KEYWORDS = [
    "Kids",
    "wedding",
    "flowers",
    "England",
    "London",
    "London 2018",
    "St. James's Park",
    "UK",
    "United Kingdom",
    "foo/bar",
    "Travel",
    "Maria",
    "Drink",
    "Val d'Isère",
    "Wine",
    "Wine Bottle",
    "Food",
    "Furniture",
    "Pizza",
    "Table",
    "Cloudy",
    "Cord",
    "Outdoor",
    "Sky",
    "Sunset Sunrise",
]
# every named person expected in the test library, plus the unknown-person marker
PERSONS = ["Katie", "Suzy", "Maria", _UNKNOWN_PERSON]
# every album title expected in the test library
ALBUMS = [
    "2018-10 - Sponsion, Museum, Frühstück, Römermuseum",
    "2019-10/11 Paris Clermont",
    "AlbumInFolder",
    "EmptyAlbum",
    "I have a deleted twin",
    "Multi Keyword",
    "Pumpkin Farm",
    "Raw",
    "Sorted Manual",
    "Sorted Newest First",
    "Sorted Oldest First",
    "Sorted Title",
    "Test Album",  # there are 2 albums named "Test Album" for testing duplicate album names
]
# expected photo count per keyword
KEYWORDS_DICT = {
    "Drink": 2,
    "England": 1,
    "Kids": 4,
    "London 2018": 1,
    "London": 1,
    "Maria": 1,
    "St. James's Park": 1,
    "Travel": 2,
    "UK": 1,
    "United Kingdom": 1,
    "Val d'Isère": 2,
    "Wine Bottle": 2,
    "Wine": 2,
    "flowers": 1,
    "foo/bar": 1,
    "wedding": 3,
    "Food": 2,
    "Furniture": 2,
    "Pizza": 2,
    "Table": 2,
    "Cloudy": 2,
    "Cord": 2,
    "Outdoor": 2,
    "Sky": 2,
    "Sunset Sunrise": 2,
}
# expected photo count per person
PERSONS_DICT = {"Katie": 3, "Suzy": 2, "Maria": 2, _UNKNOWN_PERSON: 1}
# expected photo count per album title
ALBUM_DICT = {
    "2018-10 - Sponsion, Museum, Frühstück, Römermuseum": 1,
    "2019-10/11 Paris Clermont": 1,
    "AlbumInFolder": 2,
    "EmptyAlbum": 0,
    "I have a deleted twin": 1,
    "Multi Keyword": 2,
    "Pumpkin Farm": 3,
    "Raw": 4,
    "Sorted Manual": 3,
    "Sorted Newest First": 3,
    "Sorted Oldest First": 3,
    "Sorted Title": 3,
    "Test Album": 2,
}  # Note: there are 2 albums named "Test Album" for testing duplicate album names
# UUIDs in the repo test library, keyed by the property each one exercises
UUID_DICT = {
    "missing": "A1DD1F98-2ECD-431F-9AC9-5AFEFE2D3A5C",
    "favorite": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
    "not_favorite": "A1DD1F98-2ECD-431F-9AC9-5AFEFE2D3A5C",
    "hidden": "A1DD1F98-2ECD-431F-9AC9-5AFEFE2D3A5C",
    "not_hidden": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
    "has_adjustments": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
    "adjustments_info": "7783E8E6-9CAC-40F3-BE22-81FB7051C266",
    "no_adjustments": "D05A5FE3-15FB-49A1-A15D-AB3DA6F8B068",
    "location": "DC99FBDD-7A52-4100-A5BB-344131646C30",
    "no_location": "6191423D-8DB8-4D4C-92BE-9BBBA308AAC4",
    "external_edit": "DC99FBDD-7A52-4100-A5BB-344131646C30",
    "no_external_edit": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
    "export": "D79B8D77-BFFC-460B-9312-034F2877D35B",  # "Pumkins2.jpg"
    "export_tif": "8846E3E6-8AC8-4857-8448-E3D025784410",
    "in_album": "D79B8D77-BFFC-460B-9312-034F2877D35B",  # "Pumkins2.jpg"
    "date_invalid": "8846E3E6-8AC8-4857-8448-E3D025784410",
    "intrash": "71E3E212-00EB-430D-8A63-5E294B268554",
    "not_intrash": "DC99FBDD-7A52-4100-A5BB-344131646C30",
    "intrash_person_keywords": "6FD38366-3BF2-407D-81FE-7153EB6125B6",
    "import_session": "8846E3E6-8AC8-4857-8448-E3D025784410",
    "movie": "D1359D09-1373-4F3B-B0E3-1A4DE573E4A3",
    "description_newlines": "7F74DD34-5920-4DA3-B284-479887A34F66",
    "no_duplicates": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
    "multi_query_1": "D79B8D77-BFFC-460B-9312-034F2877D35B",
    "multi_query_2": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
}
# UUIDs in the author's personal library (SKIP_TEST-gated tests only)
UUID_DICT_LOCAL = {
    "not_visible": "4A836160-51B2-4E32-907D-ECDDB2CEC657",  # IMG_9815.JPG
    "burst": "9A5B4CE6-6A9F-4917-95D4-1C98D14FCE4F",  # IMG_9812.JPG
    "burst_key": "9A5B4CE6-6A9F-4917-95D4-1C98D14FCE4F",  # IMG_9812.JPG
    "burst_not_key": "4A836160-51B2-4E32-907D-ECDDB2CEC657",  # IMG_9815.JPG
    "burst_selected": "75154738-83AA-4DCD-A913-632D5D1C0FEE",  # IMG_9814.JPG
    "burst_not_selected": "89E235DD-B9AC-4E8D-BDA2-986981CA7582",  # IMG_9813.JPG
    "burst_default": "F5E6BD24-B493-44E9-BDA2-7AD9D2CC8C9D",  # IMG_9816.JPG
    "burst_not_default": "75154738-83AA-4DCD-A913-632D5D1C0FEE",  # IMG_9814.JPG
    "live_edited": "54A01B04-16D7-4FDE-8860-19F2A641E433",  # IMG_3203.HEIC
    "live": "8EC216A2-0032-4934-BD3F-04C6259B3304",  # IMG_3259.HEIC
}
# the three photos in the "Pumpkin Farm" album
UUID_PUMPKIN_FARM = [
    "F12384F6-CD17-4151-ACBA-AE0E3688539E",
    "D79B8D77-BFFC-460B-9312-034F2877D35B",
    "1EB2B765-0765-43BA-A90C-0D0580E6172C",
]
# expected order when the album's photos are returned (manual sort order)
ALBUM_SORT_ORDER = [
    "1EB2B765-0765-43BA-A90C-0D0580E6172C",
    "F12384F6-CD17-4151-ACBA-AE0E3688539E",
    "D79B8D77-BFFC-460B-9312-034F2877D35B",
]
# key photo of the "Pumpkin Farm" album
ALBUM_KEY_PHOTO = "D79B8D77-BFFC-460B-9312-034F2877D35B"
# expected UTI of the current version, by UUID
UTI_DICT = {
    "8846E3E6-8AC8-4857-8448-E3D025784410": "public.tiff",
    "7783E8E6-9CAC-40F3-BE22-81FB7051C266": "public.jpeg",
    "1EB2B765-0765-43BA-A90C-0D0580E6172C": "public.jpeg",
}
# expected UTI of the original version, by UUID
UTI_ORIGINAL_DICT = {
    "8846E3E6-8AC8-4857-8448-E3D025784410": "public.tiff",
    "7783E8E6-9CAC-40F3-BE22-81FB7051C266": "public.heic",
    "1EB2B765-0765-43BA-A90C-0D0580E6172C": "public.jpeg",
}
# bundle of expected RAW-related PhotoInfo attribute values for one photo
RawInfo = namedtuple(
    "RawInfo",
    [
        "comment",
        "original_filename",
        "has_raw",
        "israw",
        "raw_original",
        "uti",
        "uti_original",
        "uti_raw",
    ],
)
# expected RAW behavior by UUID: raw-only, raw+jpeg pairs, and jpeg-only cases
RAW_DICT = {
    "D05A5FE3-15FB-49A1-A15D-AB3DA6F8B068": RawInfo(
        "raw image, no jpeg pair",
        "DSC03584.dng",
        False,
        True,
        False,
        "com.adobe.raw-image",
        "com.adobe.raw-image",
        None,
    ),
    "A92D9C26-3A50-4197-9388-CB5F7DB9FA91": RawInfo(
        "raw+jpeg, jpeg original",
        "IMG_1994.JPG",
        True,
        False,
        False,
        "public.jpeg",
        "public.jpeg",
        "com.canon.cr2-raw-image",
    ),
    "4D521201-92AC-43E5-8F7C-59BC41C37A96": RawInfo(
        "raw+jpeg, raw original",
        "IMG_1997.JPG",
        True,
        False,
        True,
        "public.jpeg",
        "public.jpeg",
        "com.canon.cr2-raw-image",
    ),
    "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51": RawInfo(
        "jpeg, no raw",
        "wedding.jpg",
        False,
        False,
        False,
        "public.jpeg",
        "public.jpeg",
        None,
    ),
}
# known filenames for testing original_filename vs the library-renamed filename
ORIGINAL_FILENAME_DICT = {
    "uuid": "D79B8D77-BFFC-460B-9312-034F2877D35B",
    "filename": "D79B8D77-BFFC-460B-9312-034F2877D35B.jpeg",
    "original_filename": "Pumkins2.jpg",
}
# a photo imported as a referenced file vs one copied into the library
UUID_IS_REFERENCE = "A1DD1F98-2ECD-431F-9AC9-5AFEFE2D3A5C"
UUID_NOT_REFERENCE = "F12384F6-CD17-4151-ACBA-AE0E3688539E"
# placeholder for the not-yet-implemented duplicates test
UUID_DUPLICATE = ""
# expected detected text per UUID (None = no text expected)
UUID_DETECTED_TEXT = {
    "E2078879-A29C-4D6F-BACB-E3BBE6C3EB91": "osxphotos",
    "A92D9C26-3A50-4197-9388-CB5F7DB9FA91": None,
}
@pytest.fixture(scope="module")
def photosdb():
    # module-scoped: the test library is parsed once and shared by all tests
    return osxphotos.PhotosDB(dbfile=PHOTOS_DB)
@pytest.fixture(scope="module")
def photosdb_local():
    # author's personal library; only the SKIP_TEST-gated tests use this
    return osxphotos.PhotosDB(dbfile=PHOTOS_DB_LOCAL)
def test_init1():
    """PhotosDB constructs with the db path passed as a keyword argument."""
    db = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
    assert isinstance(db, osxphotos.PhotosDB)
def test_init2():
    """PhotosDB constructs with the db path passed positionally."""
    db = osxphotos.PhotosDB(PHOTOS_DB)
    assert isinstance(db, osxphotos.PhotosDB)
def test_init3():
    """Passing the db path both positionally and as dbfile= must raise."""
    with pytest.raises(Exception):
        assert osxphotos.PhotosDB(PHOTOS_DB, dbfile=PHOTOS_DB)
def test_init4():
    """PhotosDB must raise when given a file that is not a Photos database."""
    (bad_db, bad_db_name) = tempfile.mkstemp(suffix=".db", prefix="osxphotos-")
    os.close(bad_db)

    # both the positional and keyword form must reject the bogus db
    with pytest.raises(Exception):
        assert osxphotos.PhotosDB(bad_db_name)

    with pytest.raises(Exception):
        assert osxphotos.PhotosDB(dbfile=bad_db_name)

    # best-effort cleanup: only swallow filesystem errors, not every
    # exception (the original bare `except:` even caught KeyboardInterrupt)
    try:
        os.remove(bad_db_name)
    except OSError:
        pass
def test_init5(mocker):
    """PhotosDB() with no arguments raises if the last library can't be found."""
    # get_last_library_path lives in utils but is imported into photosdb,
    # so it must be patched at its point of use
    mocker.patch(
        "osxphotos.photosdb.photosdb.get_last_library_path", new=lambda: None
    )
    with pytest.raises(Exception):
        assert osxphotos.PhotosDB()
def test_db_len(photosdb):
    """len(PhotosDB) counts every photo in the library, including trash."""
    assert len(photosdb) == PHOTOS_DB_LEN
def test_db_version(photosdb):
    """The 10.15.7 test library reports database version 6000."""
    assert photosdb.db_version == "6000"
def test_persons(photosdb):
    """All expected persons are present, with no extras."""
    assert "Katie" in photosdb.persons
    assert Counter(photosdb.persons) == Counter(PERSONS)
def test_keywords(photosdb):
    """All expected keywords are present, with no extras."""
    assert "wedding" in photosdb.keywords
    assert Counter(photosdb.keywords) == Counter(KEYWORDS)
def test_album_names(photosdb):
    """All expected album names are present, with no extras."""
    assert "Pumpkin Farm" in photosdb.albums
    assert Counter(photosdb.albums) == Counter(ALBUMS)
def test_keywords_dict(photosdb):
    """keywords_as_dict maps each keyword to its photo count."""
    keyword_counts = photosdb.keywords_as_dict
    assert keyword_counts == KEYWORDS_DICT
    assert keyword_counts["wedding"] == 3
def test_persons_as_dict(photosdb):
    """persons_as_dict maps each person to their photo count."""
    person_counts = photosdb.persons_as_dict
    assert person_counts == PERSONS_DICT
    assert person_counts["Maria"] == 2
def test_albums_as_dict(photosdb):
    """albums_as_dict maps each album title to its photo count."""
    album_counts = photosdb.albums_as_dict
    assert album_counts == ALBUM_DICT
    assert album_counts["Pumpkin Farm"] == 3
def test_album_sort_order(photosdb):
    """Photos in 'Pumpkin Farm' come back in the album's sort order."""
    farm_album = [a for a in photosdb.album_info if a.title == "Pumpkin Farm"][0]
    uuids_in_order = [photo.uuid for photo in farm_album.photos]
    assert uuids_in_order == ALBUM_SORT_ORDER
def test_album_empty_album(photosdb):
    """An album with no photos returns an empty photo list."""
    empty_album = [a for a in photosdb.album_info if a.title == "EmptyAlbum"][0]
    assert empty_album.photos == []
def test_attributes(photosdb):
    """Spot-check the main PhotoInfo attributes for a known photo."""
    photos = photosdb.photos(uuid=["D79B8D77-BFFC-460B-9312-034F2877D35B"])
    assert len(photos) == 1
    p = photos[0]
    assert p.keywords == ["Kids"]
    assert p.original_filename == "Pumkins2.jpg"
    assert p.filename == "D79B8D77-BFFC-460B-9312-034F2877D35B.jpeg"
    # dates are timezone-aware (UTC-4 offset for this photo)
    assert p.date == datetime.datetime(
        2018, 9, 28, 16, 7, 7, 0, datetime.timezone(datetime.timedelta(seconds=-14400))
    )
    assert p.date_added == datetime.datetime(
        2019,
        7,
        27,
        9,
        16,
        49,
        778432,
        tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=72000)),
    )
    assert p.description == "Girl holding pumpkin"
    assert p.title == "I found one!"
    assert sorted(p.albums) == ["Multi Keyword", "Pumpkin Farm", "Test Album"]
    assert p.persons == ["Katie"]
    assert p.path.endswith(
        "tests/Test-10.15.7.photoslibrary/originals/D/D79B8D77-BFFC-460B-9312-034F2877D35B.jpeg"
    )
    assert p.ismissing == False
def test_attributes_2(photosdb):
    """Spot-check attributes, dimensions, and edit state for an edited photo."""
    photos = photosdb.photos(uuid=[UUID_DICT["has_adjustments"]])
    assert len(photos) == 1
    p = photos[0]
    assert sorted(p.keywords) == ["Maria", "wedding"]
    assert p.original_filename == "wedding.jpg"
    assert p.filename == "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51.jpeg"
    assert p.date == datetime.datetime(
        2019,
        4,
        15,
        14,
        40,
        24,
        86000,
        datetime.timezone(datetime.timedelta(seconds=-14400)),
    )
    assert p.description == "Bride Wedding day"
    assert p.title is None
    assert sorted(p.albums) == [
        "AlbumInFolder",
        "I have a deleted twin",
        "Multi Keyword",
    ]
    assert p.persons == ["Maria"]
    assert p.path.endswith(
        "tests/Test-10.15.7.photoslibrary/originals/E/E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51.jpeg"
    )
    assert not p.ismissing
    assert p.hasadjustments
    # edited dimensions differ from the original's dimensions
    assert p.height == 1325
    assert p.width == 1526
    assert p.original_height == 1367
    assert p.original_width == 2048
    assert p.orientation == 1
    assert p.original_orientation == 1
    assert p.original_filesize == 460483
def test_missing(photosdb):
    """A missing photo has no path and reports ismissing == True."""
    matches = photosdb.photos(uuid=[UUID_DICT["missing"]])
    assert len(matches) == 1
    missing_photo = matches[0]
    assert missing_photo.path is None
    assert missing_photo.ismissing == True
def test_favorite(photosdb):
    """The favorite photo reports favorite == True."""
    matches = photosdb.photos(uuid=[UUID_DICT["favorite"]])
    assert len(matches) == 1
    assert matches[0].favorite == True
def test_not_favorite(photosdb):
    """A non-favorite photo reports favorite == False."""
    matches = photosdb.photos(uuid=[UUID_DICT["not_favorite"]])
    assert len(matches) == 1
    assert matches[0].favorite == False
def test_hidden(photosdb):
    """The hidden photo reports hidden == True."""
    matches = photosdb.photos(uuid=[UUID_DICT["hidden"]])
    assert len(matches) == 1
    assert matches[0].hidden == True
def test_not_hidden(photosdb):
    """A non-hidden photo reports hidden == False."""
    matches = photosdb.photos(uuid=[UUID_DICT["not_hidden"]])
    assert len(matches) == 1
    assert matches[0].hidden == False
def test_visible(photosdb):
    """A normal (non-hidden) photo is visible."""
    matches = photosdb.photos(uuid=[UUID_DICT["not_hidden"]])
    assert len(matches) == 1
    assert matches[0].visible
def test_not_burst(photosdb):
    """A non-burst photo reports burst == False."""
    matches = photosdb.photos(uuid=[UUID_DICT["not_hidden"]])
    assert len(matches) == 1
    assert not matches[0].burst
def test_location_1(photosdb):
    """A geotagged photo returns its (latitude, longitude) pair."""
    matches = photosdb.photos(uuid=[UUID_DICT["location"]])
    assert len(matches) == 1
    latitude, longitude = matches[0].location
    assert latitude == pytest.approx(51.50357167)
    assert longitude == pytest.approx(-0.1318055)
def test_location_2(photosdb):
    """A photo without geotag data returns (None, None)."""
    matches = photosdb.photos(uuid=[UUID_DICT["no_location"]])
    assert len(matches) == 1
    latitude, longitude = matches[0].location
    assert latitude is None
    assert longitude is None
def test_hasadjustments1(photosdb):
    """An edited photo reports hasadjustments == True."""
    matches = photosdb.photos(uuid=[UUID_DICT["has_adjustments"]])
    assert len(matches) == 1
    assert matches[0].hasadjustments == True
def test_hasadjustments2(photosdb):
    """An unedited photo reports hasadjustments == False."""
    matches = photosdb.photos(uuid=[UUID_DICT["no_adjustments"]])
    assert len(matches) == 1
    assert matches[0].hasadjustments == False
def test_external_edit1(photosdb):
    """A photo edited in an external editor reports external_edit == True."""
    matches = photosdb.photos(uuid=[UUID_DICT["external_edit"]])
    assert len(matches) == 1
    assert matches[0].external_edit == True
def test_external_edit2(photosdb):
    """A photo never edited externally reports external_edit == False."""
    matches = photosdb.photos(uuid=[UUID_DICT["no_external_edit"]])
    assert len(matches) == 1
    assert matches[0].external_edit == False
def test_path_edited1(photosdb):
    """An edited photo's path_edited points at an existing render file."""
    matches = photosdb.photos(uuid=["E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51"])
    assert len(matches) == 1
    edited_path = matches[0].path_edited
    assert edited_path.endswith(
        "resources/renders/E/E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51_1_201_a.jpeg"
    )
    assert os.path.exists(edited_path)
def test_path_edited2(photosdb):
    """An unedited photo has no path_edited."""
    matches = photosdb.photos(uuid=[UUID_DICT["no_adjustments"]])
    assert len(matches) == 1
    assert matches[0].path_edited is None
def test_path_derivatives(photosdb):
    """Test that PhotoInfo.path_derivatives returns the expected derivative files."""
    photos = photosdb.photos(uuid=[UUID_DICT["no_adjustments"]])
    assert len(photos) == 1
    photo = photos[0]
    paths = photo.path_derivatives
    expected_suffixes = [
        "D05A5FE3-15FB-49A1-A15D-AB3DA6F8B068_1_100_o.jpeg",
        "D05A5FE3-15FB-49A1-A15D-AB3DA6F8B068_1_105_c.jpeg",
    ]
    # check the count explicitly (the original silently passed if fewer
    # derivatives were returned) and iterate with zip instead of indexing;
    # the original loop also shadowed the photo variable `p`
    assert len(paths) == len(expected_suffixes)
    for derivative_path, suffix in zip(paths, expected_suffixes):
        assert derivative_path.endswith(suffix)
def test_ismovie(photosdb):
    """A movie asset reports ismovie == True."""
    movie_photo = photosdb.photos(uuid=[UUID_DICT["movie"]])[0]
    assert movie_photo.ismovie
def test_not_ismovie(photosdb):
    """A still image reports ismovie == False."""
    still_photo = photosdb.photos(uuid=[UUID_DICT["no_adjustments"]])[0]
    assert not still_photo.ismovie
def test_count(photosdb):
    """photos() excludes trashed photos by default."""
    assert len(photosdb.photos()) == PHOTOS_NOT_IN_TRASH_LEN
def test_photos_intrash_1(photosdb):
    """photos(intrash=True) returns exactly the trashed photos."""
    assert len(photosdb.photos(intrash=True)) == PHOTOS_IN_TRASH_LEN
def test_photos_intrash_2(photosdb):
    """Every photo returned with intrash=True reports intrash."""
    assert all(photo.intrash for photo in photosdb.photos(intrash=True))
def test_photos_intrash_3(photosdb):
    """No photo returned with intrash=False reports intrash."""
    assert not any(photo.intrash for photo in photosdb.photos(intrash=False))
def test_photoinfo_intrash_1(photosdb):
    """A trashed photo reports intrash and carries its trashed date."""
    trashed = photosdb.photos(uuid=[UUID_DICT["intrash"]], intrash=True)[0]
    assert trashed.intrash
    assert trashed.date_trashed.isoformat() == "2120-06-10T11:24:47.685857-05:00"
def test_photoinfo_intrash_2(photosdb):
    """A trashed photo is not returned unless intrash=True is requested."""
    assert not photosdb.photos(uuid=[UUID_DICT["intrash"]])
def test_photoinfo_intrash_3(photosdb):
    """A trashed photo keeps its persons and keywords."""
    trashed = photosdb.photos(uuid=[UUID_DICT["intrash_person_keywords"]], intrash=True)[0]
    assert trashed.intrash
    assert "Maria" in trashed.persons
    assert "wedding" in trashed.keywords
def test_photoinfo_intrash_4(photosdb):
    """A person query with intrash=True matches a trashed photo."""
    trashed = photosdb.photos(persons=["Maria"], intrash=True)[0]
    assert trashed.intrash
    assert "Maria" in trashed.persons
    assert "wedding" in trashed.keywords
def test_photoinfo_intrash_5(photosdb):
    """A keyword query with intrash=True matches a trashed photo."""
    trashed = photosdb.photos(keywords=["wedding"], intrash=True)[0]
    assert trashed.intrash
    assert "Maria" in trashed.persons
    assert "wedding" in trashed.keywords
def test_photoinfo_not_intrash(photosdb):
    """A photo not in the trash has no trashed date."""
    photo = photosdb.photos(uuid=[UUID_DICT["not_intrash"]])[0]
    assert not photo.intrash
    assert photo.date_trashed is None
def test_keyword_2(photosdb):
    """Keyword search excludes photos in the trash."""
    wedding_photos = photosdb.photos(keywords=["wedding"])
    assert len(wedding_photos) == 2  # a third match sits in the trash
def test_keyword_not_in_album(photosdb):
    """Exactly one 'Kids' photo is outside the Pumpkin Farm album."""
    farm_photos = photosdb.photos(albums=["Pumpkin Farm"])
    kids_photos = photosdb.photos(keywords=["Kids"])
    outside = [photo for photo in kids_photos if photo not in farm_photos]
    assert len(outside) == 1
    assert outside[0].uuid == "A1DD1F98-2ECD-431F-9AC9-5AFEFE2D3A5C"
def test_album_folder_name(photosdb):
    """Album query returns exactly the expected Pumpkin Farm photos."""
    farm_photos = photosdb.photos(albums=["Pumpkin Farm"])
    assert sorted(photo.uuid for photo in farm_photos) == sorted(UUID_PUMPKIN_FARM)
def test_multi_person(photosdb):
    """photos() with multiple persons returns the expected count."""
    matches = photosdb.photos(persons=["Katie", "Suzy"])
    assert len(matches) == 3
def test_get_db_path(photosdb):
    """db_path points at the library's database file."""
    assert photosdb.db_path.endswith(PHOTOS_DB_PATH)
def test_get_library_path(photosdb):
    """library_path points at the .photoslibrary bundle."""
    assert photosdb.library_path.endswith(PHOTOS_LIBRARY_PATH)
def test_get_db_connection(photosdb):
    """Test that get_db_connection returns a usable sqlite3 connection/cursor."""
    conn, cursor = photosdb.get_db_connection()
    assert isinstance(conn, sqlite3.Connection)
    assert isinstance(cursor, sqlite3.Cursor)
    try:
        # exactly one favorite photo in the test library
        results = conn.execute(
            "SELECT ZUUID FROM ZGENERICASSET WHERE ZFAVORITE = 1;"
        ).fetchall()
        assert len(results) == 1
        assert results[0][0] == "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51"
    finally:
        # close even when an assertion fails so the connection doesn't leak
        conn.close()
def test_export_1(photosdb):
    """Export with default filename uses the photo's original filename."""
    export_dir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    photo = photosdb.photos(uuid=[UUID_DICT["export"]])[0]
    expected = os.path.join(export_dir.name, photo.original_filename)
    exported = photo.export(export_dir.name)[0]
    assert exported == expected
    assert os.path.isfile(exported)
def test_export_2(photosdb):
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["export"]])
timestamp = time.time()
filename = f"osxphotos-export-2-test-{timestamp}.jpg"
expected_dest = os.path.join(dest, filename)
got_dest = photos[0].export(dest, filename)[0]
assert got_dest == expected_dest
assert os.path.isfile(got_dest)
def test_export_3(photosdb):
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["export"]])
filename = photos[0].original_filename
filename2 = pathlib.Path(filename)
filename2 = f"{filename2.stem} (1){filename2.suffix}"
expected_dest_2 = os.path.join(dest, filename2)
got_dest = photos[0].export(dest)[0]
got_dest_2 = photos[0].export(dest)[0]
assert got_dest_2 == expected_dest_2
assert os.path.isfile(got_dest_2)
def test_export_4(photosdb):
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["export"]])
timestamp = time.time()
filename = f"osxphotos-export-2-test-{timestamp}.jpg"
filename2 = f"osxphotos-export-2-test-{timestamp} (1).jpg"
expected_dest_2 = os.path.join(dest, filename2)
got_dest = photos[0].export(dest, filename)[0]
got_dest_2 = photos[0].export(dest, filename)[0]
assert got_dest_2 == expected_dest_2
assert os.path.isfile(got_dest_2)
def test_export_5(photosdb):
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["export"]])
filename = photos[0].original_filename
expected_dest = os.path.join(dest, filename)
got_dest = photos[0].export(dest)[0]
got_dest_2 = photos[0].export(dest, overwrite=True)[0]
assert got_dest_2 == got_dest
assert got_dest_2 == expected_dest
assert os.path.isfile(got_dest_2)
def test_export_6(photosdb):
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["export"]])
timestamp = time.time()
filename = f"osxphotos-export-test-{timestamp}.jpg"
expected_dest = os.path.join(dest, filename)
got_dest = photos[0].export(dest, filename)[0]
got_dest_2 = photos[0].export(dest, filename, overwrite=True)[0]
assert got_dest_2 == got_dest
assert got_dest_2 == expected_dest
assert os.path.isfile(got_dest_2)
def test_export_7(photosdb):
    """Re-export with increment=False raises FileExistsError.

    The original wrapped the call in a bare ``assert`` inside
    ``pytest.raises`` (dead code once the exception fires) and compared
    ``e.type`` against ``type(FileExistsError())``; asserting the exception
    class directly in ``pytest.raises`` is both stricter and clearer.
    """
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["export"]])
    # first export succeeds and creates the destination file
    photos[0].export(dest)
    # second export must fail because the destination already exists
    with pytest.raises(FileExistsError):
        photos[0].export(dest, increment=False)
def test_export_8(photosdb):
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["missing"]])
assert photos[0].export(dest) == []
def test_export_9(photosdb):
    """Exporting the edited version of an unedited photo raises ValueError."""
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["no_adjustments"]])
    # pytest.raises asserts the exact exception type; the previous
    # assert-inside-raises pattern was dead code and `filename` was unused
    with pytest.raises(ValueError):
        photos[0].export(dest, edited=True)
def test_export_10(photosdb):
    """edited=True with an explicit filename still raises ValueError when the photo has no edits."""
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    photos = photosdb.photos(uuid=[UUID_DICT["no_adjustments"]])
    timestamp = time.time()
    filename = f"osxphotos-export-test-{timestamp}.jpg"
    # assert the exact exception type via pytest.raises instead of the
    # dead assert-inside-raises pattern used previously
    with pytest.raises(ValueError):
        photos[0].export(dest, filename, edited=True)
def test_export_11(photosdb):
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["has_adjustments"]])
timestamp = time.time()
filename = f"osxphotos-export-test-{timestamp}.jpg"
expected_dest = os.path.join(dest, filename)
got_dest = photos[0].export(dest, filename, edited=True)[0]
assert got_dest == expected_dest
def test_export_12(photosdb):
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["has_adjustments"]])
edited_name = pathlib.Path(photos[0].path_edited).name
edited_suffix = pathlib.Path(edited_name).suffix
filename = (
pathlib.Path(photos[0].original_filename).stem + "_edited" + edited_suffix
)
expected_dest = os.path.join(dest, filename)
got_dest = photos[0].export(dest, edited=True)[0]
assert got_dest == expected_dest
def test_export_13(photosdb):
    """Export to a nonexistent destination directory raises FileNotFoundError."""
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    dest = tempdir.name
    # extend the path until it is guaranteed not to exist
    i = 0
    while os.path.isdir(dest):
        dest = os.path.join(dest, str(i))
        i += 1
    photos = photosdb.photos(uuid=[UUID_DICT["export"]])
    # pytest.raises asserts the exact exception type; the previous
    # assert-inside-raises pattern was dead code and `filename` was unused
    with pytest.raises(FileNotFoundError):
        photos[0].export(dest)
def test_export_14(photosdb, caplog):
# test export with user provided filename with different (but valid) extension than source
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["export_tif"]])
timestamp = time.time()
filename = f"osxphotos-export-2-test-{timestamp}.tif"
expected_dest = os.path.join(dest, filename)
got_dest = photos[0].export(dest, filename)[0]
assert got_dest == expected_dest
assert os.path.isfile(got_dest)
assert "Invalid destination suffix" not in caplog.text
def test_export_no_original_filename(photosdb):
# test export OK if original filename is null
# issue #267
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["export"]])
# monkey patch original_filename for testing
original_filename = photos[0]._info["originalFilename"]
photos[0]._info["originalFilename"] = None
filename = f"{photos[0].uuid}.jpeg"
expected_dest = os.path.join(dest, filename)
got_dest = photos[0].export(dest)[0]
assert got_dest == expected_dest
assert os.path.isfile(got_dest)
photos[0]._info["originalFilename"] = original_filename
def test_eq():
    """The same photo loaded from two separate PhotosDB instances compares equal."""
    photosdb1 = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
    photosdb2 = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
    photos1 = photosdb1.photos(uuid=[UUID_DICT["export"]])
    photos2 = photosdb2.photos(uuid=[UUID_DICT["export"]])
    assert photos1[0] == photos2[0]
def test_eq_2():
photosdb1 = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
photosdb2 = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
photos1 = photosdb1.photos(uuid=[UUID_DICT["in_album"]])
photos2 = photosdb2.photos(uuid=[UUID_DICT["in_album"]])
# memoize a value
albums = photos1[0].albums
assert albums
assert photos1[0] == photos2[0]
def test_not_eq(photosdb):
photos1 = photosdb.photos(uuid=[UUID_DICT["export"]])
photos2 = photosdb.photos(uuid=[UUID_DICT["missing"]])
assert photos1[0] != photos2[0]
def test_photosdb_repr():
photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
photosdb2 = eval(repr(photosdb))
ignore_keys = ["_tmp_db", "_tempdir", "_tempdir_name", "_db_connection"]
assert {k: v for k, v in photosdb.__dict__.items() if k not in ignore_keys} == {
k: v for k, v in photosdb2.__dict__.items() if k not in ignore_keys
}
def test_photosinfo_repr(photosdb):
photos = photosdb.photos(uuid=[UUID_DICT["favorite"]])
photo = photos[0]
photo2 = eval(repr(photo))
assert {k: str(v).encode("utf-8") for k, v in photo.__dict__.items()} == {
k: str(v).encode("utf-8") for k, v in photo2.__dict__.items()
}
def test_from_to_date(photosdb):
    """from_date/to_date filter photos by date using naive datetimes."""
    # pin the process timezone so the naive-datetime comparisons below are
    # deterministic regardless of the machine running the tests
    os.environ["TZ"] = "US/Pacific"
    time.tzset()
    photos = photosdb.photos(from_date=datetime.datetime(2018, 10, 28))
    assert len(photos) == 16
    photos = photosdb.photos(to_date=datetime.datetime(2018, 10, 28))
    assert len(photos) == 7
    # both bounds together select a single day's worth of photos
    photos = photosdb.photos(
        from_date=datetime.datetime(2018, 9, 28), to_date=datetime.datetime(2018, 9, 29)
    )
    assert len(photos) == 4
def test_from_to_date_tz(photosdb):
os.environ["TZ"] = "US/Pacific"
time.tzset()
photos = photosdb.photos(
from_date=datetime.datetime(2018, 9, 28, 13, 7, 0),
to_date=datetime.datetime(2018, 9, 28, 13, 9, 0),
)
assert len(photos) == 1
assert photos[0].uuid == "D79B8D77-BFFC-460B-9312-034F2877D35B"
photos = photosdb.photos(
from_date=datetime.datetime(
2018,
9,
28,
16,
7,
0,
tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=72000)),
),
to_date=datetime.datetime(
2018,
9,
28,
16,
9,
0,
tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=72000)),
),
)
assert len(photos) == 1
assert photos[0].uuid == "D79B8D77-BFFC-460B-9312-034F2877D35B"
def test_date_invalid():
# doesn't run correctly with the module-level fixture
from datetime import datetime, timedelta, timezone
import osxphotos
photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
photos = photosdb.photos(uuid=[UUID_DICT["date_invalid"]])
assert len(photos) == 1
p = photos[0]
delta = timedelta(seconds=p.tzoffset)
tz = timezone(delta)
assert p.date == datetime(1970, 1, 1).astimezone(tz=tz)
def test_date_modified_invalid(photosdb):
photos = photosdb.photos(uuid=[UUID_DICT["date_invalid"]])
assert len(photos) == 1
p = photos[0]
assert p.date_modified is None
def test_import_session_count(photosdb):
import_sessions = photosdb.import_info
assert len(import_sessions) == PHOTOS_DB_IMPORT_SESSIONS
def test_import_session_photo(photosdb):
photo = photosdb.get_photo(UUID_DICT["import_session"])
import_session = photo.import_info
assert import_session.creation_date == datetime.datetime(
2020,
6,
6,
7,
15,
24,
729811,
tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=61200), "PDT"),
)
assert import_session.start_date == datetime.datetime(
2020,
6,
6,
7,
15,
24,
725564,
tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=61200), "PDT"),
)
assert import_session.end_date == datetime.datetime(
2020,
6,
6,
7,
15,
24,
725564,
tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=61200), "PDT"),
)
assert len(import_session.photos) == 1
def test_uti(photosdb):
for uuid, uti in UTI_DICT.items():
photo = photosdb.get_photo(uuid)
assert photo.uti == uti
assert photo.uti_original == UTI_ORIGINAL_DICT[uuid]
def test_raw(photosdb):
for uuid, rawinfo in RAW_DICT.items():
photo = photosdb.get_photo(uuid)
assert photo.original_filename == rawinfo.original_filename
assert photo.has_raw == rawinfo.has_raw
assert photo.israw == rawinfo.israw
assert photo.uti == rawinfo.uti
assert photo.uti_original == rawinfo.uti_original
assert photo.uti_raw == rawinfo.uti_raw
def test_verbose(capsys):
photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB, verbose=print)
captured = capsys.readouterr()
assert "Processing database" in captured.out
def test_original_filename(photosdb):
uuid = ORIGINAL_FILENAME_DICT["uuid"]
photo = photosdb.get_photo(uuid)
assert photo.original_filename == ORIGINAL_FILENAME_DICT["original_filename"]
assert photo.filename == ORIGINAL_FILENAME_DICT["filename"]
original_filename = photo._info["originalFilename"]
photo._info["originalFilename"] = None
assert photo.original_filename == ORIGINAL_FILENAME_DICT["filename"]
photo._info["originalFilename"] = original_filename
# They test things difficult to test in the test libraries
@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_not_visible_burst(photosdb_local):
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["not_visible"])
assert not photo.visible
assert photo.burst
@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_visible_burst(photosdb_local):
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["burst"])
assert photo.visible
assert photo.burst
assert len(photo.burst_photos) == 4
@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_burst_key(photosdb_local):
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["burst_key"])
assert photo.burst_key
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["burst_not_key"])
assert not photo.burst_key
@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_burst_selected(photosdb_local):
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["burst_selected"])
assert photo.burst_selected
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["burst_not_selected"])
assert not photo.burst_selected
@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_burst_default_pic(photosdb_local):
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["burst_default"])
assert photo.burst_default_pick
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["burst_not_default"])
assert not photo.burst_default_pick
@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_path_edited_live_photo(photosdb_local):
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["live_edited"])
assert photo.path_edited_live_photo is not None
@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_path_edited_live_photo_not_edited(photosdb_local):
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["live"])
assert photo.path_edited_live_photo is None
def test_is_reference(photosdb):
photo = photosdb.get_photo(UUID_IS_REFERENCE)
assert photo.isreference
photo = photosdb.get_photo(UUID_NOT_REFERENCE)
assert not photo.isreference
def test_adjustments(photosdb):
from osxphotos.adjustmentsinfo import AdjustmentsInfo
photo = photosdb.get_photo(UUID_DICT["adjustments_info"])
adjustments = photo.adjustments
assert isinstance(adjustments, AdjustmentsInfo)
assert adjustments.asdict() == {
"data": b"mW[\xb7\xa2:\xb3\xfd/\xbe\xda\xa3\x17((\xf4\x18\xdf\x03H\xc2E\xb9%\\\xc4\xb3\xce\x03\x02\x12.\x82\n\x1at\x8f\xfd\xdf\xbf\xb8\xba\xfb\xec\xdec\x1c\xde\x92\xaa\xcc\x9aU\t\xa9\x99\xbff\x8f\xe26T}gv\xa7~\xf6\xe3\xaf\xd9\xf1^\xb5\xb9s?\x1f\x8b\xdb\xec\xc7\x8c\x97\xf5\xf5r\xf6m\x96^.\xd1O\xbf\xf7\xe4\x8a\xff\xce}\xe7\x17\x1c3\x0c\x19)\xce)*\x1e\xd5O#\xffmvi\xd3\xf1\xd4\xdf\xce\xcc\xd3\xc5\xfb\xd9\xdf\xdff\xe7bL\xf3tL\xdf\xf8\xe7t\x18\x8b[\\\xe5#\x99\xfdXr\x0b\x81-\xa8.E[u\xc5?\x11\xd8\xba\xef\x02C\xff\xe9l\x14UI\xc6\xd9\x0f\x81[.\xbe\xcd\xfa[Utc:\xfe\x0c\xc7\xd0\xdf\xb1\xd2\xf1\xff\x163\x06i^\xdf\x87\xf1\xcc\xdc\x86\xd9\x8f\xff\xf9\xeb\xff\xf1(\xba\xf4\xd8\x16\xf9\xec\xc7x\xbb\x17,\x8bb\x1c\xab\xae\x1c\xde\x04\xfb\xd3\x89\rw}\x96\xb6\xbb\x9fq\xb9o\xbf&\r6n\xdfs\xc3\xd7d\xd5]\xee\xe3o\x9f\xefKn\xbd\x14\xc4\xe5\x8a\x93\x16\xc2ZX,\xe4\xdf\xab\xc0t\xe9\x87\xfb\xad\xf8\x03Hm\xd3\xac\xf1\xfa\xaa\xfb\x13]\xbd\xbd\xa1\xbab\xf8\x89>\xbcs\x1c\xc6*\xfbbu\xe1\x16\xef \x1c\xb7\x96\x84%\xbf\\/DA\xe6xy\xc5\xadY\xfdD\xee\xcb&K\xdcR^\xf0\xe2JZ-\xd6\x82\xc8I\xac\x12\xf7\xb1\x8f\xd2\xf6\xfe\x0e\xfe}!\x89+\xee\x8f\x8f\x15\xf3\xf8'\x11\x86\xbe\xe4\xe5\xf5J\xe4Y\xa5EYZ\xf0k\xf1\xdbl\xec\xbb\xb4EiW\x16\xbf\x82\x08\xe2j\xcd\t\xb2\xb4\\\x8bk\xf1\xbd}\x0b\xf1\xcb\xb2\x14\x17\xb2\xc0\xf3\xeb\x95\xb0\xe6DIZ,\x99I\x96\xde&Q\xfe\xf7\xc7\x88}\x95\xd1N/l\xb3at\xd9\xe6\xdc\xe5\x88\xa3\xc6\x8f\x15q\x8f\xf8\xc6\x89U'\x860\xb9\xda\x1b\xf7b\xc1\xf2\x18\xab\xe7;\xe4\x13Ro\x82\xb5%\x83\xaa\xe1\x0e\xc4\x8c-\xd8\xf2\x9e\x19\xe9m\x9c\xf2\xf9\x18\xc7r\x9a\xb5\xfcb\xbfl\xb5\xcf\x0fbQ\xad\r\xbd\xa8\xc9\x13\x0bf^\x84\x94\t\xaa\x073\x06$\xd1#\x07\xc4\xaa\xb5\x07m\x92\xc4\x1b\xdd\xb4\xd2\xd6I\xa6G\t\x97Jy\x0co4\xcc\xc5\x88\x8f\x0eC\xb4\xe0\x0fG\xfe2\xed\x8d\xe8T\xa8gM\xc3\x8d\x13Q1fD\xa2H\x831\xe2s#\xe2\xc8\x1e\xc3\x9c\xe1\xb6\x0c\xb7\t\xe2\xe6fz\xe9\xf0\xf8\xfc\x08\xd7\xa2\xc6\x0f\xdeAEcx>\x84)\x8c\xae\xd1\x83\x1b\x86Mm\xc5\xa7)k[Q\x80Op\xc0\xaa\xca\x80\x92c\xa46\x19\x08\x84\xd0\x00\xf9\x1eG\xc4b\x80\x07\xdc
\xb6\xdb\x98\x1b\xb3\x00\xf2\xf6\xbe\x8aJt\x02\xce\xa6\x94[\xb7C\xf8\x14\xa1>\xd2/Q\xf3,??\xb6\\\x98!\xd2p\xa1\xd7\xbb\xa6j\x9d\xd0\x9c1\xa3\x9c\xa3\xbd\xec\xd4P\xe5\x04\xc3\xdf\x80\x97m\xdc\x8c\xc7/\xc0F,\x83\x05\xf4\x92\x92\xd3\xb5\xd8\xe7\x1fZ\xf4\xf9\x11\x19\xf6\xa2\xdc\xc0!\x12\xac\r?\xc5%L\xa5\x90\x12\x13C\xd5\x0c\xa3\t\xed\xdd\xb8\xc7\x11\xaa\xb6x\xab\x9aI\xf3\x8ba\xc3\xf6\x8e\x9f\x18 \x7f\xfa\x02$\xacV~\xe8\xc4\xad\xb5rt;\xcc\x91\xca;\xb2\xb2\xa7\x93\xdb\x81\xa7\x1f\x00b#\xad\xc9\xf6\x08e!\x8c\xca\x18?\xbd\xc2J\xb3\xea\x10^\xaa/\x82\xdc\x9b \xc3\x0b\x7f\xe1\xb5\xb0\xd1\xe2\xc4QK\xf1\x1ey\x02r\xc9\xd6\x02HA\x00\x99\x18t~\x98\xf3\xa2\x94$!\x8a&'\x82\x93\xbf\xe7P\xbe\x87\xe7\xb2\xfd\xfch\x96\x9f\x1f\xf8!\xff\xc30\xe4\x8b\xdf\x88\xe1\xdevsU\x1c\xbdk\xc96\x8b\xce\xe5mB\xaf=l\xb9\xb8s\x8e7^\\\xb2cD\xae\xefc\xd9\xf6\xfb\x18E7k\xa4\x97X\x9b\x9f\xf0]Y\xed\xc1\xa5\xfb\xaa!\xf7\xab\x86<l\xbde\xdf\x1fp\x1e\x9a\xb1\x99\x14jG\xf4s\x9f\x132\xef\x8d.\xa9m\x1c\x1fL\xbd\xd9?T\xb0\xc3\x9f\x1f\xd6\x96\x01\x1c\xf5\xa6\x8coj\xb1E)\xb1W\xcd\xeb\x10\xe4\xb2\xcbq\x9f\x1fy0w|\x9e7\x82p'\x04\xe5\xa4\x10\xedI\x91\x8b@\x0c\xe2\x81\xac'\xbf5_\xc3\x0b\x05H\xb79\xfb\xee\xa1q\x05\xfa\x88\xa56\x15\x10R\x0f(\x92\xab\xbd|\x84\xc8\x0e\x82\x81\xe2;\xd9J\xc6\xc5?f\x13}\xc0'\xf5\xfcR8i1\x87_\xca<\xd5(\xf5\x81\x1a>\xb5)\xb9x5\xef\xfaP\x91\x02\xed\x00\x1c\xa7\xbf6\xe1\x93B\xc8!\x8d2<\x02|\x80\x8c\x1e\xc4\nN\xc8Xou\xfb\xe2W\xc9\xc2|\xf9\xc7\xb4\x94oo\x1c\x9d\nX#\xbd\xa3Q\x0eCl\x16\xce\xb3a\xd9\xc8\x9b0\x18\xed\xddR\xb4\x1f\xaf+\x82j\x883\x04\xcf\xf0\x98\xc5t\xf2}\xfd\xe4xm\xab\xd6a\x1c\xde\x0e\xf8\xd0\x99\xe7KtT\xa31\xea\x14'\xf3\xb9\x9d\x86\xedt\x8b\xc1`\xe2\xbe\xb6kE\xb2_bV@Q4\xba\xa6|Vk\xdf\x16{O#\xd3\x11l\xa8g\xa2tm\xb8M\xb8\xa6\x82\xa9\xf9\x99WD\x8el\xb8y\x9c\xc1v\x02\x9d\xe2\xea>54\xc4\x9d\xed']\xee\xb4\xecfW\r\xb55n(\xf4\x8d\x9d\xec\xe9\xe3\xa4\xae6\xd66\xaa\x16j\x04\xe1\xa8`\xaa|~\x9c\xb4K\xef\x18>\x97\xb3\x04=\xb1\\\x9c4?q6H\xe6\xad\x8b\xe9\xe5\x94_j\x88\x01\xe3Ar\xb8\x90\xf3kG\xd9\xd5\xc3\x
dd\xc5D\xda\xdf\x9d\xbal\nEOh\xd9U\xaf\xb3\xc1\x9b\x87\x0b\xe9pp:\xf7s\xfa\xf9!k~co\xc9\xee\xbc=\xd9\xaeD\x17\x08t\t\xceU\x93U\x88\xc3\xa6B\x91\xa5\r\x12\xae\xc7\xad\x0b\x92\x97\xaf\xeb\xca\xc1TV\xb5\x9en\"\xc1\xce\xab\xca\x9ao\xe5vs\xf3\xe5\xd1\x08\xedC\x80^km\x0e\x1c\x80\xfc\x00\x9at\x7fUwW\xb0\xf5
"editor": "com.apple.Photos",
"format_id": "com.apple.photo",
"base_version": 0,
"format_version": "1.5",
"adjustments": [
{
"formatVersion": 1,
"enabled": True,
"settings": {
"offsetLocalLight": 0,
"offsetHighlights": 0,
"inputLight": 0.3073453608247423,
"offsetExposure": 0,
"offsetBlackPoint": 0,
"offsetBrightness": 0,
"statistics": {
"p02": 0.00784313725490196,
"p50": 0.09803921568627451,
"autoValue": 0.2856,
"blackPoint": 0.0031976514035982175,
"tonalRange": 0.09845670498375754,
"p25": 0.03529411764705882,
"p98": 0.6,
"lightMap": "FVpKd0pbSVkQWA5XR1kNWBNWFFYqMCOpJFgbWBmuF1YhjCT7J9Eik0ZhIWJFl1PIVGlWa1dtWW9acl12X3lD/hJwDlUPVkdYJFcPVRAxFBZIWEhYGVNEWBJXElYYWCGIJalNYxvgF3AgbUrwUd5V1VZsV21Zb1pxXHVfeBmDDSkNVw5WF1YVVDFWR1dHV0hXSFdIWElYGVkTWkrIPasv/U75D1sPZBRtUmZUaFVqVv0ssCjJWfxcll54FyEZSBBWR1YbVBkcET4UHEdXSVhJWElZSllKW0tcTF1MXiVgRfENCg9lOnRSfVRoVGpVkyg/K0UcRhk0UPoOIBJfR+dHVw0NDzMaHB9YSFhJWElZSlpKWktbTF1MXk5gT2FPYg0GDWQ1vDV/VHM2gCFsV4JC1xWgFa8UwhISFBIUVxRXOWoSVRiKSKBIYklZSllKWkpbS1xMXk1fT2FPYhBmDQUNWlJ6NGMUdRB1N9AXwxOnEyQTEhMRDkcXRRcUFVgWSyPeJaciZUpiSlpKW0tbTFxMXU1fT2FPYlFkDWYNBg5uVP4ROhKJERARERISEnQUd158YYURVxNVFxQX0RdXFmgl/k3/Sv9KWkpbS1xMXU1eT2FPYlFkUXMOdB5tPqgv/w+9KYwqoFl0WnNbr153X3lhq0pbSloXWRVrJtwpWD+fSuA6XEpnTF1MX05gT2FPY1FlP3ooZSdUIWIYeBnhGmodhh+oHnYjMSWZIGkXvBELS/JKXEpbGkgWrBeKRahM6kzZTd9O00/dT+NQ11HTUL4TgxAhDywROREWEWsh7xQlIzszRTRGM0MuPRt6EoVMXUxeFFwPEA8ODzQRRhLFEswSuhK8HpQbcxwvFywPQg4fDW0SzA+aDwwQEBUyDxYpPj1OQFA8TDZENNoqkUywFF0RDw8ODhEQERHoEWASYhtjKGMpQiY2IzQbag9rDwwQGw4SDhoNDw0SFSIeNyk9O09CUTtML35MvzqRFBUScRFmFbcWwxQQGfNPllBjUWUrZSZnImpVbBVtVnANcQ0LDSMaKSEsISojMjA8Mz5ceF55Hnkgyi7QM5oPDhxbECwPIRa7HOkU7w4IDQcPeVN9HOdWcFlxEnAOGQwHDR0mMyw3LDcrMikwMD0seGCMYXwvfB6CJKVi2BVFFtASwA/fDpoNHQ0dDwwP5g2fDQYNCR91JpIPdw13DRAOGSs8N0U0QjNALjsuOSh8XuNjgkeAGYwgnizmH1IZphnSTfmo+w/9DQkMKhLmKfMO8w2REnYSdBIRFB0SIAwRJDs8SjtKOEYzQGGAZIA6jGaCV4MdiiJ+K9lCrQ9tHUMZTRz7D+ENERQTFIwXqBLqEKQVGRkgHCQdJR0nDR4NKylEKUgpRCQ8D4FmhFqOZ9NjiBmDGZUw9FnPDa8QqBnNOMcRxRwnGjMdYRwfGRoUGiEsJjArNSk1JDQfLg0KFhwlLCsyDzAPFg8NUolmiGuMLp8jnCCdJKMZlBEsEB8SPh7jHSclLiYvJDIjLyEzKzwzRDNFMUQxRBEzEhMXGhwnKEcSERE9ETcSj1GPaJVWkxiOHoweoxkpFB0ODg0nDyMjNS47Mj0yPjA+ITUhQTpOPVE5Sw1CEQ0XICMvJS4qahVNJlw4dR9mKFckZyR1GZ0TPyOhHFYMEw8ZEBMdJSImHjohPiNAMD8sPCs0LTkkNg0bDBcMFRgmHSksOyzdJMAeaC/PI4UnqSVPH34UhBNCD2UPJw9qExsYIyMnIiUhJSQuJzwyQDVDMT0uOCMvDhcMIhQUDRAnPTJ4L4kjvidvMNouliyFJmshqhtvEzgblxlgJn0pjiEqIigjKSUrJ3s+Tj1NNkUzQit2DlISDg0NFXAMCw8dGEsfkje/KHgimSVgLrcXRR6TErcPcxt3FGwhjh23FKonMidwFEcUnw8vEK8QChBPGcoNBxMSDkEUaA4UElYWPx9wHaEmzxedF1AbVRlpGmAajRFjHJk
VcxySIn0TihdyElMSLBXSJOYY7RAWEQsRsQ0HFRYOPhMZF4UZgBaAGlwgxSTDFakWhCWlFZYXdhZkD4INXQ9iD2td3w5yEZoNVQ/RL9cSuxfIFFkQCg8XDR4UGRdBGV4fsxhuFcYtjiDYHIwbihiEE5QRbRVlFHISUQ1TEFgPaA2cD4ASxw9kFowpnhyLHG0hbg9YDwgNCg0PGVohgSO7F54XghvBFoUXmhY9GIwWfxNhE34PMRKhEekOxw5uDykNVhF6F8sr0CWhLpQ1/yL+HqgOCA0HDUsqtiuyJYYUtRJhFXoTaxNoD04SeBOBE5MURRE+ES4PDw0LDhoVFw9QEpIQahy2D24RQxF2ENsQjA4JDQUOPiHJKIQVaw8qEmYSVg8wEnUPUw15EXUssRFhEVEQaRkbEnYMDA+bEX4UkRJ1G8AcuQ9fDB4Taw+cDQcNBRNBGtMczSOHI4YTUREfEVkXkBx8EoQTnRNuDnoNJg4wElsNYRWjE8MSYyPTTeFJuA2gDAUNjQ+WDysNBw0JHlkREynRF6YenRNkEZAPLQ9KGXEPnhGSD3gPfg0gD3o=",
"localAutoValue": 0.36000000000000004,
"whitePoint": 1.003921568627451,
"p10": 0.01568627450980392,
"highKey": 0.8063460882459689,
},
"offsetContrast": 0,
"offsetShadows": 0,
},
"identifier": "SmartTone",
}
],
"metadata": {
"masterWidth": 3024,
"pipelineVersion": "OSX.4",
"masterHeight": 4032,
"orientation": 1,
},
"orientation": 1,
"adjustment_format_version": 1,
"version_info": {
"buildNumber": "19G73",
"appVersion": "161.0.120",
"schemaRevision": 1,
"platform": "OSX",
},
"timestamp": "2020-10-03T22:54:20+00:00",
}
def test_no_adjustments(photosdb):
photo = photosdb.get_photo(UUID_DICT["no_adjustments"])
assert photo.adjustments is None
def test_exiftool_newlines_in_description(photosdb):
photo = photosdb.get_photo(UUID_DICT["description_newlines"])
exif = photo._exiftool_dict()
assert photo.description.find("\n") > 0
assert exif["EXIF:ImageDescription"].find("\n") > 0
# pytest.mark.skip accepts only `reason`; passing SKIP_TEST positionally (as
# the original did) conflicts with the keyword — the conditional form would
# be pytest.mark.skipif. Since the reason is "Not yet implemented", an
# unconditional skip is what was intended.
@pytest.mark.skip(reason="Not yet implemented")
def test_duplicates_1(photosdb):
    """A photo with a duplicate lists it in photo.duplicates."""
    photo = photosdb.get_photo(uuid=UUID_DICT["duplicates"])
    assert len(photo.duplicates) == 1
    assert photo.duplicates[0].uuid == UUID_DUPLICATE
def test_duplicates_2(photosdb):
# test photo does not have duplicates
photo = photosdb.get_photo(uuid=UUID_DICT["no_duplicates"])
assert not photo.duplicates
def test_compound_query(photosdb):
photos = photosdb.photos(persons=["Katie", "Maria"], albums=["Multi Keyword"])
assert len(photos) == 2
assert UUID_DICT["multi_query_1"] in [p.uuid for p in photos]
assert UUID_DICT["multi_query_2"] in [p.uuid for p in photos]
def test_multi_keyword(photosdb):
photos = photosdb.photos(keywords=["Kids", "wedding"])
assert len(photos) == 6
def test_multi_album(photosdb):
photos = photosdb.photos(albums=["Pumpkin Farm", "Test Album"])
assert len(photos) == 3
def test_multi_uuid(photosdb):
photos = photosdb.photos(uuid=[UUID_DICT["favorite"], UUID_DICT["not_favorite"]])
assert len(photos) == 2
def test_detected_text(photosdb):
for uuid, expected_text in UUID_DETECTED_TEXT.items():
photo = photosdb.get_photo(uuid=uuid)
detected_text = " ".join(text for text, conf in photo.detected_text())
if expected_text is not None:
assert expected_text in detected_text
else:
assert not detected_text
| true | true |
f720807430145448da0dda4234ceca5a1f6435e7 | 255 | py | Python | problems/013.py | JoshKarpel/Euler | 9c4a89cfe4b0114d84a82e2b2894c7b8af815e93 | [
"MIT"
] | 1 | 2017-09-20T22:26:24.000Z | 2017-09-20T22:26:24.000Z | problems/013.py | JoshKarpel/euler-python | 9c4a89cfe4b0114d84a82e2b2894c7b8af815e93 | [
"MIT"
] | null | null | null | problems/013.py | JoshKarpel/euler-python | 9c4a89cfe4b0114d84a82e2b2894c7b8af815e93 | [
"MIT"
] | null | null | null | import os
def solve():
    """Return the first ten digits of the sum of the numbers in 013_numbers.txt."""
    path = os.path.join(os.path.dirname(__file__), '013_numbers.txt')
    with open(path) as fh:
        total = sum(int(line) for line in fh)
    return int(str(total)[:10])
if __name__ == '__main__':
    print(solve())
| 18.214286 | 73 | 0.619608 | import os
def solve():
    """Return the first ten digits of the sum of the numbers in 013_numbers.txt."""
    filepath = os.path.join(os.path.dirname(__file__), '013_numbers.txt')
    with open(filepath) as f:
        numbers = [int(x) for x in f]
    # first ten digits of the (very large) sum
    return int(str(sum(numbers))[:10])
if __name__ == '__main__':
    print(solve())
| true | true |
f7208121384b71d9a38bf011097a42030c385a61 | 798 | py | Python | app/ngrok.py | nnsnodnb/line-bot-django-handle | 27d0e29b674831eac8068124f6445d0698968f40 | [
"Apache-2.0"
] | null | null | null | app/ngrok.py | nnsnodnb/line-bot-django-handle | 27d0e29b674831eac8068124f6445d0698968f40 | [
"Apache-2.0"
] | 1 | 2020-06-05T18:48:36.000Z | 2020-06-05T18:48:36.000Z | app/ngrok.py | nnsnodnb/line-bot-django-handle | 27d0e29b674831eac8068124f6445d0698968f40 | [
"Apache-2.0"
] | null | null | null | from django.conf import settings
import requests
import socket
BASE_HOST = '127.0.0.1'
PORT = 4040
class Ngrok(object):
    """Thin client for the web API of a locally running ngrok agent.

    On construction it verifies that the ngrok web interface is reachable
    on ``port`` and exposes the public URL of one of its tunnels.
    """

    def __init__(self, port=PORT, *args, **kwargs):
        super(Ngrok, self).__init__(*args, **kwargs)
        self.port = port  # port of the local ngrok web interface
        self._check_launch_ngrok()

    def _check_launch_ngrok(self):
        """Raise OSError (socket.error) if ngrok is not listening locally."""
        # the context manager guarantees the socket is closed even when
        # connect() raises (the previous version leaked it in that case)
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.connect((BASE_HOST, self.port))

    def get_public_url(self):
        """Return the public URL of the ngrok tunnel, or None when disabled."""
        if not settings.USE_NGROK:
            return None
        # timeout so a wedged ngrok agent cannot hang the caller forever
        response = requests.get(
            f'http://{BASE_HOST}:{self.port}/api/tunnels', timeout=5
        ).json()
        # tunnels[1] — presumably the https tunnel; TODO confirm ordering
        return response['tunnels'][1]['public_url']
| 24.9375 | 85 | 0.630326 | from django.conf import settings
import requests
import socket
BASE_HOST = '127.0.0.1'
PORT = 4040
class Ngrok(object):
    """Thin client for the web API of a locally running ngrok agent."""
    def __init__(self, port=PORT, *args, **kwargs):
        """Store the ngrok web-interface port and verify the agent is up."""
        super(Ngrok, self).__init__(*args, **kwargs)
        self.port = port
        self._check_launch_ngrok()
    def _check_launch_ngrok(self):
        """Open and close a TCP connection to the ngrok web port.

        Raises if nothing is listening. NOTE(review): the socket leaks if
        connect() raises — consider a with-statement.
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((BASE_HOST, self.port))
        s.close()
    def get_public_url(self):
        """Return the public URL of tunnels[1], or None when USE_NGROK is off."""
        if not settings.USE_NGROK:
            return None
        response = requests.get(f'http://{BASE_HOST}:{self.port}/api/tunnels').json()
        tunnels = response['tunnels']
        # tunnels[1] — presumably the https tunnel; verify against the agent
        tunnel = tunnels[1]
        public_url = tunnel['public_url']
        return public_url
| true | true |
f720816e82bbc8f3addb15db8bbee82d4cadc5e1 | 811 | py | Python | scipy/sparse/data.py | lorentzenchr/scipy | 393a05ee927883ad6316b7092c851afea8f16816 | [
"BSD-3-Clause"
] | 9,095 | 2015-01-02T18:24:23.000Z | 2022-03-31T20:35:31.000Z | scipy/sparse/data.py | lorentzenchr/scipy | 393a05ee927883ad6316b7092c851afea8f16816 | [
"BSD-3-Clause"
] | 11,500 | 2015-01-01T01:15:30.000Z | 2022-03-31T23:07:35.000Z | scipy/sparse/data.py | lorentzenchr/scipy | 393a05ee927883ad6316b7092c851afea8f16816 | [
"BSD-3-Clause"
] | 5,838 | 2015-01-05T11:56:42.000Z | 2022-03-31T23:21:19.000Z | # This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.sparse` namespace for importing the functions
# included below.
import warnings
from . import _data
__all__ = [ # noqa: F822
'isscalarlike',
'matrix',
'name',
'npfunc',
'spmatrix',
'validateaxis',
]
def __dir__():
    """Limit dir() of this deprecated shim module to its public names."""
    return __all__
def __getattr__(name):
    """Forward deprecated attribute access to ``scipy.sparse._data``.

    Names listed in ``__all__`` are resolved against the private module
    with a DeprecationWarning; anything else raises AttributeError.
    """
    if name in __all__:
        warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
                      "the `scipy.sparse.data` namespace is deprecated.",
                      category=DeprecationWarning, stacklevel=2)
        return getattr(_data, name)
    raise AttributeError(
        "scipy.sparse.data is deprecated and has no attribute "
        f"{name}. Try looking in scipy.sparse instead.")
| 23.852941 | 76 | 0.641184 |
import warnings
from . import _data
__all__ = [
'isscalarlike',
'matrix',
'name',
'npfunc',
'spmatrix',
'validateaxis',
]
def __dir__():
    """Limit dir() of this deprecated shim module to its public names."""
    return __all__
def __getattr__(name):
    """Forward deprecated attribute access to ``scipy.sparse._data``.

    Names listed in ``__all__`` are resolved against the private module
    with a DeprecationWarning; anything else raises AttributeError.
    """
    if name not in __all__:
        raise AttributeError(
            "scipy.sparse.data is deprecated and has no attribute "
            f"{name}. Try looking in scipy.sparse instead.")
    warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
                  "the `scipy.sparse.data` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)
    return getattr(_data, name)
| true | true |
f72081ae67b5d48042132e9f4744873649364661 | 5,831 | py | Python | idatasets/datasets/util.py | rahul1990gupta/indic-nlp-datasets | 4d0935b194263579b9653cf8c3d4ecdd17af687d | [
"MIT"
] | 1 | 2020-08-16T11:44:37.000Z | 2020-08-16T11:44:37.000Z | idatasets/datasets/util.py | rahul1990gupta/indic-nlp-datasets | 4d0935b194263579b9653cf8c3d4ecdd17af687d | [
"MIT"
] | 2 | 2020-08-16T05:23:43.000Z | 2020-10-21T06:59:15.000Z | idatasets/datasets/util.py | rahul1990gupta/indic-nlp-datasets | 4d0935b194263579b9653cf8c3d4ecdd17af687d | [
"MIT"
] | 2 | 2021-06-13T05:40:26.000Z | 2022-02-05T15:53:23.000Z | import os
from urllib.error import URLError, HTTPError
from urllib.request import urlretrieve
import tqdm
import tarfile
import zipfile
import shutil
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
def download_file(
        origin: str,
        cache_subdir: str = "datasets") -> str:
    """Download ``origin`` into the local cache and return the file path.

    Files are cached under ``~/.keras/<cache_subdir>/`` (falling back to
    ``/tmp/.keras`` when the home cache is not writable); an existing cached
    copy is returned without re-downloading.

    Raises:
        Exception: on any URL/HTTP fetch failure (the partial file is removed).
    """
    fname = origin.split("/")[-1]
    datadir_base = os.path.expanduser(os.path.join("~", ".keras"))
    if not os.access(datadir_base, os.W_OK):
        # home cache not writable — fall back to /tmp
        datadir_base = os.path.join("/tmp", ".keras")
    datadir = os.path.join(datadir_base, cache_subdir)
    if not os.path.exists(datadir):
        os.makedirs(datadir)

    fpath = os.path.join(datadir, fname)
    if os.path.exists(fpath):
        # cache hit: nothing to download
        return fpath

    global progbar
    progbar = None

    def dl_progress(count: int, block_size: int, total_size: int) -> None:
        # urlretrieve reporthook: lazily create the bar, then advance it
        global progbar
        if progbar is None:
            progbar = tqdm.tqdm(total=total_size)
        else:
            progbar.update(block_size)

    error_msg = "URL fetch failure on {}: {} -- {}"
    try:
        try:
            urlretrieve(origin, fpath, dl_progress)
        # HTTPError is a subclass of URLError, so it must be caught first;
        # the previous ordering made the HTTPError branch unreachable.
        except HTTPError as e:
            raise Exception(error_msg.format(origin, e.code, e.msg))
        except URLError as e:
            raise Exception(error_msg.format(origin, e.errno, e.reason))
    except (Exception, KeyboardInterrupt):
        # remove the partial download so a retry starts clean
        if os.path.exists(fpath):
            os.remove(fpath)
        raise
    finally:
        # close the progress bar (the previous version leaked it)
        if progbar is not None:
            progbar.close()
        progbar = None
    return fpath
def get_file(
        origin: str,
        untar: bool = False,
        unzip: bool = False,
        cache_subdir: str = "datasets") -> str:
    """Downloads a file from a URL if it not already in the cache."""
    # https://raw.githubusercontent.com/fchollet/keras/master/keras/utils/data_utils.py
    # Copyright Francois Chollet, Google, others (2015)
    # Under MIT license
    #
    # Returns the extraction path when untar/unzip is set, otherwise the
    # path of the downloaded file.
    fname = origin.split("/")[-1].split(".")[0]
    datadir_base = os.path.expanduser(os.path.join("~", ".keras"))
    if not os.access(datadir_base, os.W_OK):
        # home cache not writable — fall back to /tmp
        datadir_base = os.path.join("/tmp", ".keras")
    datadir = os.path.join(datadir_base, cache_subdir)
    if not os.path.exists(datadir):
        os.makedirs(datadir)

    if untar or unzip:
        untar_fpath = os.path.join(datadir, fname)
        # the archive itself is stored next to the extraction target
        fpath = untar_fpath + (".zip" if unzip else ".tar.gz")
    else:
        fpath = os.path.join(datadir, fname)

    global progbar
    progbar = None

    def dl_progress(count, block_size, total_size):
        # urlretrieve reporthook: lazily create the bar, then advance it
        global progbar
        if progbar is None:
            progbar = tqdm.tqdm(total=total_size)
        else:
            progbar.update(block_size)

    error_msg = "URL fetch failure on {}: {} -- {}"
    if not os.path.exists(fpath):
        try:
            try:
                urlretrieve(origin, fpath, dl_progress)
            # HTTPError is a subclass of URLError, so it must be caught
            # first; the previous ordering made this branch unreachable.
            except HTTPError as e:
                raise Exception(error_msg.format(origin, e.code, e.msg))
            except URLError as e:
                raise Exception(error_msg.format(origin, e.errno, e.reason))
        except (Exception, KeyboardInterrupt):
            # remove the partial download so a retry starts clean
            if os.path.exists(fpath):
                os.remove(fpath)
            raise
        finally:
            if progbar is not None:
                progbar.close()
            progbar = None

    def _cleanup_failed_extract():
        # remove a partially extracted file/tree so a retry starts clean
        if os.path.exists(untar_fpath):
            if os.path.isfile(untar_fpath):
                os.remove(untar_fpath)
            else:
                shutil.rmtree(untar_fpath)

    if untar:
        if not os.path.exists(untar_fpath):
            print("Untaring file...")
            # with-statement closes the archive even if extraction fails
            # (the previous version only closed it on success)
            with tarfile.open(fpath, "r:gz") as tfile:
                try:
                    tfile.extractall(path=datadir)
                except (Exception, KeyboardInterrupt):
                    _cleanup_failed_extract()
                    raise
        return untar_fpath
    elif unzip:
        if not os.path.exists(untar_fpath):
            print("Unzipping file...")
            with zipfile.ZipFile(fpath) as file_:
                try:
                    file_.extractall(path=datadir)
                except (Exception, KeyboardInterrupt):
                    _cleanup_failed_extract()
                    raise
        return untar_fpath
    return fpath
class Bunch(dict):
    """Container object exposing keys as attributes

    Bunch objects are sometimes used as an output for functions and methods.
    They extend dictionaries by enabling values to be accessed by key,
    `bunch["value_key"]`, or by an attribute, `bunch.value_key`.

    Examples
    --------
    >>> b = Bunch(a=1, b=2)
    >>> b['b']
    2
    >>> b.b
    2
    >>> b.a = 3
    >>> b['a']
    3
    >>> b.c = 6
    >>> b['c']
    6
    """

    def __init__(self, **kwargs):
        super().__init__(kwargs)

    def __setattr__(self, key: str, value):
        # Fixed: the previous `value: str` annotation was wrong — stored
        # values may be of any type. Attribute writes go through to the dict.
        self[key] = value

    def __dir__(self):
        # Expose stored keys to dir() / tab-completion.
        return self.keys()

    def __getattr__(self, key: str):
        # Only invoked when normal attribute lookup fails; the attribute
        # protocol requires AttributeError (not KeyError) for missing names.
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key)

    def __setstate__(self, state):
        # Bunch pickles generated with scikit-learn 0.16.* have an non
        # empty __dict__. This causes a surprising behaviour when
        # loading these pickles scikit-learn 0.17: reading bunch.key
        # uses __dict__ but assigning to bunch.key use __setattr__ and
        # only changes bunch['key']. More details can be found at:
        # https://github.com/scikit-learn/scikit-learn/issues/6196.
        # Overriding __setstate__ to be a noop has the effect of
        # ignoring the pickled __dict__
        pass
| 31.690217 | 87 | 0.575373 | import os
from urllib.error import URLError, HTTPError
from urllib.request import urlretrieve
import tqdm
import tarfile
import zipfile
import shutil
import ssl
# WARNING(review): this disables HTTPS certificate verification for the whole
# process (all downloads below are unauthenticated) — confirm this is really
# required before keeping it.
ssl._create_default_https_context = ssl._create_unverified_context
def download_file(
        origin: str,
        cache_subdir: str = "datasets") -> str:
    """Download ``origin`` into the local Keras-style cache and return its path.

    The file is stored under ``~/.keras/<cache_subdir>/`` (falling back to
    ``/tmp/.keras`` when the home cache is not writable). A cached file is
    returned as-is without re-downloading.

    :param origin: URL to download; the last path component is used as the
        local file name.
    :param cache_subdir: sub-directory of the cache to store the file in.
    :return: absolute path of the local file.
    """
    fname = origin.split("/")[-1]
    datadir_base = os.path.expanduser(os.path.join("~", ".keras"))
    if not os.access(datadir_base, os.W_OK):
        datadir_base = os.path.join("/tmp", ".keras")
    datadir = os.path.join(datadir_base, cache_subdir)
    # exist_ok avoids a race when several processes create the cache at once
    os.makedirs(datadir, exist_ok=True)
    fpath = os.path.join(datadir, fname)
    if os.path.exists(fpath):
        return fpath

    progbar = None

    def dl_progress(count: int, block_size: int, total_size: int) -> None:
        # urlretrieve report hook: create the bar on first call, then advance.
        nonlocal progbar
        if progbar is None:
            progbar = tqdm.tqdm(total=total_size)
        else:
            progbar.update(block_size)

    error_msg = "URL fetch failure on {}: {} -- {}"
    try:
        try:
            urlretrieve(origin, fpath, dl_progress)
        # HTTPError must be caught before URLError: it is a subclass of
        # URLError, so the previous ordering made this branch unreachable.
        except HTTPError as e:
            raise Exception(error_msg.format(origin, e.code, e.msg))
        except URLError as e:
            raise Exception(error_msg.format(origin, e.errno, e.reason))
    except (Exception, KeyboardInterrupt):
        # Never leave a partial download in the cache.
        if os.path.exists(fpath):
            os.remove(fpath)
        raise
    finally:
        # Always release the progress bar (the original leaked it).
        if progbar is not None:
            progbar.close()
    return fpath
def get_file(
        origin: str,
        untar: bool = False,
        unzip: bool = False,
        cache_subdir: str = "datasets") -> str:
    """Download ``origin`` into the cache, optionally extracting an archive.

    :param origin: URL to download.
    :param untar: if True, treat the download as a ``.tar.gz`` and extract it.
    :param unzip: if True, treat the download as a ``.zip`` and extract it.
    :param cache_subdir: sub-directory of the ``~/.keras`` cache to use.
    :return: path of the extracted file/directory when ``untar``/``unzip`` is
        set, otherwise the path of the downloaded file.
    """
    fname = origin.split("/")[-1].split(".")[0]
    datadir_base = os.path.expanduser(os.path.join("~", ".keras"))
    if not os.access(datadir_base, os.W_OK):
        datadir_base = os.path.join("/tmp", ".keras")
    datadir = os.path.join(datadir_base, cache_subdir)
    os.makedirs(datadir, exist_ok=True)

    untar_fpath = None
    if untar or unzip:
        untar_fpath = os.path.join(datadir, fname)
        fpath = untar_fpath + (".zip" if unzip else ".tar.gz")
    else:
        fpath = os.path.join(datadir, fname)

    progbar = None

    def dl_progress(count, block_size, total_size):
        # urlretrieve report hook: create the bar on first call, then advance.
        nonlocal progbar
        if progbar is None:
            progbar = tqdm.tqdm(total=total_size)
        else:
            progbar.update(block_size)

    def remove_partial(path):
        # Delete a partially-extracted file or directory tree.
        if os.path.exists(path):
            if os.path.isfile(path):
                os.remove(path)
            else:
                shutil.rmtree(path)

    error_msg = "URL fetch failure on {}: {} -- {}"
    if not os.path.exists(fpath):
        try:
            try:
                urlretrieve(origin, fpath, dl_progress)
            # HTTPError is a subclass of URLError and must be caught first;
            # the original ordering made the HTTPError branch unreachable.
            except HTTPError as e:
                raise Exception(error_msg.format(origin, e.code, e.msg))
            except URLError as e:
                raise Exception(error_msg.format(origin, e.errno, e.reason))
        except (Exception, KeyboardInterrupt):
            # Never leave a partial download in the cache.
            if os.path.exists(fpath):
                os.remove(fpath)
            raise
        finally:
            # Always release the progress bar (the original leaked it).
            if progbar is not None:
                progbar.close()

    if untar:
        if not os.path.exists(untar_fpath):
            print("Untaring file...")
            try:
                # Context manager guarantees the archive is closed even when
                # extraction fails (the original leaked it on error).
                # NOTE: extractall trusts the archive contents (no path check).
                with tarfile.open(fpath, "r:gz") as tfile:
                    tfile.extractall(path=datadir)
            except (Exception, KeyboardInterrupt):
                remove_partial(untar_fpath)
                raise
        return untar_fpath
    elif unzip:
        if not os.path.exists(untar_fpath):
            print("Unzipping file...")
            try:
                with zipfile.ZipFile(fpath) as file_:
                    file_.extractall(path=datadir)
            except (Exception, KeyboardInterrupt):
                remove_partial(untar_fpath)
                raise
        return untar_fpath
    return fpath
class Bunch(dict):
    """Dictionary subclass whose keys are also readable/writable as attributes."""

    def __init__(self, **kwargs):
        super().__init__(kwargs)

    def __setattr__(self, key: str, value: str):
        # NOTE(review): despite the `str` annotation, *value* may be of any
        # type; attribute writes go straight into the underlying dict.
        self[key] = value

    def __dir__(self):
        # Expose stored keys to dir() / tab-completion.
        return self.keys()

    def __getattr__(self, key: str):
        # Invoked only when normal attribute lookup fails; the attribute
        # protocol requires AttributeError (not KeyError) for missing names.
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key)

    def __setstate__(self, state):
        # No-op: deliberately ignore any pickled __dict__ so attribute access
        # keeps going through the dict entries (see scikit-learn issue #6196).
        pass
| true | true |
f72082ade0aa616c87208ff39d48373868d15c94 | 1,030 | py | Python | api/client/test/test_api_list_pipelines_response.py | krishnakumar27/mlx | dce67d58dffa24ca7a6a4d6b5fd8d4eb94e35215 | [
"Apache-2.0"
] | 98 | 2021-05-03T23:27:53.000Z | 2022-03-13T02:29:12.000Z | api/client/test/test_api_list_pipelines_response.py | krishnakumar27/mlx | dce67d58dffa24ca7a6a4d6b5fd8d4eb94e35215 | [
"Apache-2.0"
] | 296 | 2021-05-03T22:44:26.000Z | 2022-03-31T11:50:16.000Z | api/client/test/test_api_list_pipelines_response.py | krishnakumar27/mlx | dce67d58dffa24ca7a6a4d6b5fd8d4eb94e35215 | [
"Apache-2.0"
] | 38 | 2021-05-03T22:52:59.000Z | 2022-03-31T03:58:34.000Z | # Copyright 2021 The MLX Contributors
#
# SPDX-License-Identifier: Apache-2.0
# coding: utf-8
"""
MLX API
MLX API Extension for Kubeflow Pipelines # noqa: E501
OpenAPI spec version: 0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.api_list_pipelines_response import ApiListPipelinesResponse # noqa: E501
from swagger_client.rest import ApiException
class TestApiListPipelinesResponse(unittest.TestCase):
    """ApiListPipelinesResponse unit test stubs (generated by swagger-codegen)."""

    def setUp(self):
        # No fixtures required by this generated stub yet.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testApiListPipelinesResponse(self):
        """Test ApiListPipelinesResponse"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.api_list_pipelines_response.ApiListPipelinesResponse()  # noqa: E501
        pass
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 23.409091 | 108 | 0.729126 |
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.api_list_pipelines_response import ApiListPipelinesResponse
from swagger_client.rest import ApiException
class TestApiListPipelinesResponse(unittest.TestCase):
    """ApiListPipelinesResponse unit test stubs."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testApiListPipelinesResponse(self):
        # The body had been reduced to a stray `s` token (a NameError at
        # runtime) by comment stripping; restore the no-op stub.
        pass
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| true | true |
f72082bda44b211831ca8f51067e919a7b88a005 | 29,302 | py | Python | stable_baselines/trpo_mpi/trpo_mpi.py | Ow-woo/stable-baselines | ece376f62b0eaa3b58e90593b7db5fb9de3d82c5 | [
"MIT"
] | 3 | 2020-04-14T15:28:02.000Z | 2020-09-23T00:55:48.000Z | stable_baselines/trpo_mpi/trpo_mpi.py | Ow-woo/stable-baselines | ece376f62b0eaa3b58e90593b7db5fb9de3d82c5 | [
"MIT"
] | 1 | 2019-10-30T07:31:52.000Z | 2019-10-30T07:31:52.000Z | stable_baselines/trpo_mpi/trpo_mpi.py | Ow-woo/stable-baselines | ece376f62b0eaa3b58e90593b7db5fb9de3d82c5 | [
"MIT"
] | 7 | 2019-10-01T05:49:22.000Z | 2021-12-24T07:11:55.000Z | import time
from contextlib import contextmanager
from collections import deque
import gym
from mpi4py import MPI
import tensorflow as tf
import numpy as np
import stable_baselines.common.tf_util as tf_util
from stable_baselines.common.tf_util import total_episode_reward_logger
from stable_baselines.common import explained_variance, zipsame, dataset, fmt_row, colorize, ActorCriticRLModel, \
SetVerbosity, TensorboardWriter
from stable_baselines import logger
from stable_baselines.common.mpi_adam import MpiAdam
from stable_baselines.common.cg import conjugate_gradient
from stable_baselines.common.policies import ActorCriticPolicy
from stable_baselines.common.misc_util import flatten_lists
from stable_baselines.common.runners import traj_segment_generator
from stable_baselines.trpo_mpi.utils import add_vtarg_and_adv
class TRPO(ActorCriticRLModel):
"""
Trust Region Policy Optimization (https://arxiv.org/abs/1502.05477)
:param policy: (ActorCriticPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, CnnLstmPolicy, ...)
:param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)
:param gamma: (float) the discount value
:param timesteps_per_batch: (int) the number of timesteps to run per batch (horizon)
:param max_kl: (float) the Kullback-Leibler loss threshold
:param cg_iters: (int) the number of iterations for the conjugate gradient calculation
:param lam: (float) GAE factor
:param entcoeff: (float) the weight for the entropy loss
:param cg_damping: (float) the compute gradient dampening factor
:param vf_stepsize: (float) the value function stepsize
:param vf_iters: (int) the value function's number iterations for learning
:param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
:param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
:param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance
:param policy_kwargs: (dict) additional arguments to be passed to the policy on creation
:param full_tensorboard_log: (bool) enable additional logging when using tensorboard
WARNING: this logging can take a lot of space quickly
:param seed: (int) Seed for the pseudo-random generators (python, numpy, tensorflow).
If None (default), use random seed. Note that if you want completely deterministic
results, you must set `n_cpu_tf_sess` to 1.
:param n_cpu_tf_sess: (int) The number of threads for TensorFlow operations
If None, the number of cpu of the current machine will be used.
"""
    def __init__(self, policy, env, gamma=0.99, timesteps_per_batch=1024, max_kl=0.01, cg_iters=10, lam=0.98,
                 entcoeff=0.0, cg_damping=1e-2, vf_stepsize=3e-4, vf_iters=3, verbose=0, tensorboard_log=None,
                 _init_setup_model=True, policy_kwargs=None, full_tensorboard_log=False,
                 seed=None, n_cpu_tf_sess=1):
        """Store hyper-parameters (documented on the class docstring) and,
        unless ``_init_setup_model`` is False, build the TensorFlow graph."""
        super(TRPO, self).__init__(policy=policy, env=env, verbose=verbose, requires_vec_env=False,
                                   _init_setup_model=_init_setup_model, policy_kwargs=policy_kwargs,
                                   seed=seed, n_cpu_tf_sess=n_cpu_tf_sess)

        # TRPO hyper-parameters
        self.using_gail = False
        self.timesteps_per_batch = timesteps_per_batch
        self.cg_iters = cg_iters
        self.cg_damping = cg_damping
        self.gamma = gamma
        self.lam = lam
        self.max_kl = max_kl
        self.vf_iters = vf_iters
        self.vf_stepsize = vf_stepsize
        self.entcoeff = entcoeff
        self.tensorboard_log = tensorboard_log
        self.full_tensorboard_log = full_tensorboard_log

        # GAIL Params (only used when self.using_gail is enabled)
        self.hidden_size_adversary = 100
        self.adversary_entcoeff = 1e-3
        self.expert_dataset = None
        self.g_step = 1
        self.d_step = 1
        self.d_stepsize = 3e-4

        # Attributes populated later by setup_model()
        self.graph = None
        self.sess = None
        self.policy_pi = None
        self.loss_names = None
        self.assign_old_eq_new = None
        self.compute_losses = None
        self.compute_lossandgrad = None
        self.compute_fvp = None
        self.compute_vflossandgrad = None
        self.d_adam = None
        self.vfadam = None
        self.get_flat = None
        self.set_from_flat = None
        self.timed = None
        self.allmean = None
        self.nworkers = None
        self.rank = None
        self.reward_giver = None
        self.step = None
        self.proba_step = None
        self.initial_state = None
        self.params = None
        self.summary = None

        if _init_setup_model:
            self.setup_model()
def _get_pretrain_placeholders(self):
policy = self.policy_pi
action_ph = policy.pdtype.sample_placeholder([None])
if isinstance(self.action_space, gym.spaces.Discrete):
return policy.obs_ph, action_ph, policy.policy
return policy.obs_ph, action_ph, policy.deterministic_action
    def setup_model(self):
        """Build the TensorFlow graph: policy/old-policy networks, TRPO losses,
        the Fisher-vector-product op and the MPI-synchronised optimizers."""
        # prevent import loops
        from stable_baselines.gail.adversary import TransitionClassifier

        with SetVerbosity(self.verbose):

            assert issubclass(self.policy, ActorCriticPolicy), "Error: the input policy for the TRPO model must be " \
                                                               "an instance of common.policies.ActorCriticPolicy."

            self.nworkers = MPI.COMM_WORLD.Get_size()
            self.rank = MPI.COMM_WORLD.Get_rank()
            np.set_printoptions(precision=3)

            self.graph = tf.Graph()
            with self.graph.as_default():
                self.set_random_seed(self.seed)
                self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph)

                if self.using_gail:
                    self.reward_giver = TransitionClassifier(self.observation_space, self.action_space,
                                                             self.hidden_size_adversary,
                                                             entcoeff=self.adversary_entcoeff)

                # Construct network for new policy
                self.policy_pi = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
                                             None, reuse=False, **self.policy_kwargs)

                # Network for old policy
                with tf.variable_scope("oldpi", reuse=False):
                    old_policy = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
                                             None, reuse=False, **self.policy_kwargs)

                with tf.variable_scope("loss", reuse=False):
                    atarg = tf.placeholder(dtype=tf.float32, shape=[None])  # Target advantage function (if applicable)
                    ret = tf.placeholder(dtype=tf.float32, shape=[None])  # Empirical return

                    observation = self.policy_pi.obs_ph
                    action = self.policy_pi.pdtype.sample_placeholder([None])

                    # KL(old || new) and entropy of the current policy
                    kloldnew = old_policy.proba_distribution.kl(self.policy_pi.proba_distribution)
                    ent = self.policy_pi.proba_distribution.entropy()
                    meankl = tf.reduce_mean(kloldnew)
                    meanent = tf.reduce_mean(ent)
                    entbonus = self.entcoeff * meanent

                    # Value-function regression loss
                    vferr = tf.reduce_mean(tf.square(self.policy_pi.value_flat - ret))

                    # advantage * pnew / pold
                    ratio = tf.exp(self.policy_pi.proba_distribution.logp(action) -
                                   old_policy.proba_distribution.logp(action))
                    surrgain = tf.reduce_mean(ratio * atarg)

                    optimgain = surrgain + entbonus
                    losses = [optimgain, meankl, entbonus, surrgain, meanent]
                    self.loss_names = ["optimgain", "meankl", "entloss", "surrgain", "entropy"]

                    dist = meankl

                    # Split trainable variables into policy and value-function sets
                    all_var_list = tf_util.get_trainable_vars("model")
                    var_list = [v for v in all_var_list if "/vf" not in v.name and "/q/" not in v.name]
                    vf_var_list = [v for v in all_var_list if "/pi" not in v.name and "/logstd" not in v.name]

                    self.get_flat = tf_util.GetFlat(var_list, sess=self.sess)
                    self.set_from_flat = tf_util.SetFromFlat(var_list, sess=self.sess)

                    # Build grad(KL) . tangent, whose gradient is the Fisher-vector product
                    klgrads = tf.gradients(dist, var_list)
                    flat_tangent = tf.placeholder(dtype=tf.float32, shape=[None], name="flat_tan")
                    shapes = [var.get_shape().as_list() for var in var_list]
                    start = 0
                    tangents = []
                    for shape in shapes:
                        var_size = tf_util.intprod(shape)
                        tangents.append(tf.reshape(flat_tangent[start: start + var_size], shape))
                        start += var_size
                    gvp = tf.add_n([tf.reduce_sum(grad * tangent)
                                    for (grad, tangent) in zipsame(klgrads, tangents)])  # pylint: disable=E1111
                    # Fisher vector products
                    fvp = tf_util.flatgrad(gvp, var_list)

                    tf.summary.scalar('entropy_loss', meanent)
                    tf.summary.scalar('policy_gradient_loss', optimgain)
                    tf.summary.scalar('value_function_loss', surrgain)
                    tf.summary.scalar('approximate_kullback-leibler', meankl)
                    tf.summary.scalar('loss', optimgain + meankl + entbonus + surrgain + meanent)

                    # Copy current policy weights into the "old" policy
                    self.assign_old_eq_new = \
                        tf_util.function([], [], updates=[tf.assign(oldv, newv) for (oldv, newv) in
                                                          zipsame(tf_util.get_globals_vars("oldpi"),
                                                                  tf_util.get_globals_vars("model"))])
                    self.compute_losses = tf_util.function([observation, old_policy.obs_ph, action, atarg], losses)
                    self.compute_fvp = tf_util.function([flat_tangent, observation, old_policy.obs_ph, action, atarg],
                                                        fvp)
                    self.compute_vflossandgrad = tf_util.function([observation, old_policy.obs_ph, ret],
                                                                  tf_util.flatgrad(vferr, vf_var_list))

                @contextmanager
                def timed(msg):
                    # Print wall-clock timing on the rank-0 worker when verbose.
                    if self.rank == 0 and self.verbose >= 1:
                        print(colorize(msg, color='magenta'))
                        start_time = time.time()
                        yield
                        print(colorize("done in {:.3f} seconds".format((time.time() - start_time)),
                                       color='magenta'))
                    else:
                        yield

                def allmean(arr):
                    # Average an array across all MPI workers.
                    assert isinstance(arr, np.ndarray)
                    out = np.empty_like(arr)
                    MPI.COMM_WORLD.Allreduce(arr, out, op=MPI.SUM)
                    out /= self.nworkers
                    return out

                tf_util.initialize(sess=self.sess)

                # Broadcast rank-0 initial weights so all workers start identically
                th_init = self.get_flat()
                MPI.COMM_WORLD.Bcast(th_init, root=0)
                self.set_from_flat(th_init)

                with tf.variable_scope("Adam_mpi", reuse=False):
                    self.vfadam = MpiAdam(vf_var_list, sess=self.sess)
                    if self.using_gail:
                        self.d_adam = MpiAdam(self.reward_giver.get_trainable_variables(), sess=self.sess)
                        self.d_adam.sync()
                    self.vfadam.sync()

                with tf.variable_scope("input_info", reuse=False):
                    tf.summary.scalar('discounted_rewards', tf.reduce_mean(ret))
                    tf.summary.scalar('learning_rate', tf.reduce_mean(self.vf_stepsize))
                    tf.summary.scalar('advantage', tf.reduce_mean(atarg))
                    tf.summary.scalar('kl_clip_range', tf.reduce_mean(self.max_kl))

                    if self.full_tensorboard_log:
                        tf.summary.histogram('discounted_rewards', ret)
                        tf.summary.histogram('learning_rate', self.vf_stepsize)
                        tf.summary.histogram('advantage', atarg)
                        tf.summary.histogram('kl_clip_range', self.max_kl)

                        if tf_util.is_image(self.observation_space):
                            tf.summary.image('observation', observation)
                        else:
                            tf.summary.histogram('observation', observation)

                self.timed = timed
                self.allmean = allmean

                self.step = self.policy_pi.step
                self.proba_step = self.policy_pi.proba_step
                self.initial_state = self.policy_pi.initial_state

                self.params = tf_util.get_trainable_vars("model") + tf_util.get_trainable_vars("oldpi")
                if self.using_gail:
                    self.params.extend(self.reward_giver.get_trainable_variables())

                self.summary = tf.summary.merge_all()

                self.compute_lossandgrad = \
                    tf_util.function([observation, old_policy.obs_ph, action, atarg, ret],
                                     [self.summary, tf_util.flatgrad(optimgain, var_list)] + losses)
    def learn(self, total_timesteps, callback=None, log_interval=100, tb_log_name="TRPO",
              reset_num_timesteps=True):
        """Run the TRPO training loop for ``total_timesteps`` environment steps.

        Alternates trajectory collection, a natural-gradient policy update
        (conjugate gradient + backtracking line search under a KL constraint)
        and value-function fitting; additionally trains the GAIL discriminator
        when ``self.using_gail``. Returns ``self``.
        """
        new_tb_log = self._init_num_timesteps(reset_num_timesteps)
        callback = self._init_callback(callback)

        with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
                as writer:
            self._setup_learn()

            with self.sess.as_default():
                callback.on_training_start(locals(), globals())

                seg_gen = traj_segment_generator(self.policy_pi, self.env, self.timesteps_per_batch,
                                                 reward_giver=self.reward_giver,
                                                 gail=self.using_gail, callback=callback)

                episodes_so_far = 0
                timesteps_so_far = 0
                iters_so_far = 0
                t_start = time.time()
                len_buffer = deque(maxlen=40)  # rolling buffer for episode lengths
                reward_buffer = deque(maxlen=40)  # rolling buffer for episode rewards

                true_reward_buffer = None
                if self.using_gail:
                    true_reward_buffer = deque(maxlen=40)

                    # Initialize dataloader
                    batchsize = self.timesteps_per_batch // self.d_step
                    self.expert_dataset.init_dataloader(batchsize)

                    #  Stats not used for now
                    # TODO: replace with normal tb logging
                    #  g_loss_stats = Stats(loss_names)
                    #  d_loss_stats = Stats(reward_giver.loss_name)
                    #  ep_stats = Stats(["True_rewards", "Rewards", "Episode_length"])

                while True:
                    if timesteps_so_far >= total_timesteps:
                        break

                    logger.log("********** Iteration %i ************" % iters_so_far)

                    def fisher_vector_product(vec):
                        # Damped KL Hessian-vector product, averaged over MPI workers.
                        return self.allmean(self.compute_fvp(vec, *fvpargs, sess=self.sess)) + self.cg_damping * vec

                    # ------------------ Update G ------------------
                    logger.log("Optimizing Policy...")
                    # g_step = 1 when not using GAIL
                    mean_losses = None
                    vpredbefore = None
                    tdlamret = None
                    observation = None
                    action = None
                    seg = None
                    for k in range(self.g_step):
                        with self.timed("sampling"):
                            seg = seg_gen.__next__()

                        # Stop training early (triggered by the callback)
                        if not seg.get('continue_training', True):  # pytype: disable=attribute-error
                            break

                        add_vtarg_and_adv(seg, self.gamma, self.lam)
                        # ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))
                        observation, action = seg["observations"], seg["actions"]
                        atarg, tdlamret = seg["adv"], seg["tdlamret"]

                        vpredbefore = seg["vpred"]  # predicted value function before update
                        atarg = (atarg - atarg.mean()) / (atarg.std() + 1e-8)  # standardized advantage function estimate

                        # true_rew is the reward without discount
                        if writer is not None:
                            total_episode_reward_logger(self.episode_reward,
                                                        seg["true_rewards"].reshape(
                                                            (self.n_envs, -1)),
                                                        seg["dones"].reshape((self.n_envs, -1)),
                                                        writer, self.num_timesteps)

                        args = seg["observations"], seg["observations"], seg["actions"], atarg
                        # Subsampling: see p40-42 of John Schulman thesis
                        # http://joschu.net/docs/thesis.pdf
                        fvpargs = [arr[::5] for arr in args]

                        self.assign_old_eq_new(sess=self.sess)

                        with self.timed("computegrad"):
                            steps = self.num_timesteps + (k + 1) * (seg["total_timestep"] / self.g_step)
                            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                            run_metadata = tf.RunMetadata() if self.full_tensorboard_log else None
                            # run loss backprop with summary, and save the metadata (memory, compute time, ...)
                            if writer is not None:
                                summary, grad, *lossbefore = self.compute_lossandgrad(*args, tdlamret, sess=self.sess,
                                                                                      options=run_options,
                                                                                      run_metadata=run_metadata)
                                if self.full_tensorboard_log:
                                    writer.add_run_metadata(run_metadata, 'step%d' % steps)
                                writer.add_summary(summary, steps)
                            else:
                                _, grad, *lossbefore = self.compute_lossandgrad(*args, tdlamret, sess=self.sess,
                                                                                options=run_options,
                                                                                run_metadata=run_metadata)

                        lossbefore = self.allmean(np.array(lossbefore))
                        grad = self.allmean(grad)
                        if np.allclose(grad, 0):
                            logger.log("Got zero gradient. not updating")
                        else:
                            with self.timed("conjugate_gradient"):
                                stepdir = conjugate_gradient(fisher_vector_product, grad, cg_iters=self.cg_iters,
                                                             verbose=self.rank == 0 and self.verbose >= 1)
                            assert np.isfinite(stepdir).all()
                            shs = .5 * stepdir.dot(fisher_vector_product(stepdir))
                            # abs(shs) to avoid taking square root of negative values
                            lagrange_multiplier = np.sqrt(abs(shs) / self.max_kl)
                            # logger.log("lagrange multiplier:", lm, "gnorm:", np.linalg.norm(g))
                            fullstep = stepdir / lagrange_multiplier
                            expectedimprove = grad.dot(fullstep)
                            surrbefore = lossbefore[0]
                            stepsize = 1.0
                            thbefore = self.get_flat()
                            # Backtracking line search on the surrogate objective
                            # under the KL trust-region constraint.
                            for _ in range(10):
                                thnew = thbefore + fullstep * stepsize
                                self.set_from_flat(thnew)
                                mean_losses = surr, kl_loss, *_ = self.allmean(
                                    np.array(self.compute_losses(*args, sess=self.sess)))
                                improve = surr - surrbefore
                                logger.log("Expected: %.3f Actual: %.3f" % (expectedimprove, improve))
                                if not np.isfinite(mean_losses).all():
                                    logger.log("Got non-finite value of losses -- bad!")
                                elif kl_loss > self.max_kl * 1.5:
                                    logger.log("violated KL constraint. shrinking step.")
                                elif improve < 0:
                                    logger.log("surrogate didn't improve. shrinking step.")
                                else:
                                    logger.log("Stepsize OK!")
                                    break
                                stepsize *= .5
                            else:
                                logger.log("couldn't compute a good step")
                                self.set_from_flat(thbefore)
                            if self.nworkers > 1 and iters_so_far % 20 == 0:
                                # list of tuples
                                paramsums = MPI.COMM_WORLD.allgather((thnew.sum(), self.vfadam.getflat().sum()))
                                assert all(np.allclose(ps, paramsums[0]) for ps in paramsums[1:])

                        for (loss_name, loss_val) in zip(self.loss_names, mean_losses):
                            logger.record_tabular(loss_name, loss_val)

                        with self.timed("vf"):
                            for _ in range(self.vf_iters):
                                # NOTE: for recurrent policies, use shuffle=False?
                                for (mbob, mbret) in dataset.iterbatches((seg["observations"], seg["tdlamret"]),
                                                                         include_final_partial_batch=False,
                                                                         batch_size=128,
                                                                         shuffle=True):
                                    grad = self.allmean(self.compute_vflossandgrad(mbob, mbob, mbret, sess=self.sess))
                                    self.vfadam.update(grad, self.vf_stepsize)

                    # Stop training early (triggered by the callback)
                    if not seg.get('continue_training', True):  # pytype: disable=attribute-error
                        break

                    logger.record_tabular("explained_variance_tdlam_before",
                                          explained_variance(vpredbefore, tdlamret))

                    if self.using_gail:
                        # ------------------ Update D ------------------
                        logger.log("Optimizing Discriminator...")
                        logger.log(fmt_row(13, self.reward_giver.loss_name))
                        assert len(observation) == self.timesteps_per_batch
                        batch_size = self.timesteps_per_batch // self.d_step

                        # NOTE: uses only the last g step for observation
                        d_losses = []  # list of tuples, each of which gives the loss for a minibatch
                        # NOTE: for recurrent policies, use shuffle=False?
                        for ob_batch, ac_batch in dataset.iterbatches((observation, action),
                                                                      include_final_partial_batch=False,
                                                                      batch_size=batch_size,
                                                                      shuffle=True):
                            ob_expert, ac_expert = self.expert_dataset.get_next_batch()
                            # update running mean/std for reward_giver
                            if self.reward_giver.normalize:
                                self.reward_giver.obs_rms.update(np.concatenate((ob_batch, ob_expert), 0))

                            # Reshape actions if needed when using discrete actions
                            if isinstance(self.action_space, gym.spaces.Discrete):
                                if len(ac_batch.shape) == 2:
                                    ac_batch = ac_batch[:, 0]
                                if len(ac_expert.shape) == 2:
                                    ac_expert = ac_expert[:, 0]
                            *newlosses, grad = self.reward_giver.lossandgrad(ob_batch, ac_batch, ob_expert, ac_expert)
                            self.d_adam.update(self.allmean(grad), self.d_stepsize)
                            d_losses.append(newlosses)
                        logger.log(fmt_row(13, np.mean(d_losses, axis=0)))

                        # lr: lengths and rewards
                        lr_local = (seg["ep_lens"], seg["ep_rets"], seg["ep_true_rets"])  # local values
                        list_lr_pairs = MPI.COMM_WORLD.allgather(lr_local)  # list of tuples
                        lens, rews, true_rets = map(flatten_lists, zip(*list_lr_pairs))
                        true_reward_buffer.extend(true_rets)
                    else:
                        # lr: lengths and rewards
                        lr_local = (seg["ep_lens"], seg["ep_rets"])  # local values
                        list_lr_pairs = MPI.COMM_WORLD.allgather(lr_local)  # list of tuples
                        lens, rews = map(flatten_lists, zip(*list_lr_pairs))
                    len_buffer.extend(lens)
                    reward_buffer.extend(rews)

                    if len(len_buffer) > 0:
                        logger.record_tabular("EpLenMean", np.mean(len_buffer))
                        logger.record_tabular("EpRewMean", np.mean(reward_buffer))
                    if self.using_gail:
                        logger.record_tabular("EpTrueRewMean", np.mean(true_reward_buffer))
                    logger.record_tabular("EpThisIter", len(lens))
                    episodes_so_far += len(lens)
                    current_it_timesteps = MPI.COMM_WORLD.allreduce(seg["total_timestep"])
                    timesteps_so_far += current_it_timesteps
                    self.num_timesteps += current_it_timesteps
                    iters_so_far += 1

                    logger.record_tabular("EpisodesSoFar", episodes_so_far)
                    logger.record_tabular("TimestepsSoFar", self.num_timesteps)
                    logger.record_tabular("TimeElapsed", time.time() - t_start)
                    if self.verbose >= 1 and self.rank == 0:
                        logger.dump_tabular()

        callback.on_training_end()
        return self
def save(self, save_path, cloudpickle=False):
if self.using_gail and self.expert_dataset is not None:
# Exit processes to pickle the dataset
self.expert_dataset.prepare_pickling()
data = {
"gamma": self.gamma,
"timesteps_per_batch": self.timesteps_per_batch,
"max_kl": self.max_kl,
"cg_iters": self.cg_iters,
"lam": self.lam,
"entcoeff": self.entcoeff,
"cg_damping": self.cg_damping,
"vf_stepsize": self.vf_stepsize,
"vf_iters": self.vf_iters,
"hidden_size_adversary": self.hidden_size_adversary,
"adversary_entcoeff": self.adversary_entcoeff,
"expert_dataset": self.expert_dataset,
"g_step": self.g_step,
"d_step": self.d_step,
"d_stepsize": self.d_stepsize,
"using_gail": self.using_gail,
"verbose": self.verbose,
"policy": self.policy,
"observation_space": self.observation_space,
"action_space": self.action_space,
"n_envs": self.n_envs,
"n_cpu_tf_sess": self.n_cpu_tf_sess,
"seed": self.seed,
"_vectorize_action": self._vectorize_action,
"policy_kwargs": self.policy_kwargs
}
params_to_save = self.get_parameters()
self._save_to_file(save_path, data=data, params=params_to_save, cloudpickle=cloudpickle)
| 54.162662 | 121 | 0.524742 | import time
from contextlib import contextmanager
from collections import deque
import gym
from mpi4py import MPI
import tensorflow as tf
import numpy as np
import stable_baselines.common.tf_util as tf_util
from stable_baselines.common.tf_util import total_episode_reward_logger
from stable_baselines.common import explained_variance, zipsame, dataset, fmt_row, colorize, ActorCriticRLModel, \
SetVerbosity, TensorboardWriter
from stable_baselines import logger
from stable_baselines.common.mpi_adam import MpiAdam
from stable_baselines.common.cg import conjugate_gradient
from stable_baselines.common.policies import ActorCriticPolicy
from stable_baselines.common.misc_util import flatten_lists
from stable_baselines.common.runners import traj_segment_generator
from stable_baselines.trpo_mpi.utils import add_vtarg_and_adv
class TRPO(ActorCriticRLModel):
    def __init__(self, policy, env, gamma=0.99, timesteps_per_batch=1024, max_kl=0.01, cg_iters=10, lam=0.98,
                 entcoeff=0.0, cg_damping=1e-2, vf_stepsize=3e-4, vf_iters=3, verbose=0, tensorboard_log=None,
                 _init_setup_model=True, policy_kwargs=None, full_tensorboard_log=False,
                 seed=None, n_cpu_tf_sess=1):
        """Store hyper-parameters and, unless ``_init_setup_model`` is False,
        build the TensorFlow graph via ``setup_model()``."""
        super(TRPO, self).__init__(policy=policy, env=env, verbose=verbose, requires_vec_env=False,
                                   _init_setup_model=_init_setup_model, policy_kwargs=policy_kwargs,
                                   seed=seed, n_cpu_tf_sess=n_cpu_tf_sess)

        # TRPO hyper-parameters
        self.using_gail = False
        self.timesteps_per_batch = timesteps_per_batch
        self.cg_iters = cg_iters
        self.cg_damping = cg_damping
        self.gamma = gamma
        self.lam = lam
        self.max_kl = max_kl
        self.vf_iters = vf_iters
        self.vf_stepsize = vf_stepsize
        self.entcoeff = entcoeff
        self.tensorboard_log = tensorboard_log
        self.full_tensorboard_log = full_tensorboard_log

        # GAIL parameters (only used when self.using_gail is enabled)
        self.hidden_size_adversary = 100
        self.adversary_entcoeff = 1e-3
        self.expert_dataset = None
        self.g_step = 1
        self.d_step = 1
        self.d_stepsize = 3e-4

        # Attributes populated later by setup_model()
        self.graph = None
        self.sess = None
        self.policy_pi = None
        self.loss_names = None
        self.assign_old_eq_new = None
        self.compute_losses = None
        self.compute_lossandgrad = None
        self.compute_fvp = None
        self.compute_vflossandgrad = None
        self.d_adam = None
        self.vfadam = None
        self.get_flat = None
        self.set_from_flat = None
        self.timed = None
        self.allmean = None
        self.nworkers = None
        self.rank = None
        self.reward_giver = None
        self.step = None
        self.proba_step = None
        self.initial_state = None
        self.params = None
        self.summary = None

        if _init_setup_model:
            self.setup_model()
def _get_pretrain_placeholders(self):
policy = self.policy_pi
action_ph = policy.pdtype.sample_placeholder([None])
if isinstance(self.action_space, gym.spaces.Discrete):
return policy.obs_ph, action_ph, policy.policy
return policy.obs_ph, action_ph, policy.deterministic_action
def setup_model(self):
from stable_baselines.gail.adversary import TransitionClassifier
with SetVerbosity(self.verbose):
assert issubclass(self.policy, ActorCriticPolicy), "Error: the input policy for the TRPO model must be " \
"an instance of common.policies.ActorCriticPolicy."
self.nworkers = MPI.COMM_WORLD.Get_size()
self.rank = MPI.COMM_WORLD.Get_rank()
np.set_printoptions(precision=3)
self.graph = tf.Graph()
with self.graph.as_default():
self.set_random_seed(self.seed)
self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph)
if self.using_gail:
self.reward_giver = TransitionClassifier(self.observation_space, self.action_space,
self.hidden_size_adversary,
entcoeff=self.adversary_entcoeff)
self.policy_pi = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
None, reuse=False, **self.policy_kwargs)
with tf.variable_scope("oldpi", reuse=False):
old_policy = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
None, reuse=False, **self.policy_kwargs)
with tf.variable_scope("loss", reuse=False):
atarg = tf.placeholder(dtype=tf.float32, shape=[None])
ret = tf.placeholder(dtype=tf.float32, shape=[None])
observation = self.policy_pi.obs_ph
action = self.policy_pi.pdtype.sample_placeholder([None])
kloldnew = old_policy.proba_distribution.kl(self.policy_pi.proba_distribution)
ent = self.policy_pi.proba_distribution.entropy()
meankl = tf.reduce_mean(kloldnew)
meanent = tf.reduce_mean(ent)
entbonus = self.entcoeff * meanent
vferr = tf.reduce_mean(tf.square(self.policy_pi.value_flat - ret))
ratio = tf.exp(self.policy_pi.proba_distribution.logp(action) -
old_policy.proba_distribution.logp(action))
surrgain = tf.reduce_mean(ratio * atarg)
optimgain = surrgain + entbonus
losses = [optimgain, meankl, entbonus, surrgain, meanent]
self.loss_names = ["optimgain", "meankl", "entloss", "surrgain", "entropy"]
dist = meankl
all_var_list = tf_util.get_trainable_vars("model")
var_list = [v for v in all_var_list if "/vf" not in v.name and "/q/" not in v.name]
vf_var_list = [v for v in all_var_list if "/pi" not in v.name and "/logstd" not in v.name]
self.get_flat = tf_util.GetFlat(var_list, sess=self.sess)
self.set_from_flat = tf_util.SetFromFlat(var_list, sess=self.sess)
klgrads = tf.gradients(dist, var_list)
flat_tangent = tf.placeholder(dtype=tf.float32, shape=[None], name="flat_tan")
shapes = [var.get_shape().as_list() for var in var_list]
start = 0
tangents = []
for shape in shapes:
var_size = tf_util.intprod(shape)
tangents.append(tf.reshape(flat_tangent[start: start + var_size], shape))
start += var_size
gvp = tf.add_n([tf.reduce_sum(grad * tangent)
for (grad, tangent) in zipsame(klgrads, tangents)])
fvp = tf_util.flatgrad(gvp, var_list)
tf.summary.scalar('entropy_loss', meanent)
tf.summary.scalar('policy_gradient_loss', optimgain)
tf.summary.scalar('value_function_loss', surrgain)
tf.summary.scalar('approximate_kullback-leibler', meankl)
tf.summary.scalar('loss', optimgain + meankl + entbonus + surrgain + meanent)
self.assign_old_eq_new = \
tf_util.function([], [], updates=[tf.assign(oldv, newv) for (oldv, newv) in
zipsame(tf_util.get_globals_vars("oldpi"),
tf_util.get_globals_vars("model"))])
self.compute_losses = tf_util.function([observation, old_policy.obs_ph, action, atarg], losses)
self.compute_fvp = tf_util.function([flat_tangent, observation, old_policy.obs_ph, action, atarg],
fvp)
self.compute_vflossandgrad = tf_util.function([observation, old_policy.obs_ph, ret],
tf_util.flatgrad(vferr, vf_var_list))
@contextmanager
def timed(msg):
    """Context manager that prints *msg* and the elapsed wall-clock time.

    Output (via ``colorize``) is emitted only on MPI rank 0 when
    ``verbose >= 1``; every other worker simply yields silently so all
    ranks stay in lockstep.  Note: ``self`` is captured from the
    enclosing method's scope, not passed as a parameter.
    """
    if self.rank == 0 and self.verbose >= 1:
        print(colorize(msg, color='magenta'))
        start_time = time.time()
        yield
        print(colorize("done in {:.3f} seconds".format((time.time() - start_time)),
                       color='magenta'))
    else:
        yield
def allmean(arr):
    """Element-wise average of *arr* over all MPI workers.

    Sums the array across ``MPI.COMM_WORLD`` into a fresh buffer and
    divides by the worker count (``self.nworkers``, captured from the
    enclosing scope).  The input array is left untouched.
    """
    assert isinstance(arr, np.ndarray)
    reduced = np.empty_like(arr)
    MPI.COMM_WORLD.Allreduce(arr, reduced, op=MPI.SUM)
    reduced /= self.nworkers
    return reduced
tf_util.initialize(sess=self.sess)
th_init = self.get_flat()
MPI.COMM_WORLD.Bcast(th_init, root=0)
self.set_from_flat(th_init)
with tf.variable_scope("Adam_mpi", reuse=False):
self.vfadam = MpiAdam(vf_var_list, sess=self.sess)
if self.using_gail:
self.d_adam = MpiAdam(self.reward_giver.get_trainable_variables(), sess=self.sess)
self.d_adam.sync()
self.vfadam.sync()
with tf.variable_scope("input_info", reuse=False):
tf.summary.scalar('discounted_rewards', tf.reduce_mean(ret))
tf.summary.scalar('learning_rate', tf.reduce_mean(self.vf_stepsize))
tf.summary.scalar('advantage', tf.reduce_mean(atarg))
tf.summary.scalar('kl_clip_range', tf.reduce_mean(self.max_kl))
if self.full_tensorboard_log:
tf.summary.histogram('discounted_rewards', ret)
tf.summary.histogram('learning_rate', self.vf_stepsize)
tf.summary.histogram('advantage', atarg)
tf.summary.histogram('kl_clip_range', self.max_kl)
if tf_util.is_image(self.observation_space):
tf.summary.image('observation', observation)
else:
tf.summary.histogram('observation', observation)
self.timed = timed
self.allmean = allmean
self.step = self.policy_pi.step
self.proba_step = self.policy_pi.proba_step
self.initial_state = self.policy_pi.initial_state
self.params = tf_util.get_trainable_vars("model") + tf_util.get_trainable_vars("oldpi")
if self.using_gail:
self.params.extend(self.reward_giver.get_trainable_variables())
self.summary = tf.summary.merge_all()
self.compute_lossandgrad = \
tf_util.function([observation, old_policy.obs_ph, action, atarg, ret],
[self.summary, tf_util.flatgrad(optimgain, var_list)] + losses)
def learn(self, total_timesteps, callback=None, log_interval=100, tb_log_name="TRPO",
          reset_num_timesteps=True):
    """Run the TRPO training loop (plus a discriminator step when GAIL is enabled).

    :param total_timesteps: (int) number of environment timesteps to train for
    :param callback: callback(s) invoked during training; normalized via ``_init_callback``
    :param log_interval: (int) not referenced by this implementation; kept for
        interface compatibility with the other models
    :param tb_log_name: (str) name of the TensorBoard run
    :param reset_num_timesteps: (bool) whether to reset the timestep counter
    :return: self
    """
    new_tb_log = self._init_num_timesteps(reset_num_timesteps)
    callback = self._init_callback(callback)
    with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
            as writer:
        self._setup_learn()
        with self.sess.as_default():
            callback.on_training_start(locals(), globals())
            # Generator yielding fixed-size rollout segments from the environment.
            seg_gen = traj_segment_generator(self.policy_pi, self.env, self.timesteps_per_batch,
                                             reward_giver=self.reward_giver,
                                             gail=self.using_gail, callback=callback)
            episodes_so_far = 0
            timesteps_so_far = 0
            iters_so_far = 0
            t_start = time.time()
            # Rolling statistics over the last 40 episodes.
            len_buffer = deque(maxlen=40)
            reward_buffer = deque(maxlen=40)
            true_reward_buffer = None
            if self.using_gail:
                true_reward_buffer = deque(maxlen=40)
                batchsize = self.timesteps_per_batch // self.d_step
                self.expert_dataset.init_dataloader(batchsize)
            while True:
                if timesteps_so_far >= total_timesteps:
                    break
                logger.log("********** Iteration %i ************" % iters_so_far)

                def fisher_vector_product(vec):
                    # Fisher-vector product (KL-Hessian @ vec) averaged over MPI
                    # workers, plus CG damping for numerical stability.
                    return self.allmean(self.compute_fvp(vec, *fvpargs, sess=self.sess)) + self.cg_damping * vec
                logger.log("Optimizing Policy...")
                mean_losses = None
                vpredbefore = None
                tdlamret = None
                observation = None
                action = None
                seg = None
                for k in range(self.g_step):
                    with self.timed("sampling"):
                        seg = seg_gen.__next__()
                    # The callback can request an early stop via the segment dict.
                    if not seg.get('continue_training', True):
                        break
                    add_vtarg_and_adv(seg, self.gamma, self.lam)
                    observation, action = seg["observations"], seg["actions"]
                    atarg, tdlamret = seg["adv"], seg["tdlamret"]
                    vpredbefore = seg["vpred"]
                    # Standardize advantages (zero mean, unit std; eps avoids /0).
                    atarg = (atarg - atarg.mean()) / (atarg.std() + 1e-8)
                    if writer is not None:
                        total_episode_reward_logger(self.episode_reward,
                                                    seg["true_rewards"].reshape(
                                                        (self.n_envs, -1)),
                                                    seg["dones"].reshape((self.n_envs, -1)),
                                                    writer, self.num_timesteps)
                    args = seg["observations"], seg["observations"], seg["actions"], atarg
                    # Subsample every 5th element to cheapen the FVP evaluations.
                    fvpargs = [arr[::5] for arr in args]
                    self.assign_old_eq_new(sess=self.sess)
                    with self.timed("computegrad"):
                        steps = self.num_timesteps + (k + 1) * (seg["total_timestep"] / self.g_step)
                        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                        run_metadata = tf.RunMetadata() if self.full_tensorboard_log else None
                        if writer is not None:
                            summary, grad, *lossbefore = self.compute_lossandgrad(*args, tdlamret, sess=self.sess,
                                                                                  options=run_options,
                                                                                  run_metadata=run_metadata)
                            if self.full_tensorboard_log:
                                writer.add_run_metadata(run_metadata, 'step%d' % steps)
                            writer.add_summary(summary, steps)
                        else:
                            _, grad, *lossbefore = self.compute_lossandgrad(*args, tdlamret, sess=self.sess,
                                                                            options=run_options,
                                                                            run_metadata=run_metadata)
                    lossbefore = self.allmean(np.array(lossbefore))
                    grad = self.allmean(grad)
                    if np.allclose(grad, 0):
                        logger.log("Got zero gradient. not updating")
                    else:
                        with self.timed("conjugate_gradient"):
                            stepdir = conjugate_gradient(fisher_vector_product, grad, cg_iters=self.cg_iters,
                                                         verbose=self.rank == 0 and self.verbose >= 1)
                        assert np.isfinite(stepdir).all()
                        # Scale the step so the quadratic KL estimate equals max_kl.
                        shs = .5 * stepdir.dot(fisher_vector_product(stepdir))
                        lagrange_multiplier = np.sqrt(abs(shs) / self.max_kl)
                        fullstep = stepdir / lagrange_multiplier
                        expectedimprove = grad.dot(fullstep)
                        surrbefore = lossbefore[0]
                        stepsize = 1.0
                        thbefore = self.get_flat()
                        # Backtracking line search: halve the step until the surrogate
                        # improves and the KL constraint holds (at most 10 attempts).
                        for _ in range(10):
                            thnew = thbefore + fullstep * stepsize
                            self.set_from_flat(thnew)
                            mean_losses = surr, kl_loss, *_ = self.allmean(
                                np.array(self.compute_losses(*args, sess=self.sess)))
                            improve = surr - surrbefore
                            logger.log("Expected: %.3f Actual: %.3f" % (expectedimprove, improve))
                            if not np.isfinite(mean_losses).all():
                                logger.log("Got non-finite value of losses -- bad!")
                            elif kl_loss > self.max_kl * 1.5:
                                logger.log("violated KL constraint. shrinking step.")
                            elif improve < 0:
                                logger.log("surrogate didn't improve. shrinking step.")
                            else:
                                logger.log("Stepsize OK!")
                                break
                            stepsize *= .5
                        else:
                            # No acceptable step found: roll back the parameters.
                            logger.log("couldn't compute a good step")
                            self.set_from_flat(thbefore)
                        if self.nworkers > 1 and iters_so_far % 20 == 0:
                            # Sanity check: parameters must stay in sync across workers.
                            paramsums = MPI.COMM_WORLD.allgather((thnew.sum(), self.vfadam.getflat().sum()))
                            assert all(np.allclose(ps, paramsums[0]) for ps in paramsums[1:])
                    for (loss_name, loss_val) in zip(self.loss_names, mean_losses):
                        logger.record_tabular(loss_name, loss_val)
                    with self.timed("vf"):
                        # Fit the value function with vf_iters epochs of minibatch Adam.
                        for _ in range(self.vf_iters):
                            for (mbob, mbret) in dataset.iterbatches((seg["observations"], seg["tdlamret"]),
                                                                     include_final_partial_batch=False,
                                                                     batch_size=128,
                                                                     shuffle=True):
                                grad = self.allmean(self.compute_vflossandgrad(mbob, mbob, mbret, sess=self.sess))
                                self.vfadam.update(grad, self.vf_stepsize)
                # Early-stop requested by the callback during sampling.
                if not seg.get('continue_training', True):
                    break
                logger.record_tabular("explained_variance_tdlam_before",
                                      explained_variance(vpredbefore, tdlamret))
                if self.using_gail:
                    # GAIL: update the discriminator on policy vs. expert batches.
                    logger.log("Optimizing Discriminator...")
                    logger.log(fmt_row(13, self.reward_giver.loss_name))
                    assert len(observation) == self.timesteps_per_batch
                    batch_size = self.timesteps_per_batch // self.d_step
                    d_losses = []
                    for ob_batch, ac_batch in dataset.iterbatches((observation, action),
                                                                  include_final_partial_batch=False,
                                                                  batch_size=batch_size,
                                                                  shuffle=True):
                        ob_expert, ac_expert = self.expert_dataset.get_next_batch()
                        if self.reward_giver.normalize:
                            self.reward_giver.obs_rms.update(np.concatenate((ob_batch, ob_expert), 0))
                        if isinstance(self.action_space, gym.spaces.Discrete):
                            # Flatten (n, 1) action arrays to 1-D for discrete spaces.
                            if len(ac_batch.shape) == 2:
                                ac_batch = ac_batch[:, 0]
                            if len(ac_expert.shape) == 2:
                                ac_expert = ac_expert[:, 0]
                        *newlosses, grad = self.reward_giver.lossandgrad(ob_batch, ac_batch, ob_expert, ac_expert)
                        self.d_adam.update(self.allmean(grad), self.d_stepsize)
                        d_losses.append(newlosses)
                    logger.log(fmt_row(13, np.mean(d_losses, axis=0)))
                    lr_local = (seg["ep_lens"], seg["ep_rets"], seg["ep_true_rets"])
                    list_lr_pairs = MPI.COMM_WORLD.allgather(lr_local)
                    lens, rews, true_rets = map(flatten_lists, zip(*list_lr_pairs))
                    true_reward_buffer.extend(true_rets)
                else:
                    lr_local = (seg["ep_lens"], seg["ep_rets"])
                    list_lr_pairs = MPI.COMM_WORLD.allgather(lr_local)
                    lens, rews = map(flatten_lists, zip(*list_lr_pairs))
                len_buffer.extend(lens)
                reward_buffer.extend(rews)
                if len(len_buffer) > 0:
                    logger.record_tabular("EpLenMean", np.mean(len_buffer))
                    logger.record_tabular("EpRewMean", np.mean(reward_buffer))
                if self.using_gail:
                    logger.record_tabular("EpTrueRewMean", np.mean(true_reward_buffer))
                logger.record_tabular("EpThisIter", len(lens))
                episodes_so_far += len(lens)
                # Timesteps are summed over all MPI workers before accounting.
                current_it_timesteps = MPI.COMM_WORLD.allreduce(seg["total_timestep"])
                timesteps_so_far += current_it_timesteps
                self.num_timesteps += current_it_timesteps
                iters_so_far += 1
                logger.record_tabular("EpisodesSoFar", episodes_so_far)
                logger.record_tabular("TimestepsSoFar", self.num_timesteps)
                logger.record_tabular("TimeElapsed", time.time() - t_start)
                if self.verbose >= 1 and self.rank == 0:
                    logger.dump_tabular()
            callback.on_training_end()
    return self
def save(self, save_path, cloudpickle=False):
    """Persist the model's hyper-parameters and trainable weights.

    :param save_path: (str or file-like) destination of the saved model
    :param cloudpickle: (bool) serialize with cloudpickle instead of pickle
    """
    if self.using_gail and self.expert_dataset is not None:
        # The expert dataset must detach unpicklable state before serialization.
        self.expert_dataset.prepare_pickling()

    metadata = dict(
        gamma=self.gamma,
        timesteps_per_batch=self.timesteps_per_batch,
        max_kl=self.max_kl,
        cg_iters=self.cg_iters,
        lam=self.lam,
        entcoeff=self.entcoeff,
        cg_damping=self.cg_damping,
        vf_stepsize=self.vf_stepsize,
        vf_iters=self.vf_iters,
        hidden_size_adversary=self.hidden_size_adversary,
        adversary_entcoeff=self.adversary_entcoeff,
        expert_dataset=self.expert_dataset,
        g_step=self.g_step,
        d_step=self.d_step,
        d_stepsize=self.d_stepsize,
        using_gail=self.using_gail,
        verbose=self.verbose,
        policy=self.policy,
        observation_space=self.observation_space,
        action_space=self.action_space,
        n_envs=self.n_envs,
        n_cpu_tf_sess=self.n_cpu_tf_sess,
        seed=self.seed,
        _vectorize_action=self._vectorize_action,
        policy_kwargs=self.policy_kwargs,
    )

    self._save_to_file(save_path, data=metadata, params=self.get_parameters(),
                       cloudpickle=cloudpickle)
| true | true |
f720830ef2390c7f939ff23286a68aa2ce3b6879 | 6,591 | py | Python | mslib/msui/qt5/ui_topview_window.py | iamansoni/MSS | 69bc8fc61ab277697ca691119f911382a63860c0 | [
"Apache-2.0"
] | null | null | null | mslib/msui/qt5/ui_topview_window.py | iamansoni/MSS | 69bc8fc61ab277697ca691119f911382a63860c0 | [
"Apache-2.0"
] | null | null | null | mslib/msui/qt5/ui_topview_window.py | iamansoni/MSS | 69bc8fc61ab277697ca691119f911382a63860c0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mslib/msui/ui/ui_topview_window.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_TopViewWindow(object):
    """Widget layout for the Top View window: a map canvas above a control row.

    Generated by pyuic5 from ``ui_topview_window.ui`` (see the file header);
    manual edits will be overwritten on regeneration.
    """

    def setupUi(self, TopViewWindow):
        """Create and arrange all child widgets on *TopViewWindow*."""
        TopViewWindow.setObjectName("TopViewWindow")
        TopViewWindow.resize(952, 782)
        self.centralwidget = QtWidgets.QWidget(TopViewWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout.setObjectName("verticalLayout")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        # Custom map canvas widget; expands to fill the available space.
        self.mpl = MplTopViewWidget(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.mpl.sizePolicy().hasHeightForWidth())
        self.mpl.setSizePolicy(sizePolicy)
        self.mpl.setMinimumSize(QtCore.QSize(100, 100))
        self.mpl.setObjectName("mpl")
        self.horizontalLayout_2.addWidget(self.mpl)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        # Bottom control row: push buttons, spacers and two combo boxes.
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.btMapRedraw = QtWidgets.QPushButton(self.centralwidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.btMapRedraw.setFont(font)
        self.btMapRedraw.setFlat(False)
        self.btMapRedraw.setObjectName("btMapRedraw")
        self.horizontalLayout.addWidget(self.btMapRedraw)
        spacerItem = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.btSettings = QtWidgets.QPushButton(self.centralwidget)
        self.btSettings.setObjectName("btSettings")
        self.horizontalLayout.addWidget(self.btSettings)
        spacerItem1 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem1)
        self.btRoundtrip = QtWidgets.QPushButton(self.centralwidget)
        self.btRoundtrip.setObjectName("btRoundtrip")
        self.horizontalLayout.addWidget(self.btRoundtrip)
        spacerItem2 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem2)
        # Tool selector combo box (item texts are assigned in retranslateUi).
        self.cbTools = QtWidgets.QComboBox(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.cbTools.sizePolicy().hasHeightForWidth())
        self.cbTools.setSizePolicy(sizePolicy)
        self.cbTools.setObjectName("cbTools")
        self.cbTools.addItem("")
        self.cbTools.addItem("")
        self.horizontalLayout.addWidget(self.cbTools)
        spacerItem3 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem3)
        # Map-section selector combo box (item texts are assigned in retranslateUi).
        self.cbChangeMapSection = QtWidgets.QComboBox(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.cbChangeMapSection.sizePolicy().hasHeightForWidth())
        self.cbChangeMapSection.setSizePolicy(sizePolicy)
        self.cbChangeMapSection.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToContents)
        self.cbChangeMapSection.setObjectName("cbChangeMapSection")
        self.cbChangeMapSection.addItem("")
        self.cbChangeMapSection.addItem("")
        self.cbChangeMapSection.addItem("")
        self.cbChangeMapSection.addItem("")
        self.cbChangeMapSection.addItem("")
        self.cbChangeMapSection.addItem("")
        self.cbChangeMapSection.addItem("")
        self.horizontalLayout.addWidget(self.cbChangeMapSection)
        spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem4)
        self.verticalLayout.addLayout(self.horizontalLayout)
        TopViewWindow.setCentralWidget(self.centralwidget)
        # Close-window action; wired to TopViewWindow.close below, shortcut set in retranslateUi.
        self.actionCloseWindow = QtWidgets.QAction(TopViewWindow)
        self.actionCloseWindow.setObjectName("actionCloseWindow")
        TopViewWindow.addAction(self.actionCloseWindow)
        self.retranslateUi(TopViewWindow)
        self.actionCloseWindow.triggered.connect(TopViewWindow.close)
        QtCore.QMetaObject.connectSlotsByName(TopViewWindow)

    def retranslateUi(self, TopViewWindow):
        """Assign all user-visible (translatable) strings to the widgets."""
        _translate = QtCore.QCoreApplication.translate
        TopViewWindow.setWindowTitle(_translate("TopViewWindow", "Top View - Mission Support System"))
        self.btMapRedraw.setText(_translate("TopViewWindow", "&REDRAW"))
        self.btMapRedraw.setShortcut(_translate("TopViewWindow", "R"))
        self.btSettings.setText(_translate("TopViewWindow", "options"))
        self.btRoundtrip.setText(_translate("TopViewWindow", "make roundtrip"))
        self.cbTools.setItemText(0, _translate("TopViewWindow", "(select to open tool)"))
        self.cbTools.setItemText(1, _translate("TopViewWindow", "WMS"))
        self.cbChangeMapSection.setItemText(0, _translate("TopViewWindow", "to reset map select a region"))
        self.cbChangeMapSection.setItemText(1, _translate("TopViewWindow", "Spitsbergen, large"))
        self.cbChangeMapSection.setItemText(2, _translate("TopViewWindow", "Spitsbergen, local"))
        self.cbChangeMapSection.setItemText(3, _translate("TopViewWindow", "Europe (ste)"))
        self.cbChangeMapSection.setItemText(4, _translate("TopViewWindow", "Germany (ste)"))
        self.cbChangeMapSection.setItemText(5, _translate("TopViewWindow", "Europe (cyl)"))
        self.cbChangeMapSection.setItemText(6, _translate("TopViewWindow", "Germany (cyl)"))
        self.actionCloseWindow.setText(_translate("TopViewWindow", "CloseWindow"))
        self.actionCloseWindow.setShortcut(_translate("TopViewWindow", "Ctrl+W"))
from mslib.msui.mpl_qtwidget import MplTopViewWidget
| 58.327434 | 122 | 0.739645 |
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_TopViewWindow(object):
    """Widget layout for the Top View window: a map canvas above a control row.

    NOTE(review): this looks like pyuic-generated code (setupUi/retranslateUi
    structure) — confirm before hand-editing, as regeneration would discard changes.
    """

    def setupUi(self, TopViewWindow):
        """Create and arrange all child widgets on *TopViewWindow*."""
        TopViewWindow.setObjectName("TopViewWindow")
        TopViewWindow.resize(952, 782)
        self.centralwidget = QtWidgets.QWidget(TopViewWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout.setObjectName("verticalLayout")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        # Custom map canvas widget; expands to fill the available space.
        self.mpl = MplTopViewWidget(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.mpl.sizePolicy().hasHeightForWidth())
        self.mpl.setSizePolicy(sizePolicy)
        self.mpl.setMinimumSize(QtCore.QSize(100, 100))
        self.mpl.setObjectName("mpl")
        self.horizontalLayout_2.addWidget(self.mpl)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        # Bottom control row: push buttons, spacers and two combo boxes.
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.btMapRedraw = QtWidgets.QPushButton(self.centralwidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.btMapRedraw.setFont(font)
        self.btMapRedraw.setFlat(False)
        self.btMapRedraw.setObjectName("btMapRedraw")
        self.horizontalLayout.addWidget(self.btMapRedraw)
        spacerItem = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.btSettings = QtWidgets.QPushButton(self.centralwidget)
        self.btSettings.setObjectName("btSettings")
        self.horizontalLayout.addWidget(self.btSettings)
        spacerItem1 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem1)
        self.btRoundtrip = QtWidgets.QPushButton(self.centralwidget)
        self.btRoundtrip.setObjectName("btRoundtrip")
        self.horizontalLayout.addWidget(self.btRoundtrip)
        spacerItem2 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem2)
        # Tool selector combo box (item texts are assigned in retranslateUi).
        self.cbTools = QtWidgets.QComboBox(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.cbTools.sizePolicy().hasHeightForWidth())
        self.cbTools.setSizePolicy(sizePolicy)
        self.cbTools.setObjectName("cbTools")
        self.cbTools.addItem("")
        self.cbTools.addItem("")
        self.horizontalLayout.addWidget(self.cbTools)
        spacerItem3 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem3)
        # Map-section selector combo box (item texts are assigned in retranslateUi).
        self.cbChangeMapSection = QtWidgets.QComboBox(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.cbChangeMapSection.sizePolicy().hasHeightForWidth())
        self.cbChangeMapSection.setSizePolicy(sizePolicy)
        self.cbChangeMapSection.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToContents)
        self.cbChangeMapSection.setObjectName("cbChangeMapSection")
        self.cbChangeMapSection.addItem("")
        self.cbChangeMapSection.addItem("")
        self.cbChangeMapSection.addItem("")
        self.cbChangeMapSection.addItem("")
        self.cbChangeMapSection.addItem("")
        self.cbChangeMapSection.addItem("")
        self.cbChangeMapSection.addItem("")
        self.horizontalLayout.addWidget(self.cbChangeMapSection)
        spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem4)
        self.verticalLayout.addLayout(self.horizontalLayout)
        TopViewWindow.setCentralWidget(self.centralwidget)
        # Close-window action; wired to TopViewWindow.close below, shortcut set in retranslateUi.
        self.actionCloseWindow = QtWidgets.QAction(TopViewWindow)
        self.actionCloseWindow.setObjectName("actionCloseWindow")
        TopViewWindow.addAction(self.actionCloseWindow)
        self.retranslateUi(TopViewWindow)
        self.actionCloseWindow.triggered.connect(TopViewWindow.close)
        QtCore.QMetaObject.connectSlotsByName(TopViewWindow)

    def retranslateUi(self, TopViewWindow):
        """Assign all user-visible (translatable) strings to the widgets."""
        _translate = QtCore.QCoreApplication.translate
        TopViewWindow.setWindowTitle(_translate("TopViewWindow", "Top View - Mission Support System"))
        self.btMapRedraw.setText(_translate("TopViewWindow", "&REDRAW"))
        self.btMapRedraw.setShortcut(_translate("TopViewWindow", "R"))
        self.btSettings.setText(_translate("TopViewWindow", "options"))
        self.btRoundtrip.setText(_translate("TopViewWindow", "make roundtrip"))
        self.cbTools.setItemText(0, _translate("TopViewWindow", "(select to open tool)"))
        self.cbTools.setItemText(1, _translate("TopViewWindow", "WMS"))
        self.cbChangeMapSection.setItemText(0, _translate("TopViewWindow", "to reset map select a region"))
        self.cbChangeMapSection.setItemText(1, _translate("TopViewWindow", "Spitsbergen, large"))
        self.cbChangeMapSection.setItemText(2, _translate("TopViewWindow", "Spitsbergen, local"))
        self.cbChangeMapSection.setItemText(3, _translate("TopViewWindow", "Europe (ste)"))
        self.cbChangeMapSection.setItemText(4, _translate("TopViewWindow", "Germany (ste)"))
        self.cbChangeMapSection.setItemText(5, _translate("TopViewWindow", "Europe (cyl)"))
        self.cbChangeMapSection.setItemText(6, _translate("TopViewWindow", "Germany (cyl)"))
        self.actionCloseWindow.setText(_translate("TopViewWindow", "CloseWindow"))
        self.actionCloseWindow.setShortcut(_translate("TopViewWindow", "Ctrl+W"))
from mslib.msui.mpl_qtwidget import MplTopViewWidget
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.