language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | sympy__sympy | sympy/functions/elementary/complexes.py | {
"start": 8422,
"end": 13639
} | class ____(DefinedFunction):
"""
Returns the complex sign of an expression:
Explanation
===========
If the expression is real the sign will be:
* $1$ if expression is positive
* $0$ if expression is equal to zero
* $-1$ if expression is negative
If the expression is imaginary the sign will be:
* $I$ if im(expression) is positive
* $-I$ if im(expression) is negative
Otherwise an unevaluated expression will be returned. When evaluated, the
result (in general) will be ``cos(arg(expr)) + I*sin(arg(expr))``.
Examples
========
>>> from sympy import sign, I
>>> sign(-1)
-1
>>> sign(0)
0
>>> sign(-3*I)
-I
>>> sign(1 + I)
sign(1 + I)
>>> _.evalf()
0.707106781186548 + 0.707106781186548*I
Parameters
==========
arg : Expr
Real or imaginary expression.
Returns
=======
expr : Expr
Complex sign of expression.
See Also
========
Abs, conjugate
"""
is_complex = True
_singularities = True
def doit(self, **hints):
s = super().doit()
if s == self and self.args[0].is_zero is False:
return self.args[0] / Abs(self.args[0])
return s
@classmethod
def eval(cls, arg):
# handle what we can
if arg.is_Mul:
c, args = arg.as_coeff_mul()
unk = []
s = sign(c)
for a in args:
if a.is_extended_negative:
s = -s
elif a.is_extended_positive:
pass
else:
if a.is_imaginary:
ai = im(a)
if ai.is_comparable: # i.e. a = I*real
s *= I
if ai.is_extended_negative:
# can't use sign(ai) here since ai might not be
# a Number
s = -s
else:
unk.append(a)
else:
unk.append(a)
if c is S.One and len(unk) == len(args):
return None
return s * cls(arg._new_rawargs(*unk))
if arg is S.NaN:
return S.NaN
if arg.is_zero: # it may be an Expr that is zero
return S.Zero
if arg.is_extended_positive:
return S.One
if arg.is_extended_negative:
return S.NegativeOne
if arg.is_Function:
if isinstance(arg, sign):
return arg
if arg.is_imaginary:
if arg.is_Pow and arg.exp is S.Half:
# we catch this because non-trivial sqrt args are not expanded
# e.g. sqrt(1-sqrt(2)) --x--> to I*sqrt(sqrt(2) - 1)
return I
arg2 = -I * arg
if arg2.is_extended_positive:
return I
if arg2.is_extended_negative:
return -I
def _eval_Abs(self):
if fuzzy_not(self.args[0].is_zero):
return S.One
def _eval_conjugate(self):
return sign(conjugate(self.args[0]))
def _eval_derivative(self, x):
if self.args[0].is_extended_real:
from sympy.functions.special.delta_functions import DiracDelta
return 2 * Derivative(self.args[0], x, evaluate=True) \
* DiracDelta(self.args[0])
elif self.args[0].is_imaginary:
from sympy.functions.special.delta_functions import DiracDelta
return 2 * Derivative(self.args[0], x, evaluate=True) \
* DiracDelta(-I * self.args[0])
def _eval_is_nonnegative(self):
if self.args[0].is_nonnegative:
return True
def _eval_is_nonpositive(self):
if self.args[0].is_nonpositive:
return True
def _eval_is_imaginary(self):
return self.args[0].is_imaginary
def _eval_is_integer(self):
return self.args[0].is_extended_real
def _eval_is_zero(self):
return self.args[0].is_zero
def _eval_power(self, other):
if (
fuzzy_not(self.args[0].is_zero) and
other.is_integer and
other.is_even
):
return S.One
def _eval_nseries(self, x, n, logx, cdir=0):
arg0 = self.args[0]
x0 = arg0.subs(x, 0)
if x0 != 0:
return self.func(x0)
if cdir != 0:
cdir = arg0.dir(x, cdir)
return -S.One if re(cdir) < 0 else S.One
def _eval_rewrite_as_Piecewise(self, arg, **kwargs):
if arg.is_extended_real:
return Piecewise((1, arg > 0), (-1, arg < 0), (0, True))
def _eval_rewrite_as_Heaviside(self, arg, **kwargs):
from sympy.functions.special.delta_functions import Heaviside
if arg.is_extended_real:
return Heaviside(arg) * 2 - 1
def _eval_rewrite_as_Abs(self, arg, **kwargs):
return Piecewise((0, Eq(arg, 0)), (arg / Abs(arg), True))
def _eval_simplify(self, **kwargs):
return self.func(factor_terms(self.args[0])) # XXX include doit?
| sign |
python | django__django | django/db/backends/oracle/functions.py | {
"start": 509,
"end": 812
} | class ____(Func):
function = "NUMTODSINTERVAL"
template = "%(function)s(%(expressions)s, 'SECOND')"
def __init__(self, expression, *, output_field=None, **extra):
super().__init__(
expression, output_field=output_field or DurationField(), **extra
)
| SecondsToInterval |
python | pyparsing__pyparsing | pyparsing/core.py | {
"start": 98033,
"end": 99466
} | class ____(ParserElement):
# internal placeholder class to hold a place were '...' is added to a parser element,
# once another ParserElement is added, this placeholder will be replaced with a SkipTo
def __init__(self, expr: ParserElement, must_skip: bool = False) -> None:
super().__init__()
self.anchor = expr
self.must_skip = must_skip
def _generateDefaultName(self) -> str:
return str(self.anchor + Empty()).replace("Empty", "...")
def __add__(self, other) -> ParserElement:
skipper = SkipTo(other).set_name("...")("_skipped*")
if self.must_skip:
def must_skip(t):
if not t._skipped or t._skipped.as_list() == [""]:
del t[0]
t.pop("_skipped", None)
def show_skip(t):
if t._skipped.as_list()[-1:] == [""]:
t.pop("_skipped")
t["_skipped"] = f"missing <{self.anchor!r}>"
return (
self.anchor + skipper().add_parse_action(must_skip)
| skipper().add_parse_action(show_skip)
) + other
return self.anchor + skipper + other
def __repr__(self):
return self.defaultName
def parseImpl(self, *args) -> ParseImplReturnType:
raise Exception(
"use of `...` expression without following SkipTo target expression"
)
| _PendingSkip |
python | getsentry__sentry | src/sentry/integrations/msteams/card_builder/utils.py | {
"start": 0,
"end": 970
} | class ____:
HELP_TITLE = "Please use one of the following commands for Sentry:"
HELP_MESSAGE = (
"- **link**: link your Microsoft Teams identity to your Sentry account"
"\n\n- **unlink**: unlink your Microsoft Teams identity from your Sentry account"
"\n\n- **help**: view list of all bot commands"
)
UNRECOGNIZED_COMMAND = "Sorry, I didn't understand '{command_text}'."
AVAILABLE_COMMANDS_TEXT = "Type **help**: to see the list of available commands"
MENTIONED_TITLE = (
"Sentry for Microsoft Teams does not support any commands in channels, only in direct messages."
" To unlink your Microsoft Teams identity from your Sentry account message the personal bot."
)
MENTIONED_TEXT = (
"Want to learn more about configuring alerts in Sentry? Check out our documentation."
)
DOCS_BUTTON = "Docs"
DOCS_URL = "https://docs.sentry.io/product/alerts-notifications/alerts/"
| HelpMessages |
python | donnemartin__interactive-coding-challenges | bit_manipulation/print_binary/test_print_binary.py | {
"start": 18,
"end": 613
} | class ____(unittest.TestCase):
def test_print_binary(self):
bit = Bits()
self.assertEqual(bit.print_binary(None), 'ERROR')
self.assertEqual(bit.print_binary(0), 'ERROR')
self.assertEqual(bit.print_binary(1), 'ERROR')
num = 0.625
expected = '0.101'
self.assertEqual(bit.print_binary(num), expected)
num = 0.987654321
self.assertEqual(bit.print_binary(num), 'ERROR')
print('Success: test_print_binary')
def main():
test = TestBits()
test.test_print_binary()
if __name__ == '__main__':
main()
| TestBits |
python | scipy__scipy | scipy/special/tests/test_basic.py | {
"start": 80010,
"end": 80956
} | class ____:
def test_euler(self):
eu0 = special.euler(0)
eu1 = special.euler(1)
eu2 = special.euler(2) # just checking segfaults
assert_allclose(eu0, [1], rtol=1e-15)
assert_allclose(eu1, [1, 0], rtol=1e-15)
assert_allclose(eu2, [1, 0, -1], rtol=1e-15)
eu24 = special.euler(24)
mathworld = [1,1,5,61,1385,50521,2702765,199360981,
19391512145,2404879675441,
370371188237525,69348874393137901,
15514534163557086905]
correct = zeros((25,),'d')
for k in range(0,13):
if (k % 2):
correct[2*k] = -float(mathworld[k])
else:
correct[2*k] = float(mathworld[k])
with np.errstate(all='ignore'):
err = nan_to_num((eu24-correct)/correct)
errmax = max(err)
assert_allclose(errmax, 0.0, atol=1.5e-14, rtol=0)
| TestEuler |
python | pypa__pipenv | pipenv/vendor/packaging/metadata.py | {
"start": 1647,
"end": 17663
} | class ____(TypedDict, total=False):
"""A dictionary of raw core metadata.
Each field in core metadata maps to a key of this dictionary (when data is
provided). The key is lower-case and underscores are used instead of dashes
compared to the equivalent core metadata field. Any core metadata field that
can be specified multiple times or can hold multiple values in a single
field have a key with a plural name. See :class:`Metadata` whose attributes
match the keys of this dictionary.
Core metadata fields that can be specified multiple times are stored as a
list or dict depending on which is appropriate for the field. Any fields
which hold multiple values in a single field are stored as a list.
"""
# Metadata 1.0 - PEP 241
metadata_version: str
name: str
version: str
platforms: list[str]
summary: str
description: str
keywords: list[str]
home_page: str
author: str
author_email: str
license: str
# Metadata 1.1 - PEP 314
supported_platforms: list[str]
download_url: str
classifiers: list[str]
requires: list[str]
provides: list[str]
obsoletes: list[str]
# Metadata 1.2 - PEP 345
maintainer: str
maintainer_email: str
requires_dist: list[str]
provides_dist: list[str]
obsoletes_dist: list[str]
requires_python: str
requires_external: list[str]
project_urls: dict[str, str]
# Metadata 2.0
# PEP 426 attempted to completely revamp the metadata format
# but got stuck without ever being able to build consensus on
# it and ultimately ended up withdrawn.
#
# However, a number of tools had started emitting METADATA with
# `2.0` Metadata-Version, so for historical reasons, this version
# was skipped.
# Metadata 2.1 - PEP 566
description_content_type: str
provides_extra: list[str]
# Metadata 2.2 - PEP 643
dynamic: list[str]
# Metadata 2.3 - PEP 685
# No new fields were added in PEP 685, just some edge case were
# tightened up to provide better interoptability.
_STRING_FIELDS = {
"author",
"author_email",
"description",
"description_content_type",
"download_url",
"home_page",
"license",
"maintainer",
"maintainer_email",
"metadata_version",
"name",
"requires_python",
"summary",
"version",
}
_LIST_FIELDS = {
"classifiers",
"dynamic",
"obsoletes",
"obsoletes_dist",
"platforms",
"provides",
"provides_dist",
"provides_extra",
"requires",
"requires_dist",
"requires_external",
"supported_platforms",
}
_DICT_FIELDS = {
"project_urls",
}
def _parse_keywords(data: str) -> list[str]:
"""Split a string of comma-separate keyboards into a list of keywords."""
return [k.strip() for k in data.split(",")]
def _parse_project_urls(data: list[str]) -> dict[str, str]:
"""Parse a list of label/URL string pairings separated by a comma."""
urls = {}
for pair in data:
# Our logic is slightly tricky here as we want to try and do
# *something* reasonable with malformed data.
#
# The main thing that we have to worry about, is data that does
# not have a ',' at all to split the label from the Value. There
# isn't a singular right answer here, and we will fail validation
# later on (if the caller is validating) so it doesn't *really*
# matter, but since the missing value has to be an empty str
# and our return value is dict[str, str], if we let the key
# be the missing value, then they'd have multiple '' values that
# overwrite each other in a accumulating dict.
#
# The other potentional issue is that it's possible to have the
# same label multiple times in the metadata, with no solid "right"
# answer with what to do in that case. As such, we'll do the only
# thing we can, which is treat the field as unparseable and add it
# to our list of unparsed fields.
parts = [p.strip() for p in pair.split(",", 1)]
parts.extend([""] * (max(0, 2 - len(parts)))) # Ensure 2 items
# TODO: The spec doesn't say anything about if the keys should be
# considered case sensitive or not... logically they should
# be case-preserving and case-insensitive, but doing that
# would open up more cases where we might have duplicate
# entries.
label, url = parts
if label in urls:
# The label already exists in our set of urls, so this field
# is unparseable, and we can just add the whole thing to our
# unparseable data and stop processing it.
raise KeyError("duplicate labels in project urls")
urls[label] = url
return urls
def _get_payload(msg: email.message.Message, source: bytes | str) -> str:
"""Get the body of the message."""
# If our source is a str, then our caller has managed encodings for us,
# and we don't need to deal with it.
if isinstance(source, str):
payload: str = msg.get_payload()
return payload
# If our source is a bytes, then we're managing the encoding and we need
# to deal with it.
else:
bpayload: bytes = msg.get_payload(decode=True)
try:
return bpayload.decode("utf8", "strict")
except UnicodeDecodeError:
raise ValueError("payload in an invalid encoding")
# The various parse_FORMAT functions here are intended to be as lenient as
# possible in their parsing, while still returning a correctly typed
# RawMetadata.
#
# To aid in this, we also generally want to do as little touching of the
# data as possible, except where there are possibly some historic holdovers
# that make valid data awkward to work with.
#
# While this is a lower level, intermediate format than our ``Metadata``
# class, some light touch ups can make a massive difference in usability.
# Map METADATA fields to RawMetadata.
_EMAIL_TO_RAW_MAPPING = {
"author": "author",
"author-email": "author_email",
"classifier": "classifiers",
"description": "description",
"description-content-type": "description_content_type",
"download-url": "download_url",
"dynamic": "dynamic",
"home-page": "home_page",
"keywords": "keywords",
"license": "license",
"maintainer": "maintainer",
"maintainer-email": "maintainer_email",
"metadata-version": "metadata_version",
"name": "name",
"obsoletes": "obsoletes",
"obsoletes-dist": "obsoletes_dist",
"platform": "platforms",
"project-url": "project_urls",
"provides": "provides",
"provides-dist": "provides_dist",
"provides-extra": "provides_extra",
"requires": "requires",
"requires-dist": "requires_dist",
"requires-external": "requires_external",
"requires-python": "requires_python",
"summary": "summary",
"supported-platform": "supported_platforms",
"version": "version",
}
_RAW_TO_EMAIL_MAPPING = {raw: email for email, raw in _EMAIL_TO_RAW_MAPPING.items()}
def parse_email(data: bytes | str) -> tuple[RawMetadata, dict[str, list[str]]]:
"""Parse a distribution's metadata stored as email headers (e.g. from ``METADATA``).
This function returns a two-item tuple of dicts. The first dict is of
recognized fields from the core metadata specification. Fields that can be
parsed and translated into Python's built-in types are converted
appropriately. All other fields are left as-is. Fields that are allowed to
appear multiple times are stored as lists.
The second dict contains all other fields from the metadata. This includes
any unrecognized fields. It also includes any fields which are expected to
be parsed into a built-in type but were not formatted appropriately. Finally,
any fields that are expected to appear only once but are repeated are
included in this dict.
"""
raw: dict[str, str | list[str] | dict[str, str]] = {}
unparsed: dict[str, list[str]] = {}
if isinstance(data, str):
parsed = email.parser.Parser(policy=email.policy.compat32).parsestr(data)
else:
parsed = email.parser.BytesParser(policy=email.policy.compat32).parsebytes(data)
# We have to wrap parsed.keys() in a set, because in the case of multiple
# values for a key (a list), the key will appear multiple times in the
# list of keys, but we're avoiding that by using get_all().
for name in frozenset(parsed.keys()):
# Header names in RFC are case insensitive, so we'll normalize to all
# lower case to make comparisons easier.
name = name.lower()
# We use get_all() here, even for fields that aren't multiple use,
# because otherwise someone could have e.g. two Name fields, and we
# would just silently ignore it rather than doing something about it.
headers = parsed.get_all(name) or []
# The way the email module works when parsing bytes is that it
# unconditionally decodes the bytes as ascii using the surrogateescape
# handler. When you pull that data back out (such as with get_all() ),
# it looks to see if the str has any surrogate escapes, and if it does
# it wraps it in a Header object instead of returning the string.
#
# As such, we'll look for those Header objects, and fix up the encoding.
value = []
# Flag if we have run into any issues processing the headers, thus
# signalling that the data belongs in 'unparsed'.
valid_encoding = True
for h in headers:
# It's unclear if this can return more types than just a Header or
# a str, so we'll just assert here to make sure.
assert isinstance(h, (email.header.Header, str))
# If it's a header object, we need to do our little dance to get
# the real data out of it. In cases where there is invalid data
# we're going to end up with mojibake, but there's no obvious, good
# way around that without reimplementing parts of the Header object
# ourselves.
#
# That should be fine since, if mojibacked happens, this key is
# going into the unparsed dict anyways.
if isinstance(h, email.header.Header):
# The Header object stores it's data as chunks, and each chunk
# can be independently encoded, so we'll need to check each
# of them.
chunks: list[tuple[bytes, str | None]] = []
for bin, encoding in email.header.decode_header(h):
try:
bin.decode("utf8", "strict")
except UnicodeDecodeError:
# Enable mojibake.
encoding = "latin1"
valid_encoding = False
else:
encoding = "utf8"
chunks.append((bin, encoding))
# Turn our chunks back into a Header object, then let that
# Header object do the right thing to turn them into a
# string for us.
value.append(str(email.header.make_header(chunks)))
# This is already a string, so just add it.
else:
value.append(h)
# We've processed all of our values to get them into a list of str,
# but we may have mojibake data, in which case this is an unparsed
# field.
if not valid_encoding:
unparsed[name] = value
continue
raw_name = _EMAIL_TO_RAW_MAPPING.get(name)
if raw_name is None:
# This is a bit of a weird situation, we've encountered a key that
# we don't know what it means, so we don't know whether it's meant
# to be a list or not.
#
# Since we can't really tell one way or another, we'll just leave it
# as a list, even though it may be a single item list, because that's
# what makes the most sense for email headers.
unparsed[name] = value
continue
# If this is one of our string fields, then we'll check to see if our
# value is a list of a single item. If it is then we'll assume that
# it was emitted as a single string, and unwrap the str from inside
# the list.
#
# If it's any other kind of data, then we haven't the faintest clue
# what we should parse it as, and we have to just add it to our list
# of unparsed stuff.
if raw_name in _STRING_FIELDS and len(value) == 1:
raw[raw_name] = value[0]
# If this is one of our list of string fields, then we can just assign
# the value, since email *only* has strings, and our get_all() call
# above ensures that this is a list.
elif raw_name in _LIST_FIELDS:
raw[raw_name] = value
# Special Case: Keywords
# The keywords field is implemented in the metadata spec as a str,
# but it conceptually is a list of strings, and is serialized using
# ", ".join(keywords), so we'll do some light data massaging to turn
# this into what it logically is.
elif raw_name == "keywords" and len(value) == 1:
raw[raw_name] = _parse_keywords(value[0])
# Special Case: Project-URL
# The project urls is implemented in the metadata spec as a list of
# specially-formatted strings that represent a key and a value, which
# is fundamentally a mapping, however the email format doesn't support
# mappings in a sane way, so it was crammed into a list of strings
# instead.
#
# We will do a little light data massaging to turn this into a map as
# it logically should be.
elif raw_name == "project_urls":
try:
raw[raw_name] = _parse_project_urls(value)
except KeyError:
unparsed[name] = value
# Nothing that we've done has managed to parse this, so it'll just
# throw it in our unparseable data and move on.
else:
unparsed[name] = value
# We need to support getting the Description from the message payload in
# addition to getting it from the the headers. This does mean, though, there
# is the possibility of it being set both ways, in which case we put both
# in 'unparsed' since we don't know which is right.
try:
payload = _get_payload(parsed, data)
except ValueError:
unparsed.setdefault("description", []).append(
parsed.get_payload(decode=isinstance(data, bytes))
)
else:
if payload:
# Check to see if we've already got a description, if so then both
# it, and this body move to unparseable.
if "description" in raw:
description_header = cast(str, raw.pop("description"))
unparsed.setdefault("description", []).extend(
[description_header, payload]
)
elif "description" in unparsed:
unparsed["description"].append(payload)
else:
raw["description"] = payload
# We need to cast our `raw` to a metadata, because a TypedDict only support
# literal key names, but we're computing our key names on purpose, but the
# way this function is implemented, our `TypedDict` can only have valid key
# names.
return cast(RawMetadata, raw), unparsed
_NOT_FOUND = object()
# Keep the two values in sync.
_VALID_METADATA_VERSIONS = ["1.0", "1.1", "1.2", "2.1", "2.2", "2.3"]
_MetadataVersion = Literal["1.0", "1.1", "1.2", "2.1", "2.2", "2.3"]
_REQUIRED_ATTRS = frozenset(["metadata_version", "name", "version"])
| RawMetadata |
python | pypa__setuptools | setuptools/_vendor/wheel/vendored/packaging/_tokenizer.py | {
"start": 2130,
"end": 5292
} | class ____:
"""Context-sensitive token parsing.
Provides methods to examine the input stream to check whether the next token
matches.
"""
def __init__(
self,
source: str,
*,
rules: "Dict[str, Union[str, re.Pattern[str]]]",
) -> None:
self.source = source
self.rules: Dict[str, re.Pattern[str]] = {
name: re.compile(pattern) for name, pattern in rules.items()
}
self.next_token: Optional[Token] = None
self.position = 0
def consume(self, name: str) -> None:
"""Move beyond provided token name, if at current position."""
if self.check(name):
self.read()
def check(self, name: str, *, peek: bool = False) -> bool:
"""Check whether the next token has the provided name.
By default, if the check succeeds, the token *must* be read before
another check. If `peek` is set to `True`, the token is not loaded and
would need to be checked again.
"""
assert (
self.next_token is None
), f"Cannot check for {name!r}, already have {self.next_token!r}"
assert name in self.rules, f"Unknown token name: {name!r}"
expression = self.rules[name]
match = expression.match(self.source, self.position)
if match is None:
return False
if not peek:
self.next_token = Token(name, match[0], self.position)
return True
def expect(self, name: str, *, expected: str) -> Token:
"""Expect a certain token name next, failing with a syntax error otherwise.
The token is *not* read.
"""
if not self.check(name):
raise self.raise_syntax_error(f"Expected {expected}")
return self.read()
def read(self) -> Token:
"""Consume the next token and return it."""
token = self.next_token
assert token is not None
self.position += len(token.text)
self.next_token = None
return token
def raise_syntax_error(
self,
message: str,
*,
span_start: Optional[int] = None,
span_end: Optional[int] = None,
) -> NoReturn:
"""Raise ParserSyntaxError at the given position."""
span = (
self.position if span_start is None else span_start,
self.position if span_end is None else span_end,
)
raise ParserSyntaxError(
message,
source=self.source,
span=span,
)
@contextlib.contextmanager
def enclosing_tokens(
self, open_token: str, close_token: str, *, around: str
) -> Iterator[None]:
if self.check(open_token):
open_position = self.position
self.read()
else:
open_position = None
yield
if open_position is None:
return
if not self.check(close_token):
self.raise_syntax_error(
f"Expected matching {close_token} for {open_token}, after {around}",
span_start=open_position,
)
self.read()
| Tokenizer |
python | PrefectHQ__prefect | src/prefect/server/schemas/filters.py | {
"start": 32218,
"end": 32941
} | class ____(PrefectFilterBaseModel):
"""Filter by `TaskRun.subflow_run`."""
exists_: Optional[bool] = Field(
default=None,
description=(
"If true, only include task runs that are subflow run parents; if false,"
" exclude parent task runs"
),
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.exists_ is True:
filters.append(db.TaskRun.subflow_run.has())
elif self.exists_ is False:
filters.append(sa.not_(db.TaskRun.subflow_run.has()))
return filters
| TaskRunFilterSubFlowRuns |
python | dagster-io__dagster | python_modules/libraries/dagster-wandb/dagster_wandb/io_manager.py | {
"start": 1307,
"end": 33985
} | class ____(IOManager):
"""IO Manager to handle Artifacts in Weights & Biases (W&B) .
It handles 3 different inputs:
- Pickable objects (the serialization module is configurable)
- W&B Objects (Audio, Table, Image, etc)
- W&B Artifacts
"""
def __init__(self, wandb_client, config: Config):
self.wandb = wandb_client
dagster_run_id = config["dagster_run_id"]
self.dagster_run_id = dagster_run_id
self.wandb_host = config["wandb_host"]
self.wandb_entity = config["wandb_entity"]
self.wandb_project = config["wandb_project"]
self.wandb_run_id = config.get("wandb_run_id") or dagster_run_id
self.wandb_run_name = config.get("wandb_run_name") or f"dagster-run-{dagster_run_id[0:8]}"
# augments the run tags
wandb_run_tags = config["wandb_run_tags"] or []
if "dagster_wandb" not in wandb_run_tags:
wandb_run_tags = [*wandb_run_tags, "dagster_wandb"]
self.wandb_run_tags = wandb_run_tags
self.base_dir = config["base_dir"]
cache_duration_in_minutes = config["cache_duration_in_minutes"]
default_cache_expiration_in_minutes = 60 * 24 * 30 # 60 minutes * 24 hours * 30 days
self.cache_duration_in_minutes = (
cache_duration_in_minutes
if cache_duration_in_minutes is not None
else default_cache_expiration_in_minutes
)
def _get_local_storage_path(self):
path = self.base_dir
if os.path.basename(path) != "storage":
path = os.path.join(path, "storage")
path = os.path.join(path, "wandb_artifacts_manager")
os.makedirs(path, exist_ok=True)
return path
def _get_artifacts_path(self, name, version):
local_storage_path = self._get_local_storage_path()
path = os.path.join(local_storage_path, "artifacts", f"{name}.{version}")
os.makedirs(path, exist_ok=True)
return path
def _get_wandb_logs_path(self):
local_storage_path = self._get_local_storage_path()
# Adding a random uuid to avoid collisions in multi-process context
path = os.path.join(local_storage_path, "runs", self.dagster_run_id, str(uuid.uuid4()))
os.makedirs(path, exist_ok=True)
return path
def _clean_local_storage_path(self):
local_storage_path = self._get_local_storage_path()
cache_duration_in_minutes = self.cache_duration_in_minutes
current_timestamp = int(time.time())
expiration_timestamp = current_timestamp - (
cache_duration_in_minutes * 60 # convert to seconds
)
for root, dirs, files in os.walk(local_storage_path, topdown=False):
for name in files:
current_file_path = os.path.join(root, name)
most_recent_access = os.lstat(current_file_path).st_atime
if most_recent_access <= expiration_timestamp or cache_duration_in_minutes == 0:
os.remove(current_file_path)
for name in dirs:
current_dir_path = os.path.join(root, name)
if not os.path.islink(current_dir_path):
if len(os.listdir(current_dir_path)) == 0 or cache_duration_in_minutes == 0:
shutil.rmtree(current_dir_path)
@contextmanager
def wandb_run(self):
self.wandb.init(
id=self.wandb_run_id,
name=self.wandb_run_name,
project=self.wandb_project,
entity=self.wandb_entity,
dir=self._get_wandb_logs_path(),
tags=self.wandb_run_tags,
anonymous="never",
resume="allow",
)
try:
yield self.wandb.run
finally:
self.wandb.finish()
self._clean_local_storage_path()
def _upload_artifact(self, context: OutputContext, obj):
if not context.has_partition_key and context.has_asset_partitions:
raise WandbArtifactsIOManagerError(
"Sorry, but the Weights & Biases (W&B) IO Manager can't handle processing several"
" partitions at the same time within a single run. Please process each partition"
" separately. If you think this might be an error, don't hesitate to reach out to"
" Weights & Biases Support."
)
with self.wandb_run() as run:
parameters = {}
if context.definition_metadata is not None:
parameters = context.definition_metadata.get("wandb_artifact_configuration", {})
raise_on_unknown_write_configuration_keys(parameters)
serialization_module = parameters.get("serialization_module", {})
serialization_module_name = serialization_module.get("name", "pickle")
if serialization_module_name not in ACCEPTED_SERIALIZATION_MODULES:
raise WandbArtifactsIOManagerError(
f"Oops! It looks like the value you provided, '{serialization_module_name}',"
" isn't recognized as a valid serialization module. Here are the ones we do"
f" support: {ACCEPTED_SERIALIZATION_MODULES}."
)
serialization_module_parameters = serialization_module.get("parameters", {})
serialization_module_parameters_with_protocol = {
"protocol": (
pickle.HIGHEST_PROTOCOL # we use the highest available protocol if we don't pass one
),
**serialization_module_parameters,
}
artifact_type = parameters.get("type", "artifact")
artifact_description = parameters.get("description")
artifact_metadata = {
"source_integration": "dagster_wandb",
"source_integration_version": __version__,
"source_dagster_run_id": self.dagster_run_id,
"source_created_at": datetime.datetime.now(datetime.timezone.utc).isoformat(),
"source_python_version": platform.python_version(),
}
if isinstance(obj, Artifact):
if parameters.get("name") is not None:
raise WandbArtifactsIOManagerError(
"You've provided a 'name' property in the 'wandb_artifact_configuration'"
" settings. However, this 'name' property should only be used when the"
" output isn't already an Artifact object."
)
if parameters.get("type") is not None:
raise WandbArtifactsIOManagerError(
"You've provided a 'type' property in the 'wandb_artifact_configuration'"
" settings. However, this 'type' property should only be used when the"
" output isn't already an Artifact object."
)
if obj.name is None:
raise WandbArtifactsIOManagerError(
"The Weights & Biases (W&B) Artifact you provided is missing a name."
" Please, assign a name to your Artifact."
)
if context.has_asset_key and obj.name != context.get_asset_identifier()[0]:
asset_identifier = context.get_asset_identifier()[0]
context.log.warning(
f"Please note, the name '{obj.name}' of your Artifact is overwritten by the"
f" name derived from the AssetKey '{asset_identifier}'. For consistency and"
" to avoid confusion, we advise sharing a constant for both your asset's"
" name and the artifact's name."
)
obj._name = asset_identifier # noqa: SLF001
if context.has_partition_key:
artifact_name = f"{obj.name}.{context.partition_key}"
# The Artifact provided is produced in a partitioned execution we add the
# partition as a suffix to the Artifact name
obj._name = artifact_name # noqa: SLF001
if len(serialization_module) != 0: # not an empty dict
context.log.warning(
"You've included a 'serialization_module' in the"
" 'wandb_artifact_configuration' settings. However, this doesn't have any"
" impact when the output is already an Artifact object."
)
# The obj is already an Artifact we augment its metadata
artifact = obj
artifact.metadata = {**artifact.metadata, **artifact_metadata}
if artifact.description is not None and artifact_description is not None:
raise WandbArtifactsIOManagerError(
"You've given a 'description' in the 'wandb_artifact_configuration'"
" settings for an existing Artifact that already has a description. Please,"
" either set the description using 'wandb_artifact_argument' or when"
" creating your Artifact."
)
if artifact_description is not None:
artifact.description = artifact_description
else:
if context.has_asset_key:
if parameters.get("name") is not None:
raise WandbArtifactsIOManagerError(
"You've included a 'name' property in the"
" 'wandb_artifact_configuration' settings. But, a 'name' is only needed"
" when there's no 'AssetKey'. When an Artifact is created from an"
" @asset, it uses the asset name. When it's created from an @op with an"
" 'asset_key' for the output, that value is used. Please remove the"
" 'name' property."
)
artifact_name = context.get_asset_identifier()[0] # name of asset
else:
name_parameter = parameters.get("name")
if name_parameter is None:
raise WandbArtifactsIOManagerError(
"The 'name' property is missing in the 'wandb_artifact_configuration'"
" settings. For Artifacts created from an @op, a 'name' property is"
" needed. You could also use an @asset as an alternative."
)
assert name_parameter is not None
artifact_name = name_parameter
if context.has_partition_key:
artifact_name = f"{artifact_name}.{context.partition_key}"
# We replace the | character with - because it is not allowed in artifact names
# The | character is used in multi-dimensional partition keys
artifact_name = str(artifact_name).replace("|", "-")
# Creates an artifact to hold the obj
artifact = self.wandb.Artifact(
name=artifact_name,
type=artifact_type,
description=artifact_description,
metadata=artifact_metadata,
)
if isinstance(obj, WBValue):
if len(serialization_module) != 0: # not an empty dict
context.log.warning(
"You've included a 'serialization_module' in the"
" 'wandb_artifact_configuration' settings. However, this doesn't have"
" any impact when the output is already a W&B object like e.g Table or"
" Image."
)
# Adds the WBValue object using the class name as the name for the file
artifact.add(obj, obj.__class__.__name__)
elif obj is not None:
# The output is not a native wandb Object, we serialize it
pickle_artifact_content(
context,
serialization_module_name,
serialization_module_parameters_with_protocol,
artifact,
obj,
)
# Add any files: https://docs.wandb.ai/ref/python/artifact#add_file
add_files = parameters.get("add_files")
if add_files is not None and len(add_files) > 0:
for add_file in add_files:
artifact.add_file(**add_file)
# Add any dirs: https://docs.wandb.ai/ref/python/artifact#add_dir
add_dirs = parameters.get("add_dirs")
if add_dirs is not None and len(add_dirs) > 0:
for add_dir in add_dirs:
artifact.add_dir(**add_dir)
# Add any reference: https://docs.wandb.ai/ref/python/artifact#add_reference
add_references = parameters.get("add_references")
if add_references is not None and len(add_references) > 0:
for add_reference in add_references:
artifact.add_reference(**add_reference)
# Augments the aliases
aliases = parameters.get("aliases", [])
aliases.append(f"dagster-run-{self.dagster_run_id[0:8]}")
if "latest" not in aliases:
aliases.append("latest")
# Logs the artifact
self.wandb.log_artifact(artifact, aliases=aliases)
artifact.wait()
# Adds useful metadata to the output or Asset
artifacts_base_url = (
"https://wandb.ai"
if self.wandb_host == WANDB_CLOUD_HOST
else self.wandb_host.rstrip("/")
)
assert artifact.id is not None
output_metadata = {
"dagster_run_id": MetadataValue.dagster_run(self.dagster_run_id),
"wandb_artifact_id": MetadataValue.text(artifact.id),
"wandb_artifact_type": MetadataValue.text(artifact.type),
"wandb_artifact_version": MetadataValue.text(artifact.version),
"wandb_artifact_size": MetadataValue.int(artifact.size),
"wandb_artifact_url": MetadataValue.url(
f"{artifacts_base_url}/{run.entity}/{run.project}/artifacts/{artifact.type}/{'/'.join(artifact.name.rsplit(':', 1))}"
),
"wandb_entity": MetadataValue.text(run.entity),
"wandb_project": MetadataValue.text(run.project),
"wandb_run_id": MetadataValue.text(run.id),
"wandb_run_name": MetadataValue.text(run.name),
"wandb_run_path": MetadataValue.text(run.path),
"wandb_run_url": MetadataValue.url(run.url),
}
context.add_output_metadata(output_metadata)
def _download_artifact(self, context: InputContext):
with self.wandb_run() as run:
parameters = {}
if context.definition_metadata is not None:
parameters = context.definition_metadata.get("wandb_artifact_configuration", {})
raise_on_unknown_read_configuration_keys(parameters)
partitions_configuration = parameters.get("partitions", {})
if not context.has_asset_partitions and len(partitions_configuration) > 0:
raise WandbArtifactsIOManagerError(
"You've included a 'partitions' value in the 'wandb_artifact_configuration'"
" settings but it's not within a partitioned execution. Please only use"
" 'partitions' within a partitioned context."
)
if context.has_asset_partitions:
# Note: this is currently impossible to unit test with current Dagster APIs but was
# tested thoroughly manually
name = parameters.get("get")
path = parameters.get("get_path")
if name is not None or path is not None:
raise WandbArtifactsIOManagerError(
"You've given a value for 'get' and/or 'get_path' in the"
" 'wandb_artifact_configuration' settings during a partitioned execution."
" Please use the 'partitions' property to set 'get' or 'get_path' for each"
" individual partition. To set a default value for all partitions, use '*'."
)
artifact_name = parameters.get("name")
if artifact_name is None:
artifact_name = context.asset_key.path[0] # name of asset
partitions = [
(key, f"{artifact_name}.{str(key).replace('|', '-')}")
for key in context.asset_partition_keys
]
output = {}
for key, artifact_name in partitions:
context.log.info(f"Handling partition with key '{key}'")
partition_configuration = partitions_configuration.get(
key, partitions_configuration.get("*")
)
raise_on_empty_configuration(key, partition_configuration)
raise_on_unknown_partition_keys(key, partition_configuration)
partition_version = None
partition_alias = None
if partition_configuration and partition_configuration is not None:
partition_version = partition_configuration.get("version")
partition_alias = partition_configuration.get("alias")
if partition_version is not None and partition_alias is not None:
raise WandbArtifactsIOManagerError(
"You've provided both 'version' and 'alias' for the partition with"
" key '{key}'. You should only use one of these properties at a"
" time. If you choose not to use any, the latest version will be"
" used by default. If this partition is configured with the '*'"
" key, please correct the wildcard configuration."
)
partition_identifier = partition_version or partition_alias or "latest"
artifact_uri = (
f"{run.entity}/{run.project}/{artifact_name}:{partition_identifier}"
)
try:
api = self.wandb.Api()
api.artifact(artifact_uri)
except Exception as exception:
raise WandbArtifactsIOManagerError(
"The artifact you're attempting to download might not exist, or you"
" might have forgotten to include the 'name' property in the"
" 'wandb_artifact_configuration' settings."
) from exception
artifact = run.use_artifact(artifact_uri)
artifacts_path = self._get_artifacts_path(artifact_name, artifact.version)
if partition_configuration and partition_configuration is not None:
partition_name = partition_configuration.get("get")
partition_path = partition_configuration.get("get_path")
if partition_name is not None and partition_path is not None:
raise WandbArtifactsIOManagerError(
"You've provided both 'get' and 'get_path' in the"
" 'wandb_artifact_configuration' settings for the partition with"
" key '{key}'. Only one of these properties should be used. If you"
" choose not to use any, the whole Artifact will be returned. If"
" this partition is configured with the '*' key, please correct the"
" wildcard configuration."
)
if partition_name is not None:
wandb_object = artifact.get(partition_name)
if wandb_object is not None:
output[key] = wandb_object
continue
if partition_path is not None:
path = artifact.get_path(partition_path)
download_path = path.download(root=artifacts_path)
if download_path is not None:
output[key] = download_path
continue
artifact_dir = artifact.download(root=artifacts_path)
unpickled_content = unpickle_artifact_content(artifact_dir)
if unpickled_content is not None:
output[key] = unpickled_content
continue
artifact.verify(root=artifacts_path)
output[key] = artifact
if len(output) == 1:
# If there's only one partition, return the value directly
return next(iter(output.values()))
return output
elif context.has_asset_key:
# Input is an asset
if parameters.get("name") is not None:
raise WandbArtifactsIOManagerError(
"A conflict has been detected in the provided configuration settings. The"
" 'name' parameter appears to be specified twice - once in the"
" 'wandb_artifact_configuration' metadata dictionary, and again as an"
" AssetKey. Kindly avoid setting the name directly, since the AssetKey will"
" be used for this purpose."
)
artifact_name = context.get_asset_identifier()[0] # name of asset
else:
artifact_name = parameters.get("name")
if artifact_name is None:
raise WandbArtifactsIOManagerError(
"The 'name' property is missing in the 'wandb_artifact_configuration'"
" settings. For Artifacts used in an @op, a 'name' property is required."
" You could use an @asset as an alternative."
)
if context.has_partition_key:
artifact_name = f"{artifact_name}.{context.partition_key}"
artifact_alias = parameters.get("alias")
artifact_version = parameters.get("version")
if artifact_alias is not None and artifact_version is not None:
raise WandbArtifactsIOManagerError(
"You've provided both 'version' and 'alias' in the"
" 'wandb_artifact_configuration' settings. Only one should be used at a time."
" If you decide not to use any, the latest version will be applied"
" automatically."
)
artifact_identifier = artifact_alias or artifact_version or "latest"
artifact_uri = f"{run.entity}/{run.project}/{artifact_name}:{artifact_identifier}"
# This try/except block is a workaround for a bug in the W&B SDK, this should be removed
# once the bug is fixed.
try:
artifact = run.use_artifact(artifact_uri)
except Exception:
api = self.wandb.Api()
artifact = api.artifact(artifact_uri)
name = parameters.get("get")
path = parameters.get("get_path")
if name is not None and path is not None:
raise WandbArtifactsIOManagerError(
"You've provided both 'get' and 'get_path' in the"
" 'wandb_artifact_configuration' settings. Only one should be used at a time."
" If you decide not to use any, the entire Artifact will be returned."
)
if name is not None:
return artifact.get(name)
artifacts_path = self._get_artifacts_path(artifact_name, artifact.version)
if path is not None:
path = artifact.get_path(path)
return path.download(root=artifacts_path)
artifact_dir = artifact.download(root=artifacts_path)
unpickled_content = unpickle_artifact_content(artifact_dir)
if unpickled_content is not None:
return unpickled_content
artifact.verify(root=artifacts_path)
return artifact
def handle_output(self, context: OutputContext, obj) -> None:
if obj is None:
context.log.warning(
"The output value given to the Weights & Biases (W&B) IO Manager is empty. If this"
" was intended, you can disregard this warning."
)
else:
try:
self._upload_artifact(context, obj)
except WandbArtifactsIOManagerError as exception:
raise exception
except Exception as exception:
raise WandbArtifactsIOManagerError() from exception
def load_input(self, context: InputContext):
try:
return self._download_artifact(context)
except WandbArtifactsIOManagerError as exception:
raise exception
except Exception as exception:
raise WandbArtifactsIOManagerError() from exception
@dagster_maintained_io_manager
@io_manager(
required_resource_keys={"wandb_resource", "wandb_config"},
description="IO manager to read and write W&B Artifacts",
config_schema={
"run_name": Field(
String,
is_required=False,
description=(
"Short display name for this run, which is how you'll identify this run in the UI."
" By default, it`s set to a string with the following format dagster-run-[8 first"
" characters of the Dagster Run ID] e.g. dagster-run-7e4df022."
),
),
"run_id": Field(
String,
is_required=False,
description=(
"Unique ID for this run, used for resuming. It must be unique in the project, and"
" if you delete a run you can't reuse the ID. Use the name field for a short"
" descriptive name, or config for saving hyperparameters to compare across runs."
r" The ID cannot contain the following special characters: /\#?%:.. You need to set"
" the Run ID when you are doing experiment tracking inside Dagster to allow the IO"
" Manager to resume the run. By default it`s set to the Dagster Run ID e.g "
" 7e4df022-1bf2-44b5-a383-bb852df4077e."
),
),
"run_tags": Field(
[String],
is_required=False,
description=(
"A list of strings, which will populate the list of tags on this run in the UI."
" Tags are useful for organizing runs together, or applying temporary labels like"
" 'baseline' or 'production'. It's easy to add and remove tags in the UI, or filter"
" down to just runs with a specific tag. Any W&B Run used by the integration will"
" have the dagster_wandb tag."
),
),
"base_dir": Field(
String,
is_required=False,
description=(
"Base directory used for local storage and caching. W&B Artifacts and W&B Run logs"
" will be written and read from that directory. By default, it`s using the"
" DAGSTER_HOME directory."
),
),
"cache_duration_in_minutes": Field(
Int,
is_required=False,
description=(
"Defines the amount of time W&B Artifacts and W&B Run logs should be kept in the"
" local storage. Only files and directories that were not opened for that amount of"
" time are removed from the cache. Cache purging happens at the end of an IO"
" Manager execution. You can set it to 0, if you want to disable caching"
" completely. Caching improves speed when an Artifact is reused between jobs"
" running on the same machine. It defaults to 30 days."
),
),
},
)
def wandb_artifacts_io_manager(context: InitResourceContext):
"""Dagster IO Manager to create and consume W&B Artifacts.
It allows any Dagster @op or @asset to create and consume W&B Artifacts natively.
For a complete set of documentation, see `Dagster integration <https://docs.wandb.ai/guides/integrations/dagster>`_.
**Example:**
.. code-block:: python
@repository
def my_repository():
return [
*with_resources(
load_assets_from_current_module(),
resource_defs={
"wandb_config": make_values_resource(
entity=str,
project=str,
),
"wandb_resource": wandb_resource.configured(
{"api_key": {"env": "WANDB_API_KEY"}}
),
"wandb_artifacts_manager": wandb_artifacts_io_manager.configured(
{"cache_duration_in_minutes": 60} # only cache files for one hour
),
},
resource_config_by_key={
"wandb_config": {
"config": {
"entity": "my_entity",
"project": "my_project"
}
}
},
),
]
@asset(
name="my_artifact",
metadata={
"wandb_artifact_configuration": {
"type": "dataset",
}
},
io_manager_key="wandb_artifacts_manager",
)
def create_dataset():
return [1, 2, 3]
"""
wandb_client = context.resources.wandb_resource["sdk"]
wandb_host = context.resources.wandb_resource["host"]
wandb_entity = context.resources.wandb_config["entity"]
wandb_project = context.resources.wandb_config["project"]
wandb_run_name = None
wandb_run_id = None
wandb_run_tags = None
base_dir = (
context.instance.storage_directory() if context.instance else os.environ["DAGSTER_HOME"]
)
cache_duration_in_minutes = None
if context.resource_config is not None:
wandb_run_name = context.resource_config.get("run_name")
wandb_run_id = context.resource_config.get("run_id")
wandb_run_tags = context.resource_config.get("run_tags")
base_dir = context.resource_config.get("base_dir", base_dir)
cache_duration_in_minutes = context.resource_config.get("cache_duration_in_minutes")
if "PYTEST_CURRENT_TEST" in os.environ:
dagster_run_id = UNIT_TEST_RUN_ID
else:
dagster_run_id = context.run_id
assert dagster_run_id is not None
config: Config = {
"dagster_run_id": dagster_run_id,
"wandb_host": wandb_host,
"wandb_entity": wandb_entity,
"wandb_project": wandb_project,
"wandb_run_name": wandb_run_name,
"wandb_run_id": wandb_run_id,
"wandb_run_tags": wandb_run_tags,
"base_dir": base_dir,
"cache_duration_in_minutes": cache_duration_in_minutes,
}
return ArtifactsIOManager(wandb_client, config)
| ArtifactsIOManager |
python | walkccc__LeetCode | solutions/3011. Find if Array Can Be Sorted/3011.py | {
"start": 0,
"end": 995
} | class ____:
def canSortArray(self, nums: list[int]) -> int:
# Divide the array into distinct segments where each segment is comprised
# of consecutive elements sharing an equal number of set bits. Ensure that
# for each segment, when moving from left to right, the maximum of a
# preceding segment is less than the minimum of the following segment.
prevSetBits = 0
prevMax = -math.inf # the maximum of the previous segment
currMax = -math.inf # the maximum of the current segment
currMin = math.inf # the minimum of the current segment
for num in nums:
setBits = num.bit_count()
if setBits != prevSetBits: # Start a new segment.
if prevMax > currMin:
return False
prevSetBits = setBits
prevMax = currMax
currMax = num
currMin = num
else: # Continue with the current segment.
currMax = max(currMax, num)
currMin = min(currMin, num)
return prevMax <= currMin
| Solution |
python | django-extensions__django-extensions | tests/templatetags/test_highlighting.py | {
"start": 1974,
"end": 2387
} | class ____(TestCase):
"""Tests for parse_teplate filter."""
def test_should_mark_html_as_safe(self):
ctx = Context({"value": "<h1>Hello World</h1>"})
content = """{% load highlighting %}
{{ value|parse_template }}
"""
expected_result = """<h1>Hello World</h1>"""
result = Template(content).render(ctx)
self.assertHTMLEqual(result, expected_result)
| ParseTemplateTests |
python | keras-team__keras | keras/src/ops/math_test.py | {
"start": 35813,
"end": 36655
} | class ____(testing.TestCase):
"""Test the floating dtype to verify that the behavior matches JAX."""
ALL_DTYPES = [
x
for x in dtypes.ALLOWED_DTYPES
if x
not in (
"string",
"complex64",
"complex128",
# Remove 64-bit dtypes.
"float64",
"uint64",
"int64",
)
+ dtypes.FLOAT8_TYPES # Remove float8 dtypes for the following tests
] + [None]
INT_DTYPES = [x for x in dtypes.INT_TYPES if x not in ("uint64", "int64")]
FLOAT_DTYPES = [x for x in dtypes.FLOAT_TYPES if x not in ("float64",)]
if backend.backend() == "torch":
ALL_DTYPES = [x for x in ALL_DTYPES if x not in ("uint16", "uint32")]
INT_DTYPES = [x for x in INT_DTYPES if x not in ("uint16", "uint32")]
| MathDtypeTest |
python | getsentry__sentry | tests/sentry/notifications/platform/test_service.py | {
"start": 891,
"end": 8136
} | class ____(TestCase):
def setUp(self) -> None:
self.target = GenericNotificationTarget(
provider_key=NotificationProviderKey.EMAIL,
resource_type=NotificationTargetResourceType.EMAIL,
resource_id="test@example.com",
)
self.slack_integration = self.create_integration(
organization=self.organization, provider="slack", external_id="ext-123"
)
self.integration_target = IntegrationNotificationTarget(
provider_key=NotificationProviderKey.SLACK,
resource_type=NotificationTargetResourceType.CHANNEL,
resource_id="C1234567890",
integration_id=self.slack_integration.id,
organization_id=self.organization.id,
)
self.template = MockNotificationTemplate()
def test_basic_notify(self) -> None:
service = NotificationService(data=MockNotification(message="this is a test notification"))
service.notify_sync(targets=[self.target])
@mock.patch("sentry.notifications.platform.service.logger")
def test_validation_on_notify(self, mock_logger: mock.MagicMock) -> None:
service = NotificationService(data=MockNotification(message="this is a test notification"))
with pytest.raises(
NotificationServiceError,
match="Must provide either a strategy or targets. Strategy is preferred.",
):
service.notify_sync()
strategy = MockStrategy(targets=[])
with pytest.raises(
NotificationServiceError,
match="Cannot provide both strategy and targets, only one is permitted. Strategy is preferred.",
):
service.notify_sync(strategy=strategy, targets=[self.target])
service.notify_sync(strategy=strategy)
mock_logger.warning.assert_called_once_with(
"Strategy '%s' did not yield targets", strategy.__class__.__name__
)
@mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
@mock.patch("sentry.notifications.platform.email.provider.EmailNotificationProvider.send")
def test_notify_target_calls_provider_correctly(
self, mock_send: mock.MagicMock, mock_record: mock.MagicMock
) -> None:
service = NotificationService(data=MockNotification(message="test"))
service.notify_target(target=self.target)
mock_send.assert_called_once()
# SLO assertions
assert_count_of_metric(mock_record, EventLifecycleOutcome.STARTED, outcome_count=1)
assert_count_of_metric(mock_record, EventLifecycleOutcome.SUCCESS, outcome_count=1)
@mock.patch("sentry.notifications.platform.email.provider.EmailNotificationProvider.send")
def test_notify_sync_collects_errors(self, mock_send: mock.MagicMock) -> None:
mock_send.side_effect = IntegrationConfigurationError("Provider error", 400)
service = NotificationService(data=MockNotification(message="test"))
errors = service.notify_sync(targets=[self.target])
assert len(errors[NotificationProviderKey.EMAIL]) == 1
assert "Provider error" in errors[NotificationProviderKey.EMAIL][0]
def test_render_template_classmethod(self) -> None:
data = MockNotification(message="test")
template = MockNotificationTemplate()
result = NotificationService.render_template(
data=data, template=template, provider=EmailNotificationProvider
)
assert isinstance(result, EmailMultiAlternatives)
@mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_basic_notify_target_async(self, mock_record: mock.MagicMock) -> None:
service = NotificationService(data=MockNotification(message="this is a test notification"))
with self.tasks():
service.notify_async(targets=[self.target])
# slo asserts
assert_count_of_metric(mock_record, EventLifecycleOutcome.STARTED, 1)
assert_count_of_metric(mock_record, EventLifecycleOutcome.SUCCESS, 1)
@mock.patch("sentry.notifications.platform.email.provider.EmailNotificationProvider.send")
@mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_notify_target_async_with_api_error(
self, mock_record: mock.MagicMock, mock_send: mock.MagicMock
) -> None:
mock_send.side_effect = ApiError("API request failed", 400)
service = NotificationService(data=MockNotification(message="this is a test notification"))
with self.tasks():
service.notify_async(targets=[self.target])
# slo asserts
assert_count_of_metric(mock_record, EventLifecycleOutcome.STARTED, 1)
assert_count_of_metric(mock_record, EventLifecycleOutcome.FAILURE, 1)
@mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
@mock.patch("sentry.notifications.platform.slack.provider.SlackNotificationProvider.send")
def test_basic_notify_integration_target_async(
self, mock_slack_send: mock.MagicMock, mock_record: mock.MagicMock
) -> None:
service = NotificationService(data=MockNotification(message="this is a test notification"))
with self.tasks():
service.notify_async(targets=[self.integration_target])
# slo asserts
assert_count_of_metric(mock_record, EventLifecycleOutcome.STARTED, 1)
assert_count_of_metric(mock_record, EventLifecycleOutcome.SUCCESS, 1)
@mock.patch("sentry.notifications.platform.slack.provider.SlackNotificationProvider.send")
@mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_notify_integration_target_async_with_api_error(
self, mock_record: mock.MagicMock, mock_send: mock.MagicMock
) -> None:
mock_send.side_effect = ApiError("Slack API request failed", 400)
service = NotificationService(data=MockNotification(message="this is a test notification"))
with self.tasks():
service.notify_async(targets=[self.integration_target])
# slo asserts
assert_count_of_metric(mock_record, EventLifecycleOutcome.STARTED, 1)
assert_count_of_metric(mock_record, EventLifecycleOutcome.FAILURE, 1)
@mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
@mock.patch("sentry.notifications.platform.slack.provider.SlackNotificationProvider.send")
@mock.patch("sentry.notifications.platform.email.provider.EmailNotificationProvider.send")
def test_notify_mixed_targets_async(
self,
mock_email_send: mock.MagicMock,
mock_slack_send: mock.MagicMock,
mock_record: mock.MagicMock,
) -> None:
"""Test sending notifications to both generic and integration targets"""
service = NotificationService(data=MockNotification(message="this is a test notification"))
with self.tasks():
service.notify_async(targets=[self.target, self.integration_target])
# slo asserts - should have 2 notifications sent
assert_count_of_metric(mock_record, EventLifecycleOutcome.STARTED, 2)
assert_count_of_metric(mock_record, EventLifecycleOutcome.SUCCESS, 2)
| NotificationServiceTest |
python | django__django | django/forms/boundfield.py | {
"start": 382,
"end": 12216
} | class ____(RenderableFieldMixin):
"A Field plus data"
def __init__(self, form, field, name):
self.form = form
self.field = field
self.name = name
self.html_name = form.add_prefix(name)
self.html_initial_name = form.add_initial_prefix(name)
self.html_initial_id = form.add_initial_prefix(self.auto_id)
if self.field.label is None:
self.label = pretty_name(name)
else:
self.label = self.field.label
self.help_text = field.help_text or ""
self.renderer = form.renderer
@cached_property
def subwidgets(self):
"""
Most widgets yield a single subwidget, but others like RadioSelect and
CheckboxSelectMultiple produce one subwidget for each choice.
This property is cached so that only one database query occurs when
rendering ModelChoiceFields.
"""
id_ = self.field.widget.attrs.get("id") or self.auto_id
attrs = {"id": id_} if id_ else {}
attrs = self.build_widget_attrs(attrs)
return [
BoundWidget(self.field.widget, widget, self.form.renderer)
for widget in self.field.widget.subwidgets(
self.html_name, self.value(), attrs=attrs
)
]
def __bool__(self):
# BoundField evaluates to True even if it doesn't have subwidgets.
return True
def __iter__(self):
return iter(self.subwidgets)
def __len__(self):
return len(self.subwidgets)
def __getitem__(self, idx):
# Prevent unnecessary reevaluation when accessing BoundField's attrs
# from templates.
if not isinstance(idx, (int, slice)):
raise TypeError(
"BoundField indices must be integers or slices, not %s."
% type(idx).__name__
)
return self.subwidgets[idx]
@property
def errors(self):
"""
Return an ErrorList (empty if there are no errors) for this field.
"""
return self.form.errors.get(
self.name, self.form.error_class(renderer=self.form.renderer)
)
@property
def template_name(self):
return self.field.template_name or self.form.renderer.field_template_name
def get_context(self):
return {"field": self}
def as_widget(self, widget=None, attrs=None, only_initial=False):
"""
Render the field by rendering the passed widget, adding any HTML
attributes passed as attrs. If a widget isn't specified, use the
field's default widget.
"""
widget = widget or self.field.widget
if self.field.localize:
widget.is_localized = True
attrs = attrs or {}
attrs = self.build_widget_attrs(attrs, widget)
if self.auto_id and "id" not in widget.attrs:
attrs.setdefault(
"id", self.html_initial_id if only_initial else self.auto_id
)
if only_initial and self.html_initial_name in self.form.data:
# Propagate the hidden initial value.
value = self.form._widget_data_value(
self.field.hidden_widget(),
self.html_initial_name,
)
else:
value = self.value()
return widget.render(
name=self.html_initial_name if only_initial else self.html_name,
value=value,
attrs=attrs,
renderer=self.form.renderer,
)
def as_text(self, attrs=None, **kwargs):
"""
Return a string of HTML for representing this as an
<input type="text">.
"""
return self.as_widget(TextInput(), attrs, **kwargs)
def as_textarea(self, attrs=None, **kwargs):
"""Return a string of HTML for representing this as a <textarea>."""
return self.as_widget(Textarea(), attrs, **kwargs)
def as_hidden(self, attrs=None, **kwargs):
"""
Return a string of HTML for representing this as an
<input type="hidden">.
"""
return self.as_widget(self.field.hidden_widget(), attrs, **kwargs)
@property
def data(self):
"""
Return the data for this BoundField, or None if it wasn't given.
"""
return self.form._widget_data_value(self.field.widget, self.html_name)
def value(self):
"""
Return the value for this BoundField, using the initial value if
the form is not bound or the data otherwise.
"""
data = self.initial
if self.form.is_bound:
data = self.field.bound_data(self.data, data)
return self.field.prepare_value(data)
def _has_changed(self):
field = self.field
if field.show_hidden_initial:
hidden_widget = field.hidden_widget()
initial_value = self.form._widget_data_value(
hidden_widget,
self.html_initial_name,
)
try:
initial_value = field.to_python(initial_value)
except ValidationError:
# Always assume data has changed if validation fails.
return True
else:
initial_value = self.initial
return field.has_changed(initial_value, self.data)
def label_tag(self, contents=None, attrs=None, label_suffix=None, tag=None):
"""
Wrap the given contents in a <label>, if the field has an ID attribute.
contents should be mark_safe'd to avoid HTML escaping. If contents
aren't given, use the field's HTML-escaped label.
If attrs are given, use them as HTML attributes on the <label> tag.
label_suffix overrides the form's label_suffix.
"""
contents = contents or self.label
if label_suffix is None:
label_suffix = (
self.field.label_suffix
if self.field.label_suffix is not None
else self.form.label_suffix
)
# Only add the suffix if the label does not end in punctuation.
# Translators: If found as last label character, these punctuation
# characters will prevent the default label_suffix to be appended to
# the label
if label_suffix and contents and contents[-1] not in _(":?.!"):
contents = format_html("{}{}", contents, label_suffix)
widget = self.field.widget
id_ = widget.attrs.get("id") or self.auto_id
if id_:
id_for_label = widget.id_for_label(id_)
if id_for_label:
attrs = attrs or {}
if tag != "legend":
attrs = {**attrs, "for": id_for_label}
if self.field.required and hasattr(self.form, "required_css_class"):
attrs = attrs or {}
if "class" in attrs:
attrs["class"] += " " + self.form.required_css_class
else:
attrs["class"] = self.form.required_css_class
context = {
"field": self,
"label": contents,
"attrs": attrs,
"use_tag": bool(id_),
"tag": tag or "label",
}
return self.form.render(self.form.template_name_label, context)
def legend_tag(self, contents=None, attrs=None, label_suffix=None):
"""
Wrap the given contents in a <legend>, if the field has an ID
attribute. Contents should be mark_safe'd to avoid HTML escaping. If
contents aren't given, use the field's HTML-escaped label.
If attrs are given, use them as HTML attributes on the <legend> tag.
label_suffix overrides the form's label_suffix.
"""
return self.label_tag(contents, attrs, label_suffix, tag="legend")
def css_classes(self, extra_classes=None):
"""
Return a string of space-separated CSS classes for this field.
"""
if hasattr(extra_classes, "split"):
extra_classes = extra_classes.split()
extra_classes = set(extra_classes or [])
if self.errors and hasattr(self.form, "error_css_class"):
extra_classes.add(self.form.error_css_class)
if self.field.required and hasattr(self.form, "required_css_class"):
extra_classes.add(self.form.required_css_class)
return " ".join(extra_classes)
@property
def is_hidden(self):
"""Return True if this BoundField's widget is hidden."""
return self.field.widget.is_hidden
@property
def auto_id(self):
"""
Calculate and return the ID attribute for this BoundField, if the
associated Form has specified auto_id. Return an empty string
otherwise.
"""
auto_id = self.form.auto_id # Boolean or string
if auto_id and "%s" in str(auto_id):
return auto_id % self.html_name
elif auto_id:
return self.html_name
return ""
@property
def id_for_label(self):
"""
Wrapper around the field widget's `id_for_label` method.
Useful, for example, for focusing on this field regardless of whether
it has a single widget or a MultiWidget.
"""
widget = self.field.widget
id_ = widget.attrs.get("id") or self.auto_id
return widget.id_for_label(id_)
@cached_property
def initial(self):
return self.form.get_initial_for_field(self.field, self.name)
def build_widget_attrs(self, attrs, widget=None):
widget = widget or self.field.widget
attrs = dict(attrs) # Copy attrs to avoid modifying the argument.
if (
widget.use_required_attribute(self.initial)
and self.field.required
and self.form.use_required_attribute
):
# MultiValueField has require_all_fields: if False, fall back
# on subfields.
if (
hasattr(self.field, "require_all_fields")
and not self.field.require_all_fields
and isinstance(self.field.widget, MultiWidget)
):
for subfield, subwidget in zip(self.field.fields, widget.widgets):
subwidget.attrs["required"] = (
subwidget.use_required_attribute(self.initial)
and subfield.required
)
else:
attrs["required"] = True
if self.field.disabled:
attrs["disabled"] = True
if not widget.is_hidden and self.errors:
attrs["aria-invalid"] = "true"
# Preserve aria-describedby provided by the attrs argument so user
# can set the desired order.
if not attrs.get("aria-describedby") and not self.use_fieldset:
if aria_describedby := self.aria_describedby:
attrs["aria-describedby"] = aria_describedby
return attrs
@property
def aria_describedby(self):
# Preserve aria-describedby set on the widget.
if self.field.widget.attrs.get("aria-describedby"):
return None
aria_describedby = []
if self.auto_id and not self.is_hidden:
if self.help_text:
aria_describedby.append(f"{self.auto_id}_helptext")
if self.errors:
aria_describedby.append(f"{self.auto_id}_error")
return " ".join(aria_describedby)
@property
def widget_type(self):
return re.sub(
r"widget$|input$", "", self.field.widget.__class__.__name__.lower()
)
@property
def use_fieldset(self):
"""
Return the value of this BoundField widget's use_fieldset attribute.
"""
return self.field.widget.use_fieldset
@html_safe
| BoundField |
python | apache__airflow | shared/logging/src/airflow_shared/logging/percent_formatter.py | {
"start": 3050,
"end": 6600
} | class ____(ConsoleRenderer):
"""A Structlog processor that uses a stdlib-like percent based format string."""
_fmt: str
# From https://github.com/python/cpython/blob/v3.12.11/Lib/logging/__init__.py#L563-L587
callsite_parameters: ClassVar[dict[str, CallsiteParameter]] = {
"pathname": CallsiteParameter.PATHNAME,
"filename": CallsiteParameter.FILENAME,
"module": CallsiteParameter.MODULE,
"lineno": CallsiteParameter.LINENO,
"funcName": CallsiteParameter.FUNC_NAME,
"thread": CallsiteParameter.THREAD,
"threadName": CallsiteParameter.THREAD_NAME,
"process": CallsiteParameter.PROCESS,
# This one isn't listed in the docs until 3.14, but it's worked for a long time
"processName": CallsiteParameter.PROCESS_NAME,
}
special_keys = {
"event",
"name",
"logger",
"logger_name",
"timestamp",
"level",
} | set(map(operator.attrgetter("value"), callsite_parameters.values()))
@classmethod
def callsite_params_from_fmt_string(cls, fmt: str) -> collections.abc.Iterable[CallsiteParameter]:
# Pattern based on https://github.com/python/cpython/blob/v3.12.11/Lib/logging/__init__.py#L441, but
# with added grouping, and comments to aid clarity, even if we don't care about anything beyond the
# mapping key
pattern = re.compile(
r"""
%\( (?P<key> \w+ ) \) # The mapping key (in parenthesis. The bit we care about)
[#0+ -]* # Conversion flags
(?: \*|\d+ )? # Minimum field width
(?: \. (?: \* | \d+ ) )? # Precision (floating point)
[diouxefgcrsa%] # Conversion type
""",
re.I | re.X,
)
for match in pattern.finditer(fmt):
if param := cls.callsite_parameters.get(match["key"]):
yield param
def __init__(self, fmt: str, **kwargs):
super().__init__(**kwargs)
self._fmt = fmt
def __call__(self, logger: WrappedLogger, method_name: str, event_dict: EventDict):
exc = event_dict.pop("exception", None)
exc_info = event_dict.pop("exc_info", None)
stack = event_dict.pop("stack", None)
params = _LazyLogRecordDict(
event_dict,
method_name,
# To maintain compat with old log levels, we don't want to color info, just everything else
{**ConsoleRenderer.get_default_level_styles(), "info": ""},
self._styles,
)
sio = StringIO()
sio.write(self._fmt % params)
sio.write(
"".join(
" " + self._default_column_formatter(key, val)
for key, val in event_dict.items()
if key not in self.special_keys
).rstrip(" ")
)
if stack is not None:
sio.write("\n" + stack)
if exc_info or exc is not None:
sio.write("\n\n" + "=" * 79 + "\n")
if exc_info:
if isinstance(exc_info, BaseException):
exc_info = (exc_info.__class__, exc_info, exc_info.__traceback__)
if not isinstance(exc_info, tuple):
if (exc_info := sys.exc_info()) == (None, None, None):
exc_info = None
if exc_info:
self._exception_formatter(sio, exc_info)
elif exc is not None:
sio.write("\n" + exc)
return sio.getvalue()
| PercentFormatRender |
python | falconry__falcon | tests/test_middleware.py | {
"start": 1447,
"end": 1827
} | class ____:
def process_request(self, req, resp):
global context
context['transaction_id'] = 'unique-req-id'
def process_resource(self, req, resp, resource, params):
global context
context['resource_transaction_id'] = 'unique-req-id-2'
def process_response(self, req, resp, resource, req_succeeded):
pass
| TransactionIdMiddleware |
python | sympy__sympy | sympy/physics/mechanics/tests/test_system_class.py | {
"start": 29531,
"end": 38219
} | class ____:
def test_cart_pendulum_kanes(self):
# This example is the same as in the top documentation of System
# Added a spring to the cart
g, l, mc, mp, k = symbols('g l mc mp k')
F, qp, qc, up, uc = dynamicsymbols('F qp qc up uc')
rail = RigidBody('rail')
cart = RigidBody('cart', mass=mc)
bob = Particle('bob', mass=mp)
bob_frame = ReferenceFrame('bob_frame')
system = System.from_newtonian(rail)
assert system.bodies == (rail,)
assert system.frame == rail.frame
assert system.fixed_point == rail.masscenter
slider = PrismaticJoint('slider', rail, cart, qc, uc, joint_axis=rail.x)
pin = PinJoint('pin', cart, bob, qp, up, joint_axis=cart.z,
child_interframe=bob_frame, child_point=l * bob_frame.y)
system.add_joints(slider, pin)
assert system.joints == (slider, pin)
assert system.get_joint('slider') == slider
assert system.get_body('bob') == bob
system.apply_uniform_gravity(-g * system.y)
system.add_loads((cart.masscenter, F * rail.x))
system.add_actuators(TorqueActuator(k * qp, cart.z, bob_frame, cart))
system.validate_system()
system.form_eoms()
assert isinstance(system.eom_method, KanesMethod)
assert (simplify(system.mass_matrix - ImmutableMatrix(
[[mp + mc, mp * l * cos(qp)], [mp * l * cos(qp), mp * l ** 2]]))
== zeros(2, 2))
assert (simplify(system.forcing - ImmutableMatrix([
[mp * l * up ** 2 * sin(qp) + F],
[-mp * g * l * sin(qp) + k * qp]])) == zeros(2, 1))
system.add_holonomic_constraints(
sympify(bob.masscenter.pos_from(rail.masscenter).dot(system.x)))
assert system.eom_method is None
system.q_ind, system.q_dep = qp, qc
system.u_ind, system.u_dep = up, uc
system.validate_system()
# Computed solution based on manually solving the constraints
subs = {qc: -l * sin(qp),
uc: -l * cos(qp) * up,
uc.diff(t): l * (up ** 2 * sin(qp) - up.diff(t) * cos(qp))}
upd_expected = (
(-g * mp * sin(qp) + k * qp / l + l * mc * sin(2 * qp) * up ** 2 / 2
- l * mp * sin(2 * qp) * up ** 2 / 2 - F * cos(qp)) /
(l * (mc * cos(qp) ** 2 + mp * sin(qp) ** 2)))
upd_sol = tuple(solve(system.form_eoms().xreplace(subs),
up.diff(t)).values())[0]
assert simplify(upd_sol - upd_expected) == 0
assert isinstance(system.eom_method, KanesMethod)
# Test other output
Mk = -ImmutableMatrix([[0, 1], [1, 0]])
gk = -ImmutableMatrix([uc, up])
Md = ImmutableMatrix([[-l ** 2 * mp * cos(qp) ** 2 + l ** 2 * mp,
l * mp * cos(qp) - l * (mc + mp) * cos(qp)],
[l * cos(qp), 1]])
gd = ImmutableMatrix(
[[-g * l * mp * sin(qp) + k * qp - l ** 2 * mp * up ** 2 * sin(qp) *
cos(qp) - l * F * cos(qp)], [l * up ** 2 * sin(qp)]])
Mm = (Mk.row_join(zeros(2, 2))).col_join(zeros(2, 2).row_join(Md))
gm = gk.col_join(gd)
assert simplify(system.mass_matrix - Md) == zeros(2, 2)
assert simplify(system.forcing - gd) == zeros(2, 1)
assert simplify(system.mass_matrix_full - Mm) == zeros(4, 4)
assert simplify(system.forcing_full - gm) == zeros(4, 1)
def test_cart_pendulum_lagrange(self):
# Lagrange version of test_cart_pendulus_kanes
# Added a spring to the cart
g, l, mc, mp, k = symbols('g l mc mp k')
F, qp, qc = dynamicsymbols('F qp qc')
qpd, qcd = dynamicsymbols('qp qc', 1)
rail = RigidBody('rail')
cart = RigidBody('cart', mass=mc)
bob = Particle('bob', mass=mp)
bob_frame = ReferenceFrame('bob_frame')
system = System.from_newtonian(rail)
assert system.bodies == (rail,)
assert system.frame == rail.frame
assert system.fixed_point == rail.masscenter
slider = PrismaticJoint('slider', rail, cart, qc, qcd,
joint_axis=rail.x)
pin = PinJoint('pin', cart, bob, qp, qpd, joint_axis=cart.z,
child_interframe=bob_frame, child_point=l * bob_frame.y)
system.add_joints(slider, pin)
assert system.joints == (slider, pin)
assert system.get_joint('slider') == slider
assert system.get_body('bob') == bob
for body in system.bodies:
body.potential_energy = body.mass * g * body.masscenter.pos_from(
system.fixed_point).dot(system.y)
system.add_loads((cart.masscenter, F * rail.x))
system.add_actuators(TorqueActuator(k * qp, cart.z, bob_frame, cart))
system.validate_system(LagrangesMethod)
system.form_eoms(LagrangesMethod)
assert (simplify(system.mass_matrix - ImmutableMatrix(
[[mp + mc, mp * l * cos(qp)], [mp * l * cos(qp), mp * l ** 2]]))
== zeros(2, 2))
assert (simplify(system.forcing - ImmutableMatrix([
[mp * l * qpd ** 2 * sin(qp) + F], [-mp * g * l * sin(qp) + k * qp]]
)) == zeros(2, 1))
system.add_holonomic_constraints(
sympify(bob.masscenter.pos_from(rail.masscenter).dot(system.x)))
assert system.eom_method is None
system.q_ind, system.q_dep = qp, qc
# Computed solution based on manually solving the constraints
subs = {qc: -l * sin(qp),
qcd: -l * cos(qp) * qpd,
qcd.diff(t): l * (qpd ** 2 * sin(qp) - qpd.diff(t) * cos(qp))}
qpdd_expected = (
(-g * mp * sin(qp) + k * qp / l + l * mc * sin(2 * qp) * qpd ** 2 /
2 - l * mp * sin(2 * qp) * qpd ** 2 / 2 - F * cos(qp)) /
(l * (mc * cos(qp) ** 2 + mp * sin(qp) ** 2)))
eoms = system.form_eoms(LagrangesMethod)
lam1 = system.eom_method.lam_vec[0]
lam1_sol = system.eom_method.solve_multipliers()[lam1]
qpdd_sol = solve(eoms[0].xreplace({lam1: lam1_sol}).xreplace(subs),
qpd.diff(t))[0]
assert simplify(qpdd_sol - qpdd_expected) == 0
assert isinstance(system.eom_method, LagrangesMethod)
# Test other output
Md = ImmutableMatrix([[l ** 2 * mp, l * mp * cos(qp), -l * cos(qp)],
[l * mp * cos(qp), mc + mp, -1]])
gd = ImmutableMatrix(
[[-g * l * mp * sin(qp) + k * qp],
[l * mp * sin(qp) * qpd ** 2 + F]])
Mm = (eye(2).row_join(zeros(2, 3))).col_join(zeros(3, 2).row_join(
Md.col_join(ImmutableMatrix([l * cos(qp), 1, 0]).T)))
gm = ImmutableMatrix([qpd, qcd] + gd[:] + [l * sin(qp) * qpd ** 2])
assert simplify(system.mass_matrix - Md) == zeros(2, 3)
assert simplify(system.forcing - gd) == zeros(2, 1)
assert simplify(system.mass_matrix_full - Mm) == zeros(5, 5)
assert simplify(system.forcing_full - gm) == zeros(5, 1)
def test_box_on_ground(self):
# Particle sliding on ground with friction. The applied force is assumed
# to be positive and to be higher than the friction force.
g, m, mu = symbols('g m mu')
q, u, ua = dynamicsymbols('q u ua')
N, F = dynamicsymbols('N F', positive=True)
P = Particle("P", mass=m)
system = System()
system.add_bodies(P)
P.masscenter.set_pos(system.fixed_point, q * system.x)
P.masscenter.set_vel(system.frame, u * system.x + ua * system.y)
system.q_ind, system.u_ind, system.u_aux = [q], [u], [ua]
system.kdes = [q.diff(t) - u]
system.apply_uniform_gravity(-g * system.y)
system.add_loads(
Force(P, N * system.y),
Force(P, F * system.x - mu * N * system.x))
system.validate_system()
system.form_eoms()
# Test other output
Mk = ImmutableMatrix([1])
gk = ImmutableMatrix([u])
Md = ImmutableMatrix([m])
gd = ImmutableMatrix([F - mu * N])
Mm = (Mk.row_join(zeros(1, 1))).col_join(zeros(1, 1).row_join(Md))
gm = gk.col_join(gd)
aux_eqs = ImmutableMatrix([N - m * g])
assert simplify(system.mass_matrix - Md) == zeros(1, 1)
assert simplify(system.forcing - gd) == zeros(1, 1)
assert simplify(system.mass_matrix_full - Mm) == zeros(2, 2)
assert simplify(system.forcing_full - gm) == zeros(2, 1)
assert simplify(system.eom_method.auxiliary_eqs - aux_eqs
) == zeros(1, 1)
| TestSystemExamples |
python | apache__airflow | airflow-core/src/airflow/callbacks/base_callback_sink.py | {
"start": 945,
"end": 1146
} | class ____:
"""Base class for Callbacks Sinks."""
def send(self, callback: CallbackRequest) -> None:
"""Send callback for execution."""
raise NotImplementedError()
| BaseCallbackSink |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_internal/commands/freeze.py | {
"start": 583,
"end": 3203
} | class ____(Command):
"""
Output installed packages in requirements format.
packages are listed in a case-insensitive sorted order.
"""
ignore_require_venv = True
usage = """
%prog [options]"""
log_streams = ("ext://sys.stderr", "ext://sys.stderr")
def add_options(self) -> None:
self.cmd_opts.add_option(
"-r",
"--requirement",
dest="requirements",
action="append",
default=[],
metavar="file",
help=(
"Use the order in the given requirements file and its "
"comments when generating output. This option can be "
"used multiple times."
),
)
self.cmd_opts.add_option(
"-l",
"--local",
dest="local",
action="store_true",
default=False,
help=(
"If in a virtualenv that has global access, do not output "
"globally-installed packages."
),
)
self.cmd_opts.add_option(
"--user",
dest="user",
action="store_true",
default=False,
help="Only output packages installed in user-site.",
)
self.cmd_opts.add_option(cmdoptions.list_path())
self.cmd_opts.add_option(
"--all",
dest="freeze_all",
action="store_true",
help=(
"Do not skip these packages in the output:"
" {}".format(", ".join(_dev_pkgs()))
),
)
self.cmd_opts.add_option(
"--exclude-editable",
dest="exclude_editable",
action="store_true",
help="Exclude editable package from output.",
)
self.cmd_opts.add_option(cmdoptions.list_exclude())
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options: Values, args: List[str]) -> int:
skip = set(stdlib_pkgs)
if not options.freeze_all:
skip.update(_dev_pkgs())
if options.excludes:
skip.update(options.excludes)
cmdoptions.check_list_path_option(options)
for line in freeze(
requirement=options.requirements,
local_only=options.local,
user_only=options.user,
paths=options.path,
isolated=options.isolated_mode,
skip=skip,
exclude_editable=options.exclude_editable,
):
sys.stdout.write(line + "\n")
return SUCCESS
| FreezeCommand |
python | astropy__astropy | astropy/uncertainty/core.py | {
"start": 21687,
"end": 26469
} | class ____(Distribution, np.ndarray):
# This includes the important override of view and __getitem__
# which are needed for all ndarray subclass Distributions, but not
# for the scalar one.
_samples_cls = np.ndarray
# Override view so that we stay a Distribution version of the new type.
def view(self, dtype=None, type=None):
"""New view of array with the same data.
Like `~numpy.ndarray.view` except that the result will always be a new
`~astropy.uncertainty.Distribution` instance. If the requested
``type`` is a `~astropy.uncertainty.Distribution`, then no change in
``dtype`` is allowed.
"""
if type is None:
if isinstance(dtype, builtins.type) and issubclass(dtype, np.ndarray):
type = self._get_distribution_cls(dtype)
dtype = None
else:
type = self.__class__
else:
type = self._get_distribution_cls(type)
type = self._get_distribution_cls(type)
if dtype is None:
return super().view(type=type)
dtype = np.dtype(dtype)
if dtype == self.dtype:
return super().view(type=type)
if dtype.names == ("samples",):
# Assume the user knows what they are doing.
return super().view(dtype, type)
if dtype.shape == () and dtype.itemsize == self.dtype.itemsize:
dtype = self._get_distribution_dtype(
dtype,
self.n_samples,
itemsize=super(Distribution, self).dtype["samples"].base.itemsize,
)
return super().view(dtype, type)
samples_cls = type._samples_cls
if dtype.itemsize == self.dtype.itemsize:
distr = self.distribution
distr_view = distr.view(dtype, samples_cls)
# This does not necessarily leave the sample axis in the right place.
return Distribution(np.moveaxis(distr_view, distr.ndim - 1, -1))
elif dtype.itemsize == super(Distribution, self).dtype["samples"].base.itemsize:
distr = np.moveaxis(self.distribution, -1, -2)
distr_view = distr.view(dtype, samples_cls).squeeze(-1)
return Distribution(distr_view)
else:
raise ValueError(
f"{self.__class__} can only be viewed with a dtype with "
"itemsize {self.strides[-1]} or {self.dtype.itemsize}"
)
@property
def distribution(self):
# Like in the creation, we go through an ndarray to ensure we have our
# actual dtype and to avoid entering, e.g., Quantity.__getitem__, which
# would give problems with units.
structured = super().view(np.ndarray)
distribution = structured["samples"]["sample"].view(self._samples_cls)
distribution.__array_finalize__(self)
return distribution
def __getitem__(self, item):
if isinstance(item, str):
# "samples" should always get back to the samples class.
if item == "samples":
return self.distribution
else:
# Hard to get this right directly, so instead get item from the
# distribution, and create a new instance. We move the sample axis to
# the end to ensure the order is right for possible subarrays.
return Distribution(np.moveaxis(self.distribution[item], self.ndim, -1))
if isinstance(item, Distribution):
# Required for in-place operations like dist[dist < 0] += 360.
return self.distribution[item.distribution]
result = super().__getitem__(item)
if isinstance(result, np.void):
return result.view((ScalarDistribution, result.dtype))
else:
return result
def __setitem__(self, item, value):
if isinstance(item, Distribution):
# Support operations like dist[dist < 0] = 0.
self.distribution[item.distribution] = value
return
if isinstance(item, str):
if item == "samples":
self.distribution[()] = value
return
# Get a view of this item (non-trivial; see above).
self = self[item]
item = ()
if not isinstance(value, Distribution):
# If value is not already a Distribution, first make it an array
# to help interpret possible structured dtype, and then turn it
# into a Distribution with n_samples=1 (which will broadcast).
value = np.asanyarray(value, dtype=self.dtype)
value = Distribution(value[..., np.newaxis])
super().__setitem__(item, value)
| ArrayDistribution |
python | Pylons__pyramid | tests/test_config/test_init.py | {
"start": 47977,
"end": 49086
} | class ____(unittest.TestCase):
def setUp(self):
from pyramid.config import global_registries
global_registries.empty()
tearDown = setUp
def _makeConfigurator(self, *arg, **kw):
from pyramid.config import Configurator
config = Configurator(*arg, **kw)
return config
def test_global_registries_empty(self):
from pyramid.config import global_registries
self.assertEqual(global_registries.last, None)
def test_global_registries(self):
from pyramid.config import global_registries
config1 = self._makeConfigurator()
config1.make_wsgi_app()
self.assertEqual(global_registries.last, config1.registry)
config2 = self._makeConfigurator()
config2.make_wsgi_app()
self.assertEqual(global_registries.last, config2.registry)
self.assertEqual(
list(global_registries), [config1.registry, config2.registry]
)
global_registries.remove(config2.registry)
self.assertEqual(global_registries.last, config1.registry)
| TestGlobalRegistriesIntegration |
python | scrapy__scrapy | scrapy/exceptions.py | {
"start": 1354,
"end": 1564
} | class ____(Exception):
"""Drop item from the item pipeline"""
def __init__(self, message: str, log_level: str | None = None):
super().__init__(message)
self.log_level = log_level
| DropItem |
python | pyca__cryptography | src/cryptography/hazmat/primitives/hashes.py | {
"start": 2770,
"end": 2863
} | class ____(HashAlgorithm):
name = "sha512"
digest_size = 64
block_size = 128
| SHA512 |
python | allegroai__clearml | clearml/backend_api/services/v2_23/projects.py | {
"start": 111681,
"end": 113896
} | class ____(Response):
"""
Response of projects.get_model_tags endpoint.
:param tags: The list of unique tag values
:type tags: Sequence[str]
:param system_tags: The list of unique system tag values. Returned only if
'include_system' is set to 'true' in the request
:type system_tags: Sequence[str]
"""
_service = "projects"
_action = "get_model_tags"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"system_tags": {
"description": "The list of unique system tag values. Returned only if 'include_system' is set to 'true' in the request",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "The list of unique tag values",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(
self, tags: Optional[List[str]] = None, system_tags: Optional[List[str]] = None, **kwargs: Any
) -> None:
super(GetModelTagsResponse, self).__init__(**kwargs)
self.tags = tags
self.system_tags = system_tags
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
| GetModelTagsResponse |
python | apache__airflow | task-sdk/src/airflow/sdk/definitions/edges.py | {
"start": 7310,
"end": 7467
} | class ____(TypedDict):
"""Extra metadata that the Dag can store about an edge, usually generated from an EdgeModifier."""
label: str | None
| EdgeInfoType |
python | PrefectHQ__prefect | tests/cli/test_deploy.py | {
"start": 203117,
"end": 218681
} | class ____:
async def test_docker_build_step_exists_does_not_prompt_build_custom_docker_image(
self,
docker_work_pool,
mock_build_docker_image,
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
prefect_config = yaml.safe_load(f)
with open("Dockerfile", "w") as f:
f.write("FROM python:3.9-slim\n")
prefect_config["build"] = [
{
"prefect_docker.deployments.steps.build_docker_image": {
"requires": "prefect-docker",
"image_name": "local/repo",
"tag": "dev",
"id": "build-image",
"dockerfile": "Dockerfile",
}
}
]
# save it back
with prefect_file.open(mode="w") as f:
yaml.safe_dump(prefect_config, f)
result = await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name --interval 3600 -p"
f" {docker_work_pool.name}"
),
user_input=(
# Decline remote storage
"n"
+ readchar.key.ENTER
+
# Accept save configuration
"y"
+ readchar.key.ENTER
),
expected_output_does_not_contain=[
"Would you like to build a custom Docker image"
],
)
assert result.exit_code == 0
assert "An important name/test" in result.output
with prefect_file.open(mode="r") as f:
prefect_config = yaml.safe_load(f)
async def test_other_build_step_exists_prompts_build_custom_docker_image(
self,
docker_work_pool: WorkPool,
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
prefect_config = yaml.safe_load(f)
prefect_config["build"] = [
{
"prefect.deployments.steps.run_shell_script": {
"id": "sample-bash-cmd",
"script": "echo 'Hello, World!'",
"stream_output": False,
}
}
]
# save it back
with prefect_file.open(mode="w") as f:
yaml.safe_dump(prefect_config, f)
result = await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name --interval 3600"
f" -p {docker_work_pool.name}"
),
user_input=(
# Reject build custom docker image
"n" + readchar.key.ENTER
),
expected_output_contains=[
"Would you like to build a custom Docker image",
],
)
assert result.exit_code == 0
assert "An important name/test" in result.output
async def test_no_build_step_exists_prompts_build_custom_docker_image(
self, docker_work_pool: WorkPool, prefect_client: PrefectClient
):
result = await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name --interval 3600"
f" -p {docker_work_pool.name}"
),
user_input=(
# Reject build custom docker image
"n" + readchar.key.ENTER
),
expected_output_contains=["Would you like to build a custom Docker image"],
)
assert result.exit_code == 0
assert "An important name/test" in result.output
# prefect_file = Path("prefect.yaml")
# with open(prefect_file, "r") as f:
# config = yaml.safe_load(f)
# assert len(config["deployments"]) == 2
# assert config["deployments"][1]["name"] == "test-name"
# assert not config["deployments"][1].get("build")
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
assert deployment.schedules and len(deployment.schedules) == 1
assert getattr(deployment.schedules[0].schedule, "interval") == timedelta(
seconds=3600
)
async def test_prompt_build_custom_docker_image_accepted_use_existing_dockerfile_accepted(
self, docker_work_pool: WorkPool, mock_build_docker_image: AsyncMock
):
with open("Dockerfile", "w") as f:
f.write("FROM python:3.9-slim\n")
result = await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name --interval 3600"
f" -p {docker_work_pool.name}"
),
user_input=(
# Accept build custom docker image
"y"
+ readchar.key.ENTER
# Accept use existing dockerfile
+ "y"
+ readchar.key.ENTER
# Enter repo name
+ "prefecthq/prefect"
+ readchar.key.ENTER
+
# Default image_name
readchar.key.ENTER
+
# Default tag
readchar.key.ENTER
+
# Reject push to registry
"n"
+ readchar.key.ENTER
),
expected_output_contains=[
"Would you like to build a custom Docker image",
"Would you like to use the Dockerfile in the current directory?",
"Image prefecthq/prefect/test-name:latest will be built",
"Would you like to push this image to a remote registry?",
],
expected_output_does_not_contain=["Is this a private registry?"],
)
assert result.exit_code == 0
assert "An important name/test" in result.output
async def test_prompt_build_custom_docker_image_accepted_use_existing_dockerfile_rejected_rename_accepted(
self, docker_work_pool: WorkPool, mock_build_docker_image: AsyncMock
):
with open("Dockerfile", "w") as f:
f.write("FROM python:3.9-slim\n")
result = await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name --interval 3600"
f" -p {docker_work_pool.name}"
),
user_input=(
# Accept build custom docker image
"y"
+ readchar.key.ENTER
# Reject use existing dockerfile
+ "n"
+ readchar.key.ENTER
# Accept rename dockerfile
+ "y"
+ readchar.key.ENTER
+
# Enter new dockerfile name
"Dockerfile.backup"
+ readchar.key.ENTER
# Enter repo name
+ "prefecthq/prefect"
+ readchar.key.ENTER
+
# Default image_name
readchar.key.ENTER
+
# Default tag
readchar.key.ENTER
+
# Reject push to registry
"n"
+ readchar.key.ENTER
# Accept save configuration
+ "y"
+ readchar.key.ENTER
),
expected_output_contains=[
"Would you like to build a custom Docker image",
"Would you like to use the Dockerfile in the current directory?",
"A Dockerfile exists. You chose not to use it.",
"Image prefecthq/prefect/test-name:latest will be built",
"Would you like to push this image to a remote registry?",
],
expected_output_does_not_contain=["Is this a private registry?"],
)
assert result.exit_code == 0
async def test_prompt_build_custom_docker_image_accepted_use_existing_dockerfile_rejected_rename_rejected(
self, docker_work_pool: WorkPool
):
with open("Dockerfile", "w") as f:
f.write("FROM python:3.9-slim\n")
result = await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name --interval 3600"
f" -p {docker_work_pool.name}"
),
user_input=(
# Accept build custom docker image
"y"
+ readchar.key.ENTER
# Reject use existing dockerfile
+ "n"
+ readchar.key.ENTER
# Accept rename dockerfile
+ "n"
+ readchar.key.ENTER
),
expected_code=1,
expected_output_contains=[
"Would you like to build a custom Docker image",
"Would you like to use the Dockerfile in the current directory?",
"A Dockerfile exists. You chose not to use it.",
(
"A Dockerfile already exists. Please remove or rename the existing"
" one."
),
],
expected_output_does_not_contain=["Is this a private registry?"],
)
assert result.exit_code == 1
async def test_prompt_build_custom_docker_image_accepted_no_existing_dockerfile_uses_auto_build(
self, docker_work_pool: WorkPool, mock_build_docker_image: AsyncMock
):
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name --interval 3600"
f" -p {docker_work_pool.name}"
),
user_input=(
# Accept build custom docker image
"y"
+ readchar.key.ENTER
# Enter repo name
+ "prefecthq/prefect"
+ readchar.key.ENTER
# Default image_name
+ readchar.key.ENTER
# Default tag
+ readchar.key.ENTER
# Reject push to registry
+ "n"
+ readchar.key.ENTER
# Accept save configuration
+ "y"
+ readchar.key.ENTER
),
expected_output_contains=[
"Would you like to build a custom Docker image",
"Image prefecthq/prefect/test-name:latest will be built",
"Would you like to push this image to a remote registry?",
],
expected_output_does_not_contain=["Is this a private registry?"],
)
async def test_no_existing_work_pool_image_gets_updated_after_adding_build_docker_image_step(
self, docker_work_pool: WorkPool, mock_build_docker_image: AsyncMock
):
prefect_file = Path("prefect.yaml")
if prefect_file.exists():
prefect_file.unlink()
assert not prefect_file.exists()
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name --interval 3600"
f" -p {docker_work_pool.name}"
),
user_input=(
# Accept build custom docker image
"y"
+ readchar.key.ENTER
# Enter repo name
+ "prefecthq/prefect"
+ readchar.key.ENTER
# Default image_name
+ readchar.key.ENTER
# Default tag
+ readchar.key.ENTER
# Reject push to registry
+ "n"
+ readchar.key.ENTER
# Decline remote storage
+ "n"
+ readchar.key.ENTER
# Accept save configuration
+ "y"
+ readchar.key.ENTER
),
expected_output_contains=[
"Would you like to build a custom Docker image",
"Image prefecthq/prefect/test-name:latest will be built",
"Would you like to push this image to a remote registry?",
],
expected_output_does_not_contain=["Is this a private registry?"],
)
async def test_work_pool_image_already_exists_not_updated_after_adding_build_docker_image_step(
self,
docker_work_pool: WorkPool,
mock_build_docker_image: AsyncMock,
prefect_client: PrefectClient,
):
prefect_file = Path("prefect.yaml")
with open("prefect.yaml", "w") as f:
contents = {
"work_pool": {
"name": docker_work_pool.name,
"job_variables": {"image": "original-image"},
}
}
yaml.dump(contents, f)
assert prefect_file.exists()
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name --interval 3600"
f" -p {docker_work_pool.name}"
),
user_input=(
# Accept build custom docker image
"y"
+ readchar.key.ENTER
# Enter repo name
+ "prefecthq/prefect"
+ readchar.key.ENTER
# Default image_name
+ readchar.key.ENTER
# Default tag
+ readchar.key.ENTER
# Reject push to registry
+ "n"
+ readchar.key.ENTER
# Decline remote storage
+ "n"
+ readchar.key.ENTER
),
expected_output_contains=[
"Would you like to build a custom Docker image",
"Image prefecthq/prefect/test-name:latest will be built",
"Would you like to push this image to a remote registry?",
],
expected_output_does_not_contain=["Is this a private registry?"],
)
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
assert deployment.name == "test-name"
assert deployment.work_pool_name == docker_work_pool.name
assert deployment.job_variables.get("image") is not None
async def test_deploying_managed_work_pool_does_not_prompt_to_build_image(
self, managed_work_pool: WorkPool
):
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name --interval 3600"
f" -p {managed_work_pool.name}"
),
user_input=(
# Decline remote storage
"n"
+ readchar.key.ENTER
# Decline save configuration
+ "n"
+ readchar.key.ENTER
),
expected_output_contains=[
"$ prefect deployment run 'An important name/test-name'",
],
expected_output_does_not_contain=[
"Would you like to build a custom Docker image?",
],
)
| TestDeployDockerBuildSteps |
python | fastai__fastai | fastai/vision/augment.py | {
"start": 50662,
"end": 51595
} | class ____():
def __init__(self, max_hue=0.1, p=0.75, draw=None, batch=False): store_attr()
def _def_draw(self, x):
if not self.batch: res = x.new_empty(x.size(0)).uniform_(math.log(1-self.max_hue), -math.log(1-self.max_hue))
else: res = x.new_zeros(x.size(0)) + random.uniform(math.log(1-self.max_hue), -math.log(1-self.max_hue))
return torch.exp(res)
def before_call(self, x):
self.change = _draw_mask(x, self._def_draw, draw=self.draw, p=self.p, neutral=0., batch=self.batch)
def __call__(self, x):
h,s,v = x.unbind(1)
h += self.change[:,None,None]
h = h % 1.0
return x.set_(torch.stack((h, s, v),dim=1))
# %% ../../nbs/09_vision.augment.ipynb 239
@patch
@delegates(_Hue.__init__)
def hue(x: TensorImage, **kwargs):
func = _Hue(**kwargs)
func.before_call(x)
return TensorImage(x.hsv(func))
# %% ../../nbs/09_vision.augment.ipynb 240
| _Hue |
python | readthedocs__readthedocs.org | readthedocs/search/tests/test_xss.py | {
"start": 117,
"end": 883
} | class ____:
def test_facted_page_xss(self, all_projects):
query = '"XSS"'
page_search = PageSearch(query=query, projects={"docs": "latest"})
results = page_search.execute()
expected = """
<h3><span>XSS</span> exploit</h3>
""".strip()
hits = results.hits.hits
assert len(hits) == 1
assert hits[0]["_source"]["version"] == "latest"
inner_hits = hits[0]["inner_hits"]
section_hits = inner_hits["sections"]["hits"]["hits"]
assert len(section_hits) == 1
section_content_highlight = section_hits[0]["highlight"]["sections.content"]
assert len(section_content_highlight) == 1
assert expected in section_content_highlight[0]
| TestXSS |
python | huggingface__transformers | src/transformers/models/tapas/tokenization_tapas.py | {
"start": 101896,
"end": 101984
} | class ____:
text: str
numeric_value: Optional[NumericValue] = None
@dataclass
| Cell |
python | bokeh__bokeh | src/bokeh/models/glyphs.py | {
"start": 56590,
"end": 57680
} | class ____(LRTBGlyph):
''' Render vertical bars, given a center coordinate, width and (top, bottom) coordinates.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
__example__ = "examples/reference/models/VBar.py"
_args = ('x', 'width', 'top', 'bottom')
x = NumberSpec(default=field("x"), help="""
The x-coordinates of the centers of the vertical bars.
""")
width = DistanceSpec(default=1, help="""
The widths of the vertical bars.
""")
bottom = NumberSpec(default=0, help="""
The y-coordinates of the bottom edges.
""")
top = NumberSpec(default=field("top"), help="""
The y-coordinates of the top edges.
""")
line_props = Include(LineProps, help="""
The {prop} values for the vertical bars.
""")
fill_props = Include(FillProps, help="""
The {prop} values for the vertical bars.
""")
hatch_props = Include(HatchProps, help="""
The {prop} values for the vertical bars.
""")
| VBar |
python | doocs__leetcode | lcof/面试题56 - I. 数组中数字出现的次数/Solution.py | {
"start": 0,
"end": 252
} | class ____:
def singleNumbers(self, nums: List[int]) -> List[int]:
xs = reduce(xor, nums)
a = 0
lb = xs & -xs
for x in nums:
if x & lb:
a ^= x
b = xs ^ a
return [a, b]
| Solution |
python | tensorflow__tensorflow | third_party/xla/build_tools/lint/check_contents.py | {
"start": 1060,
"end": 5529
} | class ____:
"""Path and line where a prohibited regex was found.
Attributes:
path: Path of the file which has the prohibited regex.
line_number: The number of the offending line.
line_contents: The text of the offending line.
matched_text: The exact string matched by the regex.
"""
path: str
line_number: int
line_contents: str
matched_text: str
def filter_hunks_by_path(
hunks: Iterable[diff_parser.Hunk],
*,
path_regexes: list[str],
path_regex_exclusions: list[str],
) -> list[diff_parser.Hunk]:
"""Filters files according to path_regexes.
If a file matches both a path_regex and a path_regex_exclusion, then
it will be filtered out.
Arguments:
hunks: A sequence of Hunk objects representing the hunks of the diff in the
change.
path_regexes: A list of regexes. Paths matching these will pass through the
filter. By default, every path is matched.
path_regex_exclusions: A list of regexes. Paths that match both a path_regex
and a path_regex_exclusion won't pass through the filter.
Returns:
A list of FileDiffs whose paths match a path_regex and don't match
any path_regex_exclusions.
"""
if not path_regexes:
path_regexes = [".*"] # by default match everything
path_regexes = [re.compile(regex) for regex in path_regexes]
def should_include(path: str) -> bool:
return any(regex.search(path) for regex in path_regexes)
path_regex_exclusions = [re.compile(regex) for regex in path_regex_exclusions]
def should_exclude(path: str) -> bool:
return any(regex.search(path) for regex in path_regex_exclusions)
return [
hunk
for hunk in hunks
if should_include(hunk.file) and not should_exclude(hunk.file)
]
def check_diffs(
hunks: Iterable[diff_parser.Hunk],
*,
prohibited_regex: str,
suppression_regex: str | None = None,
) -> list[RegexLocation]:
"""Checks FileDiffs for prohibited regexes.
Arguments:
hunks: A sequence of Hunk objects representing the hunks of the diff.
prohibited_regex: The regex that isn't allowed in the diff.
suppression_regex: A regex used as an escape hatch to allow the prohibited
regex in the diff. If this is found on the same line as prohibited_regex,
there is no error.
Returns:
A list of RegexLocations where the prohibited_regex is found.
"""
prohibited_regex = re.compile(prohibited_regex)
if suppression_regex is not None:
suppression_regex = re.compile(suppression_regex)
def should_not_suppress(line) -> bool:
if suppression_regex:
return not suppression_regex.search(line)
return True
regex_locations = []
for hunk in hunks:
for line_no, line in hunk.added_lines():
if should_not_suppress(line):
regex_locations.extend(
[
RegexLocation(hunk.file, line_no, line, regex_match.group())
for regex_match in prohibited_regex.finditer(line)
]
)
return regex_locations
def main(argv: Sequence[str]):
parser = argparse.ArgumentParser(
description="Check `git diff` for prohibited regexes."
)
parser.add_argument("--path_regex", nargs="*", default=[])
parser.add_argument("--path_regex_exclusion", nargs="*", default=[])
parser.add_argument("--prohibited_regex", required=True)
parser.add_argument("--suppression_regex")
parser.add_argument("--failure_message", required=True)
# We don't want to include path/to/check_contents.py as an argument
args = parser.parse_args(argv[1:])
file_diffs = filter_hunks_by_path(
diff_parser.parse_hunks(diff_parser.get_git_diff_stdout()),
path_regexes=args.path_regex,
path_regex_exclusions=args.path_regex_exclusion,
)
regex_locations = check_diffs(
file_diffs,
prohibited_regex=args.prohibited_regex,
suppression_regex=args.suppression_regex,
)
if regex_locations:
for loc in regex_locations:
logging.error(
"Found `%s` in %s:%s",
args.prohibited_regex,
loc.path,
loc.line_number,
)
logging.error(
"Matched `%s` in line `%s`", loc.matched_text, loc.line_contents
)
logging.error("Failure message: %s", args.failure_message)
sys.exit(1)
else:
logging.info(
"Prohibited regex `%s` not found in diff!", args.prohibited_regex
)
sys.exit(0)
if __name__ == "__main__":
main(sys.argv)
| RegexLocation |
python | dask__distributed | distributed/diagnostics/nvml.py | {
"start": 303,
"end": 896
} | class ____(IntEnum):
UNINITIALIZED = auto()
"""No attempt yet made to initialize PyNVML"""
INITIALIZED = auto()
"""PyNVML was successfully initialized"""
DISABLED_PYNVML_NOT_AVAILABLE = auto()
"""PyNVML not installed"""
DISABLED_CONFIG = auto()
"""PyNVML diagnostics disabled by ``distributed.diagnostics.nvml`` config setting"""
DISABLED_LIBRARY_NOT_FOUND = auto()
"""PyNVML available, but NVML not installed"""
DISABLED_WSL_INSUFFICIENT_DRIVER = auto()
"""PyNVML and NVML available, but on WSL and the driver version is insufficient"""
| NVMLState |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 65412,
"end": 65938
} | class ____(Operation):
def call(self, x):
return backend.numpy.copy(x)
def compute_output_spec(self, x):
sparse = getattr(x, "sparse", False)
return KerasTensor(x.shape, dtype=x.dtype, sparse=sparse)
@keras_export(["keras.ops.copy", "keras.ops.numpy.copy"])
def copy(x):
"""Returns a copy of `x`.
Args:
x: Input tensor.
Returns:
A copy of `x`.
"""
if any_symbolic_tensors((x,)):
return Copy().symbolic_call(x)
return backend.numpy.copy(x)
| Copy |
python | encode__django-rest-framework | rest_framework/exceptions.py | {
"start": 1886,
"end": 2752
} | class ____(str):
"""
A string-like object that can additionally have a code.
"""
code = None
def __new__(cls, string, code=None):
self = super().__new__(cls, string)
self.code = code
return self
def __eq__(self, other):
result = super().__eq__(other)
if result is NotImplemented:
return NotImplemented
try:
return result and self.code == other.code
except AttributeError:
return result
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented:
return NotImplemented
return not result
def __repr__(self):
return 'ErrorDetail(string=%r, code=%r)' % (
str(self),
self.code,
)
def __hash__(self):
return hash(str(self))
| ErrorDetail |
python | kamyu104__LeetCode-Solutions | Python/numbers-with-same-consecutive-differences.py | {
"start": 33,
"end": 385
} | class ____(object):
def numsSameConsecDiff(self, N, K):
"""
:type N: int
:type K: int
:rtype: List[int]
"""
curr = range(10)
for i in xrange(N-1):
curr = [x*10 + y for x in curr for y in set([x%10 + K, x%10 - K])
if x and 0 <= y < 10]
return curr
| Solution |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_vermont_zip.py | {
"start": 1743,
"end": 4078
} | class ____(ColumnMapExpectation):
"""Expect values in this column to be valid Vermont zipcodes.
See https://pypi.org/project/zipcodes/ for more information.
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"valid_vermont_zip": ["05001", "05362", "05757", "05907"],
"invalid_vermont_zip": ["-10000", "1234", "99999", "25487"],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "valid_vermont_zip"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "invalid_vermont_zip"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_vermont_zip"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [
"hackathon",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@luismdiaz01",
"@derekma73", # Don't forget to add your github handle here!
],
"requirements": ["zipcodes"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidVermontZip().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidVermontZip |
python | qdrant__qdrant-client | qdrant_client/embed/utils.py | {
"start": 115,
"end": 2522
} | class ____(BaseModel):
current: str
tail: Optional[list["FieldPath"]] = Field(default=None)
def as_str_list(self) -> list[str]:
"""
>>> FieldPath(current='a', tail=[FieldPath(current='b', tail=[FieldPath(current='c'), FieldPath(current='d')])]).as_str_list()
['a.b.c', 'a.b.d']
"""
# Recursive function to collect all paths
def collect_paths(path: FieldPath, prefix: str = "") -> list[str]:
current_path = prefix + path.current
if not path.tail:
return [current_path]
else:
paths = []
for sub_path in path.tail:
paths.extend(collect_paths(sub_path, current_path + "."))
return paths
# Collect all paths starting from this object
return collect_paths(self)
def convert_paths(paths: list[str]) -> list[FieldPath]:
"""Convert string paths into FieldPath objects
Paths which share the same root are grouped together.
Args:
paths: List[str]: List of str paths containing "." as separator
Returns:
List[FieldPath]: List of FieldPath objects
"""
sorted_paths = sorted(paths)
prev_root = None
converted_paths = []
for path in sorted_paths:
parts = path.split(".")
root = parts[0]
if root != prev_root:
converted_paths.append(FieldPath(current=root))
prev_root = root
current = converted_paths[-1]
for part in parts[1:]:
if current.tail is None:
current.tail = []
found = False
for tail in current.tail:
if tail.current == part:
current = tail
found = True
break
if not found:
new_tail = FieldPath(current=part)
assert current.tail is not None
current.tail.append(new_tail)
current = new_tail
return converted_paths
def read_base64(file_path: Union[str, Path]) -> str:
"""Convert a file path to a base64 encoded string."""
path = Path(file_path)
if not path.exists():
raise FileNotFoundError(f"The file {path} does not exist.")
with open(path, "rb") as file:
file_content = file.read()
return base64.b64encode(file_content).decode("utf-8")
| FieldPath |
python | neetcode-gh__leetcode | python/0322-coin-change.py | {
"start": 0,
"end": 354
} | class ____:
def coinChange(self, coins: List[int], amount: int) -> int:
dp = [amount + 1] * (amount + 1)
dp[0] = 0
for a in range(1, amount + 1):
for c in coins:
if a - c >= 0:
dp[a] = min(dp[a], 1 + dp[a - c])
return dp[amount] if dp[amount] != amount + 1 else -1
| Solution |
python | scikit-learn__scikit-learn | sklearn/tests/metadata_routing_common.py | {
"start": 5240,
"end": 6743
} | class ____(RegressorMixin, BaseEstimator):
"""A regressor consuming metadata.
Parameters
----------
registry : list, default=None
If a list, the estimator will append itself to the list in order to have
a reference to the estimator later on. Since that reference is not
required in all tests, registration can be skipped by leaving this value
as None.
"""
def __init__(self, registry=None):
self.registry = registry
def partial_fit(self, X, y, sample_weight="default", metadata="default"):
if self.registry is not None:
self.registry.append(self)
record_metadata_not_default(
self, sample_weight=sample_weight, metadata=metadata
)
return self
def fit(self, X, y, sample_weight="default", metadata="default"):
if self.registry is not None:
self.registry.append(self)
record_metadata_not_default(
self, sample_weight=sample_weight, metadata=metadata
)
return self
def predict(self, X, y=None, sample_weight="default", metadata="default"):
record_metadata_not_default(
self, sample_weight=sample_weight, metadata=metadata
)
return np.zeros(shape=(len(X),))
def score(self, X, y, sample_weight="default", metadata="default"):
record_metadata_not_default(
self, sample_weight=sample_weight, metadata=metadata
)
return 1
| ConsumingRegressor |
python | django__django | tests/fixtures/models.py | {
"start": 2665,
"end": 2995
} | class ____(models.Model):
person = models.ForeignKey(Person, models.CASCADE)
permissions = models.ManyToManyField(Permission, blank=True)
objects = VisaManager()
def __str__(self):
return "%s %s" % (
self.person.name,
", ".join(p.name for p in self.permissions.all()),
)
| Visa |
python | facebook__pyre-check | tools/upgrade/errors.py | {
"start": 13989,
"end": 17272
} | class ____(Exception):
pass
def _str_to_int(digits: str) -> Optional[int]:
try:
return int(digits)
except ValueError:
return None
def _get_unused_ignore_codes(errors: List[Dict[str, str]]) -> List[int]:
unused_ignore_codes: List[int] = []
ignore_errors = [error for error in errors if error["code"] == "0"]
for error in ignore_errors:
match = re.search(
r"The `pyre-ignore\[(.*?)\]` or `pyre-fixme\[.*?\]`", error["description"]
)
if match:
unused_ignore_codes.extend(
int_code
for int_code in (
_str_to_int(code.strip()) for code in match.group(1).split(",")
)
if int_code is not None
)
unused_ignore_codes.sort()
return unused_ignore_codes
def _remove_unused_ignores(line: str, errors: List[Dict[str, str]]) -> str:
unused_ignore_codes = _get_unused_ignore_codes(errors)
match = re.search(r"pyre-(ignore|fixme) *\[([0-9, ]+)\]", line)
stripped_line = re.sub(r"# *pyre-(ignore|fixme).*$", "", line).rstrip()
if not match:
return stripped_line
# One or more codes are specified in the ignore comment.
# Remove only the codes that are erroring as unused.
ignore_codes_string = match.group(2)
ignore_codes = [
int(code.strip()) for code in ignore_codes_string.split(",") if code != ""
]
remaining_ignore_codes = set(ignore_codes) - set(unused_ignore_codes)
if len(remaining_ignore_codes) == 0 or len(unused_ignore_codes) == 0:
return stripped_line
else:
return line.replace(
ignore_codes_string,
", ".join([str(code) for code in remaining_ignore_codes]),
)
def _line_ranges_spanned_by_format_strings(
source: str,
) -> Dict[libcst.CSTNode, LineRange]:
def _code_range_to_line_range(
code_range: libcst._position.CodeRange,
) -> LineRange:
return code_range.start.line, code_range.end.line
try:
wrapper = libcst.metadata.MetadataWrapper(libcst.parse_module(source))
except libcst._exceptions.ParserSyntaxError as exception:
# NOTE: This should not happen. If a file is unparseable for libcst, it
# would probably have been unparseable for Pyre as well. In that case,
# we would not have raised a 404 parse error and not reached here in the
# first place. Still, catch the exception and just skip the special
# handling of format strings.
LOG.warning(
"Not moving out fixmes from f-strings because"
f" libcst failed to parse the file: {exception}"
)
return {}
position_map = wrapper.resolve(libcst.metadata.PositionProvider)
return {
format_string: _code_range_to_line_range(position_map[format_string])
for format_string in libcst_matchers.findall(
wrapper.module, libcst_matchers.FormattedString()
)
}
def _map_line_to_start_of_range(line_ranges: List[LineRange]) -> Dict[int, int]:
target_line_map = {}
for start, end in reversed(line_ranges):
for line in range(start, end + 1):
target_line_map[line] = start
return target_line_map
| LineBreakParsingException |
python | getsentry__sentry | src/sentry/api/endpoints/custom_rules.py | {
"start": 1225,
"end": 1454
} | class ____(Exception):
def __init__(self, error_code: UnsupportedSearchQueryReason, *args, **kwargs):
super().__init__(error_code.value, *args, **kwargs)
self.error_code = error_code.value
| UnsupportedSearchQuery |
python | pytorch__pytorch | torch/autograd/gradcheck.py | {
"start": 884,
"end": 91997
} | class ____(RuntimeError):
r"""Error raised by :func:`gradcheck` and :func:`gradgradcheck`."""
def _is_sparse_compressed_tensor(obj: torch.Tensor):
return obj.layout in {
torch.sparse_csr,
torch.sparse_csc,
torch.sparse_bsr,
torch.sparse_bsc,
}
def _is_sparse_any_tensor(obj: torch.Tensor):
return _is_sparse_compressed_tensor(obj) or obj.layout is torch.sparse_coo
def _is_float_or_complex_tensor(obj):
return is_tensor_like(obj) and (obj.is_floating_point() or obj.is_complex())
def _allocate_jacobians_with_inputs(
input_tensors: tuple, numel_output
) -> tuple[torch.Tensor, ...]:
# Makes zero-filled tensors from inputs. If `numel_output` is not None, for
# each tensor in `input_tensors`, returns a new zero-filled tensor with height
# of `t.numel` and width of `numel_output`. Otherwise, for each tensor, returns
# a 1-d tensor with size `(t.numel,)`. Each new tensor will be strided and have
# the same dtype and device as those of the corresponding input.
out: list[torch.Tensor] = [
t.new_zeros((t.numel(), numel_output), layout=torch.strided)
for t in input_tensors
if _is_float_or_complex_tensor(t) and t.requires_grad
]
return tuple(out)
def _allocate_jacobians_with_outputs(
output_tensors: tuple, numel_input, dtype=None, device=None
) -> tuple[torch.Tensor, ...]:
# Makes zero-filled tensors from outputs. If `dim` is not None, for each tensor
# in `output_tensors`, returns a new zero-filled tensor with height of `dim` and
# width of `t.numel`. Otherwise, for each tensor, returns a 1-d tensor with size
# (t.numel,).
options = {"dtype": dtype, "device": device, "layout": torch.strided}
out: list[torch.Tensor] = [
t.new_zeros((numel_input, t.numel()), **options)
for t in output_tensors
if _is_float_or_complex_tensor(t)
]
return tuple(out)
def _iter_tensors(
x: Union[torch.Tensor, Iterable[torch.Tensor]], only_requiring_grad: bool = False
) -> Iterable[torch.Tensor]:
if is_tensor_like(x):
# mypy doesn't narrow type of `x` to torch.Tensor
if x.requires_grad or not only_requiring_grad: # type: ignore[union-attr]
yield x # type: ignore[misc]
elif isinstance(x, collections.abc.Iterable) and not isinstance(x, str):
for elem in x:
yield from _iter_tensors(elem, only_requiring_grad)
def _densify(x):
# return a copy of sparse x with all unspecified elements
# "replaced" with zero-valued elements
if isinstance(x, (list, tuple)):
return type(x)(map(_densify, x))
elif not is_tensor_like(x) or x.layout in {torch.strided, torch._mkldnn}: # type: ignore[attr-defined] # no attr _mkldnn
return x
elif x.layout is torch.sparse_coo:
device = x.device
indices_dtype = x._indices().dtype
tmp = torch.ones(x.shape[: x.sparse_dim()], dtype=torch.int8, device=device)
indices = tmp.nonzero().t().to(dtype=indices_dtype)
values = torch.zeros(
(tmp.numel(), *x.shape[x.sparse_dim() :]), dtype=x.dtype, device=device
)
x_coalesced = x.detach().coalesce()
if x_coalesced.numel() > 0:
stride = tmp.stride()
flat_indices = (
x_coalesced.indices()
.mul(
torch.tensor(stride, dtype=indices_dtype, device=device).unsqueeze(
1
)
)
.sum(0)
)
values[flat_indices] = x_coalesced.values()
return (
torch.sparse_coo_tensor(indices, values, x.shape)
._coalesced_(True)
.requires_grad_(x.requires_grad)
)
elif _is_sparse_compressed_tensor(x):
blocksize = (
x.values().shape[1:3]
if x.layout in {torch.sparse_bsr, torch.sparse_bsc}
else None
)
compressed_indices = (
x.crow_indices()
if x.layout in {torch.sparse_csr, torch.sparse_bsr}
else x.ccol_indices()
)
# We'll use intermediate sparse COO for simplicity
r = _densify(x.detach().to_sparse(layout=torch.sparse_coo)).to_sparse(
layout=x.layout, blocksize=blocksize
)
# Check that all elements are specified also after `to_sparse` op:
dense_numel = r.values().numel() // max(1, r.values().shape[0])
batch_numel = compressed_indices.numel() // compressed_indices.shape[-1]
sparse_numel = r.numel() // max(1, dense_numel * batch_numel)
if sparse_numel != r._nnz():
raise AssertionError(
f"{x.layout} densify failed: expected nnz={sparse_numel} but got {r._nnz()}"
)
return r.requires_grad_(x.requires_grad)
elif _is_sparse_any_tensor(x):
raise NotImplementedError(x.layout)
return x
def _iter_tensor(x_tensor):
# (Only used for slow gradcheck) Returns a generator that yields the following
# elements at each iteration:
# 1) a tensor: the same tensor is returned across all iterations. The tensor
# is not the same as the original x_tensor as given as input - it is
# prepared so that it can be modified in-place. Depending on whether the
# input tensor is strided, sparse, or dense, the returned tensor may or may
# not share storage with x_tensor.
# 2) a tuple of indices that can be used with advanced indexing (yielded in
# dictionary order)
# 3) flattened index that will be used to index into the Jacobian tensor
#
# For a tensor t with size (2, 2), _iter_tensor yields:
# `x, (0, 0), 0`, `x, (0, 1), 1`, `x, (1, 0), 2`, `x, (1, 1), 3`
#
# where x is the t.data of the original tensor. Perturbing the entry of x
# at index (1, 1) yields the 3rd column of the overall Jacobian matrix.
if _is_sparse_any_tensor(x_tensor):
def get_stride(size):
dim = len(size)
tmp = 1
stride = [0] * dim
for i in reversed(range(dim)):
stride[i] = tmp
tmp *= size[i]
return stride
x_nnz = x_tensor._nnz()
x_size = list(x_tensor.size())
if x_tensor.layout is torch.sparse_coo:
x_indices = x_tensor._indices().t()
x_values = x_tensor._values()
elif x_tensor.layout is torch.sparse_csr:
x_indices = torch._convert_indices_from_csr_to_coo(
x_tensor.crow_indices(), x_tensor.col_indices()
).t()
x_values = x_tensor.values()
elif x_tensor.layout is torch.sparse_csc:
x_indices = torch._convert_indices_from_csr_to_coo(
x_tensor.ccol_indices(), x_tensor.row_indices(), transpose=True
).t()
x_values = x_tensor.values()
elif x_tensor.layout is torch.sparse_bsr:
x_block_values = x_tensor.values()
x_blocksize = x_block_values.size()[1:3]
x_indices = (
torch._convert_indices_from_csr_to_coo(
x_tensor.crow_indices(), x_tensor.col_indices()
)
.repeat_interleave(x_blocksize[0] * x_blocksize[1], 1)
.mul_(torch.tensor(x_blocksize, device=x_tensor.device).reshape(2, 1))
.add_(
torch.stack(
torch.where(torch.ones(x_blocksize, device=x_tensor.device))
).repeat(1, x_nnz)
)
.t()
)
x_values = x_block_values.flatten(0, 2)
x_nnz = x_values.size(0)
elif x_tensor.layout is torch.sparse_bsc:
x_block_values = x_tensor.values()
x_blocksize = x_block_values.size()[1:3]
x_indices = (
torch._convert_indices_from_csr_to_coo(
x_tensor.ccol_indices(), x_tensor.row_indices(), transpose=True
)
.repeat_interleave(x_blocksize[0] * x_blocksize[1], 1)
.mul_(torch.tensor(x_blocksize, device=x_tensor.device).reshape(2, 1))
.add_(
torch.stack(
torch.where(torch.ones(x_blocksize, device=x_tensor.device))
).repeat(1, x_nnz)
)
.t()
)
x_values = x_block_values.flatten(0, 2)
x_nnz = x_values.size(0)
else:
raise NotImplementedError(f"_iter_tensor for {x_tensor.layout} input")
x_stride = get_stride(x_size)
# Use .data here to get around the version check
x_values = x_values.data
for i in range(x_nnz):
x_value = x_values[i]
for x_idx in product(*[range(m) for m in x_values.size()[1:]]):
indices = x_indices[i].tolist() + list(x_idx)
d_idx = sum(indices[k] * x_stride[k] for k in range(len(x_size)))
yield x_value, x_idx, d_idx
elif x_tensor.layout == torch._mkldnn: # type: ignore[attr-defined]
for d_idx, x_idx in enumerate(product(*[range(m) for m in x_tensor.size()])):
# this is really inefficient, but without indexing implemented, there's
# not really a better way than converting back and forth
x_tensor_dense = x_tensor.to_dense()
yield x_tensor_dense, x_idx, d_idx
else:
# Use .data here to get around the version check
x_tensor = x_tensor.data
for d_idx, x_idx in enumerate(product(*[range(m) for m in x_tensor.size()])):
yield x_tensor, x_idx, d_idx
def _get_numerical_jacobian(
    fn, inputs, outputs=None, target=None, eps=1e-3, is_forward_ad=False
) -> list[tuple[torch.Tensor, ...]]:
    """Compute the numerical Jacobian of `fn(inputs)` with respect to `target`.
    If not specified, targets are the input. Returns M * N Jacobians where N is the
    number of tensors in target that require grad and M is the number of non-integral
    outputs.
    Args:
        fn: the function to compute the jacobian for
        inputs: inputs to `fn`
        outputs: provide precomputed outputs to avoid one extra invocation of fn
        target: the Tensors wrt whom Jacobians are calculated (default=`inputs`)
        eps: the magnitude of the perturbation during finite differencing
            (default=`1e-3`)
        is_forward_ad: if this numerical jacobian is computed to be checked wrt
                       forward AD gradients (this is used for error checking only)
    Returns:
        A list of M N-tuples of tensors
    Note that `target` may not even be part of `input` to `fn`, so please be
    **very careful** in this to not clone `target`.
    """
    jacobians: list[tuple[torch.Tensor, ...]] = []
    if outputs is None:
        # No precomputed outputs given: evaluate fn once to discover them.
        outputs = _as_tuple(fn(*_as_tuple(inputs)))
    if not is_forward_ad and any(o.is_complex() for o in outputs):
        raise ValueError(
            "Expected output to be non-complex. get_numerical_jacobian no "
            "longer supports functions that return complex outputs."
        )
    if target is None:
        target = inputs
    # Indices (within `target`) of the tensors we differentiate with respect to.
    inp_indices = [
        i for i, a in enumerate(target) if is_tensor_like(a) and a.requires_grad
    ]
    # One N-tuple of per-output Jacobians for each differentiable input.
    for inp, inp_idx in zip(_iter_tensors(target, True), inp_indices):
        jacobians += [
            get_numerical_jacobian_wrt_specific_input(
                fn,
                inp_idx,
                inputs,
                outputs,
                eps,
                input=inp,
                is_forward_ad=is_forward_ad,
            )
        ]
    return jacobians
@deprecated(
    "`get_numerical_jacobian` was part of PyTorch's private API and not "
    "meant to be exposed. We are deprecating it and it will be removed "
    "in a future version of PyTorch. If you have a specific use for "
    "this or feature request for this to be a stable API, please file "
    "us an issue at https://github.com/pytorch/pytorch/issues/new",
    category=FutureWarning,
)
def get_numerical_jacobian(fn, inputs, target=None, eps=1e-3, grad_out=1.0):
    """Compute the numerical Jacobian for a given fn and its inputs.
    This is a Deprecated API.
    Args:
        fn: the function to compute the Jacobian for (must take inputs as a tuple)
        inputs: input to `fn`
        target: the Tensors wrt whom Jacobians are calculated (default=`input`)
        eps: the magnitude of the perturbation during finite differencing
            (default=`1e-3`)
        grad_out: defaults to 1.0.
    Returns:
        A list of Jacobians of `fn` (restricted to its first output) with respect to
        each input or target, if provided.
    Note that `target` may not even be part of `input` to `fn`, so please be
    **very careful** in this to not clone `target`.
    """
    if (
        grad_out != 1.0
    ):  # grad_out param is only kept for backward compatibility reasons
        raise ValueError(
            "Expected grad_out to be 1.0. get_numerical_jacobian no longer "
            "supports values of grad_out != 1.0."
        )

    # The legacy API passed all inputs to `fn` as one tuple argument; adapt to
    # `_get_numerical_jacobian`, which splats them.
    def fn_pack_inps(*inps):
        return fn(inps)

    jacobians = _get_numerical_jacobian(fn_pack_inps, inputs, None, target, eps)

    # Old return convention: only the Jacobian wrt the first output per input.
    return tuple(jacobian_for_each_output[0] for jacobian_for_each_output in jacobians)
def _compute_numerical_gradient(fn, entry, v, norm_v, nbhd_checks_fn):
    """Approximate the directional derivative of `fn` at `entry` along `v`.

    Uses the central difference (fn(x + v) - fn(x - v)) / (2 * norm_v). `entry`
    is perturbed in place and restored afterwards; `nbhd_checks_fn` validates
    that both evaluations agree in dtype and shape.
    """
    # Computes numerical directional derivative as finite difference
    # of function `fn` at input `entry`, perturbed by vector `v`.
    if _is_sparse_compressed_tensor(entry):
        # sparse compressed tensors don't implement sub/add/copy_
        # yet. However, in non-masked semantics context entry and v
        # have the same sparse indices ...
        if entry.layout != v.layout:
            raise AssertionError(
                f"Expected entry and v to have the same layout, but got {entry.layout} and {v.layout}"
            )
        if entry._nnz() != v._nnz():
            raise AssertionError(
                f"Expected entry and v to have the same nnz, but got {entry._nnz()} and {v._nnz()} "
                f"with entry shape {entry.shape}"
            )
        # ... the finite differencing can be performed on values only:
        entry = entry.values()
        v = v.values()
    # we'll detach to avoid backward computations that sparse
    # tensors have limited support for.
    entry = entry.detach()
    orig = entry.clone()
    # Two-sided perturbation: evaluate at entry - v, then entry + v, then
    # restore the original value so later columns start from a clean state.
    entry.copy_(orig - v)
    outa = fn()
    entry.copy_(orig + v)
    outb = fn()
    entry.copy_(orig)

    def compute(a, b):
        nbhd_checks_fn(a, b)
        ret = (b - a) / (2 * norm_v)  # use central difference approx
        return ret.detach().reshape(-1)

    return tuple(compute(a, b) for (a, b) in zip(outa, outb))
def _compute_numerical_jvps_wrt_specific_input(
jvp_fn, delta, input_is_complex, is_forward_ad=False
) -> list[torch.Tensor]:
# Computing the jacobian only works for real delta
# For details on the algorithm used here, refer:
# Section 3.5.3 https://arxiv.org/pdf/1701.00392.pdf
# s = fn(z) where z = x for real valued input
# and z = x + yj for complex valued input
jvps: list[torch.Tensor] = []
ds_dx_tup = jvp_fn(delta[0] if isinstance(delta, tuple) else delta)
if input_is_complex: # C -> R
ds_dy_tup = (
jvp_fn(delta[1] * 1j) if isinstance(delta, tuple) else jvp_fn(delta * 1j)
)
for ds_dx, ds_dy in zip(ds_dx_tup, ds_dy_tup):
if ds_dx.is_complex():
raise AssertionError("Expected ds_dx to be real-valued, not complex")
# conjugate wirtinger derivative
conj_w_d = ds_dx + ds_dy * 1j
jvps.append(conj_w_d)
else:
for ds_dx in ds_dx_tup: # R -> R or (R -> C for the forward AD case)
if not is_forward_ad and ds_dx.is_complex():
raise AssertionError("Expected ds_dx to be real-valued, not complex.")
jvps.append(ds_dx)
return jvps
def _combine_jacobian_cols(
    jacobians_cols: dict[int, list[torch.Tensor]], outputs, input, numel
) -> tuple[torch.Tensor, ...]:
    """Assemble per-column JVP results into one full Jacobian tensor per output."""
    # jacobian_cols maps column_idx -> output_idx -> single column of jacobian Tensor
    # we return a list that maps output_idx -> full jacobian Tensor
    jacobians = _allocate_jacobians_with_outputs(
        outputs, numel, dtype=input.dtype if input.dtype.is_complex else None
    )
    for i, jacobian in enumerate(jacobians):
        # Write each stored column into its (possibly non-consecutive) row k.
        for k, v in jacobians_cols.items():
            jacobian[k] = v[i]
    return jacobians
def _prepare_input(
    input: torch.Tensor, maybe_perturbed_input: Optional[torch.Tensor], fast_mode=False
) -> torch.Tensor:
    """Return the tensor to actually pass to the function, substituting the
    perturbed stand-in where one is required.

    mkldnn inputs must be re-wrapped from the perturbed dense copy; sparse
    inputs use the perturbed clone only in fast mode; strided inputs are passed
    through unchanged.
    """
    if input.layout == torch._mkldnn:  # type: ignore[attr-defined] # no attr _mkldnn
        # The perturbation happened on a dense copy; convert it back to mkldnn.
        if maybe_perturbed_input is None:
            return input
        return maybe_perturbed_input.to_mkldnn()

    if _is_sparse_any_tensor(input):
        if fast_mode and maybe_perturbed_input is not None:
            # entry is already a "cloned" version of the original tensor
            # thus changes to entry are not reflected in the input
            return maybe_perturbed_input
        return input

    # Strided case: we cannot use entry (input.data) if we want gradgrad to
    # work because fn (in the gradgrad case) needs to compute grad wrt input.
    return input
def _check_outputs_same_dtype_and_shape(output1, output2, eps, idx=None) -> None:
# Check that the returned outputs don't have different dtype or shape when you
# perturb the input
on_index = f"on index {idx} " if idx is not None else ""
if output1.shape != output2.shape:
raise AssertionError(
f"Expected `func` to return outputs with the same shape"
f" when inputs are perturbed {on_index}by {eps}, but got:"
f" shapes {output1.shape} and {output2.shape}."
)
if output1.dtype != output2.dtype:
raise AssertionError(
f"Expected `func` to return outputs with the same dtype"
f" when inputs are perturbed {on_index}by {eps}, but got:"
f" dtypes {output1.dtype} and {output2.dtype}."
)
def get_numerical_jacobian_wrt_specific_input(
    fn, input_idx, inputs, outputs, eps, input=None, is_forward_ad=False
) -> tuple[torch.Tensor, ...]:
    """Numerical Jacobians of `fn` wrt the single input at `input_idx`.

    Returns one Jacobian tensor per output of `fn`.
    """
    # Computes the numerical jacobians wrt to a single input. Returns N jacobian
    # tensors, where N is the number of outputs. We use a dictionary for
    # jacobian_cols because indices aren't necessarily consecutive for sparse inputs
    # When we perturb only a single element of the input tensor at a time, the jvp
    # is equivalent to a single col of the Jacobian matrix of fn.
    jacobian_cols: dict[int, list[torch.Tensor]] = {}
    input = inputs[input_idx] if input is None else input
    if not input.requires_grad:
        raise AssertionError("Expected input to have requires_grad=True")
    for x, idx, d_idx in _iter_tensor(input):
        wrapped_fn = _with_prepare_inputs(fn, inputs, input_idx, x)
        input_to_perturb = x[idx]
        # Bind the element index into the shape/dtype check for better errors.
        nbhd_checks_fn = functools.partial(
            _check_outputs_same_dtype_and_shape, idx=idx, eps=eps
        )
        jvp_fn = _get_numerical_jvp_fn(
            wrapped_fn, input_to_perturb, eps, nbhd_checks_fn
        )
        # Each perturbed element yields one Jacobian column, keyed by its flat
        # index d_idx.
        jacobian_cols[d_idx] = _compute_numerical_jvps_wrt_specific_input(
            jvp_fn, eps, x.is_complex(), is_forward_ad
        )
    return _combine_jacobian_cols(jacobian_cols, outputs, input, input.numel())
def _get_analytical_jacobian_forward_ad(
    fn, inputs, outputs, *, check_grad_dtypes=False, all_u=None
) -> tuple[tuple[torch.Tensor, ...], ...]:
    """Compute the analytical Jacobian using forward mode AD of `fn(inputs)` using forward mode AD with respect to `target`.
    Return N * M Jacobians where N is the number of tensors in target that require grad and
    M is the number of non-integral outputs.
    Contrary to other functions here, this function requires "inputs" to actually be used by the function.
    The computed value is expected to be wrong if the function captures the inputs by side effect instead of
    using the passed ones (many torch.nn tests do this).
    Args:
        fn: the function to compute the jacobian for
        inputs: inputs to `fn`
        outputs: provide precomputed outputs to avoid one extra invocation of fn
        check_grad_dtypes: if True, will check that the gradient dtype are valid
        all_u (optional): if provided, the Jacobian will be right multiplied with this vector
    Returns:
        A tuple of M N-tuples of tensors
    """
    # To avoid early import issues
    fwAD = torch.autograd.forward_ad

    tensor_inputs = tuple(i for i in inputs if is_tensor_like(i) and i.requires_grad)
    if any(i.is_complex() for i in tensor_inputs):
        raise ValueError(
            "Expected inputs to be non-complex for _get_analytical_jacobian_forward_ad."
        )

    # With all_u we only need one reduced column per input; otherwise the full
    # numel-wide Jacobian is allocated.
    if all_u:
        jacobians = tuple(
            _allocate_jacobians_with_outputs(outputs, 1) for i in tensor_inputs
        )
    else:
        jacobians = tuple(
            _allocate_jacobians_with_outputs(outputs, i.numel()) for i in tensor_inputs
        )

    with fwAD.dual_level():
        fw_grads = []
        dual_inputs = []
        for inp in inputs:
            if is_tensor_like(inp) and inp.requires_grad:
                if inp.layout == torch._mkldnn:  # type: ignore[attr-defined]
                    raise ValueError(
                        "MKLDNN inputs are not support for forward AD gradcheck."
                    )

                inp = fwAD.make_dual(inp.detach(), torch.zeros_like(inp))
                # If inp is a differentiable view, the dual might not be the tangent given to
                # make_dual, so read it explicitly from the dual tensor
                fw_grads.append(fwAD.unpack_dual(inp)[1])
            dual_inputs.append(inp)

        if all_u:
            # Do the full reduction in one pass
            # To be consistent with numerical evaluation, we actually compute one reduction per input
            for i, (fw_grad, u) in enumerate(zip(fw_grads, all_u)):
                fw_grad.copy_(u.view_as(fw_grad))
                raw_outputs = _as_tuple(fn(*dual_inputs))
                dual_outputs = filter(_is_float_or_complex_tensor, raw_outputs)
                for index_o, d_o in enumerate(dual_outputs):
                    val, res = fwAD.unpack_dual(d_o)
                    if (
                        check_grad_dtypes
                        and res is not None
                        and val.is_complex() != res.is_complex()
                    ):
                        raise GradcheckError("Forward AD gradient has dtype mismatch.")

                    # Remove extra dimension of size 1 corresponding to the reduced input
                    jacobians[i][index_o].squeeze_(0)
                    if res is None:
                        jacobians[i][index_o].zero_()
                    else:
                        jacobians[i][index_o].copy_(res.reshape(-1))
                # Reset this tangent so the next input's pass is independent.
                fw_grad.zero_()
        else:
            # Reconstruct the full Jacobian column by column
            for i, fw_grad in enumerate(fw_grads):
                for lin_idx, grad_idx in enumerate(
                    product(*[range(m) for m in fw_grad.size()])
                ):
                    # One-hot tangent selects a single Jacobian column.
                    fw_grad[grad_idx] = 1.0
                    raw_outputs = _as_tuple(fn(*dual_inputs))
                    dual_outputs = filter(_is_float_or_complex_tensor, raw_outputs)
                    for index_o, d_o in enumerate(dual_outputs):
                        val, res = fwAD.unpack_dual(d_o)
                        if (
                            check_grad_dtypes
                            and res is not None
                            and val.is_complex() != res.is_complex()
                        ):
                            raise GradcheckError(
                                "Forward AD gradient has dtype mismatch."
                            )

                        if res is None:
                            jacobians[i][index_o][lin_idx].zero_()
                        else:
                            jacobians[i][index_o][lin_idx].copy_(res.reshape(-1))
                    fw_grad[grad_idx] = 0.0

    return jacobians
def _get_input_to_perturb(input):
    """Return a strided, in-place-modifiable stand-in for `input`.

    If fast_mode=False, _iter_tensor would handle these cases instead.
    """
    if input.layout == torch._mkldnn:  # type: ignore[attr-defined] # no attr _mkldnn
        # mkldnn tensors have no strides; densify so we can perturb elements.
        return input.to_dense()
    if _is_sparse_any_tensor(input):
        # Clone because input may require grad, and copy_ calls resize_,
        # which is not allowed for .data
        return input.clone()
    # Use .data to get around the version counter / autograd tracking.
    return input.data
def _with_prepare_inputs(fn, inputs, input_idx, input_to_perturb, fast_mode=False):
    """Bind `inputs` to `fn`, substituting the perturbable tensor at position
    `input_idx`, and return a zero-argument callable whose outputs are cloned."""

    def wrapped_fn():
        prepared = []
        for i, a in enumerate(_as_tuple(inputs)):
            if is_tensor_like(a):
                perturbed = input_to_perturb if i == input_idx else None
                prepared.append(_prepare_input(a, perturbed, fast_mode))
            else:
                prepared.append(a)
        # Clone outputs so later in-place perturbations can't alias them.
        return tuple(out.clone() for out in _as_tuple(fn(*prepared)))

    return wrapped_fn
def _get_numerical_jvp_fn(wrapped_fn, input_to_perturb, eps, nbhd_checks_fn):
# Wraps jvp_fn so that certain arguments are already supplied
def jvp_fn(delta):
return _compute_numerical_gradient(
wrapped_fn, input_to_perturb, delta, eps, nbhd_checks_fn
)
return jvp_fn
def _reshape_tensor_or_tuple(u, shape):
    """Reshape `u` — a tensor or a (real, imag) pair of tensors — to `shape`.

    Sparse tensors are returned untouched: no reshape is needed when the input
    corresponding to `u` is sparse.
    """
    if isinstance(u, tuple):
        if _is_sparse_any_tensor(u[0]):
            return u
        return (u[0].reshape(shape), u[1].reshape(shape))
    if _is_sparse_any_tensor(u):
        return u
    return u.reshape(shape)
def _mul_tensor_or_tuple(u, k):
if isinstance(u, tuple):
return (k * u[0], k * u[1])
else:
return k * u
def _get_numerical_jvp_wrt_specific_input(
    fn, input_idx, inputs, u, eps, is_forward_ad=False
) -> list[torch.Tensor]:
    """Numerical JVP of `fn` wrt input `input_idx` along direction `u`
    (fast-mode gradcheck). Returns one JVP tensor per output of `fn`.
    """
    input = inputs[input_idx]
    input_to_perturb = _get_input_to_perturb(input)
    wrapped_fn = _with_prepare_inputs(fn, inputs, input_idx, input_to_perturb, True)
    nbhd_checks_fn = functools.partial(_check_outputs_same_dtype_and_shape, eps=eps)
    jvp_fn = _get_numerical_jvp_fn(wrapped_fn, input_to_perturb, eps, nbhd_checks_fn)
    # Match u to the perturbable tensor's shape and scale it so the finite
    # difference uses a step of eps * u.
    u = _reshape_tensor_or_tuple(u, input_to_perturb.shape)
    u = _mul_tensor_or_tuple(u, eps)
    return _compute_numerical_jvps_wrt_specific_input(
        jvp_fn, u, input.is_complex(), is_forward_ad
    )
def _get_numerical_vJu(
    fn, inputs, inp_indices, func_out, all_u, all_v, eps, is_forward_ad
):
    """Numerically compute v^T J u for each (input, output) pair (fast mode)."""
    # Note that all_v can also be None, in that case, this function only computes Ju.
    reduced_jacobians: list[list[torch.Tensor]] = []
    for inp_idx, u in zip(inp_indices, all_u):
        all_Ju = _get_numerical_jvp_wrt_specific_input(
            fn, inp_idx, inputs, u, eps, is_forward_ad
        )
        # Filter out the Ju for non floating point outputs
        filtered_Ju = []
        func_out = _as_tuple(func_out)
        if len(all_Ju) != len(func_out):
            raise AssertionError(
                f"Expected all_Ju and func_out to have the same length, "
                f"but got {len(all_Ju)} and {len(func_out)}"
            )
        for Ju, output in zip(all_Ju, func_out):
            if _is_float_or_complex_tensor(output):
                filtered_Ju.append(Ju)
            else:
                # TODO: handle the other Ju
                pass
        if all_v is not None:
            # Contract each Ju with its v to obtain the scalar v^T J u.
            jacobian_scalars: list[torch.Tensor] = []
            for v, Ju in zip(all_v, filtered_Ju):
                jacobian_scalars.append(_dot_with_type_promotion(v, Ju))
            reduced_jacobians.append(jacobian_scalars)
        else:
            reduced_jacobians.append(filtered_Ju)
    return reduced_jacobians
def _check_jacobians_equal(j1, j2, atol):
# Check whether the max difference between two Jacobian tensors are within some
# tolerance `atol`.
for j1_x, j2_x in zip(j1, j2):
if j1_x.numel() != 0 and (j1_x - j2_x).abs().max() > atol:
return False
return True
def _stack_and_check_tensors(
    list_of_list_of_tensors, inputs, numel_outputs
) -> tuple[tuple[torch.Tensor, ...], bool, bool]:
    """Stack per-row gradient tensors into full Jacobians while recording
    whether each gradient matched its input's size and dtype.

    Returns (jacobians, correct_grad_sizes, correct_grad_types).
    """
    # For the ith tensor in the inner list checks whether it has the same size and
    # dtype as the ith differentiable input.
    out_jacobians = _allocate_jacobians_with_inputs(inputs, numel_outputs)
    diff_input_list = list(_iter_tensors(inputs, True))
    correct_grad_sizes = True
    correct_grad_types = True
    for i, tensor_list in enumerate(list_of_list_of_tensors):
        inp = diff_input_list[i]
        out_jacobian = out_jacobians[i]
        for j, tensor in enumerate(tensor_list):
            if tensor is not None and tensor.size() != inp.size():
                correct_grad_sizes = False
            elif tensor is not None and tensor.dtype != inp.dtype:
                correct_grad_types = False
            if tensor is None:
                # Unused input for this row: the corresponding column is zero.
                out_jacobian[:, j].zero_()
            else:
                # Densify non-strided gradients before flattening them in.
                dense = tensor.to_dense() if tensor.layout != torch.strided else tensor
                if out_jacobian[:, j].numel() != dense.numel():
                    raise AssertionError(
                        f"Expected out_jacobian column to have {dense.numel()} elements, "
                        f"but got {out_jacobian[:, j].numel()}"
                    )
                out_jacobian[:, j] = dense.reshape(-1)
    return out_jacobians, correct_grad_sizes, correct_grad_types
# Appended to the "Backward is not reentrant" GradcheckError raised by
# _check_analytical_jacobian_attributes; documents the `nondet_tol` workarounds
# for legitimately nondeterministic operators.
FAILED_NONDET_MSG = """\n
NOTE: If your op relies on non-deterministic operations i.e., it is listed here:
https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html
this failure might be expected.
If you are adding a new operator, please file an issue and then use one of the
workarounds. The workaround depends on how your test invokes gradcheck/gradgradcheck.
If the test
- manually invokes gradcheck/gradgradcheck, then call gradcheck/gradgradcheck
with `nondet_tol=<tol>` as a keyword argument.
- is OpInfo-based (e.g., in test_ops_gradients.py), then modify the OpInfo for the test
to have `gradcheck_nondet_tol=<tol>`.
- is a Module test (e.g., in common_nn.py), then modify the corresponding
module_test entry to have `gradcheck_nondet_tol=<tol>`
"""
def _check_analytical_jacobian_attributes(
    inputs, output, nondet_tol, check_grad_dtypes, fast_mode=False, v=None
) -> tuple[torch.Tensor, ...]:
    """Compute analytical (backward-mode) Jacobians of `output` wrt all
    differentiable inputs, raising GradcheckError on dtype/size mismatches or
    a non-reentrant (nondeterministic) backward pass.
    """
    # This is used by both fast and slow mode:
    # - For slow mode, vjps[i][j] is the jth row of the Jacobian wrt the ith
    #   input.
    # - For fast mode, vjps[i][0] is a linear combination of the rows
    #   of the Jacobian wrt the ith input
    diff_input_list = list(_iter_tensors(inputs, True))

    def vjp_fn(grad_output):
        return torch.autograd.grad(
            output, diff_input_list, grad_output, retain_graph=True, allow_unused=True
        )

    # Compute everything twice to check for nondeterminism (which we call reentrancy)
    if fast_mode:
        vjps1 = _get_analytical_vjps_wrt_specific_output(vjp_fn, output.clone(), v)
        vjps2 = _get_analytical_vjps_wrt_specific_output(vjp_fn, output.clone(), v)
    else:
        vjps1 = _compute_analytical_jacobian_rows(vjp_fn, output.clone())
        vjps2 = _compute_analytical_jacobian_rows(vjp_fn, output.clone())

    # Fast mode only produces a single reduced row.
    output_numel = output.numel() if not fast_mode else 1
    jacobians1, types_ok, sizes_ok = _stack_and_check_tensors(
        vjps1, inputs, output_numel
    )
    jacobians2, _, _ = _stack_and_check_tensors(vjps2, inputs, output_numel)
    reentrant = _check_jacobians_equal(jacobians1, jacobians2, nondet_tol)

    if not types_ok and check_grad_dtypes:
        raise GradcheckError("Gradient has dtype mismatch")
    if not sizes_ok:
        raise GradcheckError("Analytical gradient has incorrect size")
    if not reentrant:
        raise GradcheckError(
            "Backward is not reentrant, i.e., running backward with "
            "same input and grad_output multiple times gives different values, "
            "although analytical gradient matches numerical gradient."
            f"The tolerance for nondeterminism was {nondet_tol}." + FAILED_NONDET_MSG
        )
    return jacobians1
def _get_analytical_vJu_backward_mode(
    inputs, outputs, nondet_tol, check_grad_dtypes, all_v, all_u
):
    """Compute v^T J u with backward-mode AD for each (output, input) pair
    (fast-mode gradcheck)."""
    reduced_jacobians: list[list[torch.Tensor]] = []
    for output, v in zip(outputs, all_v):
        all_vJ = _check_analytical_jacobian_attributes(
            inputs, output, nondet_tol, check_grad_dtypes, fast_mode=True, v=v
        )
        jacobian_scalars: list[torch.Tensor] = []
        for vJ, u in zip(all_vJ, all_u):
            # Why do we need squeeze here? vJ is a 2-d tensor so that we can reuse
            # the error checking logic from slow mode
            vJ = vJ.T.squeeze(0)
            if vJ.is_complex():  # C -> R
                # Contract real and imaginary parts separately against the
                # (real, imag) pair in u.
                tv = torch.view_as_real(vJ.resolve_conj())
                tr = tv.select(-1, 0)
                ti = tv.select(-1, 1)
                jacobian_scalars.append(tr.dot(u[0]) + 1j * ti.dot(u[1]))
            else:  # R -> R
                jacobian_scalars.append(vJ.dot(u))
        reduced_jacobians.append(jacobian_scalars)
    return reduced_jacobians
@deprecated(
    "`get_analytical_jacobian` was part of PyTorch's private API and not "
    "meant to be exposed. We are deprecating it and it will be removed "
    "in a future version of PyTorch. If you have a specific use for "
    "this or feature request for this to be a stable API, please file "
    "us an issue at https://github.com/pytorch/pytorch/issues/new",
    category=FutureWarning,
)
def get_analytical_jacobian(inputs, output, nondet_tol=0.0, grad_out=1.0):
    """Deprecated: compute analytical Jacobians of `output` wrt `inputs`.

    Returns (jacobians, reentrant, correct_grad_sizes, correct_grad_types).
    """
    # Replicates the behavior of the old get_analytical_jacobian before the refactor
    # This shares much of its code with _check_analytical_jacobian_attributes
    if (
        grad_out != 1.0
    ):  # grad_out param is only kept for backward compatibility reasons
        raise ValueError(
            "Expected grad_out to be 1.0. get_analytical_jacobian no longer "
            "supports values of grad_out != 1.0."
        )
    if output.is_complex():
        raise ValueError(
            "Expected output to be non-complex. get_analytical_jacobian no "
            "longer supports functions that return complex outputs."
        )
    diff_input_list = list(_iter_tensors(inputs, True))

    def vjp_fn(grad_output):
        return torch.autograd.grad(
            output, diff_input_list, grad_output, retain_graph=True, allow_unused=True
        )

    # Compute everything twice to check for nondeterminism (which we call reentrancy)
    vjps1 = _compute_analytical_jacobian_rows(vjp_fn, output.clone())
    vjps2 = _compute_analytical_jacobian_rows(vjp_fn, output.clone())

    output_numel = output.numel()
    jacobians1, types_ok, sizes_ok = _stack_and_check_tensors(
        vjps1, inputs, output_numel
    )
    jacobians2, _, _ = _stack_and_check_tensors(vjps2, inputs, output_numel)
    reentrant = _check_jacobians_equal(jacobians1, jacobians2, nondet_tol)

    return jacobians1, reentrant, sizes_ok, types_ok
def _get_analytical_jacobian(inputs, outputs, input_idx, output_idx):
    """Slow-mode analytical Jacobian for a single (input, output) pair.

    Forgoes the dtype, shape, and reentrancy checks by disabling them.
    """
    per_input_jacobians = _check_analytical_jacobian_attributes(
        inputs,
        outputs[output_idx],
        nondet_tol=float("inf"),
        check_grad_dtypes=False,
    )
    return per_input_jacobians[input_idx]
def _compute_analytical_jacobian_rows(
vjp_fn, sample_output
) -> list[list[Optional[torch.Tensor]]]:
# Computes Jacobian row-by-row by projecting `vjp_fn` = v^T J on standard basis
# vectors: vjp_fn(e) = e^T J is a corresponding row of the Jacobian.
# NB: this function does not assume vjp_fn(v) to return tensors with the same
# number of elements for different v. This is checked when we later combine the
# rows into a single tensor.
grad_out_base = torch.zeros_like(
sample_output, memory_format=torch.legacy_contiguous_format
)
flat_grad_out = grad_out_base.view(-1)
# jacobians_rows[i][j] is the Jacobian jth row for the ith input
jacobians_rows: list[list[Optional[torch.Tensor]]] = []
for j in range(flat_grad_out.numel()):
flat_grad_out.zero_()
flat_grad_out[j] = 1.0 # projection for jth row of Jacobian
grad_inputs = vjp_fn(grad_out_base)
for i, d_x in enumerate(grad_inputs):
if j == 0:
jacobians_rows.append([])
jacobians_rows[i] += [
d_x.clone() if isinstance(d_x, torch.Tensor) else None
]
return jacobians_rows
def _get_analytical_vjps_wrt_specific_output(
vjp_fn, sample_output, v
) -> list[list[Optional[torch.Tensor]]]:
grad_inputs = vjp_fn(v.reshape(sample_output.shape))
vjps: list[list[Optional[torch.Tensor]]] = [
[vjp.clone() if isinstance(vjp, torch.Tensor) else None] for vjp in grad_inputs
]
return vjps
def _check_inputs(tupled_inputs) -> bool:
    """Validate gradcheck inputs: warn on non-double/complex128 precision,
    reject stride-0 (memory-overlapping) inputs, and require at least one
    input with requires_grad=True.
    """
    # Make sure that gradients are saved for at least one input
    any_input_requiring_grad = False
    for idx, inp in enumerate(tupled_inputs):
        if is_tensor_like(inp) and inp.requires_grad:
            if not (inp.dtype == torch.float64 or inp.dtype == torch.complex128):
                warnings.warn(
                    f"Input #{idx} requires gradient and "
                    "is not a double precision floating point or complex. "
                    "This check will likely fail if all the inputs are "
                    "not of double precision floating point or complex. ",
                    stacklevel=2,
                )
            # Strides are only meaningful on the strided values buffer, so
            # sparse inputs are checked via their values.
            if inp.is_sparse:
                content = inp._values()
            elif _is_sparse_compressed_tensor(inp):
                content = inp.values()
            else:
                content = inp
            # TODO: To cover more problematic cases, replace stride = 0 check with
            # "any overlap in memory" once we have a proper function to check it.
            if content.layout is not torch._mkldnn:  # type: ignore[attr-defined]
                if not all(
                    st > 0 or sz <= 1
                    for st, sz in zip(content.stride(), content.size())
                ):
                    raise RuntimeError(
                        f"The {idx}th input has a dimension with stride 0. gradcheck only "
                        "supports inputs that are non-overlapping to be able to "
                        "compute the numerical gradients correctly. You should call "
                        ".contiguous on the input before passing it to gradcheck."
                    )
            any_input_requiring_grad = True
    if not any_input_requiring_grad:
        raise ValueError(
            "gradcheck expects at least one input tensor to require gradient, "
            "but none of the them have requires_grad=True."
        )
    return True
def _check_outputs(outputs) -> None:
    """Reject sparse and mkldnn outputs, which gradcheck cannot yet handle."""
    tensor_outputs = [t for t in outputs if isinstance(t, torch.Tensor)]
    if any(_is_sparse_any_tensor(t) for t in tensor_outputs):
        # it is easier to call to_dense() on the sparse output than
        # to modify analytical jacobian
        raise ValueError(
            "Sparse output is not supported at gradcheck yet. "
            "Please call to_dense(masked_grad=...) on the output of fn for gradcheck."
        )
    if any(t.layout == torch._mkldnn for t in tensor_outputs):  # type: ignore[attr-defined]
        raise ValueError(
            "MKLDNN output is not supported at gradcheck yet. "
            "Please call to_dense(masked_grad=...) on the output of fn for gradcheck."
        )
def _check_no_differentiable_outputs(
    func, inputs, func_out, eps, *, is_forward_ad
) -> bool:
    """When a function has no differentiable outputs its numerical gradient
    must be exactly zero; raise GradcheckError otherwise."""
    all_jacobians = _get_numerical_jacobian(
        func, inputs, func_out, eps=eps, is_forward_ad=is_forward_ad
    )
    for jacobians_for_input in all_jacobians:
        for jacobian in jacobians_for_input:
            if torch.ne(jacobian, 0).sum() > 0:
                raise GradcheckError(
                    "Numerical gradient for function expected to be zero"
                )
    return True
def _check_no_differentiable_outputs_fast(
    func, func_out, all_inputs, inputs_indices, all_u, eps, nondet_tol
):
    """Fast-mode variant: the numerical JVP along each sampled direction `u`
    must be zero within `nondet_tol` when there are no differentiable outputs."""
    for inp_idx, u in zip(inputs_indices, all_u):
        jvps = _get_numerical_jvp_wrt_specific_input(func, inp_idx, all_inputs, u, eps)
        for jvp in jvps:
            if jvp.numel() == 0:
                # Nothing to compare for empty outputs.
                continue
            if (jvp - torch.zeros_like(jvp)).abs().max() > nondet_tol:
                raise GradcheckError(
                    "Numerical gradient for function expected to be zero"
                )
    return True
# Appended to batched-gradient (vmap-over-backward) failures raised by
# _test_batched_grad; documents the `check_batched_grad=False` workarounds.
FAILED_BATCHED_GRAD_MSG = """
gradcheck or gradgradcheck failed while testing batched gradient computation.
This could have been invoked in a number of ways (via a test that calls
gradcheck/gradgradcheck directly or via an autogenerated test).
If you are adding a new operator, please file an issue and then use one of the
workarounds. The workaround depends on how your test invokes gradcheck/gradgradcheck.
If the test
- manually invokes gradcheck/gradgradcheck, then call gradcheck/gradgradcheck
with `check_batched_grad=False` as a keyword argument.
- is OpInfo-based (e.g., in test_ops_gradients.py), then modify the OpInfo for the test
to have `check_batched_grad=False` and/or `check_batched_gradgrad=False`.
If you're modifying an existing operator that supports batched grad computation,
or wish to make a new operator work with batched grad computation, please read
the following.
To compute batched grads (e.g., jacobians, hessians), we vmap over the backward
computation. The most common failure case is if there is a 'vmap-incompatible
operation' in the backward pass. Please see
NOTE: [How to write vmap-compatible backward formulas]
in the codebase for an explanation of how to fix this.
""".strip()
# Appended to forward-AD batched-gradient failures raised by
# _test_batched_grad_forward_ad; documents the
# `check_batched_forward_grad=False` workarounds.
# Fix: "dependong" -> "depending" in the user-facing message.
FAILED_BATCHED_GRAD_MSG_FWD_AD = """
gradcheck failed while testing batched gradient computation with forward-mode AD.
This test is enabled automatically when both `check_batched_grad=True`
and `check_forward_ad=True`, but can be disabled in the following ways
depending on how the test was invoked (via a test that calls gradcheck
directly or via an autogenerated test).
If you are adding a new operator, please file an issue and then use one of the
workarounds. The workaround depends on how your test invokes gradcheck/gradgradcheck.
If the test
- manually invokes gradcheck/gradgradcheck, then call gradcheck/gradgradcheck
with `check_batched_forward_grad=False` as a keyword argument.
- is OpInfo-based (e.g., in test_ops_gradients.py), then modify the OpInfo for the test
to have `check_batched_forward_grad=False`
"""
def _get_failed_batched_grad_test_msg(
    output_idx, input_idx, res, exp, is_forward_ad=False
):
    """Format the failure message for a batched-gradient mismatch on the given
    (output, input) pair, embedding the relevant workaround instructions."""
    detail = FAILED_BATCHED_GRAD_MSG_FWD_AD if is_forward_ad else FAILED_BATCHED_GRAD_MSG
    return f"""
For output {output_idx} and input {input_idx}:
{detail}
Got:
{res}
Expected:
{exp}
""".strip()
def _test_batched_grad_forward_ad(func, inputs) -> bool:
    """Check that vmapping the forward-AD JVP of `func` over a batch of
    tangents matches computing the JVPs one at a time.

    Raises GradcheckError on a vmap failure or a result mismatch.
    """
    fwAD = torch.autograd.forward_ad  # To avoid early import issues (do we need this?)
    if not isinstance(inputs, tuple):
        raise AssertionError("Expected inputs to be a tuple")
    for input_idx, current_input in enumerate(inputs):
        if not (is_tensor_like(current_input) and current_input.requires_grad):
            continue

        def jvp(tangent: torch.Tensor):
            # JVP of func wrt current_input only: all other inputs are detached.
            with fwAD.dual_level():
                dual = fwAD.make_dual(current_input.detach(), tangent)
                inputs_with_dual = tuple(
                    dual
                    if idx == input_idx
                    else (inp.detach() if is_tensor_like(inp) else inp)
                    for idx, inp in enumerate(inputs)
                )
                dual_outputs = _as_tuple(func(*inputs_with_dual))
                ret = []
                for dual_output in dual_outputs:
                    if dual_output is None:
                        continue
                    primal_out, tangent_out = fwAD.unpack_dual(dual_output)
                    if tangent_out is not None:
                        ret.append(tangent_out)
                    else:
                        # Output does not depend on this input: tangent is zero.
                        ret.append(
                            torch.zeros(
                                [], dtype=primal_out.dtype, device=primal_out.device
                            ).expand(primal_out.shape)
                        )
                return tuple(ret)

        if not _is_float_or_complex_tensor(current_input):
            continue

        tangents = [torch.randn_like(current_input) for _ in range(2)]
        expected = [jvp(t) for t in tangents]
        expected = [torch.stack(shards) for shards in zip(*expected)]
        try:
            result = _vmap(jvp)(torch.stack(tangents))
        except RuntimeError as ex:
            # Rethrow to provide a better error message
            raise GradcheckError(
                f"While computing batched gradients, got: {ex}\n\n{FAILED_BATCHED_GRAD_MSG_FWD_AD}"
            ) from ex

        # NOTE(review): this enumerate index iterates over OUTPUTS but shadows
        # the outer `input_idx` and is passed as both output_idx and input_idx
        # below — the indices in the error message look misleading; confirm.
        for input_idx, (res, exp) in enumerate(zip(result, expected)):
            if torch.allclose(res, exp):
                continue
            raise GradcheckError(
                _get_failed_batched_grad_test_msg(
                    input_idx, input_idx, res, exp, is_forward_ad=True
                )
            )
    return True
def _test_batched_grad(input, output, output_idx) -> bool:
    """Check that vmapping the VJP of `output` over a batch of grad_outputs
    matches computing the VJPs one at a time.

    Raises GradcheckError on a vmap failure or a result mismatch.
    """
    # NB: _test_batched_grad compares two autograd.grad invocations with a single
    # vmap(autograd.grad) invocation. It's not exactly a "gradcheck" in the
    # sense that we're not comparing an analytical jacobian with a numeric one,
    # but it is morally similar (we could have computed a full analytic jac
    # via vmap, but that is potentially slow)
    diff_input_list = list(_iter_tensors(input, True))
    grad = functools.partial(
        torch.autograd.grad,
        output,
        diff_input_list,
        retain_graph=True,
        allow_unused=True,
    )

    def vjp(v):
        results = grad(v)
        # Replace grads of unused inputs with zeros of the input's shape.
        results = tuple(
            grad
            if grad is not None
            else torch.zeros([], dtype=inp.dtype, device=inp.device).expand(inp.shape)
            for grad, inp in zip(results, diff_input_list)
        )
        return results

    grad_outputs = [torch.randn_like(output) for _ in range(2)]
    expected = [vjp(gO) for gO in grad_outputs]
    expected = [torch.stack(shards) for shards in zip(*expected)]

    # Squash warnings since these are expected to happen in most cases
    # NB: this doesn't work for CUDA tests: https://github.com/pytorch/pytorch/issues/50209
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", message="There is a performance drop")
        warnings.filterwarnings("ignore", message="Please use `torch.vmap`")
        try:
            result = vmap(vjp)(torch.stack(grad_outputs))
        except RuntimeError as ex:
            # It's OK that we're not raising the error at the correct callsite.
            # That's because the callsite is always going to inside the Python
            # autograd.grad instead of the C++ traceback of what line in the
            # backward formula
            raise GradcheckError(
                f"While computing batched gradients, got: {ex}\n\n{FAILED_BATCHED_GRAD_MSG}"
            ) from ex

    for input_idx, (res, exp) in enumerate(zip(result, expected)):
        if torch.allclose(res, exp):
            continue
        raise GradcheckError(
            _get_failed_batched_grad_test_msg(output_idx, input_idx, res, exp)
        )
    return True
def _test_backward_mul_by_grad_output(outputs, inputs, masked) -> bool:
    """Check that backward() actually scales by grad_output.

    Feeds all-zero grad_outputs and asserts every resulting input grad is zero;
    a nonzero grad means the backward formula ignored grad_output. Also checks
    grad layout/dtype/device/size metadata against the corresponding input.

    Args:
        outputs: differentiable outputs of the function under test.
        inputs: user inputs; differentiable tensors extracted via ``_iter_tensors``.
        masked: if True, sparse grads are compared densely via ``allclose``.

    Returns:
        True on success; raises GradcheckError otherwise.
    """
    # Tests that backward is multiplied by grad_output
    diff_input_list: list[torch.Tensor] = list(_iter_tensors(inputs, True))
    if not diff_input_list:
        raise GradcheckError("no Tensors requiring grad found in input")
    grads_input = torch.autograd.grad(
        outputs,
        diff_input_list,
        [
            torch.zeros_like(o, memory_format=torch.legacy_contiguous_format)
            for o in outputs
        ],
        allow_unused=True,
    )
    for gi, di in zip(grads_input, diff_input_list):
        if gi is None:
            continue
        if isinstance(gi, torch.Tensor) and gi.layout != torch.strided:
            # Non-strided grads must match the input's layout exactly.
            if gi.layout != di.layout:
                raise GradcheckError(
                    "grad is incorrect layout ("
                    + str(gi.layout)
                    + " is not "
                    + str(di.layout)
                    + ")"
                )
            if _is_sparse_any_tensor(gi):
                sparse_kind = str(gi.layout).replace("torch.", "").replace("_coo", "")
                if gi.sparse_dim() != di.sparse_dim():
                    raise GradcheckError(
                        f"grad is {sparse_kind} tensor, but has incorrect sparse_dim"
                        f" {gi.sparse_dim()}, expected {di.sparse_dim()}"
                    )
                if gi.dense_dim() != di.dense_dim():
                    raise GradcheckError(
                        f"grad is {sparse_kind} tensor, but has incorrect dense_dim"
                        f" {gi.dense_dim()}, expected {di.dense_dim()}"
                    )
            # Densify so the zero check and size check below are well-defined.
            gi = gi.to_dense()
            di = di.to_dense()
        if masked:
            if not torch.allclose(gi, torch.zeros_like(gi)):
                raise GradcheckError("backward not multiplied by grad_output")
        elif not gi.eq(0).all():
            raise GradcheckError("backward not multiplied by grad_output")
        if gi.dtype != di.dtype:
            raise GradcheckError("grad is incorrect type")
        if gi.device != di.device:
            raise GradcheckError("grad is incorrect device")
        if gi.size() != di.size():
            raise GradcheckError("grad is incorrect size")
    return True
def _test_undefined_forward_mode(func, outputs, inputs):
    """Check forward-AD handles undefined (zero) tangents consistently.

    For each differentiable input, runs ``func`` twice: once with an explicit
    materialized zero tangent and once with a plain (non-dual) tensor, then
    asserts the resulting output tangents match.

    Returns:
        True on success; raises GradcheckError (or ValueError for MKLDNN
        inputs) otherwise.
    """
    fwAD = torch.autograd.forward_ad

    _inp_tensors_idx, inp_tensors = _get_inp_tensors(inputs)
    _all_v, all_u, _all_u_dense = _make_vectors(
        inp_tensors, outputs, use_forward_ad=True
    )

    with fwAD.dual_level():
        fw_grads = []
        dual_inputs = []
        tensor_indices = set()
        for i, inp in enumerate(inputs):
            if is_tensor_like(inp) and inp.requires_grad:
                if inp.layout == torch._mkldnn:  # type: ignore[attr-defined]
                    raise ValueError(
                        "MKLDNN inputs are not support for forward AD gradcheck."
                    )
                inp = fwAD.make_dual(inp.detach(), torch.zeros_like(inp))
                # If inp is a differentiable view, the dual might not be the tangent given to
                # make_dual, so read it explicitly from the dual tensor
                fw_grads.append(fwAD.unpack_dual(inp)[1])
                tensor_indices.add(i)
            dual_inputs.append(inp)

        # Fill every tangent with the random direction u for this input.
        for fw_grad, u in zip(fw_grads, all_u):
            fw_grad.copy_(u.view_as(fw_grad))

        for idx, inp in enumerate(inputs):
            if idx not in tensor_indices:
                continue
            # NOTE(review): indexing dual_inputs by the position in `inputs`
            # assumes dual_inputs is parallel to inputs — TODO confirm all
            # inputs (not just tensors) are appended above.
            dual_inp_obj = dual_inputs[idx]

            # case 1 (Materialized Zero Tensor Tangent)
            dual_inputs[idx] = fwAD.make_dual(inp.detach(), torch.zeros_like(inp))
            raw_outputs = _as_tuple(func(*dual_inputs))
            dual_outputs1 = filter(_is_float_or_complex_tensor, raw_outputs)

            # case 2 (Efficient Zero Tensor Tangent since we don't make a dual object and pass a regular tensor)
            dual_inputs[idx] = inp.detach()
            raw_outputs = _as_tuple(func(*dual_inputs))
            dual_outputs2 = filter(_is_float_or_complex_tensor, raw_outputs)

            # reset
            dual_inputs[idx] = dual_inp_obj

            for index_o, (d_o1, d_o2) in enumerate(zip(dual_outputs1, dual_outputs2)):
                _val1, res1 = fwAD.unpack_dual(d_o1)
                _val2, res2 = fwAD.unpack_dual(d_o2)

                if not (res1 is None or res2 is None):
                    if not torch.allclose(res1, res2):
                        raise GradcheckError(
                            "Mismatch in tangent values for output with index: ",
                            index_o,
                            " when input: ",
                            inp,
                            " has an undefined tangent value. ",
                            " Got: ",
                            res1,
                            " but expected: ",
                            res2,
                        )
    return True
def _test_undefined_backward_mode(func, outputs, inputs) -> bool:
    """Check that backward handles undefined output grads gracefully.

    Wraps outputs in ``UndefinedGrad`` (all at once, then one at a time when
    there are multiple outputs) and asserts the resulting input grads are all
    undefined or zero.

    Returns:
        True on success; raises GradcheckError otherwise.
    """
    diff_input_list: list[torch.Tensor] = list(_iter_tensors(inputs, True))
    if not diff_input_list:
        raise GradcheckError("no Tensors requiring grad found in input")

    def warn_bc_breaking():
        warnings.warn(
            "Backwards compatibility: New undefined gradient support checking "
            "feature is enabled by default, but it may break existing callers "
            "of this function. If this is true for you, you can call this "
            'function with "check_undefined_grad=False" to disable the feature',
            stacklevel=2,
        )

    def check_undefined_grad_support(output_to_check):
        # All-zero grad_outputs; the backward must tolerate them like undefined grads.
        grads_output = [
            torch.zeros_like(o, memory_format=torch.legacy_contiguous_format)
            for o in output_to_check
        ]
        try:
            grads_input = torch.autograd.grad(
                output_to_check, diff_input_list, grads_output, allow_unused=True
            )
        except RuntimeError as e:
            warn_bc_breaking()
            raise GradcheckError(
                "Expected backward function to handle undefined output grads. "
                'Please look at "Notes about undefined output gradients" in '
                '"tools/autograd/derivatives.yaml"'
            ) from e

        for gi in grads_input:
            if (gi is not None) and (not gi.eq(0).all()):
                warn_bc_breaking()
                raise GradcheckError(
                    "Expected all input grads to be undefined or zero when all output grads are undefined "
                    'or zero. Please look at "Notes about undefined output gradients" in '
                    '"tools/autograd/derivatives.yaml"'
                )
        return True

    # All backward functions must work properly if all output grads are undefined
    outputs_to_check = [
        [
            torch._C._functions.UndefinedGrad()(o)
            for o in _differentiable_outputs(func(*inputs))
            # This check filters out Tensor-likes that aren't instances of Tensor.
            if isinstance(o, torch.Tensor)
        ]
    ]

    # If there are multiple output grads, we should be able to undef one at a time without error
    if len(outputs_to_check[0]) > 1:
        for undef_grad_idx in range(len(outputs)):
            output_to_check = _differentiable_outputs(func(*inputs))
            outputs_to_check.append(
                [
                    torch._C._functions.UndefinedGrad()(o)
                    if idx == undef_grad_idx
                    else o
                    for idx, o in enumerate(output_to_check)
                ]
            )

    return all(check_undefined_grad_support(output) for output in outputs_to_check)
def _as_tuple(x):
if isinstance(x, tuple):
return x
elif isinstance(x, list):
return tuple(x)
else:
return (x,)
def _differentiable_outputs(x):
return tuple(o for o in _as_tuple(x) if o.requires_grad)
def _get_notallclose_msg(
analytical,
numerical,
output_idx,
input_idx,
complex_indices,
test_imag=False,
is_forward_ad=False,
) -> str:
out_is_complex = (
(not is_forward_ad) and complex_indices and output_idx in complex_indices
)
inp_is_complex = is_forward_ad and complex_indices and input_idx in complex_indices
part = "imaginary" if test_imag else "real"
element = "inputs" if is_forward_ad else "outputs"
prefix = (
""
if not (out_is_complex or inp_is_complex)
else f"While considering the {part} part of complex {element} only, "
)
mode = "computed with forward mode " if is_forward_ad else ""
return (
prefix
+ f"Jacobian {mode}mismatch for output {output_idx:d} with respect to input {input_idx:d},\n"
f"numerical:{numerical}\nanalytical:{analytical}\n"
)
def _transpose(matrix_of_tensors):
# returns list of tuples
return list(zip(*matrix_of_tensors))
def _real_and_imag_output(fn):
# returns new functions real(fn), and imag(fn) where real(fn) and imag(fn) behave the same as
# the original fn, except torch.real or torch.imag are applied to the complex outputs
def apply_to_c_outs(fn, fn_to_apply):
def wrapped_fn(*inputs):
outs = _as_tuple(fn(*inputs))
return tuple(fn_to_apply(o) if o.is_complex() else o for o in outs)
return wrapped_fn
return apply_to_c_outs(fn, torch.real), apply_to_c_outs(fn, torch.imag)
def _real_and_imag_input(fn, complex_inp_indices, tupled_inputs):
# returns new functions that take real inputs instead of complex inputs as
# (x, y) -> fn(x + y * 1j). And it computes: inp -> fn(inp + y * 1j) and inp -> fn(x + inp * 1j).
# In each case, the other part is considered constant.
# We do not use 0 for the constant here to make sure we always call the user function with a valid input.
def apply_to_c_inps(fn, fn_to_apply):
def wrapped_fn(*inputs):
new_inputs = list(inputs)
for should_be_complex in complex_inp_indices:
new_inputs[should_be_complex] = fn_to_apply(
new_inputs[should_be_complex], tupled_inputs[should_be_complex]
)
return _as_tuple(fn(*new_inputs))
return wrapped_fn
real_fn = apply_to_c_inps(fn, lambda inp, orig: inp + orig.imag * 1j)
imag_fn = apply_to_c_inps(fn, lambda inp, orig: orig.real + inp * 1j)
return real_fn, imag_fn
def _gradcheck_real_imag(
    gradcheck_fn,
    func,
    func_out,
    tupled_inputs,
    outputs,
    eps,
    rtol,
    atol,
    check_grad_dtypes,
    check_forward_ad,
    check_backward_ad,
    nondet_tol,
    check_undefined_grad,
):
    """Dispatch gradcheck over the real and imaginary decompositions.

    Backward AD with complex outputs is checked by gradchecking two
    real-valued functions (real part and imaginary part of the outputs);
    forward AD with complex inputs is checked analogously by splitting the
    inputs. ``gradcheck_fn`` is the underlying slow/fast gradcheck.
    """
    complex_out_indices = [i for i, o in enumerate(outputs) if o.is_complex()]
    has_any_complex_output = any(o.is_complex() for o in _as_tuple(func_out))
    if check_backward_ad:
        if has_any_complex_output:
            real_fn, imag_fn = _real_and_imag_output(func)

            imag_func_out = imag_fn(*tupled_inputs)
            imag_outputs = _differentiable_outputs(imag_func_out)
            gradcheck_fn(
                imag_fn,
                imag_func_out,
                tupled_inputs,
                imag_outputs,
                eps,
                rtol,
                atol,
                check_grad_dtypes,
                nondet_tol,
                complex_indices=complex_out_indices,
                test_imag=True,
            )

            real_func_out = real_fn(*tupled_inputs)
            real_outputs = _differentiable_outputs(real_func_out)
            gradcheck_fn(
                real_fn,
                real_func_out,
                tupled_inputs,
                real_outputs,
                eps,
                rtol,
                atol,
                check_grad_dtypes,
                nondet_tol,
                complex_indices=complex_out_indices,
            )
        else:
            # No complex outputs: run the plain backward-mode check.
            gradcheck_fn(
                func,
                func_out,
                tupled_inputs,
                outputs,
                eps,
                rtol,
                atol,
                check_grad_dtypes,
                nondet_tol,
            )

    if check_forward_ad:
        complex_inp_indices = [
            i
            for i, inp in enumerate(tupled_inputs)
            if is_tensor_like(inp) and inp.is_complex()
        ]
        if complex_inp_indices:
            # Split each complex input into real/imag views and check both.
            real_fn, imag_fn = _real_and_imag_input(
                func, complex_inp_indices, tupled_inputs
            )

            imag_inputs = [
                inp.imag if is_tensor_like(inp) and inp.is_complex() else inp
                for inp in tupled_inputs
            ]
            imag_func_out = imag_fn(*imag_inputs)
            diff_imag_func_out = _differentiable_outputs(imag_func_out)
            gradcheck_fn(
                imag_fn,
                imag_func_out,
                imag_inputs,
                diff_imag_func_out,
                eps,
                rtol,
                atol,
                check_grad_dtypes,
                nondet_tol,
                complex_indices=complex_inp_indices,
                test_imag=True,
                use_forward_ad=True,
            )

            real_inputs = [
                inp.real if is_tensor_like(inp) and inp.is_complex() else inp
                for inp in tupled_inputs
            ]
            real_func_out = real_fn(*real_inputs)
            diff_real_func_out = _differentiable_outputs(real_func_out)
            gradcheck_fn(
                real_fn,
                real_func_out,
                real_inputs,
                diff_real_func_out,
                eps,
                rtol,
                atol,
                check_grad_dtypes,
                nondet_tol,
                complex_indices=complex_inp_indices,
                use_forward_ad=True,
            )
            if check_undefined_grad:
                _test_undefined_forward_mode(imag_fn, imag_func_out, imag_inputs)
                _test_undefined_forward_mode(real_fn, real_func_out, real_inputs)
        else:
            gradcheck_fn(
                func,
                func_out,
                tupled_inputs,
                outputs,
                eps,
                rtol,
                atol,
                check_grad_dtypes,
                nondet_tol,
                use_forward_ad=True,
            )
            if check_undefined_grad:
                _test_undefined_forward_mode(func, outputs, tupled_inputs)
def _slow_gradcheck(
    func,
    func_out,
    tupled_inputs,
    outputs,
    eps,
    rtol,
    atol,
    check_grad_dtypes,
    nondet_tol,
    *,
    use_forward_ad=False,
    complex_indices=None,
    test_imag=False,
    masked=False,
):
    """Slow-mode gradcheck: compare full numerical and analytical jacobians
    entry-wise for every (output, input) pair.

    Returns True on success; raises GradcheckError on any mismatch.
    """
    func_out = _as_tuple(func_out)
    if not outputs:
        # Nothing differentiable: just verify the numerical jacobian is zero.
        return _check_no_differentiable_outputs(
            func, tupled_inputs, func_out, eps=eps, is_forward_ad=use_forward_ad
        )
    # In non-masked mode sparse inputs are densified before perturbation.
    tupled_inputs_numerical = tupled_inputs if masked else _densify(tupled_inputs)

    numerical = _transpose(
        _get_numerical_jacobian(
            func,
            tupled_inputs_numerical,
            func_out,
            eps=eps,
            is_forward_ad=use_forward_ad,
        )
    )
    # Note: [numerical vs analytical output length]
    # The numerical path returns jacobian quantity for all outputs, even if requires_grad of that
    # output is False. This behavior is necessary for _check_no_differentiable_outputs to work.
    numerical = [nj for o, nj in zip(func_out, numerical) if o.requires_grad]

    if use_forward_ad:
        analytical_forward = _get_analytical_jacobian_forward_ad(
            func, tupled_inputs, func_out, check_grad_dtypes=check_grad_dtypes
        )

        for i, n_per_out in enumerate(numerical):
            for j, n in enumerate(n_per_out):
                a = analytical_forward[j][i]
                if not _allclose_with_type_promotion(a, n.to(a.device), rtol, atol):
                    raise GradcheckError(
                        _get_notallclose_msg(
                            a, n, i, j, complex_indices, test_imag, is_forward_ad=True
                        )
                    )
    else:
        for i, o in enumerate(outputs):
            analytical = _check_analytical_jacobian_attributes(
                tupled_inputs, o, nondet_tol, check_grad_dtypes
            )

            for j, (a, n) in enumerate(zip(analytical, numerical[i])):
                if not _allclose_with_type_promotion(a, n.to(a.device), rtol, atol):
                    raise GradcheckError(
                        _get_notallclose_msg(a, n, i, j, complex_indices, test_imag)
                    )

    return True
def _dot_with_type_promotion(u, v):
if u.dim() != 1 or v.dim() != 1:
raise AssertionError(
f"Expected u and v to be 1D tensors, but got dims {u.dim()} and {v.dim()}"
)
return (u * v).sum()
def _allclose_with_type_promotion(a, b, rtol, atol):
promoted_type = torch.promote_types(a.dtype, b.dtype)
a = a.to(dtype=promoted_type)
b = b.to(dtype=promoted_type)
return torch.allclose(a, b, rtol, atol)
def _to_real_dtype(dtype):
if dtype == torch.complex128:
return torch.float64
elif dtype == torch.complex64:
return torch.float32
else:
return dtype
def _vec_from_tensor(x, generator, downcast_complex=False):
    """Create a random unit-norm vector with the same number of elements,
    dtype, device, and (sparse) layout as ``x``.

    If ``x`` is complex and ``downcast_complex`` is False, the result is a
    complex tensor with only a real component; if True, the real counterpart
    dtype is used instead. For sparse tensors, random values are placed at
    ``x``'s existing indices.
    """
    # Create a random vector with the same number of elements as x and the same
    # dtype/device. If x is complex and downcast_complex is False, we create a
    # complex tensor with only real component.
    if x.layout == torch.sparse_coo:
        # For sparse, create a random sparse vec with random values in the same
        # indices. Make sure size is set so that it isn't inferred to be smaller.
        x_values = x._values()
        dtype = _to_real_dtype(x.dtype) if downcast_complex else x.dtype
        values = (
            torch.rand(x_values.numel(), generator=generator)
            .to(dtype=dtype, device=x.device)
            .view(x_values.shape)
        )
        values /= values.norm()
        vec = torch.sparse_coo_tensor(x._indices(), values, x.size(), device=x.device)
    elif _is_sparse_compressed_tensor(x):
        # Compressed layouts: reuse x's index structure, randomize values only.
        if x.layout in {torch.sparse_csr, torch.sparse_bsr}:
            compressed_indices, plain_indices = x.crow_indices(), x.col_indices()
        else:
            compressed_indices, plain_indices = x.ccol_indices(), x.row_indices()
        x_values = x.values()
        dtype = _to_real_dtype(x.dtype) if downcast_complex else x.dtype
        values = (
            torch.rand(x_values.numel(), generator=generator)
            .to(dtype=dtype, device=x.device)
            .view(x_values.shape)
        )
        values /= values.norm()
        vec = torch.sparse_compressed_tensor(
            compressed_indices,
            plain_indices,
            values,
            x.size(),
            layout=x.layout,
            device=x.device,
        )
    else:
        dtype = _to_real_dtype(x.dtype) if downcast_complex else x.dtype
        vec = torch.rand(x.numel(), generator=generator).to(
            dtype=dtype, device=x.device
        )
        vec /= vec.norm()
    return vec
def _get_inp_tensors(tupled_inputs):
inp_idx_tup = [
(i, t)
for i, t in enumerate(tupled_inputs)
if is_tensor_like(t) and t.requires_grad
]
return [tup[0] for tup in inp_idx_tup], [tup[1] for tup in inp_idx_tup]
def _adjusted_atol(atol, u, v):
# In slow gradcheck, we compare A and B element-wise, i.e., for some a, b we
# allow: |a - b| < atol + rtol * b. But since we now compare q1 = v^T A u and
# q2 = v^T B u, we must allow |q1 - q2| < v^T E u + rtol * v^T B u, where E is
# the correctly sized matrix in which each entry is atol.
#
# We see that atol needs to be scaled by v^T M u (where M is an all-ones M x N
# matrix): v^T M u = \sum_{i} \sum_{j} u_i * v_j = (\sum_{i} u_i)(\sum_{i} v_i)
# TODO: properly handle case when u is tuple instead of only taking first element
u = u[0] if isinstance(u, tuple) else u
sum_u = u.sum()
sum_v = 1.0 if v is None else v.sum()
return atol * float(sum_u) * float(sum_v)
FAST_FAIL_SLOW_OK_MSG = """
Fast gradcheck failed but element-wise differences are small. This means that the
test might've passed in slow_mode!
If you are adding a new operator, please file an issue and then use one of the
workarounds. The workaround depends on how your test invokes gradcheck/gradgradcheck:
If the test
- manually invokes gradcheck/gradgradcheck, then call gradcheck/gradgradcheck
with `fast_mode=False` as a keyword argument.
- is OpInfo-based (e.g., in test_ops_gradients.py), then modify the OpInfo for the test
to have `gradcheck_fast_mode=False`
- is a Module test (e.g., in common_nn.py), then modify the corresponding
module_test entry to have `gradcheck_fast_mode=False`
""".strip()
def _run_slow_mode_and_get_error(
    func, tupled_inputs, outputs, input_idx, output_idx, rtol, atol, eps, is_forward_ad
):
    """Recompute the (input_idx, output_idx) jacobians in slow mode and return
    a diagnostic string for a fast-mode failure.

    If the slow-mode jacobians actually agree within (rtol, atol), the message
    additionally suggests running with fast_mode=False.
    """
    # Compute jacobians in slow mode for better error message
    slow_numerical = _get_numerical_jacobian(
        func, tupled_inputs, outputs, eps=eps, is_forward_ad=is_forward_ad
    )[input_idx][output_idx]
    if is_forward_ad:

        def new_fn(inp):
            # Single-input view of func: vary only input_idx.
            new_inputs = list(tupled_inputs)
            new_inputs[input_idx] = inp
            return _as_tuple(func(*new_inputs))[output_idx]

        slow_analytical = _get_analytical_jacobian_forward_ad(
            new_fn, (tupled_inputs[input_idx],), (outputs[output_idx],)
        )[0][0]
    else:
        slow_analytical = _get_analytical_jacobian(
            tupled_inputs, outputs, input_idx, output_idx
        )

    # Assume jacobians are non-empty and have the same shape
    slow_max_diff = (slow_numerical - slow_analytical).abs().max()

    slow_allclose = torch.allclose(slow_analytical, slow_numerical, rtol, atol)

    msg = (
        "\nThe above quantities relating the numerical and analytical jacobians are computed \n"
        "in fast mode. See: https://github.com/pytorch/pytorch/issues/53876 for more background \n"
        "about fast mode. Below, we recompute numerical and analytical jacobians in slow mode:\n\n"
        f"Numerical:\n {slow_numerical}\n"
        f"Analytical:\n{slow_analytical}\n\n"
        f"The max per-element difference (slow mode) is: {slow_max_diff}.\n"
    )
    if slow_allclose:
        # Slow gradcheck would've passed!
        msg += FAST_FAIL_SLOW_OK_MSG
    return msg
def _to_flat_dense_if_sparse(tensor):
    """Densify and flatten *tensor* when it has any sparse layout; otherwise
    return it unchanged."""
    if not _is_sparse_any_tensor(tensor):
        return tensor
    return tensor.to_dense().reshape(-1)
def _make_vectors(inp_tensors, outputs, *, use_forward_ad):
    """Generate the random projection vectors for fast-mode gradcheck.

    Returns ``(all_v, all_u, all_u_dense)``: one u per input (a pair of u's
    for complex inputs), flattened-dense copies of the u's, and one v per
    output (``all_v`` is None in forward-AD mode, where v is not needed).
    """
    # Use our own generator to avoid messing with the user's RNG state
    g_cpu = torch.Generator()

    def _vec_from_tensor_cpu(*args):
        # Default allocate all tensors on CPU, so they are on the same device as the generator
        # even if the user specified a default device
        with torch.device("cpu"):
            return _vec_from_tensor(*args)

    all_u = []
    all_u_dense = []
    for inp in inp_tensors:
        ur = _vec_from_tensor_cpu(inp, g_cpu, True)
        ur_dense = _to_flat_dense_if_sparse(ur)
        if inp.is_complex():
            # Complex inputs need a second direction for the imaginary part.
            ui = _vec_from_tensor_cpu(inp, g_cpu, True)
            all_u.append((ur, ui))
            ui_dense = _to_flat_dense_if_sparse(ui)
            all_u_dense.append((ur_dense, ui_dense))
        else:
            all_u.append(ur)
            all_u_dense.append(ur_dense)
    all_v = (
        None
        if use_forward_ad
        else [_vec_from_tensor_cpu(out, g_cpu) for out in outputs]
    )
    return all_v, all_u, all_u_dense
def _check_analytical_numerical_equal(
    all_analytical,
    all_numerical,
    complex_indices,
    tupled_inputs,
    outputs,
    func,
    all_v,
    all_u,
    rtol,
    atol,
    eps,
    test_imag,
    *,
    is_forward_ad=False,
):
    """Compare projected analytical vs numerical jacobian quantities.

    On mismatch, recomputes the full jacobians in slow mode for a richer
    error message and raises GradcheckError.
    """
    for i, all_numerical_for_input_i in enumerate(all_numerical):
        for j, n in enumerate(all_numerical_for_input_i):
            # Forward AD generates the transpose of what this function expects
            if is_forward_ad:
                a = all_analytical[i][j]
            else:
                a = all_analytical[j][i]
            n = n.to(device=a.device)
            # atol must be rescaled because we compare projected quantities.
            updated_atol = _adjusted_atol(atol, all_u[i], all_v[j] if all_v else None)
            if not _allclose_with_type_promotion(a, n.to(a.device), rtol, updated_atol):
                jacobians_str = _run_slow_mode_and_get_error(
                    func, tupled_inputs, outputs, i, j, rtol, atol, eps, is_forward_ad
                )
                raise GradcheckError(
                    _get_notallclose_msg(
                        a, n, j, i, complex_indices, test_imag, is_forward_ad
                    )
                    + jacobians_str
                )
def _fast_gradcheck(
    func,
    func_out,
    inputs,
    outputs,
    eps,
    rtol,
    atol,
    check_grad_dtypes,
    nondet_tol,
    *,
    use_forward_ad=False,
    complex_indices=None,
    test_imag=False,
    masked=False,
):
    """Fast-mode gradcheck: compare projected jacobian quantities (v^T J u /
    J u) instead of full jacobians.

    Returns True on success; raises GradcheckError on mismatch.
    """
    # See https://github.com/pytorch/pytorch/issues/53876 for details
    inp_tensors_idx, inp_tensors = _get_inp_tensors(inputs)
    # Backward mode computes v^T * J (VJP)
    # Since we computed J * u (JVP) through finite difference method, we perform an equality check
    # between VJP * u, v * JVP
    # ----
    # Forward mode computes J * u (JVP)
    # Since we already compute JVP through finite difference method,
    # we don't need v for correctness check here as asserted below
    all_v, all_u, all_u_dense = _make_vectors(
        inp_tensors, outputs, use_forward_ad=use_forward_ad
    )

    # Non-masked mode densifies sparse inputs/vectors for the numerical path.
    inputs_numerical, all_u_numerical, all_v_numerical = (
        (inputs, all_u, all_v) if masked else _densify((inputs, all_u, all_v))
    )

    numerical_vJu = _get_numerical_vJu(
        func,
        inputs_numerical,
        inp_tensors_idx,
        func_out,
        all_u_numerical,
        all_v_numerical,
        eps,
        is_forward_ad=use_forward_ad,
    )
    # TODO: replicate https://github.com/pytorch/pytorch/pull/77743 for fast gradcheck as well
    if use_forward_ad:
        if all_v is not None:
            raise AssertionError("Expected all_v to be None.")
        analytical_vJu = _get_analytical_jacobian_forward_ad(
            func,
            inputs,
            _as_tuple(func_out),
            all_u=all_u,
            check_grad_dtypes=check_grad_dtypes,
        )
    else:
        if not outputs:
            _check_no_differentiable_outputs_fast(
                func, func_out, inputs, inp_tensors_idx, all_u, eps, nondet_tol
            )

        analytical_vJu = _get_analytical_vJu_backward_mode(
            inputs, outputs, nondet_tol, check_grad_dtypes, all_v, all_u_dense
        )

    _check_analytical_numerical_equal(
        analytical_vJu,
        numerical_vJu,
        complex_indices,
        inputs,
        outputs,
        func,
        all_v,
        all_u,
        rtol,
        atol,
        eps,
        test_imag,
        is_forward_ad=use_forward_ad,
    )

    return True
# Note [VarArg of Tensors]
# ~~~~~~~~~~~~~~~~~~~~~~~~
# 'func' accepts a vararg of tensors, which isn't expressible in the type system at the moment.
# If https://mypy.readthedocs.io/en/latest/additional_features.html?highlight=callable#extended-callable-types is accepted,
# the '...' first argument of Callable can be replaced with VarArg(Tensor).
# For now, we permit any input.
def gradcheck(
    # Fix: `Union[_TensorOrTensors]` was a redundant single-member Union
    # (typing collapses Union[X] to X); spell it plainly, matching
    # gradgradcheck's annotation. See Note [VarArg of Tensors].
    func: Callable[..., _TensorOrTensors],
    inputs: _TensorOrTensors,
    *,
    eps: float = 1e-6,
    atol: float = 1e-5,
    rtol: float = 1e-3,
    raise_exception: bool = True,
    nondet_tol: float = 0.0,
    check_undefined_grad: bool = True,
    check_grad_dtypes: bool = False,
    check_batched_grad: bool = False,
    check_batched_forward_grad: bool = False,
    check_forward_ad: bool = False,
    check_backward_ad: bool = True,
    fast_mode: bool = False,
    masked: Optional[bool] = None,
) -> bool:  # noqa: D400,D205
    r"""Check gradients computed via small finite differences against analytical
    gradients wrt tensors in :attr:`inputs` that are of floating point or complex type
    and with ``requires_grad=True``.

    The check between numerical and analytical gradients uses :func:`~torch.allclose`.

    For most of the complex functions we consider for optimization purposes, no notion of
    Jacobian exists. Instead, gradcheck verifies if the numerical and analytical values of
    the Wirtinger and Conjugate Wirtinger derivatives are consistent. Because the gradient
    computation is done under the assumption that the overall function has a real-valued
    output, we treat functions with complex output in a special way. For these functions,
    gradcheck is applied to two real-valued functions corresponding to taking the real
    components of the complex outputs for the first, and taking the imaginary components
    of the complex outputs for the second. For more details, check out
    :ref:`complex_autograd-doc`.

    .. note::
        The default values are designed for :attr:`input` of double precision.
        This check will likely fail if :attr:`input` is of less precision, e.g.,
        ``FloatTensor``.

    .. note::
        Gradcheck may fail when evaluated on non-differentiable points
        because the numerically computed gradients via finite differencing may differ
        those computed analytically (not necessarily because either is incorrect).
        For more context, see :ref:`non-differentiable-func-grad`.

    .. warning::
       If any checked tensor in :attr:`input` has overlapping memory, i.e.,
       different indices pointing to the same memory address (e.g., from
       :func:`torch.Tensor.expand`), this check will likely fail because the numerical
       gradients computed by point perturbation at such indices will change
       values at all other indices that share the same memory address.

    Args:
        func (function): a Python function that takes Tensor inputs and returns
            a Tensor or a tuple of Tensors
        inputs (tuple of Tensor or Tensor): inputs to the function
        eps (float, optional): perturbation for finite differences
        atol (float, optional): absolute tolerance
        rtol (float, optional): relative tolerance
        raise_exception (bool, optional): indicating whether to raise an exception if
            the check fails. The exception gives more information about the
            exact nature of the failure. This is helpful when debugging gradchecks.
        nondet_tol (float, optional): tolerance for non-determinism. When running
            identical inputs through the differentiation, the results must either match
            exactly (default, 0.0) or be within this tolerance.
        check_undefined_grad (bool, optional): if ``True``, check if undefined output grads
            are supported and treated as zeros, for ``Tensor`` outputs.
        check_batched_grad (bool, optional): if ``True``, check if we can compute
            batched gradients using prototype vmap support. Defaults to False.
        check_batched_forward_grad (bool, optional): if ``True``, checks if we can compute
            batched forward gradients using forward ad and prototype vmap support. Defaults to ``False``.
        check_forward_ad (bool, optional): if ``True``, check that the gradients computed with forward
            mode AD match the numerical ones. Defaults to ``False``.
        check_backward_ad (bool, optional): if ``False``, do not perform any checks that rely on
            backward mode AD to be implemented. Defaults to ``True``.
        fast_mode (bool, optional): Fast mode for gradcheck and gradgradcheck is currently only
            implemented for R to R functions. If none of the inputs and outputs are complex
            a faster implementation of gradcheck that no longer computes the entire jacobian
            is run; otherwise, we fall back to the slow implementation.
        masked (bool, optional): if ``True``, the gradients of unspecified elements of
            sparse tensors are ignored. Defaults to ``False``.
    Returns:
        ``True`` if all differences satisfy allclose condition

    Raises:
        GradcheckError: if the check fails and ``raise_exception`` is ``True``.
    """
    if not (check_forward_ad or check_backward_ad):
        raise AssertionError(
            "Expected at least one of check_forward_ad or check_backward_ad to be True"
        )
    if check_batched_grad and not check_backward_ad:
        raise AssertionError(
            "Setting check_batched_grad=True requires check_backward_ad to be True"
        )
    if check_batched_forward_grad and not check_forward_ad:
        raise AssertionError(
            "Setting check_batched_forward_grad=True requires check_forward_ad to be True"
        )
    # Forward every keyword argument (except raise_exception, which only
    # controls error reporting here) to the helper verbatim.
    args = locals().copy()
    args.pop("raise_exception")
    if not raise_exception:
        try:
            return _gradcheck_helper(**args)
        except GradcheckError:
            return False
    else:
        return _gradcheck_helper(**args)
def _gradcheck_helper(
    func,
    inputs,
    eps,
    atol,
    rtol,
    nondet_tol,
    check_undefined_grad,
    check_grad_dtypes,
    check_batched_grad,
    check_batched_forward_grad,
    check_forward_ad,
    check_backward_ad,
    fast_mode,
    masked,
):
    """Run the full gradcheck pipeline; always raises GradcheckError on
    failure (the public ``gradcheck`` converts that to False when
    ``raise_exception=False``)."""
    tupled_inputs = _as_tuple(inputs)
    _check_inputs(tupled_inputs)

    func_out = func(*tupled_inputs)
    outputs = _differentiable_outputs(func_out)
    _check_outputs(outputs)

    gradcheck_fn = functools.partial(
        _fast_gradcheck if fast_mode else _slow_gradcheck, masked=masked
    )
    _gradcheck_real_imag(
        gradcheck_fn,
        func,
        func_out,
        tupled_inputs,
        outputs,
        eps,
        rtol,
        atol,
        check_grad_dtypes,
        check_forward_ad=check_forward_ad,
        check_backward_ad=check_backward_ad,
        nondet_tol=nondet_tol,
        check_undefined_grad=check_undefined_grad,
    )

    if check_batched_forward_grad:
        _test_batched_grad_forward_ad(func, tupled_inputs)

    # Short circuit because remaining tests rely on backward AD to be implemented
    if not check_backward_ad:
        return True

    for i, o in enumerate(outputs):
        if check_batched_grad:
            _test_batched_grad(tupled_inputs, o, i)

    _test_backward_mul_by_grad_output(outputs, tupled_inputs, masked)

    if check_undefined_grad and check_backward_ad:
        _test_undefined_backward_mode(func, outputs, tupled_inputs)
    return True
def gradgradcheck(
    func: Callable[..., _TensorOrTensors],  # See Note [VarArg of Tensors]
    inputs: _TensorOrTensors,
    grad_outputs: Optional[_TensorOrTensors] = None,
    *,
    eps: float = 1e-6,
    atol: float = 1e-5,
    rtol: float = 1e-3,
    gen_non_contig_grad_outputs: bool = False,
    raise_exception: bool = True,
    nondet_tol: float = 0.0,
    check_undefined_grad: bool = True,
    check_grad_dtypes: bool = False,
    check_batched_grad: bool = False,
    check_fwd_over_rev: bool = False,
    check_rev_over_rev: bool = True,
    fast_mode: bool = False,
    masked: bool = False,
) -> bool:  # noqa: D400,D205
    r"""Check gradients of gradients computed via small finite differences
    against analytical gradients wrt tensors in :attr:`inputs` and
    :attr:`grad_outputs` that are of floating point or complex type and with
    ``requires_grad=True``.

    This function checks that backpropagating through the gradients computed
    to the given :attr:`grad_outputs` are correct.

    The check between numerical and analytical gradients uses :func:`~torch.allclose`.

    .. note::
        The default values are designed for :attr:`input` and
        :attr:`grad_outputs` of double precision. This check will likely fail if
        they are of less precision, e.g., ``FloatTensor``.

    .. warning::
       If any checked tensor in :attr:`input` and :attr:`grad_outputs` has
       overlapping memory, i.e., different indices pointing to the same memory
       address (e.g., from :func:`torch.Tensor.expand`), this check will likely fail
       because the numerical gradients computed by point perturbation at such
       indices will change values at all other indices that share the same
       memory address.

    Args:
        func (function): a Python function that takes Tensor inputs and returns
            a Tensor or a tuple of Tensors
        inputs (tuple of Tensor or Tensor): inputs to the function
        grad_outputs (tuple of Tensor or Tensor, optional): The gradients with
            respect to the function's outputs.
        eps (float, optional): perturbation for finite differences
        atol (float, optional): absolute tolerance
        rtol (float, optional): relative tolerance
        gen_non_contig_grad_outputs (bool, optional): if :attr:`grad_outputs` is
            ``None`` and :attr:`gen_non_contig_grad_outputs` is ``True``, the
            randomly generated gradient outputs are made to be noncontiguous
        raise_exception (bool, optional): indicating whether to raise an exception if
            the check fails. The exception gives more information about the
            exact nature of the failure. This is helpful when debugging gradchecks.
        nondet_tol (float, optional): tolerance for non-determinism. When running
            identical inputs through the differentiation, the results must either match
            exactly (default, 0.0) or be within this tolerance. Note that a small amount
            of nondeterminism in the gradient will lead to larger inaccuracies in
            the second derivative.
        check_undefined_grad (bool, optional): if True, check if undefined output grads
            are supported and treated as zeros
        check_batched_grad (bool, optional): if True, check if we can compute
            batched gradients using prototype vmap support. Defaults to False.
        fast_mode (bool, optional): if True, run a faster implementation of gradgradcheck that
            no longer computes the entire jacobian.
        masked (bool, optional): if True, the gradients of unspecified elements of
            sparse tensors are ignored (default, False).
    Returns:
        True if all differences satisfy allclose condition
    """
    if not (check_fwd_over_rev or check_rev_over_rev):
        raise AssertionError(
            "Expected at least one of check_fwd_over_rev or check_rev_over_rev to be True"
        )
    if check_undefined_grad and not check_rev_over_rev:
        raise AssertionError(
            "Setting check_undefined_grad=True requires check_rev_over_rev to be True"
        )
    if check_batched_grad and not check_rev_over_rev:
        raise AssertionError(
            "Setting check_batched_grad=True requires check_rev_over_rev to be True"
        )
    # TODO: do we want to test this too?
    # assert not (check_batched_forward_grad and not check_fwd_over_rev), (
    #     "Setting check_batched_forward_grad=True requires check_fwd_over_rev to be True")
    tupled_inputs = _as_tuple(inputs)

    if grad_outputs is None:
        # If grad_outputs is not specified, create random Tensors of the same shape, type, and device as the outputs
        outputs = _differentiable_outputs(func(*tupled_inputs))
        tupled_grad_outputs = tuple(
            torch.testing.make_tensor(
                x.shape,
                dtype=x.dtype
                if x.is_floating_point() or x.is_complex()
                else torch.double,
                device=x.device,
                low=-1,
                high=1,
                requires_grad=True,
                noncontiguous=gen_non_contig_grad_outputs,
            )
            for x in outputs
        )
    else:
        tupled_grad_outputs = _as_tuple(grad_outputs)

    num_outputs = len(tupled_grad_outputs)

    # NB: We need to save the requires_grad information about the inputs here because gradcheck detaches inputs
    # before running forward mode AD
    diff_input_args_indices = {
        i for i, x in enumerate(tupled_inputs) if is_tensor_like(x) and x.requires_grad
    }
    diff_grad_output_indices = {
        i for i, x in enumerate(tupled_grad_outputs) if x.requires_grad
    }

    def new_func(*args):
        # Restore the requires_grad information
        input_args = tuple(
            x.requires_grad_() if i in diff_input_args_indices else x
            for i, x in enumerate(args[:-num_outputs])
        )
        outputs = _differentiable_outputs(func(*input_args))
        grad_outputs = tuple(
            x.requires_grad_() if i in diff_grad_output_indices else x
            for i, x in enumerate(args[-num_outputs:])
        )
        diff_input_args = tuple(
            x for i, x in enumerate(input_args) if i in diff_input_args_indices
        )
        grad_inputs = torch.autograd.grad(
            outputs, diff_input_args, grad_outputs, create_graph=True, allow_unused=True
        )
        grad_inputs = tuple(g for g in grad_inputs if g is not None)
        return grad_inputs

    # Second-order check reduces to a first-order gradcheck of new_func over
    # (inputs, grad_outputs).
    return gradcheck(
        new_func,
        tupled_inputs + tupled_grad_outputs,
        eps=eps,
        atol=atol,
        rtol=rtol,
        raise_exception=raise_exception,
        nondet_tol=nondet_tol,
        check_undefined_grad=check_undefined_grad,
        check_grad_dtypes=check_grad_dtypes,
        check_batched_grad=check_batched_grad,
        fast_mode=fast_mode,
        check_forward_ad=check_fwd_over_rev,
        check_backward_ad=check_rev_over_rev,
        masked=masked,
    )
| GradcheckError |
python | huggingface__transformers | examples/modular-transformers/modeling_add_function.py | {
"start": 2399,
"end": 3460
} | class ____(nn.Module):
"""
Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
and "Generating Long Sequences with Sparse Transformers".
Adapted from transformers.models.mistral.modeling_mistral.MistralAttention:
The input dimension here is attention_hidden_size = 2 * hidden_size, and head_dim = attention_hidden_size // num_heads.
The extra factor of 2 comes from the input being the concatenation of original_hidden_states with the output of the previous (mamba) layer
(see fig. 2 in https://huggingface.co/papers/2405.16712).
Additionally, replaced
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) with
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim/2)
"""
def __init__(self):
pass
def forward(self) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
_ = apply_rotary_pos_emb(1, 1, 1, 1)
| TestAttention |
python | numpy__numpy | numpy/exceptions.py | {
"start": 2780,
"end": 5644
} | class ____(ValueError, IndexError):
"""Axis supplied was invalid.
This is raised whenever an ``axis`` parameter is specified that is larger
than the number of array dimensions.
For compatibility with code written against older numpy versions, which
raised a mixture of :exc:`ValueError` and :exc:`IndexError` for this
situation, this exception subclasses both to ensure that
``except ValueError`` and ``except IndexError`` statements continue
to catch ``AxisError``.
Parameters
----------
axis : int or str
The out of bounds axis or a custom exception message.
If an axis is provided, then `ndim` should be specified as well.
ndim : int, optional
The number of array dimensions.
msg_prefix : str, optional
A prefix for the exception message.
Attributes
----------
axis : int, optional
The out of bounds axis or ``None`` if a custom exception
message was provided. This should be the axis as passed by
the user, before any normalization to resolve negative indices.
.. versionadded:: 1.22
ndim : int, optional
The number of array dimensions or ``None`` if a custom exception
message was provided.
.. versionadded:: 1.22
Examples
--------
>>> import numpy as np
>>> array_1d = np.arange(10)
>>> np.cumsum(array_1d, axis=1)
Traceback (most recent call last):
...
numpy.exceptions.AxisError: axis 1 is out of bounds for array of dimension 1
Negative axes are preserved:
>>> np.cumsum(array_1d, axis=-2)
Traceback (most recent call last):
...
numpy.exceptions.AxisError: axis -2 is out of bounds for array of dimension 1
The class constructor generally takes the axis and arrays'
dimensionality as arguments:
>>> print(np.exceptions.AxisError(2, 1, msg_prefix='error'))
error: axis 2 is out of bounds for array of dimension 1
Alternatively, a custom exception message can be passed:
>>> print(np.exceptions.AxisError('Custom error message'))
Custom error message
"""
__slots__ = ("_msg", "axis", "ndim")
def __init__(self, axis, ndim=None, msg_prefix=None):
if ndim is msg_prefix is None:
# single-argument form: directly set the error message
self._msg = axis
self.axis = None
self.ndim = None
else:
self._msg = msg_prefix
self.axis = axis
self.ndim = ndim
def __str__(self):
axis = self.axis
ndim = self.ndim
if axis is ndim is None:
return self._msg
else:
msg = f"axis {axis} is out of bounds for array of dimension {ndim}"
if self._msg is not None:
msg = f"{self._msg}: {msg}"
return msg
| AxisError |
python | PyCQA__pylint | tests/functional/n/no/no_member_dataclasses.py | {
"start": 1457,
"end": 1715
} | class ____:
attr1: str
attr2: str
dict_prop: Dict[str, str] = field(default_factory=dict)
def some_func(self) -> None:
for key, value in self.dict_prop.items(): # No error here
print(key)
print(value)
| TestClass |
python | doocs__leetcode | solution/2900-2999/2943.Maximize Area of Square Hole in Grid/Solution.py | {
"start": 0,
"end": 496
} | class ____:
def maximizeSquareHoleArea(
self, n: int, m: int, hBars: List[int], vBars: List[int]
) -> int:
def f(nums: List[int]) -> int:
nums.sort()
ans = cnt = 1
for i in range(1, len(nums)):
if nums[i] == nums[i - 1] + 1:
cnt += 1
ans = max(ans, cnt)
else:
cnt = 1
return ans + 1
return min(f(hBars), f(vBars)) ** 2
| Solution |
python | huggingface__transformers | tests/trainer/test_trainer_fsdp.py | {
"start": 1998,
"end": 2856
} | class ____(TestCasePlus):
@require_torch_multi_accelerator
@require_accelerate
@run_first
def test_trainer(self):
output_dir = self.get_auto_remove_tmp_dir()
cmd = [
"accelerate",
"launch",
"--use_fsdp",
"--main_process_port",
f"{get_torch_dist_unique_port()}",
"--num_processes",
f"{backend_device_count(torch_device)}",
"--fsdp_transformer_layer_cls_to_wrap",
"GPT2Block",
f"{self.test_file_dir}/test_trainer_fsdp.py",
"--output_dir",
f"{output_dir}",
"--report_to",
"none",
]
execute_subprocess_async(cmd, env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
| TestFSDPTrainer |
python | Textualize__textual | docs/examples/events/dictionary.py | {
"start": 292,
"end": 1387
} | class ____(App):
"""Searches a dictionary API as-you-type."""
CSS_PATH = "dictionary.tcss"
def compose(self) -> ComposeResult:
yield Input(placeholder="Search for a word")
yield VerticalScroll(Static(id="results"), id="results-container")
async def on_input_changed(self, message: Input.Changed) -> None:
"""A coroutine to handle a text changed message."""
if message.value:
# Look up the word in the background
asyncio.create_task(self.lookup_word(message.value))
else:
# Clear the results
self.query_one("#results", Static).update()
async def lookup_word(self, word: str) -> None:
"""Looks up a word."""
url = f"https://api.dictionaryapi.dev/api/v2/entries/en/{word}"
async with httpx.AsyncClient() as client:
results = (await client.get(url)).text
if word == self.query_one(Input).value:
self.query_one("#results", Static).update(JSON(results))
if __name__ == "__main__":
app = DictionaryApp()
app.run()
| DictionaryApp |
python | huggingface__transformers | src/transformers/models/sam3/processing_sam3.py | {
"start": 2278,
"end": 28869
} | class ____(ProcessorMixin):
r"""
Constructs a SAM3 processor which wraps a SAM3 image processor and bounding boxes processing into a
single processor.
[`Sam2Processor`] offers all the functionalities of [`Sam2ImageProcessorFast`] and [`Sam2VideoProcessor`]. See the docstring of
[`~Sam2ImageProcessorFast.__call__`] and [`~Sam2VideoProcessor.__call__`] for more information.
Args:
image_processor (`Sam2ImageProcessorFast`):
An instance of [`Sam2ImageProcessorFast`].
tokenizer ([`PreTrainedTokenizer`, `PreTrainedTokenizerFast`]):
An instance of [`PreTrainedTokenizer`, `PreTrainedTokenizerFast`]. The tokenizer is a required input.
target_size (`int`, *optional*):
The target size (target_size, target_size) to which the image will be resized.
point_pad_value (`int`, *optional*, defaults to -10):
The value used for padding input boxes.
"""
def __init__(
self, image_processor, tokenizer, target_size: Optional[int] = None, point_pad_value: int = -10, **kwargs
):
super().__init__(image_processor, tokenizer, **kwargs)
self.point_pad_value = point_pad_value
self.target_size = target_size if target_size is not None else self.image_processor.size["height"]
def __call__(
self,
images: Optional[ImageInput] = None,
text: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]] = None,
segmentation_maps: Optional[ImageInput] = None,
input_boxes: Optional[Union[list[list[list[float]]], torch.Tensor]] = None,
input_boxes_labels: Optional[Union[list[list[list[int]]], torch.Tensor]] = None,
original_sizes: Optional[Union[list[list[float]], torch.Tensor]] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
**kwargs,
) -> BatchEncoding:
r"""
This method uses [`Sam3ImageProcessorFast.__call__`] method to prepare image(s) for the model. It also prepares bounding boxes for the model if they are provided.
Args:
images (`ImageInput`, *optional*):
The image(s) to process.
text (`str`, `list[str]`, `list[list[str]]`, *optional*):
The text to process.
segmentation_maps (`ImageInput`, *optional*):
The segmentation maps to process.
input_boxes (`list[list[list[float]]]`, `torch.Tensor`, *optional*):
The bounding boxes to process.
input_boxes_labels (`list[list[int]]`, `torch.Tensor`, *optional*):
The labels for the bounding boxes.
original_sizes (`list[list[float]]`, `torch.Tensor`, *optional*):
The original sizes of the images.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return.
**kwargs:
Additional keyword arguments to pass to the image processor.
Returns:
A [`BatchEncoding`] with the following fields:
- `pixel_values` (`torch.Tensor`): The processed image(s).
- `original_sizes` (`list[list[float]]`): The original sizes of the images.
- `labels` (`torch.Tensor`): The processed segmentation maps (if provided).
- `input_boxes_labels` (`torch.Tensor`): The processed labels for the bounding boxes.
- `input_boxes` (`torch.Tensor`): The processed bounding boxes.
"""
encoding = None
if images is not None:
encoding = self.image_processor(
images,
segmentation_maps=segmentation_maps,
return_tensors=return_tensors,
**kwargs,
)
elif original_sizes is not None:
if isinstance(original_sizes, torch.Tensor):
original_sizes = original_sizes.cpu().tolist()
encoding = BatchEncoding({"original_sizes": original_sizes}, tensor_type=return_tensors)
elif input_boxes is not None:
raise ValueError("Either images or original_sizes must be provided if input_boxes is not None")
text = self._resolve_text_prompts(text, input_boxes)
if text is not None:
text_inputs = self.tokenizer(text, return_tensors=return_tensors, padding="max_length", max_length=32)
if encoding is not None:
encoding.update(text_inputs)
else:
encoding = text_inputs
# Process input boxes if provided
if input_boxes is not None:
original_sizes = encoding["original_sizes"]
# Validate and convert inputs to standardized format
processed_boxes = self._validate_single_input(
input_boxes,
expected_depth=3,
input_name="boxes",
expected_format="[image level, box level, box coordinates]",
expected_coord_size=4,
)
processed_boxes_labels = self._validate_single_input(
input_boxes_labels,
expected_depth=2,
input_name="labels",
expected_format="[image level, box level]",
)
# Get padding requirements for all inputs
if processed_boxes is not None:
boxes_max_dims = self._get_nested_dimensions(processed_boxes)[:2]
if processed_boxes_labels is not None:
boxes_labels_max_dims = self._get_nested_dimensions(processed_boxes_labels)[:2]
# Ensure boxes and labels have consistent dimensions
if processed_boxes is not None and processed_boxes_labels is not None:
if boxes_max_dims != boxes_labels_max_dims:
raise ValueError(
"Input boxes and labels have inconsistent dimensions. Please ensure they have the same dimensions."
)
# Pad and normalize all inputs to final tensor format
if processed_boxes is not None:
padded_boxes = self._pad_nested_list(processed_boxes, boxes_max_dims + [4])
final_boxes = torch.tensor(padded_boxes, dtype=torch.float32)
self._normalize_tensor_coordinates(
final_boxes, original_sizes, is_bounding_box=True, preserve_padding=True
)
final_boxes = box_xyxy_to_cxcywh(final_boxes)
encoding.update({"input_boxes": final_boxes})
if processed_boxes_labels is not None:
padded_boxes_labels = self._pad_nested_list(processed_boxes_labels, boxes_labels_max_dims)
final_boxes_labels = torch.tensor(padded_boxes_labels, dtype=torch.int64)
encoding.update({"input_boxes_labels": final_boxes_labels})
return encoding
def _normalize_coordinates(self, coords: "torch.Tensor", original_size, is_bounding_box=False) -> "torch.Tensor":
"""
Expects a numpy array of length 2 in the final dimension. Requires the original image size in (H, W) format.
Args:
target_size (`int`):
The target size of the image.
coords (`torch.Tensor`):
The coordinates to be normalized.
original_size (`tuple`):
The original size of the image.
is_bounding_box (`bool`, *optional*, defaults to `False`):
Whether the coordinates are bounding boxes.
"""
old_h, old_w = original_size
coords = deepcopy(coords).float()
if is_bounding_box:
coords = coords.reshape(-1, 2, 2)
coords[..., 0] = coords[..., 0] / old_w
coords[..., 1] = coords[..., 1] / old_h
if is_bounding_box:
coords = coords.reshape(-1, 4)
return coords
def _convert_to_nested_list(self, data, expected_depth, current_depth=0):
"""
Recursively convert various input formats (tensors, numpy arrays, lists) to nested lists.
Preserves None values within lists.
Args:
data: Input data in any format (may be None or contain None values)
expected_depth: Expected nesting depth
current_depth: Current depth in recursion
Returns:
Nested list representation of the data (or None)
"""
if data is None:
return None
# Convert tensor/numpy to list if we're at a leaf level or if it's a multi-dimensional array
if isinstance(data, torch.Tensor): # PyTorch tensor
if current_depth == expected_depth - 2 or len(data.shape) <= 2: # At coordinate level or small tensor
return data.numpy().tolist()
else:
return [self._convert_to_nested_list(item, expected_depth, current_depth + 1) for item in data]
elif isinstance(data, np.ndarray): # NumPy array
if current_depth == expected_depth - 2 or len(data.shape) <= 2: # At coordinate level or small array
return data.tolist()
else:
return [self._convert_to_nested_list(item, expected_depth, current_depth + 1) for item in data]
elif isinstance(data, list):
if current_depth == expected_depth:
# We've reached the expected depth, return as is
return data
else:
# Continue recursion, preserving None values
return [
self._convert_to_nested_list(item, expected_depth, current_depth + 1) if item is not None else None
for item in data
]
elif isinstance(data, (int, float)):
return data
else:
raise ValueError(f"Unsupported data type: {type(data)}")
def _resolve_text_prompts(self, text, input_boxes):
"""
Resolve text prompts by setting defaults based on prompt types.
"""
# If no text provided, infer default based on prompt type
if text is None:
return "visual" if input_boxes else None
if not isinstance(text, (list, tuple)):
return text
# Validate list/tuple length matches both prompt types if provided
text = list(text) # Convert to list to allow modification
if input_boxes and len(text) != len(input_boxes):
raise ValueError(
f"The number of text prompts must match the number of input boxes. "
f"Got {len(text)} text prompts and {len(input_boxes)} input boxes."
)
# Fill in None values with defaults based on corresponding prompt
for i, text_value in enumerate(text):
if text_value is None and input_boxes and input_boxes[i] is not None:
text[i] = "visual"
return text
def _get_nested_dimensions(self, nested_list, max_dims=None):
"""
Get the maximum dimensions at each level of nesting, skipping None values.
Args:
nested_list (`list`):
Nested list structure (may contain None values).
max_dims (`list`, *optional*):
Current maximum dimensions (for recursion).
Returns:
`list`: A list of maximum dimensions for each nesting level.
"""
if max_dims is None:
max_dims = []
if not isinstance(nested_list, list):
return max_dims
if len(max_dims) == 0:
max_dims.append(len(nested_list))
else:
max_dims[0] = max(max_dims[0], len(nested_list))
if len(nested_list) > 0:
for item in nested_list:
# Skip None values
if item is None:
continue
if isinstance(item, list):
sub_dims = self._get_nested_dimensions(item)
# Merge sub_dims into max_dims
for i, dim in enumerate(sub_dims):
if i + 1 >= len(max_dims):
max_dims.append(dim)
else:
max_dims[i + 1] = max(max_dims[i + 1], dim)
return max_dims
def _pad_nested_list(self, nested_list, target_dims, current_level=0, pad_value=None):
"""
Recursively pad a nested list to match target dimensions. Replaces None values with padded structures.
Args:
nested_list (`list`):
Nested list to pad (may contain None values).
target_dims (`list`):
Target dimensions for each level.
current_level (`int`, *optional*, defaults to 0):
Current nesting level.
pad_value (`int`, *optional*):
Value to use for padding.
Returns:
`list`: The padded nested list.
"""
if pad_value is None:
pad_value = self.point_pad_value
if current_level >= len(target_dims):
return nested_list
# Ensure we have a list
if not isinstance(nested_list, list):
nested_list = [nested_list]
# Pad current level
current_size = len(nested_list)
target_size = target_dims[current_level]
# Pad with appropriate values
if current_level == len(target_dims) - 1:
# At the coordinate level, pad with pad_value
nested_list.extend([pad_value] * (target_size - current_size))
else:
# At higher levels, pad with nested structures
if current_size > 0:
# Create appropriately sized template
if current_level < len(target_dims) - 2:
# For non-coordinate levels, create empty nested structure
template_dims = target_dims[current_level + 1 :]
template = self._create_empty_nested_structure(template_dims, pad_value)
else:
# For coordinate level, create list of pad_values
template = [pad_value] * target_dims[current_level + 1]
nested_list.extend([deepcopy(template) for _ in range(target_size - current_size)])
else:
# Create from scratch
template_dims = target_dims[current_level + 1 :]
template = self._create_empty_nested_structure(template_dims, pad_value)
nested_list.extend([deepcopy(template) for _ in range(target_size)])
# Recursively pad sublists, replacing None with padded structures
if current_level < len(target_dims) - 1:
for i in range(len(nested_list)):
if nested_list[i] is None:
# Replace None with fully padded structure
template_dims = target_dims[current_level + 1 :]
nested_list[i] = self._create_empty_nested_structure(template_dims, pad_value)
elif isinstance(nested_list[i], list):
nested_list[i] = self._pad_nested_list(nested_list[i], target_dims, current_level + 1, pad_value)
return nested_list
def _create_empty_nested_structure(self, dims, pad_value):
"""
Create an empty nested structure with given dimensions filled with pad_value.
Args:
dims (`list`):
The dimensions of the nested structure.
pad_value (`int`):
The value to fill the structure with.
"""
if len(dims) == 1:
return [pad_value] * dims[0]
else:
return [self._create_empty_nested_structure(dims[1:], pad_value) for _ in range(dims[0])]
def _get_nesting_level(self, input_list):
"""
Get the nesting level of a list structure, skipping None values.
Args:
input_list (`list`):
The list to get the nesting level of.
"""
if isinstance(input_list, list):
if len(input_list) == 0:
return 1
# Find first non-None element to determine nesting level
for item in input_list:
if item is not None:
return 1 + self._get_nesting_level(item)
# All elements are None, treat as single level
return 1
elif isinstance(input_list, (np.ndarray, torch.Tensor)):
# For arrays/tensors, the nesting level is the number of dimensions
return len(input_list.shape)
return 0
def _validate_single_input(
self,
data: Union[torch.Tensor, np.ndarray, list],
expected_depth: int,
input_name: str,
expected_format: str,
expected_coord_size: Optional[int] = None,
) -> list:
"""
Validate a single input by ensuring proper nesting and raising an error if the input is not valid.
Args:
data (`torch.Tensor`, `np.ndarray`, or `list`):
Input data to process.
expected_depth (`int`):
Expected nesting depth.
input_name (`str`):
Name of the input for error messages.
expected_format (`str`):
The expected format of the input.
expected_coord_size (`int`, *optional*):
Expected coordinate size (4 for boxes, None for labels).
.
"""
if data is None:
return None
# Handle tensors and numpy arrays first
if isinstance(data, (torch.Tensor, np.ndarray)):
# For tensors/arrays, we can directly check the number of dimensions
if data.ndim != expected_depth:
raise ValueError(
f"Input {input_name} must be a tensor/array with {expected_depth} dimensions. The expected nesting format is {expected_format}. Got {data.ndim} dimensions."
)
elif expected_coord_size is not None:
if data.shape[-1] != expected_coord_size:
raise ValueError(
f"Input {input_name} must be a tensor/array with {expected_coord_size} as the last dimension, got {data.shape[-1]}."
)
return self._convert_to_nested_list(data, expected_depth)
# Handle nested lists
if isinstance(data, list):
current_depth = self._get_nesting_level(data)
if current_depth != expected_depth:
raise ValueError(
f"Input {input_name} must be a nested list with {expected_depth} levels. The expected nesting format is {expected_format}. Got {current_depth} levels."
)
return self._convert_to_nested_list(data, expected_depth)
def _normalize_tensor_coordinates(self, tensor, original_sizes, is_bounding_box=False, preserve_padding=False):
"""
Helper method to normalize coordinates in a tensor across multiple images.
Args:
tensor (`torch.Tensor`):
Input tensor with coordinates.
original_sizes (`list`):
Original image sizes.
is_bounding_box (`bool`, *optional*, defaults to `False`):
Whether coordinates are bounding boxes.
preserve_padding (`bool`, *optional*, defaults to `False`):
Whether to preserve padding values (for boxes).
"""
if preserve_padding:
# For boxes: avoid normalizing pad values
mask = tensor != self.point_pad_value
coord_mask = mask.all(dim=-1, keepdim=True)
for img_idx in range(len(original_sizes)):
if img_idx < tensor.shape[0]:
original_size = original_sizes[img_idx] if img_idx < len(original_sizes) else original_sizes[0]
normalized_coords = self._normalize_coordinates(
tensor[img_idx], original_size, is_bounding_box=is_bounding_box
)
if preserve_padding:
# Only update non-padded values
img_mask = coord_mask[img_idx]
tensor[img_idx] = torch.where(
img_mask.expand_as(tensor[img_idx]), normalized_coords, tensor[img_idx]
)
else:
tensor[img_idx] = normalized_coords
def post_process_semantic_segmentation(self, outputs, target_sizes=None, threshold=0.5):
"""
Converts the output of [`Sam3Model`] into semantic segmentation maps.
Args:
outputs ([`Sam3ImageSegmentationOutput`]):
Raw outputs of the model containing semantic_seg.
target_sizes (`list[tuple]` of length `batch_size`, *optional*):
List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
predictions will not be resized.
threshold (`float`, *optional*, defaults to 0.5):
Threshold for binarizing the semantic segmentation masks.
Returns:
semantic_segmentation: `list[torch.Tensor]` of length `batch_size`, where each item is a semantic
segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
specified). Each entry is a binary mask (0 or 1).
"""
return self.image_processor.post_process_semantic_segmentation(outputs, target_sizes, threshold)
def post_process_object_detection(self, outputs, threshold=0.3, target_sizes=None):
"""
Converts the raw output of [`Sam3Model`] into final bounding boxes in (top_left_x, top_left_y,
bottom_right_x, bottom_right_y) format. This is a convenience wrapper around the image processor method.
Args:
outputs ([`Sam3ImageSegmentationOutput`]):
Raw outputs of the model containing pred_boxes, pred_logits, and optionally presence_logits.
threshold (`float`, *optional*, defaults to 0.3):
Score threshold to keep object detection predictions.
target_sizes (`list[tuple[int, int]]`, *optional*):
List of tuples (`tuple[int, int]`) containing the target size `(height, width)` of each image in the
batch. If unset, predictions will not be resized.
Returns:
`list[dict]`: A list of dictionaries, each dictionary containing the following keys:
- **scores** (`torch.Tensor`): The confidence scores for each predicted box on the image.
- **boxes** (`torch.Tensor`): Image bounding boxes in (top_left_x, top_left_y, bottom_right_x,
bottom_right_y) format.
Example:
```python
>>> from transformers import AutoModel, AutoProcessor
>>> from PIL import Image
>>> import requests
>>> model = AutoModel.from_pretrained("facebook/sam3-base")
>>> processor = AutoProcessor.from_pretrained("facebook/sam3-base")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, text="cat", return_tensors="pt")
>>> outputs = model(**inputs)
>>> # Post-process to get bounding boxes
>>> results = processor.post_process_object_detection(outputs, threshold=0.3, target_sizes=[image.size[::-1]])
>>> boxes = results[0]["boxes"]
>>> scores = results[0]["scores"]
```
"""
return self.image_processor.post_process_object_detection(outputs, threshold, target_sizes)
def post_process_instance_segmentation(
self,
outputs,
threshold=0.3,
mask_threshold=0.5,
target_sizes=None,
):
"""
Converts the raw output of [`Sam3Model`] into instance segmentation predictions with bounding boxes and masks.
This is a convenience wrapper around the image processor method.
Args:
outputs ([`Sam3ImageSegmentationOutput`]):
Raw outputs of the model containing pred_boxes, pred_logits, pred_masks, and optionally
presence_logits.
threshold (`float`, *optional*, defaults to 0.3):
Score threshold to keep instance predictions.
mask_threshold (`float`, *optional*, defaults to 0.5):
Threshold for binarizing the predicted masks.
target_sizes (`list[tuple[int, int]]`, *optional*):
List of tuples (`tuple[int, int]`) containing the target size `(height, width)` of each image in the
batch. If unset, predictions will not be resized.
Returns:
`list[dict]`: A list of dictionaries, each dictionary containing the following keys:
- **scores** (`torch.Tensor`): The confidence scores for each predicted instance on the image.
- **boxes** (`torch.Tensor`): Image bounding boxes in (top_left_x, top_left_y, bottom_right_x,
bottom_right_y) format.
- **masks** (`torch.Tensor`): Binary segmentation masks for each instance, shape (num_instances,
height, width).
Example:
```python
>>> from transformers import AutoModel, AutoProcessor
>>> from PIL import Image
>>> import requests
>>> model = AutoModel.from_pretrained("facebook/sam3-base")
>>> processor = AutoProcessor.from_pretrained("facebook/sam3-base")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, text="cat", return_tensors="pt")
>>> outputs = model(**inputs)
>>> # Post-process to get instance segmentation
>>> results = processor.post_process_instance_segmentation(
... outputs, threshold=0.3, target_sizes=[image.size[::-1]]
... )
>>> masks = results[0]["masks"]
>>> boxes = results[0]["boxes"]
>>> scores = results[0]["scores"]
```
"""
return self.image_processor.post_process_instance_segmentation(
outputs, threshold, mask_threshold, target_sizes
)
__all__ = ["Sam3Processor"]
| Sam3Processor |
python | getsentry__sentry | tests/sentry/models/test_organization.py | {
"start": 1487,
"end": 7042
} | class ____(TestCase, HybridCloudTestMixin):
def test_slugify_on_new_orgs(self) -> None:
org = Organization.objects.create(name="name", slug="---downtown_canada---")
assert org.slug == "downtown-canada"
# Only slugify on new instances of Organization
org.slug = "---downtown_canada---"
org.save()
org.refresh_from_db()
assert org.slug == "---downtown_canada---"
org = Organization.objects.create(name="---foo_bar---")
assert org.slug == "foo-bar"
def test_slugify_long_org_names(self) -> None:
# Org name is longer than allowed org slug, and should be trimmed when slugified.
org = Organization.objects.create(name="Stove, Electrical, and Catering Stuff")
assert org.slug == "stove-electrical-and-catering"
# Ensure org slugs are unique
org2 = Organization.objects.create(name="Stove, Electrical, and Catering Stuff")
assert org2.slug.startswith("stove-electrical-and-cateri-")
assert len(org2.slug) > len("stove-electrical-and-cateri-")
assert org.slug != org2.slug
def test_get_default_owner(self) -> None:
user = self.create_user("foo@example.com")
org = self.create_organization(owner=user)
assert org.get_default_owner().id == user.id
def test_default_owner_id(self) -> None:
user = self.create_user("foo@example.com")
org = self.create_organization(owner=user)
assert org.default_owner_id == user.id
def test_default_owner_id_no_owner(self) -> None:
org = self.create_organization()
assert org.default_owner_id is None
@mock.patch.object(
Organization,
"get_members_with_org_roles",
side_effect=Organization.get_members_with_org_roles,
autospec=True,
)
def test_default_owner_id_cached(self, mock_get_owners: mock.MagicMock) -> None:
user = self.create_user("foo@example.com")
org = self.create_organization(owner=user)
assert org.default_owner_id == user.id
assert mock_get_owners.call_count == 1
assert org.default_owner_id == user.id
assert mock_get_owners.call_count == 1
def test_flags_have_changed(self) -> None:
org = self.create_organization()
update_tracked_data(org)
org.flags.allow_joinleave = True # Only flag that defaults to True
org.flags.early_adopter = True
org.flags.codecov_access = True
org.flags.require_2fa = True
org.flags.disable_member_project_creation = (
False # set to True by default for new orgs in save()
)
org.flags.prevent_superuser_access = True
org.flags.disable_member_invite = True
assert flag_has_changed(org, "allow_joinleave") is False
assert flag_has_changed(org, "early_adopter")
assert flag_has_changed(org, "codecov_access")
assert flag_has_changed(org, "require_2fa")
assert flag_has_changed(org, "disable_member_project_creation")
assert flag_has_changed(org, "prevent_superuser_access")
assert flag_has_changed(org, "disable_member_invite")
def test_has_changed(self) -> None:
org = self.create_organization()
update_tracked_data(org)
org.name = "Bizzy"
assert has_changed(org, "name") is True
OrganizationOption.objects.create(
organization=org, key="sentry:require_scrub_ip_address", value=False
)
o = OrganizationOption.objects.get(organization=org, key="sentry:require_scrub_ip_address")
update_tracked_data(o)
o.value = True
assert has_changed(o, "value") is True
OrganizationOption.objects.create(organization=org, key="sentry:sensitive_fields", value=[])
s = OrganizationOption.objects.get(organization=org, key="sentry:sensitive_fields")
update_tracked_data(s)
s.value = ["email"]
assert has_changed(s, "value") is True
OrganizationOption.objects.create(
organization=org, key="sentry:safe_fields", value=["email"]
)
f = OrganizationOption.objects.get(organization=org, key="sentry:safe_fields")
update_tracked_data(f)
f.value = ["email"]
assert has_changed(f, "value") is False
OrganizationOption.objects.create(
organization=org, key="sentry:store_crash_reports", value=0
)
p = OrganizationOption.objects.get(organization=org, key="sentry:store_crash_reports")
update_tracked_data(p)
p.value = 10
assert has_changed(p, "value") is True
def test_name_hasnt_changed_on_init(self) -> None:
inst = Organization(id=1, name="bar")
update_tracked_data(inst)
self.assertFalse(has_changed(inst, "name"))
def test_name_has_changes_before_save(self) -> None:
inst = Organization(id=1, name="bar")
update_tracked_data(inst)
inst.name = "baz"
self.assertTrue(has_changed(inst, "name"))
self.assertEqual(old_value(inst, "name"), "bar")
def test_name_hasnt_changed_after_save(self) -> None:
inst = Organization(id=1, name="bar")
update_tracked_data(inst)
inst.name = "baz"
self.assertTrue(has_changed(inst, "name"))
self.assertEqual(old_value(inst, "name"), "bar")
update_tracked_data(inst)
models.signals.post_save.send(instance=inst, sender=type(inst), created=False)
self.assertFalse(has_changed(inst, "name"))
| OrganizationTest |
python | pytorch__pytorch | torch/_dynamo/convert_frame.py | {
"start": 29113,
"end": 31023
} | class ____:
"""
Represents the core data returned from a single dynamo run, including:
- Guards, wrapped inside tracer_output.output_graph.guards
- Generated bytecode
- Other information needed for compilation.
This data structure should capture all the "interesting" information dynamo
produces on the frontend side before it enters user backend.
"""
tracer_output: DynamoTracerOutput
bytecode: types.CodeType
last_attempt_start_time: Optional[float]
def build_guards(
self,
code: types.CodeType,
hooks: Optional[Hooks] = None,
save: bool = False,
cache_entry: Optional[CacheEntry] = None,
strict_error: bool = False,
) -> CheckFunctionManager:
output_graph = self.tracer_output.output_graph
assert output_graph is not None
return CheckFunctionManager(
code,
output_graph,
cache_entry,
hooks.guard_fail_fn if hooks else None,
hooks.guard_filter_fn if hooks else None,
save_guards=save,
strict_error=strict_error,
)
def graph_capture_output(
self, argdefs: Optional[tuple[Any, ...]] = None
) -> GraphCaptureOutput:
output_graph = self.tracer_output.output_graph
assert output_graph is not None
return GraphCaptureOutput(
OutputGraphCommon(
output_graph.dump_guards_state(),
output_graph.import_sources,
output_graph.shape_env,
output_graph.export_metadata,
output_graph.tracked_fakes_id_to_source,
),
output_graph.import_sources,
output_graph.traced_code,
self.bytecode,
self.tracer_output.closure,
argdefs,
self.tracer_output.f_globals,
)
@dataclass
| DynamoOutput |
python | kamyu104__LeetCode-Solutions | Python/longest-increasing-subsequence.py | {
"start": 5009,
"end": 5527
} | class ____(object):
def lengthOfLIS(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
val_to_idx = {num:i for i, num in enumerate(sorted(set(nums)))}
st = SegmentTree(len(val_to_idx))
for x in nums:
st.update(val_to_idx[x], val_to_idx[x], st.query(0, val_to_idx[x]-1)+1 if val_to_idx[x] >= 1 else 1)
return st.query(0, len(val_to_idx)-1) if len(val_to_idx) >= 1 else 0
# Time: O(n^2)
# Space: O(n)
# Traditional DP solution.
| Solution4 |
python | django-extensions__django-extensions | tests/testapp/derived_classes_for_testing/test_module.py | {
"start": 109,
"end": 158
} | class ____(IncludedMixin):
pass
| ThirdDerivedClass |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-google-ads/unit_tests/test_components.py | {
"start": 4398,
"end": 6562
} | class ____:
def test_given_valid_query_returns_expected_request_body(self, config_for_custom_query_tests, requests_mock):
config = config_for_custom_query_tests
config["custom_queries_array"][0]["query"] = (
"SELECT campaign_budget.name, campaign.name, metrics.interaction_event_types FROM campaign_budget"
)
requester = CustomGAQueryHttpRequester(
name="test_custom_ga_query_http_requester",
parameters={
"query": config["custom_queries_array"][0]["query"],
"cursor_field": "{{ False }}",
},
config=config,
)
request_body = requester.get_request_body_json(stream_slice={})
assert request_body == {"query": "SELECT campaign_budget.name, campaign.name, metrics.interaction_event_types FROM campaign_budget"}
def test_given_valid_query_with_cursor_field_returns_expected_request_body(self, config_for_custom_query_tests, requests_mock):
config = config_for_custom_query_tests
config["custom_queries_array"][0]["query"] = (
"SELECT campaign_budget.name, campaign.name, metrics.interaction_event_types, segments.date FROM campaign_budget ORDER BY segments.date ASC"
)
requester = CustomGAQueryHttpRequester(
name="test_custom_ga_query_http_requester",
parameters={
"query": config["custom_queries_array"][0]["query"],
"cursor_field": "segments.date",
},
config=config,
)
request_body = requester.get_request_body_json(
stream_slice={
"customer_id": "customers/123",
"parent_slice": {"customer_id": "123", "parent_slice": {}},
"start_time": "2025-07-18",
"end_time": "2025-07-19",
}
)
assert request_body == {
"query": "SELECT campaign_budget.name, campaign.name, metrics.interaction_event_types, segments.date FROM campaign_budget WHERE segments.date BETWEEN '2025-07-18' AND '2025-07-19' ORDER BY segments.date ASC"
}
| TestCustomGAQueryHttpRequester |
python | nedbat__coveragepy | tests/test_api.py | {
"start": 30583,
"end": 33271
} | class ____(UsingModulesMixin, CoverageTest):
"""Test methods for coverage methods taking include and omit."""
# An abstract method for subclasses to define, to appease mypy.
def coverage_usepkgs(self, **kwargs_unused: TCovKwargs) -> Iterable[str]:
"""Run coverage on usepkgs, return a line summary. kwargs are for Coverage(**kwargs)."""
raise NotImplementedError() # pragma: not covered
def filenames_in(self, summary: Iterable[str], filenames: str) -> None:
"""Assert the `filenames` are in the `summary`."""
for filename in filenames.split():
assert filename in summary
def filenames_not_in(self, summary: Iterable[str], filenames: str) -> None:
"""Assert the `filenames` are not in the `summary`."""
for filename in filenames.split():
assert filename not in summary
def test_nothing_specified(self) -> None:
result = self.coverage_usepkgs()
self.filenames_in(result, "p1a p1b p2a p2b othera otherb osa osb")
self.filenames_not_in(result, "p1c")
# Because there was no source= specified, we don't search for
# un-executed files.
def test_include(self) -> None:
result = self.coverage_usepkgs(include=["*/p1a.py"])
self.filenames_in(result, "p1a")
self.filenames_not_in(result, "p1b p1c p2a p2b othera otherb osa osb")
def test_include_2(self) -> None:
result = self.coverage_usepkgs(include=["*a.py"])
self.filenames_in(result, "p1a p2a othera osa")
self.filenames_not_in(result, "p1b p1c p2b otherb osb")
def test_include_as_string(self) -> None:
result = self.coverage_usepkgs(include="*a.py")
self.filenames_in(result, "p1a p2a othera osa")
self.filenames_not_in(result, "p1b p1c p2b otherb osb")
def test_omit(self) -> None:
result = self.coverage_usepkgs(omit=["*/p1a.py"])
self.filenames_in(result, "p1b p2a p2b")
self.filenames_not_in(result, "p1a p1c")
def test_omit_2(self) -> None:
result = self.coverage_usepkgs(omit=["*a.py"])
self.filenames_in(result, "p1b p2b otherb osb")
self.filenames_not_in(result, "p1a p1c p2a othera osa")
def test_omit_as_string(self) -> None:
result = self.coverage_usepkgs(omit="*a.py")
self.filenames_in(result, "p1b p2b otherb osb")
self.filenames_not_in(result, "p1a p1c p2a othera osa")
def test_omit_and_include(self) -> None:
result = self.coverage_usepkgs(include=["*/p1*"], omit=["*/p1a.py"])
self.filenames_in(result, "p1b")
self.filenames_not_in(result, "p1a p1c p2a p2b")
| IncludeOmitTestsMixin |
python | qdrant__qdrant-client | qdrant_client/http/api/search_api.py | {
"start": 1311,
"end": 17477
} | class ____:
def __init__(self, api_client: "Union[ApiClient, AsyncApiClient]"):
self.api_client = api_client
def _build_for_discover_batch_points(
self,
collection_name: str,
consistency: m.ReadConsistency = None,
timeout: int = None,
discover_request_batch: m.DiscoverRequestBatch = None,
):
"""
Look for points based on target and/or positive and negative example pairs, in batch.
"""
path_params = {
"collection_name": str(collection_name),
}
query_params = {}
if consistency is not None:
query_params["consistency"] = str(consistency)
if timeout is not None:
query_params["timeout"] = str(timeout)
headers = {}
body = jsonable_encoder(discover_request_batch)
if "Content-Type" not in headers:
headers["Content-Type"] = "application/json"
return self.api_client.request(
type_=m.InlineResponse20017,
method="POST",
url="/collections/{collection_name}/points/discover/batch",
headers=headers if headers else None,
path_params=path_params,
params=query_params,
content=body,
)
def _build_for_discover_points(
self,
collection_name: str,
consistency: m.ReadConsistency = None,
timeout: int = None,
discover_request: m.DiscoverRequest = None,
):
"""
Use context and a target to find the most similar points to the target, constrained by the context. When using only the context (without a target), a special search - called context search - is performed where pairs of points are used to generate a loss that guides the search towards the zone where most positive examples overlap. This means that the score minimizes the scenario of finding a point closer to a negative than to a positive part of a pair. Since the score of a context relates to loss, the maximum score a point can get is 0.0, and it becomes normal that many points can have a score of 0.0. When using target (with or without context), the score behaves a little different: The integer part of the score represents the rank with respect to the context, while the decimal part of the score relates to the distance to the target. The context part of the score for each pair is calculated +1 if the point is closer to a positive than to a negative part of a pair, and -1 otherwise.
"""
path_params = {
"collection_name": str(collection_name),
}
query_params = {}
if consistency is not None:
query_params["consistency"] = str(consistency)
if timeout is not None:
query_params["timeout"] = str(timeout)
headers = {}
body = jsonable_encoder(discover_request)
if "Content-Type" not in headers:
headers["Content-Type"] = "application/json"
return self.api_client.request(
type_=m.InlineResponse20016,
method="POST",
url="/collections/{collection_name}/points/discover",
headers=headers if headers else None,
path_params=path_params,
params=query_params,
content=body,
)
def _build_for_query_batch_points(
self,
collection_name: str,
consistency: m.ReadConsistency = None,
timeout: int = None,
query_request_batch: m.QueryRequestBatch = None,
):
"""
Universally query points in batch. This endpoint covers all capabilities of search, recommend, discover, filters. But also enables hybrid and multi-stage queries.
"""
path_params = {
"collection_name": str(collection_name),
}
query_params = {}
if consistency is not None:
query_params["consistency"] = str(consistency)
if timeout is not None:
query_params["timeout"] = str(timeout)
headers = {}
body = jsonable_encoder(query_request_batch)
if "Content-Type" not in headers:
headers["Content-Type"] = "application/json"
return self.api_client.request(
type_=m.InlineResponse20022,
method="POST",
url="/collections/{collection_name}/points/query/batch",
headers=headers if headers else None,
path_params=path_params,
params=query_params,
content=body,
)
def _build_for_query_points(
self,
collection_name: str,
consistency: m.ReadConsistency = None,
timeout: int = None,
query_request: m.QueryRequest = None,
):
"""
Universally query points. This endpoint covers all capabilities of search, recommend, discover, filters. But also enables hybrid and multi-stage queries.
"""
path_params = {
"collection_name": str(collection_name),
}
query_params = {}
if consistency is not None:
query_params["consistency"] = str(consistency)
if timeout is not None:
query_params["timeout"] = str(timeout)
headers = {}
body = jsonable_encoder(query_request)
if "Content-Type" not in headers:
headers["Content-Type"] = "application/json"
return self.api_client.request(
type_=m.InlineResponse20021,
method="POST",
url="/collections/{collection_name}/points/query",
headers=headers if headers else None,
path_params=path_params,
params=query_params,
content=body,
)
def _build_for_query_points_groups(
self,
collection_name: str,
consistency: m.ReadConsistency = None,
timeout: int = None,
query_groups_request: m.QueryGroupsRequest = None,
):
"""
Universally query points, grouped by a given payload field
"""
path_params = {
"collection_name": str(collection_name),
}
query_params = {}
if consistency is not None:
query_params["consistency"] = str(consistency)
if timeout is not None:
query_params["timeout"] = str(timeout)
headers = {}
body = jsonable_encoder(query_groups_request)
if "Content-Type" not in headers:
headers["Content-Type"] = "application/json"
return self.api_client.request(
type_=m.InlineResponse20018,
method="POST",
url="/collections/{collection_name}/points/query/groups",
headers=headers if headers else None,
path_params=path_params,
params=query_params,
content=body,
)
def _build_for_recommend_batch_points(
self,
collection_name: str,
consistency: m.ReadConsistency = None,
timeout: int = None,
recommend_request_batch: m.RecommendRequestBatch = None,
):
"""
Look for the points which are closer to stored positive examples and at the same time further to negative examples.
"""
path_params = {
"collection_name": str(collection_name),
}
query_params = {}
if consistency is not None:
query_params["consistency"] = str(consistency)
if timeout is not None:
query_params["timeout"] = str(timeout)
headers = {}
body = jsonable_encoder(recommend_request_batch)
if "Content-Type" not in headers:
headers["Content-Type"] = "application/json"
return self.api_client.request(
type_=m.InlineResponse20017,
method="POST",
url="/collections/{collection_name}/points/recommend/batch",
headers=headers if headers else None,
path_params=path_params,
params=query_params,
content=body,
)
def _build_for_recommend_point_groups(
self,
collection_name: str,
consistency: m.ReadConsistency = None,
timeout: int = None,
recommend_groups_request: m.RecommendGroupsRequest = None,
):
"""
Look for the points which are closer to stored positive examples and at the same time further to negative examples, grouped by a given payload field.
"""
path_params = {
"collection_name": str(collection_name),
}
query_params = {}
if consistency is not None:
query_params["consistency"] = str(consistency)
if timeout is not None:
query_params["timeout"] = str(timeout)
headers = {}
body = jsonable_encoder(recommend_groups_request)
if "Content-Type" not in headers:
headers["Content-Type"] = "application/json"
return self.api_client.request(
type_=m.InlineResponse20018,
method="POST",
url="/collections/{collection_name}/points/recommend/groups",
headers=headers if headers else None,
path_params=path_params,
params=query_params,
content=body,
)
def _build_for_recommend_points(
self,
collection_name: str,
consistency: m.ReadConsistency = None,
timeout: int = None,
recommend_request: m.RecommendRequest = None,
):
"""
Look for the points which are closer to stored positive examples and at the same time further to negative examples.
"""
path_params = {
"collection_name": str(collection_name),
}
query_params = {}
if consistency is not None:
query_params["consistency"] = str(consistency)
if timeout is not None:
query_params["timeout"] = str(timeout)
headers = {}
body = jsonable_encoder(recommend_request)
if "Content-Type" not in headers:
headers["Content-Type"] = "application/json"
return self.api_client.request(
type_=m.InlineResponse20016,
method="POST",
url="/collections/{collection_name}/points/recommend",
headers=headers if headers else None,
path_params=path_params,
params=query_params,
content=body,
)
def _build_for_search_batch_points(
self,
collection_name: str,
consistency: m.ReadConsistency = None,
timeout: int = None,
search_request_batch: m.SearchRequestBatch = None,
):
"""
Retrieve by batch the closest points based on vector similarity and given filtering conditions
"""
path_params = {
"collection_name": str(collection_name),
}
query_params = {}
if consistency is not None:
query_params["consistency"] = str(consistency)
if timeout is not None:
query_params["timeout"] = str(timeout)
headers = {}
body = jsonable_encoder(search_request_batch)
if "Content-Type" not in headers:
headers["Content-Type"] = "application/json"
return self.api_client.request(
type_=m.InlineResponse20017,
method="POST",
url="/collections/{collection_name}/points/search/batch",
headers=headers if headers else None,
path_params=path_params,
params=query_params,
content=body,
)
def _build_for_search_matrix_offsets(
self,
collection_name: str,
consistency: m.ReadConsistency = None,
timeout: int = None,
search_matrix_request: m.SearchMatrixRequest = None,
):
"""
Compute distance matrix for sampled points with an offset based output format
"""
path_params = {
"collection_name": str(collection_name),
}
query_params = {}
if consistency is not None:
query_params["consistency"] = str(consistency)
if timeout is not None:
query_params["timeout"] = str(timeout)
headers = {}
body = jsonable_encoder(search_matrix_request)
if "Content-Type" not in headers:
headers["Content-Type"] = "application/json"
return self.api_client.request(
type_=m.InlineResponse20024,
method="POST",
url="/collections/{collection_name}/points/search/matrix/offsets",
headers=headers if headers else None,
path_params=path_params,
params=query_params,
content=body,
)
def _build_for_search_matrix_pairs(
self,
collection_name: str,
consistency: m.ReadConsistency = None,
timeout: int = None,
search_matrix_request: m.SearchMatrixRequest = None,
):
"""
Compute distance matrix for sampled points with a pair based output format
"""
path_params = {
"collection_name": str(collection_name),
}
query_params = {}
if consistency is not None:
query_params["consistency"] = str(consistency)
if timeout is not None:
query_params["timeout"] = str(timeout)
headers = {}
body = jsonable_encoder(search_matrix_request)
if "Content-Type" not in headers:
headers["Content-Type"] = "application/json"
return self.api_client.request(
type_=m.InlineResponse20023,
method="POST",
url="/collections/{collection_name}/points/search/matrix/pairs",
headers=headers if headers else None,
path_params=path_params,
params=query_params,
content=body,
)
def _build_for_search_point_groups(
self,
collection_name: str,
consistency: m.ReadConsistency = None,
timeout: int = None,
search_groups_request: m.SearchGroupsRequest = None,
):
"""
Retrieve closest points based on vector similarity and given filtering conditions, grouped by a given payload field
"""
path_params = {
"collection_name": str(collection_name),
}
query_params = {}
if consistency is not None:
query_params["consistency"] = str(consistency)
if timeout is not None:
query_params["timeout"] = str(timeout)
headers = {}
body = jsonable_encoder(search_groups_request)
if "Content-Type" not in headers:
headers["Content-Type"] = "application/json"
return self.api_client.request(
type_=m.InlineResponse20018,
method="POST",
url="/collections/{collection_name}/points/search/groups",
headers=headers if headers else None,
path_params=path_params,
params=query_params,
content=body,
)
def _build_for_search_points(
self,
collection_name: str,
consistency: m.ReadConsistency = None,
timeout: int = None,
search_request: m.SearchRequest = None,
):
"""
Retrieve closest points based on vector similarity and given filtering conditions
"""
path_params = {
"collection_name": str(collection_name),
}
query_params = {}
if consistency is not None:
query_params["consistency"] = str(consistency)
if timeout is not None:
query_params["timeout"] = str(timeout)
headers = {}
body = jsonable_encoder(search_request)
if "Content-Type" not in headers:
headers["Content-Type"] = "application/json"
return self.api_client.request(
type_=m.InlineResponse20016,
method="POST",
url="/collections/{collection_name}/points/search",
headers=headers if headers else None,
path_params=path_params,
params=query_params,
content=body,
)
| _SearchApi |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/publish/pipeline.py | {
"start": 8608,
"end": 9925
} | class ____(Step):
context: PublishConnectorContext
title = "Push existing version image as latest"
@property
def latest_docker_image_name(self) -> str:
return f"{self.context.docker_repository}:latest"
async def _run(self, attempts: int = 3) -> StepResult:
per_platform_containers = [
self.context.dagger_client.container(platform=platform).from_(f"docker.io/{self.context.docker_image}")
for platform in consts.BUILD_PLATFORMS
]
try:
image_ref = await per_platform_containers[0].publish(
f"docker.io/{self.latest_docker_image_name}",
platform_variants=per_platform_containers[1:],
forced_compression=ImageLayerCompression.Gzip,
)
return StepResult(step=self, status=StepStatus.SUCCESS, stdout=f"Published {image_ref}")
except QueryError as e:
if attempts > 0:
self.context.logger.error(str(e))
self.context.logger.warn(f"Failed to publish {self.context.docker_image}. Retrying. {attempts} attempts left.")
await anyio.sleep(5)
return await self._run(attempts - 1)
return StepResult(step=self, status=StepStatus.FAILURE, stderr=str(e))
| PushVersionImageAsLatest |
python | faif__python-patterns | tests/structural/test_bridge.py | {
"start": 128,
"end": 1695
} | class ____(unittest.TestCase):
def test_bridge_shall_draw_with_concrete_api_implementation(cls):
ci1 = DrawingAPI1()
ci2 = DrawingAPI2()
with (
patch.object(ci1, "draw_circle") as mock_ci1_draw_circle,
patch.object(ci2, "draw_circle") as mock_ci2_draw_circle,
):
sh1 = CircleShape(1, 2, 3, ci1)
sh1.draw()
cls.assertEqual(mock_ci1_draw_circle.call_count, 1)
sh2 = CircleShape(1, 2, 3, ci2)
sh2.draw()
cls.assertEqual(mock_ci2_draw_circle.call_count, 1)
def test_bridge_shall_scale_both_api_circles_with_own_implementation(cls):
SCALE_FACTOR = 2
CIRCLE1_RADIUS = 3
EXPECTED_CIRCLE1_RADIUS = 6
CIRCLE2_RADIUS = CIRCLE1_RADIUS * CIRCLE1_RADIUS
EXPECTED_CIRCLE2_RADIUS = CIRCLE2_RADIUS * SCALE_FACTOR
ci1 = DrawingAPI1()
ci2 = DrawingAPI2()
sh1 = CircleShape(1, 2, CIRCLE1_RADIUS, ci1)
sh2 = CircleShape(1, 2, CIRCLE2_RADIUS, ci2)
sh1.scale(SCALE_FACTOR)
sh2.scale(SCALE_FACTOR)
cls.assertEqual(sh1._radius, EXPECTED_CIRCLE1_RADIUS)
cls.assertEqual(sh2._radius, EXPECTED_CIRCLE2_RADIUS)
with (
patch.object(sh1, "scale") as mock_sh1_scale_circle,
patch.object(sh2, "scale") as mock_sh2_scale_circle,
):
sh1.scale(2)
sh2.scale(2)
cls.assertEqual(mock_sh1_scale_circle.call_count, 1)
cls.assertEqual(mock_sh2_scale_circle.call_count, 1)
| BridgeTest |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_crossing07.py | {
"start": 315,
"end": 2139
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_crossing07.xlsx")
self.ignore_elements = {"xl/charts/chart1.xml": ["<c:formatCode"]}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "stock"})
date_format = workbook.add_format({"num_format": 14})
chart.axis_ids = [87397120, 87399424]
data = [
[39083, 39084, 39085, 39086, 39087],
[27.2, 25.03, 19.05, 20.34, 18.5],
[23.49, 19.55, 15.12, 17.84, 16.34],
[25.45, 23.05, 17.32, 20.45, 17.34],
]
for row in range(5):
worksheet.write(row, 0, data[0][row], date_format)
worksheet.write(row, 1, data[1][row])
worksheet.write(row, 2, data[2][row])
worksheet.write(row, 3, data[3][row])
worksheet.set_column("A:D", 11)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$D$1:$D$5",
}
)
chart.set_x_axis({"crossing": 39085})
chart.set_y_axis({"crossing": 15})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | spyder-ide__spyder | spyder/widgets/waitingspinner.py | {
"start": 1685,
"end": 8983
} | class ____(QWidget):
def __init__(self, parent, centerOnParent=True,
disableParentWhenSpinning=False, modality=Qt.NonModal):
# super().__init__(parent)
QWidget.__init__(self, parent)
self._centerOnParent = centerOnParent
self._disableParentWhenSpinning = disableParentWhenSpinning
# WAS IN initialize()
self._color = QColor(Qt.black)
self._roundness = 100.0
self._minimumTrailOpacity = 3.14159265358979323846
self._trailFadePercentage = 80.0
self._trailSizeDecreasing = False
self._revolutionsPerSecond = 1.57079632679489661923
self._numberOfLines = 20
self._lineLength = 10
self._lineWidth = 2
self._innerRadius = 10
self._currentCounter = 0
self._isSpinning = False
self._timer = QTimer(self)
self._timer.timeout.connect(self.rotate)
self.updateSize()
self.updateTimer()
self.hide()
# END initialize()
self.setWindowModality(modality)
self.setAttribute(Qt.WA_TranslucentBackground)
self.show()
def paintEvent(self, QPaintEvent):
if not self._isSpinning:
return
self.updatePosition()
painter = QPainter(self)
painter.fillRect(self.rect(), Qt.transparent)
painter.setRenderHint(QPainter.Antialiasing, True)
if self._currentCounter >= self._numberOfLines:
self._currentCounter = 0
painter.setPen(Qt.NoPen)
for i in range(0, self._numberOfLines):
painter.save()
painter.translate(self._innerRadius + self._lineLength, self._innerRadius + self._lineLength)
rotateAngle = float(360 * i) / float(self._numberOfLines)
painter.rotate(rotateAngle)
painter.translate(self._innerRadius, 0)
distance = self.lineCountDistanceFromPrimary(i, self._currentCounter, self._numberOfLines)
color = self.currentLineColor(distance, self._numberOfLines, self._trailFadePercentage,
self._minimumTrailOpacity, self._color)
# Compute the scaling factor to apply to the size and thickness
# of the lines in the trail.
if self._trailSizeDecreasing:
sf = (self._numberOfLines - distance) / self._numberOfLines
else:
sf = 1
painter.setBrush(color)
rect = QRect(0, round(-self._lineWidth / 2),
round(sf * self._lineLength),
round(sf * self._lineWidth))
painter.drawRoundedRect(
rect, self._roundness, self._roundness, Qt.RelativeSize)
painter.restore()
def start(self):
self.updatePosition()
self._isSpinning = True
if self.parentWidget and self._disableParentWhenSpinning:
self.parentWidget().setEnabled(False)
if not self._timer.isActive():
self._timer.start()
self._currentCounter = 0
self.show()
def stop(self):
if not self._isSpinning:
# No need to repaint everything if it is already stopped
return
self._isSpinning = False
if self.parentWidget() and self._disableParentWhenSpinning:
self.parentWidget().setEnabled(True)
if self._timer.isActive():
self._timer.stop()
self._currentCounter = 0
self.show()
self.repaint()
def setNumberOfLines(self, lines):
self._numberOfLines = lines
self._currentCounter = 0
self.updateTimer()
def setLineLength(self, length):
self._lineLength = length
self.updateSize()
def setLineWidth(self, width):
self._lineWidth = width
self.updateSize()
def setInnerRadius(self, radius):
self._innerRadius = radius
self.updateSize()
def color(self):
return self._color
def roundness(self):
return self._roundness
def minimumTrailOpacity(self):
return self._minimumTrailOpacity
def trailFadePercentage(self):
return self._trailFadePercentage
def revolutionsPersSecond(self):
return self._revolutionsPerSecond
def numberOfLines(self):
return self._numberOfLines
def lineLength(self):
return self._lineLength
def isTrailSizeDecreasing(self):
"""
Return whether the length and thickness of the trailing lines
are decreasing.
"""
return self._trailSizeDecreasing
def lineWidth(self):
return self._lineWidth
def innerRadius(self):
return self._innerRadius
def isSpinning(self):
return self._isSpinning
def setRoundness(self, roundness):
self._roundness = max(0.0, min(100.0, roundness))
def setColor(self, color=Qt.black):
self._color = QColor(color)
def setRevolutionsPerSecond(self, revolutionsPerSecond):
self._revolutionsPerSecond = revolutionsPerSecond
self.updateTimer()
def setTrailFadePercentage(self, trail):
self._trailFadePercentage = trail
def setTrailSizeDecreasing(self, value):
"""
Set whether the length and thickness of the trailing lines
are decreasing.
"""
self._trailSizeDecreasing = value
def setMinimumTrailOpacity(self, minimumTrailOpacity):
self._minimumTrailOpacity = minimumTrailOpacity
def rotate(self):
self._currentCounter += 1
if self._currentCounter >= self._numberOfLines:
self._currentCounter = 0
self.update()
def updateSize(self):
size = int((self._innerRadius + self._lineLength) * 2)
self.setFixedSize(size, size)
def updateTimer(self):
self._timer.setInterval(int(1000 / (self._numberOfLines *
self._revolutionsPerSecond)))
def updatePosition(self):
if self.parentWidget() and self._centerOnParent:
self.move(int(self.parentWidget().width() / 2 -
self.width() / 2),
int(self.parentWidget().height() / 2 -
self.height() / 2))
def lineCountDistanceFromPrimary(self, current, primary, totalNrOfLines):
distance = primary - current
if distance < 0:
distance += totalNrOfLines
return distance
def currentLineColor(self, countDistance, totalNrOfLines, trailFadePerc, minOpacity, colorinput):
color = QColor(colorinput)
if countDistance == 0:
return color
minAlphaF = minOpacity / 100.0
distanceThreshold = int(math.ceil((totalNrOfLines - 1) * trailFadePerc / 100.0))
if countDistance > distanceThreshold:
color.setAlphaF(minAlphaF)
else:
alphaDiff = color.alphaF() - minAlphaF
gradient = alphaDiff / float(distanceThreshold + 1)
resultAlpha = color.alphaF() - gradient * countDistance
# If alpha is out of bounds, clip it.
resultAlpha = min(1.0, max(0.0, resultAlpha))
color.setAlphaF(resultAlpha)
return color
| QWaitingSpinner |
python | getsentry__sentry | src/sentry/relocation/models/relocationtransfer.py | {
"start": 528,
"end": 625
} | class ____(models.TextChoices):
Request = "request"
Reply = "reply"
| RelocationTransferState |
python | PrefectHQ__prefect | src/prefect/_internal/concurrency/services.py | {
"start": 11815,
"end": 12656
} | class ____(_QueueServiceBase[T]):
def send(self, item: T) -> None:
"""
Send an item to this instance of the service.
"""
with self._lock:
if self._stopped:
raise RuntimeError("Cannot put items in a stopped service instance.")
logger.debug("Service %r enqueuing item %r", self, item)
self._queue.put_nowait(self._prepare_item(item))
def _prepare_item(self, item: T) -> T:
"""
Prepare an item for submission to the service. This is called before
the item is sent to the service.
The default implementation returns the item unchanged.
"""
return item
@abc.abstractmethod
async def _handle(self, item: T) -> None:
"""
Process an item sent to the service.
"""
| QueueService |
python | facelessuser__soupsieve | tests/test_level4/test_is.py | {
"start": 89,
"end": 3389
class ____(util.TestCase):
    """Test `:is()` selectors."""
    MARKUP = """
    <div>
    <p>Some text <span id="1"> in a paragraph</span>.
    <a id="2" href="http://google.com">Link</a>
    </p>
    </div>
    """
    def test_is(self):
        """Test multiple selectors with "is"."""
        self.assert_selector(
            self.MARKUP,
            ":is(span, a)",
            ["1", "2"],
            flags=util.HTML
        )
    def test_is_multi_comma(self):
        """Test multiple selectors but with an empty slot due to multiple commas."""
        self.assert_selector(
            self.MARKUP,
            ":is(span, , a)",
            ["1", "2"],
            flags=util.HTML
        )
    def test_is_leading_comma(self):
        """Test multiple selectors but with an empty slot due to leading commas."""
        self.assert_selector(
            self.MARKUP,
            ":is(, span, a)",
            ["1", "2"],
            flags=util.HTML
        )
    def test_is_trailing_comma(self):
        """Test multiple selectors but with an empty slot due to trailing commas."""
        self.assert_selector(
            self.MARKUP,
            ":is(span, a, )",
            ["1", "2"],
            flags=util.HTML
        )
    def test_is_empty(self):
        """Test empty `:is()` selector list: matches nothing."""
        self.assert_selector(
            self.MARKUP,
            ":is()",
            [],
            flags=util.HTML
        )
    def test_nested_is(self):
        """Test `:is()` nested inside another `:is()` selector list."""
        self.assert_selector(
            self.MARKUP,
            ":is(span, a:is(#\\32))",
            ["1", "2"],
            flags=util.HTML
        )
        # NOTE(review): identical second call — presumably exercises the
        # compiled-selector cache path; confirm before removing.
        self.assert_selector(
            self.MARKUP,
            ":is(span, a:is(#\\32))",
            ["1", "2"],
            flags=util.HTML
        )
    def test_is_with_other_pseudo(self):
        """Test `:is()` behavior when paired with `:not()`."""
        # Each pseudo class is evaluated separately
        # So this will not match
        self.assert_selector(
            self.MARKUP,
            ":is(span):not(span)",
            [],
            flags=util.HTML
        )
    def test_multiple_is(self):
        """Test chaining multiple `:is()` pseudo-classes on one compound selector."""
        # Each pseudo class is evaluated separately
        # So this will not match
        self.assert_selector(
            self.MARKUP,
            ":is(span):is(div)",
            [],
            flags=util.HTML
        )
        # Each pseudo class is evaluated separately
        # So this will match
        self.assert_selector(
            self.MARKUP,
            ":is(a):is(#\\32)",
            ['2'],
            flags=util.HTML
        )
    def test_invalid_pseudo_class_start_combinator(self):
        """Test invalid start combinator in pseudo-classes other than `:has()`."""
        self.assert_raises(':is(> div)', SelectorSyntaxError)
        self.assert_raises(':is(div, > div)', SelectorSyntaxError)
    def test_invalid_pseudo_orphan_close(self):
        """Test invalid, orphaned pseudo close."""
        self.assert_raises('div)', SelectorSyntaxError)
    def test_invalid_pseudo_open(self):
        """Test invalid pseudo close."""
        self.assert_raises(':is(div', SelectorSyntaxError)
| TestIs |
python | apache__airflow | providers/standard/src/airflow/providers/standard/triggers/file.py | {
"start": 1283,
"end": 3210
class ____(BaseTrigger):
    """
    A trigger that fires exactly once after it finds the requested file or folder.

    :param filepath: File or folder name (relative to the base path set within the connection), can
        be a glob.
    :param recursive: when set to ``True``, enables recursive directory matching behavior of
        ``**`` in glob filepath parameter. Defaults to ``False``.
    :param poke_interval: Time that the job should wait in between each try
    """
    def __init__(
        self,
        filepath: str,
        recursive: bool = False,
        poke_interval: float = 5.0,
        **kwargs,
    ):
        super().__init__()
        self.filepath = filepath
        self.recursive = recursive
        self.poke_interval = poke_interval
    def serialize(self) -> tuple[str, dict[str, Any]]:
        """Serialize FileTrigger arguments and classpath."""
        return (
            "airflow.providers.standard.triggers.file.FileTrigger",
            {
                "filepath": self.filepath,
                "recursive": self.recursive,
                "poke_interval": self.poke_interval,
            },
        )
    async def run(self) -> AsyncIterator[TriggerEvent]:
        """Loop until the relevant files are found."""
        while True:
            # Each glob match is either a regular file (fire immediately) or,
            # failing that, walked as a directory tree.
            for path in glob(self.filepath, recursive=self.recursive):
                if os.path.isfile(path):
                    mod_time_f = os.path.getmtime(path)
                    mod_time = datetime.datetime.fromtimestamp(mod_time_f).strftime("%Y%m%d%H%M%S")
                    self.log.info("Found File %s last modified: %s", path, mod_time)
                    yield TriggerEvent(True)
                    return
                # Non-file match: fire if any file exists anywhere under it.
                for _, _, files in os.walk(path):
                    if files:
                        yield TriggerEvent(True)
                        return
            # Nothing found yet; back off before polling the filesystem again.
            await asyncio.sleep(self.poke_interval)
| FileTrigger |
python | ansible__ansible | test/lib/ansible_test/_internal/metadata.py | {
"start": 6161,
"end": 7036
class ____:
    """Flags for enabling specific debugging features."""
    # NOTE(review): this class reads dataclasses.fields(self), so it is
    # presumably decorated with @dataclasses.dataclass — decorator not
    # visible in this chunk; confirm at the definition site.
    # The first field is deliberately named `self`: it means
    # "debug ansible-test itself", not the instance reference.
    self: bool = False
    """Debug ansible-test itself."""
    ansiballz: bool = False
    """Debug AnsiballZ modules."""
    cli: bool = False
    """Debug Ansible CLI programs other than ansible-test."""
    on_demand: bool = False
    """Enable debugging features only when ansible-test is running under a debugger."""
    @property
    def enable(self) -> bool:
        """Return `True` if any debugger feature other than on-demand is enabled."""
        return any(getattr(self, field.name) for field in dataclasses.fields(self) if field.name != 'on_demand')
    @classmethod
    def all(cls, enabled: bool) -> t.Self:
        """Return a `DebuggerFlags` instance with all flags enabled or disabled."""
        return cls(**{field.name: enabled for field in dataclasses.fields(cls)})
| DebuggerFlags |
python | plotly__plotly.py | plotly/graph_objs/violin/_line.py | {
"start": 233,
"end": 2959
class ____(_BaseTraceHierarchyType):
    """Line styling for the border drawn around violin traces.

    NOTE(review): this appears to be generated plotly graph-object code;
    edits here are normally regenerated from the schema.
    """
    # Location of this node within the figure property hierarchy.
    _parent_path_str = "violin"
    _path_str = "violin.line"
    _valid_props = {"color", "width"}
    @property
    def color(self):
        """
        Sets the color of line bounding the violin(s).

        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color: see https://plotly.com/python/css-colors/ for a list

        Returns
        -------
        str
        """
        return self["color"]
    @color.setter
    def color(self, val):
        self["color"] = val
    @property
    def width(self):
        """
        Sets the width (in px) of line bounding the violin(s).

        The 'width' property is a number and may be specified as:
          - An int or float in the interval [0, inf]

        Returns
        -------
        int|float
        """
        return self["width"]
    @width.setter
    def width(self, val):
        self["width"] = val
    @property
    def _prop_descriptions(self):
        return """\
        color
            Sets the color of line bounding the violin(s).
        width
            Sets the width (in px) of line bounding the violin(s).
        """
    def __init__(self, arg=None, color=None, width=None, **kwargs):
        """
        Construct a new Line object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.violin.Line`
        color
            Sets the color of line bounding the violin(s).
        width
            Sets the width (in px) of line bounding the violin(s).

        Returns
        -------
        Line
        """
        super().__init__("line")
        # Internal construction path: parent supplied directly, skip validation.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Accept a dict, another Line instance, or nothing as the seed state.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError("""\
The first argument to the plotly.graph_objs.violin.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.violin.Line`""")
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Explicit keyword args take precedence over values carried in `arg`.
        self._set_property("color", arg, color)
        self._set_property("width", arg, width)
        self._process_kwargs(**dict(arg, **kwargs))
        self._skip_invalid = False
| Line |
python | pytorch__pytorch | torch/_inductor/runtime/caching/exceptions.py | {
"start": 2263,
"end": 2626
class ____(CacheError, TypeError):
    """Base class for user-facing cache errors that also inherit from TypeError.

    This class combines CacheError with TypeError to provide compatibility
    with existing exception handling patterns while maintaining the cache
    error hierarchy. All user-facing cache errors should inherit from this class.

    Note: catching either ``CacheError`` or ``TypeError`` will catch
    subclasses of this exception.
    """
| UserError |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/lists.py | {
"start": 2705,
"end": 3105
} | class ____:
def __str__(self) -> str:
return ""
def returns_list_repr(x: Any) -> List[HasRepr]:
    # Stub for the taint-analysis tests: the `...` body is intentional —
    # only the declared return type matters to the analysis.
    ...
def inconsistent_type_context(l: List[HasStr]) -> None:
    # Demonstrate a (fixed) inconsistency in how we handle nested generators.
    # The type context is different between the inner generator and the outer one.
    # NOTE: the exact nested-comprehension shape is the fixture being tested;
    # do not simplify it.
    [str(x) for x in returns_list_repr([str(x) for x in l])]
| HasStr |
python | walkccc__LeetCode | solutions/3430. Maximum and Minimum Sums of at Most Size K Subarrays/3430.py | {
"start": 0,
"end": 1491
} | class ____:
# Similar to 2104. Sum of Subarray Ranges
def minMaxSubarraySum(self, nums: list[int], k: int) -> int:
prevGt, nextGt = self._getPrevNext(nums, operator.lt)
prevLt, nextLt = self._getPrevNext(nums, operator.gt)
return (self._subarraySum(nums, prevGt, nextGt, k) +
self._subarraySum(nums, prevLt, nextLt, k))
def _subarraySum(
self,
nums: list[int],
prev: list[int],
next: list[int],
k: int
) -> int:
"""
Returns the sum of all subarrays with a size <= k, The `prev` and `next`
arrays are used to store the indices of the nearest numbers that are
smaller or larger than the current number, respectively.
"""
res = 0
for i, num in enumerate(nums):
l = min(i - prev[i], k)
r = min(next[i] - i, k)
extra = max(0, l + r - 1 - k)
res += num * (l * r - extra * (extra + 1) // 2)
return res
def _getPrevNext(
self,
nums: list[int],
op: callable
) -> tuple[list[int], list[int]]:
"""
Returns `prev` and `next`, that store the indices of the nearest numbers
that are smaller or larger than the current number depending on `op`.
"""
n = len(nums)
prev = [-1] * n
next = [n] * n
stack = []
for i, num in enumerate(nums):
while stack and op(nums[stack[-1]], num):
index = stack.pop()
next[index] = i
if stack:
prev[i] = stack[-1]
stack.append(i)
return prev, next
| Solution |
python | jazzband__django-redis | tests/test_client.py | {
"start": 2162,
"end": 5198
class ____:
    """Unit tests for DefaultClient.delete_pattern, fully mocked.

    Each test patches __init__ so no real Redis connection is made, then
    verifies the exact calls delete_pattern issues.
    """
    @patch("test_client.DefaultClient.get_client")
    @patch("test_client.DefaultClient.__init__", return_value=None)
    def test_delete_pattern_calls_get_client_given_no_client(
        self,
        init_mock,
        get_client_mock,
    ):
        # delete_pattern must acquire a writable client when none is supplied.
        client = DefaultClient()
        client._backend = Mock()
        client._backend.key_prefix = ""
        client.delete_pattern(pattern="foo*")
        get_client_mock.assert_called_once_with(write=True)
    @patch("test_client.DefaultClient.make_pattern")
    @patch("test_client.DefaultClient.get_client", return_value=Mock())
    @patch("test_client.DefaultClient.__init__", return_value=None)
    def test_delete_pattern_calls_make_pattern(
        self,
        init_mock,
        get_client_mock,
        make_pattern_mock,
    ):
        # The raw pattern must be expanded through make_pattern with
        # default version/prefix.
        client = DefaultClient()
        client._backend = Mock()
        client._backend.key_prefix = ""
        get_client_mock.return_value.scan_iter.return_value = []
        client.delete_pattern(pattern="foo*")
        kwargs = {"version": None, "prefix": None}
        make_pattern_mock.assert_called_once_with("foo*", **kwargs)
    @patch("test_client.DefaultClient.make_pattern")
    @patch("test_client.DefaultClient.get_client", return_value=Mock())
    @patch("test_client.DefaultClient.__init__", return_value=None)
    def test_delete_pattern_calls_scan_iter_with_count_if_itersize_given(
        self,
        init_mock,
        get_client_mock,
        make_pattern_mock,
    ):
        # An explicit itersize must be forwarded as SCAN's COUNT hint.
        client = DefaultClient()
        client._backend = Mock()
        client._backend.key_prefix = ""
        get_client_mock.return_value.scan_iter.return_value = []
        client.delete_pattern(pattern="foo*", itersize=90210)
        get_client_mock.return_value.scan_iter.assert_called_once_with(
            count=90210,
            match=make_pattern_mock.return_value,
        )
    @patch("test_client.DefaultClient.make_pattern")
    @patch("test_client.DefaultClient.get_client", return_value=Mock())
    @patch("test_client.DefaultClient.__init__", return_value=None)
    def test_delete_pattern_calls_pipeline_delete_and_execute(
        self,
        init_mock,
        get_client_mock,
        make_pattern_mock,
    ):
        # Matched keys must be deleted through a pipeline: one delete per
        # key, then a single execute.
        client = DefaultClient()
        client._backend = Mock()
        client._backend.key_prefix = ""
        get_client_mock.return_value.scan_iter.return_value = [":1:foo", ":1:foo-a"]
        get_client_mock.return_value.pipeline.return_value = Mock()
        get_client_mock.return_value.pipeline.return_value.delete = Mock()
        get_client_mock.return_value.pipeline.return_value.execute = Mock()
        client.delete_pattern(pattern="foo*")
        assert get_client_mock.return_value.pipeline.return_value.delete.call_count == 2
        get_client_mock.return_value.pipeline.return_value.delete.assert_has_calls(
            [call(":1:foo"), call(":1:foo-a")],
        )
        get_client_mock.return_value.pipeline.return_value.execute.assert_called_once()
| TestDefaultClient |
python | realpython__materials | tic-tac-toe-ai-python/source_code_bonus/tic-tac-toe/library/src/tic_tac_toe/game/engine.py | {
"start": 417,
"end": 1321
class ____:
    """Game loop tying two players to a renderer until the game ends."""

    player1: Player
    player2: Player
    renderer: Renderer
    error_handler: ErrorHandler | None = None

    def __post_init__(self):
        # Reject invalid player pairings up front.
        validate_players(self.player1, self.player2)

    def play(self, starting_mark: Mark = Mark("X")) -> None:
        """Run the game: render each state and let players alternate moves."""
        game_state = GameState(Grid(), starting_mark)
        self.renderer.render(game_state)
        while not game_state.game_over:
            mover = self.get_current_player(game_state)
            try:
                game_state = mover.make_move(game_state)
            except InvalidMove as ex:
                # Delegate bad moves to the configured handler, if any;
                # the unchanged state is simply re-rendered.
                if self.error_handler:
                    self.error_handler(ex)
            self.renderer.render(game_state)

    def get_current_player(self, game_state: GameState) -> Player:
        """Return whichever player owns the mark that moves next."""
        return (
            self.player1
            if game_state.current_mark is self.player1.mark
            else self.player2
        )
| TicTacToe |
python | getsentry__sentry | tests/sentry/integrations/slack/notifications/test_issue_alert.py | {
"start": 2068,
"end": 42486
} | class ____(SlackActivityNotificationTest, PerformanceIssueTestCase):
def setUp(self) -> None:
super().setUp()
action_data = {
"id": "sentry.mail.actions.NotifyEmailAction",
"targetType": "Member",
"targetIdentifier": str(self.user.id),
}
self.rule = Rule.objects.create(
project=self.project,
label="ja rule",
data={
"match": "all",
"actions": [action_data],
},
)
def test_issue_alert_user_block(self) -> None:
"""
Test that issues alert are sent to a Slack user with the proper payload when block kit is
enabled.
"""
event = self.store_event(
data={"message": "Hello world", "level": "error"}, project_id=self.project.id
)
notification_uuid = str(uuid.uuid4())
notification = AlertRuleNotification(
Notification(event=event, rule=self.rule),
ActionTargetType.MEMBER,
self.user.id,
notification_uuid=notification_uuid,
)
with self.tasks():
notification.send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
assert (
fallback_text
== f"Alert triggered <http://testserver/organizations/{event.organization.slug}/alerts/rules/{event.project.slug}/{self.rule.id}/details/|ja rule>"
)
assert blocks[0]["text"]["text"] == fallback_text
assert event.group
assert (
blocks[1]["text"]["text"]
== f":red_circle: <http://testserver/organizations/{event.organization.slug}/issues/{event.group.id}/?referrer=issue_alert-slack¬ification_uuid={notification_uuid}&alert_rule_id={self.rule.id}&alert_type=issue|*Hello world*>"
)
assert (
blocks[4]["elements"][0]["text"]
== f"{event.project.slug} | <http://testserver/settings/account/notifications/alerts/?referrer=issue_alert-slack-user¬ification_uuid={notification_uuid}&organizationId={event.organization.id}|Notification Settings>"
)
@responses.activate
@mock.patch("sentry.integrations.slack.message_builder.issues.get_tags", new=fake_get_tags)
@mock.patch(
"sentry.services.eventstore.models.GroupEvent.occurrence",
return_value=TEST_PERF_ISSUE_OCCURRENCE,
new_callable=mock.PropertyMock,
)
def test_performance_issue_alert_user_block(self, occurrence) -> None:
"""
Test that performance issue alerts are sent to a Slack user with the proper payload when
block kit is enabled.
"""
event = self.create_performance_issue()
# this is a PerformanceNPlusOneGroupType event
notification = AlertRuleNotification(
Notification(event=event, rule=self.rule), ActionTargetType.MEMBER, self.user.id
)
with self.tasks():
notification.send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
assert (
fallback_text
== f"Alert triggered <http://testserver/organizations/{event.organization.slug}/alerts/rules/{event.project.slug}/{self.rule.id}/details/|ja rule>"
)
assert blocks[0]["text"]["text"] == fallback_text
self.assert_performance_issue_blocks_with_culprit_blocks(
blocks,
event.organization,
event.project.slug,
event.group,
"issue_alert-slack",
alert_type=FineTuningAPIKey.ALERTS,
issue_link_extra_params=f"&alert_rule_id={self.rule.id}&alert_type=issue",
)
@mock.patch("sentry.integrations.slack.message_builder.issues.get_tags", new=fake_get_tags)
@responses.activate
def test_crons_issue_alert_user_block(self) -> None:
orig_event = self.store_event(
data={"message": "Hello world", "level": "error"}, project_id=self.project.id
)
event = orig_event.for_group(orig_event.groups[0])
occurrence = IssueOccurrence(
uuid.uuid4().hex,
self.project.id,
uuid.uuid4().hex,
["some-fingerprint"],
"something bad happened",
"it was bad",
"1234",
{"Test": 123},
[
IssueEvidence("Evidence 1", "Value 1", True),
IssueEvidence("Evidence 2", "Value 2", False),
IssueEvidence("Evidence 3", "Value 3", False),
],
MonitorIncidentType,
datetime.now(UTC),
"info",
"/api/123",
)
occurrence.save()
event.occurrence = occurrence
event.group.type = MonitorIncidentType.type_id
notification = AlertRuleNotification(
Notification(event=event, rule=self.rule), ActionTargetType.MEMBER, self.user.id
)
with self.tasks():
notification.send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
assert (
fallback_text
== f"Alert triggered <http://testserver/organizations/{event.organization.slug}/alerts/rules/{event.project.slug}/{self.rule.id}/details/|ja rule>"
)
assert len(blocks) == 5
@patch(
"sentry.services.eventstore.models.GroupEvent.occurrence",
return_value=TEST_ISSUE_OCCURRENCE,
new_callable=mock.PropertyMock,
)
def test_generic_issue_alert_user_block(self, occurrence: MagicMock) -> None:
"""
Test that generic issue alerts are sent to a Slack user with the proper payload when
block kit is enabled.
"""
event = self.store_event(
data={"message": "Hellboy's world", "level": "error"}, project_id=self.project.id
)
group_event = event.for_group(event.groups[0])
notification = AlertRuleNotification(
Notification(event=group_event, rule=self.rule), ActionTargetType.MEMBER, self.user.id
)
with self.tasks():
notification.send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
assert (
fallback_text
== f"Alert triggered <http://testserver/organizations/{event.organization.slug}/alerts/rules/{event.project.slug}/{self.rule.id}/details/|ja rule>"
)
assert blocks[0]["text"]["text"] == fallback_text
self.assert_generic_issue_blocks(
blocks,
group_event.organization,
event.project.slug,
event.group,
"issue_alert-slack",
alert_type="alerts",
issue_link_extra_params=f"&alert_rule_id={self.rule.id}&alert_type=issue",
)
@patch(
"sentry.services.eventstore.models.GroupEvent.occurrence",
return_value=TEST_ISSUE_OCCURRENCE,
new_callable=mock.PropertyMock,
)
@with_feature("organizations:workflow-engine-trigger-actions")
def test_generic_issue_alert_user_block_workflow_engine_dual_write(
self, occurrence: MagicMock
) -> None:
"""
Tests that we build links correctly when dual writing
"""
event = self.store_event(
data={"message": "Hellboy's world", "level": "error"}, project_id=self.project.id
)
group_event = event.for_group(event.groups[0])
# Create a rule with the legacy rule id being another rule
rule = self.create_project_rule(
project=self.project,
action_data=[{"legacy_rule_id": self.rule.id}],
name="ja rule",
)
notification = AlertRuleNotification(
Notification(event=group_event, rule=rule), ActionTargetType.MEMBER, self.user.id
)
with self.tasks():
notification.send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
# Assert we are using the legacy rule id
assert (
fallback_text
== f"Alert triggered <http://testserver/organizations/{event.organization.slug}/alerts/rules/{event.project.slug}/{self.rule.id}/details/|ja rule>"
)
assert blocks[0]["text"]["text"] == fallback_text
self.assert_generic_issue_blocks(
blocks,
group_event.organization,
event.project.slug,
event.group,
"issue_alert-slack",
alert_type="alerts",
issue_link_extra_params=f"&alert_rule_id={self.rule.id}&alert_type=issue",
)
@patch(
"sentry.services.eventstore.models.GroupEvent.occurrence",
return_value=TEST_ISSUE_OCCURRENCE,
new_callable=mock.PropertyMock,
)
@with_feature("organizations:workflow-engine-ui-links")
def test_generic_issue_alert_user_block_workflow_engine_ui_links(
self, occurrence: MagicMock
) -> None:
"""
Tests that we build links correctly when dual writing
"""
event = self.store_event(
data={"message": "Hellboy's world", "level": "error"}, project_id=self.project.id
)
group_event = event.for_group(event.groups[0])
rule = self.create_project_rule(
project=self.project,
action_data=[{"workflow_id": "1234567890"}],
name="ja rule",
)
notification = AlertRuleNotification(
Notification(event=group_event, rule=rule), ActionTargetType.MEMBER, self.user.id
)
with self.tasks():
notification.send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
# Assert we are using the workflow id and created a link to the workflow
assert (
fallback_text
== f"Alert triggered <http://testserver/organizations/{event.organization.id}/monitors/alerts/1234567890/|ja rule>"
)
assert blocks[0]["text"]["text"] == fallback_text
self.assert_generic_issue_blocks(
blocks,
group_event.organization,
event.project.slug,
event.group,
"issue_alert-slack",
alert_type="alerts",
issue_link_extra_params="&workflow_id=1234567890&alert_type=issue",
)
def test_disabled_org_integration_for_user(self) -> None:
with assume_test_silo_mode(SiloMode.CONTROL):
OrganizationIntegration.objects.get(integration=self.integration).update(
status=ObjectStatus.DISABLED
)
event = self.store_event(
data={"message": "Hello world", "level": "error"}, project_id=self.project.id
)
notification = AlertRuleNotification(
Notification(event=event, rule=self.rule), ActionTargetType.MEMBER, self.user.id
)
with self.tasks():
notification.send()
assert not self.mock_post.called
def test_issue_alert_issue_owners_block(self) -> None:
"""
Test that issue alerts are sent to issue owners in Slack with the proper payload when block
kit is enabled.
"""
event = self.store_event(
data={"message": "Hello world", "level": "error"}, project_id=self.project.id
)
action_data = {
"id": "sentry.mail.actions.NotifyEmailAction",
"targetType": "IssueOwners",
"targetIdentifier": "",
}
rule = Rule.objects.create(
project=self.project,
label="ja rule",
data={
"match": "all",
"actions": [action_data],
},
)
ProjectOwnership.objects.create(project_id=self.project.id)
notification = AlertRuleNotification(
Notification(event=event, rule=rule),
ActionTargetType.ISSUE_OWNERS,
self.user.id,
FallthroughChoiceType.ACTIVE_MEMBERS,
)
with self.tasks():
notification.send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
notification_uuid = notification.notification_uuid
assert (
fallback_text
== f"Alert triggered <http://testserver/organizations/{event.organization.slug}/alerts/rules/{event.project.slug}/{rule.id}/details/|ja rule>"
)
assert blocks[0]["text"]["text"] == fallback_text
assert event.group
assert (
blocks[1]["text"]["text"]
== f":red_circle: <http://testserver/organizations/{event.organization.slug}/issues/{event.group.id}/?referrer=issue_alert-slack¬ification_uuid={notification_uuid}&alert_rule_id={rule.id}&alert_type=issue|*Hello world*>"
)
assert (
blocks[4]["elements"][0]["text"]
== f"{event.project.slug} | <http://testserver/settings/account/notifications/alerts/?referrer=issue_alert-slack-user¬ification_uuid={notification_uuid}&organizationId={event.organization.id}|Notification Settings>"
)
def test_issue_alert_issue_owners_environment_block(self) -> None:
"""
Test that issue alerts are sent to issue owners in Slack with the environment in the query
params when the alert rule filters by environment and block kit is enabled.
"""
environment = self.create_environment(self.project, name="production")
event = self.store_event(
data={"message": "Hello world", "level": "error", "environment": environment.name},
project_id=self.project.id,
)
action_data = {
"id": "sentry.mail.actions.NotifyEmailAction",
"targetType": "IssueOwners",
"targetIdentifier": "",
}
rule = Rule.objects.create(
project=self.project,
label="ja rule",
data={
"match": "all",
"actions": [action_data],
},
)
rule = self.create_project_rule(
project=self.project,
action_data=[action_data],
name="ja rule",
environment_id=environment.id,
)
ProjectOwnership.objects.create(project_id=self.project.id)
notification = AlertRuleNotification(
Notification(event=event, rule=rule),
ActionTargetType.ISSUE_OWNERS,
self.user.id,
FallthroughChoiceType.ACTIVE_MEMBERS,
)
with self.tasks():
notification.send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
notification_uuid = notification.notification_uuid
assert (
fallback_text
== f"Alert triggered <http://testserver/organizations/{event.organization.slug}/alerts/rules/{event.project.slug}/{rule.id}/details/|ja rule>"
)
assert blocks[0]["text"]["text"] == fallback_text
assert event.group
assert (
blocks[1]["text"]["text"]
== f":red_circle: <http://testserver/organizations/{event.organization.slug}/issues/{event.group.id}/?referrer=issue_alert-slack¬ification_uuid={notification_uuid}&environment=production&alert_rule_id={rule.id}&alert_type=issue|*Hello world*>"
)
assert (
blocks[4]["elements"][0]["text"]
== f"{event.project.slug} | {environment.name} | <http://testserver/settings/account/notifications/alerts/?referrer=issue_alert-slack-user¬ification_uuid={notification_uuid}&organizationId={event.organization.id}|Notification Settings>"
)
@responses.activate
def test_issue_alert_team_issue_owners_block(self) -> None:
"""
Test that issue alerts are sent to a team in Slack via an Issue Owners rule action with the
proper payload when block kit is enabled.
"""
# add a second user to the team so we can be sure it's only
# sent once (to the team, and not to each individual user)
user2 = self.create_user(is_superuser=False)
self.create_member(teams=[self.team], user=user2, organization=self.organization)
with assume_test_silo_mode(SiloMode.CONTROL):
self.idp = self.create_identity_provider(type="slack", external_id="TXXXXXXX2")
self.identity = Identity.objects.create(
external_id="UXXXXXXX2",
idp=self.idp,
user=user2,
status=IdentityStatus.VALID,
scopes=[],
)
# update the team's notification settings
ExternalActor.objects.create(
team_id=self.team.id,
organization=self.organization,
integration_id=self.integration.id,
provider=ExternalProviders.SLACK.value,
external_name="goma",
external_id="CXXXXXXX2",
)
with assume_test_silo_mode(SiloMode.CONTROL):
# provider is disabled by default
NotificationSettingProvider.objects.create(
team_id=self.team.id,
scope_type="team",
scope_identifier=self.team.id,
provider="slack",
type="alerts",
value="always",
)
g_rule = GrammarRule(Matcher("path", "*"), [Owner("team", self.team.slug)])
ProjectOwnership.objects.create(project_id=self.project.id, schema=dump_schema([g_rule]))
event = self.store_event(
data={
"message": "Hello world",
"level": "error",
"stacktrace": {"frames": [{"filename": "foo.py"}]},
},
project_id=self.project.id,
)
action_data = {
"id": "sentry.mail.actions.NotifyEmailAction",
"targetType": "IssueOwners",
"targetIdentifier": "",
}
rule = Rule.objects.create(
project=self.project,
label="ja rule",
data={
"match": "all",
"actions": [action_data],
},
)
notification = AlertRuleNotification(
Notification(event=event, rule=rule), ActionTargetType.ISSUE_OWNERS, self.team.id
)
with self.tasks():
notification.send()
# check that only one was sent out - more would mean each user is being notified
# rather than the team
assert self.mock_post.call_count == 1
# check that the team got a notification
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
channel = self.mock_post.call_args.kwargs["channel"]
assert channel == "CXXXXXXX2"
notification_uuid = notification.notification_uuid
assert (
fallback_text
== f"Alert triggered <http://testserver/organizations/{event.organization.slug}/alerts/rules/{event.project.slug}/{rule.id}/details/|ja rule>"
)
assert blocks[0]["text"]["text"] == fallback_text
assert event.group
assert (
blocks[1]["text"]["text"]
== f":red_circle: <http://testserver/organizations/{event.organization.slug}/issues/{event.group.id}/?referrer=issue_alert-slack¬ification_uuid={notification_uuid}&alert_rule_id={rule.id}&alert_type=issue|*Hello world*>"
)
assert blocks[6]["elements"][0]["text"] == f"Suggested Assignees: #{self.team.slug}"
assert (
blocks[7]["elements"][0]["text"]
== f"{event.project.slug} | <http://testserver/settings/{event.organization.slug}/teams/{self.team.slug}/notifications/?referrer=issue_alert-slack-team¬ification_uuid={notification_uuid}|Notification Settings>"
)
@responses.activate
def test_disabled_org_integration_for_team(self) -> None:
# update the team's notification settings
ExternalActor.objects.create(
team_id=self.team.id,
organization=self.organization,
integration_id=self.integration.id,
provider=ExternalProviders.SLACK.value,
external_name="goma",
external_id="CXXXXXXX2",
)
with assume_test_silo_mode(SiloMode.CONTROL):
OrganizationIntegration.objects.get(integration=self.integration).update(
status=ObjectStatus.DISABLED
)
grammar_rule = GrammarRule(Matcher("path", "*"), [Owner("team", self.team.slug)])
ProjectOwnership.objects.create(
project_id=self.project.id, schema=dump_schema([grammar_rule])
)
event = self.store_event(
data={
"message": "Hello world",
"level": "error",
"stacktrace": {"frames": [{"filename": "foo.py"}]},
},
project_id=self.project.id,
)
action_data = {
"id": "sentry.mail.actions.NotifyEmailAction",
"targetType": "IssueOwners",
"targetIdentifier": "",
}
rule = Rule.objects.create(
project=self.project,
label="ja rule",
data={
"match": "all",
"actions": [action_data],
},
)
notification = AlertRuleNotification(
Notification(event=event, rule=rule), ActionTargetType.ISSUE_OWNERS, self.team.id
)
with self.tasks():
notification.send()
# org integrationon disabled
assert self.mock_post.call_count == 0
@patch.object(sentry, "digests")
def test_issue_alert_team_issue_owners_user_settings_off_digests(
self, digests: MagicMock
) -> None:
"""Test that issue alerts are sent to a team in Slack via an Issue Owners rule action
even when the users' issue alert notification settings are off and digests are triggered."""
backend = RedisBackend()
digests.backend.digest = backend.digest
digests.enabled.return_value = True
# turn off the user's issue alert notification settings
# there was a bug where issue alerts to a team's Slack channel
# were only firing if this was set to ALWAYS
with assume_test_silo_mode(SiloMode.CONTROL):
NotificationSettingOption.objects.create(
user_id=self.user.id,
scope_type="user",
scope_identifier=self.user.id,
type="alerts",
value="never",
)
# add a second user to the team so we can be sure it's only
# sent once (to the team, and not to each individual user)
user2 = self.create_user(is_superuser=False)
self.create_member(teams=[self.team], user=user2, organization=self.organization)
with assume_test_silo_mode(SiloMode.CONTROL):
self.idp = self.create_identity_provider(type="slack", external_id="TXXXXXXX2")
self.identity = Identity.objects.create(
external_id="UXXXXXXX2",
idp=self.idp,
user=user2,
status=IdentityStatus.VALID,
scopes=[],
)
# update the team's notification settings
ExternalActor.objects.create(
team_id=self.team.id,
organization=self.organization,
integration_id=self.integration.id,
provider=ExternalProviders.SLACK.value,
external_name="goma",
external_id="CXXXXXXX2",
)
with assume_test_silo_mode(SiloMode.CONTROL):
NotificationSettingProvider.objects.create(
team_id=self.team.id,
scope_type="team",
scope_identifier=self.team.id,
provider="slack",
type="alerts",
value="always",
)
g_rule = GrammarRule(Matcher("path", "*"), [Owner("team", self.team.slug)])
ProjectOwnership.objects.create(project_id=self.project.id, schema=dump_schema([g_rule]))
event = self.store_event(
data={
"message": "Hello world",
"level": "error",
"stacktrace": {"frames": [{"filename": "foo.py"}]},
},
project_id=self.project.id,
)
action_data = {
"id": "sentry.mail.actions.NotifyEmailAction",
"targetType": "IssueOwners",
"targetIdentifier": "",
}
rule = Rule.objects.create(
project=self.project,
label="ja rule",
data={
"match": "all",
"actions": [action_data],
},
)
key = f"mail:p:{self.project.id}"
backend.add(key, event_to_record(event, [rule]), increment_delay=0, maximum_delay=0)
with self.tasks():
deliver_digest(key)
# check that only one was sent out - more would mean each user is being notified
# rather than the team
# check that the team got a notification
assert self.mock_post.call_args.kwargs["channel"] == "CXXXXXXX2"
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
assert "Hello world" in blocks[1]["text"]["text"]
title_link = blocks[1]["text"]["text"][13:][1:-1]
notification_uuid = self.get_notification_uuid(title_link)
assert event.group
assert (
blocks[-2]["elements"][0]["text"]
== f"{self.project.slug} | <http://testserver/settings/{self.organization.slug}/teams/{self.team.slug}/notifications/?referrer=issue_alert-slack-team¬ification_uuid={notification_uuid}|Notification Settings>"
)
def test_issue_alert_team_block(self) -> None:
"""Test that issue alerts are sent to a team in Slack when block kit is enabled."""
# add a second organization
org = self.create_organization(owner=self.user)
with assume_test_silo_mode(SiloMode.CONTROL):
self.create_organization_integration(
organization_id=org.id, integration=self.integration
)
# add a second user to the team so we can be sure it's only
# sent once (to the team, and not to each individual user)
user2 = self.create_user(is_superuser=False)
self.create_member(teams=[self.team], user=user2, organization=self.organization)
with assume_test_silo_mode(SiloMode.CONTROL):
self.idp = self.create_identity_provider(type="slack", external_id="TXXXXXXX2")
self.identity = Identity.objects.create(
external_id="UXXXXXXX2",
idp=self.idp,
user=user2,
status=IdentityStatus.VALID,
scopes=[],
)
# update the team's notification settings
ExternalActor.objects.create(
team_id=self.team.id,
organization=self.organization,
integration_id=self.integration.id,
provider=ExternalProviders.SLACK.value,
external_name="goma",
external_id="CXXXXXXX2",
)
with assume_test_silo_mode(SiloMode.CONTROL):
NotificationSettingProvider.objects.create(
team_id=self.team.id,
scope_type="team",
scope_identifier=self.team.id,
provider="slack",
type="alerts",
value="always",
)
event = self.store_event(
data={"message": "Hello world", "level": "error"}, project_id=self.project.id
)
action_data = {
"id": "sentry.mail.actions.NotifyEmailAction",
"targetType": "Team",
"targetIdentifier": str(self.team.id),
}
rule = Rule.objects.create(
project=self.project,
label="ja rule",
data={
"match": "all",
"actions": [action_data],
},
)
notification = Notification(event=event, rule=rule)
with self.options({"system.url-prefix": "http://example.com"}), self.tasks():
self.adapter.notify(notification, ActionTargetType.TEAM, self.team.id)
# check that only one was sent out - more would mean each user is being notified
# rather than the team
assert self.mock_post.call_count == 1
# check that the team got a notification
assert self.mock_post.call_args.kwargs["channel"] == "CXXXXXXX2"
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
notification_uuid = self.get_notification_uuid(blocks[1]["text"]["text"])
assert (
fallback_text
== f"Alert triggered <http://example.com/organizations/{event.organization.slug}/alerts/rules/{event.project.slug}/{rule.id}/details/|ja rule>"
)
assert blocks[0]["text"]["text"] == fallback_text
assert event.group
assert (
blocks[1]["text"]["text"]
== f":red_circle: <http://example.com/organizations/{event.organization.slug}/issues/{event.group.id}/?referrer=issue_alert-slack¬ification_uuid={notification_uuid}&alert_rule_id={rule.id}&alert_type=issue|*Hello world*>"
)
assert (
blocks[5]["elements"][0]["text"]
== f"{event.project.slug} | <http://example.com/settings/{event.organization.slug}/teams/{self.team.slug}/notifications/?referrer=issue_alert-slack-team¬ification_uuid={notification_uuid}|Notification Settings>"
)
def test_issue_alert_team_new_project(self) -> None:
"""Test that issue alerts are sent to a team in Slack when the team has added a new project"""
# add a second user to the team so we can be sure it's only
# sent once (to the team, and not to each individual user)
user2 = self.create_user(is_superuser=False)
self.create_member(teams=[self.team], user=user2, organization=self.organization)
with assume_test_silo_mode(SiloMode.CONTROL):
self.idp = self.create_identity_provider(type="slack", external_id="TXXXXXXX2")
self.identity = Identity.objects.create(
external_id="UXXXXXXX2",
idp=self.idp,
user=user2,
status=IdentityStatus.VALID,
scopes=[],
)
# update the team's notification settings
ExternalActor.objects.create(
team_id=self.team.id,
organization=self.organization,
integration_id=self.integration.id,
provider=ExternalProviders.SLACK.value,
external_name="goma",
external_id="CXXXXXXX2",
)
with assume_test_silo_mode(SiloMode.CONTROL):
NotificationSettingProvider.objects.create(
team_id=self.team.id,
scope_type="team",
scope_identifier=self.team.id,
provider="slack",
type="alerts",
value="always",
)
# add a new project
project2 = self.create_project(
name="hellboy", organization=self.organization, teams=[self.team]
)
event = self.store_event(
data={"message": "Hello world", "level": "error"}, project_id=project2.id
)
action_data = {
"id": "sentry.mail.actions.NotifyEmailAction",
"targetType": "Team",
"targetIdentifier": str(self.team.id),
}
rule = Rule.objects.create(
project=project2,
label="ja rule",
data={
"match": "all",
"actions": [action_data],
},
)
notification = Notification(event=event, rule=rule)
with self.options({"system.url-prefix": "http://example.com"}), self.tasks():
self.adapter.notify(notification, ActionTargetType.TEAM, self.team.id)
# check that only one was sent out - more would mean each user is being notified
# rather than the team
assert self.mock_post.call_count == 1
# check that the team got a notification
assert self.mock_post.call_args.kwargs["channel"] == "CXXXXXXX2"
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
assert "Hello world" in blocks[1]["text"]["text"]
title_link = blocks[1]["text"]["text"][13:][1:-1]
notification_uuid = self.get_notification_uuid(title_link)
assert event.group
assert (
blocks[-2]["elements"][0]["text"]
== f"{project2.slug} | <http://example.com/settings/{self.organization.slug}/teams/{self.team.slug}/notifications/?referrer=issue_alert-slack-team¬ification_uuid={notification_uuid}|Notification Settings>"
)
def test_not_issue_alert_team_removed_project(self) -> None:
"""Test that issue alerts are not sent to a team in Slack when the team has removed the project the issue belongs to"""
# create the team's notification settings
ExternalActor.objects.create(
team_id=self.team.id,
organization=self.organization,
integration_id=self.integration.id,
provider=ExternalProviders.SLACK.value,
external_name="goma",
external_id="CXXXXXXX2",
)
with assume_test_silo_mode(SiloMode.CONTROL):
NotificationSettingProvider.objects.create(
team_id=self.team.id,
scope_type="team",
scope_identifier=self.team.id,
provider="slack",
type="alerts",
value="always",
)
# remove the project from the team
self.project.remove_team(self.team)
event = self.store_event(
data={"message": "Hello world", "level": "error"}, project_id=self.project.id
)
action_data = {
"id": "sentry.mail.actions.NotifyEmailAction",
"targetType": "Team",
"targetIdentifier": str(self.team.id),
}
rule = Rule.objects.create(
project=self.project,
label="ja rule",
data={
"match": "all",
"actions": [action_data],
},
)
notification = Notification(event=event, rule=rule)
with self.options({"system.url-prefix": "http://example.com"}), self.tasks():
self.adapter.notify(notification, ActionTargetType.TEAM, self.team.id)
assert self.mock_post.call_count == 0
def test_issue_alert_team_fallback(self) -> None:
"""Test that issue alerts are sent to each member of a team in Slack."""
user2 = self.create_user(is_superuser=False)
self.create_member(teams=[self.team], user=user2, organization=self.organization)
with assume_test_silo_mode(SiloMode.CONTROL):
self.identity = Identity.objects.create(
external_id="UXXXXXXX2",
idp=self.idp,
user=user2,
status=IdentityStatus.VALID,
scopes=[],
)
event = self.store_event(
data={"message": "Hello world", "level": "error"}, project_id=self.project.id
)
action_data = {
"id": "sentry.mail.actions.NotifyEmailAction",
"targetType": "Team",
"targetIdentifier": str(self.team.id),
}
rule = Rule.objects.create(
project=self.project,
label="ja rule",
data={
"match": "all",
"actions": [action_data],
},
)
notification = Notification(event=event, rule=rule)
with self.options({"system.url-prefix": "http://example.com"}), self.tasks():
self.adapter.notify(notification, ActionTargetType.TEAM, self.team.id)
assert self.mock_post.call_count == 2
# check that self.user got a notification
call1 = self.mock_post.call_args_list[0].kwargs
call2 = self.mock_post.call_args_list[1].kwargs
# don't assume a particular order
data = call1 if call1["channel"] == "UXXXXXXX1" else call2
data2 = call2 if call2["channel"] == "UXXXXXXX2" else call1
assert data["channel"] == "UXXXXXXX1"
blocks = orjson.loads(data["blocks"])
assert "Hello world" in blocks[1]["text"]["text"]
title_link = blocks[1]["text"]["text"]
notification_uuid = self.get_notification_uuid(title_link)
assert event.group
assert (
blocks[-2]["elements"][0]["text"]
== f"{self.project.slug} | <http://example.com/settings/account/notifications/alerts/?referrer=issue_alert-slack-user¬ification_uuid={notification_uuid}&organizationId={event.organization.id}|Notification Settings>"
)
# check that user2 got a notification as well
assert data2["channel"] == "UXXXXXXX2"
assert "blocks" in data2
blocks = orjson.loads(data2["blocks"])
assert "Hello world" in blocks[1]["text"]["text"]
title_link = blocks[1]["text"]["text"]
notification_uuid = self.get_notification_uuid(title_link)
assert event.group
assert (
blocks[-2]["elements"][0]["text"]
== f"{self.project.slug} | <http://example.com/settings/account/notifications/alerts/?referrer=issue_alert-slack-user¬ification_uuid={notification_uuid}&organizationId={event.organization.id}|Notification Settings>"
)
@patch.object(sentry, "digests")
def test_digest_enabled_block(self, digests: MagicMock) -> None:
"""
Test that with digests enabled, but Slack notification settings
(and not email settings) enabled, we send a Slack notification with the proper
payload when block kit is enabled.
"""
backend = RedisBackend()
digests.backend.digest = backend.digest
digests.enabled.return_value = True
rule = self.create_project_rule(project=self.project)
ProjectOwnership.objects.create(project_id=self.project.id)
event = self.store_event(
data={"message": "Hello world", "level": "error"}, project_id=self.project.id
)
key = f"mail:p:{self.project.id}:IssueOwners::AllMembers"
backend.add(key, event_to_record(event, [rule]), increment_delay=0, maximum_delay=0)
with self.tasks():
deliver_digest(key)
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
notification_uuid = self.get_notification_uuid(blocks[1]["text"]["text"])
assert (
fallback_text
== f"Alert triggered <http://testserver/organizations/{event.organization.slug}/alerts/rules/{event.project.slug}/{rule.id}/details/|Test Alert>"
)
assert blocks[0]["text"]["text"] == fallback_text
assert event.group
assert (
blocks[1]["text"]["text"]
== f":red_circle: <http://testserver/organizations/{event.organization.slug}/issues/{event.group.id}/?referrer=issue_alert-slack¬ification_uuid={notification_uuid}&alert_rule_id={rule.id}&alert_type=issue|*Hello world*>"
)
assert (
blocks[4]["elements"][0]["text"]
== f"{event.project.slug} | <http://testserver/settings/account/notifications/alerts/?referrer=issue_alert-slack-user¬ification_uuid={notification_uuid}&organizationId={event.organization.id}|Notification Settings>"
)
| SlackIssueAlertNotificationTest |
python | automl__auto-sklearn | autosklearn/util/logging_.py | {
"start": 6908,
"end": 8116
} | class ____(PickableLoggerAdapter):
def __init__(self, name: str, host: str, port: int):
self.name = name
self.host = host
self.port = port
self.logger = _get_named_client_logger(name=name, host=host, port=port)
def __getstate__(self) -> Dict[str, Any]:
"""
Method is called when pickle dumps an object.
Returns
-------
Dictionary, representing the object state to be pickled. Ignores
the self.logger field and only returns the logger name.
"""
return {
"name": self.name,
"host": self.host,
"port": self.port,
}
def __setstate__(self, state: Dict[str, Any]) -> None:
"""
Method is called when pickle loads an object. Retrieves the name and
creates a logger.
Parameters
----------
state - dictionary, containing the logger name.
"""
self.name = state["name"]
self.host = state["host"]
self.port = state["port"]
self.logger = _get_named_client_logger(
name=self.name,
host=self.host,
port=self.port,
)
| PicklableClientLogger |
python | PrefectHQ__prefect | src/prefect/client/schemas/objects.py | {
"start": 29627,
"end": 31235
} | class ____(PrefectBaseModel):
"""
A Prefect Cloud workspace.
Expected payload for each workspace returned by the `me/workspaces` route.
"""
account_id: UUID = Field(..., description="The account id of the workspace.")
account_name: str = Field(..., description="The account name.")
account_handle: str = Field(..., description="The account's unique handle.")
workspace_id: UUID = Field(..., description="The workspace id.")
workspace_name: str = Field(..., description="The workspace name.")
workspace_description: str = Field(..., description="Description of the workspace.")
workspace_handle: str = Field(..., description="The workspace's unique handle.")
model_config: ClassVar[ConfigDict] = ConfigDict(extra="ignore")
@property
def handle(self) -> str:
"""
The full handle of the workspace as `account_handle` / `workspace_handle`
"""
return self.account_handle + "/" + self.workspace_handle
def api_url(self) -> str:
"""
Generate the API URL for accessing this workspace
"""
return (
f"{PREFECT_CLOUD_API_URL.value()}"
f"/accounts/{self.account_id}"
f"/workspaces/{self.workspace_id}"
)
def ui_url(self) -> str:
"""
Generate the UI URL for accessing this workspace
"""
return (
f"{PREFECT_CLOUD_UI_URL.value()}"
f"/account/{self.account_id}"
f"/workspace/{self.workspace_id}"
)
def __hash__(self) -> int:
return hash(self.handle)
| Workspace |
python | pytorch__pytorch | tools/stats/utilization_stats_lib.py | {
"start": 796,
"end": 1171
} | class ____(DataClassJsonMixin): # type: ignore[misc, no-any-unimported]
uuid: str | None = None
util_percent: UtilizationStats | None = None
mem_util_percent: UtilizationStats | None = None
allocated_mem_percent: UtilizationStats | None = None
allocated_mem_value: UtilizationStats | None = None
total_mem_value: float | None = None
@dataclass
| GpuUsage |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 61530,
"end": 61882
} | class ____(BaseModel):
usage: Optional["Usage"] = Field(default=None, description="")
time: Optional[float] = Field(default=None, description="Time spent to process this request")
status: Optional[str] = Field(default=None, description="")
result: Optional["CollectionClusterInfo"] = Field(default=None, description="")
| InlineResponse2007 |
python | spack__spack | lib/spack/spack/test/cmd/develop.py | {
"start": 706,
"end": 13920
} | class ____:
def check_develop(self, env, spec, path=None, build_dir=None):
path = path or spec.name
# check in memory representation
assert spec.name in env.dev_specs
dev_specs_entry = env.dev_specs[spec.name]
assert dev_specs_entry["path"] == path
assert dev_specs_entry["spec"] == str(spec)
# check yaml representation
dev_config = spack.config.get("develop", {})
assert spec.name in dev_config
yaml_entry = dev_config[spec.name]
assert yaml_entry["spec"] == str(spec)
if path == spec.name:
# default paths aren't written out
assert "path" not in yaml_entry
else:
assert yaml_entry["path"] == path
if build_dir is not None:
scope = env.scope_name
assert build_dir == spack.config.get(
"packages:{}:package_attributes:build_directory".format(spec.name), scope
)
def test_develop_no_path_no_clone(self):
env("create", "test")
with ev.read("test") as e:
# develop checks that the path exists
fs.mkdirp(os.path.join(e.path, "mpich"))
develop("--no-clone", "mpich@1.0")
self.check_develop(e, spack.spec.Spec("mpich@=1.0"))
def test_develop_no_clone(self, tmp_path: pathlib.Path):
env("create", "test")
with ev.read("test") as e:
develop("--no-clone", "-p", str(tmp_path), "mpich@1.0")
self.check_develop(e, spack.spec.Spec("mpich@=1.0"), str(tmp_path))
def test_develop_no_version(self, tmp_path: pathlib.Path):
env("create", "test")
with ev.read("test") as e:
develop("--no-clone", "-p", str(tmp_path), "mpich")
self.check_develop(e, spack.spec.Spec("mpich@=main"), str(tmp_path))
def test_develop(self):
env("create", "test")
with ev.read("test") as e:
develop("mpich@1.0")
self.check_develop(e, spack.spec.Spec("mpich@=1.0"))
def test_develop_no_args(self):
env("create", "test")
with ev.read("test") as e:
# develop and remove it
develop("mpich@1.0")
shutil.rmtree(os.path.join(e.path, "mpich"))
# test develop with no args
develop()
self.check_develop(e, spack.spec.Spec("mpich@=1.0"))
def test_develop_build_directory(self):
env("create", "test")
with ev.read("test") as e:
develop("-b", "test_build_dir", "mpich@1.0")
self.check_develop(e, spack.spec.Spec("mpich@=1.0"), None, "test_build_dir")
def test_develop_twice(self):
env("create", "test")
with ev.read("test") as e:
develop("mpich@1.0")
self.check_develop(e, spack.spec.Spec("mpich@=1.0"))
develop("mpich@1.0")
# disk representation isn't updated unless we write
# second develop command doesn't change it, so we don't write
# but we check disk representation
e.write()
self.check_develop(e, spack.spec.Spec("mpich@=1.0"))
assert len(e.dev_specs) == 1
def test_develop_update_path(self, tmp_path: pathlib.Path):
env("create", "test")
with ev.read("test") as e:
develop("mpich@1.0")
develop("-p", str(tmp_path), "mpich@1.0")
self.check_develop(e, spack.spec.Spec("mpich@=1.0"), str(tmp_path))
assert len(e.dev_specs) == 1
def test_develop_update_spec(self):
env("create", "test")
with ev.read("test") as e:
develop("mpich@1.0")
develop("mpich@2.0")
self.check_develop(e, spack.spec.Spec("mpich@=2.0"))
assert len(e.dev_specs) == 1
def test_develop_applies_changes(self, monkeypatch):
env("create", "test")
with ev.read("test") as e:
e.add("mpich@1.0")
e.concretize()
e.write()
monkeypatch.setattr(spack.stage.Stage, "steal_source", lambda x, y: None)
develop("mpich@1.0")
# Check modifications actually worked
spec = next(e.roots())
assert spec.satisfies("dev_path=*")
def test_develop_applies_changes_parents(self, monkeypatch):
env("create", "test")
with ev.read("test") as e:
e.add("hdf5^mpich@1.0")
e.concretize()
e.write()
orig_hash = next(e.roots()).dag_hash()
monkeypatch.setattr(spack.stage.Stage, "steal_source", lambda x, y: None)
develop("mpich@1.0")
# Check modifications actually worked
new_hdf5 = next(e.roots())
assert new_hdf5.dag_hash() != orig_hash
assert new_hdf5["mpi"].satisfies("dev_path=*")
def test_develop_applies_changes_spec_conflict(self, monkeypatch):
env("create", "test")
with ev.read("test") as e:
e.add("mpich@1.0")
e.concretize()
e.write()
monkeypatch.setattr(spack.stage.Stage, "steal_source", lambda x, y: None)
with pytest.raises(ev.SpackEnvironmentDevelopError, match="conflicts with concrete"):
develop("mpich@1.1")
def test_develop_applies_changes_path(self, monkeypatch):
env("create", "test")
with ev.read("test") as e:
e.add("mpich@1.0")
e.concretize()
e.write()
# canonicalize paths relative to env
testpath1 = spack.util.path.canonicalize_path("test/path1", e.path)
testpath2 = spack.util.path.canonicalize_path("test/path2", e.path)
monkeypatch.setattr(spack.stage.Stage, "steal_source", lambda x, y: None)
# Testing that second call to develop successfully changes both config and specs
for path in (testpath1, testpath2):
develop("--path", path, "mpich@1.0")
# Check modifications actually worked
spec = next(e.roots())
assert spec.satisfies(f"dev_path={path}")
assert spack.config.get("develop:mpich:path") == path
def test_develop_no_modify(self, monkeypatch):
env("create", "test")
with ev.read("test") as e:
e.add("mpich@1.0")
e.concretize()
e.write()
monkeypatch.setattr(spack.stage.Stage, "steal_source", lambda x, y: None)
develop("--no-modify-concrete-specs", "mpich@1.0")
# Check modifications were not applied
spec = next(e.roots())
assert not spec.satisfies("dev_path=*")
def test_develop_canonicalize_path(self, monkeypatch):
env("create", "test")
with ev.read("test") as e:
e.add("mpich@1.0")
e.concretize()
e.write()
path = "../$user"
abspath = spack.util.path.canonicalize_path(path, e.path)
def check_path(stage, dest):
assert dest == abspath
monkeypatch.setattr(spack.stage.Stage, "steal_source", check_path)
develop("-p", path, "mpich@1.0")
self.check_develop(e, spack.spec.Spec("mpich@=1.0"), path)
# Check modifications actually worked
spec = next(e.roots())
assert spec.satisfies("dev_path=%s" % abspath)
def test_develop_canonicalize_path_no_args(self, monkeypatch):
env("create", "test")
with ev.read("test") as e:
e.add("mpich@1.0")
e.concretize()
e.write()
path = "$user"
abspath = spack.util.path.canonicalize_path(path, e.path)
def check_path(stage, dest):
assert dest == abspath
monkeypatch.setattr(spack.stage.Stage, "steal_source", check_path)
# Defensive check to ensure canonicalization failures don't pollute FS
assert abspath.startswith(e.path)
# Create path to allow develop to modify env
fs.mkdirp(abspath)
develop("--no-clone", "-p", path, "mpich@1.0")
self.check_develop(e, spack.spec.Spec("mpich@=1.0"), path)
# Remove path to ensure develop with no args runs staging code
os.rmdir(abspath)
develop()
self.check_develop(e, spack.spec.Spec("mpich@=1.0"), path)
# Check modifications actually worked
spec = next(e.roots())
assert spec.satisfies("dev_path=%s" % abspath)
def _git_commit_list(git_repo_dir):
git = spack.util.git.git()
with fs.working_dir(git_repo_dir):
output = git("log", "--pretty=format:%h", "-n", "20", output=str)
return output.strip().split()
def test_develop_full_git_repo(
mutable_mock_env_path,
mock_git_version_info,
install_mockery,
mock_packages,
monkeypatch,
mutable_config,
request,
):
repo_path, filename, commits = mock_git_version_info
monkeypatch.setattr(
spack.package_base.PackageBase, "git", "file://%s" % repo_path, raising=False
)
spec = spack.concretize.concretize_one("git-test-commit@1.2")
try:
spec.package.do_stage()
commits = _git_commit_list(spec.package.stage[0].source_path)
# Outside of "spack develop" Spack will only pull exactly the commit it
# needs, with no additional history
assert len(commits) == 1
finally:
spec.package.do_clean()
# Now use "spack develop": look at the resulting dev_path and make
# sure the git repo pulled includes the full branch history (or rather,
# more than just one commit).
env("create", "test")
with ev.read("test") as e:
add("git-test-commit@1.2")
e.concretize()
e.write()
develop("git-test-commit@1.2")
e.write()
spec = e.all_specs()[0]
develop_dir = spec.variants["dev_path"].value
commits = _git_commit_list(develop_dir)
assert len(commits) > 1
def test_recursive(mutable_mock_env_path, install_mockery, mock_fetch):
env("create", "test")
with ev.read("test") as e:
add("indirect-mpich@1.0")
e.concretize()
e.write()
specs = e.all_specs()
assert len(specs) > 1
develop("--recursive", "mpich")
expected_dev_specs = ["mpich", "direct-mpich", "indirect-mpich"]
for spec in expected_dev_specs:
assert spec in e.dev_specs
spec = next(e.roots())
for dep in spec.traverse():
assert dep.satisfies("dev_path=*") == (dep.name in expected_dev_specs)
def test_develop_fails_with_multiple_concrete_versions(
mutable_mock_env_path, install_mockery, mock_fetch
):
env("create", "test")
with ev.read("test") as e:
add("indirect-mpich@1.0")
add("indirect-mpich@0.9")
e.unify = False
e.concretize()
with pytest.raises(SpackError) as develop_error:
develop("indirect-mpich", fail_on_error=True)
error_str = "has multiple concrete instances in the graph"
assert error_str in str(develop_error.value)
def test_concretize_dev_path_with_at_symbol_in_env(
mutable_mock_env_path, tmp_path: pathlib.Path, mock_packages
):
spec_like = "develop-test@develop"
develop_dir = tmp_path / "build@location"
develop_dir.mkdir()
env("create", "test_at_sym")
with ev.read("test_at_sym") as e:
add(spec_like)
e.concretize()
e.write()
develop(f"--path={develop_dir}", spec_like)
result = e.concrete_roots()
assert len(result) == 1
cspec = result[0]
assert cspec.satisfies(spec_like), cspec
assert cspec.is_develop, cspec
assert str(develop_dir) in cspec.variants["dev_path"], cspec
def _failing_fn(*args, **kwargs):
# This stands in for a function that should never be called as
# part of a test.
assert False
@pytest.mark.parametrize("_devpath_should_exist", [True, False])
@pytest.mark.disable_clean_stage_check
def test_develop_with_devpath_staging(
monkeypatch,
mutable_mock_env_path,
mock_packages,
tmp_path: pathlib.Path,
mock_archive,
install_mockery,
mock_fetch,
mock_resource_fetch,
mock_stage,
_devpath_should_exist,
):
# If the specified develop path exists, a resource should not be
# downloaded at all at install time. Otherwise, it should be.
env("create", "test")
develop_dir = tmp_path / "build@location"
if _devpath_should_exist:
develop_dir.mkdir()
monkeypatch.setattr(URLFetchStrategy, "fetch", _failing_fn)
spec_like = "simple-resource@1.0"
with ev.read("test") as e:
e.add(spec_like)
e.concretize()
e.write()
develop(f"--path={develop_dir}", spec_like)
e.install_all()
expected_resource_path = develop_dir / "resource.tgz"
if _devpath_should_exist:
# If we made it here, we didn't try to download anything.
pass
else:
assert os.path.exists(expected_resource_path)
| TestDevelop |
python | kubernetes-client__python | kubernetes/base/dynamic/discovery.py | {
"start": 13287,
"end": 16494
} | class ____(Discoverer):
""" A convenient container for storing discovered API resources. Allows
easy searching and retrieval of specific resources.
All resources are discovered for the cluster upon object instantiation.
"""
def update(self, resources):
self.__resources = resources
def __init__(self, client, cache_file):
Discoverer.__init__(self, client, cache_file)
def discover(self):
self.__resources = self.parse_api_groups(request_resources=True)
@property
def api_groups(self):
""" list available api groups """
return self.parse_api_groups(request_resources=True, update=True)['apis'].keys()
def search(self, **kwargs):
""" Takes keyword arguments and returns matching resources. The search
will happen in the following order:
prefix: The api prefix for a resource, ie, /api, /oapi, /apis. Can usually be ignored
group: The api group of a resource. Will also be extracted from api_version if it is present there
api_version: The api version of a resource
kind: The kind of the resource
arbitrary arguments (see below), in random order
The arbitrary arguments can be any valid attribute for an Resource object
"""
results = self.__search(self.__build_search(**kwargs), self.__resources)
if not results:
self.invalidate_cache()
results = self.__search(self.__build_search(**kwargs), self.__resources)
return results
def __build_search(self, prefix=None, group=None, api_version=None, kind=None, **kwargs):
if not group and api_version and '/' in api_version:
group, api_version = api_version.split('/')
items = [prefix, group, api_version, kind, kwargs]
return list(map(lambda x: x or '*', items))
def __search(self, parts, resources):
part = parts[0]
resourcePart = resources.get(part)
if part != '*' and resourcePart:
if isinstance(resourcePart, ResourceGroup):
return self.__search(parts[1:], resourcePart.resources)
elif isinstance(resourcePart, dict):
return self.__search(parts[1:], resourcePart)
else:
if parts[1] != '*' and isinstance(parts[1], dict):
for _resource in resourcePart:
for term, value in parts[1].items():
if getattr(_resource, term) == value:
return [_resource]
return []
else:
return resourcePart
elif part == '*':
matches = []
for key in resources.keys():
matches.extend(self.__search([key] + parts[1:], resources))
return matches
return []
def __iter__(self):
for _, groups in self.__resources.items():
for _, versions in groups.items():
for _, resources in versions.items():
for _, resource in resources.items():
yield resource
| EagerDiscoverer |
python | astropy__astropy | astropy/units/function/logarithmic.py | {
"start": 4172,
"end": 4805
} | class ____(LogUnit):
"""Logarithmic physical units expressed in magnitudes.
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
Unit that is encapsulated within the magnitude function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, this is ``mag``, but this allows one to use an equivalent
unit such as ``2 mag``.
"""
@cached_property
def _default_function_unit(self):
from .units import mag
return mag
@property
def _quantity_class(self):
return Magnitude
| MagUnit |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-vectara/destination_vectara/destination.py | {
"start": 519,
"end": 4468
} | class ____(Destination):
def write(
self, config: Mapping[str, Any], configured_catalog: ConfiguredAirbyteCatalog, input_messages: Iterable[AirbyteMessage]
) -> Iterable[AirbyteMessage]:
"""
Reads the input stream of messages, config, and catalog to write data to the destination.
This method returns an iterable (typically a generator of AirbyteMessages via yield) containing state messages received
in the input message stream. Outputting a state message means that every AirbyteRecordMessage which came before it has been
successfully persisted to the destination. This is used to ensure fault tolerance in the case that a sync fails before fully completing,
then the source is given the last state message output from this method as the starting point of the next sync.
:param config: dict of JSON configuration matching the configuration declared in spec.json
:param configured_catalog: The Configured Catalog describing the schema of the data being received and how it should be persisted in the
destination
:param input_messages: The stream of input messages received from the source
:return: Iterable of AirbyteStateMessages wrapped in AirbyteMessage structs
"""
config_model = VectaraConfig.parse_obj(config)
writer = VectaraWriter(
client=VectaraClient(config_model),
text_fields=config_model.text_fields,
title_field=config_model.title_field,
metadata_fields=config_model.metadata_fields,
catalog=configured_catalog,
)
writer.delete_streams_to_overwrite(catalog=configured_catalog)
for message in input_messages:
if message.type == Type.STATE:
# Emitting a state message indicates that all records which came before it have been written to the destination. So we flush
# the queue to ensure writes happen, then output the state message to indicate it's safe to checkpoint state
writer.flush()
yield message
elif message.type == Type.RECORD:
record = message.record
writer.queue_write_operation(record)
else:
# ignore other message types for now
continue
# Make sure to flush any records still in the queue
writer.flush()
def check(self, logger: logging.Logger, config: VectaraConfig) -> AirbyteConnectionStatus:
"""
Tests if the input configuration can be used to successfully connect to the destination with the needed permissions
e.g: if a provided API token or password can be used to connect and write to the destination.
:param logger: Logging object to display debug/info/error to the logs
(logs will not be accessible via airbyte UI if they are not passed to this logger)
:param config: Json object containing the configuration of this destination, content of this json is as specified in
the properties of the spec.json file
:return: AirbyteConnectionStatus indicating a Success or Failure
"""
client = VectaraClient(config=config)
client_error = client.check()
if client_error:
return AirbyteConnectionStatus(status=Status.FAILED, message="\n".join([client_error]))
else:
return AirbyteConnectionStatus(status=Status.SUCCEEDED)
def spec(self, *args: Any, **kwargs: Any) -> ConnectorSpecification:
return ConnectorSpecification(
documentationUrl="https://docs.airbyte.com/integrations/destinations/vectara",
supportsIncremental=True,
supported_destination_sync_modes=[DestinationSyncMode.overwrite, DestinationSyncMode.append],
connectionSpecification=VectaraConfig.schema(),
)
| DestinationVectara |
python | django__django | tests/invalid_models_tests/test_ordinary_fields.py | {
"start": 20487,
"end": 24908
} | class ____(TestCase):
def test_both_attributes_omitted(self):
class Model(models.Model):
field = models.DecimalField()
field = Model._meta.get_field("field")
if connection.features.supports_no_precision_decimalfield:
expected = []
else:
expected = [
Error(
"DecimalFields must define a 'decimal_places' attribute.",
obj=field,
id="fields.E130",
),
Error(
"DecimalFields must define a 'max_digits' attribute.",
obj=field,
id="fields.E132",
),
]
self.assertEqual(field.check(), expected)
def test_both_attributes_omitted_required_db_features(self):
class Model(models.Model):
field = models.DecimalField()
class Meta:
required_db_features = {"supports_no_precision_decimalfield"}
field = Model._meta.get_field("field")
self.assertEqual(field.check(databases=self.databases), [])
@skipUnlessDBFeature("supports_no_precision_decimalfield")
def test_only_max_digits_defined(self):
class Model(models.Model):
field = models.DecimalField(max_digits=13)
field = Model._meta.get_field("field")
self.assertEqual(
field.check(),
[
Error(
"DecimalField’s max_digits and decimal_places must both "
"be defined or both omitted.",
obj=field,
id="fields.E135",
),
],
)
@skipUnlessDBFeature("supports_no_precision_decimalfield")
def test_only_decimal_places_defined(self):
class Model(models.Model):
field = models.DecimalField(decimal_places=5)
field = Model._meta.get_field("field")
self.assertEqual(
field.check(),
[
Error(
"DecimalField’s max_digits and decimal_places must both "
"be defined or both omitted.",
obj=field,
id="fields.E135",
),
],
)
def test_negative_max_digits_and_decimal_places(self):
class Model(models.Model):
field = models.DecimalField(max_digits=-1, decimal_places=-1)
field = Model._meta.get_field("field")
self.assertEqual(
field.check(),
[
Error(
"'decimal_places' must be a non-negative integer.",
obj=field,
id="fields.E131",
),
Error(
"'max_digits' must be a positive integer.",
obj=field,
id="fields.E133",
),
],
)
def test_bad_values_of_max_digits_and_decimal_places(self):
class Model(models.Model):
field = models.DecimalField(max_digits="bad", decimal_places="bad")
field = Model._meta.get_field("field")
self.assertEqual(
field.check(),
[
Error(
"'decimal_places' must be a non-negative integer.",
obj=field,
id="fields.E131",
),
Error(
"'max_digits' must be a positive integer.",
obj=field,
id="fields.E133",
),
],
)
def test_decimal_places_greater_than_max_digits(self):
class Model(models.Model):
field = models.DecimalField(max_digits=9, decimal_places=10)
field = Model._meta.get_field("field")
self.assertEqual(
field.check(),
[
Error(
"'max_digits' must be greater or equal to 'decimal_places'.",
obj=field,
id="fields.E134",
),
],
)
def test_valid_field(self):
class Model(models.Model):
field = models.DecimalField(max_digits=10, decimal_places=10)
field = Model._meta.get_field("field")
self.assertEqual(field.check(), [])
@isolate_apps("invalid_models_tests")
| DecimalFieldTests |
python | django__django | django/utils/archive.py | {
"start": 5167,
"end": 6781
} | class ____(BaseArchive):
def __init__(self, file):
self._archive = tarfile.open(file)
def list(self, *args, **kwargs):
self._archive.list(*args, **kwargs)
def extract(self, to_path):
members = self._archive.getmembers()
leading = self.has_leading_dir(x.name for x in members)
for member in members:
name = member.name
if leading:
name = self.split_leading_dir(name)[1]
filename = self.target_filename(to_path, name)
if member.isdir():
if filename:
os.makedirs(filename, exist_ok=True)
else:
try:
extracted = self._archive.extractfile(member)
except (KeyError, AttributeError) as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
print(
"In the tar file %s the member %s is invalid: %s"
% (name, member.name, exc)
)
else:
dirname = os.path.dirname(filename)
if dirname:
os.makedirs(dirname, exist_ok=True)
with open(filename, "wb") as outfile:
shutil.copyfileobj(extracted, outfile)
self._copy_permissions(member.mode, filename)
finally:
if extracted:
extracted.close()
def close(self):
self._archive.close()
| TarArchive |
python | pypa__pipenv | pipenv/vendor/click_didyoumean/__init__.py | {
"start": 1863,
"end": 2077
} | class ____(DYMMixin, click.CommandCollection):
"""
click CommandCollection to provide git-like
*did-you-mean* functionality when a certain
command is not found in the group.
"""
| DYMCommandCollection |
python | scipy__scipy | scipy/stats/_hypotests.py | {
"start": 39434,
"end": 49510
} | class ____:
statistic: float
pvalue: float
@xp_capabilities(np_only=True)
def barnard_exact(table, alternative="two-sided", pooled=True, n=32):
r"""Perform a Barnard exact test on a 2x2 contingency table.
Parameters
----------
table : array_like of ints
A 2x2 contingency table. Elements should be non-negative integers.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the null and alternative hypotheses. Default is 'two-sided'.
Please see explanations in the Notes section below.
pooled : bool, optional
Whether to compute score statistic with pooled variance (as in
Student's t-test, for example) or unpooled variance (as in Welch's
t-test). Default is ``True``.
n : int, optional
Number of sampling points used in the construction of the sampling
method. Note that this argument will automatically be converted to
the next higher power of 2 since `scipy.stats.qmc.Sobol` is used to
select sample points. Default is 32. Must be positive. In most cases,
32 points is enough to reach good precision. More points comes at
performance cost.
Returns
-------
ber : BarnardExactResult
A result object with the following attributes.
statistic : float
The Wald statistic with pooled or unpooled variance, depending
on the user choice of `pooled`.
pvalue : float
P-value, the probability of obtaining a distribution at least as
extreme as the one that was actually observed, assuming that the
null hypothesis is true.
See Also
--------
chi2_contingency : Chi-square test of independence of variables in a
contingency table.
fisher_exact : Fisher exact test on a 2x2 contingency table.
boschloo_exact : Boschloo's exact test on a 2x2 contingency table,
which is an uniformly more powerful alternative to Fisher's exact test.
Notes
-----
Barnard's test is an exact test used in the analysis of contingency
tables. It examines the association of two categorical variables, and
is a more powerful alternative than Fisher's exact test
for 2x2 contingency tables.
Let's define :math:`X_0` a 2x2 matrix representing the observed sample,
where each column stores the binomial experiment, as in the example
below. Let's also define :math:`p_1, p_2` the theoretical binomial
probabilities for :math:`x_{11}` and :math:`x_{12}`. When using
Barnard exact test, we can assert three different null hypotheses :
- :math:`H_0 : p_1 \geq p_2` versus :math:`H_1 : p_1 < p_2`,
with `alternative` = "less"
- :math:`H_0 : p_1 \leq p_2` versus :math:`H_1 : p_1 > p_2`,
with `alternative` = "greater"
- :math:`H_0 : p_1 = p_2` versus :math:`H_1 : p_1 \neq p_2`,
with `alternative` = "two-sided" (default one)
In order to compute Barnard's exact test, we are using the Wald
statistic [3]_ with pooled or unpooled variance.
Under the default assumption that both variances are equal
(``pooled = True``), the statistic is computed as:
.. math::
T(X) = \frac{
\hat{p}_1 - \hat{p}_2
}{
\sqrt{
\hat{p}(1 - \hat{p})
(\frac{1}{c_1} +
\frac{1}{c_2})
}
}
with :math:`\hat{p}_1, \hat{p}_2` and :math:`\hat{p}` the estimator of
:math:`p_1, p_2` and :math:`p`, the latter being the combined probability,
given the assumption that :math:`p_1 = p_2`.
If this assumption is invalid (``pooled = False``), the statistic is:
.. math::
T(X) = \frac{
\hat{p}_1 - \hat{p}_2
}{
\sqrt{
\frac{\hat{p}_1 (1 - \hat{p}_1)}{c_1} +
\frac{\hat{p}_2 (1 - \hat{p}_2)}{c_2}
}
}
The p-value is then computed as:
.. math::
\sum
\binom{c_1}{x_{11}}
\binom{c_2}{x_{12}}
\pi^{x_{11} + x_{12}}
(1 - \pi)^{t - x_{11} - x_{12}}
where the sum is over all 2x2 contingency tables :math:`X` such that:
* :math:`T(X) \leq T(X_0)` when `alternative` = "less",
* :math:`T(X) \geq T(X_0)` when `alternative` = "greater", or
* :math:`T(X) \geq |T(X_0)|` when `alternative` = "two-sided".
Above, :math:`c_1, c_2` are the sum of the columns 1 and 2,
and :math:`t` the total (sum of the 4 sample's element).
The returned p-value is the maximum p-value taken over the nuisance
parameter :math:`\pi`, where :math:`0 \leq \pi \leq 1`.
This function's complexity is :math:`O(n c_1 c_2)`, where `n` is the
number of sample points.
References
----------
.. [1] Barnard, G. A. "Significance Tests for 2x2 Tables". *Biometrika*.
34.1/2 (1947): 123-138. :doi:`dpgkg3`
.. [2] Mehta, Cyrus R., and Pralay Senchaudhuri. "Conditional versus
unconditional exact tests for comparing two binomials."
*Cytel Software Corporation* 675 (2003): 1-5.
.. [3] "Wald Test". *Wikipedia*. https://en.wikipedia.org/wiki/Wald_test
Examples
--------
An example use of Barnard's test is presented in [2]_.
Consider the following example of a vaccine efficacy study
(Chan, 1998). In a randomized clinical trial of 30 subjects, 15 were
inoculated with a recombinant DNA influenza vaccine and the 15 were
inoculated with a placebo. Twelve of the 15 subjects in the placebo
group (80%) eventually became infected with influenza whereas for the
vaccine group, only 7 of the 15 subjects (47%) became infected. The
data are tabulated as a 2 x 2 table::
Vaccine Placebo
Yes 7 12
No 8 3
When working with statistical hypothesis testing, we usually use a
threshold probability or significance level upon which we decide
to reject the null hypothesis :math:`H_0`. Suppose we choose the common
significance level of 5%.
Our alternative hypothesis is that the vaccine will lower the chance of
becoming infected with the virus; that is, the probability :math:`p_1` of
catching the virus with the vaccine will be *less than* the probability
:math:`p_2` of catching the virus without the vaccine. Therefore, we call
`barnard_exact` with the ``alternative="less"`` option:
>>> import scipy.stats as stats
>>> res = stats.barnard_exact([[7, 12], [8, 3]], alternative="less")
>>> res.statistic
-1.894
>>> res.pvalue
0.03407
Under the null hypothesis that the vaccine will not lower the chance of
becoming infected, the probability of obtaining test results at least as
extreme as the observed data is approximately 3.4%. Since this p-value is
less than our chosen significance level, we have evidence to reject
:math:`H_0` in favor of the alternative.
Suppose we had used Fisher's exact test instead:
>>> _, pvalue = stats.fisher_exact([[7, 12], [8, 3]], alternative="less")
>>> pvalue
0.0640
With the same threshold significance of 5%, we would not have been able
to reject the null hypothesis in favor of the alternative. As stated in
[2]_, Barnard's test is uniformly more powerful than Fisher's exact test
because Barnard's test does not condition on any margin. Fisher's test
should only be used when both sets of marginals are fixed.
"""
if n <= 0:
raise ValueError(
"Number of points `n` must be strictly positive, "
f"found {n!r}"
)
table = np.asarray(table, dtype=np.int64)
if not table.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if np.any(table < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in table.sum(axis=0):
# If both values in column are zero, the p-value is 1 and
# the score's statistic is NaN.
return BarnardExactResult(np.nan, 1.0)
total_col_1, total_col_2 = table.sum(axis=0)
x1 = np.arange(total_col_1 + 1, dtype=np.int64).reshape(-1, 1)
x2 = np.arange(total_col_2 + 1, dtype=np.int64).reshape(1, -1)
# We need to calculate the wald statistics for each combination of x1 and
# x2.
p1, p2 = x1 / total_col_1, x2 / total_col_2
if pooled:
p = (x1 + x2) / (total_col_1 + total_col_2)
variances = p * (1 - p) * (1 / total_col_1 + 1 / total_col_2)
else:
variances = p1 * (1 - p1) / total_col_1 + p2 * (1 - p2) / total_col_2
# To avoid warning when dividing by 0
with np.errstate(divide="ignore", invalid="ignore"):
wald_statistic = np.divide((p1 - p2), np.sqrt(variances))
wald_statistic[p1 == p2] = 0 # Removing NaN values
wald_stat_obs = wald_statistic[table[0, 0], table[0, 1]]
if alternative == "two-sided":
index_arr = np.abs(wald_statistic) >= abs(wald_stat_obs)
elif alternative == "less":
index_arr = wald_statistic <= wald_stat_obs
elif alternative == "greater":
index_arr = wald_statistic >= wald_stat_obs
else:
msg = (
"`alternative` should be one of {'two-sided', 'less', 'greater'},"
f" found {alternative!r}"
)
raise ValueError(msg)
x1_sum_x2 = x1 + x2
x1_log_comb = _compute_log_combinations(total_col_1)
x2_log_comb = _compute_log_combinations(total_col_2)
x1_sum_x2_log_comb = x1_log_comb[x1] + x2_log_comb[x2]
result = shgo(
_get_binomial_log_p_value_with_nuisance_param,
args=(x1_sum_x2, x1_sum_x2_log_comb, index_arr),
bounds=((0, 1),),
n=n,
sampling_method="sobol",
)
# result.fun is the negative log pvalue and therefore needs to be
# changed before return
p_value = np.clip(np.exp(-result.fun), a_min=0, a_max=1)
return BarnardExactResult(wald_stat_obs, p_value)
@dataclass
| BarnardExactResult |
python | vyperlang__vyper | vyper/venom/passes/sccp/sccp.py | {
"start": 752,
"end": 985
} | class ____:
start: IRBasicBlock
end: IRBasicBlock
WorkListItem = Union[FlowWorkItem, SSAWorkListItem]
LatticeItem = Union[LatticeEnum, IRLiteral, IRLabel, IRAbstractMemLoc]
Lattice = dict[IRVariable, LatticeItem]
| FlowWorkItem |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/named_types.py | {
"start": 17575,
"end": 17663
} | class ____(schema._CreateDropBase):
__visit_name__ = "create_enum_type"
| CreateEnumType |
python | langchain-ai__langchain | libs/standard-tests/tests/unit_tests/test_basic_retriever.py | {
"start": 475,
"end": 840
} | class ____(RetrieversIntegrationTests):
@property
def retriever_constructor(self) -> type[ParrotRetriever]:
return ParrotRetriever
@property
def retriever_constructor_params(self) -> dict:
return {"parrot_name": "Polly"}
@property
def retriever_query_example(self) -> str:
return "parrot"
| TestParrotRetrieverIntegration |
python | bokeh__bokeh | tests/support/util/screenshot.py | {
"start": 1575,
"end": 1673
} | class ____(TypedDict):
level: str
text: str
url: str
line: int
col: int
| JSMessage |
python | kamyu104__LeetCode-Solutions | Python/balanced-k-factor-decomposition.py | {
"start": 2437,
"end": 3346
} | class ____(object):
def minDifference(self, n, k):
"""
:type n: int
:type k: int
:rtype: List[int]
"""
def factors(n):
for i in xrange(1, n+1):
if i*i > n:
break
if n%i:
continue
yield i
if n//i != i:
yield n//i
def backtracking(remain):
if len(curr) == k-1:
curr.append(remain)
if not result or max(result)-min(result) > max(curr)-min(curr):
result[:] = curr
curr.pop()
return
for i in factors(remain):
curr.append(i)
backtracking(remain//i)
curr.pop()
result, curr = [], []
backtracking(n)
return result
| Solution3 |
python | doocs__leetcode | solution/2500-2599/2506.Count Pairs Of Similar Strings/Solution.py | {
"start": 0,
"end": 295
} | class ____:
def similarPairs(self, words: List[str]) -> int:
ans = 0
cnt = Counter()
for s in words:
x = 0
for c in map(ord, s):
x |= 1 << (c - ord("a"))
ans += cnt[x]
cnt[x] += 1
return ans
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-marketo/source_marketo/source.py | {
"start": 2616,
"end": 5650
} | class ____(MarketoStream):
cursor_field = "createdAt"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._state = {}
def filter_by_state(self, stream_state: Mapping[str, Any] = None, record: Mapping[str, Any] = None) -> Iterable:
"""
Endpoint does not provide query filtering params, but they provide us
cursor field in most cases, so we used that as incremental filtering
during the parsing.
"""
if record[self.cursor_field] >= (stream_state or {}).get(self.cursor_field, self.start_date):
yield record
def parse_response(self, response: requests.Response, stream_state: Mapping[str, Any], **kwargs) -> Iterable[MutableMapping]:
json_response = response.json().get(self.data_field) or []
for record in json_response:
yield from self.filter_by_state(stream_state=stream_state, record=record)
@property
def state(self):
return self._state
@state.setter
def state(self, value):
self._state = value
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
latest_cursor_value = latest_record.get(self.cursor_field, self.start_date) or self.start_date
current_cursor_value = current_stream_state.get(self.cursor_field, self.start_date) or self.start_date
self._state = {self.cursor_field: max(latest_cursor_value, current_cursor_value)}
return self._state
def stream_slices(self, sync_mode, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Optional[MutableMapping[str, any]]]:
"""
Override default stream_slices CDK method to provide date_slices as page chunks for data fetch.
Returns list of dict, example: [{
"startDate": "2020-01-01T0:0:0Z",
"endDate": "2021-01-02T0:0:0Z"
},
{
"startDate": "2020-01-03T0:0:0Z",
"endDate": "2021-01-04T0:0:0Z"
},
...]
"""
start_date = pendulum.parse(self.start_date)
# Determine stream_state, if no stream_state we use start_date
if stream_state:
start_date = pendulum.parse(stream_state.get(self.cursor_field))
# use the lowest date between start_date and self.end_date, otherwise API fails if start_date is in future
start_date = min(start_date, pendulum.now())
date_slices = []
end_date = pendulum.parse(self.end_date) if self.end_date else pendulum.now()
while start_date < end_date:
# the amount of days for each data-chunk beginning from start_date
end_date_slice = start_date.add(days=self.window_in_days)
date_slice = {"startAt": to_datetime_str(start_date), "endAt": to_datetime_str(end_date_slice)}
date_slices.append(date_slice)
start_date = end_date_slice
return date_slices
| IncrementalMarketoStream |
python | getsentry__sentry | tests/sentry/middleware/integrations/parsers/test_slack.py | {
"start": 1238,
"end": 14951
} | class ____(TestCase):
factory = RequestFactory()
timestamp = "123123123"
def setUp(self) -> None:
self.user = self.create_user()
self.organization = self.create_organization(owner=self.user)
self.integration = self.create_integration(
organization=self.organization, external_id="TXXXXXXX1", provider="slack"
)
def get_response(self, request: HttpRequest) -> HttpResponse:
return HttpResponse(status=200, content="passthrough")
@responses.activate
@patch(
"slack_sdk.signature.SignatureVerifier.is_valid",
return_value=True,
)
def test_webhook(self, mock_verify: MagicMock) -> None:
# Retrieve the correct integration
data = urlencode({"team_id": self.integration.external_id}).encode("utf-8")
signature = _encode_data(secret="slack-signing-secret", data=data, timestamp=self.timestamp)
request = self.factory.post(
path=reverse("sentry-integration-slack-commands"),
data=data,
content_type="application/x-www-form-urlencoded",
HTTP_X_SLACK_SIGNATURE=signature,
HTTP_X_SLACK_REQUEST_TIMESTAMP=self.timestamp,
)
parser = SlackRequestParser(request, self.get_response)
integration = parser.get_integration_from_request()
assert integration == self.integration
# Returns response from region
responses.add(
responses.POST,
"http://us.testserver/extensions/slack/commands/",
status=status.HTTP_201_CREATED,
body=b"region_response",
)
response = parser.get_response()
assert isinstance(response, HttpResponse)
assert response.status_code == status.HTTP_201_CREATED
assert response.content == b"region_response"
assert len(responses.calls) == 1
assert_no_webhook_payloads()
# ...even if it returns an error
responses.add(
responses.POST,
"http://us.testserver/extensions/slack/commands/",
status=401,
body=b"error_response",
)
response = parser.get_response()
assert isinstance(response, HttpResponse)
assert response.status_code == status.HTTP_401_UNAUTHORIZED
assert response.content == b"error_response"
assert len(responses.calls) == 2
assert_no_webhook_payloads()
@responses.activate
def test_django_view(self) -> None:
# Retrieve the correct integration
path = reverse(
"sentry-integration-slack-link-identity",
kwargs={"signed_params": sign(salt=SALT, integration_id=self.integration.id)},
)
request = self.factory.post(path)
parser = SlackRequestParser(request, self.get_response)
parser_integration = parser.get_integration_from_request()
if not parser_integration:
raise AssertionError("Parser could not identify an integration")
assert parser_integration.id == self.integration.id
# Passes through to control silo
response = parser.get_response()
assert isinstance(response, HttpResponse)
assert response.status_code == status.HTTP_200_OK
assert response.content == b"passthrough"
assert len(responses.calls) == 0
assert_no_webhook_payloads()
@patch(
"sentry.integrations.slack.requests.base.SlackRequest._check_signing_secret",
return_value=True,
)
@patch("sentry.middleware.integrations.parsers.slack.convert_to_async_slack_response")
def test_triggers_async_response(
self, mock_slack_task: MagicMock, mock_signing_secret: MagicMock
) -> None:
response_url = "https://hooks.slack.com/commands/TXXXXXXX1/1234567890123/something"
data = {
"payload": json.dumps(
{"team_id": self.integration.external_id, "response_url": response_url}
)
}
request = self.factory.post(reverse("sentry-integration-slack-action"), data=data)
parser = SlackRequestParser(request, self.get_response)
response = parser.get_response()
mock_slack_task.apply_async.assert_called_once_with(
kwargs={
"region_names": ["us"],
"payload": create_async_request_payload(request),
"response_url": response_url,
}
)
assert response.status_code == status.HTTP_200_OK
@patch(
"sentry.integrations.slack.requests.base.SlackRequest._check_signing_secret",
return_value=True,
)
@patch("sentry.middleware.integrations.parsers.slack.convert_to_async_slack_response")
def test_skips_async_response_if_org_integration_missing(
self, mock_slack_task, mock_signing_secret
):
response_url = "https://hooks.slack.com/commands/TXXXXXXX1/1234567890123/something"
data = {
"payload": json.dumps(
{"team_id": self.integration.external_id, "response_url": response_url}
)
}
with (
assume_test_silo_mode_of(OrganizationIntegration),
outbox_context(transaction.atomic(using=router.db_for_write(OrganizationIntegration))),
):
OrganizationIntegration.objects.filter(organization_id=self.organization.id).delete()
request = self.factory.post(reverse("sentry-integration-slack-action"), data=data)
parser = SlackRequestParser(request, self.get_response)
response = parser.get_response()
assert response.status_code == status.HTTP_202_ACCEPTED
assert mock_slack_task.apply_async.call_count == 0
def test_async_request_payload(self) -> None:
data = {
"payload": json.dumps(
{
"team_id": self.integration.external_id,
"response_url": "https://hooks.slack.com/commands/TXXXXX1/12345678/something",
}
)
}
request = self.factory.post(reverse("sentry-integration-slack-action"), data=data)
result = create_async_request_payload(request)
assert "method" in result
assert result["method"] == request.method
assert "path" in result
assert result["path"] == request.get_full_path()
assert "uri" in result
assert result["uri"] == request.build_absolute_uri()
assert "headers" in result
assert isinstance(result["headers"], dict)
assert "body" in result
assert result["body"] == request.body.decode("utf8")
def test_targeting_all_orgs(self) -> None:
# Install the integration on two organizations
other_organization = self.create_organization()
self.integration.add_organization(other_organization)
# Case 1: Without passing an organization, we expect to filter to both.
for cmd in ["link team", "unlink team"]:
data = urlencode(
{
"text": cmd,
"team_id": self.integration.external_id,
}
).encode("utf-8")
request = self.factory.post(
reverse("sentry-integration-slack-commands"),
data=data,
content_type="application/x-www-form-urlencoded",
)
parser = SlackRequestParser(request, self.get_response)
organizations = parser.get_organizations_from_integration(self.integration)
organization_ids = {org.id for org in organizations}
assert len(organization_ids) == 2
assert self.organization.id in organization_ids
assert other_organization.id in organization_ids
def test_targeting_specific_org(self) -> None:
# Install the integration on two organizations
other_organization = self.create_organization()
self.integration.add_organization(other_organization)
# When the organization slug is provided, filter to just that one.
for cmd in ["link team", "unlink team"]:
data = urlencode(
{
"text": f"{cmd} {other_organization.slug}",
"team_id": self.integration.external_id,
}
).encode("utf-8")
request = self.factory.post(
reverse("sentry-integration-slack-commands"),
data=data,
content_type="application/x-www-form-urlencoded",
)
parser = SlackRequestParser(request, self.get_response)
organizations = parser.get_organizations_from_integration(self.integration)
assert len(organizations) == 1
assert organizations[0].id == other_organization.id
def test_targeting_irrelevant_org(self) -> None:
# Install the integration on two organizations
other_organization = self.create_organization()
self.integration.add_organization(other_organization)
# And add another, maybe the user belongs to it, maybe not
irrelevant_organization = self.create_organization()
# Case 3: If the organization slug is irrelevant, ignore it and return all orgs
for cmd in ["link team", "unlink team"]:
data = urlencode(
{
"text": f"{cmd} {irrelevant_organization.slug}",
"team_id": self.integration.external_id,
}
).encode("utf-8")
request = self.factory.post(
reverse("sentry-integration-slack-commands"),
data=data,
content_type="application/x-www-form-urlencoded",
)
parser = SlackRequestParser(request, self.get_response)
organizations = parser.get_organizations_from_integration(self.integration)
organization_ids = {org.id for org in organizations}
assert len(organization_ids) == 2
assert irrelevant_organization.id not in organization_ids
def test_targeting_issue_actions(self) -> None:
# Install the integration on two organizations
other_organization = self.create_organization()
self.integration.add_organization(other_organization)
# Case 1:With the default actions (non-encoded), we shouldn't filter the organization
data = urlencode(
{
"payload": json.dumps(
{
"actions": [{"action_id": SlackAction.RESOLVE_DIALOG}],
"team_id": self.integration.external_id,
}
),
}
).encode("utf-8")
request = self.factory.post(
reverse("sentry-integration-slack-action"),
data=data,
content_type="application/x-www-form-urlencoded",
)
parser = SlackRequestParser(request, self.get_response)
organizations = parser.get_organizations_from_integration(self.integration)
organization_ids = {org.id for org in organizations}
assert len(organization_ids) == 2
assert self.organization.id in organization_ids
assert other_organization.id in organization_ids
# Case 2: With the encoded action, we should filter to a single organization
project = self.create_project(organization=other_organization)
encoded_action = encode_action_id(
action=SlackAction.RESOLVE_DIALOG,
organization_id=other_organization.id,
project_id=project.id,
)
data = urlencode(
{
"payload": json.dumps(
{
"actions": [{"action_id": encoded_action}],
"team_id": self.integration.external_id,
}
),
}
).encode("utf-8")
request = self.factory.post(
reverse("sentry-integration-slack-action"),
data=data,
content_type="application/x-www-form-urlencoded",
)
parser = SlackRequestParser(request, self.get_response)
organizations = parser.get_organizations_from_integration(self.integration)
organization_ids = {org.id for org in organizations}
assert len(organization_ids) == 1
assert other_organization.id in organization_ids
# Case 3: If we see an irrelevant organization, we should ignore it
irrelevant_organization = self.create_organization()
project = self.create_project(organization=irrelevant_organization)
encoded_action = encode_action_id(
action=SlackAction.RESOLVE_DIALOG,
organization_id=irrelevant_organization.id,
project_id=project.id,
)
data = urlencode(
{
"payload": json.dumps(
{
"actions": [{"action_id": encoded_action}],
"team_id": self.integration.external_id,
}
),
}
).encode("utf-8")
request = self.factory.post(
reverse("sentry-integration-slack-action"),
data=data,
content_type="application/x-www-form-urlencoded",
)
parser = SlackRequestParser(request, self.get_response)
organizations = parser.get_organizations_from_integration(self.integration)
organization_ids = {org.id for org in organizations}
assert len(organization_ids) == 2
assert self.organization.id in organization_ids
assert other_organization.id in organization_ids
| SlackRequestParserTest |
python | PyCQA__pylint | doc/exts/pylint_messages.py | {
"start": 2215,
"end": 2463
} | class ____(NamedTuple):
checker: str
id: str
name: str
definition: MessageDefinition
example_code: str
checker_module_name: str
checker_module_path: str
shared: bool = False
default_enabled: bool = True
| MessageData |
python | django-haystack__django-haystack | haystack/exceptions.py | {
"start": 428,
"end": 539
} | class ____(HaystackError):
"""Raised when a model is not handled by the router setup."""
pass
| NotHandled |
python | pytorch__pytorch | torch/_dynamo/convert_frame.py | {
"start": 31023,
"end": 31555
} | class ____:
"""
Represents core data structure that dynamo will pass to a backend, including:
- Graph module
- Example inputs
- The FakeTensorMode used for compiling graph.
This data structure should capture all the information dynamo produces
on for the user backend.
"""
backend_id: str
graph_module: torch.fx.GraphModule
example_inputs: Any
fake_mode: torch._subclasses.fake_tensor.FakeTensorMode
tensor_to_context: WeakIdKeyDictionary
@dataclass(frozen=True)
| BackendInput |
python | Textualize__textual | tests/css/test_inheritance.py | {
"start": 126,
"end": 226
} | class ____(Widget):
DEFAULT_CSS = """
Widget1 {
background: red;
}
"""
| Widget1 |
python | tensorflow__tensorflow | tensorflow/python/distribute/cluster_resolver/tfconfig_cluster_resolver.py | {
"start": 1537,
"end": 6737
class ____(ClusterResolver):
  """Implementation of a ClusterResolver which reads the TF_CONFIG EnvVar.

  This is an implementation of cluster resolvers when using TF_CONFIG to set
  information about the cluster. The cluster spec returned will be
  initialized from the TF_CONFIG environment variable.

  An example to set TF_CONFIG is:

  ```Python
  os.environ['TF_CONFIG'] = json.dumps({
    'cluster': {
        'worker': ["localhost:12345", "localhost:23456"]
    },
    'task': {'type': 'worker', 'index': 0}
  })
  ```

  However, sometimes the container orchestration framework will set TF_CONFIG
  for you. In this case, you can just create an instance without passing in any
  arguments. You can find an example here to let Kubernetes set TF_CONFIG for
  you: https://github.com/tensorflow/ecosystem/tree/master/kubernetes. Then you
  can use it with `tf.distribute.Strategy` as:

  ```Python
  # `TFConfigClusterResolver` is already the default one in the following
  # strategy.
  strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(
      cluster_resolver=TFConfigClusterResolver())
  ```
  """

  def __init__(self,
               task_type=None,
               task_id=None,
               rpc_layer=None,
               environment=None):
    """Creates a new TFConfigClusterResolver.

    Args:
      task_type: (String, optional) Overrides the task type specified in the
        TF_CONFIG environment variable.
      task_id: (Integer, optional) Overrides the task index specified in the
        TF_CONFIG environment variable.
      rpc_layer: (String, optional) Overrides the rpc layer TensorFlow uses.
      environment: (String, optional) Overrides the environment TensorFlow
        operates in.
    """
    self._task_type = task_type
    self._task_id = task_id
    self._rpc_layer = rpc_layer
    self._environment = environment

  @property
  def task_type(self):
    # An explicit constructor override wins; otherwise fall back to the
    # 'task' section of TF_CONFIG. Returns None when neither is set.
    if self._task_type is None:
      task_info = _get_value_in_tfconfig(_TASK_KEY, {})
      return str(task_info['type']) if 'type' in task_info else None
    else:
      return str(self._task_type)

  @property
  def task_id(self):
    # Same precedence as task_type: override first, then TF_CONFIG, else None.
    if self._task_id is None:
      task_info = _get_value_in_tfconfig(_TASK_KEY, {})
      return int(task_info['index']) if 'index' in task_info else None
    else:
      return int(self._task_id)

  @task_type.setter
  def task_type(self, task_type):
    self._task_type = task_type

  @task_id.setter
  def task_id(self, task_id):
    self._task_id = task_id

  @property
  def environment(self):
    return self._environment

  @property
  def rpc_layer(self):
    # Constructor override wins; otherwise read the rpc layer from TF_CONFIG.
    if self._rpc_layer is None:
      return _get_value_in_tfconfig(_RPC_LAYER_KEY)
    else:
      return self._rpc_layer

  @rpc_layer.setter
  def rpc_layer(self, rpc_layer):
    self._rpc_layer = rpc_layer

  def num_accelerators(self,
                       task_type=None,
                       task_id=None,
                       config_proto=None):
    """Returns the number of accelerator cores per worker.

    Resolves the effective task_type/task_id (explicit arguments take
    precedence over TF_CONFIG-derived values) and delegates to the base
    class implementation.
    """
    task_type = self.task_type if task_type is None else task_type
    task_id = self.task_id if task_id is None else task_id
    # Zero-argument super(): the original hard-coded the class name, which
    # breaks if the class is renamed (and does not match this class's name).
    return super().num_accelerators(task_type, task_id, config_proto)

  def cluster_spec(self):
    """Returns a ClusterSpec based on the TF_CONFIG environment variable.

    Returns:
      A ClusterSpec with information from the TF_CONFIG environment variable.
    """
    # An absent 'cluster' section yields an empty ClusterSpec rather than
    # raising, so single-process use still works.
    tf_config = _load_tf_config()
    return ClusterSpec(tf_config.get('cluster', {}))

  def master(self, task_type=None, task_id=None, rpc_layer=None):
    """Returns the master address to use when creating a TensorFlow session.

    Note: this is only useful for TensorFlow 1.x.

    Args:
      task_type: (String, optional) Overrides and sets the task_type of the
        master.
      task_id: (Integer, optional) Overrides and sets the task id of the
        master.
      rpc_layer: (String, optional) Overrides and sets the protocol over which
        TensorFlow nodes communicate with each other.

    Returns:
      The address of the master.

    Raises:
      RuntimeError: If the task_type or task_id is not specified and the
        `TF_CONFIG` environment variable does not contain a task section.
    """
    # If `session_master` is set, just use that.
    session_master = _get_value_in_tfconfig(_SESSION_MASTER_KEY)
    if session_master is not None:
      return session_master

    # Return an empty string if we are the only job in the ClusterSpec.
    cluster_spec = self.cluster_spec()
    if (not cluster_spec.jobs or
        (len(cluster_spec.jobs) == 1 and
         len(cluster_spec.job_tasks(cluster_spec.jobs[0])) == 1)):
      return ''

    # We try to auto-detect the task type and id, but uses the user-supplied one
    # where available
    task_type = task_type if task_type is not None else self.task_type
    task_id = task_id if task_id is not None else self.task_id
    rpc_layer = rpc_layer if rpc_layer is not None else self.rpc_layer

    return format_master_url(cluster_spec.task_address(task_type, task_id),
                             rpc_layer)
| TFConfigClusterResolver |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.