language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pandas-dev__pandas | pandas/io/parsers/arrow_parser_wrapper.py | {
"start": 666,
"end": 11952
} | class ____(ParserBase):
"""
Wrapper for the pyarrow engine for read_csv()
"""
def __init__(self, src: ReadBuffer[bytes], **kwds) -> None:
super().__init__(kwds)
self.kwds = kwds
self.src = src
self._parse_kwds()
def _parse_kwds(self) -> None:
"""
Validates keywords before passing to pyarrow.
"""
encoding: str | None = self.kwds.get("encoding")
self.encoding = "utf-8" if encoding is None else encoding
na_values = self.kwds["na_values"]
if isinstance(na_values, dict):
raise ValueError(
"The pyarrow engine doesn't support passing a dict for na_values"
)
self.na_values = list(self.kwds["na_values"])
def _get_pyarrow_options(self) -> None:
"""
Rename some arguments to pass to pyarrow
"""
mapping = {
"usecols": "include_columns",
"na_values": "null_values",
"escapechar": "escape_char",
"skip_blank_lines": "ignore_empty_lines",
"decimal": "decimal_point",
"quotechar": "quote_char",
}
for pandas_name, pyarrow_name in mapping.items():
if pandas_name in self.kwds and self.kwds.get(pandas_name) is not None:
self.kwds[pyarrow_name] = self.kwds.pop(pandas_name)
# Date format handling
# If we get a string, we need to convert it into a list for pyarrow
# If we get a dict, we want to parse those separately
date_format = self.date_format
if isinstance(date_format, str):
date_format = [date_format]
else:
# In case of dict, we don't want to propagate through, so
# just set to pyarrow default of None
# Ideally, in future we disable pyarrow dtype inference (read in as string)
# to prevent misreads.
date_format = None
self.kwds["timestamp_parsers"] = date_format
self.parse_options = {
option_name: option_value
for option_name, option_value in self.kwds.items()
if option_value is not None
and option_name
in ("delimiter", "quote_char", "escape_char", "ignore_empty_lines")
}
on_bad_lines = self.kwds.get("on_bad_lines")
if on_bad_lines is not None:
if callable(on_bad_lines):
self.parse_options["invalid_row_handler"] = on_bad_lines
elif on_bad_lines == ParserBase.BadLineHandleMethod.ERROR:
self.parse_options["invalid_row_handler"] = (
None # PyArrow raises an exception by default
)
elif on_bad_lines == ParserBase.BadLineHandleMethod.WARN:
def handle_warning(invalid_row) -> str:
warnings.warn(
f"Expected {invalid_row.expected_columns} columns, but found "
f"{invalid_row.actual_columns}: {invalid_row.text}",
ParserWarning,
stacklevel=find_stack_level(),
)
return "skip"
self.parse_options["invalid_row_handler"] = handle_warning
elif on_bad_lines == ParserBase.BadLineHandleMethod.SKIP:
self.parse_options["invalid_row_handler"] = lambda _: "skip"
self.convert_options = {
option_name: option_value
for option_name, option_value in self.kwds.items()
if option_value is not None
and option_name
in (
"include_columns",
"null_values",
"true_values",
"false_values",
"decimal_point",
"timestamp_parsers",
)
}
self.convert_options["strings_can_be_null"] = "" in self.kwds["null_values"]
# autogenerated column names are prefixed with 'f' in pyarrow.csv
if self.header is None and "include_columns" in self.convert_options:
self.convert_options["include_columns"] = [
f"f{n}" for n in self.convert_options["include_columns"]
]
self.read_options = {
"autogenerate_column_names": self.header is None,
"skip_rows": self.header
if self.header is not None
else self.kwds["skiprows"],
"encoding": self.encoding,
}
def _get_convert_options(self):
pyarrow_csv = import_optional_dependency("pyarrow.csv")
try:
convert_options = pyarrow_csv.ConvertOptions(**self.convert_options)
except TypeError as err:
include = self.convert_options.get("include_columns", None)
if include is not None:
self._validate_usecols(include)
nulls = self.convert_options.get("null_values", set())
if not lib.is_list_like(nulls) or not all(
isinstance(x, str) for x in nulls
):
raise TypeError(
"The 'pyarrow' engine requires all na_values to be strings"
) from err
raise
return convert_options
def _adjust_column_names(self, table: pa.Table) -> bool:
num_cols = len(table.columns)
multi_index_named = True
if self.header is None:
if self.names is None:
self.names = range(num_cols)
if len(self.names) != num_cols:
# usecols is passed through to pyarrow, we only handle index col here
# The only way self.names is not the same length as number of cols is
# if we have int index_col. We should just pad the names(they will get
# removed anyways) to expected length then.
columns_prefix = [str(x) for x in range(num_cols - len(self.names))]
self.names = columns_prefix + self.names
multi_index_named = False
return multi_index_named
def _finalize_index(self, frame: DataFrame, multi_index_named: bool) -> DataFrame:
if self.index_col is not None:
index_to_set = self.index_col.copy()
for i, item in enumerate(self.index_col):
if is_integer(item):
index_to_set[i] = frame.columns[item]
# String case
elif item not in frame.columns:
raise ValueError(f"Index {item} invalid")
# Process dtype for index_col and drop from dtypes
if self.dtype is not None:
key, new_dtype = (
(item, self.dtype.get(item))
if self.dtype.get(item) is not None
else (frame.columns[item], self.dtype.get(frame.columns[item]))
)
if new_dtype is not None:
frame[key] = frame[key].astype(new_dtype)
del self.dtype[key]
frame.set_index(index_to_set, drop=True, inplace=True)
# Clear names if headerless and no name given
if self.header is None and not multi_index_named:
frame.index.names = [None] * len(frame.index.names)
return frame
def _finalize_dtype(self, frame: DataFrame) -> DataFrame:
if self.dtype is not None:
# Ignore non-existent columns from dtype mapping
# like other parsers do
if isinstance(self.dtype, dict):
self.dtype = {
k: pandas_dtype(v)
for k, v in self.dtype.items()
if k in frame.columns
}
else:
self.dtype = pandas_dtype(self.dtype)
try:
frame = frame.astype(self.dtype)
except TypeError as err:
# GH#44901 reraise to keep api consistent
raise ValueError(str(err)) from err
return frame
def _finalize_pandas_output(
self, frame: DataFrame, multi_index_named: bool
) -> DataFrame:
"""
Processes data read in based on kwargs.
Parameters
----------
frame : DataFrame
The DataFrame to process.
multi_index_named : bool
Returns
-------
DataFrame
The processed DataFrame.
"""
frame = self._do_date_conversions(frame.columns, frame)
frame = self._finalize_index(frame, multi_index_named)
frame = self._finalize_dtype(frame)
return frame
def _validate_usecols(self, usecols) -> None:
if lib.is_list_like(usecols) and not all(isinstance(x, str) for x in usecols):
raise ValueError(
"The pyarrow engine does not allow 'usecols' to be integer "
"column positions. Pass a list of string column names instead."
)
elif callable(usecols):
raise ValueError(
"The pyarrow engine does not allow 'usecols' to be a callable."
)
def read(self) -> DataFrame:
"""
Reads the contents of a CSV file into a DataFrame and
processes it according to the kwargs passed in the
constructor.
Returns
-------
DataFrame
The DataFrame created from the CSV file.
"""
pa = import_optional_dependency("pyarrow")
pyarrow_csv = import_optional_dependency("pyarrow.csv")
self._get_pyarrow_options()
convert_options = self._get_convert_options()
try:
table = pyarrow_csv.read_csv(
self.src,
read_options=pyarrow_csv.ReadOptions(**self.read_options),
parse_options=pyarrow_csv.ParseOptions(**self.parse_options),
convert_options=convert_options,
)
except pa.ArrowInvalid as e:
raise ParserError(e) from e
dtype_backend = self.kwds["dtype_backend"]
# Convert all pa.null() cols -> float64 (non nullable)
# else Int64 (nullable case, see below)
if dtype_backend is lib.no_default:
new_schema = table.schema
new_type = pa.float64()
for i, arrow_type in enumerate(table.schema.types):
if pa.types.is_null(arrow_type):
new_schema = new_schema.set(
i, new_schema.field(i).with_type(new_type)
)
table = table.cast(new_schema)
multi_index_named = self._adjust_column_names(table)
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
"make_block is deprecated",
Pandas4Warning,
)
frame = arrow_table_to_pandas(
table,
dtype_backend=dtype_backend,
null_to_int64=True,
dtype=self.dtype,
names=self.names,
)
if self.header is None:
frame.columns = self.names
return self._finalize_pandas_output(frame, multi_index_named)
| ArrowParserWrapper |
python | pypa__hatch | src/hatch/index/core.py | {
"start": 251,
"end": 786
} | class ____:
def __init__(self, repo: str):
self.repo = hyperlink.parse(repo).normalize()
# PyPI
if self.repo.host.endswith("pypi.org"): # no cov
repo_url = self.repo.replace(host="pypi.org") if self.repo.host == "upload.pypi.org" else self.repo
self.simple = repo_url.click("/simple/")
self.project = repo_url.click("/project/")
# Assume devpi
else:
self.simple = self.repo.child("+simple", "")
self.project = self.repo
| IndexURLs |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1203201,
"end": 1203385
} | class ____(sgqlc.types.Type, Contribution):
"""Represents a user signing up for a GitHub account."""
__schema__ = github_schema
__field_names__ = ()
| JoinedGitHubContribution |
python | pydata__xarray | xarray/core/indexing.py | {
"start": 21773,
"end": 22288
} | class ____(ExplicitlyIndexedNDArrayMixin):
"""Marker class for indexing adapters.
These classes translate between Xarray's indexing semantics and the underlying array's
indexing semantics.
"""
def get_duck_array(self):
key = BasicIndexer((slice(None),) * self.ndim)
return self[key]
async def async_get_duck_array(self):
"""These classes are applied to in-memory arrays, so specific async support isn't needed."""
return self.get_duck_array()
| IndexingAdapter |
python | wandb__wandb | wandb/sdk/artifacts/exceptions.py | {
"start": 1042,
"end": 1524
} | class ____(ArtifactStatusError):
"""Raised for Artifact methods or attributes only available after logging."""
def __init__(self, fullname: str, obj: ArtifactT):
*_, name = fullname.split(".")
msg = (
f"{fullname!r} used prior to logging artifact or while in offline mode. "
f"Call {nameof(obj.wait)}() before accessing logged artifact properties."
)
super().__init__(msg=msg, name=name, obj=obj)
| ArtifactNotLoggedError |
python | chardet__chardet | chardet/enums.py | {
"start": 322,
"end": 712
} | class ____(Flag):
"""
This enum represents the different language filters we can apply to a
``UniversalDetector``.
"""
NONE = 0x00
CHINESE_SIMPLIFIED = 0x01
CHINESE_TRADITIONAL = 0x02
JAPANESE = 0x04
KOREAN = 0x08
NON_CJK = 0x10
ALL = 0x1F
CHINESE = CHINESE_SIMPLIFIED | CHINESE_TRADITIONAL
CJK = CHINESE | JAPANESE | KOREAN
| LanguageFilter |
python | Netflix__metaflow | metaflow/_vendor/v3_7/typing_extensions.py | {
"start": 54718,
"end": 70473
} | class ____(metaclass=_TypeVarLikeMeta):
"""Type variable."""
_backported_typevarlike = typing.TypeVar
def __new__(cls, name, *constraints, bound=None,
covariant=False, contravariant=False,
default=_marker, infer_variance=False):
if hasattr(typing, "TypeAliasType"):
# PEP 695 implemented, can pass infer_variance to typing.TypeVar
typevar = typing.TypeVar(name, *constraints, bound=bound,
covariant=covariant, contravariant=contravariant,
infer_variance=infer_variance)
else:
typevar = typing.TypeVar(name, *constraints, bound=bound,
covariant=covariant, contravariant=contravariant)
if infer_variance and (covariant or contravariant):
raise ValueError("Variance cannot be specified with infer_variance.")
typevar.__infer_variance__ = infer_variance
_set_default(typevar, default)
_set_module(typevar)
return typevar
def __init_subclass__(cls) -> None:
raise TypeError(f"type '{__name__}.TypeVar' is not an acceptable base type")
# Python 3.10+ has PEP 612
if hasattr(typing, 'ParamSpecArgs'):
ParamSpecArgs = typing.ParamSpecArgs
ParamSpecKwargs = typing.ParamSpecKwargs
# 3.7-3.9
else:
class _Immutable:
"""Mixin to indicate that object should not be copied."""
__slots__ = ()
def __copy__(self):
return self
def __deepcopy__(self, memo):
return self
class ParamSpecArgs(_Immutable):
"""The args for a ParamSpec object.
Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.
ParamSpecArgs objects have a reference back to their ParamSpec:
P.args.__origin__ is P
This type is meant for runtime introspection and has no special meaning to
static type checkers.
"""
def __init__(self, origin):
self.__origin__ = origin
def __repr__(self):
return f"{self.__origin__.__name__}.args"
def __eq__(self, other):
if not isinstance(other, ParamSpecArgs):
return NotImplemented
return self.__origin__ == other.__origin__
class ParamSpecKwargs(_Immutable):
"""The kwargs for a ParamSpec object.
Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.
ParamSpecKwargs objects have a reference back to their ParamSpec:
P.kwargs.__origin__ is P
This type is meant for runtime introspection and has no special meaning to
static type checkers.
"""
def __init__(self, origin):
self.__origin__ = origin
def __repr__(self):
return f"{self.__origin__.__name__}.kwargs"
def __eq__(self, other):
if not isinstance(other, ParamSpecKwargs):
return NotImplemented
return self.__origin__ == other.__origin__
# 3.10+
if hasattr(typing, 'ParamSpec'):
# Add default parameter - PEP 696
class ParamSpec(metaclass=_TypeVarLikeMeta):
"""Parameter specification."""
_backported_typevarlike = typing.ParamSpec
def __new__(cls, name, *, bound=None,
covariant=False, contravariant=False,
infer_variance=False, default=_marker):
if hasattr(typing, "TypeAliasType"):
# PEP 695 implemented, can pass infer_variance to typing.TypeVar
paramspec = typing.ParamSpec(name, bound=bound,
covariant=covariant,
contravariant=contravariant,
infer_variance=infer_variance)
else:
paramspec = typing.ParamSpec(name, bound=bound,
covariant=covariant,
contravariant=contravariant)
paramspec.__infer_variance__ = infer_variance
_set_default(paramspec, default)
_set_module(paramspec)
return paramspec
def __init_subclass__(cls) -> None:
raise TypeError(f"type '{__name__}.ParamSpec' is not an acceptable base type")
# 3.7-3.9
else:
# Inherits from list as a workaround for Callable checks in Python < 3.9.2.
class ParamSpec(list, _DefaultMixin):
"""Parameter specification variable.
Usage::
P = ParamSpec('P')
Parameter specification variables exist primarily for the benefit of static
type checkers. They are used to forward the parameter types of one
callable to another callable, a pattern commonly found in higher order
functions and decorators. They are only valid when used in ``Concatenate``,
or s the first argument to ``Callable``. In Python 3.10 and higher,
they are also supported in user-defined Generics at runtime.
See class Generic for more information on generic types. An
example for annotating a decorator::
T = TypeVar('T')
P = ParamSpec('P')
def add_logging(f: Callable[P, T]) -> Callable[P, T]:
'''A type-safe decorator to add logging to a function.'''
def inner(*args: P.args, **kwargs: P.kwargs) -> T:
logging.info(f'{f.__name__} was called')
return f(*args, **kwargs)
return inner
@add_logging
def add_two(x: float, y: float) -> float:
'''Add two numbers together.'''
return x + y
Parameter specification variables defined with covariant=True or
contravariant=True can be used to declare covariant or contravariant
generic types. These keyword arguments are valid, but their actual semantics
are yet to be decided. See PEP 612 for details.
Parameter specification variables can be introspected. e.g.:
P.__name__ == 'T'
P.__bound__ == None
P.__covariant__ == False
P.__contravariant__ == False
Note that only parameter specification variables defined in global scope can
be pickled.
"""
# Trick Generic __parameters__.
__class__ = typing.TypeVar
@property
def args(self):
return ParamSpecArgs(self)
@property
def kwargs(self):
return ParamSpecKwargs(self)
def __init__(self, name, *, bound=None, covariant=False, contravariant=False,
infer_variance=False, default=_marker):
super().__init__([self])
self.__name__ = name
self.__covariant__ = bool(covariant)
self.__contravariant__ = bool(contravariant)
self.__infer_variance__ = bool(infer_variance)
if bound:
self.__bound__ = typing._type_check(bound, 'Bound must be a type.')
else:
self.__bound__ = None
_DefaultMixin.__init__(self, default)
# for pickling:
def_mod = _caller()
if def_mod != 'typing_extensions':
self.__module__ = def_mod
def __repr__(self):
if self.__infer_variance__:
prefix = ''
elif self.__covariant__:
prefix = '+'
elif self.__contravariant__:
prefix = '-'
else:
prefix = '~'
return prefix + self.__name__
def __hash__(self):
return object.__hash__(self)
def __eq__(self, other):
return self is other
def __reduce__(self):
return self.__name__
# Hack to get typing._type_check to pass.
def __call__(self, *args, **kwargs):
pass
# 3.7-3.9
if not hasattr(typing, 'Concatenate'):
# Inherits from list as a workaround for Callable checks in Python < 3.9.2.
class _ConcatenateGenericAlias(list):
# Trick Generic into looking into this for __parameters__.
__class__ = typing._GenericAlias
# Flag in 3.8.
_special = False
def __init__(self, origin, args):
super().__init__(args)
self.__origin__ = origin
self.__args__ = args
def __repr__(self):
_type_repr = typing._type_repr
return (f'{_type_repr(self.__origin__)}'
f'[{", ".join(_type_repr(arg) for arg in self.__args__)}]')
def __hash__(self):
return hash((self.__origin__, self.__args__))
# Hack to get typing._type_check to pass in Generic.
def __call__(self, *args, **kwargs):
pass
@property
def __parameters__(self):
return tuple(
tp for tp in self.__args__ if isinstance(tp, (typing.TypeVar, ParamSpec))
)
# 3.7-3.9
@typing._tp_cache
def _concatenate_getitem(self, parameters):
if parameters == ():
raise TypeError("Cannot take a Concatenate of no types.")
if not isinstance(parameters, tuple):
parameters = (parameters,)
if not isinstance(parameters[-1], ParamSpec):
raise TypeError("The last parameter to Concatenate should be a "
"ParamSpec variable.")
msg = "Concatenate[arg, ...]: each arg must be a type."
parameters = tuple(typing._type_check(p, msg) for p in parameters)
return _ConcatenateGenericAlias(self, parameters)
# 3.10+
if hasattr(typing, 'Concatenate'):
Concatenate = typing.Concatenate
_ConcatenateGenericAlias = typing._ConcatenateGenericAlias # noqa: F811
# 3.9
elif sys.version_info[:2] >= (3, 9):
@_ExtensionsSpecialForm
def Concatenate(self, parameters):
"""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
higher order function which adds, removes or transforms parameters of a
callable.
For example::
Callable[Concatenate[int, P], int]
See PEP 612 for detailed information.
"""
return _concatenate_getitem(self, parameters)
# 3.7-8
else:
class _ConcatenateForm(_ExtensionsSpecialForm, _root=True):
def __getitem__(self, parameters):
return _concatenate_getitem(self, parameters)
Concatenate = _ConcatenateForm(
'Concatenate',
doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
higher order function which adds, removes or transforms parameters of a
callable.
For example::
Callable[Concatenate[int, P], int]
See PEP 612 for detailed information.
""")
# 3.10+
if hasattr(typing, 'TypeGuard'):
TypeGuard = typing.TypeGuard
# 3.9
elif sys.version_info[:2] >= (3, 9):
@_ExtensionsSpecialForm
def TypeGuard(self, parameters):
"""Special typing form used to annotate the return type of a user-defined
type guard function. ``TypeGuard`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow. Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code. The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard. Such a function should use ``TypeGuard[...]`` as its
return type to alert static type checkers to this intention.
Using ``-> TypeGuard`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the type inside ``TypeGuard``.
For example::
def is_str(val: Union[str, float]):
# "isinstance" type guard
if isinstance(val, str):
# Type of ``val`` is narrowed to ``str``
...
else:
# Else, type of ``val`` is narrowed to ``float``.
...
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
form of ``TypeA`` (it can even be a wider form) and this may lead to
type-unsafe results. The main reason is to allow for things like
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
a subtype of the former, since ``List`` is invariant. The responsibility of
writing type-safe type guards is left to the user.
``TypeGuard`` also works with type variables. For more information, see
PEP 647 (User-Defined Type Guards).
"""
item = typing._type_check(parameters, f'{self} accepts only a single type.')
return typing._GenericAlias(self, (item,))
# 3.7-3.8
else:
class _TypeGuardForm(_ExtensionsSpecialForm, _root=True):
def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only a single type')
return typing._GenericAlias(self, (item,))
TypeGuard = _TypeGuardForm(
'TypeGuard',
doc="""Special typing form used to annotate the return type of a user-defined
type guard function. ``TypeGuard`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow. Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code. The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard. Such a function should use ``TypeGuard[...]`` as its
return type to alert static type checkers to this intention.
Using ``-> TypeGuard`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the type inside ``TypeGuard``.
For example::
def is_str(val: Union[str, float]):
# "isinstance" type guard
if isinstance(val, str):
# Type of ``val`` is narrowed to ``str``
...
else:
# Else, type of ``val`` is narrowed to ``float``.
...
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
form of ``TypeA`` (it can even be a wider form) and this may lead to
type-unsafe results. The main reason is to allow for things like
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
a subtype of the former, since ``List`` is invariant. The responsibility of
writing type-safe type guards is left to the user.
``TypeGuard`` also works with type variables. For more information, see
PEP 647 (User-Defined Type Guards).
""")
# Vendored from cpython typing._SpecialFrom
| TypeVar |
python | has2k1__plotnine | plotnine/stats/stat_unique.py | {
"start": 67,
"end": 449
} | class ____(stat):
"""
Remove duplicates
{usage}
Parameters
----------
{common_parameters}
See Also
--------
plotnine.geom_point : The default `geom` for this `stat`.
"""
DEFAULT_PARAMS = {"geom": "point", "position": "identity", "na_rm": False}
def compute_panel(self, data, scales):
return data.drop_duplicates()
| stat_unique |
python | PyCQA__pylint | doc/data/messages/i/invalid-overridden-method/bad.py | {
"start": 74,
"end": 178
} | class ____(Fruit):
def bore(self, insect): # [invalid-overridden-method]
insect.eat(self)
| Apple |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/strategies/_internal/featureflags.py | {
"start": 5199,
"end": 5539
} | class ____(SearchStrategy[FeatureFlags]):
def __init__(self, at_least_one_of: Iterable[Hashable] = ()):
super().__init__()
self._at_least_one_of = frozenset(at_least_one_of)
def do_draw(self, data: ConjectureData) -> FeatureFlags:
return FeatureFlags(data, at_least_one_of=self._at_least_one_of)
| FeatureStrategy |
python | agronholm__apscheduler | src/apscheduler/_events.py | {
"start": 4895,
"end": 5158
} | class ____(SchedulerEvent):
"""
Signals that a scheduler has stopped.
:ivar exception: the exception that caused the scheduler to stop, if any
"""
exception: BaseException | None = None
@attrs.define(kw_only=True, frozen=True)
| SchedulerStopped |
python | google__pytype | pytype/vm_test.py | {
"start": 3217,
"end": 3692
} | class ____(VmTestBase):
"""Tests for recording annotations."""
def test_record_local_ops(self):
self.ctx.vm.run_program("v: int = None", "", maximum_depth=10)
self.assertEqual(
self.ctx.vm.local_ops,
{
"<module>": [
vm.LocalOp(name="v", op=vm.LocalOp.Op.ASSIGN),
vm.LocalOp(name="v", op=vm.LocalOp.Op.ANNOTATE),
]
},
)
if __name__ == "__main__":
test_base.main()
| AnnotationsTest |
python | etianen__django-reversion | tests/test_app/tests/test_admin.py | {
"start": 8823,
"end": 9320
} | class ____(LoginMixin, AdminMixin, TestBase):
def testHistorylistView(self):
with reversion.create_revision():
obj = TestModelParent.objects.create()
response = self.client.get(resolve_url("admin:test_app_testmodelparent_history", obj.pk))
self.assertContains(response, resolve_url(
"admin:test_app_testmodelparent_revision",
obj.pk,
Version.objects.get_for_model(TestModelParent).get().pk,
))
| AdminHistoryViewTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1540110,
"end": 1570919
} | class ____(
sgqlc.types.Type,
Node,
Actor,
PackageOwner,
ProjectOwner,
ProjectV2Owner,
ProjectV2Recent,
RepositoryDiscussionAuthor,
RepositoryDiscussionCommentAuthor,
RepositoryOwner,
UniformResourceLocatable,
ProfileOwner,
Sponsorable,
):
"""A user is an individual's account on GitHub that owns repositories
and can make new content.
"""
__schema__ = github_schema
__field_names__ = (
"bio",
"bio_html",
"can_receive_organization_emails_when_notifications_restricted",
"commit_comments",
"company",
"company_html",
"contributions_collection",
"created_at",
"database_id",
"followers",
"following",
"gist",
"gist_comments",
"gists",
"hovercard",
"interaction_ability",
"is_bounty_hunter",
"is_campus_expert",
"is_developer_program_member",
"is_employee",
"is_following_viewer",
"is_git_hub_star",
"is_hireable",
"is_site_admin",
"is_viewer",
"issue_comments",
"issues",
"organization",
"organization_verified_domain_emails",
"organizations",
"pronouns",
"public_keys",
"pull_requests",
"repositories_contributed_to",
"saved_replies",
"social_accounts",
"starred_repositories",
"status",
"top_repositories",
"twitter_username",
"updated_at",
"viewer_can_follow",
"viewer_is_following",
"watching",
)
bio = sgqlc.types.Field(String, graphql_name="bio")
"""The user's public profile bio."""
bio_html = sgqlc.types.Field(sgqlc.types.non_null(HTML), graphql_name="bioHTML")
"""The user's public profile bio as HTML."""
can_receive_organization_emails_when_notifications_restricted = sgqlc.types.Field(
sgqlc.types.non_null(Boolean),
graphql_name="canReceiveOrganizationEmailsWhenNotificationsRestricted",
args=sgqlc.types.ArgDict((("login", sgqlc.types.Arg(sgqlc.types.non_null(String), graphql_name="login", default=None)),)),
)
"""Could this user receive email notifications, if the organization
had notification restrictions enabled?
Arguments:
* `login` (`String!`): The login of the organization to check.
"""
commit_comments = sgqlc.types.Field(
sgqlc.types.non_null(CommitCommentConnection),
graphql_name="commitComments",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of commit comments made by this user.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
company = sgqlc.types.Field(String, graphql_name="company")
"""The user's public profile company."""
company_html = sgqlc.types.Field(sgqlc.types.non_null(HTML), graphql_name="companyHTML")
"""The user's public profile company as HTML."""
contributions_collection = sgqlc.types.Field(
sgqlc.types.non_null(ContributionsCollection),
graphql_name="contributionsCollection",
args=sgqlc.types.ArgDict(
(
("organization_id", sgqlc.types.Arg(ID, graphql_name="organizationID", default=None)),
("from_", sgqlc.types.Arg(DateTime, graphql_name="from", default=None)),
("to", sgqlc.types.Arg(DateTime, graphql_name="to", default=None)),
)
),
)
"""The collection of contributions this user has made to different
repositories.
Arguments:
* `organization_id` (`ID`): The ID of the organization used to
filter contributions.
* `from_` (`DateTime`): Only contributions made at this time or
later will be counted. If omitted, defaults to a year ago.
* `to` (`DateTime`): Only contributions made before and up to
(including) this time will be counted. If omitted, defaults to
the current time or one year from the provided from argument.
"""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
"""Identifies the primary key from the database."""
followers = sgqlc.types.Field(
sgqlc.types.non_null(FollowerConnection),
graphql_name="followers",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of users the given user is followed by.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
following = sgqlc.types.Field(
sgqlc.types.non_null(FollowingConnection),
graphql_name="following",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of users the given user is following.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
gist = sgqlc.types.Field(
Gist,
graphql_name="gist",
args=sgqlc.types.ArgDict((("name", sgqlc.types.Arg(sgqlc.types.non_null(String), graphql_name="name", default=None)),)),
)
"""Find gist by repo name.
Arguments:
* `name` (`String!`): The gist name to find.
"""
gist_comments = sgqlc.types.Field(
sgqlc.types.non_null(GistCommentConnection),
graphql_name="gistComments",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of gist comments made by this user.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
gists = sgqlc.types.Field(
sgqlc.types.non_null(GistConnection),
graphql_name="gists",
args=sgqlc.types.ArgDict(
(
("privacy", sgqlc.types.Arg(GistPrivacy, graphql_name="privacy", default=None)),
("order_by", sgqlc.types.Arg(GistOrder, graphql_name="orderBy", default=None)),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of the Gists the user has created.
Arguments:
* `privacy` (`GistPrivacy`): Filters Gists according to privacy.
* `order_by` (`GistOrder`): Ordering options for gists returned
from the connection
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
hovercard = sgqlc.types.Field(
sgqlc.types.non_null(Hovercard),
graphql_name="hovercard",
args=sgqlc.types.ArgDict((("primary_subject_id", sgqlc.types.Arg(ID, graphql_name="primarySubjectId", default=None)),)),
)
"""The hovercard information for this user in a given context
Arguments:
* `primary_subject_id` (`ID`): The ID of the subject to get the
hovercard in the context of
"""
interaction_ability = sgqlc.types.Field(RepositoryInteractionAbility, graphql_name="interactionAbility")
"""The interaction ability settings for this user."""
is_bounty_hunter = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isBountyHunter")
"""Whether or not this user is a participant in the GitHub Security
Bug Bounty.
"""
is_campus_expert = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isCampusExpert")
"""Whether or not this user is a participant in the GitHub Campus
Experts Program.
"""
is_developer_program_member = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isDeveloperProgramMember")
"""Whether or not this user is a GitHub Developer Program member."""
is_employee = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isEmployee")
"""Whether or not this user is a GitHub employee."""
is_following_viewer = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isFollowingViewer")
"""Whether or not this user is following the viewer. Inverse of
viewerIsFollowing
"""
is_git_hub_star = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isGitHubStar")
"""Whether or not this user is a member of the GitHub Stars Program."""
is_hireable = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isHireable")
"""Whether or not the user has marked themselves as for hire."""
is_site_admin = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isSiteAdmin")
"""Whether or not this user is a site administrator."""
is_viewer = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isViewer")
"""Whether or not this user is the viewing user."""
issue_comments = sgqlc.types.Field(
sgqlc.types.non_null(IssueCommentConnection),
graphql_name="issueComments",
args=sgqlc.types.ArgDict(
(
("order_by", sgqlc.types.Arg(IssueCommentOrder, graphql_name="orderBy", default=None)),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of issue comments made by this user.
Arguments:
* `order_by` (`IssueCommentOrder`): Ordering options for issue
comments returned from the connection.
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
issues = sgqlc.types.Field(
sgqlc.types.non_null(IssueConnection),
graphql_name="issues",
args=sgqlc.types.ArgDict(
(
("order_by", sgqlc.types.Arg(IssueOrder, graphql_name="orderBy", default=None)),
("labels", sgqlc.types.Arg(sgqlc.types.list_of(sgqlc.types.non_null(String)), graphql_name="labels", default=None)),
("states", sgqlc.types.Arg(sgqlc.types.list_of(sgqlc.types.non_null(IssueState)), graphql_name="states", default=None)),
("filter_by", sgqlc.types.Arg(IssueFilters, graphql_name="filterBy", default=None)),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of issues associated with this user.
Arguments:
* `order_by` (`IssueOrder`): Ordering options for issues returned
from the connection.
* `labels` (`[String!]`): A list of label names to filter the pull
requests by.
* `states` (`[IssueState!]`): A list of states to filter the
issues by.
* `filter_by` (`IssueFilters`): Filtering options for issues
returned from the connection.
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
organization = sgqlc.types.Field(
Organization,
graphql_name="organization",
args=sgqlc.types.ArgDict((("login", sgqlc.types.Arg(sgqlc.types.non_null(String), graphql_name="login", default=None)),)),
)
"""Find an organization by its login that the user belongs to.
Arguments:
* `login` (`String!`): The login of the organization to find.
"""
organization_verified_domain_emails = sgqlc.types.Field(
sgqlc.types.non_null(sgqlc.types.list_of(sgqlc.types.non_null(String))),
graphql_name="organizationVerifiedDomainEmails",
args=sgqlc.types.ArgDict((("login", sgqlc.types.Arg(sgqlc.types.non_null(String), graphql_name="login", default=None)),)),
)
"""Verified email addresses that match verified domains for a
specified organization the user is a member of.
Arguments:
* `login` (`String!`): The login of the organization to match
verified domains from.
"""
organizations = sgqlc.types.Field(
sgqlc.types.non_null(OrganizationConnection),
graphql_name="organizations",
args=sgqlc.types.ArgDict(
(
("order_by", sgqlc.types.Arg(OrganizationOrder, graphql_name="orderBy", default=None)),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of organizations the user belongs to.
Arguments:
* `order_by` (`OrganizationOrder`): Ordering options for the
User's organizations. (default: `null`)
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
pronouns = sgqlc.types.Field(String, graphql_name="pronouns")
"""The user's profile pronouns"""
public_keys = sgqlc.types.Field(
sgqlc.types.non_null(PublicKeyConnection),
graphql_name="publicKeys",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of public keys associated with this user.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
pull_requests = sgqlc.types.Field(
sgqlc.types.non_null(PullRequestConnection),
graphql_name="pullRequests",
args=sgqlc.types.ArgDict(
(
(
"states",
sgqlc.types.Arg(sgqlc.types.list_of(sgqlc.types.non_null(PullRequestState)), graphql_name="states", default=None),
),
("labels", sgqlc.types.Arg(sgqlc.types.list_of(sgqlc.types.non_null(String)), graphql_name="labels", default=None)),
("head_ref_name", sgqlc.types.Arg(String, graphql_name="headRefName", default=None)),
("base_ref_name", sgqlc.types.Arg(String, graphql_name="baseRefName", default=None)),
("order_by", sgqlc.types.Arg(IssueOrder, graphql_name="orderBy", default=None)),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of pull requests associated with this user.
Arguments:
* `states` (`[PullRequestState!]`): A list of states to filter the
pull requests by.
* `labels` (`[String!]`): A list of label names to filter the pull
requests by.
* `head_ref_name` (`String`): The head ref name to filter the pull
requests by.
* `base_ref_name` (`String`): The base ref name to filter the pull
requests by.
* `order_by` (`IssueOrder`): Ordering options for pull requests
returned from the connection.
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
repositories_contributed_to = sgqlc.types.Field(
sgqlc.types.non_null(RepositoryConnection),
graphql_name="repositoriesContributedTo",
args=sgqlc.types.ArgDict(
(
("privacy", sgqlc.types.Arg(RepositoryPrivacy, graphql_name="privacy", default=None)),
("order_by", sgqlc.types.Arg(RepositoryOrder, graphql_name="orderBy", default=None)),
("is_locked", sgqlc.types.Arg(Boolean, graphql_name="isLocked", default=None)),
("include_user_repositories", sgqlc.types.Arg(Boolean, graphql_name="includeUserRepositories", default=None)),
(
"contribution_types",
sgqlc.types.Arg(sgqlc.types.list_of(RepositoryContributionType), graphql_name="contributionTypes", default=None),
),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of repositories that the user recently contributed to.
Arguments:
* `privacy` (`RepositoryPrivacy`): If non-null, filters
repositories according to privacy
* `order_by` (`RepositoryOrder`): Ordering options for
repositories returned from the connection
* `is_locked` (`Boolean`): If non-null, filters repositories
according to whether they have been locked
* `include_user_repositories` (`Boolean`): If true, include user
repositories
* `contribution_types` (`[RepositoryContributionType]`): If non-
null, include only the specified types of contributions. The
GitHub.com UI uses [COMMIT, ISSUE, PULL_REQUEST, REPOSITORY]
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
saved_replies = sgqlc.types.Field(
SavedReplyConnection,
graphql_name="savedReplies",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"order_by",
sgqlc.types.Arg(SavedReplyOrder, graphql_name="orderBy", default={"field": "UPDATED_AT", "direction": "DESC"}),
),
)
),
)
"""Replies this user has saved
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `order_by` (`SavedReplyOrder`): The field to order saved replies
by. (default: `{field: UPDATED_AT, direction: DESC}`)
"""
social_accounts = sgqlc.types.Field(
sgqlc.types.non_null(SocialAccountConnection),
graphql_name="socialAccounts",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""The user's social media accounts, ordered as they appear on the
user's profile.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
starred_repositories = sgqlc.types.Field(
sgqlc.types.non_null(StarredRepositoryConnection),
graphql_name="starredRepositories",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
("owned_by_viewer", sgqlc.types.Arg(Boolean, graphql_name="ownedByViewer", default=None)),
("order_by", sgqlc.types.Arg(StarOrder, graphql_name="orderBy", default=None)),
)
),
)
"""Repositories the user has starred.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `owned_by_viewer` (`Boolean`): Filters starred repositories to
only return repositories owned by the viewer.
* `order_by` (`StarOrder`): Order for connection
"""
status = sgqlc.types.Field("UserStatus", graphql_name="status")
"""The user's description of what they're currently doing."""
top_repositories = sgqlc.types.Field(
sgqlc.types.non_null(RepositoryConnection),
graphql_name="topRepositories",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
("order_by", sgqlc.types.Arg(sgqlc.types.non_null(RepositoryOrder), graphql_name="orderBy", default=None)),
("since", sgqlc.types.Arg(DateTime, graphql_name="since", default=None)),
)
),
)
"""Repositories the user has contributed to, ordered by contribution
rank, plus repositories the user has created
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `order_by` (`RepositoryOrder!`): Ordering options for
repositories returned from the connection
* `since` (`DateTime`): How far back in time to fetch contributed
repositories
"""
twitter_username = sgqlc.types.Field(String, graphql_name="twitterUsername")
"""The user's Twitter username."""
updated_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="updatedAt")
"""Identifies the date and time when the object was last updated."""
viewer_can_follow = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanFollow")
"""Whether or not the viewer is able to follow the user."""
viewer_is_following = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerIsFollowing")
"""Whether or not this user is followed by the viewer. Inverse of
isFollowingViewer.
"""
watching = sgqlc.types.Field(
sgqlc.types.non_null(RepositoryConnection),
graphql_name="watching",
args=sgqlc.types.ArgDict(
(
("privacy", sgqlc.types.Arg(RepositoryPrivacy, graphql_name="privacy", default=None)),
("order_by", sgqlc.types.Arg(RepositoryOrder, graphql_name="orderBy", default=None)),
("affiliations", sgqlc.types.Arg(sgqlc.types.list_of(RepositoryAffiliation), graphql_name="affiliations", default=None)),
(
"owner_affiliations",
sgqlc.types.Arg(
sgqlc.types.list_of(RepositoryAffiliation), graphql_name="ownerAffiliations", default=("OWNER", "COLLABORATOR")
),
),
("is_locked", sgqlc.types.Arg(Boolean, graphql_name="isLocked", default=None)),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of repositories the given user is watching.
Arguments:
* `privacy` (`RepositoryPrivacy`): If non-null, filters
repositories according to privacy
* `order_by` (`RepositoryOrder`): Ordering options for
repositories returned from the connection
* `affiliations` (`[RepositoryAffiliation]`): Affiliation options
for repositories returned from the connection. If none
specified, the results will include repositories for which the
current viewer is an owner or collaborator, or member.
* `owner_affiliations` (`[RepositoryAffiliation]`): Array of
owner's affiliation options for repositories returned from the
connection. For example, OWNER will include only repositories
that the organization or user being viewed owns. (default:
`[OWNER, COLLABORATOR]`)
* `is_locked` (`Boolean`): If non-null, filters repositories
according to whether they have been locked
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
| User |
python | pypa__pip | tests/data/packages/HackedEggInfo/setup.py | {
"start": 88,
"end": 303
} | class ____(orig_egg_info.egg_info):
def run(self):
orig_egg_info.egg_info.run(self)
setup(
name="hackedegginfo",
version="0.0.0",
cmdclass={"egg_info": egg_info},
zip_safe=False,
)
| egg_info |
python | pypa__pipenv | pipenv/patched/pip/_internal/req/__init__.py | {
"start": 587,
"end": 3126
} | class ____:
name: str
def _validate_requirements(
requirements: List[InstallRequirement],
) -> Generator[Tuple[str, InstallRequirement], None, None]:
for req in requirements:
assert req.name, f"invalid to-be-installed requirement: {req}"
yield req.name, req
def install_given_reqs(
requirements: List[InstallRequirement],
global_options: Sequence[str],
root: Optional[str],
home: Optional[str],
prefix: Optional[str],
warn_script_location: bool,
use_user_site: bool,
pycompile: bool,
progress_bar: str,
) -> List[InstallationResult]:
"""
Install everything in the given list.
(to be called after having downloaded and unpacked the packages)
"""
to_install = collections.OrderedDict(_validate_requirements(requirements))
if to_install:
logger.info(
"Installing collected packages: %s",
", ".join(to_install.keys()),
)
installed = []
show_progress = logger.isEnabledFor(logging.INFO) and len(to_install) > 1
items = iter(to_install.values())
if show_progress:
renderer = get_install_progress_renderer(
bar_type=progress_bar, total=len(to_install)
)
items = renderer(items)
with indent_log():
for requirement in items:
req_name = requirement.name
assert req_name is not None
if requirement.should_reinstall:
logger.info("Attempting uninstall: %s", req_name)
with indent_log():
uninstalled_pathset = requirement.uninstall(auto_confirm=True)
else:
uninstalled_pathset = None
try:
requirement.install(
global_options,
root=root,
home=home,
prefix=prefix,
warn_script_location=warn_script_location,
use_user_site=use_user_site,
pycompile=pycompile,
)
except Exception:
# if install did not succeed, rollback previous uninstall
if uninstalled_pathset and not requirement.install_succeeded:
uninstalled_pathset.rollback()
raise
else:
if uninstalled_pathset and requirement.install_succeeded:
uninstalled_pathset.commit()
installed.append(InstallationResult(req_name))
return installed
| InstallationResult |
python | pytorch__pytorch | torch/_dynamo/variables/ctx_manager.py | {
"start": 11197,
"end": 12911
} | class ____(ContextWrappingVariable):
"""represents torch.func.jvp increment/decrement nesting"""
# A guard is needed as the grad level is baked into the torch FX graph
# This is fine if jvp is only called from within the function
# being compiled. But the FX graph may be invalid in the case of a jvp
# call from eager that calls the compiled function, as the jvp levels
# may be different.
_guards_singleton = Guard(GlobalStateSource(), GuardBuilder.FUNCTORCH_STACK_MATCH) # type: ignore[arg-type]
@staticmethod
def create(
tx: "InstructionTranslator", **kwargs: Any
) -> "JvpIncrementNestingCtxManagerVariable":
var = JvpIncrementNestingCtxManagerVariable(
target_values=None,
initial_values=None,
**kwargs,
)
return var
def enter(self, tx: "InstructionTranslator") -> VariableTracker:
install_guard(self._guards_singleton)
jvp_level = torch._functorch.eager_transforms.enter_jvp_nesting()
self.set_cleanup_hook(
tx, lambda: torch._functorch.eager_transforms.exit_jvp_nesting()
)
self.proxy = tx.output.create_node(
"call_function",
torch._C._functorch._jvp_increment_nesting,
(),
{},
)
return variables.ConstantVariable.create(jvp_level)
def exit(
self, tx: "InstructionTranslator", *args: VariableTracker
) -> VariableTracker:
self.cleanup()
tx.output.create_node(
"call_function", torch._C._functorch._jvp_decrement_nesting, (), {}
)
return variables.ConstantVariable.create(None)
| JvpIncrementNestingCtxManagerVariable |
python | PrefectHQ__prefect | tests/client/test_prefect_client.py | {
"start": 62652,
"end": 64033
} | class ____:
@pytest.fixture
async def test_app(self):
app = FastAPI()
basic = HTTPBasic()
# Returns given credentials if an Authorization
# header is passed, otherwise raises 403
@app.get("/api/check_for_auth_header")
async def check_for_auth_header(credentials=Depends(basic)):
return {"username": credentials.username, "password": credentials.password}
return app
async def test_client_passes_auth_string_as_auth_header(self, test_app):
auth_string = "admin:admin"
async with PrefectClient(test_app, auth_string=auth_string) as client:
res = await client._client.get("/check_for_auth_header")
assert res.status_code == status.HTTP_200_OK
assert res.json() == {"username": "admin", "password": "admin"}
async def test_client_no_auth_header_without_auth_string(self, test_app):
async with PrefectClient(test_app) as client:
with pytest.raises(httpx.HTTPStatusError, match="401"):
await client._client.get("/check_for_auth_header")
async def test_get_client_includes_auth_string_from_context(self):
with temporary_settings(updates={PREFECT_API_AUTH_STRING: "admin:test"}):
client = get_client()
assert client._client.headers["Authorization"].startswith("Basic")
| TestClientAuthString |
python | jina-ai__jina | tests/integration/networking/__init__.py | {
"start": 84,
"end": 195
} | class ____(Executor):
@requests
def foo(self, docs, **kwargs):
docs[0].text = 'dummy'
| DummyExecutor |
python | getsentry__sentry | tests/sentry/middleware/test_access_log_middleware.py | {
"start": 920,
"end": 1070
} | class ____(Endpoint):
permission_classes = (SentryIsAuthenticated,)
def get(self, request):
return Response({"ok": True})
| DummyEndpoint |
python | charliermarsh__ruff | scripts/check_ecosystem.py | {
"start": 819,
"end": 6002
} | class ____(NamedTuple):
"""A GitHub repository at a specific ref."""
org: str
repo: str
ref: str | None
select: str = ""
ignore: str = ""
exclude: str = ""
# Generating fixes is slow and verbose
show_fixes: bool = False
@asynccontextmanager
async def clone(self: Self, checkout_dir: Path) -> AsyncIterator[Path]:
"""Shallow clone this repository to a temporary directory."""
if checkout_dir.exists():
logger.debug(f"Reusing {self.org}:{self.repo}")
yield await self._get_commit(checkout_dir)
return
logger.debug(f"Cloning {self.org}:{self.repo}")
git_clone_command = [
"git",
"clone",
"--config",
"advice.detachedHead=false",
"--quiet",
"--depth",
"1",
"--no-tags",
]
if self.ref:
git_clone_command.extend(["--branch", self.ref])
git_clone_command.extend(
[
f"https://github.com/{self.org}/{self.repo}",
checkout_dir,
],
)
git_clone_process = await create_subprocess_exec(
*git_clone_command,
env={"GIT_TERMINAL_PROMPT": "0"},
)
status_code = await git_clone_process.wait()
logger.debug(
f"Finished cloning {self.org}/{self.repo} with status {status_code}",
)
yield await self._get_commit(checkout_dir)
def url_for(self: Self, commit_sha: str, path: str, lnum: int | None = None) -> str:
"""
Return the GitHub URL for the given commit, path, and line number, if given.
"""
# Default to main branch
url = f"https://github.com/{self.org}/{self.repo}/blob/{commit_sha}/{path}"
if lnum:
url += f"#L{lnum}"
return url
async def _get_commit(self: Self, checkout_dir: Path) -> str:
"""Return the commit sha for the repository in the checkout directory."""
git_sha_process = await create_subprocess_exec(
*["git", "rev-parse", "HEAD"],
cwd=checkout_dir,
stdout=PIPE,
)
git_sha_stdout, _ = await git_sha_process.communicate()
assert await git_sha_process.wait() == 0, (
f"Failed to retrieve commit sha at {checkout_dir}"
)
return git_sha_stdout.decode().strip()
# Repositories to check
# We check most repositories with the default ruleset instead of all rules to avoid
# noisy reports when new rules are added; see https://github.com/astral-sh/ruff/pull/3590
REPOSITORIES: list[Repository] = [
Repository("DisnakeDev", "disnake", "master"),
Repository("PostHog", "HouseWatch", "main"),
Repository("RasaHQ", "rasa", "main"),
Repository("Snowflake-Labs", "snowcli", "main"),
Repository("aiven", "aiven-client", "main"),
Repository("alteryx", "featuretools", "main"),
Repository("apache", "airflow", "main", select="ALL"),
Repository("apache", "superset", "master", select="ALL"),
Repository("aws", "aws-sam-cli", "develop"),
Repository("binary-husky", "gpt_academic", "master"),
Repository("bloomberg", "pytest-memray", "main"),
Repository("bokeh", "bokeh", "branch-3.3", select="ALL"),
# Disabled due to use of explicit `select` with `E999`, which has been removed.
# See: https://github.com/astral-sh/ruff/pull/12129
# Repository("demisto", "content", "master"),
Repository("docker", "docker-py", "main"),
Repository("facebookresearch", "chameleon", "main"),
Repository("freedomofpress", "securedrop", "develop"),
Repository("fronzbot", "blinkpy", "dev"),
Repository("ibis-project", "ibis", "master"),
Repository("ing-bank", "probatus", "main"),
Repository("jrnl-org", "jrnl", "develop"),
Repository("langchain-ai", "langchain", "main"),
Repository("latchbio", "latch", "main"),
Repository("lnbits", "lnbits", "main"),
Repository("milvus-io", "pymilvus", "master"),
Repository("mlflow", "mlflow", "master"),
Repository("model-bakers", "model_bakery", "main"),
Repository("pandas-dev", "pandas", "main"),
Repository("prefecthq", "prefect", "main"),
Repository("pypa", "build", "main"),
Repository("pypa", "cibuildwheel", "main"),
Repository("pypa", "pip", "main"),
Repository("pypa", "setuptools", "main"),
Repository("python", "mypy", "master"),
Repository("python", "typeshed", "main", select="PYI"),
Repository("python-poetry", "poetry", "master"),
Repository("qdrant", "qdrant-client", "master"),
Repository("reflex-dev", "reflex", "main"),
Repository("rotki", "rotki", "develop"),
Repository("scikit-build", "scikit-build", "main"),
Repository("scikit-build", "scikit-build-core", "main"),
Repository("sphinx-doc", "sphinx", "master"),
Repository("spruceid", "siwe-py", "main"),
Repository("tiangolo", "fastapi", "master"),
Repository("yandex", "ch-backup", "main"),
Repository("zulip", "zulip", "main", select="ALL"),
]
SUMMARY_LINE_RE = re.compile(r"^(Found \d+ error.*)|(.*potentially fixable with.*)$")
| Repository |
python | django__django | tests/aggregation_regress/models.py | {
"start": 1466,
"end": 1674
} | class ____(models.Model):
EntryID = models.AutoField(primary_key=True, db_column="Entry ID")
Entry = models.CharField(unique=True, max_length=50)
Exclude = models.BooleanField(default=False)
| Entries |
python | pytest-dev__pytest | testing/_py/test_local.py | {
"start": 40690,
"end": 42696
} | class ____:
pytestmark = win32only
def test_owner_group_not_implemented(self, path1):
with pytest.raises(NotImplementedError):
_ = path1.stat().owner
with pytest.raises(NotImplementedError):
_ = path1.stat().group
def test_chmod_simple_int(self, path1):
mode = path1.stat().mode
# Ensure that we actually change the mode to something different.
path1.chmod((mode == 0 and 1) or 0)
try:
print(path1.stat().mode)
print(mode)
assert path1.stat().mode != mode
finally:
path1.chmod(mode)
assert path1.stat().mode == mode
def test_path_comparison_lowercase_mixed(self, path1):
t1 = path1.join("a_path")
t2 = path1.join("A_path")
assert t1 == t1
assert t1 == t2
def test_relto_with_mixed_case(self, path1):
t1 = path1.join("a_path", "fiLe")
t2 = path1.join("A_path")
assert t1.relto(t2) == "fiLe"
def test_allow_unix_style_paths(self, path1):
t1 = path1.join("a_path")
assert t1 == str(path1) + "\\a_path"
t1 = path1.join("a_path/")
assert t1 == str(path1) + "\\a_path"
t1 = path1.join("dir/a_path")
assert t1 == str(path1) + "\\dir\\a_path"
def test_sysfind_in_currentdir(self, path1):
cmd = local.sysfind("cmd")
root = cmd.new(dirname="", basename="") # c:\ in most installations
with root.as_cwd():
x = local.sysfind(cmd.relto(root))
assert x.check(file=1)
def test_fnmatch_file_abspath_posix_pattern_on_win32(self, tmpdir):
# path-matching patterns might contain a posix path separator '/'
# Test that we can match that pattern on windows.
import posixpath
b = tmpdir.join("a", "b")
assert b.fnmatch(posixpath.sep.join("ab"))
pattern = posixpath.sep.join([str(tmpdir), "*", "b"])
assert b.fnmatch(pattern)
| TestWINLocalPath |
python | lazyprogrammer__machine_learning_examples | rl2/mountaincar/pg_tf.py | {
"start": 792,
"end": 1361
} | class ____:
def __init__(self, M1, M2, f=tf.nn.tanh, use_bias=True, zeros=False):
if zeros:
W = np.zeros((M1, M2), dtype=np.float32)
else:
W = tf.random_normal(shape=(M1, M2)) * np.sqrt(2. / M1, dtype=np.float32)
self.W = tf.Variable(W)
self.use_bias = use_bias
if use_bias:
self.b = tf.Variable(np.zeros(M2).astype(np.float32))
self.f = f
def forward(self, X):
if self.use_bias:
a = tf.matmul(X, self.W) + self.b
else:
a = tf.matmul(X, self.W)
return self.f(a)
# approximates pi(a | s)
| HiddenLayer |
python | PyCQA__pylint | tests/functional/u/useless/useless_parent_delegation.py | {
"start": 14591,
"end": 14748
} | class ____(NoReturnType):
choices = ["a", "b"]
def draw(self) -> str: # [useless-parent-delegation]
return super().draw()
| ReturnTypeSpecified |
python | realpython__materials | dwitter-part-1/source_code_final/dwitter/models.py | {
"start": 159,
"end": 690
} | class ____(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
follows = models.ManyToManyField(
"self", related_name="followed_by", symmetrical=False, blank=True
)
def __str__(self):
return self.user.username
@receiver(post_save, sender=User)
def create_profile(sender, instance, created, **kwargs):
if created:
user_profile = Profile(user=instance)
user_profile.save()
user_profile.follows.add(instance.profile)
user_profile.save()
| Profile |
python | Lightning-AI__lightning | tests/tests_pytorch/strategies/test_deepspeed.py | {
"start": 1653,
"end": 1998
} | class ____(BoringModel):
def __init__(self):
super().__init__()
self.layer = None
def configure_model(self) -> None:
if self.layer is None:
self.layer = torch.nn.Linear(32, 2)
def on_load_checkpoint(self, checkpoint: dict[str, Any]) -> None:
self.configure_model()
| ModelParallelBoringModel |
python | apache__airflow | airflow-core/tests/unit/ti_deps/deps/test_task_not_running_dep.py | {
"start": 1046,
"end": 1389
} | class ____:
def test_not_running_state(self):
ti = Mock(state=State.QUEUED, end_date=datetime(2016, 1, 1))
assert TaskNotRunningDep().is_met(ti=ti)
def test_running_state(self):
ti = Mock(state=State.RUNNING, end_date=datetime(2016, 1, 1))
assert not TaskNotRunningDep().is_met(ti=ti)
| TestTaskNotRunningDep |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/spanner.py | {
"start": 22044,
"end": 25409
} | class ____(GoogleCloudBaseOperator):
"""
Deletes a Cloud Spanner database.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SpannerDeleteDatabaseInstanceOperator`
:param instance_id: Cloud Spanner instance ID.
:param database_id: Cloud Spanner database ID.
:param project_id: Optional, the ID of the project that owns the Cloud Spanner
Database. If set to None or missing, the default project_id from the Google Cloud connection is used.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_spanner_database_delete_template_fields]
template_fields: Sequence[str] = (
"project_id",
"instance_id",
"database_id",
"gcp_conn_id",
"impersonation_chain",
)
# [END gcp_spanner_database_delete_template_fields]
def __init__(
self,
*,
instance_id: str,
database_id: str,
project_id: str = PROVIDE_PROJECT_ID,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.instance_id = instance_id
self.project_id = project_id
self.database_id = database_id
self.gcp_conn_id = gcp_conn_id
self._validate_inputs()
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def _validate_inputs(self) -> None:
if self.project_id == "":
raise AirflowException("The required parameter 'project_id' is empty")
if not self.instance_id:
raise AirflowException("The required parameter 'instance_id' is empty or None")
if not self.database_id:
raise AirflowException("The required parameter 'database_id' is empty or None")
def execute(self, context: Context) -> bool:
hook = SpannerHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
database = hook.get_database(
project_id=self.project_id, instance_id=self.instance_id, database_id=self.database_id
)
if not database:
self.log.info(
"The Cloud Spanner database was missing: "
"'%s' in project '%s' and instance '%s'. Assuming success.",
self.database_id,
self.project_id,
self.instance_id,
)
return True
return hook.delete_database(
project_id=self.project_id, instance_id=self.instance_id, database_id=self.database_id
)
| SpannerDeleteDatabaseInstanceOperator |
python | falconry__falcon | falcon/inspect.py | {
"start": 12035,
"end": 12478
} | class ____(_Traversable):
"""Describes a sink.
Args:
prefix (str): The prefix of the sink.
name (str): The name of the sink function or class.
source_info (str): The source path where this sink was defined.
"""
__visit_name__ = 'sink'
def __init__(self, prefix: str, name: str, source_info: str):
self.prefix = prefix
self.name = name
self.source_info = source_info
| SinkInfo |
python | python-poetry__poetry | src/poetry/utils/threading.py | {
"start": 328,
"end": 2420
} | class ____(functools.cached_property[T]):
def __init__(self, func: Callable[[C], T]) -> None:
super().__init__(func)
self._semaphore = threading.BoundedSemaphore()
self._locks: WeakKeyDictionary[object, threading.Lock] = WeakKeyDictionary()
@overload
def __get__(
self, instance: None, owner: type[Any] | None = ...
) -> AtomicCachedProperty[T]: ...
@overload
def __get__(self, instance: object, owner: type[Any] | None = ...) -> T: ...
def __get__(
self, instance: C | None, owner: type[Any] | None = None
) -> AtomicCachedProperty[T] | T:
# If there's no instance, return the descriptor itself
if instance is None:
return self
if instance not in self._locks:
with self._semaphore:
# we double-check the lock has not been created by another thread
if instance not in self._locks:
self._locks[instance] = threading.Lock()
# Use a thread-safe lock to ensure the property is computed only once
with self._locks[instance]:
return super().__get__(instance, owner)
def atomic_cached_property(func: Callable[[C], T]) -> AtomicCachedProperty[T]:
"""
A thread-safe implementation of functools.cached_property that ensures lazily-computed
properties are calculated only once, even in multithreaded environments.
This property decorator works similar to functools.cached_property but employs
thread locks and a bounded semaphore to handle concurrent access safely.
The computed value is cached on the instance itself and is reused for subsequent
accesses unless explicitly invalidated. The added thread-safety makes it ideal for
situations where multiple threads might access and compute the property simultaneously.
Note:
- The cache is stored in the instance dictionary just like `functools.cached_property`.
:param func: The function to be turned into a thread-safe cached property.
"""
return AtomicCachedProperty(func)
| AtomicCachedProperty |
python | mlflow__mlflow | mlflow/langchain/chat_agent_langgraph.py | {
"start": 11007,
"end": 13120
} | class ____(ToolNode):
"""
Helper class to make ToolNodes be compatible with
:py:class:`ChatAgentState <mlflow.langchain.chat_agent_langgraph.ChatAgentState>`.
Parse ``attachments`` and ``custom_outputs`` keys from the string output of a
LangGraph tool.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def invoke(self, input: Input, config: RunnableConfig | None = None, **kwargs: Any) -> Any:
"""
Wraps the standard ToolNode invoke method to:
- Parse ChatAgentState into LangChain messages
- Parse dictionary string outputs from both UC function and standard LangChain python tools
that include keys ``content``, ``attachments``, and ``custom_outputs``.
"""
messages = input["messages"]
for msg in messages:
for tool_call in msg.get("tool_calls", []):
tool_call["name"] = tool_call["function"]["name"]
tool_call["args"] = json.loads(tool_call["function"]["arguments"])
input["messages"] = convert_to_messages(messages)
result = super().invoke(input, config, **kwargs)
messages = []
custom_outputs = None
for m in result["messages"]:
try:
return_obj = json.loads(m.content)
if all(key in return_obj for key in ("format", "value", "truncated")):
# Dictionary output with custom_outputs and attachments from a UC function
try:
return_obj = json.loads(return_obj["value"])
except Exception:
pass
if "custom_outputs" in return_obj:
custom_outputs = return_obj["custom_outputs"]
if m.id is None:
m.id = str(uuid4())
messages.append(parse_message(m, attachments=return_obj.get("attachments")))
except Exception:
messages.append(parse_message(m))
return {"messages": messages, "custom_outputs": custom_outputs}
| ChatAgentToolNode |
python | pytorch__pytorch | test/inductor/test_cudagraph_trees.py | {
"start": 2740,
"end": 179933
} | class ____(InductorTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._stack = contextlib.ExitStack()
cls._stack.enter_context(
config.patch(
{
"debug": True,
"cpp.min_chunk_size": 1,
"triton.autotune_pointwise": False, # too slow
"implicit_fallbacks": False,
}
)
)
@classmethod
def tearDownClass(cls):
cls._stack.close()
super().tearDownClass()
def setUp(self):
torch._dynamo.reset()
super().setUp()
def tearDown(self):
super().tearDown()
torch._dynamo.reset()
if HAS_CUDA_AND_TRITON:
def get_all_cudagraph_segments():
segments = torch.cuda.memory_snapshot()
return [segment for segment in segments if segment["segment_pool_id"] != (0, 0)]
def all_live_blocks():
blocks_addrs = []
for segment in get_all_cudagraph_segments():
addr = segment["address"]
for block in segment["blocks"]:
if block["state"] == "active_allocated":
blocks_addrs.append(addr)
addr += block["size"]
return blocks_addrs
def all_live_block_count():
return len(all_live_blocks())
class CudaGraphTreeTests(TestCase):
def setUp(self):
super().setUp()
self.graph_stack = contextlib.ExitStack()
self.graph_stack.enter_context(
config.patch(
{
"triton.cudagraphs": True,
"triton.cudagraph_trees": True,
"triton.fast_path_cudagraph_asserts": True, # too slow
"triton.slow_path_cudagraph_asserts": True,
}
)
)
self.graph_stack.enter_context(
dynamo_config.patch(automatic_dynamic_shapes=True)
)
self.device_idx = torch.rand([0], device="cuda").device.index
warnings.filterwarnings("ignore")
def tearDown(self):
super().tearDown()
torch._dynamo.reset()
gc.collect()
torch.cuda.empty_cache()
self.graph_stack.close()
self.assertIsNone(self.get_manager())
self.assertEqual(all_live_block_count(), 0)
self.assertEqual(len(get_all_cudagraph_segments()), 0)
warnings.resetwarnings()
def get_manager(self, device_index=None):
return torch._inductor.cudagraph_trees.get_container(
device_index if device_index else self.device_idx
).tree_manager
def get_roots(self):
return self.get_manager().get_roots()
def curr_node(self):
return self.get_manager().current_node
def get_root_children(self):
return [root.num_descendants() for root in self.get_roots()]
def cudagraphify_impl(
self, *args, is_inference=True, is_backward=False, **kwargs
):
return tree_cudagraphify_impl(
*args,
**kwargs,
device_index=self.device_idx,
is_inference=is_inference,
is_backward=is_backward,
)
@staticmethod
def run_twc(fn, *args, **kwargs):
fn(*args, **kwargs)
return fn(*args, **kwargs)
def num_checkpoints(self):
return self.get_manager().debug_checkpointing_counter
def test_run_simple(self):
def foo(x):
return x * x * x
foo_opt = torch.compile(foo)
ones = torch.ones([4, 4], device="cuda")
zeros = torch.zeros([5, 5], device="cuda")
self.run_twc(foo_opt, ones)
self.run_twc(foo_opt, zeros)
self.assertEqual(self.get_root_children(), [0, 0])
def check_rng(self):
@torch.compile(mode="reduce-overhead")
def foo():
return torch.rand([20])
torch.manual_seed(0)
out = foo()
out2 = foo()
out3 = foo()
torch.manual_seed(0)
self.assertEqual(out, foo())
self.assertEqual(out2, foo())
self.assertEqual(out3, foo())
@torch._inductor.config.patch("fallback_random", True)
def test_rng_trees(self):
self.check_rng()
@torch._inductor.config.patch("triton.cudagraph_trees", False)
@torch._inductor.config.patch("fallback_random", True)
def test_rng_non_trees(self):
self.check_rng()
def test_mutation_reinplaced(self):
import torch.nn as nn
class Model(nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, input, other, out):
input = torch.logical_xor(input=input, other=other, out=out)
return input
x = torch.rand([1, 2, 1, 4, 9, 7], dtype=torch.float32).cuda()
y = torch.rand([1, 2, 1, 4, 9, 7], dtype=torch.float32).cuda()
z = torch.rand([1, 2, 1, 4, 9, 7], dtype=torch.float16).cuda()
model = Model().cuda()
eag = model(x, y, z)
with capture_stderr() as captured_output:
opt = torch.compile(model.forward, mode="reduce-overhead")(x, y, z)
FileCheck().check(
"skipping cudagraphs due to mutated inputs (1 instances). Found from"
).check("torch.logical_xor").run(captured_output[0])
self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
@requires_multigpu()
@parametrize("backend", ("inductor", "cudagraphs"))
def test_multiple_devices_msg(self, backend):
def foo(x, y):
return (x + 1, y + 2)
foo = get_compile_fn(backend)(foo)
with capture_stderr() as captured_output:
foo(torch.ones([10], device="cuda"), torch.ones([20]))
if torch._inductor.config.graph_partition:
# graph partition splits on cpu ops
self.assertEqual(counters["inductor"]["cudagraph_skips"], 0)
else:
FileCheck().check(
"skipping cudagraphs due to cpu device (arg1_1). Found from"
).check("y + 2").run(captured_output[0])
self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
with capture_stderr() as captured_output:
foo(
torch.ones([10], device="cuda:0"), torch.ones([10], device="cuda:1")
)
FileCheck().check("skipping cudagraphs due to multiple devices").run(
captured_output[0]
)
self.assertEqual(
counters["inductor"]["cudagraph_skips"],
1 if torch._inductor.config.graph_partition else 2,
)
@torch._inductor.config.patch("triton.cudagraph_skip_dynamic_graphs", True)
def test_skip_symbolic(self):
@torch.compile(dynamic=True)
def foo(x, y):
return x + y
with capture_stderr() as captured_output:
foo(torch.rand([10], device="cuda"), torch.rand([10], device="cuda"))
FileCheck().check(
"skipping cudagraphs due to graph with symbolic shapes inputs"
).check("x + y").run(captured_output[0])
self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
@parametrize("backend", ("inductor", "cudagraphs"))
@torch._dynamo.config.patch("cudagraph_backend_keep_input_mutation", True)
@torch._dynamo.config.patch("cudagraph_backend_support_input_mutation", True)
@torch._inductor.config.patch("triton.cudagraph_support_input_mutation", True)
def test_mutation_on_inp(self, backend):
def foo(x):
x.add_(2)
return x
foo = get_compile_fn(backend)(foo)
def inp():
return torch.ones([10], device="cuda")
with capture_stderr() as captured_output:
foo(inp())
FileCheck().check(
"skipping cudagraphs due to mutated inputs (1 instances). Found from"
).check(".add_(2)").run(captured_output[0])
self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
# mutation on inp doesn't hit cudagraphs
self.assertEqual(len(self.get_manager().roots), 0)
# mutation on parameters/buffers hits cudagraphs
class Mod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.buf = torch.ones([10], device="cuda")
def forward(self, x):
self.buf.add_(x)
return self.buf + x
def foo(mod, x):
return mod(x)
foo = get_compile_fn(backend)(foo)
mod = Mod()
mod2 = Mod()
for _ in range(3):
self.assertEqual(foo(mod, inp()), mod2(inp()))
self.assertEqual(mod.buf, mod2.buf)
self.assertIsNotNone(self.get_manager())
@parametrize("backend", ("inductor", "cudagraphs"))
@torch._dynamo.config.patch("cudagraph_backend_keep_input_mutation", True)
@torch._dynamo.config.patch("cudagraph_backend_support_input_mutation", False)
@torch._inductor.config.patch("triton.cudagraph_support_input_mutation", False)
def test_mutation_cudagraph_managed_tensors_config(self, backend):
def foo(x):
return x + 1
def mut(x):
x.add_(2)
return x
def non_mut(x):
return x.add(2)
mut = get_compile_fn(backend)(mut)
foo = get_compile_fn(backend)(foo)
with capture_stderr() as captured_output:
for _ in range(3):
torch.compiler.cudagraph_mark_step_begin()
inp = torch.rand([4], device="cuda")
tmp = foo(inp)
mut_out = mut(tmp)
self.assertEqual(mut_out, non_mut(foo(inp)))
FileCheck().check_count(
"skipping cudagraphs due to mutated inputs (1 instances). Found from",
1,
exactly=True,
).run(captured_output[0])
@parametrize("backend", ("inductor", "cudagraphs"))
@torch._dynamo.config.patch("cudagraph_backend_keep_input_mutation", True)
@torch._dynamo.config.patch("cudagraph_backend_support_input_mutation", True)
@torch._inductor.config.patch("triton.cudagraph_support_input_mutation", True)
def test_mutation_cudagraph_managed_tensors(self, backend):
def foo(x):
return x + 1
def mut(x):
x.add_(2)
return x
def non_mut(x):
return x.add(2)
mut = get_compile_fn(backend)(mut)
foo = get_compile_fn(backend)(foo)
with capture_stderr() as captured_output:
for _ in range(3):
torch.compiler.cudagraph_mark_step_begin()
inp = torch.rand([4], device="cuda")
tmp = foo(inp)
mut_out = mut(tmp)
self.assertEqual(mut_out, non_mut(foo(inp)))
FileCheck().check_count(
"skipping cudagraphs due to mutated inputs (1 instances). Found from",
0,
exactly=True,
).run(captured_output[0])
self.assertTrue("cudagraph_skips" not in counters["inductor"])
torch.compiler.cudagraph_mark_step_begin()
inp = torch.rand([4], device="cuda")
tmp = foo(inp)
mut_inp = tmp.clone()
# in this case, what previously a mutated cudagraph managed tensor is no longer,
# now its an input from eager we should fallback to inductor without cudagraphs
with capture_stderr() as captured_output:
mut(mut_inp)
FileCheck().check(
"skipping cudagraphs due to mutated inputs (1 instances). Found from"
).check("x.add_(2)").run(captured_output[0])
self.assertEqual(mut_inp, non_mut(foo(inp)))
self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
@parametrize("backend", ("inductor", "cudagraphs"))
@torch._dynamo.config.patch("cudagraph_backend_keep_input_mutation", True)
@torch._dynamo.config.patch("cudagraph_backend_support_input_mutation", True)
@torch._inductor.config.patch("triton.cudagraph_support_input_mutation", True)
def test_mutation_cudagraph_managed_tensor_warn(self, backend):
def foo(x):
return x.add_(1)
def fee(y, z):
return z.add(3)
def inp():
return torch.rand([4], device="cuda")
foo = get_compile_fn(backend)(foo)
fee = get_compile_fn(backend)(fee)
with capture_stderr() as captured_output:
for _ in range(3):
torch.compiler.cudagraph_mark_step_begin()
fee(inp(), foo(inp()))
FileCheck().check_count(
"skipping cudagraphs due to mutated inputs (1 instances). Found from",
1,
exactly=True,
).run(captured_output[0])
self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
@parametrize("backend", ("inductor", "cudagraphs"))
@torch._dynamo.config.patch("cudagraph_backend_keep_input_mutation", True)
@torch._dynamo.config.patch("cudagraph_backend_support_input_mutation", True)
@torch._inductor.config.patch("triton.cudagraph_support_input_mutation", True)
def test_mutation_cudagraph_managed_tensor_warn_only_once(self, backend):
def foo(x):
return x + 1
def mut(x):
x.add_(2)
return x
def inp():
return torch.rand([4], device="cuda")
mut = get_compile_fn(backend)(mut)
foo = get_compile_fn(backend)(foo)
with capture_stderr() as captured_output:
# Should warn for current_node=None
mut(inp())
for _ in range(3):
torch.compiler.cudagraph_mark_step_begin()
tmp = foo(inp())
mut(tmp) # should not warn
mut_inp = tmp.clone()
mut(mut_inp) # should not warn since mut has warned
FileCheck().check_count(
"skipping cudagraphs due to mutated inputs (1 instances). Found from",
1,
exactly=True,
).run(captured_output[0])
self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
def test_index_put(self):
def fn(x, y, z):
x = torch.zeros_like(x)
return x.index_put_([y], z, True)
fn_c = torch.compile(mode="reduce-overhead")(fn)
for i in range(3):
def args():
x = torch.zeros((512, 512), dtype=torch.bool, device="cuda")
y = torch.arange(512, dtype=torch.int64, device="cuda")
z = torch.ones((512, 512), dtype=torch.bool, device="cuda")
return x, y, z
if i == 0:
out, code = run_and_get_code(fn_c, *args())
FileCheck().check("aten.index_put_").check_same("True").run(code[0])
else:
out = fn_c(*args())
self.assertEqual(fn(*args()), out)
def test_function_compiled_multiple_times(self):
def foo(x):
y = foo2(x)
y2 = foo2(y)
return y + y2
def foo2(x):
torch._dynamo.graph_break()
return x * x * x
foo_opt = torch.compile(foo)
ones = torch.ones([4, 4], device="cuda")
foo(ones)
foo_opt(ones)
foo_opt(ones)
self.assertEqual(foo_opt(ones), foo(ones))
# paths
children = self.get_root_children()
# one root with two children
self.assertEqual(children, [2])
def test_end_recording_early(self):
def foo(x):
y = x * x * x
torch._dynamo.graph_break()
z = x + y
return z
@torch.compile
def foo2(x):
return x + 4
foo_opt = torch.compile(foo)
for _ in range(3):
out = foo_opt(torch.ones([4, 4], device="cuda"))
del out
# when I tried inducing separate recordings via graph break,
# the frame kept interfering by keeping outputs alive
# this isn't great by simulates the logic.
from torch._dynamo.mutation_guard import GenerationTracker
GenerationTracker.generation -= 1
out = foo2(torch.ones([4, 4], device="cuda"))
del out
foo_opt(torch.ones([4, 4], device="cuda"))
# Two separate traces - one has a child, one doesn't
self.assertEqual(self.get_root_children(), [1, 0])
def test_execution_into_recording(self):
def foo(x):
y = x + x
if y.sum() > 0:
return y + 10
else:
return y - 10
foo_opt = torch.compile(foo)
inp = torch.zeros([4, 4], dtype=torch.float, device="cuda")
self.assertEqual(foo_opt(inp), foo(inp))
self.assertEqual(foo_opt(inp), foo(inp))
inp.add_(1)
out_eager = foo(inp)
out_warmup = foo_opt(inp)
self.assertEqual(out_warmup, out_eager)
# warmup should be have storage deallocator hooked on
self.assertEqual(all_live_block_count(), 1)
out_live = foo_opt(inp)
self.assertEqual(out_live, out_eager)
# should be in recording mode, with storage deallocator hooked on
self.assertEqual(all_live_block_count(), 1)
# warmup should have been freed
del out_warmup
# should be in recording mode, with storage deallocator hooked on
self.assertEqual(all_live_block_count(), 1)
del out_live
self.assertEqual(all_live_block_count(), 0)
out = foo_opt(inp)
self.assertEqual(foo(inp), out)
# should be in execution mode
self.assertEqual(all_live_block_count(), 0)
def test_forward_with_skipped_cudagraphed_backward(self):
@torch.compile(mode="reduce-overhead")
def foo(x):
return x * x * x
for _ in range(3):
inp = torch.rand([20, 20], device="cuda", requires_grad=True)
out = foo(inp)
with config.patch(always_complex_memory_overlap_TESTING_ONLY=True):
back_inp = torch.empty_strided([20, 20], [0, 1], device="cuda")
out.backward(back_inp)
# we should not have cudagraph'd the backwards
new_id = self.get_manager().new_graph_id().id
self.assertEqual(new_id, 1)
self.assertFalse(self.get_manager().running_forwards_with_pending_backwards)
@torch._functorch.config.patch("enable_autograd_cache", True)
@torch._inductor.config.patch("fx_graph_cache", True)
@torch._inductor.config.patch("fx_graph_remote_cache", False)
# Currently fx graph cache is turned off for specialize_float=False
@torch._dynamo.config.patch("specialize_float", True)
def test_cache_hit_forward_miss_backward(self):
# Test that we don't cache cudagraphs, skipping cudagraphs on backward on a cache miss
@torch.compile(mode="reduce-overhead")
def foo(x):
return x * x * x
# Run forwards, fx graph should cache miss
for _ in range(3):
torch._dynamo.reset()
counters.clear()
FxGraphCache.clear()
AOTAutogradCache.clear()
with config.patch(always_complex_memory_overlap_TESTING_ONLY=True):
inp = torch.rand([20, 20], device="cuda", requires_grad=True)
out = foo(inp)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
# Reset dynamo and related caches except for FXGraphCache
torch._dynamo.reset()
# Forwards should be a cache hit now, we still skip cudagraphs
inp = torch.rand([20, 20], device="cuda", requires_grad=True)
out = foo(inp)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)
# Run backward without complex memory overlap being set
# Run the backward without complex memory overlap reason
# cache should miss, but cudagraphs should not run
# because forward skipped it
back_inp = torch.empty_strided([20, 20], [0, 1], device="cuda")
out.backward(back_inp)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 2)
# Run it one more time, this time AOTAutogradCache will hit
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 2)
self.assertEqual(counters["aot_autograd"]["autograd_cache_saved"], 1)
torch._dynamo.reset()
inp = torch.rand([20, 20], device="cuda", requires_grad=True)
out = foo(inp)
back_inp = torch.empty_strided([20, 20], [0, 1], device="cuda")
out.backward(back_inp)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 1)
# we should not have cudagraph'd anything
assert self.get_manager() is None
@torch._functorch.config.patch("enable_autograd_cache", True)
@torch._inductor.config.patch("fx_graph_cache", True)
@torch._inductor.config.patch("fx_graph_remote_cache", False)
# Currently fx graph cache is turned off for specialize_float=False
@torch._dynamo.config.patch("specialize_float", True)
@requires_multigpu()
def test_cached_boxed_forward_device_index(self):
@torch.compile(mode="reduce-overhead")
def foo(x):
return x * x * x
# Run with device index 1 so that we can see
# on a cache hit we stay on device index 1
with torch.cuda._DeviceGuard(1):
torch.cuda.set_device(1)
inp = torch.rand([20, 20], device="cuda", requires_grad=True)
out = foo(inp)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
# Compile the backward and save to cache
back_inp = torch.empty_strided([20, 20], [0, 1], device="cuda")
out.backward(back_inp)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 2)
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_saved"], 1)
# Reset dynamo and rerun a few times
for i in range(3):
torch._dynamo.reset()
inp = torch.rand([20, 20], device="cuda", requires_grad=True)
out = foo(inp)
# Should cache hit each time; boxed_forward_device_index should still be set properly to 1
self.assertEqual(
counters["aot_autograd"]["autograd_cache_hit"], i + 1
)
back_inp = torch.empty_strided([20, 20], [0, 1], device="cuda")
out.backward(back_inp)
# After everything, we should have cudagraphs on device 1
self.assertTrue(self.get_manager(device_index=0) is None)
self.assertFalse(self.get_manager(device_index=1) is None)
@torch._functorch.config.patch("enable_autograd_cache", True)
@torch._inductor.config.patch("fx_graph_cache", True)
@torch._inductor.config.patch("fx_graph_remote_cache", False)
# Currently fx graph cache is turned off for specialize_float=False
@torch._dynamo.config.patch("specialize_float", True)
def test_backward_gets_cached_cudagraphs(self):
# We pass cpu tensors to foo and save that into the cache
# On a subsequent run in a new process, cudagraphs should be
# disabled properly on both forward and backwards runs.
@torch.compile(mode="reduce-overhead")
def foo(x):
return x * x * x
torch._dynamo.reset()
counters.clear()
FxGraphCache.clear()
AOTAutogradCache.clear()
# Use cpu device to disable cudagraphs during compilation
inp = torch.rand([20, 20], device="cpu", requires_grad=True)
out = foo(inp)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
back_inp = torch.empty_strided([20, 20], [0, 1], device="cpu")
out.backward(back_inp)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 2)
# Run again on new process
torch._dynamo.reset()
# Forward and backward should also disable cudagraphs without compilation
inp = torch.rand([20, 20], device="cpu", requires_grad=True)
out = foo(inp)
# AOTAutogradCache will load the forward and the backward from cache immediately, so fx_graph_cache_hit will equal 2
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 2)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 1)
torch._dynamo.reset()
back_inp = torch.empty_strided([20, 20], [0, 1], device="cpu")
out.backward(back_inp)
# we should not have cudagraph'd anything
assert self.get_manager() is None
@torch._inductor.config.patch("triton.skip_cudagraph_warmup", True)
@torch._functorch.config.patch("enable_autograd_cache", True)
@torch._inductor.config.patch("fx_graph_cache", True)
@torch._inductor.config.patch("fx_graph_remote_cache", False)
# Currently fx graph cache is turned off for specialize_float=False
@torch._dynamo.config.patch("specialize_float", True)
def test_cached_forward_backward(self):
counters.clear()
AOTAutogradCache.clear()
FxGraphCache.clear()
@torch.compile
def foo(x):
torch.manual_seed(0)
y = x * 2
return torch.sin(y) * torch.nn.functional.dropout(x, p=0.4)
inp = torch.rand([4, 4], requires_grad=True, device="cuda")
inp2 = inp.detach().clone().requires_grad_(True)
out = foo(inp)
out.sum().backward()
self.assertEqual(self.get_root_children(), [1])
# the three saved tensors should die in the backward
# we kept alive the output
self.assertEqual(self.curr_node().expected_dead_indices_before_graph, [])
if torch._inductor.config.graph_partition:
self.assertEqual(
self.curr_node().expected_dead_indices_after_graph,
[(0, 0), (0, 2)],
)
else:
self.assertEqual(
self.curr_node().expected_dead_indices_after_graph,
[(0, 1), (0, 2)],
)
self.assertFalse(self.get_manager().new_graph_id().id == 0)
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
# Reset dynamo and rerun. We should see a cache hit now
torch._dynamo.reset()
out2 = foo(inp2)
out2.sum().backward()
self.assertEqual(out, out2)
self.assertEqual(inp.grad, inp2.grad)
self.assertEqual(self.get_root_children(), [1])
self.assertFalse(self.get_manager().new_graph_id().id == 0)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 1)
@parametrize("backend", ("inductor", "cudagraphs"))
def test_forward_backward_not_called(self, backend):
def foo(x, y):
x_out = x * x * x
torch._dynamo.graph_break()
y_out = y * y * y
return x_out, y_out
foo = get_compile_fn(backend)(foo)
for _ in range(3):
inps = [
torch.rand([20, 20], requires_grad=True, device="cuda")
for _ in range(2)
]
x_out, y_out = foo(inps[0], inps[1])
x_out.sum().backward()
self.assertFalse(self.get_manager().running_forwards_with_pending_backwards)
# we should not have cudagraph'd the y backward
new_id = self.get_manager().new_graph_id().id
self.assertEqual(new_id, 3)
def _test_unaligned_static_input_impl(self, expected_clones):
def fn(x, y):
return (x + y,)
def get_aligned_inputs():
return [torch.rand([5, 5], device="cuda") for _ in range(2)]
mod = make_fx(fn)(*get_aligned_inputs())
mode = torch._subclasses.FakeTensorMode()
with mode:
inps = [torch.rand([6, 5], device="cuda")[1:] for _ in range(2)]
compiled_f = compile_fx_inner(
mod, inps, static_input_idxs=[0], cudagraphs=True
)
def get_unaligned_inputs():
return [torch.rand([6, 5], device="cuda")[1:] for _ in range(2)]
class CloneCounterMode(TorchDispatchMode):
def __init__(self) -> None:
self.count = 0
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
kwargs = {} if kwargs is None else kwargs
self.count += func is torch.ops.aten.clone.default
return func(*args, **kwargs)
for _ in range(3):
with CloneCounterMode() as m:
compiled_f(get_unaligned_inputs())
self.assertEqual(m.count, expected_clones)
compiled_f(get_aligned_inputs())
self.assertEqual(m.count, expected_clones)
def test_unaligned_static_input_trees(self):
self._test_unaligned_static_input_impl(expected_clones=0)
@torch._inductor.config.patch("triton.cudagraph_trees", False)
def test_unaligned_static_input_non_trees(self):
self._test_unaligned_static_input_impl(expected_clones=0)
@torch._inductor.config.patch("triton.cudagraphs", False)
def test_unaligned_static_input_no_cudagraphs(self):
self._test_unaligned_static_input_impl(expected_clones=0)
@torch._inductor.config.patch("graph_partition", True)
@torch._inductor.config.patch("implicit_fallbacks", True)
def test_graph_partition_custom_rule(self):
    """Check that `custom_should_partition_ops` controls graph partitioning.

    Verifies: (1) an op not in the list yields one partition, (2) adding the
    op (by "ns::name" or "ns::name.overload") yields two partitions, and
    (3) changing the config does not force a recompile on its own but does
    produce a cache miss on the next explicit recompile.
    """

    def get_num_partitions(code):
        # Count entries inside the generated `partitions=[...]` list in the
        # emitted wrapper code.
        code = "".join(code)
        found = re.search(r"partitions=\[(.*)\]", code)
        assert found is not None
        partitions = found.group(1)
        num_partitions = len([p for p in partitions.split(",") if p])
        return num_partitions

    @torch.library.custom_op("mylib::bar", mutates_args=())
    def bar(x: torch.Tensor, flag: int) -> torch.Tensor:
        return x.clone()

    @bar.register_fake
    def _(x, flag):
        return x.clone()

    def f(x, flag):
        x = x + 1
        x = bar(x, flag)
        x = x + 1
        return x

    x = torch.randn(2, device="cuda")
    f_compiled = torch.compile(f, mode="reduce-overhead", fullgraph=True)
    _, code = run_and_get_code(f_compiled, x, True)
    num_partitions = get_num_partitions(code)
    self.assertEqual(num_partitions, 1)

    @torch.library.custom_op("mylib::baz", mutates_args=())
    def baz(x: torch.Tensor) -> torch.Tensor:
        return x.clone()

    @baz.register_fake
    def _(x):
        return x.clone()

    # custom_should_partition_ops takes effect which lead to 2 partitions
    torch._inductor.config.custom_should_partition_ops = ["mylib::baz"]

    def f(x):
        x = x + 1
        x = baz(x)
        x = x + 1
        return x

    f_compiled = torch.compile(f, mode="reduce-overhead", fullgraph=True)
    _, code = run_and_get_code(f_compiled, x)
    num_partitions = get_num_partitions(code)
    self.assertEqual(num_partitions, 2)

    # update the config should NOT force recompile
    torch._inductor.config.custom_should_partition_ops = []
    with torch.compiler.set_stance("fail_on_recompile"):
        f_compiled(x)

    # run_and_get_code forces recompile. Now we should cache miss, recompile, and
    # only have 1 partition.
    _, code = run_and_get_code(f_compiled, x)
    num_partitions = get_num_partitions(code)
    self.assertEqual(num_partitions, 1)

    # test that op_overload name takes effect which lead to 2 partitions
    torch._inductor.config.custom_should_partition_ops = ["mylib::baz.default"]
    f_compiled = torch.compile(f, mode="reduce-overhead", fullgraph=True)
    _, code = run_and_get_code(f_compiled, x)
    num_partitions = get_num_partitions(code)
    self.assertEqual(num_partitions, 2)
@torch._inductor.config.patch("graph_partition", True)
@torch._inductor.config.patch("implicit_fallbacks", True)
def test_graph_partition_with_memory_plan_reuse(self):
    """Eager and compiled outputs must match when graph partition interacts
    with memory-plan reuse.

    Uses a custom op tagged `cudagraph_unsafe` (forcing partition boundaries)
    inside a two-submodule model that reuses a persistent hidden-state buffer
    across calls, then compares eager vs. reduce-overhead compiled outputs
    over three iterations.
    """
    BATCH_SIZE = 16
    MLP_SIZE = 128
    HIDDEN_SIZE = 128
    RANDOM_SEED = 0  # NOTE(review): defined but not used in the visible body

    # Mutating custom op marked cudagraph-unsafe so the partitioner must cut
    # the graph around it.
    @torch.library.custom_op(
        "silly::attention",
        mutates_args=["out"],
        tags=(torch._C.Tag.cudagraph_unsafe,),
    )
    def attention(
        q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, out: torch.Tensor
    ) -> None:
        out.copy_(q + k + v)

    @attention.register_fake
    def _(q, k, v, out):
        return None

    class ParentModel(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return x

    class Attention(torch.nn.Module):
        def __init__(self, mlp_size: int, hidden_size: int) -> None:
            super().__init__()
            self.pre_attn = torch.nn.Linear(mlp_size, hidden_size, bias=False)
            self.post_attn = torch.nn.Linear(hidden_size, mlp_size, bias=False)
            self.rms_norm_weight = torch.nn.Parameter(torch.ones(hidden_size))

        def rms_norm_ref(self, x: torch.Tensor) -> torch.Tensor:
            # Reference RMSNorm in fp32 with eps=1e-6, cast back to input dtype.
            x_f32 = x.float()
            return (
                x_f32
                * torch.rsqrt(
                    torch.mean(x_f32.square(), dim=-1, keepdim=True) + 1e-6
                )
                * self.rms_norm_weight
            ).to(x.dtype)

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            x = self.pre_attn(x)
            x = self.rms_norm_ref(x)
            attn_output = torch.empty_like(x)
            torch.ops.silly.attention(x, x, x, attn_output)
            x = attn_output
            x = self.rms_norm_ref(x)
            x = self.post_attn(x)
            return x

    class CompiledAttention(torch.nn.Module):
        def __init__(
            self,
            *,
            mlp_size: int,
            hidden_size: int,
        ) -> None:
            super().__init__()
            self.attn = Attention(mlp_size, hidden_size)

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return self.attn(x)

    class CompiledAttentionTwo(CompiledAttention):
        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return self.attn(x) + x

    class SimpleModelWithTwoGraphs(ParentModel):
        def __init__(
            self,
            *,
            mlp_size: int,
            hidden_size: int,
        ) -> None:
            super().__init__()
            self.attn_one = CompiledAttention(
                mlp_size=mlp_size,
                hidden_size=hidden_size,
            )
            self.attn_two = CompiledAttentionTwo(
                mlp_size=mlp_size,
                hidden_size=hidden_size,
            )
            self.hidden_states = torch.zeros((BATCH_SIZE, MLP_SIZE)).cuda()

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            bsz = x.shape[0]
            # CUDAGraph expects same tensor addresses for each run
            self.hidden_states[:bsz].copy_(x)
            x = self.attn_one(self.hidden_states[:bsz])
            self.hidden_states[:bsz].copy_(x)
            x = self.attn_two(self.hidden_states[:bsz])
            return x

    eager_model = (
        SimpleModelWithTwoGraphs(
            mlp_size=MLP_SIZE,
            hidden_size=HIDDEN_SIZE,
        )
        .eval()
        .cuda()
    )
    compiled_model = torch.compile(eager_model, mode="reduce-overhead")
    inputs = torch.randn(BATCH_SIZE, MLP_SIZE).cuda()
    # Three iterations: warmup, record, replay.
    for _ in range(3):
        eager_out = eager_model(inputs)
        compiled_out = compiled_model(inputs)
        self.assertEqual(eager_out, compiled_out)
@torch._inductor.config.patch("graph_partition", True)
@torch._inductor.config.patch("triton.cudagraph_trees", False)
def test_graph_partition_gc(self):
    """The cudagraph manager must be collectable once all compiled artifacts
    go out of scope (no reference cycles keep it alive)."""

    def _test_dummy():
        # Everything compiled here is scoped to this function so it can be
        # garbage collected afterwards.
        def foo(x):
            return x + 1

        foo = torch.compile(foo)
        for _ in range(3):
            foo(torch.randn(2, 3, device="cuda"))

    _test_dummy()
    gc.collect()
    # After GC, no manager should remain for this device.
    self.assertIsNone(self.get_manager())
def test_sparsity(self):
    """Compiled construction of a sparse COO tensor matches eager across
    warmup/record/replay iterations."""

    def foo(view_6, buf31):
        return aten._sparse_coo_tensor_with_dims_and_tensors(
            1,
            1,
            [1000000, 64],
            view_6,
            buf31,
            dtype=torch.float32,
            layout=torch.sparse_coo,
            device="cuda",
            pin_memory=None,
        )

    foo_opt = torch.compile(foo)
    view_6 = torch.zeros([1, 102397], dtype=torch.int64, device="cuda")
    buf31 = torch.rand([102397, 64], device="cuda")
    for _ in range(3):
        self.assertEqual(foo_opt(view_6, buf31), foo(view_6, buf31))
def test_accumulate_multiple_recordings(self):
    """Multiple recordings (from a graph break + data-dependent branch) keep
    the live-block accounting of the cudagraph memory pool correct.

    NOTE: the statement order and `del`s below are load-bearing — the
    assertions count live blocks at specific points in time.
    """

    def foo(x):
        y = x + x + x
        torch._dynamo.graph_break()
        if y.sum() <= 0:
            return y
        else:
            return y * 10

    foo_opt = torch.compile(foo)
    # two separate compilations & recordings
    out1 = self.run_twc(foo_opt, torch.zeros([5], device="cuda"))
    # out1 gets manually freed
    out2 = self.run_twc(foo_opt, torch.zeros([6], device="cuda"))
    self.assertEqual(all_live_block_count(), 1)
    out3 = self.run_twc(foo_opt, torch.ones([5], device="cuda"))
    self.assertEqual(out3, foo(torch.ones([5], device="cuda")))
    self.assertEqual(all_live_block_count(), 1)
    del out1, out2
    self.assertEqual(all_live_block_count(), 1)
    del out3
    gc.collect()
    self.assertEqual(all_live_block_count(), 0)
@torch._inductor.config.patch("freezing", True)
def test_constant_output(self):
    """With freezing enabled, outputs that alias a frozen parameter (the
    parameter itself and a slice of it) still match eager."""

    class Mod(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.param = torch.nn.Parameter(
                torch.tensor([float(i) for i in range(10)], device="cuda")
            )

        def forward(self, inp):
            # Returns the parameter, an alias of it, and a computed tensor.
            return self.param, self.param[0:2], inp + 2

    inp = torch.tensor([2], device="cuda")
    m = Mod()
    with torch.no_grad():
        out_eager = m(inp)
        m_comp = torch.compile(m)
        for _ in range(3):
            self.assertEqual(out_eager, m_comp(inp))
def test_live_outputs_multiple_graphs(self):
    """Live outputs across a graph break trigger the expected number of
    checkpoints when a new branch must be warmed up and recorded."""

    def foo(x):
        x = x + x + x
        y = x + 1
        torch._dynamo.graph_break()
        z = x * x
        if z.sum() > 0:
            return y + 1
        else:
            return y

    foo_opt = torch.compile(foo)
    self.run_twc(foo_opt, torch.zeros([5], device="cuda"))
    self.assertEqual(self.num_checkpoints(), 0)
    out = self.run_twc(foo_opt, torch.ones([5], device="cuda"))
    self.assertEqual(all_live_block_count(), 1)
    del out
    self.assertEqual(all_live_block_count(), 0)
    # we need to checkpoint from function to warmup y + 1,
    # and then again to record it
    self.assertEqual(self.num_checkpoints(), 2)
def test_expanded_inputs(self):
    """Expanded (stride-0) inputs are supported: results match eager and a
    graph is actually recorded (new_graph_id advanced past 0)."""
    x = torch.rand(1, 512, device="cuda").expand(4, 512)

    def foo(x):
        return x + 4 + torch.ones([4, 512], device="cuda")

    foo_opt = torch.compile()(foo)
    for _ in range(3):
        self.assertEqual(foo_opt(x), foo(x))
    self.assertFalse(self.get_manager().new_graph_id().id == 0)
@torch._inductor.config.patch("triton.skip_cudagraph_warmup", True)
def test_tensor_dies_between_checkpoint(self):
    """A tensor that dies between two recordings must be deallocated from
    the pool after the checkpoint.

    NOTE: allocation order and `del`s are load-bearing for the live-block
    counts asserted below.
    """

    def foo(args):
        x = args[0]
        args.clear()
        return x + 1, x + 2

    inp = torch.rand([4], device="cuda")
    inp_list = [inp]
    foo_cg = self.cudagraphify_impl(foo, inp_list, ())
    foo_cg(inp_list)
    foo_cg([inp])
    out1, out2 = foo_cg([inp])
    inp = [out1]
    del out1, out2

    def foo2(args):
        x = args[0]
        args.clear()
        return [x * x * x]

    self.assertEqual(self.num_checkpoints(), 0)
    foo2_cg = self.cudagraphify_impl(foo2, inp, ())
    x = foo2_cg(inp)[0]
    self.assertEqual(self.num_checkpoints(), 1)
    # out2 dies between the previous recording and the new one,
    # need to be manually deallocated after the checkpoint
    self.assertEqual(all_live_block_count(), 1)
    del x
    self.assertEqual(all_live_block_count(), 0)
def test_aliased_storage_single_weakref(self):
    """Outputs aliasing the same storage (across a graph break) share a
    single underlying storage object, and no weakrefs survive deletion."""

    @torch.compile(mode="reduce-overhead")
    def foo(x):
        x = x * 20
        x_alias = x[0]
        y = x * 10
        y_alias = y[0]
        torch._dynamo.graph_break()
        ind = torch.tensor(4, device="cuda")
        x_alias2 = x[ind:]
        y_alias2 = y[ind:]
        return x, x_alias, x_alias2, y_alias, y_alias2

    for _ in range(4):
        outs = foo(torch.rand([20, 20], device="cuda"))
        # Only two distinct storages should back the five outputs
        # (everything aliases either x or y).
        ptr_to_ref = {
            out.untyped_storage().data_ptr(): out.untyped_storage()._cdata
            for out in outs
        }
        self.assertEqual(len(ptr_to_ref), 2)
        for out in outs:
            self.assertEqual(
                ptr_to_ref[out.untyped_storage().data_ptr()],
                out.untyped_storage()._cdata,
            )
        del outs
        del out

    node = self.get_manager().current_node
    self.assertEqual(len(list(node.path_live_weakrefs())), 0)
    self.assertFalse(self.get_manager().new_graph_id().id == 0)
def test_aliasing_static_ref(self):
    """Outputs that alias a static (parameter) input are not cached as
    persistent cudagraph outputs."""

    class Mod(torch.nn.Linear):
        def forward(self, x):
            # Returns two aliases of self.weight plus a regular output.
            return self.weight.T @ x, self.weight.T, self.weight[0:4]

    m = Mod(10, 10).cuda()

    @torch.compile(mode="reduce-overhead")
    def foo(mod, x):
        return mod(x)

    @torch.compile(mode="reduce-overhead")
    def foo2(x):
        return x[2:]

    param_c = cdata(m.weight)
    for _ in range(3):
        x = torch.rand([10, 10], device="cuda", requires_grad=True)
        torch.compiler.cudagraph_mark_step_begin()
        out1, alias_1, alias_2 = foo(m, x)
        # Both aliases share the parameter's storage.
        self.assertEqual(len({param_c, cdata(alias_1), cdata(alias_2)}), 1)
        out2 = foo2(out1)
        out2.sum().backward()
        self.assertEqual(cdata(out1), cdata(out2))
        m.weight.grad = None
        m.bias.grad = None

    node = self.curr_node()
    first_node = next(node._path_from_root)
    if torch._inductor.config.graph_partition:
        # graph partition may change the order of outputs
        self.assertFalse(first_node.unaliased_in_all_paths[1])
        self.assertTrue(first_node.cached_tensor_outputs[1] is None)
    else:
        self.assertFalse(first_node.unaliased_in_all_paths[0])
        self.assertTrue(first_node.cached_tensor_outputs[0] is None)
@torch._inductor.config.patch("implicit_fallbacks", True)
def test_multinomial(self):
    """torch.multinomial works under cudagraphs: compiled sampling matches
    eager (same preserved RNG state) and nothing is skipped."""

    def sample_multinomial(probs, num_samples, replacement=True):
        return torch.multinomial(probs, num_samples, replacement=replacement)

    # Create and prepare probability tensor on GPU
    probs = torch.tensor([0.1, 0.2, 0.3, 0.4]).cuda()
    probs = probs / probs.sum()

    # Sample using the function
    num_skipped = counters["inductor"]["cudagraph_skips"]
    with torch._dynamo.utils.preserve_rng_state():
        samples = self.run_twc(
            sample_multinomial, probs, num_samples=5, replacement=True
        )

    with torch._dynamo.utils.preserve_rng_state():
        samples_compiled = self.run_twc(
            torch.compile(sample_multinomial),
            probs,
            num_samples=5,
            replacement=True,
        )

    self.assertEqual(samples, samples_compiled)
    # No additional cudagraph skips were recorded.
    self.assertEqual(num_skipped, counters["inductor"]["cudagraph_skips"])
@skipIfRocm
def test_checkpointing_resets_persistent_refs(self):
    """Checkpointing resets cached (persistent) output tensors.

    First verifies the cached-output fast path returns the same tensor
    object; then forces checkpoints via a second compiled function and
    checks liveness counts and that the cached output was invalidated.
    """

    @torch.compile(mode="reduce-overhead")
    def foo(x):
        return x @ x

    def inp():
        return torch.rand([20, 20], device="cuda", requires_grad=False)

    for _ in range(3):
        foo(inp())

    self.assertEqual(self.num_checkpoints(), 0)
    out = foo(inp())
    out_id = id(out)
    del out
    # Cached tensor output: same object is handed back.
    self.assertEqual(id(foo(inp())), out_id)

    @torch.compile(mode="reduce-overhead")
    def foo2(x):
        return x[0], x @ x

    for i in range(2):
        out = foo(inp())

        from torch._dynamo.mutation_guard import GenerationTracker

        # Rewind the generation so foo2 records into the same tree,
        # forcing a checkpoint.
        GenerationTracker.generation -= 1

        out_alias, out2 = foo2(out)
        del out_alias
        self.assertEqual(all_live_block_count(), 2)
        del out
        self.assertEqual(all_live_block_count(), 1)
        del out2
        self.assertEqual(all_live_block_count(), 0)
        self.assertEqual(self.num_checkpoints(), i + 1)

    new_out = foo(inp())
    curr_node = self.curr_node()
    # After checkpointing, output 0 is no longer unaliased-in-all-paths and
    # the persistent cached tensor must have been dropped.
    self.assertFalse(curr_node.unaliased_in_all_paths[0])
    self.assertFalse(out_id == id(new_out))
def test_aliased_static_parameter(self):
    """An output aliasing a static input (index 0 marked static) reuses its
    storage and is never cached as a persistent output."""
    inp = torch.rand([20, 20], device="cuda")

    def foo(args):
        x = args[0]
        args.clear()
        return (x[0],)

    # static_input_idxs=(0,): inp's address is treated as stable.
    foo_cg = self.cudagraphify_impl(foo, [inp], (0,))
    for _ in range(3):
        out = foo_cg([inp])[0]
        self.assertEqual(cdata(inp), cdata(out))

    node = self.curr_node()
    self.assertEqual(node.cached_tensor_outputs, [None])
    self.assertEqual(node.unaliased_in_all_paths, [False])
def test_warmup_stream_sync(self):
    """Warmup on a user (non-default) stream is correctly synchronized:
    consuming the output immediately still yields the right values."""

    def foo(args):
        x = args[0]
        args.clear()
        x_orig = x
        # Long chain of matmuls so the kernel work clearly outlasts any
        # missing synchronization.
        for _ in range(100):
            x = x @ x
        return (x,)

    inp = torch.rand([4096, 4096], device="cuda")
    ref = foo([inp])[0]
    torch.cuda.synchronize()

    user_stream = torch.cuda.Stream()
    with torch.cuda.stream(user_stream):
        foo_cg = self.cudagraphify_impl(foo, [inp], (0,))
        out = foo_cg([inp])[0]
        y = out + 1
        self.assertEqual(y, ref + 1)
def test_unaligned_static_parameter(self):
    """A static input that is not 16-byte aligned (slice off element 0)
    cannot have its data pointer pinned; static_input_data_ptrs is None."""

    def gen_inp():
        inp = torch.ones([20], device="cuda")
        # Slicing off the first element produces an unaligned tensor.
        return [inp[1:]]

    def foo(args):
        x = args[0]
        args.clear()
        return (x + x,)

    foo_cg = self.cudagraphify_impl(foo, gen_inp(), (0,))
    for _ in range(3):
        out = foo_cg(gen_inp())
        self.assertEqual(out, foo(gen_inp()))
    del out

    node = self.curr_node()
    self.assertEqual(node.static_input_data_ptrs, [None])
def test_amp_cache_disabled(self):
    """The autocast (AMP) cast cache must be disabled for cudagraph outputs:
    mutating the output in-place must be visible to a subsequent autocast
    matmul rather than hitting a stale cached cast."""

    @torch.compile()
    def foo(x):
        return x + x

    for _ in range(3):
        out = foo(torch.rand([4, 4], device="cuda", requires_grad=True))

    # amp cache for cudagraph outputs should be disabled
    t2 = torch.rand([4, 4], device="cuda")
    with torch.cuda.amp.autocast():
        run_once = out @ t2
        out.detach().zero_()
        run_twice = out @ t2
        self.assertNotEqual(run_once, run_twice)
def test_remove_hooks_on_cached_tensors(self):
    """Hooks registered on cached cudagraph output tensors must be stripped
    before the tensor is handed out again on the next call."""

    @torch.compile()
    def foo(x):
        return x * x

    inp = torch.rand([4], device="cuda", requires_grad=True)
    for _ in range(5):
        out = foo(inp)
        # Cached output must come back hook-free even though we register
        # a hook on it each iteration.
        self.assertIsNone(out._backward_hooks)
        out.register_hook(lambda: None)

    # today, torch.compile never outputs a leaf tensor which is the only
    # tensor that can register _post_accumulate_grad_hooks
    # add this as a preventative test
    @torch.compile()
    def foo(x):
        return torch.rand([4], device="cuda", requires_grad=True)

    for _ in range(5):
        out = foo(inp)
        self.assertIsNone(out._post_accumulate_grad_hooks)
        out.register_post_accumulate_grad_hook(lambda: None)
def test_multiple_insert_removal_caching(self):
    """Adding a tensor to the cached-tensor set twice but removing it once
    must leave it un-cached (removal is not refcounted)."""
    torch._C._set_cached_tensors_enabled(True)
    try:
        tensor = torch.rand([4], device="cuda")

        # First insertion marks the tensor as cached.
        torch._C._add_cached_tensor(tensor)
        self.assertTrue(torch._C._is_cached_tensor(tensor))

        # Second insertion followed by a single removal: the tensor is no
        # longer considered cached.
        torch._C._add_cached_tensor(tensor)
        torch._C._remove_cached_tensor(tensor)
        self.assertFalse(torch._C._is_cached_tensor(tensor))
    finally:
        # Always restore the global flag, even on assertion failure.
        torch._C._set_cached_tensors_enabled(False)
def test_accumulate_grad(self):
    # cudagraph trees shouldn't interfere with accumulation logic
    def compute_grad(grad_output, create_graph):
        """Run backward twice and return (final grad, grad after first pass)."""
        x = torch.randn(5, 5, requires_grad=True, device="cuda")

        @torch.compile()
        def foo(x):
            return x + 2

        y = foo(x)
        y.backward(grad_output, retain_graph=True)
        x_grad = x.grad
        x_grad_clone = x.grad.clone()
        y.backward(grad_output, create_graph=create_graph)
        return x_grad, x_grad_clone

    for _ in range(3):
        grad_output = torch.ones(5, 5, device="cuda")

        # Accumulate in-place when create_graph is False
        x_grad, x_grad_clone = compute_grad(grad_output, create_graph=False)
        # In-place accumulation: x_grad (still .grad) doubled, clone did not.
        self.assertEqual(x_grad, x_grad_clone * 2)

        # Accumulate out-of-place when create_graph is False
        x_grad, x_grad_clone = compute_grad(grad_output, create_graph=True)
        # Out-of-place: the old grad tensor held by x_grad is unchanged.
        self.assertEqual(x_grad, x_grad_clone)
def test_frozen_fn(self):
    """Running a compiled function through torch._dynamo.run (frozen mode)
    replays the existing recording instead of recording a new graph."""

    @torch.compile()
    def foo(x):
        return x @ x

    # Warmup / record / replay — exactly one graph recorded.
    for _ in range(3):
        result = foo(torch.rand([10, 10], device="cuda"))
    self.assertTrue(self.get_manager().new_graph_id().id == 1)

    # Frozen execution reuses the recording; still one graph id consumed
    # (new_graph_id itself advances the counter to 2).
    frozen = torch._dynamo.run(foo)
    for _ in range(3):
        result = frozen(torch.rand([10, 10], device="cuda"))

    # didn't do additional recordings
    self.assertTrue(self.get_manager().new_graph_id().id == 2)
def test_empty_cpu_tensor(self):
    """A function that also returns an empty CPU tensor still records a
    single cudagraph and matches eager output."""

    def foo(x):
        return x @ x, torch.tensor([])

    compiled = torch.compile(foo)
    inp = torch.rand([4], device="cuda")

    for _ in range(3):
        self.assertEqual(foo(inp), compiled(inp))

    # Exactly one graph was recorded.
    self.assertTrue(self.get_manager().new_graph_id().id == 1)
def test_output_alias(self):
    """Two outputs aliasing the same input storage share a storage cdata and
    are never cached as persistent outputs."""
    inp = torch.rand([20, 20], device="cuda")

    def foo(args):
        x = args[0]
        args.clear()
        # NOTE(review): `out` is unused in the return; presumably a
        # deliberate allocation in the cudagraph pool — confirm before
        # removing.
        out = x + x
        return (x, x[0])

    foo_cg = self.cudagraphify_impl(foo, [inp], ())
    for _ in range(3):
        out_1, out_2 = foo_cg([inp])
        # Both outputs alias the input's storage.
        self.assertEqual(cdata(out_1), cdata(out_2))
        del out_1, out_2
        self.assertEqual(len(list(self.curr_node().path_live_weakrefs())), 0)

    self.assertEqual(self.curr_node().cached_tensor_outputs, [None, None])
def test_empty_storage(self):
@torch.compile(mode="reduce-overhead")
def foo(x):
return (
(x + x + x),
torch.zeros([0], device="cuda"),
torch.zeros([100], device="cuda")[0:0],
)
inp = torch.rand([4], device="cuda")
for _ in range(3):
out = foo(inp)
node = self.curr_node()
self.assertEqual(len(list(node.path_live_weakrefs())), 1)
@torch.compile(mode="reduce-overhead")
def foo(x):
return (x + x + x), torch.rand([4], device="cuda") + 10
inp = torch.rand([0], device="cuda")
for _ in range(3):
out = foo(inp)
node = self.curr_node()
self.assertEqual(len(list(node.path_live_weakrefs())), 1)
@torch._inductor.config.patch("triton.skip_cudagraph_warmup", True)
def test_aliased_output_checkpoint(self):
    """Aliased outputs that die between recordings are deallocated after
    the checkpoint.

    NOTE: statement order and `del`s are load-bearing for the live-block
    counts asserted below.
    """

    def foo(args):
        x = args[0]
        args.clear()
        y = x + 2
        # out3 (y[0]) aliases out2 (y).
        return x + 1, y, y[0]

    inp = torch.rand([4, 4], device="cuda")
    foo_cg = self.cudagraphify_impl(foo, [inp], ())
    foo_cg([inp])
    foo_cg([inp])
    out1, out2, out3 = foo_cg([inp])
    inp = [out1]
    del out1, out2, out3

    def foo2(args):
        x = args[0]
        args.clear()
        return [x * x * x]

    self.assertEqual(self.num_checkpoints(), 0)
    foo2_cg = self.cudagraphify_impl(foo2, inp, ())
    x = foo2_cg(inp)[0]
    self.assertEqual(self.num_checkpoints(), 1)
    # out2 and out3 dies between the previous recording and the new one,
    # need to be manually deallocated after the checkpoint
    self.assertEqual(all_live_block_count(), 1)
    del x
    self.assertEqual(all_live_block_count(), 0)
@skipIfRocm
@unittest.skipUnless(IS_X86 and IS_LINUX, "cpp contexts are linux only")
@torch._inductor.config.patch("triton.cudagraph_trees_history_recording", True)
def test_workspace_allocation_error(self):
    """When cuBLAS workspace clearing is disabled, a workspace allocation
    inside cudagraph recording must raise, and the error message must carry
    the allocating C++ stack frames (history recording enabled)."""
    torch._C._cuda_clearCublasWorkspaces()
    # Disable the context manager that normally clears cuBLAS workspaces
    # around recording, to provoke the allocation error.
    prev = torch._inductor.cudagraph_trees.clear_cublas_manager
    try:
        torch._inductor.cudagraph_trees.clear_cublas_manager = (
            contextlib.nullcontext
        )

        @torch.compile()
        def foo(x, y):
            return x @ x

        inps = [torch.rand([400, 400], device="cuda") for _ in range(2)]
        thrown = False
        try:
            foo(*inps)
        except Exception as e:
            thrown = True
            if not IS_ARM64:
                # The captured C++ stack should point at the gemm call...
                self.assertTrue(
                    "at::cuda::blas::gemm<float, float>" in str(e)
                    or "at::cuda::blas::gemm_internal_cublas<float, float>"
                    in str(e)
                )
                # ...and at the workspace-allocating frame.
                self.assertTrue(
                    "getCurrentCUDABlasHandle" in str(e)
                    or "getNewWorkspace" in str(e)
                )
        self.assertTrue(thrown)
    finally:
        # Restore global state so later tests are unaffected.
        torch._C._cuda_clearCublasWorkspaces()
        torch._inductor.cudagraph_trees.clear_cublas_manager = prev
        torch._inductor.cudagraph_trees.get_container(
            self.device_idx
        ).tree_manager = None
# NOTE(review): method name has typos ("peristed"/"livenes" for
# "persisted"/"liveness"); left unchanged because the test name is its
# externally-discovered identifier.
def test_peristed_output_livenes(self):
    """A persisted (cached) output counts as live while any reference —
    including an alias — exists, and dies when the last reference drops."""

    @torch.compile
    def foo(x):
        return x + x

    for _ in range(3):
        foo(torch.rand([2, 2], device="cuda"))

    # All prior outputs were dropped; nothing is live.
    node = self.get_manager().current_node
    self.assertEqual(len(list(node.path_live_weakrefs())), 0)

    out = foo(torch.rand([2, 2], device="cuda"))
    # The returned tensor is the cached persistent output itself.
    self.assertTrue(out is node.cached_tensor_outputs[0])
    self.assertEqual(len(list(node.path_live_weakrefs())), 1)

    out_ref = out[0:]
    del out
    # Alias keeps the storage live.
    self.assertEqual(len(list(node.path_live_weakrefs())), 1)

    del out_ref
    self.assertEqual(len(list(node.path_live_weakrefs())), 0)
@torch._inductor.config.patch("triton.skip_cudagraph_warmup", True)
def test_tensor_no_longer_in_pool(self):
    """When a recorded graph's input tensor no longer lives at its recorded
    pool address, re-running forces a re-record with checkpointing."""

    def foo(args):
        x = args[0]
        args.clear()
        return x + 1, x + 2

    inp = torch.rand([4], device="cuda")
    inp_list = [inp]
    foo_cg = self.cudagraphify_impl(foo, inp_list, ())
    x1, x2 = foo_cg(inp_list)

    def foo2(args):
        x = args[0]
        args.clear()
        return [x * x * x]

    inp_list = [x1]
    foo2_cg = self.cudagraphify_impl(foo2, inp_list, ())
    foo2_cg(inp_list)

    del x1, x2
    # TODO make configurable

    x1, x2 = foo_cg([inp])
    self.assertEqual(self.num_checkpoints(), 0)

    # input location has changed, should force recompile and checkpointing
    foo2_cg([torch.zeros_like(x1)])

    self.assertEqual(self.num_checkpoints(), 1)
    # The root now has two children (original and re-recorded foo2).
    self.assertEqual(self.get_root_children(), [2])
@torch._inductor.config.patch("triton.skip_cudagraph_warmup", True)
def test_checkpoint_shared_output_storage_deallocation(self):
    """Two outputs sharing one storage count as a single live block across
    a checkpoint, and the block frees only when both die."""

    def foo(args):
        x = args[0]
        args.clear()
        # NOTE(review): `x_tmp` is unused in the return; presumably a
        # deliberate allocation in the cudagraph pool — confirm before
        # removing.
        x_tmp = x + 1
        # Both outputs alias x's storage.
        return x[0], x[1]

    inp = torch.rand([2, 2], device="cuda")
    inp_list = [inp]
    foo_cg = self.cudagraphify_impl(foo, inp_list, ())
    foo_cg(inp_list)
    foo_cg([inp])
    x1, x2 = foo_cg([inp])
    inp = [x1]

    def foo2(args):
        x = args[0]
        args.clear()
        y = x * x
        return y[0], y[1]

    foo2_cg = self.cudagraphify_impl(foo2, inp, ())
    foo2_cg(inp)

    self.assertEqual(self.num_checkpoints(), 1)
    self.assertEqual(
        x1.untyped_storage().data_ptr(), x2.untyped_storage().data_ptr()
    )
    self.assertEqual(all_live_block_count(), 1)
    del x1
    # Shared storage: block stays live while x2 exists.
    self.assertEqual(all_live_block_count(), 1)
    del x2
    self.assertEqual(all_live_block_count(), 0)
@torch._inductor.config.patch("triton.skip_cudagraph_warmup", True)
def test_cleanup(self):
    """torch._dynamo.reset() tears down the cudagraph manager even while
    outputs from the compiled function are still referenced."""

    def test_closure():
        @torch.compile
        def foo(x):
            return x + 1 + 2, x * 10

        foo(torch.rand([4], device="cuda"))
        return foo(torch.rand([4], device="cuda"))

    out1, out2 = test_closure()
    torch._dynamo.reset()

    # TODO - deallocate on tensor deallocation
    # self.assertTrue(self.get_manager() is not None)
    # del out1
    # self.assertTrue(self.get_manager() is not None)
    # del out2
    self.assertTrue(self.get_manager() is None)
@torch._inductor.config.patch("triton.skip_cudagraph_warmup", True)
def test_forward_backward(self):
    """Forward + backward record into one tree, and the saved-for-backward
    tensors are the ones expected to die after the backward graph runs."""

    @torch.compile
    def foo(x):
        y = x * 2
        return torch.sin(y) * torch.nn.functional.dropout(x, p=0.4)

    inp = torch.rand([4, 4], requires_grad=True, device="cuda")
    out = foo(inp)
    out.sum().backward()

    # Forward and backward share one root with a single child.
    self.assertEqual(self.get_root_children(), [1])

    # the three saved tensors should die in the backward
    # we kept alive the output
    self.assertEqual(self.curr_node().expected_dead_indices_before_graph, [])
    if torch._inductor.config.graph_partition:
        # Graph partition reorders outputs, shifting which indices die.
        self.assertEqual(
            self.curr_node().expected_dead_indices_after_graph,
            [(0, 0), (0, 2)],
        )
    else:
        self.assertEqual(
            self.curr_node().expected_dead_indices_after_graph,
            [(0, 1), (0, 2)],
        )
    self.assertFalse(self.get_manager().new_graph_id().id == 0)
def test_separate_recordings(self):
    """Two input shapes produce two independent root recordings (neither is
    a child of the other)."""

    def foo_unopt(x, y):
        return (x + 1) @ y

    foo = torch.compile(foo_unopt)

    # Interleave eager runs to perturb allocator state between recordings.
    foo_unopt(
        torch.ones([20, 20], device="cuda"), torch.ones([20, 20], device="cuda")
    )
    inps = [
        torch.ones([20, 20], device="cuda", requires_grad=False)
        for _ in range(2)
    ]

    out = foo(*inps)
    torch.cuda.synchronize()
    foo(*inps)
    torch.cuda.synchronize()
    foo(*inps)
    torch.cuda.synchronize()

    foo_unopt(
        torch.ones([20, 20], device="cuda"), torch.ones([20, 20], device="cuda")
    )

    inps2 = [
        torch.rand([40, 40], device="cuda", requires_grad=False)
        for _ in range(2)
    ]

    foo(*inps2)
    foo(*inps2)
    foo(*inps2)

    # two separate roots
    self.assertEqual(self.get_root_children(), [0, 0])
def test_alias_of_parameter(self):
    """Outputs aliasing a module parameter do not let cudagraphs free the
    parameter's storage; only the computed output is tracked as live."""

    class AliasMod(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.param = torch.nn.Parameter(torch.rand([20, 20], device="cuda"))

        def forward(self, x):
            # Two aliases of the parameter plus one real output.
            return self.param[0], self.param, self.param + x

    @torch.compile(mode="reduce-overhead")
    def foo(mod, inp):
        return mod(inp)

    inp = torch.rand([20, 20], device="cuda")
    mod = AliasMod()

    storage_ref = torch.multiprocessing.reductions.StorageWeakRef(
        mod.param.untyped_storage()
    )

    for _ in range(3):
        outs = foo(mod, inp)

    self.assertEqual(mod(inp), outs)
    # Parameter storage must still be alive.
    self.assertFalse(storage_ref.expired())

    node = self.get_manager().current_node
    self.assertEqual(len(list(node.path_live_weakrefs())), 1)
@torch._dynamo.config.patch("inline_inbuilt_nn_modules", False)
@torch._inductor.config.patch("triton.cudagraph_support_input_mutation", False)
def test_unstable_ptr(self):
    """Replacing a parameter's storage (`weight.data = ...`) after recording
    invalidates the static input address and must raise a RuntimeError."""
    import torch

    @torch.compile(mode="reduce-overhead")
    def foo(m, inp):
        return m(inp)

    def f():
        l = []
        m = torch.nn.Linear(20, 20).cuda()
        for _ in range(4):
            inp = torch.rand([20, 20], device="cuda")
            foo(m, inp)
            # Swap the weight's storage each iteration: the recorded static
            # address no longer matches.
            m.weight.data = torch.rand([20, 20], device="cuda")

    self.assertRaises(RuntimeError, f)
@requires_multigpu()
def test_manager_per_device(self):
    """Each CUDA device gets its own cudagraph manager; recording on one
    device creates no manager on another, and the manager is released when
    the recording scope exits."""

    def test():
        def foo(args):
            x = args[0]
            args.clear()
            return (x + 3,)

        inp = torch.rand([20, 20], device=f"cuda:{self.device_idx}")

        inp_list = [inp]
        foo_cg = self.cudagraphify_impl(foo, inp_list, ())
        for _ in range(3):
            self.assertEqual(foo_cg([inp]), foo([inp]))

        # Only this test's device should have a manager.
        next_idx = (self.device_idx + 1) % torch.cuda.device_count()
        self.assertTrue(self.get_manager(device_index=next_idx) is None)
        self.assertFalse(self.get_manager(device_index=self.device_idx) is None)

    test()
    self.assertTrue(self.get_manager(device_index=self.device_idx) is None)
def test_error_on_dealloc_use(self):
    """Using an output after a later run has overwritten its pool memory
    raises an 'overwritten by a subsequent' error."""

    @torch.compile()
    def foo(x):
        return x * x * x

    inp = torch.rand([4], device="cuda")
    out = foo(inp)
    # Second run reuses the pool memory backing `out`.
    out2 = foo(inp)

    with self.assertRaisesRegex(Exception, "overwritten by a subsequent"):
        out + out

    foo(inp)

    with self.assertRaisesRegex(Exception, "overwritten by a subsequent"):
        out2 + out2
def test_error_on_dealloc_use2(self):
    """Same as test_error_on_dealloc_use but with detached outputs: the
    error message must also include the originating expression text."""

    @torch.compile()
    def foo(x):
        return x * x * x

    inp = torch.rand([4], device="cuda")
    out = foo(inp).detach()
    out2 = foo(inp).detach()

    with self.assertRaises(Exception) as exc:
        out + out

    # Error names both the overwrite and the source expression.
    FileCheck().check("overwritten").check("x * x * x").run(repr(exc.exception))

    foo(inp)

    with self.assertRaises(Exception) as exc:
        out2 + out2

    FileCheck().check("overwritten").check("x * x * x").run(repr(exc.exception))
@unittest.skipIf(not torch.backends.cudnn.is_available(), "requires cudnn")
def test_conv_benchmark(self):
    """Smoke test: cuDNN benchmark-mode autotuning during recording does
    not break cudagraph capture of a convolution."""
    with torch.backends.cudnn.flags(
        enabled=True, benchmark=True, deterministic=False
    ):
        conv = torch.nn.Conv2d(5, 6, [3, 3]).cuda()
        sample = torch.randn([2, 5, 16, 16]).cuda()

        @torch.compile()
        def foo(m, inp):
            return m(inp)

        # Just needs to run without error under benchmark mode.
        foo(conv, sample)
def test_single_stream_use(self):
    """All cudagraph segments allocated by forward+backward replays use a
    single stream (no per-replay stream proliferation)."""

    @torch.compile()
    def foo(x):
        return (x * x * x).relu()

    inp = torch.rand([4], device="cuda", requires_grad=True)
    streams = set()
    # Segments that existed before this test ran.
    streams_init = {seg["stream"] for seg in get_all_cudagraph_segments()}
    for _ in range(4):
        foo(inp).sum().backward()
        inp.grad = None

    streams = {
        seg["stream"] for seg in get_all_cudagraph_segments()
    } - streams_init
    self.assertEqual(len(streams), 1)
    self.assertFalse(self.get_manager().new_graph_id().id == 0)
@torch._dynamo.config.patch("assume_static_by_default", False)
def test_dynamic_backward(self):
    """Dynamic shapes: compiled forward+backward matches eager for two
    different input sizes (outputs, symbolic size, and gradients)."""

    def foo(x):
        x = torch.cat([x, x])
        return torch.addmm(x, x, x).relu(), x.size(0)

    opt_foo = torch.compile(mode="reduce-overhead")(foo)

    def run_test(foo, inp):
        # Returns (output clone, size, grad clone) for one fwd+bwd pass.
        r, s = foo(inp)
        r.sum().backward()
        g = inp.grad.clone()
        inp.grad = None
        r = r.clone()
        return r, s, g

    def run_big_test(inp):
        # Eager once, compiled twice (record then replay); all must agree.
        r0, s0, g0 = run_test(foo, inp)
        r1, s1, g1 = run_test(opt_foo, inp)
        r2, s2, g2 = run_test(opt_foo, inp)
        self.assertEqual(r0, r1)
        self.assertEqual(r0, r2)
        self.assertEqual(s0, s1)
        self.assertEqual(s0, s2)
        self.assertEqual(g0, g1)
        self.assertEqual(g0, g2)

    inp = torch.randn(2, 4, device="cuda", requires_grad=True)
    run_big_test(inp)

    inp = torch.randn(3, 6, device="cuda", requires_grad=True)
    run_big_test(inp)
def test_dynamic_warmup(self):
    """Each new dynamic size triggers its own warmup+record cycle; the
    wrapped function runs twice per size (warmup and record), then replays."""
    COUNTER = 0

    def f(inps):
        i, x = inps
        inps.clear()
        nonlocal COUNTER
        # Counts real executions (replays don't re-enter Python).
        COUNTER += 1
        return x * 2

    x = torch.randn(2, device="cuda")
    inp_list = [2, x]
    foo_cg = self.cudagraphify_impl(f, inp_list, ())
    foo_cg(inp_list)  # warmup
    foo_cg([2, x])  # record
    foo_cg([2, x])  # replay
    self.assertEqual(COUNTER, 2)

    # Switching the size will require a warmup again
    x = torch.randn(3, device="cuda")
    inp_list = [3, x]
    foo_cg(inp_list)  # warmup
    foo_cg([3, x])  # record
    foo_cg([3, x])  # replay
    self.assertEqual(COUNTER, 4)
def test_forward_generation(self):
    """The manager tracks 'forwards with pending backwards': set after a
    forward, cleared after backward, and not set for a no-grad forward."""

    def foo(x):
        return x * x * x

    def foo2(x):
        return x * 12

    foo_opt = torch.compile(foo)
    foo2_opt = torch.compile(foo2)
    ones = torch.ones([4, 4], device="cuda", requires_grad=True)

    out = foo_opt(ones)
    out2 = foo2_opt(out)

    self.assertEqual(all_live_block_count(), 2)

    self.assertTrue(self.get_manager().running_forwards_with_pending_backwards)

    out2.sum().backward()
    self.assertFalse(self.get_manager().running_forwards_with_pending_backwards)

    ones.grad = None
    del out
    del out2

    foo2_opt(foo_opt(ones)).sum().backward()

    # Detached input: no pending backward after this forward.
    out = foo_opt(ones.detach())
    self.assertFalse(self.get_manager().running_forwards_with_pending_backwards)
    self.assertFalse(self.get_manager().new_graph_id().id == 0)
def test_warn_on_pending_backward(self):
    """Re-running a forward whose previous backward never ran emits the
    'pending backwards' slow-path warning (and no graph is recorded)."""

    @torch.compile
    def foo(x):
        return x * x * x

    out = foo(torch.rand([4, 4], device="cuda", requires_grad=True))
    # Second forward with the first backward still pending.
    out = foo(torch.rand([4, 4], device="cuda", requires_grad=True))

    warnings.resetwarnings()
    with warnings.catch_warnings(record=True) as w:
        out = foo(torch.rand([4, 4], device="cuda", requires_grad=True))

    FileCheck().check(
        "Unable to hit fast path of CUDAGraphs because of pending"
    ).run(str(w[0]))
    self.assertTrue(self.get_manager().new_graph_id().id == 0)
def test_mark_step(self):
    """Explicit cudagraph_mark_step_begin() between forwards (with pending
    backwards) still allows graphs to be recorded."""

    @torch.compile
    def foo(x):
        return x * x * x

    # Marking a step boundary before each forward avoids the pending-backward
    # slow path, so recording proceeds.
    for _ in range(2):
        torch.compiler.cudagraph_mark_step_begin()
        result = foo(torch.rand([4, 4], device="cuda", requires_grad=True))

    self.assertFalse(self.get_manager().new_graph_id().id == 0)
@torch._dynamo.config.patch("capture_scalar_outputs", True)
def test_incompatible_cudagraph_ops_item(self):
    """`.item()` (aten._local_scalar_dense) forces a cudagraph skip, logged
    to stderr and counted in cudagraph_skips."""

    @torch.compile(mode="reduce-overhead")
    def foo(x):
        return x.item()

    # NB: This doesn't work with float, because float unbacked codegen
    # is currently broken. But testing the float case here is also
    # awkward, because we plan to Tensor-ify the float compute, and as
    # a result we'd actually expect this to work with cuda graphs!
    with capture_stderr() as captured_output:
        self.assertEqual(foo(torch.tensor(3, device="cuda")), 3)
        self.assertEqual(foo(torch.tensor(6, device="cuda")), 6)

    # NOTE: this test is named after incompatible ops, but is not skipping due to incompatible ops.
    # This should get fixed.
    FileCheck().check(
        " to incompatible op aten._local_scalar_dense.default"
    ).run(captured_output[0])
    self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
@torch._dynamo.config.patch("compiled_autograd", True)
def test_compiled_autograd_static_input_params(self):
    """With compiled autograd, model parameters are detected as static
    inputs in the backward, so only the non-static inputs get copied at
    record time."""

    @torch.compile(mode="reduce-overhead")
    def bwd(loss):
        loss.backward()

    model = torch.nn.Linear(10, 10, bias=False, device="cuda")
    x = torch.randn(10, 10, device="cuda")
    for _ in range(5):
        out = model(x)
        bwd(out.sum())
        model.weight.grad = None

    # i=0, 0 copies (warmup)
    # i=1, 2 copies (record, 1/3 inputs marked as static)
    # i>1, 0 copies (run)
    self.assertEqual(
        counters["inductor"]["cudagraph_recorded_non_static_inputs"], 2
    )
@torch._dynamo.config.patch("capture_dynamic_output_shape_ops", True)
def test_incompatible_cudagraph_ops_nonzero(self):
    """`nonzero` (dynamic output shape) forces a cudagraph skip that is
    logged with the op and function name, and counted once."""

    @torch.compile(mode="reduce-overhead")
    def foo(x):
        return x.nonzero()

    with capture_stderr() as captured_output:
        self.assertEqual(
            foo(torch.tensor([1, 0, 2], device="cuda")),
            torch.tensor([[0], [2]]),
        )
        self.assertEqual(
            foo(torch.tensor([1, 0, 0], device="cuda")), torch.tensor([[0]])
        )

    FileCheck().check("incompatible op aten.nonzero.default").check("foo").run(
        captured_output[0]
    )
    self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
@torch._dynamo.config.patch("capture_dynamic_output_shape_ops", True)
def test_incompatible_cudagraph_ops_nonzero_graph_breaks(self):
    """Each compiled region containing nonzero counts as a skip; the region
    after the graph break is skipped twice because it recompiles."""

    @torch.compile(mode="reduce-overhead")
    def foo(x):
        y = x.nonzero()  # skip
        torch._dynamo.graph_break()
        return y.nonzero()  # skip 2 times (due to recompile)

    foo(torch.tensor([1, 0, 2], device="cuda"))
    foo(torch.tensor([1, 0, 0], device="cuda"))

    self.assertEqual(counters["inductor"]["cudagraph_skips"], 3)
@torch._dynamo.config.patch("capture_dynamic_output_shape_ops", True)
def test_incompatible_cudagraph_ops_nonzero_backend(self):
    """Same nonzero skip via the standalone 'cudagraphs' backend, which
    logs its own message format."""

    @torch.compile(backend="cudagraphs")
    def foo(x):
        return x.nonzero()

    with capture_stderr() as captured_output:
        self.assertEqual(
            foo(torch.tensor([1, 0, 2], device="cuda")),
            torch.tensor([[0], [2]]),
        )
        self.assertEqual(
            foo(torch.tensor([1, 0, 0], device="cuda")), torch.tensor([[0]])
        )

    FileCheck().check(
        "skipping cudagraphs due to incompatible op (nonzero)"
    ).run(captured_output[0])
    self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
@torch._dynamo.config.patch("capture_dynamic_output_shape_ops", True)
@torch._inductor.config.patch("cpp_wrapper", True)
def test_skip_cpp_wrapper(self):
    """cpp_wrapper mode is incompatible with cudagraphs: skips with a
    logged reason while results stay correct."""

    def foo(x):
        return x + 1

    foo_c = torch.compile(mode="reduce-overhead")(foo)

    with capture_stderr() as captured_output:
        t = torch.rand([32], device="cuda")
        self.assertEqual(foo(t), foo_c(t))

    FileCheck().check("skipping cudagraphs due to cpp wrapper enabled").run(
        captured_output[0]
    )
    self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
def test_storage_access_error(self):
    """A custom message installed via _set_storage_access_error_msg is raised
    when the tensor's storage is accessed."""
    x = torch.rand([4], device="cuda")
    torch._C._set_storage_access_error_msg(x, "custom error msg")
    with self.assertRaisesRegex(Exception, "custom error msg"):
        device = x.untyped_storage()  # access triggers the custom error
def test_side_stream_memory_allocation(self):
    """Allocations made on a side CUDA stream (with proper cross-stream waits)
    must be captured correctly by tree_cudagraphify_impl; only one graph is recorded."""
    device = f"cuda:{self.device_idx}"
    def multi_stream_allocation(args):
        # Allocate one buffer on a side stream and one on the main stream,
        # synchronizing both ways so the allocations are ordered.
        side_stream = torch.cuda.Stream()
        side_stream.wait_stream(torch.cuda.current_stream())
        with torch.cuda.stream(side_stream):
            side_stream_buffer = torch.ones(
                *args, device=device, dtype=torch.float32
            )
        torch.cuda.current_stream().wait_stream(side_stream)
        main_stream_buffer = torch.ones(
            *args, device=device, dtype=torch.float32
        )
        if isinstance(args, list):
            args.clear()
        return main_stream_buffer, side_stream_buffer
    graphed_multi_stream_func = tree_cudagraphify_impl(
        multi_stream_allocation,
        inputs=[],
        static_input_idxs=[],
        is_backward=False,
        is_inference=False,
        device_index=self.device_idx,
        stack_traces=["dummy stack trace1", "dummy stack trace2"],
    )
    ref_out = torch.ones((2, 3), device=device, dtype=torch.float32)
    for _ in range(3):
        torch.compiler.cudagraph_mark_step_begin()
        main_stream_buffer, side_stream_buffer = graphed_multi_stream_func(
            [2, 3]
        )
        self.assertEqual(main_stream_buffer, ref_out)
        self.assertEqual(side_stream_buffer, ref_out)
    # Only a single cudagraph should have been recorded for all iterations.
    self.assertEqual(self.get_manager().new_graph_id().id, 1)
@torch._dynamo.config.patch("inline_inbuilt_nn_modules", False)
@torch._inductor.config.patch("triton.cudagraph_support_input_mutation", False)
def test_static_inputs_address_mutation_log(self):
    """When static input tensor addresses change after recording, replaying the
    node must raise an error naming the changed inputs and their stack traces."""
    class Goo(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.linear = torch.nn.Linear(2, 2, device="cuda")
        def forward(self, x) -> torch.Tensor:
            return self.linear(x)
    class Foo(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.static_tensor = torch.zeros((2, 2), device="cuda")
            self.goo = Goo()
        def forward(self, x) -> torch.Tensor:
            self.static_tensor.add_(torch.ones((2, 2), device="cuda"))
            return self.static_tensor + x + self.goo(x)
    foo = Foo()
    foo = torch.compile(foo, mode="reduce-overhead")
    inp = torch.rand((2, 2), device="cuda")
    for _ in range(3):
        foo(inp)
    # mutates static input tensors' addresses
    foo.static_tensor = torch.ones((2, 2), device="cuda")
    foo.goo.linear.bias = torch.nn.Parameter(torch.ones((2,), device="cuda"))
    # Replaying the recorded node directly must fail with a message that
    # names each changed pointer and includes the relevant stack trace.
    with self.assertRaisesRegex(
        Exception,
        r"(?s)static input data pointer changed.\n"
        r"input name: primals_.*. data pointer changed from .* to .*. input stack trace:.*"
        r"input name: primals_.*. data pointer changed from .* to .*. input stack trace:.*,"
        r" in forward\n.* self.static_tensor.add\_\(torch.ones\(\(2, 2\), device=\"cuda\"\)\).*\n",
    ):
        self.curr_node().run(
            [foo.goo.linear.weight, foo.goo.linear.bias, foo.static_tensor, inp]
        )
def _run_iter(self, param, fn):
fwd_output = fn(torch.ones(2, 2), param)
fwd_output.sum().backward()
grad_output = param.grad.detach().clone()
param.grad = None
return fwd_output, grad_output
def _assert_equal_multi_loop(self, param, fn_eager, fn_compiled):
    """Assert fn_compiled matches fn_eager's output and grad across 5 runs."""
    expected = self._run_iter(param, fn_eager)
    for _ in range(5):
        actual = self._run_iter(param, fn_compiled)
        self.assertEqual(expected[0], actual[0])
        self.assertEqual(expected[1], actual[1])
def run_static_input_param_test(self, fn_eager, num_graphs):
    """Compile fn_eager with cudagraphs and run it with two different
    Parameters (then the first again) to exercise multi-graph dispatch.

    Args:
        fn_eager: eager function taking (input, param).
        num_graphs: expected total number of recorded cudagraphs.
    """
    with torch.device("cuda"):
        fn_compiled = torch.compile(fn_eager, mode="reduce-overhead")
        p1 = torch.nn.Parameter(torch.rand([2, 2]))
        self._assert_equal_multi_loop(p1, fn_eager, fn_compiled)
        p2 = torch.nn.Parameter(torch.rand([2, 2]))
        self._assert_equal_multi_loop(p2, fn_eager, fn_compiled)
        # Run p1 again to ensure we reuse the previous recording
        self._assert_equal_multi_loop(p1, fn_eager, fn_compiled)
        self.assertEqual(self.get_manager().new_graph_id().id, num_graphs)
def _module_test(self, mod, name="weight", param_wrapping=True):
    """Swap the attribute ``name`` on ``mod`` (wrapping it in a Parameter if
    ``param_wrapping``) and verify the compiled module re-records rather than
    recompiling, then reuses the original recording when the attribute is restored.
    """
    with torch.device("cuda"):
        def fn(x, mod):
            return mod(x)
        fn_compiled = torch.compile(fn, mode="reduce-overhead", fullgraph=True)
        def run_test_iter(mod, fn):
            # One fwd+bwd step; returns output and the weight grad, then clears grads.
            fwd_output = fn(torch.ones(2, 2), mod)
            fwd_output.sum().backward()
            grad_output = mod.weight.grad.detach().clone()
            mod.zero_grad()
            return fwd_output, grad_output
        def run_test():
            exp_output, exp_grad = run_test_iter(mod, fn)
            for _ in range(5):
                compiled_output, compiled_grad = run_test_iter(mod, fn_compiled)
                self.assertEqual(exp_output, compiled_output)
                self.assertEqual(exp_grad, compiled_grad)
        run_test()
        old_attr = getattr(mod, name)
        modified_attr = torch.rand_like(old_attr)
        if param_wrapping:
            modified_attr = torch.nn.Parameter(modified_attr)
        setattr(mod, name, modified_attr)
        run_test()
        # Run original version to verify we reuse the other recording
        setattr(mod, name, old_attr)
        run_test()
        # Fwd + bwd graphs for each version of the function => 4 graphs
        self.assertEqual(self.get_manager().new_graph_id().id, 4)
@torch._dynamo.config.patch("error_on_recompile", True)
@torch._dynamo.config.patch("inline_inbuilt_nn_modules", True)
def test_multi_dispatch_single_compile_param_inputs(self):
    """Multiple cudagraphs for one compiled function with Parameter inputs,
    without triggering a dynamo recompile (error_on_recompile=True)."""
    # Verify that we can record multiple cudagraphs for a single
    # compiled function with param inputs
    def fn(x, y):
        return x * y
    # Fwd + bwd graphs for each version of the function => 4 graphs
    self.run_static_input_param_test(fn, 4)
@torch._dynamo.config.patch("error_on_recompile", True)
@torch._dynamo.config.patch("inline_inbuilt_nn_modules", True)
def test_multi_dispatch_single_compile_builtin_module(self):
    """Changing a builtin module's parameter records a new cudagraph
    instead of recompiling."""
    # Verify that we don't recompile when changing the param of a builtin module
    # and that we record another cudagraph
    # Note: Linear is a builtin module so we enable that config setting above
    self._module_test(torch.nn.Linear(2, 3, device="cuda"))
@torch._dynamo.config.patch("error_on_recompile", True)
@torch._dynamo.config.patch("inline_inbuilt_nn_modules", True)
def test_multi_dispatch_single_compile_builtin_module_buffers(self):
    """Same as the builtin-module test, but swapping a buffer (running_mean)
    rather than a Parameter."""
    # Verify that we don't recompile when changing the buffer of a builtin module
    # and that we record another cudagraph
    self._module_test(
        torch.nn.BatchNorm1d(2, device="cuda"),
        name="running_mean",
        param_wrapping=False,
    )
@torch._inductor.config.patch("triton.cudagraphs", True)
@torch._dynamo.config.patch("error_on_recompile", True)
@torch._dynamo.config.patch("inline_inbuilt_nn_modules", True)
def test_multi_dispatch_custom_module(self):
    """Dispatch to multiple recorded graphs when a custom module's
    Parameter is swapped."""
    # Test that we can correctly dispatch multiple graphs
    # if params of a custom module change
    class TestModule(torch.nn.Module):
        def __init__(self, param) -> None:
            super().__init__()
            self.weight = param
        def forward(self, x):
            return self.weight * x
    self._module_test(
        TestModule(torch.nn.Parameter(torch.rand([2, 2], device="cuda")))
    )
@torch._dynamo.config.patch("error_on_recompile", True)
@torch._dynamo.config.patch("inline_inbuilt_nn_modules", True)
def test_multi_dispatch_custom_module_buffer(self):
    """Dispatch to multiple recorded graphs when a custom module's buffer
    is swapped (no Parameter wrapping)."""
    # Test that we can correctly dispatch multiple graphs
    # if buffers of a custom module change
    class TestModule(torch.nn.Module):
        def __init__(self, param, buf) -> None:
            super().__init__()
            self.weight = param
            self.buf = torch.nn.Buffer(buf)
        def forward(self, x):
            return x * self.weight + self.buf
    self._module_test(
        TestModule(
            torch.nn.Parameter(torch.rand([2, 2], device="cuda")),
            torch.rand([2, 2], device="cuda"),
        ),
        name="buf",
        param_wrapping=False,
    )
@torch._inductor.config.patch("triton.cudagraphs", True)
@torch._dynamo.config.patch("error_on_recompile", True)
@torch._dynamo.config.patch("inline_inbuilt_nn_modules", True)
def test_multi_dispatch_child_node(self):
    """A graph break puts the param-dependent part in a child node of the
    cudagraph tree; swapping the param forks only the child."""
    # Test that we can correctly dispatch multiple graphs if a child node
    # in the tree has stable input pointers change
    def fn(x, p):
        # Graph 1
        y = x * x
        torch._dynamo.graph_break()
        # Graph 2
        return y * p
    # We have 5 graphs here
    #             Graph 1
    #            /       \
    # Graph 2 w/ p1     Graph 2 w/ p2
    # and then two backward graphs
    self.run_static_input_param_test(fn, 5)
@torch._dynamo.config.patch("error_on_recompile", True)
@torch._dynamo.config.patch("inline_inbuilt_nn_modules", True)
def test_multi_dispatch_parent_node(self):
    """When the param-dependent part is the parent node, swapping the param
    forks the parent AND re-records the child (different memory state)."""
    def fn(x, p):
        # Graph 1
        y = x * p
        torch._dynamo.graph_break()
        # Graph 2
        return y + x
    # We have 6 graphs here
    # Graph 1 w/ p1   Graph 1 w/ p2
    #      |               |
    # Graph 2 (v1)    Graph 2 (v2)
    # There are two versions of graph 2 because
    # we re-record due to different memory state after running the
    # two versions of Graph 1
    # and then two backward graphs
    self.run_static_input_param_test(fn, 6)
@torch._dynamo.config.patch("error_on_recompile", True)
@torch._dynamo.config.patch("inline_inbuilt_nn_modules", False)
@torch._inductor.config.patch("triton.cudagraph_support_input_mutation", True)
@torch._inductor.config.patch("triton.cudagraph_unexpected_rerecord_limit", 0)
def test_fallback_to_eager_if_recompiling_too_many_times(self):
    """With re-record limit 0, a static-input address change makes the
    function fall back to eager and log a skip message exactly once."""
    class Foo(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.param = torch.nn.Parameter(torch.rand([2, 2], device="cuda"))
        def forward(self, x):
            return x * self.param
    with capture_stderr() as captured_output:
        # We have 3 graphs here
        #             None
        #            /    \
        # (fwd w/ p1, Graph 0) (bwd w/p2, Graph2)
        # (bwd w/ p1, Graph 1)
        # All other graphs are skipped because we hit the max recording limit
        # (=0 for each node and function pair)
        fn_compiled = torch.compile(Foo(), mode="reduce-overhead")
        for _ in range(3):
            fn_compiled(torch.rand([2, 2], device="cuda")).sum().backward()
            fn_compiled.param.grad = None
        # Change static tensor address
        fn_compiled.param.data = torch.rand([2, 2], device="cuda")
        fn_compiled(torch.rand([2, 2], device="cuda")).sum().backward()
        self.assertEqual(self.get_manager().new_graph_id().id, 3)
    FileCheck().check(
        "skipping cudagraph due to function 0 exceeding max re-recording limit (=0) "
        "on cudagraph node None due to static input data pointer changed."
    ).run(captured_output[0])
    self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
@torch._dynamo.config.patch("error_on_recompile", True)
@torch._dynamo.config.patch("inline_inbuilt_nn_modules", False)
@torch._inductor.config.patch("triton.cudagraph_support_input_mutation", True)
@torch._inductor.config.patch("triton.cudagraph_unexpected_rerecord_limit", 0)
def test_fallback_to_eager_if_recompiling_too_many_times_warn_only_once(self):
    """Repeated address changes (5x) emit the fallback warning exactly once
    per function (fwd and bwd), not once per change."""
    class Foo(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.param = torch.nn.Parameter(torch.rand([2, 2], device="cuda"))
        def forward(self, x):
            return x * self.param
    with capture_stderr() as captured_output:
        with torch.device("cuda"):
            # We have 3 graphs here
            #             None
            #            /    \
            # (fwd w/ p1, Graph 0) (bwd w/p2, Graph2)
            # (bwd w/ p1, Graph 1)
            # All other graphs are skipped because we hit the max recording limit
            # (=0 for each node and function pair)
            fn_compiled = torch.compile(Foo(), mode="reduce-overhead")
            for _ in range(3):
                fn_compiled(torch.rand([2, 2], device="cuda")).sum().backward()
                fn_compiled.param.grad = None
            for _ in range(5):
                # Change static tensor address
                fn_compiled.param.data = torch.rand([2, 2], device="cuda")
                fn_compiled(torch.rand([2, 2], device="cuda")).sum().backward()
                fn_compiled.param.grad = None
    # Exactly one warning for the forward function (0) and one for the backward (1).
    FileCheck().check_count(
        "skipping cudagraph due to function 0 exceeding max re-recording limit (=0) "
        "on cudagraph node None due to static input data pointer changed.",
        1,
        exactly=True,
    ).check_count(
        "skipping cudagraph due to function 1 exceeding max re-recording limit (=0) "
        "on cudagraph node None due to static input data pointer changed.",
        1,
        exactly=True,
    ).run(captured_output[0])
    self.assertEqual(counters["inductor"]["cudagraph_skips"], 2)
@torch._dynamo.config.patch("inline_inbuilt_nn_modules", False)
@torch._inductor.config.patch("triton.cudagraph_support_input_mutation", True)
@torch._inductor.config.patch("triton.cudagraph_unexpected_rerecord_limit", 0)
def test_fallback_to_eager_if_recompiling_too_many_times_due_to_cudagraph_managed_tensor(
    self,
):
    """A cudagraph-managed tensor whose address changes (via clone) trips the
    re-record limit for the downstream function, which falls back to eager."""
    # By setting triton.cudagraph_support_input_mutation=True, we force re-record
    # if cudagraph managed tensor addresses changed.
    @torch.compile(mode="reduce-overhead")
    def foo(x):
        return x + 1
    @torch.compile(mode="reduce-overhead")
    def goo(x):
        return x * 2
    for _ in range(3):
        torch.compiler.cudagraph_mark_step_begin()
        inp = torch.rand((2, 3), device="cuda")
        y = foo(inp)
        z = goo(y)
    with capture_stderr() as captured_output:
        torch.compiler.cudagraph_mark_step_begin()
        x = torch.rand(2, 3, device="cuda")
        y = foo(x)
        # cloning y changes the data pointer goo sees relative to the recording
        y_clone = y.clone()
        z = goo(y_clone)
    # eager function should run successfully
    for _ in range(5):
        torch.compiler.cudagraph_mark_step_begin()
        x = torch.rand(2, 3, device="cuda")
        y = foo(x)
        y_clone = y.clone()
        z = goo(y_clone)
    FileCheck().check_count(
        "skipping cudagraph due to function 1 exceeding max re-recording limit (=0) "
        "on cudagraph node 0 due to cudagraph managed tensor data pointer changed",
        1,
        exactly=True,
    ).run(captured_output[0])
    self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
@torch._dynamo.config.patch("error_on_recompile", True)
@torch._dynamo.config.patch("inline_inbuilt_nn_modules", True)
@torch._inductor.config.patch("triton.cudagraph_unexpected_rerecord_limit", 1)
def test_not_fallback_to_eager_if_have_not_recompiling_too_many_times(self):
    """With a re-record limit of 1, the usual two-parameter dispatch stays
    under the limit, so all 4 graphs record and nothing is skipped.

    NOTE(review): removed a dead duplicate decorator that patched
    inline_inbuilt_nn_modules to False above the True patch — decorators
    apply innermost-last at call time, so the True patch always won and
    the False patch had no effect.
    """
    def fn(x, y):
        return x * y
    # We have 4 graphs here
    #             None
    #            /    \
    # (fwd w/ p1, Graph 0) (fwd w/p2, Graph2)
    # (bwd w/ p1, Graph 1) (bwd w/p2, Graph3)
    self.run_static_input_param_test(fn, 4)
    self.assertEqual(counters["inductor"]["cudagraph_skips"], 0)
@torch._dynamo.config.patch("error_on_recompile", True)
@torch._dynamo.config.patch("inline_inbuilt_nn_modules", True)
def test_no_rerecord_with_mark_static_address(self):
    """An input marked with mark_static_address keeps its address stable, so
    fresh x tensors each iteration cause no re-records: 2 graphs total."""
    class Mod(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = nn.Linear(2, 2)
        def forward(self, x):
            return self.linear(x)
    mod = Mod().cuda()
    def fn_eager(x, marked_static_y):
        return torch.cos(x) + mod(marked_static_y)
    with torch.device("cuda"):
        fn_compiled = torch.compile(fn_eager, mode="reduce-overhead")
        # y is marked static
        y = torch.randn(2, 2)
        torch._dynamo.mark_static_address(y)
        # Changing pointer of x should not lead to re-records
        for _ in range(5):
            x = torch.randn(2, 2, requires_grad=True)
            res = fn_compiled(x, y)
            res.sum().backward()
            x.grad = None
            mod.linear.weight.grad = None
            mod.linear.bias.grad = None
    # One forward and one backward
    self.assertEqual(self.get_manager().new_graph_id().id, 2)
def test_tensor_constant_mutation(self):
    """A module-level tensor constant mutated in forward (+= 1) must run
    under cudagraphs without error across repeated invocations."""
    class Foo(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.tensor_constant = torch.ones((2, 3), device="cuda")
        def forward(self, x: torch.Tensor) -> torch.Tensor:
            self.tensor_constant += 1
            return x + self.tensor_constant
    foo = Foo()
    foo = torch.compile(foo, mode="reduce-overhead")
    inp = torch.rand((2, 3), device="cuda")
    for _ in range(3):
        foo(inp)
@torch._inductor.config.patch("triton.cudagraph_support_input_mutation", True)
def test_rerecord_if_static_input_address_changed(self):
    """With input-mutation support on, changing static input addresses causes
    a re-record (2 graphs total) instead of an error."""
    # By setting triton.cudagraph_support_input_mutation=True, we force re-record
    # if static tensor addresses changed.
    class Goo(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.linear = torch.nn.Linear(2, 2, device="cuda")
        def forward(self, x) -> torch.Tensor:
            return self.linear(x)
    class Foo(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.register_buffer(
                "static_tensor", torch.zeros((2, 2), device="cuda")
            )
            self.goo = Goo()
        def forward(self, x) -> torch.Tensor:
            self.static_tensor.add_(torch.ones((2, 2), device="cuda"))
            return self.static_tensor + x + self.goo(x)
    foo = Foo()
    foo = torch.compile(foo, mode="reduce-overhead")
    inp = torch.rand((2, 2), device="cuda")
    for _ in range(3):
        foo(inp)
    # mutates static input tensors' addresses
    foo.static_tensor = torch.ones((2, 2), device="cuda")
    foo.goo.linear.bias = torch.nn.Parameter(torch.ones((2,), device="cuda"))
    if torch._dynamo.config.inline_inbuilt_nn_modules:
        for _ in range(3):
            foo(inp)
    else:
        # Run with specific function id to avoid dynamo recompiling
        self.get_manager().run(
            [
                foo.goo.linear.weight,
                foo.goo.linear.bias,
                foo.static_tensor,
                inp,
            ],
            FunctionID(0),
        )
    self.assertEqual(self.get_manager().new_graph_id().id, 2)
@torch._inductor.config.patch("triton.cudagraph_dynamic_shape_warn_limit", 1)
def test_skip_if_dynamic_shape_limit_reached1(self):
    """Running three distinct batch sizes past warn_limit=1 must emit the
    dynamic-shape re-recording warning."""
    class Mod(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.linear = torch.nn.Linear(3, 3, device="cuda")
        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return self.linear(x)
    def iter(batch_size: int, mod: torch.nn.Module):
        x = torch.rand((batch_size, 3), device="cuda")
        for _ in range(3):
            mod(x)
    mod = torch.compile(Mod(), mode="reduce-overhead")
    with capture_stderr() as captured_output:
        for batch_size in range(10, 40, 10):
            iter(batch_size, mod)
    FileCheck().check(
        "CUDAGraph supports dynamic shapes by recording a new graph for each "
        "distinct input size. Recording too many CUDAGraphs may lead to "
        "extra overhead. We have observed 2 distinct sizes. "
        "Please consider the following options for better performance: "
        "a) padding inputs to a few fixed number of shapes; or b) set "
        "torch._inductor.config.triton.cudagraph_skip_dynamic_graphs=True. "
        "Set torch._inductor.config.triton.cudagraph_dynamic_shape_warn_limit=None "
        "to silence this warning."
    ).run("\n".join(captured_output))
@torch._inductor.config.patch("triton.cudagraph_dynamic_shape_warn_limit", 1)
def test_skip_if_dynamic_shape_limit_reached2(self):
    """Same dynamic-shape warning as reached1, but with two dynamic dims
    (batch size and sequence length) driving distinct recordings.

    NOTE(review): removed a leftover debug ``print(captured_output)`` and
    joined all captured stderr chunks (as reached1 does) so the FileCheck
    does not depend on which chunk the warning lands in.
    """
    class Mod(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.attn = torch.nn.MultiheadAttention(
                embed_dim=3, num_heads=3, device="cuda"
            )
        def forward(
            self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor
        ) -> torch.Tensor:
            return self.attn(q, k, v)
    mod = torch.compile(Mod(), mode="reduce-overhead")
    def iter(batch_size: int, length: int):
        q = torch.rand((batch_size, length, 3), device="cuda")
        k = torch.rand((batch_size, length, 3), device="cuda")
        v = torch.rand((batch_size, length, 3), device="cuda")
        for _ in range(3):
            mod(q, k, v)
    with capture_stderr() as captured_output:
        for batch_size in range(10, 40, 10):
            for length in range(10, 30, 10):
                iter(batch_size, length)
    FileCheck().check(
        "CUDAGraph supports dynamic shapes by recording a new graph for each "
        "distinct input size. Recording too many CUDAGraphs may lead to "
        "extra overhead. We have observed 2 distinct sizes. "
        "Please consider the following options for better performance: "
        "a) padding inputs to a few fixed number of shapes; or b) set "
        "torch._inductor.config.triton.cudagraph_skip_dynamic_graphs=True. "
        "Set torch._inductor.config.triton.cudagraph_dynamic_shape_warn_limit=None "
        "to silence this warning."
    ).run("\n".join(captured_output))
@torch._inductor.config.patch("triton.cudagraph_dynamic_shape_warn_limit", 1)
def test_warn_once_if_dynamic_shape_limit_reached(self):
    """Even with many distinct batch sizes (10..190), the dynamic-shape
    warning is emitted exactly once."""
    class Mod(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.linear = torch.nn.Linear(3, 3, device="cuda")
        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return self.linear(x)
    def iter(batch_size: int, mod: torch.nn.Module):
        x = torch.rand((batch_size, 3), device="cuda")
        for _ in range(3):
            mod(x)
    mod = torch.compile(Mod(), mode="reduce-overhead")
    with capture_stderr() as captured_output:
        for batch_size in range(10, 200, 10):
            iter(batch_size, mod)
    FileCheck().check_count(
        "CUDAGraph supports dynamic shapes by recording a new graph for each "
        "distinct input size. Recording too many CUDAGraphs may lead to "
        "extra overhead. We have observed 2 distinct sizes. "
        "Please consider the following options for better performance: "
        "a) padding inputs to a few fixed number of shapes; or b) set "
        "torch._inductor.config.triton.cudagraph_skip_dynamic_graphs=True. "
        "Set torch._inductor.config.triton.cudagraph_dynamic_shape_warn_limit=None "
        "to silence this warning.",
        1,
        exactly=True,
    ).run("\n".join(captured_output))
@torch._inductor.config.patch("cpp_wrapper", 1)
def test_cpp_wrapper(self):
    """cpp_wrapper codegen with reduce-overhead still produces results
    matching eager."""
    def f(x):
        return torch.sin(x)
    compiled = torch.compile(f, mode="reduce-overhead")
    example_input = torch.randn(10, device="cuda")
    compiled_result = self.run_twc(compiled, example_input)
    eager_result = f(example_input)
    self.assertEqual(compiled_result, eager_result)
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition(self):
    """A CPU op in the middle of GPU work splits the graph into 2 partitions,
    each recorded as its own cudagraph; outputs match eager."""
    def f(x, y):
        x1 = x + 1
        y1 = y + 1
        y_cpu = y1.cpu() + 1  # CPU hop forces a partition boundary
        z = x @ y
        return x1 + y1 + z + y_cpu.cuda()
    x, y = [torch.randn(2, 2, device="cuda") for _ in range(2)]
    x_cloned, y_cloned = [tmp.clone() for tmp in [x, y]]
    eager_out = f(x, y)
    f_compiled = torch.compile(f, mode="reduce-overhead")
    for _ in range(5):
        compiled_out = f_compiled(x_cloned, y_cloned)
        self.assertEqual(eager_out, compiled_out)
    # 2 graph partitions lead to 2 cudagraph
    self.assertEqual(self.get_manager().new_graph_id().id, 2)
def test_graph_partition_view_fallback(self):
    """A dtype view (float8) followed by CPU round-trips must still compile
    and match eager under reduce-overhead."""
    def f(x):
        y = x + 1
        z = torch.ops.aten.view.dtype(y, torch.float8_e4m3fn)
        z_cpu = z.cpu()
        u_cuda = z_cpu.cuda()
        return u_cuda
    compiled_f = torch.compile(f, mode="reduce-overhead")
    for _ in range(3):
        x = torch.ones(2, dtype=torch.int32, device="cuda")
        eager_out = f(x)
        compiled_out = compiled_f(x)
        self.assertEqual(eager_out, compiled_out)
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_log_message(self):
    """Partitioning on non-GPU ops logs the reason, the offending source line,
    and the final partition count."""
    def foo(x, y):
        return (x + 1, y + 2)
    foo = torch.compile(foo, mode="reduce-overhead")
    with capture_stderr() as captured_output:
        foo(torch.ones([10], device="cuda"), torch.ones([20]))
    FileCheck().check_count(
        "cudagraph partition due to non gpu ops. Found from", 1, exactly=True
    ).check_count("return (x + 1, y + 2)", 1, exactly=True).check(
        "cudagraph partition into 2 partitions"
    ).run(captured_output[0])
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_cpu_scalar1(self):
    """A CPU scalar tensor input is moved to GPU with a single copy_ so the
    whole function stays in one cudagraph."""
    def f(x, y):
        return x + y
    compiled_f = torch.compile(f, mode="reduce-overhead")
    inputs = (torch.ones(2, 2, device="cuda"), torch.ones((), device="cpu"))
    for i in range(3):
        if i == 0:
            # Exactly one device copy should be generated for the scalar.
            _, code = run_and_get_code(compiled_f, *inputs)
            FileCheck().check_count(".copy_", 1, exactly=True).run(code[0])
        else:
            compiled_f(*inputs)
    self.assertEqual(compiled_f(*inputs), f(*inputs))
    self.assertEqual(self.get_manager().new_graph_id().id, 1)
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_cpu_scalar2(self):
    """Same single-copy behavior when the CPU scalar is the first argument
    and is used by two outputs."""
    def f(x, y, z):
        return x + y, x + z
    compiled_f = torch.compile(f, mode="reduce-overhead")
    inputs = (
        torch.ones((), device="cpu"),
        torch.ones(2, 2, device="cuda"),
        torch.ones(2, 2, device="cuda"),
    )
    for i in range(3):
        if i == 0:
            _, code = run_and_get_code(compiled_f, *inputs)
            FileCheck().check_count(".copy_", 1, exactly=True).run(code[0])
        else:
            compiled_f(*inputs)
    self.assertEqual(compiled_f(*inputs), f(*inputs))
    self.assertEqual(self.get_manager().new_graph_id().id, 1)
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_cpu_scalar3(self):
    """CPU scalar consumed after a GPU op still needs exactly one copy and
    one cudagraph."""
    def f(x, y, cpu_scalar_tensor):
        z = x + y
        z = z + cpu_scalar_tensor
        return z
    compiled_f = torch.compile(f, mode="reduce-overhead")
    inputs = (
        torch.randn(2, 2, device="cuda"),
        torch.randn(2, 2, device="cuda"),
        torch.tensor(1, device="cpu"),
    )
    for i in range(3):
        if i == 0:
            _, code = run_and_get_code(compiled_f, *inputs)
            FileCheck().check_count(".copy_", 1, exactly=True).run(code[0])
        else:
            compiled_f(*inputs)
    self.assertEqual(compiled_f(*inputs), f(*inputs))
    self.assertEqual(self.get_manager().new_graph_id().id, 1)
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_cpu_scalar4(self):
    """CPU scalar reached only through an intermediate (cpu_scalar2) is still
    moved to GPU with one copy; one cudagraph results."""
    # cpu_scalar_tensor is accessed by cpu_scalar2 which is
    # added with a gpu tensor z. This test checks the cpu
    # scalar tensors are still moved in this case.
    def f(x, y, cpu_scalar_tensor):
        cpu_scalar2 = cpu_scalar_tensor + 1
        z = x + y
        z = z + cpu_scalar2
        return z
    compiled_f = torch.compile(f, mode="reduce-overhead")
    inputs = (
        torch.randn(2, 2, device="cuda"),
        torch.randn(2, 2, device="cuda"),
        torch.tensor(1, device="cpu"),
    )
    for i in range(3):
        if i == 0:
            _, code = run_and_get_code(compiled_f, *inputs)
            FileCheck().check_count(".copy_", 1, exactly=True).run(code[0])
        else:
            compiled_f(*inputs)
    self.assertEqual(compiled_f(*inputs), f(*inputs))
    self.assertEqual(self.get_manager().new_graph_id().id, 1)
@torch._inductor.config.patch("graph_partition", True)
# turn on input mutation support to avoid skipping cudagraph at dynamo level
@torch._inductor.config.patch("triton.cudagraph_support_input_mutation", True)
def test_graph_partition_cpu_scalar_mutation(self):
    """Mutating a CPU scalar input (copy_ from a GPU tensor) must write back
    correctly even though the scalar is staged on GPU for the graph."""
    # tests that input mutation on a cpu scalar tensor x is correctly
    # handled when moving x to gpu at the beginning of the graph.
    @torch.compile(mode="reduce-overhead")
    def foo(x, y):
        return x.copy_(y)
    x = torch.tensor(1)
    y = torch.tensor(2, device="cuda")
    for _ in range(3):
        foo(x, y)
    self.assertEqual(x, torch.tensor(2, device="cpu"))
    self.assertEqual(y, torch.tensor(2, device="cuda"))
    self.assertEqual(self.get_manager().new_graph_id().id, 1)
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_cpu_scalar_device_put(self):
    """Round-tripping a CPU scalar through .to('cuda') and back leaves the
    original input untouched."""
    @torch.compile(mode="reduce-overhead")
    def foo(x):
        y = x.to("cuda")
        z = y.to("cpu")
        return z
    x = torch.tensor(1)
    for _ in range(3):
        foo(x)
    self.assertEqual(x, torch.tensor(1, device="cpu"))
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_cpu_scalar_multiple(self):
    """Two CPU scalar inputs are batched into a single (non-blocking) copy;
    one cudagraph results."""
    def f(x, y, z):
        return x + y, x + z
    compiled_f = torch.compile(f, mode="reduce-overhead")
    inputs = (
        torch.ones((), device="cpu"),
        torch.ones((), device="cpu"),
        torch.ones(2, 2, device="cuda"),
    )
    for i in range(3):
        if i == 0:
            _, code = run_and_get_code(compiled_f, *inputs)
            # The single copy_ call carries a True (non_blocking) argument.
            FileCheck().check_regex(r".copy_.*True").run(code[0])
            FileCheck().check_count(".copy_", 1, exactly=True).run(code[0])
        else:
            compiled_f(*inputs)
    self.assertEqual(compiled_f(*inputs), f(*inputs))
    self.assertEqual(self.get_manager().new_graph_id().id, 1)
@torch._inductor.config.patch("graph_partition", True)
@torch._inductor.config.patch("triton.cudagraphs", False)
def test_graph_partition_reduce_overhead_mode_effectiveness(self):
    """graph_partition alone must not enable cudagraphs: with default compile
    mode, no cudagraph manager is created."""
    # test that `mode="reduce-overhead"` still controls whether
    # cudagraph is applied. i.e., cudagraph is not applied when
    # mode="default".
    def f(x, y):
        x1 = x + 1
        y1 = y + 1
        y_cpu = y1.cpu() + 1
        z = x @ y
        return x1 + y1 + z + y_cpu.cuda()
    x, y = [torch.randn(2, 2, device="cuda") for _ in range(2)]
    f_compiled = torch.compile(f)
    for _ in range(5):
        _out = f_compiled(x, y)
    self.assertEqual(self.get_manager() is None, True)
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_forward_backward(self):
    """A full train step with a CPU hop in forward: forward splits into 2
    partitions, backward stays whole => 3 cudagraphs total."""
    class Mod(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.linear = torch.nn.Linear(16, 16)
        def forward(self, x):
            x1 = x + 1
            y1 = x + 2
            y_cpu = y1.cpu() + 1
            z = x @ y1
            inp = x1 + y1 + z + y_cpu.cuda()
            return self.linear(inp)
    model = Mod().cuda()
    input_data = torch.randn(16, 16).cuda()
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    compiled_model = torch.compile(model, mode="reduce-overhead")
    for _ in range(5):
        output = compiled_model(input_data)
        loss = criterion(output, torch.randint(0, 10, (16,)).cuda())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # 2 graph partitions lead to 2 fwd cudagraphs and 1 bwd cudagraphs
    self.assertEqual(self.get_manager().new_graph_id().id, 3)
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_cpu_only(self):
    """An all-CPU model under graph_partition produces no cudagraph manager
    at all."""
    class Mod(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.linear = torch.nn.Linear(16, 16)
        def forward(self, x):
            x1 = x + 1
            y1 = x + 2
            y_cpu = y1 + 1
            z = x @ y1
            inp = x1 + y1 + z + y_cpu
            return self.linear(inp)
    model = Mod().cpu()
    input_data = torch.randn(16, 16).cpu()
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    compiled_model = torch.compile(model, mode="default")
    for _ in range(5):
        output = compiled_model(input_data)
        loss = criterion(output, torch.randint(0, 10, (16,)).cpu())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # 0 cudagraph since all ops are on cpu
    self.assertEqual(self.get_manager() is None, True)
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_forward_with_skipped_cudagraphed_backward(self):
    """Backward is skipped (forced complex memory overlap) while forward is
    cudagraphed; manager must not think a backward is still pending."""
    @torch.compile(mode="reduce-overhead")
    def foo(x):
        return x * x * x
    for _ in range(3):
        inp = torch.rand([20, 20], device="cuda", requires_grad=True)
        out = foo(inp)
        with config.patch(always_complex_memory_overlap_TESTING_ONLY=True):
            back_inp = torch.empty_strided([20, 20], [0, 1], device="cuda")
            out.backward(back_inp)
    # we should not have cudagraph'd the backwards
    new_id = self.get_manager().new_graph_id().id
    self.assertEqual(new_id, 1)
    self.assertFalse(self.get_manager().running_forwards_with_pending_backwards)
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_forward_backward_not_called(self):
    """Only one of two outputs has backward() called; saved tensors must be
    handled and only x's backward gets a cudagraph (3 graphs total)."""
    # tests saved tensor is handled correctly
    def foo(x, y):
        x_out = x * x * x
        torch._dynamo.graph_break()
        y_out = y * y * y
        return x_out, y_out
    foo = torch.compile(foo, mode="reduce-overhead")
    for _ in range(3):
        inps = [
            torch.rand([20, 20], requires_grad=True, device="cuda")
            for _ in range(2)
        ]
        x_out, y_out = foo(inps[0], inps[1])
        x_out.sum().backward()
    self.assertFalse(self.get_manager().running_forwards_with_pending_backwards)
    # we should not have cudagraph'd the y backward
    new_id = self.get_manager().new_graph_id().id
    self.assertEqual(new_id, 3)
@requires_multigpu()
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_multiple_devices_msg(self):
    """Inputs on two different CUDA devices cause a cudagraph skip with a
    'multiple devices' message; cuda+cpu inputs do not."""
    def foo(x, y):
        return (x + 1, y + 2)
    foo = torch.compile(foo, mode="reduce-overhead")
    for _ in range(3):
        foo(torch.ones([10], device="cuda"), torch.ones([20]))
    self.assertEqual(counters["inductor"]["cudagraph_skips"], 0)
    with capture_stderr() as captured_output:
        for _ in range(3):
            foo(
                torch.ones([10], device="cuda:0"),
                torch.ones([10], device="cuda:1"),
            )
    FileCheck().check("skipping cudagraphs due to multiple devices").run(
        captured_output[0]
    )
    self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
    new_id = self.get_manager().new_graph_id().id
    self.assertEqual(new_id, 1)
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_dynamic_shapes(self):
    """Each distinct dynamic input shape records its own cudagraph under
    graph_partition."""
    def foo(x):
        return x + 1
    compiled_foo = torch.compile(foo, mode="reduce-overhead", fullgraph=True)
    for input_shape in range(1, 4):
        for _ in range(3):
            compiled_foo(torch.randn(input_shape, device="cuda"))
    # 3 cudagraphs for 3 input shapes
    self.assertEqual(self.get_manager().new_graph_id().id, 3)
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_cpu_op_and_dynamic_shapes(self):
    """Dynamic shapes and CPU-op partitioning multiply: 2 shapes x 2
    partitions = 4 cudagraphs."""
    def f(x, y):
        x1 = x + 1
        y1 = y + 1
        y_cpu = y1.cpu() + 1
        z = x @ y
        return x1 + y1 + z + y_cpu.cuda()
    f_compiled = torch.compile(f)
    x, y = torch.ones(3, 3, device="cuda"), torch.randn(3, 3, device="cuda")
    for _ in range(3):
        compiled_out = f_compiled(x, y)
        self.assertEqual(compiled_out, f(x, y))
    x, y = torch.ones(4, 4, device="cuda"), torch.randn(4, 4, device="cuda")
    for _ in range(3):
        compiled_out = f_compiled(x, y)
        self.assertEqual(compiled_out, f(x, y))
    # 4 cudagraphs, due to (2 dynamic shapes) x (2 graph partitions)
    self.assertEqual(self.get_manager().new_graph_id().id, 4)
@config.patch(implicit_fallbacks=True)
@config.patch("graph_partition", False)
def test_skip_cudagraph_unsafe_ops(self):
    """A custom op tagged cudagraph_unsafe causes a skip (with message) when
    graph_partition is off, while results still match eager."""
    @torch.library.custom_op(
        "mylib::mysin",
        mutates_args=["out_list"],
        schema="(Tensor x, Tensor(a!)[]? out_list) -> Tensor",
        tags=(torch._C.Tag.cudagraph_unsafe,),
    )
    def mysin(x, out_list) -> torch.Tensor:
        r = x.sin()
        if out_list is not None:
            out_list[0].copy_(r)
        return r
    @mysin.register_fake
    def _(x, out_list) -> torch.Tensor:
        return torch.empty_like(x)
    def fn(x):
        x = x * 3
        s = [torch.empty_like(x)]
        x = mysin(x, s)
        x = x / 3
        return x, s[0]
    x = torch.randn(3, requires_grad=False, device="cuda")
    expected = fn(x)
    compiled_f = torch.compile(fn, mode="reduce-overhead", fullgraph=True)
    with capture_stderr() as captured_output:
        for _ in range(3):
            result = compiled_f(x)
            self.assertEqual(result, expected)
    FileCheck().check("incompatible op mylib.mysin.default").run(
        captured_output[0]
    )
    self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
@config.patch(implicit_fallbacks=True)
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_custom_op(self):
    """Two cudagraph-unsafe custom ops split the graph into 3 cudagraph partitions."""

    @torch.library.custom_op(
        "mylib::movement",
        mutates_args=(),
        tags=(torch._C.Tag.cudagraph_unsafe,),
    )
    def movement(pic: torch.Tensor) -> torch.Tensor:
        # Round-trips through CPU, which is unsafe to capture in a cudagraph.
        img = pic.cpu()
        cropped_img = (img + 1) * 2
        return cropped_img.cuda() / 255.0

    @movement.register_fake
    def _(pic):
        return torch.empty_like(pic)

    @torch.library.custom_op(
        "mylib::modify",
        mutates_args=(),
        tags=(torch._C.Tag.cudagraph_unsafe,),
    )
    def modify(pic: torch.Tensor) -> torch.Tensor:
        pic1 = pic + 1
        pic1_cpu = (pic1.cpu() + 1) * 2
        return pic1_cpu.cuda() + pic

    @modify.register_fake
    def _(pic):
        return torch.empty_like(pic)

    # `transform` carries no cudagraph_unsafe tag, so it does NOT force a split.
    @torch.library.custom_op("mylib::transform", mutates_args=())
    def transform(pic: torch.Tensor) -> torch.Tensor:
        return (pic + 1) * 2

    @transform.register_fake
    def _(pic):
        return torch.empty_like(pic)

    img = torch.randn(3, 64, 64, device="cuda")

    def f(img):
        x = (img + 10) * 2
        y = movement(x)
        z = y + 1
        u = transform(z)
        v = 2 * u + 1
        out = modify(v)
        return out + 1

    # Correctness without cudagraphs first.
    compiled_f = torch.compile(f, fullgraph=True)
    eager_out = f(img)
    compiled_out = compiled_f(img)
    self.assertEqual(eager_out, compiled_out)

    # Then with cudagraphs (reduce-overhead).
    compiled_f = torch.compile(f, mode="reduce-overhead", fullgraph=True)
    eager_out = f(img)
    for _ in range(3):
        compiled_out = compiled_f(img)
        self.assertEqual(eager_out, compiled_out)

    # splitting on 2 custom ops gives 3 cudagraphs
    self.assertEqual(self.get_manager().new_graph_id().id, 3)
@config.patch(implicit_fallbacks=True)
@config.patch("graph_partition", True)
def test_graph_partition_custom_op_mutation(self):
    """A mutating cudagraph-unsafe custom op splits the graph into 2 cudagraphs."""

    @torch.library.custom_op(
        "mylib::mysin",
        mutates_args=["out_list"],
        schema="(Tensor x, Tensor(a!)[]? out_list) -> Tensor",
        tags=(torch._C.Tag.cudagraph_unsafe,),
    )
    def mysin(x, out_list) -> torch.Tensor:
        r = x.sin()
        if out_list is not None:
            out_list[0].copy_(r)
        return r

    @mysin.register_fake
    def _(x, out_list) -> torch.Tensor:
        return torch.empty_like(x)

    def fn(x):
        x = x * 3
        s = [torch.empty_like(x)]
        x = mysin(x, s)  # mutates s[0] in place
        x = x / 3
        return x, s[0]

    x = torch.randn(3, requires_grad=False, device="cuda")
    expected = fn(x)
    compiled_f = torch.compile(fn, mode="reduce-overhead", fullgraph=True)

    for _ in range(3):
        result = compiled_f(x)
        self.assertEqual(result, expected)

    # splitting on 1 custom op gives 2 cudagraphs
    self.assertEqual(self.get_manager().new_graph_id().id, 2)
@config.patch(implicit_fallbacks=True)
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_custom_op_mutation_late_free(self):
    """Exercises buffers that stay alive across partition boundaries (late free)."""

    @torch.library.custom_op(
        "mylib::op1",
        mutates_args=["x"],
        schema="(Tensor(a!)? x) -> (Tensor, Tensor)",
        device_types="cuda",
    )
    def op1(x) -> tuple[torch.Tensor, torch.Tensor]:
        x = x + 1
        return (x + 1, x + 2)

    @op1.register_fake
    def _(x) -> tuple[torch.Tensor, torch.Tensor]:
        return (torch.empty_like(x), torch.empty_like(x))

    @torch.library.custom_op(
        "mylib::cg_unsafe_op",
        mutates_args=[],
        schema="(Tensor x, Tensor y, Tensor x1, Tensor y1) -> Tensor",
        device_types="cuda",
        tags=(torch._C.Tag.cudagraph_unsafe,),
    )
    def cg_unsafe_op(x0, x1, y0, y1) -> torch.Tensor:
        return x0 + x1 + y0 + y1

    @cg_unsafe_op.register_fake
    def _(x0, x1, y0, y1) -> torch.Tensor:
        return torch.empty_like(x0)

    def f(x):
        x = x + 1
        x = op1(x)
        x0, x1 = x[0], x[1]
        y0 = x0 + 1
        y1 = x1 + 1
        # Unsafe op forces a partition; its inputs must outlive the boundary.
        y = cg_unsafe_op(x0, x1, y0, y1)
        z = y + x0 + x1
        z0, z1 = op1(z)
        z2 = z0 + z1
        res = cg_unsafe_op(z2, z2, y, y)
        return res

    x = torch.randn(2, 2, device="cuda")
    # f mutates its input via op1, so run eager on the original and
    # compiled on a clone.
    x_cloned = x.clone()
    eager_out = f(x)

    f_compiled = torch.compile(f, mode="reduce-overhead")

    for _ in range(5):
        compiled_out = f_compiled(x_cloned)
        self.assertEqual(eager_out, compiled_out)
@config.patch(implicit_fallbacks=True)
@torch._inductor.config.patch("graph_partition", True)
# NOTE(review): "dynamoc" in the name is a typo for "dynamic"; kept as-is so the
# test id does not change for CI filters/history.
def test_graph_partition_custom_op_dynamoc_shapes(self):
    """One cudagraph-unsafe custom op (2 partitions) x 3 shapes = 6 cudagraphs."""

    @torch.library.custom_op(
        "mylib::movement",
        mutates_args=(),
        tags=(torch._C.Tag.cudagraph_unsafe,),
    )
    def movement(pic: torch.Tensor) -> torch.Tensor:
        # CPU round-trip: unsafe to capture in a cudagraph.
        img = pic.cpu()
        cropped_img = (img + 1) * 2
        return cropped_img.cuda() / 255.0

    @movement.register_fake
    def _(pic):
        return torch.empty_like(pic)

    def f(img):
        x = (img + 10) * 2
        y = movement(x)
        z = y + 1
        v = 2 * z + 1
        return v + 1

    # Fixed: the original assigned `torch.compile(f, fullgraph=True)` to
    # compiled_f and immediately overwrote it without calling it. torch.compile
    # is lazy, so that first assignment was dead code; only the
    # reduce-overhead compile below is exercised.
    compiled_f = torch.compile(f, mode="reduce-overhead", fullgraph=True)

    def run(size):
        img = torch.randn(3, size, size, device="cuda")
        eager_out = f(img)
        for _ in range(3):
            compiled_out = compiled_f(img)
            self.assertEqual(eager_out, compiled_out)

    run(64)
    run(17)
    run(42)

    # 2 (from splitting on 1 custom op) x 3 (dynamic shapes) = 6
    self.assertEqual(self.get_manager().new_graph_id().id, 6)
@config.patch(implicit_fallbacks=True)
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_custom_op_no_split(self):
    """An untagged custom op does not split the graph: 1 partition x 3 shapes = 3."""

    @torch.library.custom_op(
        "mylib::modify",
        mutates_args=(),
    )
    def modify(x: torch.Tensor) -> torch.Tensor:
        return (x + 1) * 2

    @modify.register_fake
    def _(pic):
        return torch.empty_like(pic)

    def f(img):
        x = (img + 10) * 2
        y = modify(x)
        z = y + 1
        v = 2 * z + 1
        return v + 1

    # Fixed: the original assigned `torch.compile(f, fullgraph=True)` to
    # compiled_f and immediately overwrote it without calling it. torch.compile
    # is lazy, so that first assignment was dead code; only the
    # reduce-overhead compile below is exercised.
    compiled_f = torch.compile(f, mode="reduce-overhead", fullgraph=True)

    def run(size):
        img = torch.randn(3, size, size, device="cuda")
        eager_out = f(img)
        for _ in range(3):
            compiled_out = compiled_f(img)
            self.assertEqual(eager_out, compiled_out)

    run(64)
    run(17)
    run(42)

    # 1 (from not splitting on custom op) x 3 (dynamic shapes) = 3
    self.assertEqual(self.get_manager().new_graph_id().id, 3)
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_cpu_tensor_symints(self):
    """CPU-tensor shape changes recompile dynamo but do not record new cudagraphs."""

    def f(x, y):
        return x + 1, y + 1

    compiled_f = torch.compile(f, mode="reduce-overhead")

    def run(shape_x, shape_y):
        x = torch.randn(shape_x, device="cuda")
        y = torch.randn(shape_y, device="cpu")
        for _ in range(3):
            compiled_f(x, y)

    # static shape. record a NEW cudagraph
    run(shape_x=(2, 3), shape_y=(4, 4))

    # shape_y becomes dynamic shape leading to a new dynamo graph.
    # This new dynamo graph forces a NEW cudagraph although tensor y is on cpu
    run(shape_x=(2, 3), shape_y=(5, 6))

    # tensor y is on cpu so NO new cudagraph is recorded
    run(shape_x=(2, 3), shape_y=(7, 8))

    # shape_x becomes dynamic shape, leading to a new dynamo graph
    # this new dynamo graph forces a NEW cudagraph
    run(shape_x=(3, 4), shape_y=(4, 4))

    # tensor y is on cpu so NO new cudagraph is recorded
    run(shape_x=(3, 4), shape_y=(10, 11))

    self.assertEqual(self.get_manager().new_graph_id().id, 3)
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_reorder_cpu_and_gpu(self):
    """Independent CPU work is reordered so all CUDA ops fall into ONE partition."""

    def f(x_cuda, y_cpu, z_cuda, weight_cuda, weight_cpu):
        # CUDA chunk 1
        x_cuda0 = x_cuda + 1
        x_cuda1 = x_cuda0 @ weight_cuda
        x_cuda2 = 2 * (x_cuda1 + x_cuda)

        # CPU chunk, independent of the CUDA work above
        y_cpu0 = y_cpu + 1
        y_cpu1 = y_cpu0 @ weight_cpu

        # CUDA chunk 2
        z_cuda0 = z_cuda + 1
        z_cuda1 = z_cuda0 @ weight_cuda
        z_cuda2 = 2 * (z_cuda1 + z_cuda)

        return x_cuda2, y_cpu1, z_cuda2

    x_cuda = torch.randn(3, 3, device="cuda")
    y_cpu = torch.randn(3, 3, device="cpu")
    z_cuda = torch.randn(3, 3, device="cuda")
    weight_cuda = torch.randn(3, 3, device="cuda")
    weight_cpu = torch.randn(3, 3, device="cpu")

    eager_out = f(x_cuda, y_cpu, z_cuda, weight_cuda, weight_cpu)
    compiled_f = torch.compile(f, mode="reduce-overhead")
    for _ in range(3):
        compiled_out = compiled_f(
            x_cuda, y_cpu, z_cuda, weight_cuda, weight_cpu
        )
        self.assertEqual(eager_out, compiled_out)

    # reorder merges ops on cuda into 1 graph partition
    self.assertEqual(self.get_manager().new_graph_id().id, 1)
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_reorder_cpu_and_gpu_interleave(self):
    """Interleaved CPU/CUDA partitions are reordered around true dependencies."""

    def f(x_cuda, y_cpu, z_cuda, weight_cuda, weight_cpu):
        # partition 1 on cuda, no dependency
        x_cuda0 = x_cuda + 1
        x_cuda1 = x_cuda0 @ weight_cuda
        x_cuda2 = 2 * (x_cuda1 + x_cuda)

        # partition 2 on cpu w/ dependency on partition 1
        y_cpu0 = y_cpu + 1
        x_cuda2_cpu = x_cuda2.cpu()  # adds dependency on gpu computations
        y_cpu1 = y_cpu0 @ weight_cpu + x_cuda2_cpu

        # partition 3 on cuda w/o dependency
        z_cuda0 = z_cuda + 1
        z_cuda1 = z_cuda0 @ weight_cuda
        z_cuda2 = 2 * (z_cuda1 + z_cuda)

        # partition 4 on cpu w/o dependency
        y_cpu2 = y_cpu + 5
        y_cpu3 = y_cpu2 @ weight_cpu

        # partition 5 on cuda w/o dependency
        u_cuda0 = z_cuda + 3
        u_cuda1 = u_cuda0 @ weight_cuda
        u_cuda2 = 2 * (u_cuda0 + u_cuda1)

        return x_cuda2, y_cpu1, z_cuda2, y_cpu3, u_cuda2

    x_cuda = torch.randn(3, 3, device="cuda")
    y_cpu = torch.randn(3, 3, device="cpu")
    z_cuda = torch.randn(3, 3, device="cuda")
    weight_cuda = torch.randn(3, 3, device="cuda")
    weight_cpu = torch.randn(3, 3, device="cpu")

    eager_out = f(x_cuda, y_cpu, z_cuda, weight_cuda, weight_cpu)
    compiled_f = torch.compile(f, mode="reduce-overhead")
    for _ in range(3):
        compiled_out = compiled_f(
            x_cuda, y_cpu, z_cuda, weight_cuda, weight_cpu
        )
        self.assertEqual(eager_out, compiled_out)

    # the optimal order is
    # [[partition 4 on cpu], [partition 1,3,5 on cuda], [partition 2 on cpu]]
    # since partition2 depends on partition1. So we have 1 cudagraph in total.
    self.assertEqual(self.get_manager().new_graph_id().id, 1)
@config.patch(implicit_fallbacks=True)
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_reorder_custom_op_with_no_dependency(self):
    """A dependency-free unsafe op in mid-function is hoisted, leaving 1 partition."""

    # Two reasons for this:
    # 1. We want to reuse the same mask for many masked_fill calls
    # 2. Prevent inductor from fusing this op into other ops (e.g. masked_fill)
    #    so we can still reorder in scheduler
    @torch.library.custom_op(
        "mylib::create_mask",
        mutates_args=(),
        tags=(torch._C.Tag.cudagraph_unsafe,),
    )
    def create_mask(
        padded_size: int, original_size: int, device: torch.device
    ) -> torch.Tensor:
        mask = torch.zeros((padded_size,), dtype=torch.bool, device=device)
        mask[original_size:] = True
        return mask

    @create_mask.register_fake
    def _(padded_size, original_size, device):
        return torch.empty((padded_size,), dtype=torch.bool, device=device)

    def f(padded_tensor, original_tensor, weight):
        original_size = original_tensor.size()[0]
        padded_size = padded_tensor.size()[0]

        # element wise op so we don't care padding value
        padded_tensor = padded_tensor + 1
        padded_tensor = torch.nn.functional.relu(padded_tensor)

        # dot product requires padding with 0
        dot_res = padded_tensor.dot(weight)
        padded_tensor += dot_res

        # min requires padding with inf, so we create mask now
        mask = create_mask(padded_size, original_size, padded_tensor.device)
        min_res = torch.min(
            torch.ops.aten.masked_fill(padded_tensor, mask, float("inf"))
        )

        # max requires padding with -inf. we can reuse previous mask
        max_res = torch.max(
            torch.ops.aten.masked_fill(padded_tensor, mask, -float("inf"))
        )

        return min_res + max_res + padded_tensor

    compiled_f = torch.compile(f, mode="reduce-overhead")

    def run(padded_size, original_size):
        padded_tensor = torch.randn(padded_size, device="cuda")
        padded_tensor[original_size:] = 0
        # meta tensor: only its size matters, never its data
        original_tensor = torch.randn(original_size, device="meta")
        weight = torch.randn(padded_size, device="cuda")
        eager_out = f(padded_tensor, original_tensor, weight)
        for _ in range(3):
            compiled_out = compiled_f(padded_tensor, original_tensor, weight)
            self.assertEqual(eager_out, compiled_out)

    # although custom op `create_mask` happens at the middle of function, reorder
    # moves it to the front so we only have 1 partition. This leads to 1 cudagraph
    run(8, 4)
    # recompilation leads to 1 NEW cudagraph
    run(8, 6)
    self.assertEqual(self.get_manager().new_graph_id().id, 2)
@config.patch(implicit_fallbacks=True)
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_reorder_custom_op_with_no_dependency1(self):
    """Variant of the reorder test using a size tensor + torch.where instead of a mask."""

    # wrap with custom op so this is not fused into other ops
    @torch.library.custom_op(
        "mylib::create_size_tensor",
        mutates_args=(),
        tags=(torch._C.Tag.cudagraph_unsafe,),
    )
    def create_size_tensor(
        tensor: torch.Tensor, device: torch.device
    ) -> torch.Tensor:
        size = tensor.size()[0]
        zero = torch.zeros((), device=device)
        return zero + size

    @create_size_tensor.register_fake
    def _(tensor, device):
        size = tensor.size()[0]
        zero = torch.zeros((), device=device, dtype=torch.int64)
        return zero + size

    def fill(
        padded_tensor: torch.Tensor, original_size: torch.Tensor, value
    ) -> torch.Tensor:
        # Overwrite padding positions (index >= original_size) with `value`.
        padded_size = padded_tensor.size()[0]
        size_range = torch.arange(padded_size, device=padded_tensor.device)
        padded_tensor = torch.where(
            size_range >= original_size, value, padded_tensor
        )
        return padded_tensor

    def f(padded_tensor, original_tensor, weight):
        # element wise op so we don't care padding value
        padded_tensor = padded_tensor + 1
        padded_tensor = torch.nn.functional.relu(padded_tensor)

        # dot product requires padding with 0
        dot_res = padded_tensor.dot(weight)
        padded_tensor += dot_res

        # min requires padding with inf, so we create the size tensor now
        original_size_cuda = create_size_tensor(original_tensor, "cuda")
        padded_tensor = fill(padded_tensor, original_size_cuda, float("inf"))
        min_res = torch.min(padded_tensor)

        # max requires padding with -inf. we can reuse the previous size tensor
        padded_tensor = fill(padded_tensor, original_size_cuda, -float("inf"))
        max_res = torch.max(padded_tensor)

        return min_res + max_res + padded_tensor

    compiled_f = torch.compile(f, mode="reduce-overhead")

    def run(padded_size, original_size):
        padded_tensor = torch.randn(padded_size, device="cuda")
        padded_tensor[original_size:] = 0
        # meta tensor: only its size matters, never its data
        original_tensor = torch.randn(original_size, device="meta")
        weight = torch.randn(padded_size, device="cuda")
        eager_out = f(padded_tensor, original_tensor, weight)
        for _ in range(3):
            compiled_out = compiled_f(padded_tensor, original_tensor, weight)
            assert torch.allclose(eager_out, compiled_out)

    # although custom op `create_size_tensor` happens at the middle of function,
    # reorder moves it to the front so we only have 1 partition -> 1 cudagraph
    run(8, 4)
    # recompilation leads to 1 NEW cudagraph
    run(8, 6)
    # reuse previous cudagraph
    run(8, 7)
    self.assertEqual(self.get_manager().new_graph_id().id, 2)
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_simple(self):
    """Smoke test: partitioned output matches eager and codegen emits partition fns."""

    def f(x, y):
        x1 = x + 1
        y1 = y + 1
        y_cpu = y1.cpu() + 1  # CPU op forces a partition boundary
        z = x @ y
        return x1 + y1 + z + y_cpu.to("cuda")

    x, y = [torch.ones(2, 2, device="cuda") for _ in range(2)]
    x_cloned, y_cloned = [tmp.clone() for tmp in [x, y]]
    eager_out = f(x, y)

    f_compiled = torch.compile(f)
    compiled_out = f_compiled(x_cloned, y_cloned)
    self.assertEqual(eager_out, compiled_out)

    # Check the generated python wrapper contains the partition machinery.
    _, code = run_and_get_code(f_compiled, x_cloned, y_cloned)
    if not config.cpp_wrapper:
        FileCheck().check("def partition_0(args):").check(
            "recursively_apply_fns = runner.recursively_apply_fns"
        ).run(code[0])
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_foreach_op(self):
    """Foreach (multi-tensor) ops compile correctly under graph partition."""

    def fn(a0, a1):
        c = torch._foreach_abs([a0, a1])
        return torch.mul(c[0], a0)

    compiled_fn = torch.compile(fn)

    a0 = torch.randn(2, 3, device="cuda")
    a1 = torch.randn(2, 3, device="cuda")
    eager_out = fn(a0, a1)
    compiled_out = compiled_fn(a0, a1)
    self.assertEqual(eager_out, compiled_out)
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_condition_op(self):
    """torch.cond works under graph partition for static and dynamic shapes."""

    def f(p, b):
        def true_fn(x):
            return torch.cos(x)

        def false_fn(x):
            return torch.sin(x)

        return torch.cond(p, true_fn, false_fn, [b])

    compiled_f = torch.compile(f)

    # static shape
    p = torch.tensor([True], device="cuda")
    a = torch.ones([2, 3], device="cuda")
    eager_out = f(p, a)
    compiled_out = compiled_f(p, a)
    self.assertEqual(eager_out, compiled_out)

    # dynamic shape with backed symint
    p = torch.tensor([True], device="cuda")
    a = torch.ones([4, 5], device="cuda")
    eager_out = f(p, a)
    compiled_out = compiled_f(p, a)
    self.assertEqual(eager_out, compiled_out)
@torch._inductor.config.patch("graph_partition", True)
@torch._dynamo.config.patch("capture_scalar_outputs", True)
def test_graph_partition_unbacked_symint_multi_output_layout(self):
    """Unbacked symint (from .item()) feeding a multi-output torch.cond compiles."""

    def f(p, size_tensor):
        # .item() produces an unbacked symint used as a tensor dimension.
        size_val = size_tensor.item()
        b = torch.ones([size_val, 3], device="cuda")

        def true_fn(x):
            return torch.cos(x), torch.cos(x) + 1

        def false_fn(x):
            return torch.sin(x), torch.sin(x) + 1

        cond_out = torch.cond(p, true_fn, false_fn, [b])
        return cond_out[0] + cond_out[1]

    compiled_f = torch.compile(f)
    p = torch.tensor([True], device="cuda")
    size_tensor = torch.tensor(2, device="cuda")
    eager_out = f(p, size_tensor)
    compiled_out = compiled_f(p, size_tensor)
    self.assertEqual(eager_out, compiled_out)
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_symint(self):
    """Backed symints (two shapes hit sequentially) work under graph partition."""

    def f(x, y):
        x1 = x + 1
        y1 = y + 1
        y_cpu = y1.cpu() + 1  # CPU op forces a partition boundary
        z = x @ y
        return x1 + y1 + z + y_cpu.to("cuda")

    f_compiled = torch.compile(f)
    x, y = (
        torch.ones(3, 3, device="cuda"),
        torch.randn(3, 3, device="cuda"),
    )
    compiled_out = f_compiled(x, y)
    self.assertEqual(compiled_out, f(x, y))

    # Second shape makes the sizes dynamic (backed symints).
    x, y = (
        torch.ones(4, 4, device="cuda"),
        torch.randn(4, 4, device="cuda"),
    )
    compiled_out = f_compiled(x, y)
    self.assertEqual(compiled_out, f(x, y))
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_symint_cat_backward(self):
    """Gradients through cat with a dynamic dim match eager under graph partition."""

    def f(x, w):
        y = torch.cat((x, x), dim=0)
        z = y @ w
        return z @ z.T

    compiled_f = torch.compile(f)

    for shape in (2, 3):
        # Same seed for both runs so eager/compiled inputs are identical.
        torch.manual_seed(42)
        eager_x = torch.randn(shape, 2, device="cuda")
        eager_w = torch.randn(2, 2, device="cuda", requires_grad=True)

        torch.manual_seed(42)
        compiled_x = torch.randn(shape, 2, device="cuda")
        compiled_w = torch.randn(2, 2, device="cuda", requires_grad=True)

        f(eager_x, eager_w).sum().backward()
        compiled_f(compiled_x, compiled_w).sum().backward()
        self.assertEqual(eager_w.grad, compiled_w.grad)
@dynamo_config.patch("capture_dynamic_output_shape_ops", True)
@config.patch(implicit_fallbacks=True)
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_symint_from_nested_indirect_indexing(self):
    """Symints arising from data-dependent repeat_interleave/index_select compile."""

    def nested(x, repeats):
        rank = torch.arange(repeats.numel(), device=x.device)
        # Output length depends on tensor data -> dynamic output shape.
        index = rank.repeat_interleave(repeats, dim=0)
        return torch.index_select(x, index=index, dim=0)

    example_inputs = (
        torch.randn((32, 64), device="cuda"),
        repeats := torch.tensor([5, 10, 15], device="cuda"),
    )
    torch._dynamo.mark_dynamic(repeats, 0)  # create backed symint

    nested_opt = torch.compile(nested, backend="inductor")

    expect = nested(*example_inputs)
    actual = nested_opt(*example_inputs)
    self.assertEqual(expect, actual)
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_symint_from_mutation_index(self):
    """Symint index used to mutate the input in place works under graph partition."""

    x = torch.zeros(7, device="cuda")

    def fn(n, a):
        a[n] = -1  # in-place write at a (possibly symbolic) index
        return a

    opt_fn = torch.compile(fn, fullgraph=True)

    for n in range(2, x.shape[0]):
        opt_fn(n, x)
        self.assertEqual(x[n], -1)

    # Negative index triggers new compilation.
    opt_fn(-x.shape[0], x)
    self.assertEqual(x[0], -1)
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_unbacked_symint(self):
    """mark_unbacked inputs (unbacked symints) compile correctly with partitions."""

    def f(x, y):
        x1 = x + 1
        y1 = y + 1
        y_cpu = y1.cpu() + 1  # CPU op forces a partition boundary
        z = x @ y
        return x1 + y1 + z + y_cpu.to("cuda")

    f_compiled = torch.compile(f)
    x, y = (
        torch.ones(3, 3, device="cuda"),
        torch.randn(3, 3, device="cuda"),
    )

    # Mark dims unbacked so no size guard/hint is available at compile time.
    torch._dynamo.decorators.mark_unbacked(x, 0)
    torch._dynamo.decorators.mark_unbacked(y, 1)

    compiled_out = f_compiled(x, y)
    eager_out = f(x, y)
    self.assertEqual(compiled_out, eager_out)
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_dynamic_scalar_inputs(self):
    """A varying python-int argument (dynamic scalar) works under graph partition."""

    def f(x, y, integer):
        x1 = x + 1
        y1 = y + 1
        y_cpu = y1.cpu() + 1  # CPU op forces a partition boundary
        z = x @ y
        z += integer
        return x1 + y1 + z + y_cpu.to("cuda")

    f_compiled = torch.compile(f)
    x, y = (
        torch.ones(3, 3, device="cuda"),
        torch.randn(3, 3, device="cuda"),
    )
    torch._dynamo.decorators.mark_unbacked(x, 0)
    torch._dynamo.decorators.mark_unbacked(y, 1)

    compiled_out = f_compiled(x, y, 5)
    self.assertEqual(compiled_out, f(x, y, 5))

    # Different scalar value should not need a full recompile to stay correct.
    compiled_out = f_compiled(x, y, 6)
    self.assertEqual(compiled_out, f(x, y, 6))
@torch._inductor.config.patch("graph_partition", True)
@torch._dynamo.config.patch("capture_scalar_outputs", True)
def test_graph_partition_item(self):
    """Tensor.item() (scalar output capture) works under graph partition."""

    def f(x):
        y = x + 1
        scalar = y.item()  # device->host sync; creates an unbacked scalar
        return x + y + scalar

    compiled_f = torch.compile(f)
    compiled_out = compiled_f(torch.tensor(1, device="cuda"))
    self.assertEqual(compiled_out, f(torch.tensor(1, device="cuda")))
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_buffer_reuse(self):
    """Buffers crossing multiple CPU<->CUDA partition boundaries are handled correctly."""

    def f(x, y):
        x1 = x + 1
        y1 = y + 1
        y_cpu = y1.cpu() + 1  # first partition boundary
        z = x1 + y1 + x @ y
        u = (y_cpu.to("cuda") + 2) @ y + 3
        u_cpu = u.cpu() + 2  # second partition boundary
        return z + u_cpu.to("cuda")

    x, y = [torch.ones(2, 2, device="cuda") for _ in range(2)]
    x_cloned, y_cloned = [tmp.clone() for tmp in [x, y]]

    eager_out = f(x, y)

    f_compiled = torch.compile(f)
    compiled_out = f_compiled(x_cloned, y_cloned)
    self.assertEqual(eager_out, compiled_out)
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_fused_scheduler_node(self):
    """Aliased outputs + a graph break + tensor-indexed slices compile correctly."""

    def foo(x):
        x = x * 20
        x_alias = x[0]
        y = x * 10
        y_alias = y[0]
        torch._dynamo.graph_break()  # forces two dynamo graphs
        ind = torch.tensor(4, device="cuda")
        x_alias2 = x[ind:]  # data-dependent (tensor) slice start
        y_alias2 = y[ind:]
        return x, x_alias, x_alias2, y_alias, y_alias2

    compiled_foo = torch.compile(foo)
    x = torch.rand([20, 20], device="cuda")
    eager_out = foo(x)
    compiled_out = compiled_foo(x)
    self.assertEqual(eager_out, compiled_out)
# Use autotune_at_compile_time=True to test standalone_compile
@parametrize("autotune_at_compile_time", [True, False])
@config.patch("graph_partition", True)
def test_graph_partition_kernel_reuse(self, autotune_at_compile_time):
    """A triton kernel generated in partition 1 is reused (not re-emitted) in partition 2."""

    def foo(x):
        # partition 1
        x1 = x @ x
        y1 = x1 + 1
        z_cpu = y1.cpu() + 1  # CPU op splits the graph
        # partition 2
        # partition 2 should reuse the fused triton kernel generated
        # in partition 1
        x2 = z_cpu.to("cuda") @ z_cpu.to("cuda")
        y2 = x2 + 1
        return y1, y2

    with config.patch(
        "triton.autotune_at_compile_time", autotune_at_compile_time
    ):
        compiled_foo = torch.compile(foo)
        x = torch.randn((20, 20), device="cuda")
        eager_out = foo(x)
        compiled_out, code = run_and_get_code(compiled_foo, x)
        self.assertEqual(eager_out, compiled_out)

        if autotune_at_compile_time:
            # auto-tuning block should only appear once. We generate auto-tuning code
            # for all the kernels no matter if they are defined in the main graph or
            # subgraph, to avoid the overhead of executing multiple auto-tuning code blocks.
            FileCheck().check_count(
                "Compile-time auto-tuning block", 1, exactly=True
            ).run(code[0])
            # triton_poi_fused_add_ should appear twice, first in the auto-tuning block,
            # and then in the main code block
            FileCheck().check_count(
                "def triton_poi_fused_add_", 2, exactly=True
            ).run(code[0])
            # cpu kernel definition should only appear once, not in the auto-tuning block
            FileCheck().check_count(
                "cpp_fused__to_copy_add_1 = ", 1, exactly=True
            ).run(code[0])
        else:
            # triton_poi_fused_add_ should appear once, because of kernel reuse
            FileCheck().check_count(
                "def triton_poi_fused_add_", 1, exactly=True
            ).run(code[0])
@config.patch("graph_partition", True)
def test_graph_partition_user_defined_triton_kernel_reuse(self):
    """A user-defined triton kernel is emitted once and reused across partitions."""

    from torch.testing._internal.triton_utils import add_kernel

    def foo(x, y):
        # partition 1
        output1 = torch.empty_like(x)
        add_kernel[(4,)](x, y, output1, n_elements=128, BLOCK_SIZE=16)
        output1_cpu = output1.cpu() + 1  # CPU op splits the graph
        # partition 2 should reuse the user-defined kernel
        x2 = output1_cpu.to("cuda")
        output2 = torch.empty_like(x)
        add_kernel[(4,)](x2, y, output2, n_elements=128, BLOCK_SIZE=16)
        return output1, output2

    compiled_foo = torch.compile(foo)
    x = torch.randn(128, device="cuda")
    y = torch.randn(128, device="cuda")
    eager_out = foo(x, y)
    compiled_out, code = run_and_get_code(compiled_foo, x, y)
    self.assertEqual(eager_out, compiled_out)

    # Kernel source should be compiled exactly once despite two call sites.
    FileCheck().check_count(
        "async_compile.triton('add_kernel',", 1, exactly=True
    ).run(code[0])
def test_meta_tensor(self):
    """A meta-device input alongside a CUDA input records exactly one cudagraph."""

    def foobar(x, y):
        return x * 2, y * 3

    foo_c = torch.compile(mode="reduce-overhead")(foobar)

    t = torch.empty((1, 16, 128, 128), device="meta")
    y = torch.rand([64], device="cuda")

    eager_out = foobar(t, y)
    for _ in range(3):
        compiled_out = foo_c(t, y)

    # NOTE(review): extra call after the loop looks redundant (one more replay);
    # presumably intentional to check stability after warmup — confirm.
    compiled_out = foo_c(t, y)
    self.assertEqual(eager_out, compiled_out)
    self.assertEqual(self.get_manager().new_graph_id().id, 1)
@torch._inductor.config.patch("triton.cudagraph_capture_sizes", (2, 5, 7))
def test_cudagraph_capture_sizes(self):
    """Only the explicitly whitelisted dynamic sizes get cudagraphs recorded."""

    def f(x):
        return x + 1

    f = torch.compile(f, mode="reduce-overhead")

    def run(shape):
        x = torch.randn((shape, 5), device="cuda")
        torch._dynamo.mark_dynamic(x, 0)
        for _ in range(3):
            f(x)

    for i in range(1, 10):
        run(i)

    # Only sizes 2, 5, and 7 are captured -> 3 cudagraphs.
    self.assertEqual(self.get_manager().new_graph_id().id, 3)
@torch._inductor.config.patch(
    "triton.cudagraph_capture_sizes",
    (
        (2, 3),
        (4, 5),
        (6, 2),
        (7, 3),
    ),
)
def test_cudagraph_capture_sizes1(self):
    """Capture-size whitelisting with two dynamic dims (pairs of sizes)."""

    def f(x):
        return x + 1

    f = torch.compile(f, mode="reduce-overhead")

    def run(batch_size, seq_len, d):
        x = torch.randn((batch_size, seq_len, d), device="cuda")
        torch._dynamo.mark_dynamic(x, 0)
        torch._dynamo.mark_dynamic(x, 1)
        for _ in range(3):
            f(x)

    for i in range(2, 10):
        for j in range(2, 10):
            run(i, j, 8)

    # Exactly the 4 whitelisted (batch, seq) pairs are captured.
    self.assertEqual(self.get_manager().new_graph_id().id, 4)
@torch._inductor.config.patch(
    "triton.cudagraph_capture_sizes",
    (
        (2, 3, 4),
        (4, 4, 3),
        (3, 4, 4),
        (4, 2, 3),
    ),
)
def test_cudagraph_capture_sizes2(self):
    """Capture-size whitelisting with three dynamic dims (triples of sizes)."""

    def f(x):
        return x + 1

    f = torch.compile(f, mode="reduce-overhead")

    def run(batch_size, seq_len, d):
        x = torch.randn((batch_size, seq_len, d), device="cuda")
        torch._dynamo.mark_dynamic(x, 0)
        torch._dynamo.mark_dynamic(x, 1)
        torch._dynamo.mark_dynamic(x, 2)
        for _ in range(3):
            f(x)

    for i in range(2, 5):
        for j in range(2, 5):
            for k in range(2, 5):
                run(i, j, k)

    # Exactly the 4 whitelisted triples are captured.
    self.assertEqual(self.get_manager().new_graph_id().id, 4)
@torch._inductor.config.patch("triton.cudagraph_or_error", True)
def test_cudagraph_or_error(self):
    """With cudagraph_or_error set, a cudagraph-incompatible fn raises instead of skipping."""

    def f(x):
        x.add_(1)  # input mutation makes this incompatible with cudagraphs here
        return x

    f = torch.compile(f, mode="reduce-overhead")
    with self.assertRaises(RuntimeError):
        f(torch.tensor(1, device="cuda"))
class TestSAC(TestCase):
def _make_observer_mode(self):
    """Build a fresh TorchDispatchMode class that records op outputs per run.

    A new class is created per call so each test can attach its own
    hop.py_impl registrations without leaking across tests.
    """

    class ObserverMode(TorchDispatchMode):
        def __init__(self):
            super().__init__()
            # Index of the current forward/backward run; tests bump this.
            self.curr_run = 0
            # Map (key -> list of recorded outputs); keys vary per test.
            self.op_outputs = defaultdict(list)

        def __torch_dispatch__(
            self,
            func: OpOverload,
            types: Sequence[type],
            args: Sequence[object] = (),
            kwargs: Mapping[str, object] = immutable_dict(),
        ) -> object:
            # Pass-through: recording is done by per-test py_impl hooks.
            return func(*args, **kwargs)

    return ObserverMode
def test_simple(self):
    """RNG outputs replay identically between fwd/bwd of a checkpointed region,
    and differ across torch._dynamo.reset() boundaries."""
    device = "cuda"
    from torch._prims.rng_prims import graphsafe_run_with_rng_state

    ObserverMode = self._make_observer_mode()

    # Record every graphsafe RNG op output into the observer.
    @graphsafe_run_with_rng_state.py_impl(ObserverMode)
    def _(mode, op, *args, **kwargs):
        with no_dispatch():
            out = graphsafe_run_with_rng_state(op, *args, **kwargs)
        mode.op_outputs[op].append(out)
        return out

    obs = ObserverMode()

    x = torch.randn(4, 4, device=device, requires_grad=True)
    y = torch.randn(4, 4, device=device, requires_grad=True)

    for _ in range(2):
        torch._dynamo.reset()

        def gn(x, y):
            return torch.sigmoid(torch.rand_like(x) * y) * x

        def fn(x, y):
            x = torch.sin(x)
            x = torch.utils.checkpoint.checkpoint(gn, x, y, use_reentrant=True)
            x = torch.sin(x)
            return x

        aot_eager_decomp_partition = functools.partial(
            aot_eager_decomp_partition_with_mode, mode=obs
        )
        fn = torch.compile(fn, backend=aot_eager_decomp_partition)

        fn(x, y).sum().backward()

    # 2 runs x (forward + recomputed-in-backward) = 4 recorded rand outputs.
    self.assertEqual(len(obs.op_outputs[aten.rand.default]), 4)
    for i in range(2):
        # Within a run, forward and backward recompute must match exactly.
        self.assertEqual(
            obs.op_outputs[aten.rand.default][0 + 2 * i],
            obs.op_outputs[aten.rand.default][1 + 2 * i],
        )
    # Across runs, the RNG should have advanced.
    self.assertNotEqual(
        obs.op_outputs[aten.rand.default][0],
        obs.op_outputs[aten.rand.default][2],
    )
def test_cudagraph_uneven_forward_backward(self):
    """Graphsafe generator-state cloning reproduces forward RNG in a delayed backward,
    with and without actual cudagraph capture."""
    # torch.compile cudagraphs are difficult to test
    # the rng updating bc is sensitive to duration of pending backwards, etc.
    # this is a short repro to mimic the runtime wrappers integration
    # and show that updating the backward rng state with cudagraphs works:
    def forward():
        state = torch.cuda.get_rng_state()
        perm = torch.randperm(10, device="cuda")
        return state, perm

    def backward(rng_state):
        # Classic (non-graphsafe) save/restore of the global RNG state.
        current_state = torch.cuda.get_rng_state()
        torch.cuda.set_rng_state(rng_state.cpu())
        perm = torch.randperm(10, device="cuda")
        torch.cuda.set_rng_state(current_state)
        return perm

    def normal_test():
        state, perm = forward()
        repro_perm = backward(state)
        return perm, repro_perm

    def graphsafe_forward():
        perm = torch.randperm(10, device="cuda")
        return perm

    def graphsafe_backward(generator, new_state):
        # Swap in the cloned state, sample, then restore — graphsafe variant.
        current_state = generator.graphsafe_get_state()
        generator.graphsafe_set_state(new_state)
        perm = torch.randperm(10, device="cuda")
        generator.graphsafe_set_state(current_state)
        return perm

    def graph_test(generator, capture_cuda_graph):
        if capture_cuda_graph:
            graph = torch.cuda.CUDAGraph()

        # state should be cloned before the graph
        old_state = generator.graphsafe_get_state()
        new_state = old_state.clone_state()

        if capture_cuda_graph:
            # state should be registered to the graph
            graph.register_generator_state(new_state)

            # only capturing the backward
            with torch.cuda.graph(graph):
                repro_perm = graphsafe_backward(generator, new_state)

        # some number of uneven forwards
        graphsafe_forward()
        graphsafe_forward()
        graphsafe_forward()

        # state prior to rng invocation
        state = generator.get_state()
        perm = graphsafe_forward()

        # Point the cloned state at the forward's pre-sample state, then
        # replay (or run) the backward to reproduce the same permutation.
        new_state.set_state(state)
        if capture_cuda_graph:
            graph.replay()
        else:
            repro_perm = graphsafe_backward(generator, new_state)

        return perm, repro_perm

    self.assertEqual(*normal_test())
    generator = torch.cuda.default_generators[0]
    self.assertEqual(*graph_test(generator, capture_cuda_graph=False))
    self.assertEqual(*graph_test(generator, capture_cuda_graph=True))
def test_cpu_and_cuda_rng(self):
    """Checkpointed CPU and CUDA RNG ops each replay identically in backward,
    routed through the appropriate rng HOP per device."""
    device = "cuda"
    ObserverMode = self._make_observer_mode()
    from torch._prims.rng_prims import (
        graphsafe_run_with_rng_state,
        run_and_save_rng_state,
        run_with_rng_state,
    )

    # Hook all three rng higher-order-ops so every replayed op output is recorded.
    for hop in [
        graphsafe_run_with_rng_state,
        run_and_save_rng_state,
        run_with_rng_state,
    ]:

        def make_impl(hop):
            # Closure captures `hop` by value (avoids late-binding in the loop).
            @hop.py_impl(ObserverMode)
            def _(mode, *args, **kwargs):
                with no_dispatch():
                    out = hop(*args, **kwargs)
                # Find the wrapped OpOverload among the hop's arguments.
                op = None
                for inp in itertools.chain(args, kwargs.values()):
                    if isinstance(inp, torch._ops.OpOverload):
                        op = inp
                        break
                assert op is not None
                if hop is run_and_save_rng_state:
                    # This hop returns (rng_state, result); record the result.
                    mode.op_outputs[op].append(out[1])
                else:
                    mode.op_outputs[op].append(out)
                return out

        make_impl(hop)

    obs = ObserverMode()

    def gn(x, y):
        return torch.sigmoid(torch.rand_like(x) * y) * x

    def gn2(x):
        # CPU-side rng op inside the checkpointed region.
        return x * torch.randperm(x.numel(), device=x.device).reshape(x.shape)

    def fn(x, y, z):
        x = torch.sin(x)
        x = torch.utils.checkpoint.checkpoint(gn, x, y, use_reentrant=True)
        x = torch.sin(x)
        z = torch.utils.checkpoint.checkpoint(gn2, z, use_reentrant=True)
        return x * z.cuda()

    aot_eager_decomp_partition = functools.partial(
        aot_eager_decomp_partition_with_mode, mode=obs
    )
    fn = torch.compile(fn, backend=aot_eager_decomp_partition)

    x = torch.randn(4, 4, device=device, requires_grad=True)
    y = torch.randn(4, 4, device=device, requires_grad=True)
    z = torch.randn(4, 4, requires_grad=True)

    fn(x, y, z).sum().backward()

    for op in [aten.rand.default, aten.randperm.default]:
        # forward + backward recompute -> 2 recordings, identical values.
        self.assertEqual(len(obs.op_outputs[op]), 2)
        self.assertEqual(
            obs.op_outputs[op][0],
            obs.op_outputs[op][1],
        )
        # randperm ran on cpu (z), rand ran on cuda (x).
        self.assertEqual(
            obs.op_outputs[op][0].device.type,
            "cpu" if op == aten.randperm.default else "cuda",
        )
@parametrize("order", (list(itertools.permutations([0, 1, 2]))))
def test_uneven_forward_backward(self, order):
    """Backward rng replay matches its own forward even when backwards run in a
    different order than the forwards (all interleavings parametrized)."""
    device = "cuda"
    ObserverMode = self._make_observer_mode()
    from torch._prims.rng_prims import graphsafe_run_with_rng_state

    # Key recordings by (run index, op) so runs can be compared independently.
    @graphsafe_run_with_rng_state.py_impl(ObserverMode)
    def _(mode, op, *args, **kwargs):
        with no_dispatch():
            out = graphsafe_run_with_rng_state(op, *args, **kwargs)
        mode.op_outputs[(mode.curr_run, op)].append(out)
        return out

    obs = ObserverMode()

    def gn(x, y):
        return torch.sigmoid(torch.rand_like(x) * y) * x

    def gn2(x):
        return x * torch.randperm(x.numel(), device=x.device).reshape(x.shape)

    def fn(x, y):
        x = torch.sin(x)
        x = torch.utils.checkpoint.checkpoint(gn, x, y, use_reentrant=True)
        x = torch.sin(x)
        x = torch.utils.checkpoint.checkpoint(gn2, x, use_reentrant=True)
        return x

    aot_eager_decomp_partition = functools.partial(
        aot_eager_decomp_partition_with_mode, mode=obs
    )
    fn_c = torch.compile(fn, backend=aot_eager_decomp_partition)

    torch.manual_seed(0)

    # Run all forwards first, then backwards in the parametrized order.
    outs = []
    for i in range(len(order)):
        obs.curr_run = i
        x = torch.randn(4, 4, device=device, requires_grad=True)
        y = torch.randn(4, 4, device=device, requires_grad=True)
        outs.append(fn_c(x, y))

    for idx in order:
        obs.curr_run = idx
        outs[idx].sum().backward()

    for run in range(len(order)):
        for op in (aten.rand.default, aten.randperm.default):
            # Each run records forward + backward-recompute, which must match.
            self.assertEqual(len(obs.op_outputs[(run, op)]), 2)
            self.assertEqual(
                obs.op_outputs[(run, op)][0],
                obs.op_outputs[(run, op)][1],
            )
            if run != 0:
                # Different runs must use different rng draws.
                self.assertNotEqual(
                    obs.op_outputs[(run - 1, op)][0],
                    obs.op_outputs[(run, op)][0],
                )
@config.patch(fallback_random=True)
@config.patch("test_configs.graphsafe_rng_func_ignores_fallback_random", True)
def _test_cudagraphs_aot_eager_compat_equal(self, device):
def gn(x, y):
return torch.sigmoid(torch.rand_like(x) * y) * x
def fn(x, y):
x = torch.sin(x)
x = torch.utils.checkpoint.checkpoint(gn, x, y, use_reentrant=True)
x = torch.sin(x)
return x
outs = []
grads = []
outs2 = []
grads2 = []
compile_fns = [
lambda fn: torch.compile(fn, backend="aot_eager_decomp_partition"),
lambda fn: torch.compile(fn, mode="reduce-overhead"),
]
for i, compile_fn in enumerate(compile_fns):
torch.manual_seed(0)
for _ in range(3):
x = torch.randn(4, 4, device=device, requires_grad=True)
y = torch.randn(4, 4, device=device, requires_grad=True)
out = compile_fn(fn)(x, y)
torch.cuda.synchronize()
out.sum().backward()
if i == 0:
outs.append(out.clone())
grads.append((x.grad.clone(), y.grad.clone()))
else:
outs2.append(out.clone())
grads2.append((x.grad.clone(), y.grad.clone()))
self.assertEqual(outs, outs2)
self.assertEqual(grads, grads2)
self.assertEqual(counters["inductor"]["cudagraph_skips"], 0)
def test_cudagraphs_aot_eager_compat_equal(self):
self._test_cudagraphs_aot_eager_compat_equal(torch.device("cuda:0"))
@requires_multigpu()
def test_cudagraphs_aot_eager_compat_equal_device_one(self):
self._test_cudagraphs_aot_eager_compat_equal(torch.device("cuda:1"))
@config.patch(graph_partition=True)
def test_graph_partition_cudagraphs_aot_eager_compat_equal(self):
self._test_cudagraphs_aot_eager_compat_equal(torch.device("cuda:0"))
@requires_multigpu()
def test_multi_device(self):
def gn(x, y):
return torch.sigmoid(torch.rand_like(x) * y) * x
def fn(x, y):
x = torch.sin(x)
x = torch.utils.checkpoint.checkpoint(gn, x, y, use_reentrant=True)
x = torch.sin(x)
return x
def multi_fn(x, y, a, b):
return fn(x, y), fn(a, b)
x = torch.randn(4, 4, device="cuda:0", requires_grad=True)
y = torch.randn(4, 4, device="cuda:0", requires_grad=True)
a = torch.randn(4, 4, device="cuda:1", requires_grad=True)
b = torch.randn(4, 4, device="cuda:1", requires_grad=True)
# No errors. TODO - get graphs from logging, couldn't figure out how
multi_fn_c = torch.compile(multi_fn, backend="aot_eager_decomp_partition")
out = multi_fn_c(x, y, a, b)
out[0].sum().backward()
def test_retain_graph(self):
device = "cuda"
ObserverMode = self._make_observer_mode()
from torch._prims.rng_prims import graphsafe_run_with_rng_state
@graphsafe_run_with_rng_state.py_impl(ObserverMode)
def _(mode, op, *args, **kwargs):
with no_dispatch():
out = graphsafe_run_with_rng_state(op, *args, **kwargs)
mode.op_outputs[op].append(out)
return out
obs = ObserverMode()
def gn(x, y):
return torch.sigmoid(torch.rand_like(x) * y) * x
def fn(x, y):
x = torch.sin(x)
x = torch.utils.checkpoint.checkpoint(gn, x, y, use_reentrant=True)
x = torch.sin(x)
return x
x = torch.randn(4, 4, device=device, requires_grad=True)
y = torch.randn(4, 4, device=device, requires_grad=True)
aot_eager_decomp_partition = functools.partial(
aot_eager_decomp_partition_with_mode, mode=obs
)
fn = torch.compile(fn, backend=aot_eager_decomp_partition)
out = fn(x, y).sum()
out.backward(retain_graph=True)
out.backward()
self.assertEqual(len(obs.op_outputs[aten.rand.default]), 3)
self.assertEqual(
obs.op_outputs[aten.rand.default][0],
obs.op_outputs[aten.rand.default][1],
)
self.assertEqual(
obs.op_outputs[aten.rand.default][1],
obs.op_outputs[aten.rand.default][2],
)
instantiate_parametrized_tests(CudaGraphTreeTests)
instantiate_parametrized_tests(TestSAC)
if __name__ == "__main__":
from torch._inductor.test_case import run_tests
if not TEST_CUDA_GRAPH:
if __name__ == "__main__":
sys.exit(0)
raise unittest.SkipTest("cuda graph test is skipped")
if HAS_CUDA_AND_TRITON:
run_tests(needs="filelock")
| TestCase |
python | huggingface__transformers | src/transformers/models/glm/modeling_glm.py | {
"start": 15421,
"end": 15952
} | class ____(PreTrainedModel):
config: GlmConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["GlmDecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": GlmDecoderLayer,
"attentions": GlmAttention,
}
@auto_docstring
| GlmPreTrainedModel |
python | pytorch__pytorch | torch/ao/quantization/observer.py | {
"start": 26327,
"end": 34261
} | class ____(UniformQuantizationObserverBase):
r"""Observer module for computing the quantization parameters based on the
running per channel min and max values.
This observer uses the tensor min/max statistics to compute the per channel
quantization parameters. The module records the running minimum and maximum
of incoming tensors, and uses this statistic to compute the quantization
parameters.
Args:
ch_axis: Channel axis
dtype: dtype argument to the `quantize` node needed to implement the
reference model spec.
qscheme: Quantization scheme to be used
reduce_range: Reduces the range of the quantized data type by 1 bit
quant_min: Minimum quantization value. If unspecified, it will follow the 8-bit setup.
quant_max: Maximum quantization value. If unspecified, it will follow the 8-bit setup.
eps: Epsilon value for float32, Defaults to `torch.finfo(torch.float32).eps`.
The quantization parameters are computed the same way as in
:class:`~torch.ao.quantization.observer.MinMaxObserver`, with the difference
that the running min/max values are stored per channel.
Scales and zero points are thus computed per channel as well.
.. note:: If the running minimum equals to the running maximum, the scales
and zero_points are set to 1.0 and 0.
"""
min_val: torch.Tensor
max_val: torch.Tensor
def __init__(
self,
ch_axis=0,
dtype=torch.quint8,
qscheme=torch.per_channel_affine,
reduce_range=False,
quant_min=None,
quant_max=None,
factory_kwargs=None,
eps=torch.finfo(torch.float32).eps,
is_dynamic=False,
**kwargs,
) -> None:
if not is_per_channel(qscheme):
raise NotImplementedError(
"PerChannelMinMaxObserver's qscheme only support \
torch.per_channel_symmetric, torch.per_channel_affine and torch.per_channel_affine_float_qparams."
)
if is_dynamic:
raise NotImplementedError(
"PerChannelMinMaxObserver doesn't support dynamic quantization"
)
super().__init__(
dtype=dtype,
qscheme=qscheme,
reduce_range=reduce_range,
quant_min=quant_min,
quant_max=quant_max,
factory_kwargs=factory_kwargs,
eps=eps,
is_dynamic=is_dynamic,
**kwargs,
)
factory_kwargs = torch.nn.factory_kwargs(factory_kwargs)
self.ch_axis = ch_axis
self.register_buffer("min_val", torch.tensor([], **factory_kwargs))
self.register_buffer("max_val", torch.tensor([], **factory_kwargs))
if (
self.qscheme == torch.per_channel_symmetric
and self.reduce_range
and self.dtype == torch.quint8
):
raise NotImplementedError(
"Cannot reduce range for symmetric quantization for quint8"
)
def forward(self, x_orig):
return self._forward(x_orig)
def _forward(self, x_orig):
if x_orig.numel() == 0:
return x_orig
x = x_orig.detach() # avoid keeping autograd tape
min_val = self.min_val
max_val = self.max_val
x_dim = x.size()
new_axis_list = [i for i in range(len(x_dim))] # noqa: C416
new_axis_list[self.ch_axis] = 0
new_axis_list[0] = self.ch_axis
y = x.permute(new_axis_list)
# Need to match dtype of min/max because the updates to buffers
# are done in place and types need to match for comparisons
y = y.to(self.min_val.dtype)
y = torch.flatten(y, start_dim=1)
if min_val.numel() == 0 or max_val.numel() == 0:
min_val, max_val = torch.aminmax(y, dim=1)
else:
min_val_cur, max_val_cur = torch.aminmax(y, dim=1)
min_val = torch.min(min_val_cur, min_val)
max_val = torch.max(max_val_cur, max_val)
self.min_val.resize_(min_val.shape)
self.max_val.resize_(max_val.shape)
self.min_val.copy_(min_val)
self.max_val.copy_(max_val)
return x_orig
@torch.jit.export
def calculate_qparams(self): # type: ignore[override]
return self._calculate_qparams(self.min_val, self.max_val)
def extra_repr(self):
return f"min_val={self.min_val}, max_val={self.max_val}"
def _load_from_state_dict(
self,
state_dict: dict[str, Any],
prefix: str,
local_metadata: dict[str, torch.Tensor],
strict: bool,
missing_keys: list[str],
unexpected_keys: list[str],
error_msgs: list[str],
):
version = local_metadata.get("version")
if version is not None and version < 3:
local_state = ["min_vals", "max_vals"]
expected_min_name = "min_vals"
expected_max_name = "max_vals"
else:
local_state = ["min_val", "max_val"]
expected_min_name = "min_val"
expected_max_name = "max_val"
for name in local_state:
key = prefix + name
if key in state_dict:
val = state_dict[key]
# Custom handling to allow loading min_val or max_val
# of size N into uninitialized buffers of size 0. The
# buffers are resized here, and the values are copied in
# the default state_dict loading code of the parent.
if name == expected_min_name:
self.min_val.resize_(val.shape)
elif name == expected_max_name:
self.max_val.resize_(val.shape)
else:
warnings.warn(
f"Observer load_from_state_dict got unexpected name {name}",
stacklevel=2,
)
# For torchscript module we need to update the attributes here since we do not
# call the `_load_from_state_dict` function defined module.py
if torch.jit.is_scripting():
if name == expected_min_name:
self.min_val.copy_(val)
elif name == expected_max_name:
self.max_val.copy_(val)
else:
warnings.warn(
f"Observer load_from_state_dict got unexpected name {name}",
stacklevel=2,
)
elif strict:
missing_keys.append(key)
if not torch.jit.is_scripting():
super()._load_from_state_dict(
state_dict,
prefix,
local_metadata,
False,
missing_keys,
unexpected_keys,
error_msgs,
)
def _load_from_state_dict_script(
self,
state_dict: dict[str, Any],
prefix: str,
local_metadata: dict[str, torch.Tensor],
strict: bool,
missing_keys: list[str],
unexpected_keys: list[str],
error_msgs: list[str],
):
self._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
@torch.jit.export
def reset_min_max_vals(self):
"""Resets the min/max values."""
# This used to be torch.ones but that does not work because
# JIT compiler can optimize it via common subexpression elimination
# in which case both min_val and max_val point to the same tensor.
self.min_val = torch.rand(
0,
)
self.max_val = torch.rand(
0,
)
| PerChannelMinMaxObserver |
python | getsentry__sentry | src/sentry/api/endpoints/organization_events_spans_performance.py | {
"start": 14884,
"end": 15331
} | class ____:
id: str
start_timestamp: float
finish_timestamp: float
exclusive_time: float
trace_id: str
def serialize(self) -> Any:
return {
"id": self.id,
"startTimestamp": self.start_timestamp,
"finishTimestamp": self.finish_timestamp,
"exclusiveTime": self.exclusive_time,
"trace": self.trace_id,
}
@dataclasses.dataclass(frozen=True)
| ExampleSpan |
python | pytorch__pytorch | .github/scripts/generate_ci_workflows.py | {
"start": 1397,
"end": 2956
} | class ____:
os: str
build_configs: list[dict[str, str]]
package_type: str
# Optional fields
build_environment: str = ""
ciflow_config: CIFlowConfig = field(default_factory=CIFlowConfig)
is_scheduled: str = ""
branches: str = "nightly"
# Mainly for macos
macos_runner: str = "macos-14-xlarge"
# Mainly used for libtorch builds
build_variant: str = ""
def __post_init__(self) -> None:
if self.build_environment == "":
self.build_environment = "-".join(
item
for item in [self.os, "binary", self.package_type, self.build_variant]
if item != ""
)
def generate_workflow_file(self, workflow_template: jinja2.Template) -> None:
output_file_path = (
GITHUB_DIR
/ f"workflows/generated-{self.build_environment}-{self.branches}.yml"
)
with open(output_file_path, "w") as output_file:
GENERATED = "generated" # Note that please keep the variable GENERATED otherwise phabricator will hide the whole file
output_file.writelines([f"# @{GENERATED} DO NOT EDIT MANUALLY\n"])
try:
content = workflow_template.render(asdict(self))
except Exception as e:
print(f"Failed on template: {workflow_template}", file=sys.stderr)
raise e
output_file.write(content)
if content[-1] != "\n":
output_file.write("\n")
print(output_file_path)
| BinaryBuildWorkflow |
python | getsentry__sentry | src/sentry/release_health/base.py | {
"start": 5501,
"end": 5577
} | class ____(DurationPercentiles, UserCounts):
pass
| UserCountsAndPercentiles |
python | great-expectations__great_expectations | tests/integration/test_utils/data_source_config/mssql.py | {
"start": 608,
"end": 1400
} | class ____(DataSourceTestConfig):
@property
@override
def label(self) -> str:
return "mssql"
@property
@override
def pytest_mark(self) -> pytest.MarkDecorator:
return pytest.mark.mssql
@override
def create_batch_setup(
self,
request: pytest.FixtureRequest,
data: pd.DataFrame,
extra_data: Mapping[str, pd.DataFrame],
context: AbstractDataContext,
engine_manager: Optional[SessionSQLEngineManager] = None,
) -> BatchTestSetup:
return MSSQLBatchTestSetup(
data=data,
config=self,
extra_data=extra_data,
table_name=self.table_name,
context=context,
engine_manager=engine_manager,
)
| MSSQLDatasourceTestConfig |
python | doocs__leetcode | solution/0900-0999/0932.Beautiful Array/Solution.py | {
"start": 0,
"end": 310
} | class ____:
def beautifulArray(self, n: int) -> List[int]:
if n == 1:
return [1]
left = self.beautifulArray((n + 1) >> 1)
right = self.beautifulArray(n >> 1)
left = [x * 2 - 1 for x in left]
right = [x * 2 for x in right]
return left + right
| Solution |
python | ray-project__ray | doc/source/ray-core/doc_code/direct_transport_nixl.py | {
"start": 1601,
"end": 2480
} | class ____:
def __init__(self):
self.tensor1 = torch.tensor([1, 2, 3])
self.tensor2 = torch.tensor([4, 5, 6])
self.tensor3 = torch.tensor([7, 8, 9])
@ray.method(tensor_transport="nixl")
def send_dict1(self):
return {"round1-1": self.tensor1, "round1-2": self.tensor2}
@ray.method(tensor_transport="nixl")
def send_dict2(self):
return {"round2-1": self.tensor1, "round2-3": self.tensor3}
def sum_dict(self, dict):
return sum(v.sum().item() for v in dict.values())
sender, receiver = Actor.remote(), Actor.remote()
ref1 = sender.send_dict1.remote()
result1 = receiver.sum_dict.remote(ref1)
print(ray.get(result1))
ref2 = sender.send_dict2.remote()
result2 = receiver.sum_dict.remote(ref2)
try:
print(ray.get(result2))
except ValueError as e:
print("Error caught:", e)
# __nixl_limitations_end__
| Actor |
python | mlflow__mlflow | tests/utils/test_annotations.py | {
"start": 1139,
"end": 1273
} | class ____:
a: int
b: int
def add(self):
return self.a + self.b
@deprecated()
@dataclass
| AnotherDeprecatedDataClass |
python | jazzband__django-oauth-toolkit | oauth2_provider/models.py | {
"start": 22536,
"end": 24339
} | class ____(models.Model):
class Meta:
abstract = True
constraints = [
models.UniqueConstraint(
fields=["device_code"],
name="%(app_label)s_%(class)s_unique_device_code",
),
]
AUTHORIZED = "authorized"
AUTHORIZATION_PENDING = "authorization-pending"
EXPIRED = "expired"
DENIED = "denied"
DEVICE_FLOW_STATUS = (
(AUTHORIZED, _("Authorized")),
(AUTHORIZATION_PENDING, _("Authorization pending")),
(EXPIRED, _("Expired")),
(DENIED, _("Denied")),
)
id = models.BigAutoField(primary_key=True)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
related_name="%(app_label)s_%(class)s",
null=True,
blank=True,
on_delete=models.CASCADE,
)
device_code = models.CharField(max_length=100, unique=True)
user_code = models.CharField(max_length=100)
scope = models.CharField(max_length=64, null=True)
interval = models.IntegerField(default=5)
expires = models.DateTimeField()
status = models.CharField(
max_length=64, blank=True, choices=DEVICE_FLOW_STATUS, default=AUTHORIZATION_PENDING
)
client_id = models.CharField(max_length=100, db_index=True)
last_checked = models.DateTimeField(auto_now=True)
def is_expired(self):
"""
Check device flow session expiration and set the status to "expired" if current time
is past the "expires" deadline.
"""
if self.status == self.EXPIRED:
return True
now = datetime.now(tz=dt_timezone.utc)
if now >= self.expires:
self.status = self.EXPIRED
self.save(update_fields=["status"])
return True
return False
| AbstractDeviceGrant |
python | wandb__wandb | wandb/vendor/pygments/lexers/asm.py | {
"start": 5511,
"end": 5856
} | class ____(DelegatingLexer):
"""
For the output of 'objdump -Sr on compiled D files'
"""
name = 'd-objdump'
aliases = ['d-objdump']
filenames = ['*.d-objdump']
mimetypes = ['text/x-d-objdump']
def __init__(self, **options):
super(DObjdumpLexer, self).__init__(DLexer, ObjdumpLexer, **options)
| DObjdumpLexer |
python | spulec__freezegun | tests/test_datetimes.py | {
"start": 14632,
"end": 14776
} | class ____: # type: ignore
def __call__(self, *args: Any, **kws: Any) -> Any:
return (args, kws)
@freeze_time("2012-01-14")
| Callable |
python | dagster-io__dagster | helm/dagster/schema/schema/charts/dagster/subschema/ingress.py | {
"start": 268,
"end": 434
} | class ____(BaseModel):
enabled: bool
secretName: str
# Enforce as HTTPIngressPath: see https://github.com/dagster-io/dagster/issues/3184
| IngressTLSConfiguration |
python | lazyprogrammer__machine_learning_examples | rl2/mountaincar/pg_theano.py | {
"start": 1262,
"end": 1813
} | class ____:
def __init__(self, M1, M2, f=T.nnet.relu, use_bias=True, zeros=False):
if zeros:
W = np.zeros((M1, M2))
else:
W = np.random.randn(M1, M2) * np.sqrt(2. / M1)
self.W = theano.shared(W)
self.params = [self.W]
self.use_bias = use_bias
if use_bias:
self.b = theano.shared(np.zeros(M2))
self.params += [self.b]
self.f = f
def forward(self, X):
if self.use_bias:
a = X.dot(self.W) + self.b
else:
a = X.dot(self.W)
return self.f(a)
# approximates pi(a | s)
| HiddenLayer |
python | TheAlgorithms__Python | graphs/bidirectional_a_star.py | {
"start": 1661,
"end": 4805
} | class ____:
"""
>>> astar = AStar((0, 0), (len(grid) - 1, len(grid[0]) - 1))
>>> (astar.start.pos_y + delta[3][0], astar.start.pos_x + delta[3][1])
(0, 1)
>>> [x.pos for x in astar.get_successors(astar.start)]
[(1, 0), (0, 1)]
>>> (astar.start.pos_y + delta[2][0], astar.start.pos_x + delta[2][1])
(1, 0)
>>> astar.retrace_path(astar.start)
[(0, 0)]
>>> astar.search() # doctest: +NORMALIZE_WHITESPACE
[(0, 0), (1, 0), (2, 0), (2, 1), (2, 2), (2, 3), (3, 3),
(4, 3), (4, 4), (5, 4), (5, 5), (6, 5), (6, 6)]
"""
def __init__(self, start: TPosition, goal: TPosition):
self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
self.open_nodes = [self.start]
self.closed_nodes: list[Node] = []
self.reached = False
def search(self) -> list[TPosition]:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
current_node = self.open_nodes.pop(0)
if current_node.pos == self.target.pos:
return self.retrace_path(current_node)
self.closed_nodes.append(current_node)
successors = self.get_successors(current_node)
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(child_node)
else:
# retrieve the best current path
better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(child_node)
else:
self.open_nodes.append(better_node)
return [self.start.pos]
def get_successors(self, parent: Node) -> list[Node]:
"""
Returns a list of successors (both in the grid and free spaces)
"""
successors = []
for action in delta:
pos_x = parent.pos_x + action[1]
pos_y = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
pos_x,
pos_y,
self.target.pos_y,
self.target.pos_x,
parent.g_cost + 1,
parent,
)
)
return successors
def retrace_path(self, node: Node | None) -> list[TPosition]:
"""
Retrace the path from parents to parents until start node
"""
current_node = node
path = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x))
current_node = current_node.parent
path.reverse()
return path
| AStar |
python | dagster-io__dagster | python_modules/automation/automation_tests/dagster_docs_tests/test_common_errors.py | {
"start": 10633,
"end": 12494
} | class ____:
"""Test detection of structural issues in docstrings."""
# Using function-based validation approach
def test_empty_sections(self):
"""Test detection of empty sections."""
docstring = '''"""Function with empty sections.
Args:
# Empty section - no parameters listed
Returns:
# Another empty section
Raises:
# Yet another empty section
"""'''
result = validate_docstring_text(docstring, "test.function")
# Should detect empty sections or comment-only content
assert result.has_warnings() or result.has_errors()
def test_duplicate_sections(self):
"""Test detection of duplicate sections."""
docstring = '''"""Function with duplicate sections.
Args:
param1: First parameter
Args:
param2: Duplicate Args section
Returns:
First return description
Returns:
Duplicate Returns section
"""'''
result = validate_docstring_text(docstring, "test.function")
# Should detect duplicate section headers
assert result.has_warnings() or result.has_errors()
def test_sections_in_wrong_order(self):
"""Test that unusual section ordering doesn't break parsing."""
docstring = '''"""Function with sections in unusual order.
Returns:
Return value described before Args
Raises:
Exception described before Args
Args:
param1: Parameter described last
Examples:
Example shown at the end
"""'''
result = validate_docstring_text(docstring, "test.function")
# Should still be valid RST (section order is not enforced by RST)
assert result.parsing_successful
| TestStructuralErrors |
python | sphinx-doc__sphinx | sphinx/util/nodes.py | {
"start": 1093,
"end": 26567
} | class ____(Generic[N]):
"""A helper class for Node.findall().
It checks that the given node is an instance of the specified node-classes and
has the specified node-attributes.
For example, following example searches ``reference`` node having ``refdomain``
and ``reftype`` attributes::
matcher = NodeMatcher(nodes.reference, refdomain='std', reftype='citation')
matcher.findall(doctree)
# => [<reference ...>, <reference ...>, ...]
A special value ``typing.Any`` matches any kind of node-attributes. For example,
following example searches ``reference`` node having ``refdomain`` attributes::
matcher = NodeMatcher(nodes.reference, refdomain=Any)
matcher.findall(doctree)
# => [<reference ...>, <reference ...>, ...]
"""
def __init__(self, *node_classes: type[N], **attrs: Any) -> None:
self.classes = node_classes
self.attrs = attrs
def match(self, node: Node) -> bool:
try:
if self.classes and not isinstance(node, self.classes):
return False
if self.attrs:
if not isinstance(node, nodes.Element):
return False
for key, value in self.attrs.items():
if key not in node:
return False
elif value is Any:
continue
elif node.get(key) != value:
return False
return True
except Exception:
# for non-Element nodes
return False
def __call__(self, node: Node) -> bool:
return self.match(node)
def findall(self, node: Node) -> Iterator[N]:
"""An alternative to `Node.findall` with improved type safety.
While the `NodeMatcher` object can be used as an argument to `Node.findall`, doing so
confounds type checkers' ability to determine the return type of the iterator.
"""
for found in node.findall(self):
yield cast('N', found)
def get_full_module_name(node: Node) -> str:
"""Return full module dotted path like: 'docutils.nodes.paragraph'
:param nodes.Node node: target node
:return: full module dotted path
"""
return f'{node.__module__}.{node.__class__.__name__}'
def repr_domxml(node: Node, length: int = 80) -> str:
"""Return DOM XML representation of the specified node like:
'<paragraph translatable="False"><inline classes="versionadded">Added in version...'
:param nodes.Node node: target node
:param int length:
length of return value to be striped. if false-value is specified, repr_domxml
returns full of DOM XML representation.
:return: DOM XML representation
"""
try:
text = node.asdom().toxml()
except Exception:
text = str(node)
if length and len(text) > length:
text = text[:length] + '...'
return text
def apply_source_workaround(node: Element) -> None:
# workaround: nodes.term have wrong rawsource if classifier is specified.
# The behavior of docutils-0.11, 0.12 is:
# * when ``term text : classifier1 : classifier2`` is specified,
# * rawsource of term node will have: ``term text : classifier1 : classifier2``
# * rawsource of classifier node will be None
if isinstance(node, nodes.classifier) and not node.rawsource:
logger.debug(
'[i18n] PATCH: %r to have source, line and rawsource: %s',
get_full_module_name(node),
repr_domxml(node),
)
definition_list_item = node.parent
node.source = definition_list_item.source
node.line = definition_list_item.line - 1 # type: ignore[operator]
node.rawsource = node.astext() # set 'classifier1' (or 'classifier2')
elif isinstance(node, nodes.classifier) and not node.source:
# docutils-0.15 fills in rawsource attribute, but not in source.
node.source = node.parent.source
if isinstance(node, nodes.image) and node.source is None:
logger.debug(
'[i18n] PATCH: %r to have source, line: %s',
get_full_module_name(node),
repr_domxml(node),
)
node.source, node.line = node.parent.source, node.parent.line
if isinstance(node, nodes.title) and node.source is None:
logger.debug(
'[i18n] PATCH: %r to have source: %s',
get_full_module_name(node),
repr_domxml(node),
)
node.source, node.line = node.parent.source, node.parent.line
if isinstance(node, nodes.term):
logger.debug(
'[i18n] PATCH: %r to have rawsource: %s',
get_full_module_name(node),
repr_domxml(node),
)
# strip classifier from rawsource of term
for classifier in reversed(list(node.parent.findall(nodes.classifier))):
node.rawsource = re.sub(
r'\s*:\s*%s' % re.escape(classifier.astext()), '', node.rawsource
)
if isinstance(node, nodes.topic) and node.source is None:
# docutils-0.18 does not fill the source attribute of topic
logger.debug(
'[i18n] PATCH: %r to have source, line: %s',
get_full_module_name(node),
repr_domxml(node),
)
node.source, node.line = node.parent.source, node.parent.line
# workaround: literal_block under bullet list
# See: https://github.com/sphinx-doc/sphinx/issues/4913
if isinstance(node, nodes.literal_block) and node.source is None:
with contextlib.suppress(ValueError):
node.source = get_node_source(node)
# workaround: recommonmark-0.2.0 doesn't set rawsource attribute
if not node.rawsource:
node.rawsource = node.astext()
if node.source and node.rawsource:
return
# workaround: some docutils nodes doesn't have source, line.
if isinstance(
node,
(
# https://github.com/sphinx-doc/sphinx/issues/1305 rubric directive
nodes.rubric,
# https://github.com/sphinx-doc/sphinx/issues/1477 line node
nodes.line,
# https://github.com/sphinx-doc/sphinx/issues/3093 image directive in substitution
nodes.image,
# https://github.com/sphinx-doc/sphinx/issues/3335 field list syntax
nodes.field_name,
),
):
logger.debug(
'[i18n] PATCH: %r to have source and line: %s',
get_full_module_name(node),
repr_domxml(node),
)
try:
node.source = get_node_source(node)
except ValueError:
node.source = ''
node.line = 0 # need fix docutils to get `node.line`
return
IGNORED_NODES = (
nodes.Invisible,
nodes.literal_block,
nodes.doctest_block,
addnodes.versionmodified,
# XXX there are probably more
)
def is_translatable(node: Node) -> bool:
if isinstance(node, addnodes.translatable):
return True
# image node marked as translatable or having alt text
if isinstance(node, nodes.image) and (node.get('translatable') or node.get('alt')):
return True
if isinstance(node, nodes.Inline) and 'translatable' not in node: # type: ignore[operator]
# inline node must not be translated if 'translatable' is not set
return False
if isinstance(node, nodes.TextElement):
if not node.source:
logger.debug(
'[i18n] SKIP %r because no node.source: %s',
get_full_module_name(node),
repr_domxml(node),
)
return False # built-in message
if isinstance(node, IGNORED_NODES) and 'translatable' not in node:
logger.debug(
'[i18n] SKIP %r because node is in IGNORED_NODES '
"and no node['translatable']: %s",
get_full_module_name(node),
repr_domxml(node),
)
return False
if not node.get('translatable', True):
# not(node['translatable'] == True or node['translatable'] is None)
logger.debug(
"[i18n] SKIP %r because not node['translatable']: %s",
get_full_module_name(node),
repr_domxml(node),
)
return False
# <field_name>orphan</field_name>
# XXX ignore all metadata (== docinfo)
if isinstance(node, nodes.field_name) and (node.children[0] == 'orphan'):
logger.debug(
'[i18n] SKIP %r because orphan node: %s',
get_full_module_name(node),
repr_domxml(node),
)
return False
return True
return isinstance(node, nodes.meta)
LITERAL_TYPE_NODES = (
nodes.literal_block,
nodes.doctest_block,
nodes.math_block,
nodes.raw,
)
IMAGE_TYPE_NODES = (
nodes.image,
) # fmt: skip
def _clean_extracted_message(text: str) -> str:
"""Remove trailing backslashes from each line of *text*."""
if '\\' in text:
# TODO(picnixz): if possible, find a regex alternative
# that is not vulnerable to a ReDOS (the code below is
# equivalent to re.sub(r'[ \t]*\\[ \t]*$', text, re.MULTILINE)).
buffer = StringIO()
for line in text.splitlines(keepends=True):
split = line.rsplit('\\', maxsplit=1)
if len(split) == 2:
prefix, suffix = split
if re.match(r'^[ \t]*\s$', suffix):
# The line ends with some NL character, preceded by
# one or more whitespaces (to be dropped), the backslash,
# and possibly other whitespaces on its left.
buffer.write(prefix.rstrip(' \t'))
buffer.write(suffix.lstrip(' \t'))
elif not suffix:
# backslash is at the end of the LAST line
buffer.write(prefix.rstrip(' \t'))
else:
# backslash is is in the middle of the line
buffer.write(line)
else:
buffer.write(line)
text = buffer.getvalue()
return text.replace('\n', ' ').strip()
def extract_messages(doctree: Element) -> Iterable[tuple[Element, str]]:
"""Extract translatable messages from a document tree."""
for node in doctree.findall(is_translatable):
if isinstance(node, addnodes.translatable):
for msg in node.extract_original_messages():
yield node, msg # type: ignore[misc]
continue
if isinstance(node, LITERAL_TYPE_NODES):
msg = node.rawsource
if not msg:
msg = node.astext()
elif isinstance(node, nodes.image):
if node.get('alt'):
yield node, node['alt']
if node.get('translatable'):
image_uri = node.get('original_uri', node['uri'])
msg = f'.. image:: {image_uri}'
else:
msg = ''
elif isinstance(node, nodes.meta):
msg = node['content']
else:
text = node.rawsource # type: ignore[attr-defined]
msg = _clean_extracted_message(text)
# XXX nodes rendering empty are likely a bug in sphinx.addnodes
if msg:
yield node, msg # type: ignore[misc]
def get_node_source(node: Element) -> str:
for pnode in traverse_parent(node):
if pnode.source:
return pnode.source
msg = 'node source not found'
raise ValueError(msg)
def get_node_line(node: Element) -> int:
for pnode in traverse_parent(node):
if pnode.line:
return pnode.line
msg = 'node line not found'
raise ValueError(msg)
def traverse_parent(node: Element, cls: Any = None) -> Iterable[Element]:
while node:
if cls is None or isinstance(node, cls):
yield node
node = node.parent
def get_prev_node(node: Node) -> Node | None:
pos = node.parent.index(node)
if pos > 0:
return node.parent[pos - 1]
else:
return None
def traverse_translatable_index(
doctree: Element,
) -> Iterable[tuple[Element, list[tuple[str, str, str, str, str | None]]]]:
"""Traverse translatable index node from a document tree."""
matcher = NodeMatcher(addnodes.index, inline=False)
for node in matcher.findall(doctree):
if 'raw_entries' in node:
entries = node['raw_entries']
else:
entries = node['entries']
yield node, entries
def nested_parse_with_titles(
    state: RSTState, content: StringList, node: Node, content_offset: int = 0
) -> str:
    """Parse *content* into *node*, allowing section titles.

    Unlike a plain ``state.nested_parse()``, titles in the parsed content
    need not match the decoration style of the calling document — useful
    when the content comes from another context, such as docstrings.

    Retained for compatibility and slated for deprecation in Sphinx 8;
    prefer ``nested_parse_to_nodes()``.
    """
    with _fresh_title_style_context(state):
        return state.nested_parse(content, content_offset, node, match_titles=True)
def clean_astext(node: Element) -> str:
    """Like ``node.astext()``, but ignore images and raw nodes.

    Works on a deep copy, so *node* itself is never modified.
    """
    copied = node.deepcopy()
    for image in copied.findall(nodes.image):
        image['alt'] = ''
    for raw_node in list(copied.findall(nodes.raw)):
        raw_node.parent.remove(raw_node)
    return copied.astext()
def split_explicit_title(text: str) -> tuple[bool, str, str]:
    """Split role content into title and target, if given.

    Returns ``(has_explicit_title, title, target)``; when no explicit
    title is present, title and target are both the input text.
    """
    match = explicit_title_re.match(text)
    if match is None:
        return False, text, text
    return True, match.group(1), match.group(2)
indextypes = ['single', 'pair', 'double', 'triple', 'see', 'seealso']
def process_index_entry(
entry: str,
targetid: str,
) -> list[tuple[str, str, str, str, str | None]]:
indexentries: list[tuple[str, str, str, str, str | None]] = []
entry = entry.strip()
oentry = entry
main = ''
if entry.startswith('!'):
main = 'main'
entry = entry[1:].lstrip()
for index_type in (
'module',
'keyword',
'operator',
'object',
'exception',
'statement',
'builtin',
):
if entry.startswith(f'{index_type}:'):
value = entry[len(index_type) + 1 :].strip()
if index_type == 'builtin':
value = f'built-in function; {value}'
else:
value = f'{index_type}; {value}'
msg = __(
'%r is no longer supported for index entries (from entry %r). '
"Use 'pair: %s' instead."
) % (index_type, entry, value)
raise ValueError(msg)
for index_type in indextypes:
if entry.startswith(f'{index_type}:'):
value = entry[len(index_type) + 1 :].strip()
if index_type == 'double':
index_type = 'pair'
indexentries.append((index_type, value, targetid, main, None))
break
# shorthand notation for single entries
else:
for value in oentry.split(','):
value = value.strip()
main = ''
if value.startswith('!'):
main = 'main'
value = value[1:].lstrip()
if not value:
continue
indexentries.append(('single', value, targetid, main, None))
return indexentries
def inline_all_toctrees(
    builder: Builder,
    docnameset: set[str],
    docname: str,
    tree: nodes.document,
    colorfunc: Callable[[str], str],
    traversed: list[str],
    indent: str = '',
) -> nodes.document:
    """Inline all toctrees in the *tree*.

    Record all docnames in *docnameset*, and output docnames with *colorfunc*.
    Works on (and returns) a deep copy, so the input *tree* is unchanged.
    *traversed* tracks documents already inlined to prevent infinite
    recursion on circular toctrees.
    """
    tree = tree.deepcopy()
    for toctreenode in list(tree.findall(addnodes.toctree)):
        newnodes = []
        includefiles = map(str, toctreenode['includefiles'])
        # NOTE(review): indent grows once per toctree node, not once per
        # recursion level, so logs of sibling toctrees indent progressively
        # — presumably cosmetic; confirm before changing.
        indent += ' '
        for includefile in includefiles:
            if includefile not in traversed:
                try:
                    # Mark as traversed *before* recursing so a circular
                    # reference back to this file is skipped.
                    traversed.append(includefile)
                    logger.info(indent + colorfunc(includefile))  # NoQA: G003
                    subtree = inline_all_toctrees(
                        builder,
                        docnameset,
                        includefile,
                        builder.env.get_doctree(includefile),
                        colorfunc,
                        traversed,
                        indent,
                    )
                    docnameset.add(includefile)
                except Exception:
                    # Missing/unreadable documents produce a warning instead
                    # of aborting the whole inlining pass.
                    logger.warning(
                        __('toctree contains ref to nonexisting file %r'),
                        includefile,
                        location=docname,
                        type='toc',
                        subtype='not_readable',
                    )
                else:
                    # Wrap the inlined subtree so consumers can tell where
                    # each source document begins.
                    sof = addnodes.start_of_file(docname=includefile)
                    sof.children = subtree.children
                    for sectionnode in sof.findall(nodes.section):
                        if 'docname' not in sectionnode:
                            sectionnode['docname'] = includefile
                    newnodes.append(sof)
        toctreenode.parent.replace(toctreenode, newnodes)
    return tree
def _make_id(string: str) -> str:
"""Convert `string` into an identifier and return it.
This function is a modified version of ``docutils.nodes.make_id()`` of
docutils-0.16.
Changes:
* Allow to use capital alphabet characters
* Allow to use dots (".") and underscores ("_") for an identifier
without a leading character.
# Author: David Goodger <goodger@python.org>
# Maintainer: docutils-develop@lists.sourceforge.net
# Copyright: This module has been placed in the public domain.
"""
id = string.translate(_non_id_translate_digraphs)
id = id.translate(_non_id_translate)
# get rid of non-ascii characters.
# 'ascii' lowercase to prevent problems with turkish locale.
id = unicodedata.normalize('NFKD', id).encode('ascii', 'ignore').decode('ascii')
# shrink runs of whitespace and replace by hyphen
id = _non_id_chars.sub('-', ' '.join(id.split()))
id = _non_id_at_ends.sub('', id)
return str(id)
_non_id_chars = re.compile('[^a-zA-Z0-9._]+')
_non_id_at_ends = re.compile('^[-0-9._]+|-+$')
_non_id_translate = {
0x00F8: 'o', # o with stroke
0x0111: 'd', # d with stroke
0x0127: 'h', # h with stroke
0x0131: 'i', # dotless i
0x0142: 'l', # l with stroke
0x0167: 't', # t with stroke
0x0180: 'b', # b with stroke
0x0183: 'b', # b with topbar
0x0188: 'c', # c with hook
0x018C: 'd', # d with topbar
0x0192: 'f', # f with hook
0x0199: 'k', # k with hook
0x019A: 'l', # l with bar
0x019E: 'n', # n with long right leg
0x01A5: 'p', # p with hook
0x01AB: 't', # t with palatal hook
0x01AD: 't', # t with hook
0x01B4: 'y', # y with hook
0x01B6: 'z', # z with stroke
0x01E5: 'g', # g with stroke
0x0225: 'z', # z with hook
0x0234: 'l', # l with curl
0x0235: 'n', # n with curl
0x0236: 't', # t with curl
0x0237: 'j', # dotless j
0x023C: 'c', # c with stroke
0x023F: 's', # s with swash tail
0x0240: 'z', # z with swash tail
0x0247: 'e', # e with stroke
0x0249: 'j', # j with stroke
0x024B: 'q', # q with hook tail
0x024D: 'r', # r with stroke
0x024F: 'y', # y with stroke
}
_non_id_translate_digraphs = {
0x00DF: 'sz', # ligature sz
0x00E6: 'ae', # ae
0x0153: 'oe', # ligature oe
0x0238: 'db', # db digraph
0x0239: 'qp', # qp digraph
}
def make_id(
    env: BuildEnvironment,
    document: nodes.document,
    prefix: str = '',
    term: str | None = None,
) -> str:
    """Generate an appropriate node_id for given *prefix* and *term*.

    Falls back to a serial-numbered id when *term* cannot produce a
    usable identifier or the candidate already exists in *document*.
    """
    if prefix:
        idformat = prefix + '-%s'
    else:
        idformat = (document.settings.id_prefix or 'id') + '%s'
    node_id = None
    if prefix and term:
        node_id = _make_id(idformat % term)
        if node_id == prefix:
            # *term* contributed nothing usable beyond the prefix.
            node_id = None
    elif term:
        # An empty result means *term* is not id-able; fall through.
        node_id = _make_id(term) or None
    while node_id is None or node_id in document.ids:
        node_id = idformat % env.new_serialno(prefix)
    return node_id
def find_pending_xref_condition(
    node: addnodes.pending_xref, condition: str
) -> Element | None:
    """Return the child pending_xref_condition matching *condition*, or None."""
    for child in node:
        if not isinstance(child, addnodes.pending_xref_condition):
            continue
        if child.get('condition') == condition:
            return child
    return None
def make_refnode(
    builder: Builder,
    fromdocname: str,
    todocname: str,
    targetid: str | None,
    child: Node | list[Node],
    title: str | None = None,
) -> nodes.reference:
    """Shortcut to create a reference node.

    Same-document references use ``refid``; cross-document references
    use a relative ``refuri`` (with a fragment when *targetid* is given).
    """
    node = nodes.reference('', '', internal=True)
    if fromdocname == todocname and targetid:
        node['refid'] = targetid
    elif targetid:
        relative_uri = builder.get_relative_uri(fromdocname, todocname)
        node['refuri'] = relative_uri + '#' + targetid
    else:
        node['refuri'] = builder.get_relative_uri(fromdocname, todocname)
    if title:
        node['reftitle'] = title
    node += child
    return node
def set_source_info(directive: Directive, node: Node) -> None:
    """Set *node*'s source and line from the directive's state machine."""
    node.source, node.line = directive.state_machine.get_source_and_line(
        directive.lineno
    )
def set_role_source_info(inliner: Inliner, lineno: int, node: Node) -> None:
    """Set *node*'s source and line from the inliner's reporter at *lineno*."""
    gsal = inliner.reporter.get_source_and_line  # type: ignore[attr-defined]
    node.source, node.line = gsal(lineno)
def copy_source_info(src: Element, dst: Element) -> None:
    """Best-effort copy of source/line information from *src* to *dst*.

    Silently does nothing when *src* (and its ancestors) carry no source.
    NOTE(review): if the source lookup succeeds but the line lookup raises,
    dst.source is updated while dst.line is not — presumably acceptable
    since partial info is better than none; confirm if this matters.
    """
    with contextlib.suppress(ValueError):
        dst.source = get_node_source(src)
        dst.line = get_node_line(src)
# Node types whose descendants must never be smart-quoted (code-like,
# math, raw, and explicitly opted-out content).
NON_SMARTQUOTABLE_PARENT_NODES = (
    nodes.FixedTextElement,
    nodes.literal,
    nodes.math,
    nodes.image,
    nodes.raw,
    nodes.problematic,
    addnodes.not_smartquotable,
)
def is_smartquotable(node: Node) -> bool:
    """Check whether the node is smart-quotable or not."""
    for ancestor in traverse_parent(node.parent):
        if isinstance(ancestor, NON_SMARTQUOTABLE_PARENT_NODES):
            return False
        if ancestor.get('support_smartquotes', None) is False:
            return False
    # No ancestor forbids it; the node itself may still opt out.
    return getattr(node, 'support_smartquotes', None) is not False
def process_only_nodes(document: Node, tags: Tags) -> None:
    """Filter ``only`` nodes which do not match *tags*."""
    for only_node in document.findall(addnodes.only):
        if not _only_node_keep_children(only_node, tags):
            # Replacing by [] would raise a "Losing ids" exception when a
            # target node precedes the only node, so docutils must be given
            # *something* to transfer the id to — even a throwaway comment.
            only_node.replace_self(nodes.comment())
        else:
            # An empty children list needs the same comment placeholder.
            only_node.replace_self(only_node.children or nodes.comment())
def _only_node_keep_children(node: addnodes.only, tags: Tags) -> bool:
    """Return whether the ``only`` node's children should be kept.

    On an evaluation error the children are kept (fail open) and a
    warning is emitted.
    """
    try:
        return tags.eval_condition(node['expr'])
    except Exception as exc:
        logger.warning(
            __('exception while evaluating only directive expression: %s'),
            exc,
            location=node,
        )
        return True
def _copy_except__document(el: Element) -> Element:
    """Monkey-patch ```nodes.Element.copy``` to not copy the ``_document``
    attribute.
    See: https://github.com/sphinx-doc/sphinx/issues/11116#issuecomment-1376767086
    """
    # Allocate without running __init__ so el._document is never touched.
    newnode = object.__new__(el.__class__)
    # set in Element.__init__()
    newnode.children = []
    newnode.rawsource = el.rawsource
    newnode.tagname = el.tagname
    # copied in Element.copy()
    # The listed bookkeeping attributes get shallow copies (v[:]) so that
    # mutating the copy's lists does not affect the original; all other
    # attribute values are shared.
    newnode.attributes = {
        k: (v if k not in {'ids', 'classes', 'names', 'dupnames', 'backrefs'} else v[:])
        for k, v in el.attributes.items()
    }
    newnode.line = el.line
    newnode.source = el.source
    return newnode
nodes.Element.copy = _copy_except__document  # type: ignore[assignment]
def _deepcopy(el: Element) -> Element:
    """Monkey-patch ```nodes.Element.deepcopy``` for speed."""
    # Shallow-copy the node itself (via the patched copy() above), then
    # recursively deep-copy the children and re-parent them.
    newnode = el.copy()
    newnode.children = [child.deepcopy() for child in el.children]
    for child in newnode.children:
        child.parent = newnode
        if el.document:
            # Propagate the owning document, filling in missing source/line
            # from the document's current position.
            child.document = el.document
            if child.source is None:
                child.source = el.document.current_source
            if child.line is None:
                child.line = el.document.current_line
    return newnode
nodes.Element.deepcopy = _deepcopy  # type: ignore[assignment]
| NodeMatcher |
python | sphinx-doc__sphinx | sphinx/testing/util.py | {
"start": 3007,
"end": 8684
} | class ____(sphinx.application.Sphinx):
"""A subclass of :class:`~sphinx.application.Sphinx` for tests.
The constructor uses some better default values for the initialization
parameters and supports arbitrary keywords stored in the :attr:`extras`
read-only mapping.
It is recommended to use::
@pytest.mark.sphinx('html', testroot='root')
def test(app):
app = ...
instead of::
def test():
app = SphinxTestApp('html', srcdir=srcdir)
In the former case, the 'app' fixture takes care of setting the source
directory, whereas in the latter, the user must provide it themselves.
"""
# see https://github.com/sphinx-doc/sphinx/pull/12089 for the
# discussion on how the signature of this class should be used
def __init__(
self,
/, # to allow 'self' as an extras
buildername: str = 'html',
srcdir: Path | None = None,
builddir: Path | None = None, # extra constructor argument
freshenv: bool = False, # argument is not in the same order as in the superclass
confoverrides: dict[str, Any] | None = None,
status: StringIO | None = None,
warning: StringIO | None = None,
tags: Sequence[str] = (),
docutils_conf: str | None = None, # extra constructor argument
parallel: int = 0,
# additional arguments at the end to keep the signature
verbosity: int = 0, # argument is not in the same order as in the superclass
warningiserror: bool = False, # argument is not in the same order as in the superclass
pdb: bool = False,
exception_on_warning: bool = False,
# unknown keyword arguments
**extras: Any,
) -> None:
self._builder_name = buildername
assert srcdir is not None
if verbosity == -1:
quiet = True
verbosity = 0
else:
quiet = False
if status is None:
# ensure that :attr:`status` is a StringIO and not sys.stdout
# but allow the stream to be /dev/null by passing verbosity=-1
status = None if quiet else StringIO()
elif not isinstance(status, StringIO):
err = f"'status' must be an io.StringIO object, got: {type(status)}"
raise TypeError(err)
if warning is None:
# ensure that :attr:`warning` is a StringIO and not sys.stderr
# but allow the stream to be /dev/null by passing verbosity=-1
warning = None if quiet else StringIO()
elif not isinstance(warning, StringIO):
err = f"'warning' must be an io.StringIO object, got: {type(warning)}"
raise TypeError(err)
self.docutils_conf_path = srcdir / 'docutils.conf'
if docutils_conf is not None:
self.docutils_conf_path.write_text(docutils_conf, encoding='utf8')
if builddir is None:
builddir = srcdir / '_build'
confdir = srcdir
outdir = builddir.joinpath(buildername)
outdir.mkdir(parents=True, exist_ok=True)
doctreedir = builddir.joinpath('doctrees')
doctreedir.mkdir(parents=True, exist_ok=True)
if confoverrides is None:
confoverrides = {}
self._saved_path = sys.path.copy()
self.extras: Mapping[str, Any] = MappingProxyType(extras)
"""Extras keyword arguments."""
try:
super().__init__(
srcdir,
confdir,
outdir,
doctreedir,
buildername,
confoverrides=confoverrides,
status=status,
warning=warning,
freshenv=freshenv,
warningiserror=warningiserror,
tags=tags,
verbosity=verbosity,
parallel=parallel,
pdb=pdb,
exception_on_warning=exception_on_warning,
)
except Exception:
self.cleanup()
raise
def _init_builder(self) -> None:
# override the default theme to 'basic' rather than 'alabaster'
# for test independence
if 'html_theme' in self.config._overrides:
pass # respect overrides
elif 'html_theme' in self.config and self.config.html_theme == 'alabaster':
self.config.html_theme = self.config._overrides.get('html_theme', 'basic')
super()._init_builder()
@property
def status(self) -> StringIO:
"""The in-memory text I/O for the application status messages."""
# sphinx.application.Sphinx uses StringIO for a quiet stream
assert isinstance(self._status, StringIO)
return self._status
@property
def warning(self) -> StringIO:
"""The in-memory text I/O for the application warning messages."""
# sphinx.application.Sphinx uses StringIO for a quiet stream
assert isinstance(self._warning, StringIO)
return self._warning
def cleanup(self, doctrees: bool = False) -> None:
sys.path[:] = self._saved_path
_clean_up_global_state()
try:
self.docutils_conf_path.unlink(missing_ok=True)
except OSError as exc:
if exc.errno != 30: # Ignore "read-only file system" errors
raise
def __repr__(self) -> str:
return f'<{self.__class__.__name__} buildername={self._builder_name!r}>'
def build(self, force_all: bool = False, filenames: Sequence[Path] = ()) -> None:
self.env._pickled_doctree_cache.clear()
super().build(force_all, filenames)
| SphinxTestApp |
python | getsentry__sentry | src/sentry/integrations/slack/message_builder/discover.py | {
"start": 162,
"end": 541
} | class ____(BlockSlackMessageBuilder):
def __init__(self, title: str, chart_url: str) -> None:
super().__init__()
self.title = title
self.chart_url = chart_url
def build(self) -> SlackBody:
return self._build_blocks(
self.get_image_block(self.chart_url, title=self.title, alt="Discover Chart")
)
| SlackDiscoverMessageBuilder |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-salesforce/unit_tests/integration/test_bulk_stream.py | {
"start": 3323,
"end": 29836
} | class ____(TestCase):
def setUp(self) -> None:
self._config = ConfigBuilder().client_id(_CLIENT_ID).client_secret(_CLIENT_SECRET).refresh_token(_REFRESH_TOKEN)
self._http_mocker = HttpMocker()
self._http_mocker.__enter__()
given_authentication(self._http_mocker, _CLIENT_ID, _CLIENT_SECRET, _REFRESH_TOKEN, _INSTANCE_URL, _ACCESS_TOKEN)
self._timeout = BulkSalesforceStream.DEFAULT_WAIT_TIMEOUT
def tearDown(self) -> None:
self._http_mocker.__exit__(None, None, None)
BulkSalesforceStream.DEFAULT_WAIT_TIMEOUT = self._timeout
@freezegun.freeze_time(_NOW.isoformat())
def test_when_read_then_create_job_and_extract_records_from_result(self) -> None:
given_stream(self._http_mocker, _BASE_URL, _STREAM_NAME, SalesforceDescribeResponseBuilder().field(_A_FIELD_NAME))
self._http_mocker.post(
_make_full_job_request([_A_FIELD_NAME]),
JobCreateResponseBuilder().with_id(_JOB_ID).build(),
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}"),
[
JobInfoResponseBuilder().with_id(_JOB_ID).with_state("InProgress").build(),
JobInfoResponseBuilder().with_id(_JOB_ID).with_state("UploadComplete").build(),
JobInfoResponseBuilder().with_id(_JOB_ID).with_state("JobComplete").build(),
],
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}/results"),
HttpResponse(f"{_A_FIELD_NAME}\nfield_value"),
)
delete_request = self._mock_delete_job(_JOB_ID)
output = read(_STREAM_NAME, SyncMode.full_refresh, self._config)
assert len(output.records) == 1
self._http_mocker.assert_number_of_calls(delete_request, 1)
@freezegun.freeze_time(_NOW.isoformat())
def test_given_null_bytes_when_read_then_remove_null_bytes(self) -> None:
given_stream(self._http_mocker, _BASE_URL, _STREAM_NAME, SalesforceDescribeResponseBuilder().field(_A_FIELD_NAME))
self._http_mocker.post(
_make_full_job_request([_A_FIELD_NAME]),
JobCreateResponseBuilder().with_id(_JOB_ID).build(),
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}"),
[
JobInfoResponseBuilder().with_id(_JOB_ID).with_state("JobComplete").build(),
],
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}/results"),
HttpResponse(b'"a_field"\n\x00"001\x004W000027f6UwQAI"\n\x00\x00'.decode()),
)
self._mock_delete_job(_JOB_ID)
output = read(_STREAM_NAME, SyncMode.full_refresh, self._config)
assert len(output.records) == 1
assert output.records[0].record.data[_A_FIELD_NAME] == "0014W000027f6UwQAI"
@freezegun.freeze_time(_NOW.isoformat())
def test_given_type_when_read_then_field_is_casted_with_right_type(self) -> None:
given_stream(self._http_mocker, _BASE_URL, _STREAM_NAME, SalesforceDescribeResponseBuilder().field(_A_FIELD_NAME, "boolean"))
self._http_mocker.post(
_make_full_job_request([_A_FIELD_NAME]),
JobCreateResponseBuilder().with_id(_JOB_ID).build(),
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}"),
[
JobInfoResponseBuilder().with_id(_JOB_ID).with_state("JobComplete").build(),
],
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}/results"),
HttpResponse('"a_field"\ntrue'),
)
self._mock_delete_job(_JOB_ID)
output = read(_STREAM_NAME, SyncMode.full_refresh, self._config)
assert len(output.records) == 1
assert type(output.records[0].record.data[_A_FIELD_NAME]) == bool
@freezegun.freeze_time(_NOW.isoformat())
def test_given_no_data_provided_when_read_then_field_is_none(self) -> None:
given_stream(
self._http_mocker, _BASE_URL, _STREAM_NAME, SalesforceDescribeResponseBuilder().field(_A_FIELD_NAME).field(_ANOTHER_FIELD_NAME)
)
self._http_mocker.post(
_make_full_job_request([_A_FIELD_NAME, _ANOTHER_FIELD_NAME]),
JobCreateResponseBuilder().with_id(_JOB_ID).build(),
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}"),
[
JobInfoResponseBuilder().with_id(_JOB_ID).with_state("JobComplete").build(),
],
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}/results"),
HttpResponse(f'"{_A_FIELD_NAME}","{_ANOTHER_FIELD_NAME}"\n,"another field value"'),
)
self._mock_delete_job(_JOB_ID)
output = read(_STREAM_NAME, SyncMode.full_refresh, self._config)
assert len(output.records) == 1
assert _A_FIELD_NAME not in output.records[0].record.data or output.records[0].record.data[_A_FIELD_NAME] is None
@freezegun.freeze_time(_NOW.isoformat())
def test_given_csv_unix_dialect_provided_when_read_then_parse_csv_properly(self) -> None:
given_stream(
self._http_mocker, _BASE_URL, _STREAM_NAME, SalesforceDescribeResponseBuilder().field(_A_FIELD_NAME).field(_ANOTHER_FIELD_NAME)
)
self._http_mocker.post(
_make_full_job_request([_A_FIELD_NAME, _ANOTHER_FIELD_NAME]),
JobCreateResponseBuilder().with_id(_JOB_ID).build(),
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}"),
[
JobInfoResponseBuilder().with_id(_JOB_ID).with_state("JobComplete").build(),
],
)
data = [
{_A_FIELD_NAME: "1", _ANOTHER_FIELD_NAME: '"first_name" "last_name"'},
{_A_FIELD_NAME: "2", _ANOTHER_FIELD_NAME: "'" + 'first_name"\n' + "'" + 'last_name\n"'},
{_A_FIELD_NAME: "3", _ANOTHER_FIELD_NAME: "first_name last_name"},
]
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}/results"),
HttpResponse(self._create_csv([_A_FIELD_NAME, _ANOTHER_FIELD_NAME], data, "unix")),
)
self._mock_delete_job(_JOB_ID)
output = read(_STREAM_NAME, SyncMode.full_refresh, self._config)
assert len(output.records) == 3
@freezegun.freeze_time(_NOW.isoformat())
def test_given_specific_encoding_when_read_then_parse_csv_properly(self) -> None:
given_stream(
self._http_mocker, _BASE_URL, _STREAM_NAME, SalesforceDescribeResponseBuilder().field(_A_FIELD_NAME).field(_ANOTHER_FIELD_NAME)
)
self._http_mocker.post(
_make_full_job_request([_A_FIELD_NAME, _ANOTHER_FIELD_NAME]),
JobCreateResponseBuilder().with_id(_JOB_ID).build(),
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}"),
[
JobInfoResponseBuilder().with_id(_JOB_ID).with_state("JobComplete").build(),
],
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}/results"),
HttpResponse(b'"\xc4"\n,"4"\n\x00,"\xca \xfc"'.decode("ISO-8859-1"), headers={"Content-Type": "text/csv; charset=ISO-8859-1"}),
)
self._mock_delete_job(_JOB_ID)
output = read(_STREAM_NAME, SyncMode.full_refresh, self._config)
assert output.records[0].record.data == {"Ä": "4"}
assert output.records[1].record.data == {"Ä": "Ê ü"}
@freezegun.freeze_time(_NOW.isoformat())
def test_given_locator_when_read_then_extract_records_from_both_pages(self):
given_stream(self._http_mocker, _BASE_URL, _STREAM_NAME, SalesforceDescribeResponseBuilder().field(_A_FIELD_NAME))
self._http_mocker.post(
_make_full_job_request([_A_FIELD_NAME]),
JobCreateResponseBuilder().with_id(_JOB_ID).build(),
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}"),
JobInfoResponseBuilder().with_id(_JOB_ID).with_state("JobComplete").build(),
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}/results"),
HttpResponse(f"{_A_FIELD_NAME}\nfield_value", headers={"Sforce-Locator": _SECOND_PAGE_LOCATOR}),
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}/results", query_params={"locator": _SECOND_PAGE_LOCATOR}),
HttpResponse(f"{_A_FIELD_NAME}\nanother_field_value"),
)
self._mock_delete_job(_JOB_ID)
output = read(_STREAM_NAME, SyncMode.full_refresh, self._config)
assert len(output.records) == 2
@freezegun.freeze_time(_NOW.isoformat())
def test_given_job_creation_have_transient_error_when_read_then_sync_properly(self):
given_stream(self._http_mocker, _BASE_URL, _STREAM_NAME, SalesforceDescribeResponseBuilder().field(_A_FIELD_NAME))
self._http_mocker.post(
_make_full_job_request([_A_FIELD_NAME]),
[
_RETRYABLE_RESPONSE,
JobCreateResponseBuilder().with_id(_JOB_ID).build(),
],
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}"),
JobInfoResponseBuilder().with_id(_JOB_ID).with_state("JobComplete").build(),
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}/results"),
HttpResponse(f"{_A_FIELD_NAME}\nfield_value"),
)
self._mock_delete_job(_JOB_ID)
output = read(_STREAM_NAME, SyncMode.full_refresh, self._config)
assert len(output.errors) == 0
assert len(output.records) == 1
@freezegun.freeze_time(_NOW.isoformat())
def test_given_job_polling_have_transient_error_when_read_then_sync_properly(self):
given_stream(self._http_mocker, _BASE_URL, _STREAM_NAME, SalesforceDescribeResponseBuilder().field(_A_FIELD_NAME))
self._http_mocker.post(
_make_full_job_request([_A_FIELD_NAME]),
JobCreateResponseBuilder().with_id(_JOB_ID).build(),
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}"),
[
_RETRYABLE_RESPONSE,
JobInfoResponseBuilder().with_id(_JOB_ID).with_state("JobComplete").build(),
],
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}/results"),
HttpResponse(f"{_A_FIELD_NAME}\nfield_value"),
)
self._mock_delete_job(_JOB_ID)
output = read(_STREAM_NAME, SyncMode.full_refresh, self._config)
assert len(output.errors) == 0
assert len(output.records) == 1
@freezegun.freeze_time(_NOW.isoformat())
def test_given_bulk_restrictions_when_read_then_switch_to_standard(self):
given_stream(self._http_mocker, _BASE_URL, _STREAM_NAME, SalesforceDescribeResponseBuilder().field(_A_FIELD_NAME))
self._http_mocker.post(
_make_full_job_request([_A_FIELD_NAME]),
HttpResponse("[{}]", 403),
)
self._http_mocker.get(
create_standard_http_request(_STREAM_NAME, [_A_FIELD_NAME]),
create_standard_http_response([_A_FIELD_NAME]),
)
output = read(_STREAM_NAME, SyncMode.full_refresh, self._config)
assert len(output.records) == 1
@freezegun.freeze_time(_NOW.isoformat())
def test_given_non_transient_error_on_job_creation_when_read_then_fail_sync(self):
given_stream(self._http_mocker, _BASE_URL, _STREAM_NAME, SalesforceDescribeResponseBuilder().field(_A_FIELD_NAME))
self._http_mocker.post(
_make_full_job_request([_A_FIELD_NAME]),
HttpResponse(
json.dumps(
[
{
"errorCode": "API_ERROR",
"message": "Implementation restriction... <can't complete the error message as I can't reproduce this issue>",
}
]
),
400,
),
)
output = read(_STREAM_NAME, SyncMode.full_refresh, self._config)
assert output.get_stream_statuses(_STREAM_NAME)[-1] == AirbyteStreamStatus.INCOMPLETE
def test_given_job_times_out_when_read_then_abort_job(self):
BulkSalesforceStream.DEFAULT_WAIT_TIMEOUT = timedelta(microseconds=1)
given_stream(self._http_mocker, _BASE_URL, _STREAM_NAME, SalesforceDescribeResponseBuilder().field(_A_FIELD_NAME))
self._http_mocker.post(
_make_full_job_request([_A_FIELD_NAME]),
JobCreateResponseBuilder().with_id(_JOB_ID).build(),
)
abort_request = HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}", body=json.dumps({"state": "Aborted"}))
self._http_mocker.patch(
abort_request,
JobInfoResponseBuilder().with_id(_JOB_ID).with_state("Aborted").build(),
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}"),
JobInfoResponseBuilder().with_id(_JOB_ID).with_state("InProgress").build(),
)
read(_STREAM_NAME, SyncMode.full_refresh, self._config)
self._http_mocker.assert_number_of_calls(abort_request, 3)
@freezegun.freeze_time(_NOW.isoformat())
def test_given_job_is_aborted_when_read_then_fail_sync(self):
given_stream(self._http_mocker, _BASE_URL, _STREAM_NAME, SalesforceDescribeResponseBuilder().field(_A_FIELD_NAME))
self._http_mocker.post(
_make_full_job_request([_A_FIELD_NAME]),
JobCreateResponseBuilder().with_id(_JOB_ID).build(),
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}"),
JobInfoResponseBuilder().with_id(_JOB_ID).with_state("Aborted").build(),
)
self._mock_delete_job(_JOB_ID)
output = read(_STREAM_NAME, SyncMode.full_refresh, self._config)
assert output.get_stream_statuses(_STREAM_NAME)[-1] == AirbyteStreamStatus.INCOMPLETE
@freezegun.freeze_time(_NOW.isoformat())
def test_given_job_is_failed_when_read_then_switch_to_standard(self):
given_stream(self._http_mocker, _BASE_URL, _STREAM_NAME, SalesforceDescribeResponseBuilder().field(_A_FIELD_NAME))
self._http_mocker.post(
_make_full_job_request([_A_FIELD_NAME]),
JobCreateResponseBuilder().with_id(_JOB_ID).build(),
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}"),
JobInfoResponseBuilder().with_id(_JOB_ID).with_state("Failed").build(),
)
self._http_mocker.get(
create_standard_http_request(_STREAM_NAME, [_A_FIELD_NAME], _ACCESS_TOKEN),
create_standard_http_response([_A_FIELD_NAME]),
)
self._mock_delete_job(_JOB_ID)
output = read(_STREAM_NAME, SyncMode.full_refresh, self._config)
assert len(output.records) == 1
@freezegun.freeze_time(_NOW.isoformat())
def test_given_retryable_error_on_download_job_result_when_read_then_extract_records(self):
given_stream(self._http_mocker, _BASE_URL, _STREAM_NAME, SalesforceDescribeResponseBuilder().field(_A_FIELD_NAME))
self._http_mocker.post(
_make_full_job_request([_A_FIELD_NAME]),
JobCreateResponseBuilder().with_id(_JOB_ID).build(),
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}"),
JobInfoResponseBuilder().with_id(_JOB_ID).with_state("JobComplete").build(),
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}/results"),
[
_RETRYABLE_RESPONSE,
HttpResponse(f"{_A_FIELD_NAME}\nfield_value"),
],
)
self._mock_delete_job(_JOB_ID)
output = read(_STREAM_NAME, SyncMode.full_refresh, self._config)
assert len(output.records) == 1
@freezegun.freeze_time(_NOW.isoformat())
def test_given_retryable_error_on_delete_job_result_when_read_then_do_not_break(self):
given_stream(self._http_mocker, _BASE_URL, _STREAM_NAME, SalesforceDescribeResponseBuilder().field(_A_FIELD_NAME))
self._http_mocker.post(
_make_full_job_request([_A_FIELD_NAME]),
JobCreateResponseBuilder().with_id(_JOB_ID).build(),
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}"),
JobInfoResponseBuilder().with_id(_JOB_ID).with_state("JobComplete").build(),
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}/results"),
HttpResponse(f"{_A_FIELD_NAME}\nfield_value"),
)
self._http_mocker.delete(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}"),
[
_RETRYABLE_RESPONSE,
HttpResponse(""),
],
)
output = read(_STREAM_NAME, SyncMode.full_refresh, self._config)
assert output.get_stream_statuses(_STREAM_NAME)[-1] == AirbyteStreamStatus.COMPLETE
@freezegun.freeze_time(_NOW.isoformat())
def test_given_non_retryable_error_on_delete_job_result_when_read_then_fail_to_sync(self):
"""
This is interesting: right now, we retry with the same policies has the other requests but it seems fair to just be a best effort,
catch everything and not retry
"""
given_stream(self._http_mocker, _BASE_URL, _STREAM_NAME, SalesforceDescribeResponseBuilder().field(_A_FIELD_NAME))
self._http_mocker.post(
_make_full_job_request([_A_FIELD_NAME]),
JobCreateResponseBuilder().with_id(_JOB_ID).build(),
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}"),
JobInfoResponseBuilder().with_id(_JOB_ID).with_state("JobComplete").build(),
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}/results"),
HttpResponse(f"{_A_FIELD_NAME}\nfield_value"),
)
self._http_mocker.delete(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}"),
HttpResponse("", 429),
)
output = read(_STREAM_NAME, SyncMode.full_refresh, self._config)
assert output.get_stream_statuses(_STREAM_NAME)[-1] == AirbyteStreamStatus.INCOMPLETE
@freezegun.freeze_time(_NOW.isoformat())
def test_given_incremental_when_read_then_create_job_and_extract_records_from_result(self) -> None:
start_date = (_NOW - timedelta(days=10)).replace(microsecond=0)
first_upper_boundary = start_date + timedelta(days=7)
self._config.start_date(start_date).stream_slice_step("P7D")
given_stream(self._http_mocker, _BASE_URL, _STREAM_NAME, _INCREMENTAL_SCHEMA_BUILDER)
self._create_sliced_job(start_date, first_upper_boundary, _STREAM_NAME, _INCREMENTAL_FIELDS, "first_slice_job_id", record_count=2)
self._create_sliced_job(first_upper_boundary, _NOW, _STREAM_NAME, _INCREMENTAL_FIELDS, "second_slice_job_id", record_count=1)
output = read(_STREAM_NAME, SyncMode.incremental, self._config)
assert len(output.records) == 3
@freezegun.freeze_time(_NOW.isoformat())
def test_given_slice_fails_when_read_then_state_is_partitioned(self) -> None:
# FIXME this test fails because the error happens in the thread that generates the slices as oppose to the thread the read the
# partition. In order to fix that, we probably need a flag to allow for the job orchestrator to continue creating the jobs even if
# there is an error
start_date = (_NOW - timedelta(days=20)).replace(microsecond=0)
slice_range = timedelta(days=7)
first_upper_boundary = start_date + slice_range
second_upper_boundary = first_upper_boundary + slice_range
self._config.start_date(start_date).stream_slice_step("P7D")
given_stream(self._http_mocker, _BASE_URL, _STREAM_NAME, _INCREMENTAL_SCHEMA_BUILDER)
self._create_sliced_job(start_date, first_upper_boundary, _STREAM_NAME, _INCREMENTAL_FIELDS, "first_slice_job_id", record_count=2)
self._http_mocker.post(
_make_sliced_job_request(first_upper_boundary, second_upper_boundary, _INCREMENTAL_FIELDS),
HttpResponse("", status_code=400),
)
self._create_sliced_job(second_upper_boundary, _NOW, _STREAM_NAME, _INCREMENTAL_FIELDS, "third_slice_job_id", record_count=1)
output = read(_STREAM_NAME, SyncMode.incremental, self._config)
assert len(output.records) == 3
assert len(output.most_recent_state.stream_state.slices) == 2
@freezegun.freeze_time(_NOW.isoformat())
def test_given_parent_stream_when_read_then_return_record_for_all_children(self) -> None:
start_date = (_NOW - timedelta(days=10)).replace(microsecond=0)
first_upper_boundary = start_date + timedelta(days=7)
self._config.start_date(start_date).stream_slice_step("P7D")
given_stream(self._http_mocker, _BASE_URL, _STREAM_WITH_PARENT_NAME, SalesforceDescribeResponseBuilder().field(_A_FIELD_NAME))
self._create_sliced_job_with_records(
start_date,
first_upper_boundary,
_PARENT_STREAM_NAME,
"first_parent_slice_job_id",
[{"Id": "parent1", "SystemModstamp": "any"}, {"Id": "parent2", "SystemModstamp": "any"}],
)
self._create_sliced_job_with_records(
first_upper_boundary, _NOW, _PARENT_STREAM_NAME, "second_parent_slice_job_id", [{"Id": "parent3", "SystemModstamp": "any"}]
)
self._http_mocker.post(
self._build_job_creation_request(
f"SELECT {', '.join([_A_FIELD_NAME])} FROM {_STREAM_WITH_PARENT_NAME} WHERE ContentDocumentId IN ('parent1', 'parent2', 'parent3')"
),
JobCreateResponseBuilder().with_id(_JOB_ID).build(),
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}"),
JobInfoResponseBuilder().with_id(_JOB_ID).with_state("JobComplete").build(),
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{_JOB_ID}/results"),
HttpResponse(f"{_A_FIELD_NAME}\nfield_value"),
)
self._mock_delete_job(_JOB_ID)
output = read(_STREAM_WITH_PARENT_NAME, SyncMode.full_refresh, self._config)
assert len(output.records) == 1
def _create_sliced_job(
self, lower_boundary: datetime, upper_boundary: datetime, stream_name: str, fields: List[str], job_id: str, record_count: int
) -> None:
self._create_sliced_job_with_records(
lower_boundary, upper_boundary, stream_name, job_id, self._generate_random_records(fields, record_count)
)
def _create_sliced_job_with_records(
self, lower_boundary: datetime, upper_boundary: datetime, stream_name: str, job_id: str, records: List[Dict[str, str]]
) -> None:
self._http_mocker.post(
self._make_sliced_job_request(lower_boundary, upper_boundary, stream_name, list(records[0].keys())),
JobCreateResponseBuilder().with_id(job_id).build(),
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{job_id}"),
JobInfoResponseBuilder().with_id(job_id).with_state("JobComplete").build(),
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/jobs/query/{job_id}/results"),
HttpResponse(self._generate_csv(records)),
)
self._mock_delete_job(job_id)
def _mock_delete_job(self, job_id: str) -> HttpRequest:
request = HttpRequest(f"{_BASE_URL}/jobs/query/{job_id}")
self._http_mocker.delete(
request,
HttpResponse(""),
)
return request
def _create_csv(self, headers: List[str], data: List[Dict[str, str]], dialect: str) -> str:
with io.StringIO("", newline="") as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=headers, dialect=dialect)
writer.writeheader()
for line in data:
writer.writerow(line)
return csvfile.getvalue()
def _make_sliced_job_request(
self, lower_boundary: datetime, upper_boundary: datetime, stream_name: str, fields: List[str]
) -> HttpRequest:
return self._build_job_creation_request(
f"SELECT {', '.join(fields)} FROM {stream_name} WHERE SystemModstamp >= {lower_boundary.isoformat(timespec='milliseconds')} AND SystemModstamp < {upper_boundary.isoformat(timespec='milliseconds')}"
)
def _make_full_job_request(self, fields: List[str], stream_name: str = _STREAM_NAME) -> HttpRequest:
return self._build_job_creation_request(f"SELECT {', '.join(fields)} FROM {stream_name}")
def _generate_random_records(self, fields: List[str], record_count: int) -> List[Dict[str, str]]:
record = {field: "2021-01-18T21:18:20.000Z" if field in {"SystemModstamp"} else f"{field}_value" for field in fields}
return [record for _ in range(record_count)]
def _generate_csv(self, records: List[Dict[str, str]]) -> str:
"""
This method does not handle field types for now which may cause some test failures on change if we start considering using some
fields for calculation. One example of that would be cursor field parsing to datetime.
"""
keys = list(records[0].keys()) # assuming all the records have the same keys
csv_entry = []
for record in records:
csv_entry.append(",".join([record[key] for key in keys]))
entries = "\n".join(csv_entry)
return f"{','.join(keys)}\n{entries}"
def _build_job_creation_request(self, query: str) -> HttpRequest:
return HttpRequest(
f"{_BASE_URL}/jobs/query",
body=json.dumps(
{"operation": "queryAll", "query": query, "contentType": "CSV", "columnDelimiter": "COMMA", "lineEnding": "LF"}
),
)
| BulkStreamTest |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/layout/processors.py | {
"start": 20946,
"end": 22141
} | class ____(Processor):
"""
Make trailing whitespace visible.
:param get_char: Callable that returns one character.
"""
def __init__(
self,
get_char: Callable[[], str] | None = None,
style: str = "class:training-whitespace",
) -> None:
def default_get_char() -> str:
if "\xb7".encode(get_app().output.encoding(), "replace") == b"?":
return "."
else:
return "\xb7"
self.style = style
self.get_char = get_char or default_get_char
def apply_transformation(self, ti: TransformationInput) -> Transformation:
fragments = ti.fragments
if fragments and fragments[-1][1].endswith(" "):
t = (self.style, self.get_char())
fragments = explode_text_fragments(fragments)
# Walk backwards through all te fragments and replace whitespace.
for i in range(len(fragments) - 1, -1, -1):
char = fragments[i][1]
if char == " ":
fragments[i] = t
else:
break
return Transformation(fragments)
| ShowTrailingWhiteSpaceProcessor |
python | neetcode-gh__leetcode | python/0236-lowest-common-ancestor-of-a-binary-tree.py | {
"start": 163,
"end": 653
} | class ____:
def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
if not root:
return
if root == p or root == q:
return root
left = self.lowestCommonAncestor(root.left, p, q)
right = self.lowestCommonAncestor(root.right, p, q)
if left and right:
return root
if left:
return left
if right:
return right
return None
| Solution |
python | getsentry__sentry | tests/sentry/api/endpoints/test_organization_profiling_profiles.py | {
"start": 30643,
"end": 33221
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-profiling-chunks"
features = {
"organizations:continuous-profiling": True,
}
def setUp(self) -> None:
self.login_as(user=self.user)
self.url = reverse(self.endpoint, args=(self.organization.slug,))
def test_forbids_multiple_projects(self) -> None:
projects = [self.create_project() for _ in range(3)]
with self.feature(self.features):
response = self.client.get(self.url, {"project": [project.id for project in projects]})
assert response.status_code == 400
assert response.data == {
"detail": ErrorDetail(string="one project_id must be specified.", code="parse_error")
}
def test_requires_profiler_id(self) -> None:
with self.feature(self.features):
response = self.client.get(self.url, {"project": [self.project.id]})
assert response.status_code == 400
assert response.data == {
"detail": ErrorDetail(string="profiler_id must be specified.", code="parse_error")
}
@patch(
"sentry.api.endpoints.organization_profiling_profiles.proxy_profiling_service",
wraps=proxy_profiling_service,
)
@patch("sentry.profiles.profile_chunks.raw_snql_query")
@freeze_time("2024-07-11 00:00:00")
def test_proxies_to_profiling_service(
self, mock_raw_snql_query: MagicMock, mock_proxy_profiling_service: MagicMock
) -> None:
profiler_id = uuid4().hex
chunk_ids = [uuid4().hex for _ in range(3)]
mock_raw_snql_query.return_value = {
"data": [{"chunk_id": chunk_id} for chunk_id in chunk_ids]
}
with self.feature(self.features):
self.client.get(
self.url,
{
"project": [self.project.id],
"profiler_id": profiler_id,
"statsPeriod": "1d",
},
)
end = datetime.fromisoformat("2024-07-11 00:00:00").replace(tzinfo=UTC)
start = end - timedelta(days=1)
mock_proxy_profiling_service.assert_called_with(
method="POST",
path=f"/organizations/{self.project.organization.id}/projects/{self.project.id}/chunks",
json_data={
"profiler_id": profiler_id,
"chunk_ids": chunk_ids,
"start": str(int(start.timestamp() * 1e9)),
"end": str(int(end.timestamp() * 1e9)),
},
)
| OrganizationProfilingChunksTest |
python | dagster-io__dagster | python_modules/dagster/dagster/_daemon/sensor.py | {
"start": 3806,
"end": 3987
} | class ____(NamedTuple):
"""Placeholder for runs that are skipped during the run_key idempotence check."""
run_key: Optional[str]
existing_run: DagsterRun
| SkippedSensorRun |
python | huggingface__transformers | tests/models/wav2vec2_phoneme/test_tokenization_wav2vec2_phoneme.py | {
"start": 1082,
"end": 19737
} | class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "facebook/wav2vec2-lv-60-espeak-cv-ft"
tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
test_rust_tokenizer = False
@classmethod
def setUpClass(cls):
super().setUpClass()
vocab = (
"<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
"ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
"ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
"oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
"pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
"yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '
"ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
"ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
"uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
"ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
"ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
"ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
).split(" ")
vocab_tokens = dict(zip(vocab, range(len(vocab))))
cls.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}
cls.vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(cls.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
# overwrite since phonemes require specific creation
def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> tuple[str, list]:
toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
if max_length is not None and len(toks) > max_length:
toks = toks[:max_length]
if min_length is not None and len(toks) < min_length and len(toks) > 0:
while len(toks) < min_length:
toks = toks + toks
# toks_str = [t[1] for t in toks]
toks_ids = [t[0] for t in toks]
# Ensure consistency
output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
if " " not in output_txt and len(toks_ids) > 1:
output_txt = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
+ " "
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
)
if with_prefix_space:
output_txt = " " + output_txt
output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
return output_txt, output_ids
@classmethod
def get_tokenizer(cls, pretrained_name=None, **kwargs):
kwargs.update(cls.special_tokens_map)
pretrained_name = pretrained_name or cls.tmpdirname
return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(pretrained_name, **kwargs)
def test_tokenizer_add_new_tokens(self):
tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
# check adding a single token
tokenizer.add_tokens("xxx")
token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids
self.assertEqual(token_ids, [13, 392, 17]) # xxx should be last token
tokenizer.add_tokens(["aaa", "bbb", "ccc"])
token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids
self.assertEqual(token_ids, [13, 393, 17, 395]) # aaa and ccc should be after xxx and 2 after aaa
token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids
self.assertEqual(token_ids, [3, 200]) # mai should be <unk> (=3)
def test_phonemize(self):
tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
input_text = "Hello how are you"
phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː")
def test_encode(self):
tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
input_text = "Hello how are you"
phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)
def test_encode_decode(self):
tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
input_text = "Hello how are you"
phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids)
self.assertEqual(phonemes, phonemes_enc_dec)
def test_decode(self):
tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
sample_ids = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
tokens = tokenizer.decode(sample_ids[0])
batch_tokens = tokenizer.batch_decode(sample_ids)
self.assertEqual(tokens, batch_tokens[0])
self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
def test_phonemize_with_word_del(self):
tokenizer = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
)
tokenizer.add_tokens("|")
input_text = "Hello how are you"
phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
self.assertEqual(phonemes, "h ə l oʊ | h aʊ | ɑːɹ | j uː |")
def test_encode_with_del(self):
tokenizer = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
)
tokenizer.add_tokens("|")
input_text = "Hello how are you"
phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)
def test_decode_with_del(self):
tokenizer = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
)
tokenizer.add_tokens("|")
# fmt: off
sample_ids = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
tokens = tokenizer.decode(sample_ids[0])
batch_tokens = tokenizer.batch_decode(sample_ids)
self.assertEqual(tokens, batch_tokens[0])
self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
# decode with no word_del_token filter
tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False)
batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False)
self.assertEqual(tokens, batch_tokens[0])
self.assertEqual(batch_tokens, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"])
def test_encode_decode_with_del(self):
tokenizer = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
)
tokenizer.add_tokens("|")
input_text = "Hello how are you"
phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False)
self.assertEqual(phonemes, phonemes_enc_dec)
def test_encode_decode_with_del_filter(self):
tokenizer = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
)
tokenizer.add_tokens("|")
input_text = "Hello how are you"
phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True)
self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip(), phonemes_enc_dec)
def test_change_phonemizer_lang(self):
tokenizer = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=None
)
input_text = "Hello how are you"
input_ids_en = tokenizer(input_text, phonemizer_lang="en-us").input_ids
input_ids_fr = tokenizer(input_text, phonemizer_lang="fr-fr").input_ids
self.assertNotEqual(input_ids_en, input_ids_fr)
text_en = tokenizer.decode(input_ids_en)
text_fr = tokenizer.decode(input_ids_fr)
self.assertEqual(text_en, "h ə l oʊ h aʊ ɑːɹ j uː")
self.assertEqual(text_fr, "ɛ l o h aʊ a ʁ j u")
def test_case_insensitive(self):
tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
input_text_up = "Hello how Are you"
input_text_low = "hello how are you"
input_ids_up = tokenizer(input_text_up).input_ids
input_ids_low = tokenizer(input_text_low).input_ids
self.assertEqual(input_ids_up, input_ids_low)
def test_tokenizer_decode_added_tokens(self):
tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
tokenizer.add_tokens(["!", "?"])
tokenizer.add_special_tokens({"cls_token": "$$$"})
# fmt: off
sample_ids = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
batch_tokens = tokenizer.batch_decode(sample_ids)
self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ ! ? ! ? $$$", "j ð s j ð s oːɹ $$$"])
@staticmethod
def get_from_offsets(offsets, key):
retrieved_list = [d[key] for d in offsets]
return retrieved_list
def test_offsets(self):
tokenizer = self.get_tokenizer(word_delimiter_token="|")
tokenizer.add_tokens("|")
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys()), 2)
self.assertTrue("text" in outputs)
self.assertTrue("char_offsets" in outputs)
self.assertTrue(isinstance(outputs, Wav2Vec2PhonemeCTCTokenizerOutput))
# check that order of chars is correct and identical for both outputs
self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char")), outputs.text)
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"], "char"), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"]
)
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"], "start_offset"), [0, 1, 4, 7, 9, 11, 12, 15, 16]
)
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"], "end_offset"), [1, 4, 6, 9, 10, 12, 15, 16, 17]
)
def test_offsets_batch(self):
tokenizer = self.get_tokenizer(word_delimiter_token="|")
def check_list_tuples_equal(outputs_batch, outputs_list):
self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))
# transform list to ModelOutput
outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]}
)
self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"])
def recursive_check(list_or_dict_1, list_or_dict_2):
if isinstance(list_or_dict_1, list):
[recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
self.assertEqual(list_or_dict_1, list_or_dict_2)
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"])
# fmt: off
sample_ids = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
check_list_tuples_equal(outputs_char_batch, outputs_char)
@unittest.skip(reason="Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes")
def test_added_tokens_do_lower_case(self):
pass
@unittest.skip(reason="Wav2Vec2PhonemeTokenizer always puts spaces between phonemes")
def test_encode_decode_with_spaces(self):
pass
@unittest.skip(
reason="encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency"
)
def test_internal_consistency(self):
pass
@unittest.skip(reason="Wav2Vec2PhonemeModel has no max model length => no testing")
def test_add_tokens_tokenizer(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
vocab_size = tokenizer.vocab_size
all_size = len(tokenizer)
self.assertNotEqual(vocab_size, 0)
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
added_toks = tokenizer.add_tokens(new_toks)
vocab_size_2 = tokenizer.vocab_size
all_size_2 = len(tokenizer)
self.assertNotEqual(vocab_size_2, 0)
self.assertEqual(vocab_size, vocab_size_2)
self.assertEqual(added_toks, len(new_toks))
self.assertEqual(all_size_2, all_size + len(new_toks))
tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)
self.assertGreaterEqual(len(tokens), 4)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
vocab_size_3 = tokenizer.vocab_size
all_size_3 = len(tokenizer)
self.assertNotEqual(vocab_size_3, 0)
self.assertEqual(vocab_size, vocab_size_3)
self.assertEqual(added_toks_2, len(new_toks_2))
self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
tokens = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
)
self.assertGreaterEqual(len(tokens), 6)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[0], tokens[1])
self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3], tokens[-4])
self.assertEqual(tokens[0], tokenizer.eos_token_id)
self.assertEqual(tokens[-3], tokenizer.pad_token_id)
@unittest.skip(reason="The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
def test_tf_encode_plus_sent_to_model(self):
pass
@unittest.skip(reason="The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
def test_torch_encode_plus_sent_to_model(self):
pass
def test_convert_tokens_to_string_format(self):
# The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
tokens = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
output = tokenizer.convert_tokens_to_string(tokens)
self.assertIsInstance(output["text"], str)
| Wav2Vec2PhonemeCTCTokenizerTest |
python | tensorflow__tensorflow | tensorflow/python/eager/core.py | {
"start": 1848,
"end": 2258
} | class ____(Exception):
"""Exception class to handle fallback from the fastpath.
The fastpath that we refer to here is the one implemented to reduce per-op
overheads (TFE_Py_FastPathExecute_C). If the conditions for executing the op
on the fastpath are not met, we fallback to a safer (and more complete)
slowpath, and this Exception is raised to signal that transition.
"""
pass
| _FallbackException |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/linear_operator_tridiag_test.py | {
"start": 1255,
"end": 4022
} | class ____(object):
def build_operator_and_matrix(
self, build_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False,
diagonals_format='sequence'):
shape = list(build_info.shape)
# Ensure that diagonal has large enough values. If we generate a
# self adjoint PD matrix, then the diagonal will be dominant guaranteeing
# positive definitess.
diag = linear_operator_test_util.random_sign_uniform(
shape[:-1], minval=4., maxval=6., dtype=dtype)
# We'll truncate these depending on the format
subdiag = linear_operator_test_util.random_sign_uniform(
shape[:-1], minval=1., maxval=2., dtype=dtype)
if ensure_self_adjoint_and_pd:
# Abs on complex64 will result in a float32, so we cast back up.
diag = math_ops.cast(math_ops.abs(diag), dtype=dtype)
# The first element of subdiag is ignored. We'll add a dummy element
# to superdiag to pad it.
superdiag = math_ops.conj(subdiag)
superdiag = manip_ops.roll(superdiag, shift=-1, axis=-1)
else:
superdiag = linear_operator_test_util.random_sign_uniform(
shape[:-1], minval=1., maxval=2., dtype=dtype)
matrix_diagonals = array_ops_stack.stack(
[superdiag, diag, subdiag], axis=-2)
matrix = gen_array_ops.matrix_diag_v3(
matrix_diagonals,
k=(-1, 1),
num_rows=-1,
num_cols=-1,
align='LEFT_RIGHT',
padding_value=0.)
if diagonals_format == 'sequence':
diagonals = [superdiag, diag, subdiag]
elif diagonals_format == 'compact':
diagonals = array_ops_stack.stack([superdiag, diag, subdiag], axis=-2)
elif diagonals_format == 'matrix':
diagonals = matrix
lin_op_diagonals = diagonals
if use_placeholder:
if diagonals_format == 'sequence':
lin_op_diagonals = [array_ops.placeholder_with_default(
d, shape=None) for d in lin_op_diagonals]
else:
lin_op_diagonals = array_ops.placeholder_with_default(
lin_op_diagonals, shape=None)
operator = linalg_lib.LinearOperatorTridiag(
diagonals=lin_op_diagonals,
diagonals_format=diagonals_format,
is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
is_positive_definite=True if ensure_self_adjoint_and_pd else None)
return operator, matrix
@staticmethod
def operator_shapes_infos():
shape_info = linear_operator_test_util.OperatorShapesInfo
# non-batch operators (n, n) and batch operators.
return [
shape_info((3, 3)),
shape_info((1, 6, 6)),
shape_info((3, 4, 4)),
shape_info((2, 1, 3, 3))
]
@test_util.with_eager_op_as_function
@test_util.run_all_in_graph_and_eager_modes
| _LinearOperatorTriDiagBase |
python | gevent__gevent | src/greentest/3.14/test_socket.py | {
"start": 16342,
"end": 17649
} | class ____(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.serv.close)
self.serv.bind((socket.VMADDR_CID_ANY, VSOCKPORT))
self.serv.listen()
self.serverExplicitReady()
self.serv.settimeout(support.LOOPBACK_TIMEOUT)
self.conn, self.connaddr = self.serv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
time.sleep(0.1)
self.cli = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
cid = get_cid()
if cid in (socket.VMADDR_CID_HOST, socket.VMADDR_CID_ANY):
# gh-119461: Use the local communication address (loopback)
cid = socket.VMADDR_CID_LOCAL
self.cli.connect((cid, VSOCKPORT))
def testStream(self):
try:
msg = self.conn.recv(1024)
except PermissionError as exc:
self.skipTest(repr(exc))
self.assertEqual(msg, MSG)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
| ThreadedVSOCKSocketStreamTest |
python | coleifer__peewee | tests/shortcuts.py | {
"start": 652,
"end": 814
} | class ____(TestModel):
tweet = ForeignKeyField(Tweet)
tag = ForeignKeyField(Tag)
class Meta:
primary_key = CompositeKey('tweet', 'tag')
| TweetTag |
python | ansible__ansible | test/integration/targets/tasks/action_plugins/action_that_fails.py | {
"start": 84,
"end": 566
} | class ____(ActionBase):
def run(self, tmp=None, task_vars=None):
args = self.validate_argument_spec(argument_spec=dict(
fail_mode=dict(default='raise', choices=['raise', 'result_dict'], type='str')
))
if args[0].validated_parameters['fail_mode'] == 'raise':
raise Exception("I am an exception from an action.")
return dict(exception="I am a captured traceback from an action", failed=True, msg="sorry, it broke")
| ActionModule |
python | realpython__materials | arcade-platformer/arcade_platformer/15_moving_platforms.py | {
"start": 7820,
"end": 22145
} | class ____(arcade.View):
def __init__(self) -> None:
super().__init__()
# These lists will hold different sets of sprites
self.coins = None
self.background = None
self.walls = None
self.ladders = None
self.goals = None
self.enemies = None
# One sprite for the player, no more is needed
self.player = None
# We need a physics engine as well
self.physics_engine = None
# Someplace to keep score
self.score = 0
# Which level are we on?
self.level = 1
# Load up our sounds here
self.coin_sound = arcade.load_sound(
str(ASSETS_PATH / "sounds" / "coin.wav")
)
self.jump_sound = arcade.load_sound(
str(ASSETS_PATH / "sounds" / "jump.wav")
)
self.victory_sound = arcade.load_sound(
str(ASSETS_PATH / "sounds" / "victory.wav")
)
# Check if a joystick is connected
joysticks = arcade.get_joysticks()
if joysticks:
# If so, get the first one
self.joystick = joysticks[0]
self.joystick.open()
else:
# If not, flag it so we won't use it
print("There are no Joysticks")
self.joystick = None
def setup(self) -> None:
"""Sets up the game for the current level"""
# Get the current map based on the level
map_name = f"platform_level_{self.level:02}.tmx"
map_path = ASSETS_PATH / map_name
# What are the names of the layers?
wall_layer = "ground"
coin_layer = "coins"
goal_layer = "goal"
background_layer = "background"
ladders_layer = "ladders"
# Load the current map
game_map = arcade.tilemap.read_tmx(str(map_path))
# Load the layers
self.background = arcade.tilemap.process_layer(
game_map, layer_name=background_layer, scaling=MAP_SCALING
)
self.goals = arcade.tilemap.process_layer(
game_map, layer_name=goal_layer, scaling=MAP_SCALING
)
self.walls = arcade.tilemap.process_layer(
game_map, layer_name=wall_layer, scaling=MAP_SCALING
)
self.ladders = arcade.tilemap.process_layer(
game_map, layer_name=ladders_layer, scaling=MAP_SCALING
)
self.coins = arcade.tilemap.process_layer(
game_map, layer_name=coin_layer, scaling=MAP_SCALING
)
# Process moving platforms
moving_platforms_layer_name = "moving_platforms"
moving_platforms = arcade.tilemap.process_layer(
game_map,
layer_name=moving_platforms_layer_name,
scaling=MAP_SCALING,
)
for sprite in moving_platforms:
self.walls.append(sprite)
# Set the background color
background_color = arcade.color.FRESH_AIR
if game_map.background_color:
background_color = game_map.background_color
arcade.set_background_color(background_color)
# Find the edge of the map to control viewport scrolling
self.map_width = (
game_map.map_size.width - 1
) * game_map.tile_size.width
# Create the player sprite, if they're not already setup
if not self.player:
self.player = self.create_player_sprite()
# Move the player sprite back to the beginning
self.player.center_x = PLAYER_START_X
self.player.center_y = PLAYER_START_Y
self.player.change_x = 0
self.player.change_y = 0
# Setup our enemies
self.enemies = self.create_enemy_sprites()
# Reset the viewport
self.view_left = 0
self.view_bottom = 0
# Load the physics engine for this map
self.physics_engine = arcade.PhysicsEnginePlatformer(
player_sprite=self.player,
platforms=self.walls,
gravity_constant=GRAVITY,
ladders=self.ladders,
)
def create_enemy_sprites(self) -> arcade.SpriteList:
"""Creates enemy sprites appropriate for the current level
Returns:
A Sprite List of enemies"""
enemies = arcade.SpriteList()
# Only enemies on level 2
if self.level == 2:
enemies.append(Enemy(1464, 320))
return enemies
def create_player_sprite(self) -> arcade.AnimatedWalkingSprite:
# Where are the player images stored?
texture_path = ASSETS_PATH / "images" / "player"
# Setup the appropriate textures
walking_paths = [
texture_path / f"alienGreen_walk{x}.png" for x in (1, 2)
]
climbing_paths = [
texture_path / f"alienGreen_climb{x}.png" for x in (1, 2)
]
standing_path = texture_path / "alienGreen_stand.png"
# Load them all now
walking_right_textures = [
arcade.load_texture(texture) for texture in walking_paths
]
walking_left_textures = [
arcade.load_texture(texture, mirrored=True)
for texture in walking_paths
]
walking_up_textures = [
arcade.load_texture(texture) for texture in climbing_paths
]
walking_down_textures = [
arcade.load_texture(texture) for texture in climbing_paths
]
standing_right_textures = [arcade.load_texture(standing_path)]
standing_left_textures = [
arcade.load_texture(standing_path, mirrored=True)
]
# Create the sprite
player = arcade.AnimatedWalkingSprite()
# Add the proper textures
player.stand_left_textures = standing_left_textures
player.stand_right_textures = standing_right_textures
player.walk_left_textures = walking_left_textures
player.walk_right_textures = walking_right_textures
player.walk_up_textures = walking_up_textures
player.walk_down_textures = walking_down_textures
# Set the player defaults
player.center_x = PLAYER_START_X
player.center_y = PLAYER_START_Y
player.state = arcade.FACE_RIGHT
# Set the initial texture
player.texture = player.stand_right_textures[0]
return player
def on_key_press(self, key: int, modifiers: int) -> None:
"""Arguments:
key -- Which key was pressed
modifiers -- Which modifiers were down at the time
"""
# Check for player left/right movement
if key in [arcade.key.LEFT, arcade.key.J]:
self.player.change_x = -PLAYER_MOVE_SPEED
elif key in [arcade.key.RIGHT, arcade.key.L]:
self.player.change_x = PLAYER_MOVE_SPEED
# Check if player can climb up or down
elif key in [arcade.key.UP, arcade.key.I]:
if self.physics_engine.is_on_ladder():
self.player.change_y = PLAYER_MOVE_SPEED
elif key in [arcade.key.DOWN, arcade.key.K]:
if self.physics_engine.is_on_ladder():
self.player.change_y = -PLAYER_MOVE_SPEED
# Check if we can jump
elif key == arcade.key.SPACE:
if self.physics_engine.can_jump():
self.player.change_y = PLAYER_JUMP_SPEED
# Play the jump sound
arcade.play_sound(self.jump_sound)
# Did the user want to pause?
elif key == arcade.key.ESCAPE:
# Pass the current view to preserve this view's state
pause = PauseView(self)
self.window.show_view(pause)
def on_key_release(self, key: int, modifiers: int) -> None:
"""Arguments:
key -- The key which was released
modifiers -- Which modifiers were down at the time
"""
# Check for player left/right movement
if key in [
arcade.key.LEFT,
arcade.key.J,
arcade.key.RIGHT,
arcade.key.L,
]:
self.player.change_x = 0
# Check if player can climb up or down
elif key in [
arcade.key.UP,
arcade.key.I,
arcade.key.DOWN,
arcade.key.K,
]:
if self.physics_engine.is_on_ladder():
self.player.change_y = 0
def on_update(self, delta_time: float) -> None:
"""Updates the position of all game objects
Arguments:
delta_time {float} -- How much time since the last call
"""
# First, check for joystick movement
if self.joystick:
# Check if we're in the dead zone
if abs(self.joystick.x) > DEAD_ZONE:
self.player.change_x = self.joystick.x * PLAYER_MOVE_SPEED
else:
self.player.change_x = 0
if abs(self.joystick.y) > DEAD_ZONE:
if self.physics_engine.is_on_ladder():
self.player.change_y = self.joystick.y * PLAYER_MOVE_SPEED
else:
self.player.change_y = 0
# Did the user press the jump button?
if self.joystick.buttons[0]:
if self.physics_engine.can_jump():
self.player.change_y = PLAYER_JUMP_SPEED
# Play the jump sound
arcade.play_sound(self.jump_sound)
# Update the player animation
self.player.update_animation(delta_time)
# Are there enemies? Update them as well
self.enemies.update_animation(delta_time)
for enemy in self.enemies:
enemy.center_x += enemy.change_x
walls_hit = arcade.check_for_collision_with_list(
sprite=enemy, sprite_list=self.walls
)
if walls_hit:
enemy.change_x *= -1
# Update player movement based on the physics engine
self.physics_engine.update()
# Restrict user movement so they can't walk off screen
if self.player.left < 0:
self.player.left = 0
# Check if we've picked up a coin
coins_hit = arcade.check_for_collision_with_list(
sprite=self.player, sprite_list=self.coins
)
for coin in coins_hit:
# Add the coin score to our score
self.score += int(coin.properties["point_value"])
# Play the coin sound
arcade.play_sound(self.coin_sound)
# Remove the coin
coin.remove_from_sprite_lists()
# Has Roz collided with an enemy?
enemies_hit = arcade.check_for_collision_with_list(
sprite=self.player, sprite_list=self.enemies
)
if enemies_hit:
self.setup()
title_view = TitleView()
window.show_view(title_view)
# Now check if we're at the ending goal
goals_hit = arcade.check_for_collision_with_list(
sprite=self.player, sprite_list=self.goals
)
if goals_hit:
# Play the victory sound
self.victory_sound.play()
# Setup the next level
self.level += 1
self.setup()
# Set the viewport, scrolling if necessary
self.scroll_viewport()
def scroll_viewport(self) -> None:
"""Scrolls the viewport when the player gets close to the edges"""
# Scroll left
# Find the current left boundary
left_boundary = self.view_left + LEFT_VIEWPORT_MARGIN
# Are we to the left of this boundary? Then we should scroll left
if self.player.left < left_boundary:
self.view_left -= left_boundary - self.player.left
# But don't scroll past the left edge of the map
if self.view_left < 0:
self.view_left = 0
# Scroll right
# Find the current right boundary
right_boundary = self.view_left + SCREEN_WIDTH - RIGHT_VIEWPORT_MARGIN
# Are we right of this boundary? Then we should scroll right
if self.player.right > right_boundary:
self.view_left += self.player.right - right_boundary
# Don't scroll past the right edge of the map
if self.view_left > self.map_width - SCREEN_WIDTH:
self.view_left = self.map_width - SCREEN_WIDTH
# Scroll up
top_boundary = self.view_bottom + SCREEN_HEIGHT - TOP_VIEWPORT_MARGIN
if self.player.top > top_boundary:
self.view_bottom += self.player.top - top_boundary
# Scroll down
bottom_boundary = self.view_bottom + BOTTOM_VIEWPORT_MARGIN
if self.player.bottom < bottom_boundary:
self.view_bottom -= bottom_boundary - self.player.bottom
# Only scroll to integers. Otherwise we end up with pixels that
# don't line up on the screen
self.view_bottom = int(self.view_bottom)
self.view_left = int(self.view_left)
# Do the scrolling
arcade.set_viewport(
left=self.view_left,
right=SCREEN_WIDTH + self.view_left,
bottom=self.view_bottom,
top=SCREEN_HEIGHT + self.view_bottom,
)
def on_draw(self) -> None:
arcade.start_render()
# Draw all the sprites
self.background.draw()
self.walls.draw()
self.coins.draw()
self.goals.draw()
self.ladders.draw()
self.enemies.draw()
self.player.draw()
# Draw the score in the lower left
score_text = f"Score: {self.score}"
# First a black background for a shadow effect
arcade.draw_text(
score_text,
start_x=10 + self.view_left,
start_y=10 + self.view_bottom,
color=arcade.csscolor.BLACK,
font_size=40,
)
# Now in white slightly shifted
arcade.draw_text(
score_text,
start_x=15 + self.view_left,
start_y=15 + self.view_bottom,
color=arcade.csscolor.WHITE,
font_size=40,
)
if __name__ == "__main__":
window = arcade.Window(
width=SCREEN_WIDTH, height=SCREEN_HEIGHT, title=SCREEN_TITLE
)
title_view = TitleView()
window.show_view(title_view)
arcade.run()
| PlatformerView |
python | matplotlib__matplotlib | lib/matplotlib/animation.py | {
"start": 25895,
"end": 26227
} | class ____(ImageMagickBase, MovieWriter):
"""
Pipe-based animated gif writer.
Frames are streamed directly to ImageMagick via a pipe and written
in a single pass.
"""
input_names = "-" # stdin
# Combine ImageMagick options with temp file-based writing
@writers.register('imagemagick_file')
| ImageMagickWriter |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/guides/data-modeling/asset-factories/advanced-yaml-asset-factory.py | {
"start": 1020,
"end": 1703
} | class ____(pydantic.BaseModel):
aws: AwsConfig
etl_jobs: list[JobConfig]
def to_definitions(self) -> dg.Definitions:
s3_resource = self.aws.to_resource()
return dg.Definitions.merge(
*[job.to_etl_job(s3_resource) for job in self.etl_jobs]
)
def load_etl_jobs_from_yaml(yaml_path: str) -> dg.Definitions:
yaml_template = jinja2.Environment().from_string(open(yaml_path).read())
config = yaml.safe_load(yaml_template.render(env=os.environ))
return EtlJobsConfig.model_validate(config).to_definitions()
# highlight-end
@dg.definitions
def defs():
return load_etl_jobs_from_yaml("etl_jobs_with_jinja.yaml")
| EtlJobsConfig |
python | FactoryBoy__factory_boy | tests/test_using.py | {
"start": 1162,
"end": 2177
} | class ____:
@classmethod
def create(cls, **kwargs):
instance = cls(**kwargs)
instance.id = 1
return instance
class FakeModelManager:
def get_or_create(self, **kwargs):
defaults = kwargs.pop('defaults', {})
kwargs.update(defaults)
instance = FakeModel.create(**kwargs)
instance.id = 2
instance._defaults = defaults
return instance, True
def create(self, **kwargs):
instance = FakeModel.create(**kwargs)
instance.id = 2
instance._defaults = None
return instance
def values_list(self, *args, **kwargs):
return self
def order_by(self, *args, **kwargs):
return [1]
def using(self, db):
return self
objects = FakeModelManager()
def __init__(self, **kwargs):
for name, value in kwargs.items():
setattr(self, name, value)
self.id = None
| FakeModel |
python | pytorch__pytorch | test/torch_np/test_basic.py | {
"start": 12745,
"end": 14076
} | class ____(TestCase):
def test_divmod_out(self):
x1 = w.arange(8, 15)
x2 = w.arange(4, 11)
out = (w.empty_like(x1), w.empty_like(x1))
quot, rem = w.divmod(x1, x2, out=out)
assert_equal(quot, x1 // x2)
assert_equal(rem, x1 % x2)
out1, out2 = out
assert quot is out1
assert rem is out2
def test_divmod_out_list(self):
x1 = [4, 5, 6]
x2 = [2, 1, 2]
out = (w.empty_like(x1), w.empty_like(x1))
quot, rem = w.divmod(x1, x2, out=out)
assert quot is out[0]
assert rem is out[1]
@xfail # ("out1, out2 not implemented")
def test_divmod_pos_only(self):
x1 = [4, 5, 6]
x2 = [2, 1, 2]
out1, out2 = w.empty_like(x1), w.empty_like(x1)
quot, rem = w.divmod(x1, x2, out1, out2)
assert quot is out1
assert rem is out2
def test_divmod_no_out(self):
# check that the out= machinery handles no out at all
x1 = w.array([4, 5, 6])
x2 = w.array([2, 1, 2])
quot, rem = w.divmod(x1, x2)
assert_equal(quot, x1 // x2)
assert_equal(rem, x1 % x2)
def test_divmod_out_both_pos_and_kw(self):
o = w.empty(1)
with assert_raises(TypeError):
w.divmod(1, 2, o, o, out=(o, o))
| TestDivmod |
python | gevent__gevent | src/greentest/3.14/test_smtplib.py | {
"start": 29421,
"end": 31019
} | class ____(unittest.TestCase):
respdata = b'250 OK' + (b'.' * smtplib._MAXLINE * 2) + b'\n'
def setUp(self):
self.thread_key = threading_helper.threading_setup()
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(15)
self.port = socket_helper.bind_port(self.sock)
servargs = (self.evt, self.respdata, self.sock)
self.thread = threading.Thread(target=server, args=servargs)
self.thread.start()
self.evt.wait()
self.evt.clear()
def tearDown(self):
self.evt.wait()
sys.stdout = self.old_stdout
threading_helper.join_thread(self.thread)
del self.thread
self.doCleanups()
threading_helper.threading_cleanup(*self.thread_key)
def testLineTooLong(self):
self.assertRaises(smtplib.SMTPResponseException, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
sim_users = {'Mr.A@somewhere.com':'John A',
'Ms.B@xn--fo-fka.com':'Sally B',
'Mrs.C@somewhereesle.com':'Ruth C',
}
sim_auth = ('Mr.A@somewhere.com', 'somepassword')
sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn'
'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=')
sim_lists = {'list-1':['Mr.A@somewhere.com','Mrs.C@somewhereesle.com'],
'list-2':['Ms.B@xn--fo-fka.com',],
}
# Simulated SMTP channel & server
| TooLongLineTests |
python | getsentry__sentry | src/sentry/issue_detection/detectors/mn_plus_one_db_span_detector.py | {
"start": 1507,
"end": 4060
} | class ____(MNPlusOneState):
"""
The initial state for the MN+1 DB Query detector, and the state we return to
whenever there is no active repeating pattern being checked.
Keeps a list of recently seen spans until a repeat is found, at which point
it transitions to the ContinuingMNPlusOne state.
"""
__slots__ = ("settings", "event", "recent_spans")
def __init__(
self,
settings: dict[str, Any],
event: dict[str, Any],
initial_spans: Sequence[Span] | None = None,
) -> None:
self.settings = settings
self.event = event
self.recent_spans = deque(initial_spans or [], self.settings["max_sequence_length"])
def next(self, span: Span) -> tuple[MNPlusOneState, PerformanceProblem | None]:
# Can't be a potential MN+1 without at least 2 previous spans.
if len(self.recent_spans) <= 1:
self.recent_spans.append(span)
return (self, None)
# Has an MN pattern begun to repeat itself? If so, transition to the
# ContinuingMNPlusOne state.
# Convert the recent_spans deque into a list for slicing. Skip the last
# item in the list because that would find an N+1 instead.
recent_span_list = list(self.recent_spans)
for i, recent_span in enumerate(recent_span_list[:-1]):
if self._equivalent(span, recent_span):
pattern = recent_span_list[i:]
if self._is_valid_pattern(pattern):
return (ContinuingMNPlusOne(self.settings, self.event, pattern, span), None)
# We haven't found a pattern yet, so remember this span and keep
# looking.
self.recent_spans.append(span)
return (self, None)
def _is_valid_pattern(self, pattern: Sequence[Span]) -> bool:
"""A valid pattern contains at least one db operation and is not all equivalent."""
found_db_op = False
found_different_span = False
for span in pattern:
op = span.get("op") or ""
description = span.get("description") or ""
found_db_op = found_db_op or bool(
op.startswith("db")
and not op.startswith("db.redis")
and description
and not description.endswith("...")
)
found_different_span = found_different_span or not self._equivalent(pattern[0], span)
if found_db_op and found_different_span:
return True
return False
| SearchingForMNPlusOne |
python | sympy__sympy | sympy/assumptions/predicates/common.py | {
"start": 166,
"end": 898
} | class ____(Predicate):
"""
Commutative predicate.
Explanation
===========
``ask(Q.commutative(x))`` is true iff ``x`` commutes with any other
object with respect to multiplication operation.
Examples
========
>>> from sympy import Q, ask, Symbol
>>> x = Symbol('x')
>>> ask(Q.commutative(x))
True
>>> ask(Q.commutative(2))
True
>>> nc = Symbol('nc', commutative=False)
>>> ask(Q.commutative(nc))
False
>>> ask(Q.commutative(x * nc))
False
"""
name = 'commutative'
handler = Dispatcher("CommutativeHandler", doc="Handler for key 'commutative'.")
binrelpreds = {Eq: Q.eq, Ne: Q.ne, Gt: Q.gt, Lt: Q.lt, Ge: Q.ge, Le: Q.le}
| CommutativePredicate |
python | PyCQA__pylint | tests/checkers/base/unittest_multi_naming_style.py | {
"start": 436,
"end": 5164
} | class ____(CheckerTestCase):
CHECKER_CLASS = base.NameChecker
MULTI_STYLE_RE = re.compile("(?:(?P<UP>[A-Z]+)|(?P<down>[a-z]+))$")
@set_config(class_rgx=MULTI_STYLE_RE)
def test_multi_name_detection_majority(self) -> None:
classes = astroid.extract_node(
"""
class classb(object): #@
pass
class CLASSA(object): #@
pass
class CLASSC(object): #@
pass
"""
)
message = MessageTest(
"invalid-name",
node=classes[0],
args=(
"Class",
"classb",
"the `UP` group in the '(?:(?P<UP>[A-Z]+)|(?P<down>[a-z]+))$' pattern",
),
confidence=HIGH,
line=2,
col_offset=0,
end_line=2,
end_col_offset=12,
)
with self.assertAddsMessages(message):
cls = None
for cls in classes:
self.checker.visit_classdef(cls)
if cls:
self.checker.leave_module(cls.root)
@set_config(class_rgx=MULTI_STYLE_RE)
def test_multi_name_detection_first_invalid(self) -> None:
classes = astroid.extract_node(
"""
class class_a(object): #@
pass
class classb(object): #@
pass
class CLASSC(object): #@
pass
"""
)
messages = [
MessageTest(
"invalid-name",
node=classes[0],
args=(
"Class",
"class_a",
"'(?:(?P<UP>[A-Z]+)|(?P<down>[a-z]+))$' pattern",
),
confidence=HIGH,
line=2,
col_offset=0,
end_line=2,
end_col_offset=13,
),
MessageTest(
"invalid-name",
node=classes[2],
args=(
"Class",
"CLASSC",
"the `down` group in the '(?:(?P<UP>[A-Z]+)|(?P<down>[a-z]+))$' pattern",
),
confidence=HIGH,
line=6,
col_offset=0,
end_line=6,
end_col_offset=12,
),
]
with self.assertAddsMessages(*messages):
cls = None
for cls in classes:
self.checker.visit_classdef(cls)
if cls:
self.checker.leave_module(cls.root)
@set_config(
method_rgx=MULTI_STYLE_RE,
function_rgx=MULTI_STYLE_RE,
name_group=("function:method",),
)
def test_multi_name_detection_group(self) -> None:
function_defs = astroid.extract_node(
"""
class First(object):
def func(self): #@
pass
def FUNC(): #@
pass
""",
module_name="test",
)
message = MessageTest(
"invalid-name",
node=function_defs[1],
args=(
"Function",
"FUNC",
"the `down` group in the '(?:(?P<UP>[A-Z]+)|(?P<down>[a-z]+))$' pattern",
),
confidence=HIGH,
line=6,
col_offset=0,
end_line=6,
end_col_offset=8,
)
with self.assertAddsMessages(message):
func = None
for func in function_defs:
self.checker.visit_functiondef(func)
if func:
self.checker.leave_module(func.root)
@set_config(
function_rgx=re.compile("(?:(?P<ignore>FOO)|(?P<UP>[A-Z]+)|(?P<down>[a-z]+))$")
)
def test_multi_name_detection_exempt(self) -> None:
function_defs = astroid.extract_node(
"""
def FOO(): #@
pass
def lower(): #@
pass
def FOO(): #@
pass
def UPPER(): #@
pass
"""
)
message = MessageTest(
"invalid-name",
node=function_defs[3],
args=(
"Function",
"UPPER",
"the `down` group in the '(?:(?P<ignore>FOO)|(?P<UP>[A-Z]+)|(?P<down>[a-z]+))$' pattern",
),
confidence=HIGH,
line=8,
col_offset=0,
end_line=8,
end_col_offset=9,
)
with self.assertAddsMessages(message):
func = None
for func in function_defs:
self.checker.visit_functiondef(func)
if func:
self.checker.leave_module(func.root)
| TestMultiNamingStyle |
python | networkx__networkx | networkx/algorithms/centrality/flow_matrix.py | {
"start": 1252,
"end": 2397
} | class ____:
def __init__(self, L, width=None, dtype=None):
global np
import numpy as np
(n, n) = L.shape
self.dtype = dtype
self.n = n
if width is None:
self.w = self.width(L)
else:
self.w = width
self.C = np.zeros((self.w, n), dtype=dtype)
self.L1 = L[1:, 1:]
self.init_solver(L)
def init_solver(self, L):
pass
def solve(self, r):
raise nx.NetworkXError("Implement solver")
def solve_inverse(self, r):
raise nx.NetworkXError("Implement solver")
def get_rows(self, r1, r2):
for r in range(r1, r2 + 1):
self.C[r % self.w, 1:] = self.solve_inverse(r)
return self.C
def get_row(self, r):
self.C[r % self.w, 1:] = self.solve_inverse(r)
return self.C[r % self.w]
def width(self, L):
m = 0
for i, row in enumerate(L):
w = 0
y = np.nonzero(row)[-1]
if len(y) > 0:
v = y - i
w = v.max() - v.min() + 1
m = max(w, m)
return m
| InverseLaplacian |
python | getsentry__sentry | tests/sentry/integrations/test_issues.py | {
"start": 1271,
"end": 20842
} | class ____(TestCase):
def test_status_sync_inbound_resolve(self) -> None:
group = self.group
assert group.status == GroupStatus.UNRESOLVED
with assume_test_silo_mode(SiloMode.CONTROL):
integration = self.create_provider_integration(provider="example", external_id="123456")
integration.add_organization(group.organization, self.user)
for oi in OrganizationIntegration.objects.filter(
integration_id=integration.id, organization_id=group.organization.id
):
oi.update(
config={
"sync_comments": True,
"sync_status_outbound": True,
"sync_status_inbound": True,
"sync_assignee_outbound": True,
"sync_assignee_inbound": True,
}
)
external_issue = ExternalIssue.objects.create(
organization_id=group.organization.id, integration_id=integration.id, key="APP-123"
)
GroupLink.objects.create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
)
installation = integration.get_installation(group.organization.id)
assert isinstance(installation, ExampleIntegration)
with self.feature("organizations:integrations-issue-sync"), self.tasks():
installation.sync_status_inbound(
external_issue.key,
{"project_id": "APP", "status": {"id": "12345", "category": "done"}},
)
assert Group.objects.get(id=group.id).status == GroupStatus.RESOLVED
activity = Activity.objects.get(group_id=group.id, type=ActivityType.SET_RESOLVED.value)
assert activity.data == {
"integration_id": integration.id,
"provider": integration.get_provider().name,
"provider_key": integration.get_provider().key,
}
def test_sync_status_resolve_in_next_release_no_releases(self) -> None:
group = self.group
assert group.status == GroupStatus.UNRESOLVED
with assume_test_silo_mode(SiloMode.CONTROL):
integration = self.create_provider_integration(provider="example", external_id="123456")
integration.add_organization(group.organization, self.user)
for oi in OrganizationIntegration.objects.filter(
integration_id=integration.id, organization_id=group.organization.id
):
oi.update(
config={
"sync_comments": True,
"sync_status_outbound": True,
"sync_status_inbound": True,
"sync_assignee_outbound": True,
"sync_assignee_inbound": True,
"resolution_strategy": "resolve_next_release",
}
)
external_issue = ExternalIssue.objects.create(
organization_id=group.organization.id, integration_id=integration.id, key="APP-123"
)
GroupLink.objects.create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
)
installation = integration.get_installation(group.organization.id)
assert isinstance(installation, ExampleIntegration)
with self.feature("organizations:integrations-issue-sync"), self.tasks():
installation.sync_status_inbound(
external_issue.key,
{"project_id": "APP", "status": {"id": "12345", "category": "done"}},
)
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.RESOLVED
assert group.resolved_at is not None
activity = Activity.objects.get(group_id=group.id, type=ActivityType.SET_RESOLVED.value)
assert activity.data == {
"integration_id": integration.id,
"provider": integration.get_provider().name,
"provider_key": integration.get_provider().key,
}
def test_sync_status_resolve_in_next_release_with_releases(self) -> None:
release = Release.objects.create(organization_id=self.project.organization_id, version="a")
release2 = Release.objects.create(organization_id=self.project.organization_id, version="b")
release.add_project(self.project)
release2.add_project(self.project)
group = self.create_group(status=GroupStatus.UNRESOLVED)
# add releases in the reverse order
self.create_group_release(group=group, release=release2)
self.create_group_release(group=group, release=release)
assert group.status == GroupStatus.UNRESOLVED
with assume_test_silo_mode(SiloMode.CONTROL):
integration = self.create_provider_integration(provider="example", external_id="123456")
integration.add_organization(group.organization, self.user)
for oi in OrganizationIntegration.objects.filter(
integration_id=integration.id, organization_id=group.organization.id
):
oi.update(
config={
"sync_comments": True,
"sync_status_outbound": True,
"sync_status_inbound": True,
"sync_assignee_outbound": True,
"sync_assignee_inbound": True,
"resolution_strategy": "resolve_next_release",
}
)
external_issue = ExternalIssue.objects.create(
organization_id=group.organization.id, integration_id=integration.id, key="APP-123"
)
GroupLink.objects.create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
)
installation = integration.get_installation(group.organization.id)
assert isinstance(installation, ExampleIntegration)
with self.feature("organizations:integrations-issue-sync"), self.tasks():
installation.sync_status_inbound(
external_issue.key,
{"project_id": "APP", "status": {"id": "12345", "category": "done"}},
)
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.RESOLVED
assert group.resolved_at is not None
activity = Activity.objects.get(
group_id=group.id,
type=ActivityType.SET_RESOLVED_IN_RELEASE.value,
)
assert GroupResolution.objects.filter(
group=group,
current_release_version=release.version,
release=release2,
type=GroupResolution.Type.in_next_release,
).exists()
assert activity.data == {
"integration_id": integration.id,
"provider": integration.get_provider().name,
"provider_key": integration.get_provider().key,
"inNextRelease": True,
"version": release2.version,
}
def test_sync_status_does_not_override_existing_recent_group_resolution(self) -> None:
"""
Test that the sync_status_inbound does not override the existing group resolution
if the group was recently resolved
"""
release = Release.objects.create(organization_id=self.project.organization_id, version="a")
release2 = Release.objects.create(organization_id=self.project.organization_id, version="b")
release.add_project(self.project)
release2.add_project(self.project)
group = self.create_group(status=GroupStatus.UNRESOLVED)
# add releases in the reverse order
self.create_group_release(group=group, release=release2)
self.create_group_release(group=group, release=release)
assert group.status == GroupStatus.UNRESOLVED
# Resolve the group in old_release
group.update(status=GroupStatus.RESOLVED, substatus=None)
resolution = GroupResolution.objects.create(release=release, group=group)
assert resolution.current_release_version is None
assert resolution.release == release
activity = Activity.objects.create(
group=group,
project=group.project,
type=ActivityType.SET_RESOLVED_IN_RELEASE.value,
ident=resolution.id,
data={"version": release.version},
)
with assume_test_silo_mode(SiloMode.CONTROL):
integration = self.create_provider_integration(provider="example", external_id="123456")
integration.add_organization(group.organization, self.user)
for oi in OrganizationIntegration.objects.filter(
integration_id=integration.id, organization_id=group.organization.id
):
oi.update(
config={
"sync_comments": True,
"sync_status_outbound": True,
"sync_status_inbound": True,
"sync_assignee_outbound": True,
"sync_assignee_inbound": True,
"resolution_strategy": "resolve_next_release",
}
)
external_issue = ExternalIssue.objects.create(
organization_id=group.organization.id, integration_id=integration.id, key="APP-123"
)
GroupLink.objects.create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
)
installation = integration.get_installation(group.organization.id)
assert isinstance(installation, ExampleIntegration)
with self.feature("organizations:integrations-issue-sync"), self.tasks():
installation.sync_status_inbound(
external_issue.key,
{"project_id": "APP", "status": {"id": "12345", "category": "done"}},
)
assert Group.objects.get(id=group.id).status == GroupStatus.RESOLVED
resolution.refresh_from_db()
assert resolution.release == release
assert resolution.current_release_version is None
activity.refresh_from_db()
assert activity.data["version"] == release.version
def test_sync_status_resolve_in_next_release_with_semver(self) -> None:
release = Release.objects.create(
organization_id=self.project.organization_id, version="app@1.2.4"
)
release2 = Release.objects.create(
organization_id=self.project.organization_id, version="app@1.2.3"
)
release.add_project(self.project)
release2.add_project(self.project)
group = self.create_group(status=GroupStatus.UNRESOLVED)
self.create_group_release(group=group, release=release)
self.create_group_release(group=group, release=release2)
assert group.status == GroupStatus.UNRESOLVED
with assume_test_silo_mode(SiloMode.CONTROL):
integration = self.create_provider_integration(provider="example", external_id="123456")
integration.add_organization(group.organization, self.user)
for oi in OrganizationIntegration.objects.filter(
integration_id=integration.id, organization_id=group.organization.id
):
oi.update(
config={
"sync_comments": True,
"sync_status_outbound": True,
"sync_status_inbound": True,
"sync_assignee_outbound": True,
"sync_assignee_inbound": True,
"resolution_strategy": "resolve_next_release",
}
)
external_issue = ExternalIssue.objects.create(
organization_id=group.organization.id, integration_id=integration.id, key="APP-123"
)
GroupLink.objects.create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
)
installation = integration.get_installation(group.organization.id)
assert isinstance(installation, ExampleIntegration)
with self.feature("organizations:integrations-issue-sync"), self.tasks():
installation.sync_status_inbound(
external_issue.key,
{"project_id": "APP", "status": {"id": "12345", "category": "done"}},
)
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.RESOLVED
assert group.resolved_at is not None
activity = Activity.objects.get(
group_id=group.id,
type=ActivityType.SET_RESOLVED_IN_RELEASE.value,
)
assert GroupResolution.objects.filter(
group=group,
current_release_version=release.version,
release=release2,
type=GroupResolution.Type.in_next_release,
).exists()
assert activity.data == {
"integration_id": integration.id,
"provider": integration.get_provider().name,
"provider_key": integration.get_provider().key,
"inNextRelease": True,
"current_release_version": "app@1.2.4",
}
def test_sync_status_resolve_in_current_release_with_releases(self) -> None:
release = Release.objects.create(organization_id=self.project.organization_id, version="a")
release.add_project(self.project)
group = self.create_group(status=GroupStatus.UNRESOLVED)
self.create_group_release(group=group, release=release)
assert group.status == GroupStatus.UNRESOLVED
with assume_test_silo_mode(SiloMode.CONTROL):
integration = self.create_provider_integration(provider="example", external_id="123456")
integration.add_organization(group.organization, self.user)
for oi in OrganizationIntegration.objects.filter(
integration_id=integration.id, organization_id=group.organization.id
):
oi.update(
config={
"sync_comments": True,
"sync_status_outbound": True,
"sync_status_inbound": True,
"sync_assignee_outbound": True,
"sync_assignee_inbound": True,
"resolution_strategy": "resolve_current_release",
}
)
external_issue = ExternalIssue.objects.create(
organization_id=group.organization.id, integration_id=integration.id, key="APP-123"
)
GroupLink.objects.create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
)
installation = integration.get_installation(group.organization.id)
assert isinstance(installation, ExampleIntegration)
with self.feature("organizations:integrations-issue-sync"), self.tasks():
installation.sync_status_inbound(
external_issue.key,
{"project_id": "APP", "status": {"id": "12345", "category": "done"}},
)
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.RESOLVED
assert group.resolved_at is not None
activity = Activity.objects.get(
group_id=group.id,
type=ActivityType.SET_RESOLVED_IN_RELEASE.value,
)
assert GroupResolution.objects.filter(
group=group,
current_release_version=None,
release=release,
type=GroupResolution.Type.in_release,
).exists()
assert activity.data == {
"integration_id": integration.id,
"provider": integration.get_provider().name,
"provider_key": integration.get_provider().key,
}
def test_status_sync_inbound_unresolve(self) -> None:
group = self.group
group.status = GroupStatus.RESOLVED
group.substatus = None
group.save()
assert group.status == GroupStatus.RESOLVED
activity = Activity.objects.create(
group=group,
project=group.project,
type=ActivityType.SET_RESOLVED.value,
datetime=timezone.now(),
)
with assume_test_silo_mode(SiloMode.CONTROL):
integration = self.create_provider_integration(provider="example", external_id="123456")
integration.add_organization(group.organization, self.user)
for oi in OrganizationIntegration.objects.filter(
integration_id=integration.id, organization_id=group.organization.id
):
oi.update(
config={
"sync_comments": True,
"sync_status_outbound": True,
"sync_status_inbound": True,
"sync_assignee_outbound": True,
"sync_assignee_inbound": True,
}
)
external_issue = ExternalIssue.objects.create(
organization_id=group.organization.id, integration_id=integration.id, key="APP-123"
)
GroupLink.objects.create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
)
installation = integration.get_installation(group.organization.id)
assert isinstance(installation, ExampleIntegration)
with self.feature("organizations:integrations-issue-sync"), self.tasks():
installation.sync_status_inbound(
external_issue.key,
{"project_id": "APP", "status": {"id": "12345", "category": "in_progress"}},
)
assert Group.objects.get(id=group.id).status == GroupStatus.UNRESOLVED
activity = Activity.objects.get(
group_id=group.id, type=ActivityType.SET_UNRESOLVED.value
)
assert activity.data == {
"integration_id": integration.id,
"provider": integration.get_provider().name,
"provider_key": integration.get_provider().key,
}
| IssueSyncIntegration |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/secrets/test_systems_manager.py | {
"start": 1615,
"end": 14819
} | class ____:
@mock.patch(
"airflow.providers.amazon.aws.secrets.systems_manager."
"SystemsManagerParameterStoreBackend.get_conn_value"
)
def test_aws_ssm_get_connection(self, mock_get_value):
mock_get_value.return_value = "scheme://user:pass@host:100"
conn = SystemsManagerParameterStoreBackend().get_connection("fake_conn")
assert conn.host == "host"
@mock_aws
@pytest.mark.parametrize("ssm_value", [JSON_CONNECTION, URI_CONNECTION])
def test_get_conn_value(self, ssm_value):
param = {
"Name": "/airflow/connections/test_postgres",
"Type": "String",
"Value": ssm_value,
}
ssm_backend = SystemsManagerParameterStoreBackend()
ssm_backend.client.put_parameter(**param)
returned_conn_value = ssm_backend.get_conn_value(conn_id="test_postgres")
assert ssm_value == returned_conn_value
test_conn = ssm_backend.get_connection(conn_id="test_postgres")
assert test_conn.conn_id == "test_postgres"
assert test_conn.conn_type == "postgres"
assert test_conn.login == "my-login"
assert test_conn.password == "my-pass"
assert test_conn.host == "my-host"
assert test_conn.port == 5432
assert test_conn.schema == "my-schema"
assert test_conn.extra_dejson == {"param1": "val1", "param2": "val2"}
@mock_aws
def test_get_conn_value_non_existent_key(self):
"""
Test that if the key with connection ID is not present in SSM,
SystemsManagerParameterStoreBackend.get_connection should return None
"""
conn_id = "test_mysql"
param = {
"Name": "/airflow/connections/test_postgres",
"Type": "String",
"Value": "postgresql://airflow:airflow@host:5432/airflow",
}
ssm_backend = SystemsManagerParameterStoreBackend()
ssm_backend.client.put_parameter(**param)
assert ssm_backend.get_conn_value(conn_id=conn_id) is None
assert ssm_backend.get_connection(conn_id=conn_id) is None
@mock_aws
def test_get_variable(self):
param = {"Name": "/airflow/variables/hello", "Type": "String", "Value": "world"}
ssm_backend = SystemsManagerParameterStoreBackend()
ssm_backend.client.put_parameter(**param)
returned_uri = ssm_backend.get_variable("hello")
assert returned_uri == "world"
@mock_aws
def test_get_config(self):
param = {
"Name": "/airflow/config/sql_alchemy_conn",
"Type": "String",
"Value": "sqlite:///Users/test_user/airflow.db",
}
ssm_backend = SystemsManagerParameterStoreBackend()
ssm_backend.client.put_parameter(**param)
returned_uri = ssm_backend.get_config("sql_alchemy_conn")
assert returned_uri == "sqlite:///Users/test_user/airflow.db"
@mock_aws
def test_get_variable_secret_string(self):
param = {"Name": "/airflow/variables/hello", "Type": "SecureString", "Value": "world"}
ssm_backend = SystemsManagerParameterStoreBackend()
ssm_backend.client.put_parameter(**param)
returned_uri = ssm_backend.get_variable("hello")
assert returned_uri == "world"
@mock_aws
def test_get_variable_non_existent_key(self):
"""
Test that if Variable key is not present in SSM,
SystemsManagerParameterStoreBackend.get_variables should return None
"""
param = {"Name": "/airflow/variables/hello", "Type": "String", "Value": "world"}
ssm_backend = SystemsManagerParameterStoreBackend()
ssm_backend.client.put_parameter(**param)
assert ssm_backend.get_variable("test_mysql") is None
@conf_vars(
{
("secrets", "backend"): "airflow.providers.amazon.aws.secrets.systems_manager."
"SystemsManagerParameterStoreBackend",
(
"secrets",
"backend_kwargs",
): '{"use_ssl": false, "role_arn": "arn:aws:iam::222222222222:role/awesome-role"}',
}
)
@mock.patch("airflow.providers.amazon.aws.hooks.base_aws.SessionFactory")
def test_passing_client_kwargs(self, mock_session_factory):
backends = initialize_secrets_backends()
systems_manager = next(
backend
for backend in backends
if backend.__class__.__name__ == "SystemsManagerParameterStoreBackend"
)
# Mock SessionFactory, session and client
mock_session_factory_instance = mock_session_factory.return_value
mock_ssm_client = mock.MagicMock(return_value="mock-ssm-client")
mock_session = mock.MagicMock()
mock_session.client = mock_ssm_client
mock_create_session = mock.MagicMock(return_value=mock_session)
mock_session_factory_instance.create_session = mock_create_session
systems_manager.client
assert mock_session_factory.call_count == 1
mock_session_factory_call_kwargs = mock_session_factory.call_args.kwargs
assert "conn" in mock_session_factory_call_kwargs
conn_wrapper = mock_session_factory_call_kwargs["conn"]
assert conn_wrapper.conn_id == "SystemsManagerParameterStoreBackend__connection"
assert conn_wrapper.role_arn == "arn:aws:iam::222222222222:role/awesome-role"
mock_ssm_client.assert_called_once_with(service_name="ssm", use_ssl=False)
@mock.patch(
"airflow.providers.amazon.aws.secrets.systems_manager.SystemsManagerParameterStoreBackend._get_secret"
)
def test_connection_prefix_none_value(self, mock_get_secret):
"""
Test that if Variable key is not present in SSM,
SystemsManagerParameterStoreBackend.get_conn_value should return None,
SystemsManagerParameterStoreBackend._get_secret should not be called
"""
kwargs = {"connections_prefix": None}
ssm_backend = SystemsManagerParameterStoreBackend(**kwargs)
assert ssm_backend.get_conn_value("test_mysql") is None
mock_get_secret.assert_not_called()
@mock.patch(
"airflow.providers.amazon.aws.secrets.systems_manager.SystemsManagerParameterStoreBackend._get_secret"
)
def test_variable_prefix_none_value(self, mock_get_secret):
"""
Test that if Variable key is not present in SSM,
SystemsManagerParameterStoreBackend.get_variables should return None,
SystemsManagerParameterStoreBackend._get_secret should not be called
"""
kwargs = {"variables_prefix": None}
ssm_backend = SystemsManagerParameterStoreBackend(**kwargs)
assert ssm_backend.get_variable("hello") is None
mock_get_secret.assert_not_called()
@mock.patch(
"airflow.providers.amazon.aws.secrets.systems_manager.SystemsManagerParameterStoreBackend._get_secret"
)
def test_config_prefix_none_value(self, mock_get_secret):
"""
Test that if Variable key is not present in SSM,
SystemsManagerParameterStoreBackend.get_config should return None,
SystemsManagerParameterStoreBackend._get_secret should not be called
"""
kwargs = {"config_prefix": None}
ssm_backend = SystemsManagerParameterStoreBackend(**kwargs)
assert ssm_backend.get_config("config") is None
mock_get_secret.assert_not_called()
@mock.patch(
"airflow.providers.amazon.aws.secrets.systems_manager.SystemsManagerParameterStoreBackend.client",
new_callable=mock.PropertyMock,
)
@pytest.mark.parametrize(
("connection_id", "connections_lookup_pattern", "num_client_calls"),
[
("test", "test", 1),
("test", ".*", 1),
("test", "T.*", 1),
("test", "dummy-pattern", 0),
("test", None, 1),
],
)
def test_connection_lookup_pattern(
self, mock_client, connection_id, connections_lookup_pattern, num_client_calls
):
"""
Test that if Connection ID is looked up in AWS Parameter Store
"""
mock_client().get_parameter.return_value = {
"Parameter": {
"Value": None,
},
}
kwargs = {"connections_lookup_pattern": connections_lookup_pattern}
secrets_manager_backend = SystemsManagerParameterStoreBackend(**kwargs)
secrets_manager_backend.get_conn_value(connection_id)
assert mock_client().get_parameter.call_count == num_client_calls
@mock.patch(
"airflow.providers.amazon.aws.secrets.systems_manager.SystemsManagerParameterStoreBackend.client",
new_callable=mock.PropertyMock,
)
@pytest.mark.parametrize(
("variable_key", "variables_lookup_pattern", "num_client_calls"),
[
("test", "test", 1),
("test", ".*", 1),
("test", "T.*", 1),
("test", "dummy-pattern", 0),
("test", None, 1),
],
)
def test_variable_lookup_pattern(
self, mock_client, variable_key, variables_lookup_pattern, num_client_calls
):
"""
Test that if Variable key is looked up in AWS Parameter Store
"""
mock_client().get_parameter.return_value = {
"Parameter": {
"Value": None,
},
}
kwargs = {"variables_lookup_pattern": variables_lookup_pattern}
secrets_manager_backend = SystemsManagerParameterStoreBackend(**kwargs)
secrets_manager_backend.get_variable(variable_key)
assert mock_client().get_parameter.call_count == num_client_calls
@mock.patch(
"airflow.providers.amazon.aws.secrets.systems_manager.SystemsManagerParameterStoreBackend.client",
new_callable=mock.PropertyMock,
)
@pytest.mark.parametrize(
("config_key", "config_lookup_pattern", "num_client_calls"),
[
("test", "test", 1),
("test", ".*", 1),
("test", "T.*", 1),
("test", "dummy-pattern", 0),
("test", None, 1),
],
)
def test_config_lookup_pattern(self, mock_client, config_key, config_lookup_pattern, num_client_calls):
"""
Test that if Variable key is looked up in AWS Parameter Store
"""
mock_client().get_parameter.return_value = {
"Parameter": {
"Value": None,
},
}
kwargs = {"config_lookup_pattern": config_lookup_pattern}
secrets_manager_backend = SystemsManagerParameterStoreBackend(**kwargs)
secrets_manager_backend.get_config(config_key)
assert mock_client().get_parameter.call_count == num_client_calls
@mock.patch(
"airflow.providers.amazon.aws.secrets.systems_manager.SystemsManagerParameterStoreBackend.client",
new_callable=mock.PropertyMock,
)
def test_connection_prefix_with_no_leading_slash(self, mock_client):
"""
Test that if Connection ID is looked up in AWS Parameter Store with the added leading "/"
"""
mock_client().get_parameter.return_value = {
"Parameter": {
"Value": None,
},
}
kwargs = {"connections_prefix": "airflow/connections"}
secrets_manager_backend = SystemsManagerParameterStoreBackend(**kwargs)
secrets_manager_backend.get_conn_value("test_mysql")
mock_client().get_parameter.assert_called_with(
Name="/airflow/connections/test_mysql", WithDecryption=True
)
@mock.patch(
"airflow.providers.amazon.aws.secrets.systems_manager.SystemsManagerParameterStoreBackend.client",
new_callable=mock.PropertyMock,
)
def test_variable_prefix_with_no_leading_slash(self, mock_client):
"""
Test that if Variable key is looked up in AWS Parameter Store with the added leading "/"
"""
mock_client().get_parameter.return_value = {
"Parameter": {
"Value": None,
},
}
kwargs = {"variables_prefix": "airflow/variables"}
secrets_manager_backend = SystemsManagerParameterStoreBackend(**kwargs)
secrets_manager_backend.get_variable("hello")
mock_client().get_parameter.assert_called_with(Name="/airflow/variables/hello", WithDecryption=True)
@mock.patch(
"airflow.providers.amazon.aws.secrets.systems_manager.SystemsManagerParameterStoreBackend.client",
new_callable=mock.PropertyMock,
)
def test_config_prefix_with_no_leading_slash(self, mock_client):
"""
Test that if Config key is looked up in AWS Parameter Store with the added leading "/"
"""
mock_client().get_parameter.return_value = {
"Parameter": {
"Value": None,
},
}
kwargs = {"config_prefix": "airflow/config"}
secrets_manager_backend = SystemsManagerParameterStoreBackend(**kwargs)
secrets_manager_backend.get_config("config")
mock_client().get_parameter.assert_called_with(Name="/airflow/config/config", WithDecryption=True)
| TestSsmSecrets |
python | tensorflow__tensorflow | tensorflow/python/grappler/cluster.py | {
"start": 1010,
"end": 4362
} | class ____(object):
"""Grappler Clusters."""
def __init__(self,
allow_soft_placement=True,
disable_detailed_stats=True,
disable_timeline=True,
devices=None):
"""Creates a Cluster.
Args:
allow_soft_placement: If True, TF will automatically fix illegal
placements instead of erroring out if the placement isn't legal.
disable_detailed_stats: If True, detailed statistics will not be
available.
disable_timeline: If True, the timeline information will not be reported.
devices: A list of devices of type device_properties_pb2.NamedDevice.
If None, a device list will be created based on the spec of
the local machine.
"""
self._tf_cluster = None
self._generate_timeline = not disable_timeline
if devices is None:
self._tf_cluster = tf_cluster.TF_NewCluster(allow_soft_placement,
disable_detailed_stats)
else:
devices_serialized = [device.SerializeToString() for device in devices]
self._tf_cluster = tf_cluster.TF_NewVirtualCluster(devices_serialized)
def Shutdown(self):
if self._tf_cluster is not None:
tf_cluster.TF_ShutdownCluster(self._tf_cluster)
self._tf_cluster = None
def __del__(self):
self.Shutdown()
@property
def tf_cluster(self):
return self._tf_cluster
def ListDevices(self):
"""Returns a list of available hardware devices."""
if self._tf_cluster is None:
return []
return [device_properties_pb2.NamedDevice.FromString(device)
for device in tf_cluster.TF_ListDevices(self._tf_cluster)]
def ListAvailableOps(self):
"""Returns a list of all available operations (sorted alphabetically)."""
return tf_cluster.TF_ListAvailableOps()
def GetSupportedDevices(self, item):
return tf_cluster.TF_GetSupportedDevices(self._tf_cluster, item.tf_item)
def EstimatePerformance(self, device):
return tf_cluster.TF_EstimatePerformance(device.SerializeToString())
def MeasureCosts(self, item):
"""Returns the cost of running the specified item.
Args:
item: The item for which to measure the costs.
Returns: The triplet op_perfs, runtime, step_stats.
"""
op_perf_bytes_list, run_time, step_stats_bytes = tf_cluster.TF_MeasureCosts(
item.tf_item, self._tf_cluster, self._generate_timeline)
op_perfs = [op_performance_data_pb2.OpPerformance.FromString(op_perf_bytes)
for op_perf_bytes in op_perf_bytes_list]
return (op_perfs, run_time,
step_stats_pb2.StepStats.FromString(step_stats_bytes))
def DeterminePeakMemoryUsage(self, item):
"""Returns a snapshot of the peak memory usage.
Args:
item: The item for which to measure the costs.
Returns: A hashtable indexed by device name.
"""
return tf_cluster.TF_DeterminePeakMemoryUsage(item.tf_item,
self._tf_cluster)
@contextlib.contextmanager
def Provision(allow_soft_placement=True,
disable_detailed_stats=True,
disable_timeline=True,
devices=None):
cluster = Cluster(allow_soft_placement, disable_detailed_stats,
disable_timeline, devices)
yield cluster
cluster.Shutdown()
| Cluster |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-gitbook/tests/test_gitbook_client.py | {
"start": 163,
"end": 3943
} | class ____(unittest.TestCase):
def setUp(self):
"""Sets up test environment before each test case."""
self.client = GitbookClient("test_token")
self.space_id = "test_space"
self.page_id = "test_page"
self.fixtures_path = os.path.join(os.path.dirname(__file__), "fixtures")
def load_fixture(self, filename):
"""Helper method to load test data files."""
with open(os.path.join(self.fixtures_path, filename), encoding="utf-8") as f:
return json.load(f)
@patch("requests.get")
def test_get_space(self, mock_get):
# Load test data
mock_data = self.load_fixture("space_response.json")
# Set up mock response
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = mock_data
mock_get.return_value = mock_response
response = self.client.get_space(self.space_id)
# Verify
mock_get.assert_called_once_with(
f"{self.client.base_url}/spaces/{self.space_id}",
headers=self.client.headers,
)
self.assertEqual(response["title"], mock_data["title"])
self.assertEqual(response["id"], mock_data["id"])
@patch("requests.get")
def test_list_pages(self, mock_get):
# Load test data
space_data = self.load_fixture("space_response.json")
pages_data = self.load_fixture("pages_response.json")
# Set up space info response
space_response = Mock()
space_response.status_code = 200
space_response.json.return_value = space_data
# Set up pages list response
pages_response = Mock()
pages_response.status_code = 200
pages_response.json.return_value = pages_data
mock_get.side_effect = [space_response, pages_response]
response = self.client.list_pages(self.space_id)
# Verify
self.assertEqual(len(response), 2)
self.assertEqual(
response[0]["title"],
f"{space_data['title']} > {pages_data['pages'][0]['title']}",
)
self.assertEqual(response[0]["id"], pages_data["pages"][0]["id"])
@patch("requests.get")
def test_get_page_markdown(self, mock_get):
# Load test data
mock_data = self.load_fixture("page_content_response.json")
# Set up mock response
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = mock_data
mock_get.return_value = mock_response
content = self.client.get_page_markdown(self.space_id, self.page_id)
# Verify
mock_get.assert_called_once_with(
f"{self.client.base_url}/spaces/{self.space_id}/content/page/{self.page_id}?format=markdown",
headers=self.client.headers,
)
self.assertEqual(content, mock_data["markdown"])
@patch("requests.get")
def test_error_handling(self, mock_get):
"""Tests error handling for API requests."""
# Set up mock error response
mock_response = Mock()
mock_response.status_code = 401
mock_response.ok = False
mock_response.reason = "Unauthorized"
mock_response.raise_for_status.side_effect = requests.exceptions.HTTPError(
response=mock_response
)
mock_get.return_value = mock_response
# Verify error handling
import pytest
with pytest.raises(Exception) as context:
self.client.get_space(self.space_id)
error_message = str(context.value)
assert "Error" in error_message
assert "401" in error_message
assert "Unauthorized" in error_message
if __name__ == "__main__":
unittest.main()
| TestGitbookClient |
python | apache__airflow | providers/apache/pinot/tests/unit/apache/pinot/hooks/test_pinot.py | {
"start": 10151,
"end": 14885
} | class ____:
def setup_method(self):
self.conn = conn = mock.MagicMock()
self.conn.host = "host"
self.conn.port = "1000"
self.conn.login = "user"
self.conn.password = "pwd"
self.conn.extra_dejson = {}
class PinotAdminHookTest(PinotAdminHook):
def get_connection(self, conn_id):
return conn
self.db_hook = PinotAdminHookTest()
@mock.patch("airflow.providers.apache.pinot.hooks.pinot.PinotAdminHook.run_cli")
def test_add_schema_with_auth(self, mock_run_cli):
params = ["schema_file", False]
self.db_hook.add_schema(*params)
mock_run_cli.assert_called_once_with(
[
"AddSchema",
"-user",
self.conn.login,
"-password",
self.conn.password,
"-controllerHost",
self.conn.host,
"-controllerPort",
self.conn.port,
"-schemaFile",
params[0],
]
)
@mock.patch("airflow.providers.apache.pinot.hooks.pinot.PinotAdminHook.run_cli")
def test_add_table_with_auth(self, mock_run_cli):
params = ["config_file", False]
self.db_hook.add_table(*params)
mock_run_cli.assert_called_once_with(
[
"AddTable",
"-user",
self.conn.login,
"-password",
self.conn.password,
"-controllerHost",
self.conn.host,
"-controllerPort",
self.conn.port,
"-filePath",
params[0],
]
)
@mock.patch("airflow.providers.apache.pinot.hooks.pinot.PinotAdminHook.run_cli")
def test_create_segment_with_auth(self, mock_run_cli):
params = {
"generator_config_file": "a",
"data_dir": "b",
"segment_format": "c",
"out_dir": "d",
"overwrite": True,
"table_name": "e",
"segment_name": "f",
"time_column_name": "g",
"schema_file": "h",
"reader_config_file": "i",
"enable_star_tree_index": False,
"star_tree_index_spec_file": "j",
"hll_size": 9,
"hll_columns": "k",
"hll_suffix": "l",
"num_threads": 8,
"post_creation_verification": True,
"retry": 7,
}
self.db_hook.create_segment(**params)
mock_run_cli.assert_called_once_with(
[
"CreateSegment",
"-user",
self.conn.login,
"-password",
self.conn.password,
"-generatorConfigFile",
params["generator_config_file"],
"-dataDir",
params["data_dir"],
"-format",
params["segment_format"],
"-outDir",
params["out_dir"],
"-overwrite",
params["overwrite"],
"-tableName",
params["table_name"],
"-segmentName",
params["segment_name"],
"-timeColumnName",
params["time_column_name"],
"-schemaFile",
params["schema_file"],
"-readerConfigFile",
params["reader_config_file"],
"-starTreeIndexSpecFile",
params["star_tree_index_spec_file"],
"-hllSize",
params["hll_size"],
"-hllColumns",
params["hll_columns"],
"-hllSuffix",
params["hll_suffix"],
"-numThreads",
params["num_threads"],
"-postCreationVerification",
params["post_creation_verification"],
"-retry",
params["retry"],
]
)
@mock.patch("airflow.providers.apache.pinot.hooks.pinot.PinotAdminHook.run_cli")
def test_upload_segment_with_auth(self, mock_run_cli):
params = ["segment_dir", False]
self.db_hook.upload_segment(*params)
mock_run_cli.assert_called_once_with(
[
"UploadSegment",
"-user",
self.conn.login,
"-password",
self.conn.password,
"-controllerHost",
self.conn.host,
"-controllerPort",
self.conn.port,
"-segmentDir",
params[0],
]
)
| TestPinotAdminHookWithAuth |
python | sympy__sympy | sympy/matrices/expressions/blockmatrix.py | {
"start": 18864,
"end": 31906
} | class ____(BlockMatrix):
"""A sparse matrix with block matrices along its diagonals
Examples
========
>>> from sympy import MatrixSymbol, BlockDiagMatrix, symbols
>>> n, m, l = symbols('n m l')
>>> X = MatrixSymbol('X', n, n)
>>> Y = MatrixSymbol('Y', m, m)
>>> BlockDiagMatrix(X, Y)
Matrix([
[X, 0],
[0, Y]])
Notes
=====
If you want to get the individual diagonal blocks, use
:meth:`get_diag_blocks`.
See Also
========
sympy.matrices.dense.diag
"""
def __new__(cls, *mats):
mats = [_sympify(m) for m in mats]
for mat in mats:
if not isinstance(mat, MatrixExpr):
raise ValueError(
f"BlockDiagMatrix requires matrix-like objects. "
f"Got {type(mat).__name__} with value {mat}")
return Basic.__new__(BlockDiagMatrix, *mats)
@property
def diag(self):
return self.args
@property
def blocks(self):
from sympy.matrices.immutable import ImmutableDenseMatrix
mats = self.args
data = [[mats[i] if i == j else ZeroMatrix(mats[i].rows, mats[j].cols)
for j in range(len(mats))]
for i in range(len(mats))]
return ImmutableDenseMatrix(data, evaluate=False)
@property
def shape(self):
return (sum(block.rows for block in self.args),
sum(block.cols for block in self.args))
@property
def blockshape(self):
n = len(self.args)
return (n, n)
@property
def rowblocksizes(self):
return [block.rows for block in self.args]
@property
def colblocksizes(self):
return [block.cols for block in self.args]
def _all_square_blocks(self):
"""Returns true if all blocks are square"""
return all(mat.is_square for mat in self.args)
def _eval_determinant(self):
if self._all_square_blocks():
return Mul(*[det(mat) for mat in self.args])
# At least one block is non-square. Since the entire matrix must be square we know there must
# be at least two blocks in this matrix, in which case the entire matrix is necessarily rank-deficient
return S.Zero
def _eval_inverse(self, expand='ignored'):
if self._all_square_blocks():
return BlockDiagMatrix(*[mat.inverse() for mat in self.args])
# See comment in _eval_determinant()
raise NonInvertibleMatrixError('Matrix det == 0; not invertible.')
def _eval_transpose(self):
return BlockDiagMatrix(*[mat.transpose() for mat in self.args])
def _blockmul(self, other):
if (isinstance(other, BlockDiagMatrix) and
self.colblocksizes == other.rowblocksizes):
return BlockDiagMatrix(*[a*b for a, b in zip(self.args, other.args)])
else:
return BlockMatrix._blockmul(self, other)
def _blockadd(self, other):
if (isinstance(other, BlockDiagMatrix) and
self.blockshape == other.blockshape and
self.rowblocksizes == other.rowblocksizes and
self.colblocksizes == other.colblocksizes):
return BlockDiagMatrix(*[a + b for a, b in zip(self.args, other.args)])
else:
return BlockMatrix._blockadd(self, other)
def get_diag_blocks(self):
"""Return the list of diagonal blocks of the matrix.
Examples
========
>>> from sympy import BlockDiagMatrix, Matrix
>>> A = Matrix([[1, 2], [3, 4]])
>>> B = Matrix([[5, 6], [7, 8]])
>>> M = BlockDiagMatrix(A, B)
How to get diagonal blocks from the block diagonal matrix:
>>> diag_blocks = M.get_diag_blocks()
>>> diag_blocks[0]
Matrix([
[1, 2],
[3, 4]])
>>> diag_blocks[1]
Matrix([
[5, 6],
[7, 8]])
"""
return self.args
def block_collapse(expr):
"""Evaluates a block matrix expression
>>> from sympy import MatrixSymbol, BlockMatrix, symbols, Identity, ZeroMatrix, block_collapse
>>> n,m,l = symbols('n m l')
>>> X = MatrixSymbol('X', n, n)
>>> Y = MatrixSymbol('Y', m, m)
>>> Z = MatrixSymbol('Z', n, m)
>>> B = BlockMatrix([[X, Z], [ZeroMatrix(m, n), Y]])
>>> print(B)
Matrix([
[X, Z],
[0, Y]])
>>> C = BlockMatrix([[Identity(n), Z]])
>>> print(C)
Matrix([[I, Z]])
>>> print(block_collapse(C*B))
Matrix([[X, Z + Z*Y]])
"""
from sympy.strategies.util import expr_fns
hasbm = lambda expr: isinstance(expr, MatrixExpr) and expr.has(BlockMatrix)
conditioned_rl = condition(
hasbm,
typed(
{MatAdd: do_one(bc_matadd, bc_block_plus_ident),
MatMul: do_one(bc_matmul, bc_dist),
MatPow: bc_matmul,
Transpose: bc_transpose,
Inverse: bc_inverse,
BlockMatrix: do_one(bc_unpack, deblock)}
)
)
rule = exhaust(
bottom_up(
exhaust(conditioned_rl),
fns=expr_fns
)
)
result = rule(expr)
doit = getattr(result, 'doit', None)
if doit is not None:
return doit()
else:
return result
def bc_unpack(expr):
if expr.blockshape == (1, 1):
return expr.blocks[0, 0]
return expr
def bc_matadd(expr):
args = sift(expr.args, lambda M: isinstance(M, BlockMatrix))
blocks = args[True]
if not blocks:
return expr
nonblocks = args[False]
block = blocks[0]
for b in blocks[1:]:
block = block._blockadd(b)
if nonblocks:
return MatAdd(*nonblocks) + block
else:
return block
def bc_block_plus_ident(expr):
idents = [arg for arg in expr.args if arg.is_Identity]
if not idents:
return expr
blocks = [arg for arg in expr.args if isinstance(arg, BlockMatrix)]
if (blocks and all(b.structurally_equal(blocks[0]) for b in blocks)
and blocks[0].is_structurally_symmetric):
block_id = BlockDiagMatrix(*[Identity(k)
for k in blocks[0].rowblocksizes])
rest = [arg for arg in expr.args if not arg.is_Identity and not isinstance(arg, BlockMatrix)]
return MatAdd(block_id * len(idents), *blocks, *rest).doit()
return expr
def bc_dist(expr):
""" Turn a*[X, Y] into [a*X, a*Y] """
factor, mat = expr.as_coeff_mmul()
if factor == 1:
return expr
unpacked = unpack(mat)
if isinstance(unpacked, BlockDiagMatrix):
B = unpacked.diag
new_B = [factor * mat for mat in B]
return BlockDiagMatrix(*new_B)
elif isinstance(unpacked, BlockMatrix):
B = unpacked.blocks
new_B = [
[factor * B[i, j] for j in range(B.cols)] for i in range(B.rows)]
return BlockMatrix(new_B)
return expr
def bc_matmul(expr):
if isinstance(expr, MatPow):
if expr.args[1].is_Integer and expr.args[1] > 0:
factor, matrices = 1, [expr.args[0]]*expr.args[1]
else:
return expr
else:
factor, matrices = expr.as_coeff_matrices()
i = 0
while (i+1 < len(matrices)):
A, B = matrices[i:i+2]
if isinstance(A, BlockMatrix) and isinstance(B, BlockMatrix):
matrices[i] = A._blockmul(B)
matrices.pop(i+1)
elif isinstance(A, BlockMatrix):
matrices[i] = A._blockmul(BlockMatrix([[B]]))
matrices.pop(i+1)
elif isinstance(B, BlockMatrix):
matrices[i] = BlockMatrix([[A]])._blockmul(B)
matrices.pop(i+1)
else:
i+=1
return MatMul(factor, *matrices).doit()
def bc_transpose(expr):
collapse = block_collapse(expr.arg)
return collapse._eval_transpose()
def bc_inverse(expr):
if isinstance(expr.arg, BlockDiagMatrix):
return expr.inverse()
expr2 = blockinverse_1x1(expr)
if expr != expr2:
return expr2
return blockinverse_2x2(Inverse(reblock_2x2(expr.arg)))
def blockinverse_1x1(expr):
if isinstance(expr.arg, BlockMatrix) and expr.arg.blockshape == (1, 1):
mat = Matrix([[expr.arg.blocks[0].inverse()]])
return BlockMatrix(mat)
return expr
def blockinverse_2x2(expr):
if isinstance(expr.arg, BlockMatrix) and expr.arg.blockshape == (2, 2):
# See: Inverses of 2x2 Block Matrices, Tzon-Tzer Lu and Sheng-Hua Shiou
[[A, B],
[C, D]] = expr.arg.blocks.tolist()
formula = _choose_2x2_inversion_formula(A, B, C, D)
if formula != None:
MI = expr.arg.schur(formula).I
if formula == 'A':
AI = A.I
return BlockMatrix([[AI + AI * B * MI * C * AI, -AI * B * MI], [-MI * C * AI, MI]])
if formula == 'B':
BI = B.I
return BlockMatrix([[-MI * D * BI, MI], [BI + BI * A * MI * D * BI, -BI * A * MI]])
if formula == 'C':
CI = C.I
return BlockMatrix([[-CI * D * MI, CI + CI * D * MI * A * CI], [MI, -MI * A * CI]])
if formula == 'D':
DI = D.I
return BlockMatrix([[MI, -MI * B * DI], [-DI * C * MI, DI + DI * C * MI * B * DI]])
return expr
def _choose_2x2_inversion_formula(A, B, C, D):
    """
    Assuming [[A, B], [C, D]] would form a valid square block matrix, find
    which of the classical 2x2 block matrix inversion formulas would be
    best suited.

    Returns 'A', 'B', 'C', 'D' to represent the algorithm involving inversion
    of the given argument or None if the matrix cannot be inverted using
    any of those formulas.
    """
    # Try to find a known invertible matrix. Note that the Schur complement
    # is currently not being considered for this.
    #
    # `ask` is tri-state (True / False / None), so identity comparisons are
    # used instead of `==`/`!=` (fixes the original `== True` / `!= False`).
    # The asks are issued in A, B, C, D order and cached so the fallback pass
    # below does not re-query the assumptions system.
    queried = []
    for name, block in (('A', A), ('B', B), ('C', C), ('D', D)):
        invertible = ask(Q.invertible(block))
        if invertible is True:
            return name
        queried.append((name, invertible))
    # Otherwise try to find a matrix that isn't known to be non-invertible.
    for name, invertible in queried:
        if invertible is not False:
            return name
    return None
def deblock(B):
    """ Flatten a BlockMatrix of BlockMatrices

    When ``B``'s blocks themselves contain BlockMatrices, the nested block
    structure is merged into a single flat BlockMatrix.  If ``B`` is not a
    BlockMatrix, or contains no nested BlockMatrices, it is returned as-is.
    If the nested blocks' shapes do not line up, the original ``B`` is
    returned unchanged.
    """
    if not isinstance(B, BlockMatrix) or not B.blocks.has(BlockMatrix):
        return B

    # Normalize: wrap every plain block as a 1x1 BlockMatrix so each entry
    # of `bb` uniformly exposes a `.blocks` matrix.
    wrap = lambda x: x if isinstance(x, BlockMatrix) else BlockMatrix([[x]])

    bb = B.blocks.applyfunc(wrap) # everything is a block

    try:
        # Start with a 0-row Matrix whose column count matches the flattened
        # width (sum of the inner block column counts across the first row).
        MM = Matrix(0, sum(bb[0, i].blocks.shape[1] for i in range(bb.shape[1])), [])
        # Stitch each outer row left-to-right, then stack the rows.
        for row in range(0, bb.shape[0]):
            M = Matrix(bb[row, 0].blocks)
            for col in range(1, bb.shape[1]):
                M = M.row_join(bb[row, col].blocks)
            MM = MM.col_join(M)

        return BlockMatrix(MM)
    except ShapeError:
        # Incompatible inner shapes: give up and keep the nested structure.
        return B
def reblock_2x2(expr):
    """
    Reblock a BlockMatrix so that it has 2x2 blocks of block matrices.  If
    possible in such a way that the matrix continues to be invertible using
    the classical 2x2 block inversion formulas.
    """
    if not isinstance(expr, BlockMatrix) or not all(d > 2 for d in expr.blockshape):
        return expr

    nrows, ncols = expr.blockshape
    blocks = expr.blocks
    for i in range(1, nrows):
        for j in range(1, ncols):
            # Try splitting the rows at i and the columns at j.
            quadrants = [
                bc_unpack(BlockMatrix(blocks[:i, :j])),
                bc_unpack(BlockMatrix(blocks[:i, j:])),
                bc_unpack(BlockMatrix(blocks[i:, :j])),
                bc_unpack(BlockMatrix(blocks[i:, j:])),
            ]
            if _choose_2x2_inversion_formula(*quadrants) is not None:
                return BlockMatrix([quadrants[:2], quadrants[2:]])

    # Nothing worked; just split off the upper-left corner.
    return BlockMatrix([[blocks[0, 0], BlockMatrix(blocks[0, 1:])],
                        [BlockMatrix(blocks[1:, 0]), BlockMatrix(blocks[1:, 1:])]])
def bounds(sizes):
    """ Convert a sequence of sizes into consecutive (low, high) pairs

    >>> from sympy.matrices.expressions.blockmatrix import bounds
    >>> bounds((1, 10, 50))
    [(0, 1), (1, 11), (11, 61)]
    """
    pairs = []
    start = 0
    for size in sizes:
        stop = start + size
        pairs.append((start, stop))
        start = stop
    return pairs
def blockcut(expr, rowsizes, colsizes):
    """ Cut a matrix expression into Blocks

    >>> from sympy import ImmutableMatrix, blockcut
    >>> M = ImmutableMatrix(4, 4, range(16))
    >>> B = blockcut(M, (1, 3), (1, 3))
    >>> type(B).__name__
    'BlockMatrix'
    >>> ImmutableMatrix(B.blocks[0, 1])
    Matrix([[1, 2, 3]])
    """
    # Translate the size lists into (low, high) index ranges, then slice
    # `expr` once per (row range, column range) pair.
    row_ranges = bounds(rowsizes)
    col_ranges = bounds(colsizes)
    rows = []
    for row_range in row_ranges:
        rows.append([MatrixSlice(expr, row_range, col_range)
                     for col_range in col_ranges])
    return BlockMatrix(rows)
| BlockDiagMatrix |
python | bokeh__bokeh | src/bokeh/models/glyphs.py | {
"start": 51963,
"end": 52331
} | class ____(MathTextGlyph):
""" Render mathematical content using `MathML <https://www.w3.org/Math/>`_
notation.
See :ref:`ug_styling_mathtext` in the |user guide| for more information.
"""
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
| MathMLGlyph |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/pipelines/pipeline.py | {
"start": 45160,
"end": 46465
} | class ____(graphene.Interface):
name = graphene.NonNull(graphene.String)
description = graphene.String()
owners = non_null_list(GrapheneDefinitionOwner)
pipeline_snapshot_id = graphene.NonNull(graphene.String)
dagster_types = non_null_list(GrapheneDagsterType)
dagster_type_or_error = graphene.Field(
graphene.NonNull(GrapheneDagsterTypeOrError),
dagsterTypeName=graphene.Argument(graphene.NonNull(graphene.String)),
)
solids = non_null_list(GrapheneSolid)
modes = non_null_list(GrapheneMode)
solid_handles = graphene.Field(
non_null_list(GrapheneSolidHandle), parentHandleID=graphene.String()
)
solid_handle = graphene.Field(
GrapheneSolidHandle,
handleID=graphene.Argument(graphene.NonNull(graphene.String)),
)
tags = non_null_list(GraphenePipelineTag)
metadata_entries = non_null_list(GrapheneMetadataEntry)
runs = graphene.Field(
non_null_list(GrapheneRun),
cursor=graphene.String(),
limit=graphene.Int(),
)
schedules = non_null_list(GrapheneSchedule)
sensors = non_null_list(GrapheneSensor)
parent_snapshot_id = graphene.String()
graph_name = graphene.NonNull(graphene.String)
class Meta:
name = "IPipelineSnapshot"
| GrapheneIPipelineSnapshot |
python | lepture__authlib | tests/django_helper.py | {
"start": 128,
"end": 554
} | class ____(RequestFactory):
@property
def session(self):
engine = import_module(settings.SESSION_ENGINE)
cookie = self.cookies.get(settings.SESSION_COOKIE_NAME)
if cookie:
return engine.SessionStore(cookie.value)
session = engine.SessionStore()
session.save()
self.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
return session
| RequestClient |
python | pyca__cryptography | src/cryptography/x509/extensions.py | {
"start": 2560,
"end": 2717
} | class ____(Exception):
def __init__(self, msg: str, oid: ObjectIdentifier) -> None:
super().__init__(msg)
self.oid = oid
| DuplicateExtension |
python | sympy__sympy | sympy/physics/quantum/operator.py | {
"start": 9809,
"end": 14728
} | class ____(Operator):
"""An unevaluated outer product between a ket and bra.
This constructs an outer product between any subclass of ``KetBase`` and
``BraBase`` as ``|a><b|``. An ``OuterProduct`` inherits from Operator as they act as
operators in quantum expressions. For reference see [1]_.
Parameters
==========
ket : KetBase
The ket on the left side of the outer product.
bar : BraBase
The bra on the right side of the outer product.
Examples
========
Create a simple outer product by hand and take its dagger::
>>> from sympy.physics.quantum import Ket, Bra, OuterProduct, Dagger
>>> k = Ket('k')
>>> b = Bra('b')
>>> op = OuterProduct(k, b)
>>> op
|k><b|
>>> op.hilbert_space
H
>>> op.ket
|k>
>>> op.bra
<b|
>>> Dagger(op)
|b><k|
In quantum expressions, outer products will be automatically
identified and created::
>>> k*b
|k><b|
However, the creation of inner products always has higher priority than that of
outer products:
>>> b*k*b
<b|k>*<b|
References
==========
.. [1] https://en.wikipedia.org/wiki/Outer_product
"""
is_commutative = False
def __new__(cls, *args, **old_assumptions):
from sympy.physics.quantum.state import KetBase, BraBase
if len(args) != 2:
raise ValueError('2 parameters expected, got %d' % len(args))
ket_expr = expand(args[0])
bra_expr = expand(args[1])
if (isinstance(ket_expr, (KetBase, Mul)) and
isinstance(bra_expr, (BraBase, Mul))):
ket_c, kets = ket_expr.args_cnc()
bra_c, bras = bra_expr.args_cnc()
if len(kets) != 1 or not isinstance(kets[0], KetBase):
raise TypeError('KetBase subclass expected'
', got: %r' % Mul(*kets))
if len(bras) != 1 or not isinstance(bras[0], BraBase):
raise TypeError('BraBase subclass expected'
', got: %r' % Mul(*bras))
if not kets[0].dual_class() == bras[0].__class__:
raise TypeError(
'ket and bra are not dual classes: %r, %r' %
(kets[0].__class__, bras[0].__class__)
)
# TODO: make sure the hilbert spaces of the bra and ket are
# compatible
obj = Expr.__new__(cls, *(kets[0], bras[0]), **old_assumptions)
obj.hilbert_space = kets[0].hilbert_space
return Mul(*(ket_c + bra_c)) * obj
op_terms = []
if isinstance(ket_expr, Add) and isinstance(bra_expr, Add):
for ket_term in ket_expr.args:
for bra_term in bra_expr.args:
op_terms.append(OuterProduct(ket_term, bra_term,
**old_assumptions))
elif isinstance(ket_expr, Add):
for ket_term in ket_expr.args:
op_terms.append(OuterProduct(ket_term, bra_expr,
**old_assumptions))
elif isinstance(bra_expr, Add):
for bra_term in bra_expr.args:
op_terms.append(OuterProduct(ket_expr, bra_term,
**old_assumptions))
else:
raise TypeError(
'Expected ket and bra expression, got: %r, %r' %
(ket_expr, bra_expr)
)
return Add(*op_terms)
@property
def ket(self):
"""Return the ket on the left side of the outer product."""
return self.args[0]
@property
def bra(self):
"""Return the bra on the right side of the outer product."""
return self.args[1]
def _eval_adjoint(self):
return OuterProduct(Dagger(self.bra), Dagger(self.ket))
def _sympystr(self, printer, *args):
return printer._print(self.ket) + printer._print(self.bra)
def _sympyrepr(self, printer, *args):
return '%s(%s,%s)' % (self.__class__.__name__,
printer._print(self.ket, *args), printer._print(self.bra, *args))
def _pretty(self, printer, *args):
pform = self.ket._pretty(printer, *args)
return prettyForm(*pform.right(self.bra._pretty(printer, *args)))
def _latex(self, printer, *args):
k = printer._print(self.ket, *args)
b = printer._print(self.bra, *args)
return k + b
def _represent(self, **options):
k = self.ket._represent(**options)
b = self.bra._represent(**options)
return k*b
def _eval_trace(self, **kwargs):
# TODO if operands are tensorproducts this may be will be handled
# differently.
return self.ket._eval_trace(self.bra, **kwargs)
| OuterProduct |
python | getsentry__sentry | src/sentry/preprod/pull_request/types.py | {
"start": 1980,
"end": 2218
} | class ____(BaseModel):
"""
Complete pull request data including file changes.
"""
pull_request: PullRequestDetails
files: list[PullRequestFileChange]
build_details: list[BuildDetailsApiResponse]
| PullRequestWithFiles |
python | pytorch__pytorch | test/inductor/test_group_batch_fusion.py | {
"start": 8531,
"end": 9537
} | class ____(torch.nn.Module):
def __init__(self, device):
super().__init__()
self.device = device
def forward(self, x):
inputs = torch.ops.aten.split(x.to(self.device), 500, dim=1)
x_split = torch.ops.aten.split(inputs[0].to(self.device), 50, dim=1)
y_split = torch.ops.aten.split(inputs[1].to(self.device), 50, dim=1)
tanh_1 = [torch.ops.aten.tanh(x_split[i]) for i in range(len(x_split))]
tanh_2 = [torch.ops.aten.tanh(y_split[i]) for i in range(len(y_split))]
sigmoid_1 = [torch.ops.aten.sigmoid(tanh_1[i]) for i in range(len(tanh_1))]
sigmoid_2 = [torch.ops.aten.sigmoid(tanh_2[i]) for i in range(len(tanh_2))]
relu_1 = [torch.ops.aten.relu(sigmoid_1[i]) for i in range(len(sigmoid_1))]
relu_2 = [torch.ops.aten.relu(sigmoid_2[i]) for i in range(len(sigmoid_2))]
add = [torch.ops.aten.add(relu_1[i], relu_2[i]) for i in range(len(relu_1))]
return torch.cat(add, dim=1)
| TestPoitwiseOpsPostGrad |
python | astropy__astropy | astropy/nddata/tests/test_nddata.py | {
"start": 17400,
"end": 22202
} | class ____(NDData):
@property
def wcs(self):
return WCS()
def test_overriden_wcs():
# Check that a sub-class that overrides `.wcs` without providing a setter
# works
NDDataCustomWCS(np.ones((5, 5)))
# set up parameters for test_collapse:
np.random.seed(42)
collapse_units = [None, u.Jy]
collapse_propagate = [True, False]
collapse_data_shapes = [
# 3D example:
(4, 3, 2),
# 5D example
(6, 5, 4, 3, 2),
]
collapse_ignore_masked = [True, False]
collapse_masks = list(
chain.from_iterable(
[
# try the operations without a mask (all False):
np.zeros(collapse_data_shape).astype(bool)
]
+ [
# assemble a bunch of random masks:
np.random.randint(0, 2, size=collapse_data_shape).astype(bool)
for _ in range(10)
]
for collapse_data_shape in collapse_data_shapes
)
)
# the following provides pytest.mark.parametrize with every
# permutation of (1) the units, (2) propagating/not propagating
# uncertainties, and (3) the data shapes of different ndim.
permute = (
len(collapse_masks)
* len(collapse_propagate)
* len(collapse_units)
* len(collapse_ignore_masked)
)
collapse_units = permute // len(collapse_units) * collapse_units
collapse_propagate = permute // len(collapse_propagate) * collapse_propagate
collapse_masks = permute // len(collapse_masks) * collapse_masks
collapse_ignore_masked = permute // len(collapse_ignore_masked) * collapse_ignore_masked
@pytest.mark.parametrize(
"mask, unit, propagate_uncertainties, operation_ignores_mask",
list(
zip(collapse_masks, collapse_units, collapse_propagate, collapse_ignore_masked)
),
)
def test_collapse(mask, unit, propagate_uncertainties, operation_ignores_mask):
# unique set of combinations of each of the N-1 axes for an N-D cube:
axes_permutations = {tuple(axes[:2]) for axes in permutations(range(mask.ndim))}
# each of the single axis slices:
axes_permutations.update(set(range(mask.ndim)))
axes_permutations.update({None})
cube = np.arange(np.prod(mask.shape)).reshape(mask.shape)
numpy_cube = np.ma.masked_array(cube, mask=mask)
ma_cube = Masked(cube, mask=mask)
ndarr = NDDataArray(cube, uncertainty=StdDevUncertainty(cube), unit=unit, mask=mask)
# By construction, the minimum value along each axis is always the zeroth index and
# the maximum is always the last along that axis. We verify that here, so we can
# test that the correct uncertainties are extracted during the
# `NDDataArray.min` and `NDDataArray.max` methods later:
for axis in range(cube.ndim):
assert np.all(np.equal(cube.argmin(axis=axis), 0))
assert np.all(np.equal(cube.argmax(axis=axis), cube.shape[axis] - 1))
# confirm that supported nddata methods agree with corresponding numpy methods
# for the masked data array:
sum_methods = ["sum", "mean"]
ext_methods = ["min", "max"]
all_methods = sum_methods + ext_methods
# for all supported methods, ensure the masking is propagated:
for method in all_methods:
for axes in axes_permutations:
astropy_method = getattr(ma_cube, method)(axis=axes)
numpy_method = getattr(numpy_cube, method)(axis=axes)
nddata_method = getattr(ndarr, method)(
axis=axes,
propagate_uncertainties=propagate_uncertainties,
operation_ignores_mask=operation_ignores_mask,
)
astropy_unmasked = astropy_method.base[~astropy_method.mask]
nddata_unmasked = nddata_method.data[~nddata_method.mask]
# check if the units are passed through correctly:
assert unit == nddata_method.unit
# check if the numpy and astropy.utils.masked results agree when
# the result is not fully masked:
if len(astropy_unmasked) > 0:
if not operation_ignores_mask:
# compare with astropy
assert np.all(np.equal(astropy_unmasked, nddata_unmasked))
assert np.all(np.equal(astropy_method.mask, nddata_method.mask))
else:
# compare with numpy
assert np.ma.all(
np.ma.equal(numpy_method, np.asanyarray(nddata_method))
)
# For extremum methods, ensure the uncertainty returned corresponds to the
# min/max data value. We've created the uncertainties to have the same value
# as the data array, so we can just check for equality:
if method in ext_methods and propagate_uncertainties:
assert np.ma.all(np.ma.equal(astropy_method, nddata_method))
| NDDataCustomWCS |
python | getsentry__sentry | src/sentry/preprod/migrations/0012_installablepreprod.py | {
"start": 269,
"end": 2538
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("preprod", "0011_add_preprod_artifact_app_name_and_app_id_fields"),
]
operations = [
migrations.CreateModel(
name="InstallablePreprodArtifact",
fields=[
(
"id",
sentry.db.models.fields.bounded.BoundedBigAutoField(
primary_key=True, serialize=False
),
),
("date_updated", models.DateTimeField(auto_now=True)),
("date_added", models.DateTimeField(auto_now_add=True)),
("url_path", models.CharField(db_index=True, max_length=255, unique=True)),
("expiration_date", models.DateTimeField(null=True)),
("download_count", models.PositiveIntegerField(default=0, null=True)),
(
"preprod_artifact",
sentry.db.models.fields.foreignkey.FlexibleForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="preprod.preprodartifact"
),
),
],
options={
"db_table": "sentry_installablepreprodartifact",
},
),
]
| Migration |
python | google__pytype | pytype/directors/directors_test.py | {
"start": 22838,
"end": 23958
} | class ____(DirectorTestCase):
"""Test pragmas."""
def test_valid(self):
self._create("""
def f(x) -> str: # pytype: pragma=cache-return
...
""")
self.assertTrue(self._director.has_pragma("cache-return", 2))
self.assertFalse(self._director.has_pragma("cache-return", 3))
def test_invalid(self):
self._create("""
def f(x) -> str: # pytype: pragma=bad-pragma
...
""")
self.assertFalse(self._director.has_pragma("bad-pragma", 2))
err = self._errorlog.unique_sorted_errors()[0]
self.assertEqual(err.name, "invalid-directive")
self.assertRegex(err.message, "Unknown pytype pragmas")
self.assertRegex(err.message, ".*bad-pragma")
def test_line_range(self):
# We currently do not adjust line numbers for pragmas
self._create("""
def f(
x # pytype: pragma=cache-return
) -> str:
...
""")
self.assertFalse(self._director.has_pragma("cache-return", 2))
self.assertTrue(self._director.has_pragma("cache-return", 3))
self.assertFalse(self._director.has_pragma("cache-return", 4))
| PragmaDirectivesTest |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config_vectorizers.py | {
"start": 20590,
"end": 64330
} | class ____:
"""Use this factory class to create the correct object for the `vectorizer_config` argument in the `collections.create()` method.
Each staticmethod provides options specific to the named vectorizer in the function's name. Under-the-hood data validation steps
will ensure that any mis-specifications will be caught before the request is sent to Weaviate.
"""
@staticmethod
def none() -> _VectorizerConfigCreate:
"""Create a `_VectorizerConfigCreate` object with the vectorizer set to `Vectorizer.NONE`."""
return _VectorizerConfigCreate(vectorizer=Vectorizers.NONE)
@staticmethod
def img2vec_neural(
image_fields: List[str],
) -> _VectorizerConfigCreate:
"""Create a `_Img2VecNeuralConfigCreate` object for use when vectorizing using the `img2vec-neural` model.
See the [documentation](https://weaviate.io/developers/weaviate/modules/retriever-vectorizer-modules/img2vec-neural)
for detailed usage.
Args:
image_fields: The image fields to use. This is a required field and must match the property fields
of the collection that are defined as `DataType.BLOB`.
Raises:
pydantic.ValidationError: If `image_fields` is not a `list`.
"""
return _Img2VecNeuralConfig(imageFields=image_fields)
@staticmethod
def multi2vec_clip(
image_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
text_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
interference_url: Optional[str] = None,
inference_url: Optional[str] = None,
vectorize_collection_name: bool = True,
) -> _VectorizerConfigCreate:
"""Create a `_Multi2VecClipConfigCreate` object for use when vectorizing using the `multi2vec-clip` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/transformers/embeddings-multimodal)
for detailed usage.
Args:
image_fields: The image fields to use in vectorization.
text_fields: The text fields to use in vectorization.
inference_url: The inference url to use where API requests should go. Defaults to `None`, which uses the server-defined default.
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
Raises:
pydantic.ValidationError: If `image_fields` or `text_fields` are not `None` or a `list`.
"""
if interference_url is not None:
if inference_url is not None:
raise ValueError(
"You have provided `interference_url` as well as `inference_url`. Please only provide `inference_url`, as `interference_url` is deprecated."
)
else:
warnings.warn(
message="""This parameter is deprecated and will be removed in a future release. Please use `inference_url` instead.""",
category=DeprecationWarning,
stacklevel=1,
)
return _Multi2VecClipConfig(
imageFields=_map_multi2vec_fields(image_fields),
textFields=_map_multi2vec_fields(text_fields),
inferenceUrl=inference_url,
)
@staticmethod
def multi2vec_bind(
audio_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
depth_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
image_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
imu_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
text_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
thermal_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
video_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
vectorize_collection_name: bool = True,
) -> _VectorizerConfigCreate:
"""Create a `_Multi2VecBindConfigCreate` object for use when vectorizing using the `multi2vec-clip` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/imagebind/embeddings-multimodal)
for detailed usage.
Args:
audio_fields: The audio fields to use in vectorization.
depth_fields: The depth fields to use in vectorization.
image_fields: The image fields to use in vectorization.
imu_fields: The IMU fields to use in vectorization.
text_fields: The text fields to use in vectorization.
thermal_fields: The thermal fields to use in vectorization.
video_fields: The video fields to use in vectorization.
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
Raises:
pydantic.ValidationError: If any of the `*_fields` are not `None` or a `list`.
"""
return _Multi2VecBindConfig(
audioFields=_map_multi2vec_fields(audio_fields),
depthFields=_map_multi2vec_fields(depth_fields),
imageFields=_map_multi2vec_fields(image_fields),
IMUFields=_map_multi2vec_fields(imu_fields),
textFields=_map_multi2vec_fields(text_fields),
thermalFields=_map_multi2vec_fields(thermal_fields),
videoFields=_map_multi2vec_fields(video_fields),
)
@staticmethod
def ref2vec_centroid(
reference_properties: List[str],
method: Literal["mean"] = "mean",
) -> _VectorizerConfigCreate:
"""Create a `_Ref2VecCentroidConfigCreate` object for use when vectorizing using the `ref2vec-centroid` model.
See the [documentation](https://weaviate.io/developers/weaviate/modules/retriever-vectorizer-modules/ref2vec-centroid)
for detailed usage.
Args:
reference_properties: The reference properties to use in vectorization, REQUIRED.
method: The method to use in vectorization. Defaults to `mean`.
Raises:
pydantic.ValidationError: If `reference_properties` is not a `list`.
"""
return _Ref2VecCentroidConfig(
referenceProperties=reference_properties,
method=method,
)
@staticmethod
def text2vec_aws(
model: Optional[Union[AWSModel, str]] = None,
region: str = "", # cant have a non-default value after a default value, but we cant change the order for BC - will be validated in the model
endpoint: Optional[str] = None,
service: Union[AWSService, str] = "bedrock",
vectorize_collection_name: bool = True,
) -> _VectorizerConfigCreate:
"""Create a `_Text2VecAWSConfigCreate` object for use when vectorizing using the `text2vec-aws` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/aws/embeddings)
for detailed usage.
Args:
model: The model to use, REQUIRED for service "bedrock".
region: The AWS region to run the model from, REQUIRED.
endpoint: The model to use, REQUIRED for service "sagemaker".
service: The AWS service to use, options are "bedrock" and "sagemaker".
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
"""
return _Text2VecAWSConfig(
model=model,
region=region,
vectorizeClassName=vectorize_collection_name,
service=service,
endpoint=endpoint,
)
@staticmethod
def text2vec_azure_openai(
resource_name: str,
deployment_id: str,
vectorize_collection_name: bool = True,
base_url: Optional[AnyHttpUrl] = None,
dimensions: Optional[int] = None,
model: Optional[str] = None,
) -> _VectorizerConfigCreate:
"""Create a `_Text2VecAzureOpenAIConfigCreate` object for use when vectorizing using the `text2vec-azure-openai` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/openai-azure/embeddings)
for detailed usage.
Args:
resource_name: The resource name to use, REQUIRED.
deployment_id: The deployment ID to use, REQUIRED.
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
dimensions: The dimensionality of the vectors. Defaults to `None`, which uses the server-defined default.
Raises:
pydantic.ValidationError: If `resource_name` or `deployment_id` are not `str`.
"""
return _Text2VecAzureOpenAIConfig(
baseURL=base_url,
dimensions=dimensions,
resourceName=resource_name,
deploymentId=deployment_id,
vectorizeClassName=vectorize_collection_name,
model=model,
)
@staticmethod
def text2vec_contextionary(
vectorize_collection_name: bool = True,
) -> _VectorizerConfigCreate:
"""Create a `_Text2VecContextionaryConfigCreate` object for use when vectorizing using the `text2vec-contextionary` model.
See the [documentation](https://weaviate.io/developers/weaviate/modules/retriever-vectorizer-modules/text2vec-contextionary)
for detailed usage.
Args:
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
Raises:
pydantic.ValidationError: If `vectorize_collection_name` is not a `bool`.
"""
return _Text2VecContextionaryConfig(vectorizeClassName=vectorize_collection_name)
@staticmethod
def custom(
module_name: str, module_config: Optional[Dict[str, Any]] = None
) -> _VectorizerConfigCreate:
"""Create a `_VectorizerCustomConfig` object for use when vectorizing using a custom specification.
Args:
module_name: The name of the module to use, REQUIRED.
module_config: The configuration to use for the module. Defaults to `None`, which uses the server-defined default.
"""
return _VectorizerCustomConfig(
vectorizer=_EnumLikeStr(module_name), module_config=module_config
)
@staticmethod
def text2vec_cohere(
model: Optional[Union[CohereModel, str]] = None,
truncate: Optional[CohereTruncation] = None,
vectorize_collection_name: bool = True,
base_url: Optional[AnyHttpUrl] = None,
) -> _VectorizerConfigCreate:
"""Create a `_Text2VecCohereConfigCreate` object for use when vectorizing using the `text2vec-cohere` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/cohere/embeddings)
for detailed usage.
Args:
model: The model to use. Defaults to `None`, which uses the server-defined default.
truncate: The truncation strategy to use. Defaults to `None`, which uses the server-defined default.
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
Raises:
pydantic.ValidationError: If `model` is not a valid value from the `CohereModel` type or if `truncate` is not a valid value from the `CohereTruncation` type.
"""
return _Text2VecCohereConfig(
baseURL=base_url,
model=model,
dimensions=None,
truncate=truncate,
vectorizeClassName=vectorize_collection_name,
)
@staticmethod
def multi2vec_cohere(
*,
model: Optional[Union[CohereMultimodalModel, str]] = None,
truncate: Optional[CohereTruncation] = None,
vectorize_collection_name: bool = True,
base_url: Optional[AnyHttpUrl] = None,
image_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
text_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
) -> _VectorizerConfigCreate:
"""Create a `_Multi2VecCohereConfig` object for use when vectorizing using the `multi2vec-cohere` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/cohere/embeddings-multimodal)
for detailed usage.
Args:
model: The model to use. Defaults to `None`, which uses the server-defined default.
truncate: The truncation strategy to use. Defaults to `None`, which uses the server-defined default.
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
image_fields: The image fields to use in vectorization.
text_fields: The text fields to use in vectorization.
Raises:
pydantic.ValidationError: If `model` is not a valid value from the `CohereMultimodalModel` type or if `truncate` is not a valid value from the `CohereTruncation` type.
"""
return _Multi2VecCohereConfig(
baseURL=base_url,
model=model,
dimensions=None,
truncate=truncate,
imageFields=_map_multi2vec_fields(image_fields),
textFields=_map_multi2vec_fields(text_fields),
)
@staticmethod
def multi2vec_voyageai(
*,
model: Optional[Union[CohereMultimodalModel, str]] = None,
truncation: Optional[bool] = None,
output_encoding: Optional[str],
vectorize_collection_name: bool = True,
base_url: Optional[AnyHttpUrl] = None,
image_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
text_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
) -> _VectorizerConfigCreate:
"""Create a `_Multi2VecCohereConfig` object for use when vectorizing using the `multi2vec-cohere` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/cohere/embeddings-multimodal)
for detailed usage.
Args:
model: The model to use. Defaults to `None`, which uses the server-defined default.
truncate: The truncation strategy to use. Defaults to `None`, which uses the server-defined default.
output_encoding: Deprecated, has no effect.
vectorize_collection_name: Deprecated, has no effect.
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
image_fields: The image fields to use in vectorization.
text_fields: The text fields to use in vectorization.
Raises:
pydantic.ValidationError: If `model` is not a valid value from the `CohereMultimodalModel` type or if `truncate` is not a valid value from the `CohereTruncation` type.
"""
return _Multi2VecVoyageaiConfig(
baseURL=base_url,
model=model,
truncation=truncation,
imageFields=_map_multi2vec_fields(image_fields),
textFields=_map_multi2vec_fields(text_fields),
)
@staticmethod
def multi2vec_nvidia(
*,
model: Optional[str] = None,
truncation: Optional[bool] = None,
output_encoding: Optional[str] = None,
vectorize_collection_name: bool = True,
base_url: Optional[AnyHttpUrl] = None,
image_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
text_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
) -> _VectorizerConfigCreate:
"""Create a `_Multi2VecNvidiaConfig` object for use when vectorizing using the `multi2vec-nvidia` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/nvidia/embeddings-multimodal)
for detailed usage.
Args:
model: The model to use. Defaults to `None`, which uses the server-defined default.
truncate: The truncation strategy to use. Defaults to `None`, which uses the server-defined default.
output_encoding: Deprecated, has no effect.
vectorize_collection_name: Deprecated, has no effect.
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
image_fields: The image fields to use in vectorization.
text_fields: The text fields to use in vectorization.
Raises:
pydantic.ValidationError: If `model` is not a valid value from the `NvidiaMultimodalModel` type or if `truncate` is not a valid value from the `NvidiaTruncation` type.
"""
return _Multi2VecNvidiaConfig(
baseURL=base_url,
model=model,
truncation=truncation,
imageFields=_map_multi2vec_fields(image_fields),
textFields=_map_multi2vec_fields(text_fields),
)
@staticmethod
def text2vec_databricks(
*,
endpoint: str,
instruction: Optional[str] = None,
vectorize_collection_name: bool = True,
) -> _VectorizerConfigCreate:
"""Create a `_Text2VecDatabricksConfig` object for use when vectorizing using the `text2vec-databricks` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/databricks/embeddings)
for detailed usage.
Args:
endpoint: The endpoint to use.
instruction: The instruction strategy to use. Defaults to `None`, which uses the server-defined default.
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
Raises:
pydantic.ValidationError: If `truncate` is not a valid value from the `CohereModel` type.
"""
return _Text2VecDatabricksConfig(
endpoint=endpoint,
instruction=instruction,
vectorizeClassName=vectorize_collection_name,
)
@staticmethod
def text2vec_gpt4all(
vectorize_collection_name: bool = True,
) -> _VectorizerConfigCreate:
"""Create a `_Text2VecGPT4AllConfigCreate` object for use when vectorizing using the `text2vec-gpt4all` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/gpt4all/embeddings)
for detailed usage.
Args:
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
Raises:
pydantic.ValidationError: If `vectorize_collection_name` is not a `bool`.
"""
return _Text2VecGPT4AllConfig(vectorizeClassName=vectorize_collection_name)
@staticmethod
def text2vec_huggingface(
model: Optional[str] = None,
passage_model: Optional[str] = None,
query_model: Optional[str] = None,
endpoint_url: Optional[AnyHttpUrl] = None,
wait_for_model: Optional[bool] = None,
use_gpu: Optional[bool] = None,
use_cache: Optional[bool] = None,
vectorize_collection_name: bool = True,
) -> _VectorizerConfigCreate:
"""Create a `_Text2VecHuggingFaceConfigCreate` object for use when vectorizing using the `text2vec-huggingface` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/huggingface/embeddings)
for detailed usage.
Args:
model: The model to use. Defaults to `None`, which uses the server-defined default.
passage_model: The passage model to use. Defaults to `None`, which uses the server-defined default.
query_model: The query model to use. Defaults to `None`, which uses the server-defined default.
endpoint_url: The endpoint URL to use. Defaults to `None`, which uses the server-defined default.
wait_for_model: Whether to wait for the model to be loaded. Defaults to `None`, which uses the server-defined default.
use_gpu: Whether to use the GPU. Defaults to `None`, which uses the server-defined default.
use_cache: Whether to use the cache. Defaults to `None`, which uses the server-defined default.
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
Raises:
pydantic.ValidationError: If the arguments passed to the function are invalid.
It is important to note that some of these variables are mutually exclusive.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/huggingface/embeddings#vectorizer-parameters) for more details.
"""
return _Text2VecHuggingFaceConfig(
model=model,
passageModel=passage_model,
queryModel=query_model,
endpointURL=endpoint_url,
waitForModel=wait_for_model,
useGPU=use_gpu,
useCache=use_cache,
vectorizeClassName=vectorize_collection_name,
)
@staticmethod
def text2vec_mistral(
*,
base_url: Optional[AnyHttpUrl] = None,
model: Optional[str] = None,
vectorize_collection_name: bool = True,
) -> _VectorizerConfigCreate:
"""Create a `_Text2VecMistralConfig` object for use when vectorizing using the `text2vec-mistral` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/mistral/embeddings)
for detailed usage.
Args:
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
model: The model to use. Defaults to `None`, which uses the server-defined default.
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
"""
return _Text2VecMistralConfig(
baseURL=base_url, model=model, vectorizeClassName=vectorize_collection_name
)
@staticmethod
def text2vec_ollama(
*,
api_endpoint: Optional[str] = None,
model: Optional[str] = None,
vectorize_collection_name: bool = True,
) -> _VectorizerConfigCreate:
"""Create a `_Text2VecOllamaConfig` object for use when vectorizing using the `text2vec-ollama` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/ollama/embeddings)
for detailed usage.
Args:
api_endpoint: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
Docker users may need to specify an alias, such as `http://host.docker.internal:11434` so that the container can access the host machine.
model: The model to use. Defaults to `None`, which uses the server-defined default.
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
"""
return _Text2VecOllamaConfig(
apiEndpoint=api_endpoint,
model=model,
vectorizeClassName=vectorize_collection_name,
)
@staticmethod
def text2vec_openai(
model: Optional[Union[OpenAIModel, str]] = None,
model_version: Optional[str] = None,
type_: Optional[OpenAIType] = None,
vectorize_collection_name: bool = True,
base_url: Optional[AnyHttpUrl] = None,
dimensions: Optional[int] = None,
) -> _VectorizerConfigCreate:
"""Create a `_Text2VecOpenAIConfigCreate` object for use when vectorizing using the `text2vec-openai` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/openai/embeddings)
for detailed usage.
Args:
model: The model to use. Defaults to `None`, which uses the server-defined default.
model_version: The model version to use. Defaults to `None`, which uses the server-defined default.
type_: The type of model to use. Defaults to `None`, which uses the server-defined default.
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
dimensions: Number of dimensions. Applicable to v3 OpenAI models only. Defaults to `None`, which uses the server-defined default.
Raises:
pydantic.ValidationError: If `type_` is not a valid value from the `OpenAIType` type.
"""
return _Text2VecOpenAIConfig(
baseURL=base_url,
model=model,
modelVersion=model_version,
type_=type_,
vectorizeClassName=vectorize_collection_name,
dimensions=dimensions,
)
@staticmethod
@docstring_deprecated(
deprecated_in="4.9.0",
details="""
This method is deprecated and will be removed in Q2 '25. Please use :meth:`~weaviate.collections.classes.config._Vectorizer.text2vec_google` instead.
""",
)
@typing_deprecated(
"This method is deprecated and will be removed in Q2 '25. Please use `text2vec_google` instead."
)
def text2vec_palm(
project_id: str,
api_endpoint: Optional[str] = None,
model_id: Optional[str] = None,
title_property: Optional[str] = None,
vectorize_collection_name: bool = True,
) -> _VectorizerConfigCreate:
"""Create a `_Text2VecGoogleConfig` object for use when vectorizing using the `text2vec-palm` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/google/embeddings)
for detailed usage.
Args:
project_id: The project ID to use, REQUIRED.
api_endpoint: The API endpoint to use without a leading scheme such as `http://`. Defaults to `None`, which uses the server-defined default
model_id: The model ID to use. Defaults to `None`, which uses the server-defined default.
title_property: The Weaviate property name for the `gecko-002` or `gecko-003` model to use as the title.
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
Raises:
pydantic.ValidationError: If `api_endpoint` is not a valid URL.
"""
_Warnings.palm_to_google_t2v()
return _Text2VecGoogleConfig(
projectId=project_id,
apiEndpoint=api_endpoint,
dimensions=None,
modelId=model_id,
vectorizeClassName=vectorize_collection_name,
titleProperty=title_property,
)
@staticmethod
def text2vec_google_aistudio(
model_id: Optional[str] = None,
title_property: Optional[str] = None,
vectorize_collection_name: bool = True,
) -> _VectorizerConfigCreate:
"""Create a `_Text2VecGoogleConfig` object for use when vectorizing using the `text2vec-google` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/google/embeddings)
for detailed usage.
Args:
model_id: The model ID to use. Defaults to `None`, which uses the server-defined default.
title_property: The Weaviate property name for the `gecko-002` or `gecko-003` model to use as the title.
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
Raises:
pydantic.ValidationError: If `api_endpoint` is not a valid URL.
"""
return _Text2VecGoogleConfig(
projectId=None,
apiEndpoint="generativelanguage.googleapis.com",
dimensions=None,
modelId=model_id,
vectorizeClassName=vectorize_collection_name,
titleProperty=title_property,
)
@staticmethod
def text2vec_google(
project_id: str,
api_endpoint: Optional[str] = None,
model_id: Optional[str] = None,
title_property: Optional[str] = None,
vectorize_collection_name: bool = True,
) -> _VectorizerConfigCreate:
"""Create a `_Text2VecGoogleConfig` object for use when vectorizing using the `text2vec-google` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/google/embeddings)
for detailed usage.
Args:
project_id: The project ID to use, REQUIRED.
api_endpoint: The API endpoint to use without a leading scheme such as `http://`. Defaults to `None`, which uses the server-defined default
model_id: The model ID to use. Defaults to `None`, which uses the server-defined default.
title_property: The Weaviate property name for the `gecko-002` or `gecko-003` model to use as the title.
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
dimensions: The dimensionality of the vectors. Defaults to `None`, which uses the server-defined default.
Raises:
pydantic.ValidationError: If `api_endpoint` is not a valid URL.
"""
return _Text2VecGoogleConfig(
projectId=project_id,
apiEndpoint=api_endpoint,
dimensions=None,
modelId=model_id,
vectorizeClassName=vectorize_collection_name,
titleProperty=title_property,
)
@staticmethod
@docstring_deprecated(
deprecated_in="4.9.0",
details="""
This method is deprecated and will be removed in Q2 '25. Please use :meth:`~weaviate.collections.classes.config._Vectorizer.multi2vec_google` instead.
""",
)
@typing_deprecated(
"This method is deprecated and will be removed in Q2 '25. Please use `multi2vec_google` instead."
)
def multi2vec_palm(
*,
location: str,
project_id: str,
image_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
text_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
video_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
dimensions: Optional[int] = None,
model_id: Optional[str] = None,
video_interval_seconds: Optional[int] = None,
vectorize_collection_name: bool = True,
) -> _VectorizerConfigCreate:
"""Create a `_Multi2VecPalmConfig` object for use when vectorizing using the `text2vec-palm` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/google/embeddings-multimodal)
for detailed usage.
Args:
location: Where the model runs. REQUIRED.
project_id: The project ID to use, REQUIRED.
image_fields: The image fields to use in vectorization.
text_fields: The text fields to use in vectorization.
video_fields: The video fields to use in vectorization.
dimensions: The number of dimensions to use. Defaults to `None`, which uses the server-defined default.
model_id: The model ID to use. Defaults to `None`, which uses the server-defined default.
video_interval_seconds: Length of a video interval. Defaults to `None`, which uses the server-defined default.
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
Raises:
pydantic.ValidationError: If `api_endpoint` is not a valid URL.
"""
_Warnings.palm_to_google_m2v()
return _Multi2VecGoogleConfig(
projectId=project_id,
location=location,
imageFields=_map_multi2vec_fields(image_fields),
textFields=_map_multi2vec_fields(text_fields),
videoFields=_map_multi2vec_fields(video_fields),
dimensions=dimensions,
modelId=model_id,
videoIntervalSeconds=video_interval_seconds,
)
@staticmethod
def multi2vec_google(
*,
location: str,
project_id: str,
image_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
text_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
video_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
model_id: Optional[str] = None,
video_interval_seconds: Optional[int] = None,
vectorize_collection_name: bool = True,
) -> _VectorizerConfigCreate:
"""Create a `_Multi2VecGoogleConfig` object for use when vectorizing using the `text2vec-google` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/google/embeddings-multimodal)
for detailed usage.
Args:
location: Where the model runs. REQUIRED.
project_id: The project ID to use, REQUIRED.
image_fields: The image fields to use in vectorization.
text_fields: The text fields to use in vectorization.
video_fields: The video fields to use in vectorization.
model_id: The model ID to use. Defaults to `None`, which uses the server-defined default.
video_interval_seconds: Length of a video interval. Defaults to `None`, which uses the server-defined default.
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
Raises:
pydantic.ValidationError: If `api_endpoint` is not a valid URL.
"""
return _Multi2VecGoogleConfig(
projectId=project_id,
location=location,
imageFields=_map_multi2vec_fields(image_fields),
textFields=_map_multi2vec_fields(text_fields),
videoFields=_map_multi2vec_fields(video_fields),
dimensions=None,
modelId=model_id,
videoIntervalSeconds=video_interval_seconds,
)
@staticmethod
def text2vec_transformers(
pooling_strategy: Literal["masked_mean", "cls"] = "masked_mean",
dimensions: Optional[int] = None,
vectorize_collection_name: bool = True,
inference_url: Optional[str] = None,
passage_inference_url: Optional[str] = None,
query_inference_url: Optional[str] = None,
) -> _VectorizerConfigCreate:
"""Create a `_Text2VecTransformersConfigCreate` object for use when vectorizing using the `text2vec-transformers` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/transformers/embeddings)
for detailed usage.
Args:
pooling_strategy: The pooling strategy to use. Defaults to `masked_mean`.
dimensions: The number of dimensions for the generated embeddings. Defaults to `None`, which uses the server-defined default.
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
inference_url: The inference url to use where API requests should go. You can use either this OR passage/query_inference_url. Defaults to `None`, which uses the server-defined default.
passage_inference_url: The inference url to use where passage API requests should go. You can use either this and query_inference_url OR inference_url. Defaults to `None`, which uses the server-defined default.
query_inference_url: The inference url to use where query API requests should go. You can use either this and passage_inference_url OR inference_url. Defaults to `None`, which uses the server-defined default.
Raises:
pydantic.ValidationError: If `pooling_strategy` is not a valid value from the `PoolingStrategy` type.
"""
return _Text2VecTransformersConfig(
poolingStrategy=pooling_strategy,
dimensions=dimensions,
vectorizeClassName=vectorize_collection_name,
inferenceUrl=inference_url,
passageInferenceUrl=passage_inference_url,
queryInferenceUrl=query_inference_url,
)
@staticmethod
def text2vec_jinaai(
model: Optional[Union[JinaModel, str]] = None,
vectorize_collection_name: bool = True,
base_url: Optional[str] = None,
dimensions: Optional[int] = None,
) -> _VectorizerConfigCreate:
"""Create a `_Text2VecJinaConfigCreate` object for use when vectorizing using the `text2vec-jinaai` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/jinaai/embeddings)
for detailed usage.
Args:
model: The model to use. Defaults to `None`, which uses the server-defined default.
See the
[documentation](https://weaviate.io/developers/weaviate/model-providers/jinaai/embeddings#available-models) for more details.
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
base_url: The base URL to send the vectorization requests to. Defaults to `None`, which uses the server-defined default.
dimensions: The number of dimensions for the generated embeddings. Defaults to `None`, which uses the server-defined default.
"""
return _Text2VecJinaConfig(
model=model,
vectorizeClassName=vectorize_collection_name,
baseURL=base_url,
dimensions=dimensions,
)
@staticmethod
def multi2vec_jinaai(
*,
model: Optional[Union[JinaMultimodalModel, str]] = None,
vectorize_collection_name: bool = True,
base_url: Optional[AnyHttpUrl] = None,
dimensions: Optional[int] = None,
image_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
text_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
) -> _VectorizerConfigCreate:
"""Create a `_Multi2VecJinaConfig` object for use when vectorizing using the `multi2vec-jinaai` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/jinaai/embeddings-multimodal)
for detailed usage.
Args:
model: The model to use. Defaults to `None`, which uses the server-defined default.
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
dimensions: The number of dimensions for the generated embeddings (only available for some models). Defaults to `None`, which uses the server-defined default.
image_fields: The image fields to use in vectorization.
text_fields: The text fields to use in vectorization.
Raises:
pydantic.ValidationError: If `model` is not a valid value from the `JinaMultimodalModel` type.
"""
return _Multi2VecJinaConfig(
baseURL=base_url,
model=model,
dimensions=dimensions,
imageFields=_map_multi2vec_fields(image_fields),
textFields=_map_multi2vec_fields(text_fields),
)
@staticmethod
def text2vec_voyageai(
*,
model: Optional[Union[VoyageModel, str]] = None,
base_url: Optional[str] = None,
truncate: Optional[bool] = None,
vectorize_collection_name: bool = True,
) -> _VectorizerConfigCreate:
"""Create a `_Text2VecVoyageConfigCreate` object for use when vectorizing using the `text2vec-voyageai` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/voyageai/embeddings)
for detailed usage.
Args:
model: The model to use. Defaults to `None`, which uses the server-defined default.
See the
[documentation](https://weaviate.io/developers/weaviate/model-providers/voyageai/embeddings#available-models) for more details.
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
truncate: Whether to truncate the input texts to fit within the context length. Defaults to `None`, which uses the server-defined default.
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
"""
return _Text2VecVoyageConfig(
model=model,
baseURL=base_url,
truncate=truncate,
vectorizeClassName=vectorize_collection_name,
dimensions=None,
)
@staticmethod
def text2vec_weaviate(
*,
model: Optional[Union[WeaviateModel, str]] = None,
base_url: Optional[str] = None,
vectorize_collection_name: bool = True,
dimensions: Optional[int] = None,
) -> _VectorizerConfigCreate:
"""TODO: add docstrings when the documentation is available."""
return _Text2VecWeaviateConfig(
model=model,
baseURL=base_url,
vectorizeClassName=vectorize_collection_name,
dimensions=dimensions,
)
@staticmethod
def text2vec_nvidia(
*,
model: Optional[str] = None,
base_url: Optional[str] = None,
truncate: Optional[bool] = None,
vectorize_collection_name: bool = True,
) -> _VectorizerConfigCreate:
"""Create a `_Text2VecNvidiaConfigCreate` object for use when vectorizing using the `text2vec-nvidia` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/nvidia/embeddings)
for detailed usage.
Args:
model: The model to use. Defaults to `None`, which uses the server-defined default.
See the
[documentation](https://weaviate.io/developers/weaviate/model-providers/nvidia/embeddings#available-models) for more details.
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
truncate: Whether to truncate the input texts to fit within the context length. Defaults to `None`, which uses the server-defined default.
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
"""
return _Text2VecNvidiaConfig(
model=model,
baseURL=base_url,
truncate=truncate,
vectorizeClassName=vectorize_collection_name,
)
@staticmethod
def text2vec_model2vec(
*,
inference_url: Optional[str] = None,
vectorize_collection_name: bool = True,
) -> _VectorizerConfigCreate:
"""Create a `_Text2VecModel2VecConfigCreate` object for use when vectorizing using the `text2vec-model2vec` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/model2vec/embeddings)
for detailed usage.
Args:
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
inference_url: The inference url to use where API requests should go. Defaults to `None`, which uses the server-defined default.
"""
return _Text2VecModel2VecConfig(
vectorizeClassName=vectorize_collection_name,
inferenceUrl=inference_url,
)
| _Vectorizer |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_generators.py | {
"start": 8656,
"end": 10731
} | class ____(__TestCase):
iterables = [
range(0),
range(20),
[1, 2, 3],
(2,),
{13, 48, 211},
frozenset((15, 8, 6)),
{1: 2, 3: 4},
]
non_iterables = [
None,
42,
3.0,
2j,
]
def genexpr(self):
return (x for x in range(10))
def genfunc(self):
def gen(it):
for x in it:
yield x
return gen(range(10))
def process_tests(self, get_generator):
for obj in self.iterables:
g_obj = get_generator(obj)
with self.subTest(g_obj=g_obj, obj=obj):
self.assertListEqual(list(g_obj), list(obj))
g_iter = get_generator(iter(obj))
with self.subTest(g_iter=g_iter, obj=obj):
self.assertListEqual(list(g_iter), list(obj))
err_regex = "'.*' object is not iterable"
for obj in self.non_iterables:
g_obj = get_generator(obj)
with self.subTest(g_obj=g_obj):
self.assertRaisesRegex(TypeError, err_regex, list, g_obj)
def test_modify_f_locals(self):
def modify_f_locals(g, local, obj):
g.gi_frame.f_locals[local] = obj
return g
def get_generator_genexpr(obj):
return modify_f_locals(self.genexpr(), '.0', obj)
def get_generator_genfunc(obj):
return modify_f_locals(self.genfunc(), 'it', obj)
self.process_tests(get_generator_genexpr)
self.process_tests(get_generator_genfunc)
def test_new_gen_from_gi_code(self):
def new_gen_from_gi_code(g, obj):
generator_func = types.FunctionType(g.gi_code, {})
return generator_func(obj)
def get_generator_genexpr(obj):
return new_gen_from_gi_code(self.genexpr(), obj)
def get_generator_genfunc(obj):
return new_gen_from_gi_code(self.genfunc(), obj)
self.process_tests(get_generator_genexpr)
self.process_tests(get_generator_genfunc)
| ModifyUnderlyingIterableTest |
python | joke2k__faker | faker/providers/date_time/de_AT/__init__.py | {
"start": 46,
"end": 778
} | class ____(DateTimeProvider):
DAY_NAMES = {
"0": "Sonntag",
"1": "Montag",
"2": "Dienstag",
"3": "Mittwoch",
"4": "Donnerstag",
"5": "Freitag",
"6": "Samstag",
}
MONTH_NAMES = {
"01": "Jänner",
"02": "Februar",
"03": "März",
"04": "April",
"05": "Mai",
"06": "Juni",
"07": "Juli",
"08": "August",
"09": "September",
"10": "Oktober",
"11": "November",
"12": "Dezember",
}
def day_of_week(self):
day = self.date("%w")
return self.DAY_NAMES[day]
def month_name(self):
month = self.month()
return self.MONTH_NAMES[month]
| Provider |
python | conda__conda | conda/exceptions.py | {
"start": 12163,
"end": 12325
} | class ____(CondaError, OSError):
def __init__(self, path: PathType):
message = "%(path)s"
super().__init__(message, path=path)
| PathNotFoundError |
python | apache__avro | lang/py/avro/test/test_io.py | {
"start": 9501,
"end": 10575
} | class ____(unittest.TestCase):
def __init__(self, test_schema: str, test_datum: object) -> None:
"""Ignore the normal signature for unittest.TestCase because we are generating
many test cases from this one class. This is safe as long as the autoloader
ignores this class. The autoloader will ignore this class as long as it has
no methods starting with `test_`.
"""
super().__init__("io_valid")
self.test_schema = avro.schema.parse(test_schema)
self.test_datum = test_datum
# Never hide repeated warnings when running this test case.
warnings.simplefilter("always")
def io_valid(self) -> None:
"""
In these cases, the provided data should be valid with the given schema.
"""
with warnings.catch_warnings(record=True) as actual_warnings:
self.assertTrue(
avro.io.validate(self.test_schema, self.test_datum),
f"{self.test_datum} did not validate in the schema {self.test_schema}",
)
| IoValidateTestCase |
python | ansible__ansible | lib/ansible/module_utils/errors.py | {
"start": 2030,
"end": 2119
} | class ____(AnsibleValidationError):
"""Error with parameter value"""
| ArgumentValueError |
python | docker__docker-py | tests/unit/api_test.py | {
"start": 14613,
"end": 19391
} | class ____(unittest.TestCase):
stdout_data = b'''
Now, those children out there, they're jumping through the
flames in the hope that the god of the fire will make them fruitful.
Really, you can't blame them. After all, what girl would not prefer the
child of a god to that of some acne-scarred artisan?
'''
stderr_data = b'''
And what of the true God? To whose glory churches and monasteries have been
built on these islands for generations past? Now shall what of Him?
'''
@classmethod
def setup_class(cls):
cls.server = socketserver.ThreadingTCPServer(
('', 0), cls.get_handler_class())
cls.thread = threading.Thread(target=cls.server.serve_forever)
cls.thread.daemon = True
cls.thread.start()
cls.address = f'http://{socket.gethostname()}:{cls.server.server_address[1]}'
@classmethod
def teardown_class(cls):
cls.server.shutdown()
cls.server.server_close()
cls.thread.join()
@classmethod
def get_handler_class(cls):
stdout_data = cls.stdout_data
stderr_data = cls.stderr_data
class Handler(http.server.BaseHTTPRequestHandler):
def do_POST(self):
resp_data = self.get_resp_data()
self.send_response(101)
self.send_header(
'Content-Type', 'application/vnd.docker.raw-stream')
self.send_header('Connection', 'Upgrade')
self.send_header('Upgrade', 'tcp')
self.end_headers()
self.wfile.flush()
time.sleep(0.2)
self.wfile.write(resp_data)
self.wfile.flush()
def get_resp_data(self):
path = self.path.split('/')[-1]
if path == 'tty':
return stdout_data + stderr_data
elif path == 'no-tty':
data = b''
data += self.frame_header(1, stdout_data)
data += stdout_data
data += self.frame_header(2, stderr_data)
data += stderr_data
return data
else:
raise Exception(f'Unknown path {path}')
@staticmethod
def frame_header(stream, data):
return struct.pack('>BxxxL', stream, len(data))
return Handler
def request(self, stream=None, tty=None, demux=None):
assert stream is not None and tty is not None and demux is not None
with APIClient(
base_url=self.address,
version=DEFAULT_DOCKER_API_VERSION
) as client:
if tty:
url = client._url('/tty')
else:
url = client._url('/no-tty')
resp = client._post(url, stream=True)
return client._read_from_socket(
resp, stream=stream, tty=tty, demux=demux)
def test_read_from_socket_tty(self):
res = self.request(stream=True, tty=True, demux=False)
assert next(res) == self.stdout_data + self.stderr_data
with self.assertRaises(StopIteration):
next(res)
def test_read_from_socket_tty_demux(self):
res = self.request(stream=True, tty=True, demux=True)
assert next(res) == (self.stdout_data + self.stderr_data, None)
with self.assertRaises(StopIteration):
next(res)
def test_read_from_socket_no_tty(self):
res = self.request(stream=True, tty=False, demux=False)
assert next(res) == self.stdout_data
assert next(res) == self.stderr_data
with self.assertRaises(StopIteration):
next(res)
def test_read_from_socket_no_tty_demux(self):
res = self.request(stream=True, tty=False, demux=True)
assert (self.stdout_data, None) == next(res)
assert (None, self.stderr_data) == next(res)
with self.assertRaises(StopIteration):
next(res)
def test_read_from_socket_no_stream_tty(self):
res = self.request(stream=False, tty=True, demux=False)
assert res == self.stdout_data + self.stderr_data
def test_read_from_socket_no_stream_tty_demux(self):
res = self.request(stream=False, tty=True, demux=True)
assert res == (self.stdout_data + self.stderr_data, None)
def test_read_from_socket_no_stream_no_tty(self):
res = self.request(stream=False, tty=False, demux=False)
assert res == self.stdout_data + self.stderr_data
def test_read_from_socket_no_stream_no_tty_demux(self):
res = self.request(stream=False, tty=False, demux=True)
assert res == (self.stdout_data, self.stderr_data)
| TCPSocketStreamTest |
python | sympy__sympy | sympy/physics/quantum/cartesian.py | {
"start": 2785,
"end": 3679
} | class ____(HermitianOperator):
"""1D cartesian momentum operator."""
@classmethod
def default_args(self):
return ("Px",)
@classmethod
def _eval_hilbert_space(self, args):
return L2(Interval(S.NegativeInfinity, S.Infinity))
def _apply_operator_PxKet(self, ket, **options):
return ket.momentum*ket
def _represent_XKet(self, basis, *, index=1, **options):
states = basis._enumerate_state(2, start_index=index)
coord1 = states[0].position
coord2 = states[1].position
d = DifferentialOperator(coord1)
delta = DiracDelta(coord1 - coord2)
return -I*hbar*(d*delta)
X = XOp('X')
Y = YOp('Y')
Z = ZOp('Z')
Px = PxOp('Px')
#-------------------------------------------------------------------------
# Position eigenstates
#-------------------------------------------------------------------------
| PxOp |
python | ansible__ansible | test/units/plugins/action/test_action.py | {
"start": 33334,
"end": 35682
} | class ____(unittest.TestCase):
def test_fail_no_json(self):
action_base = _action_base()
rc = 0
stdout = 'foo\nbar\n'
err = 'oopsy'
returned_data = {'rc': rc,
'stdout': stdout,
'stdout_lines': stdout.splitlines(),
'stderr': err}
res = action_base._parse_returned_data(returned_data, 'legacy')
self.assertFalse(res['_ansible_parsed'])
self.assertTrue(res['failed'])
self.assertEqual(res['module_stderr'], err)
def test_json_empty(self):
action_base = _action_base()
rc = 0
stdout = '{}\n'
err = ''
returned_data = {'rc': rc,
'stdout': stdout,
'stdout_lines': stdout.splitlines(),
'stderr': err}
res = action_base._parse_returned_data(returned_data, 'legacy')
del res['_ansible_parsed'] # we always have _ansible_parsed
self.assertEqual(len(res), 0)
self.assertFalse(res)
def test_json_facts(self):
action_base = _action_base()
rc = 0
stdout = '{"ansible_facts": {"foo": "bar", "ansible_blip": "blip_value"}}\n'
err = ''
returned_data = {'rc': rc,
'stdout': stdout,
'stdout_lines': stdout.splitlines(),
'stderr': err}
res = action_base._parse_returned_data(returned_data, 'legacy')
self.assertTrue(res['ansible_facts'])
self.assertIn('ansible_blip', res['ansible_facts'])
def test_json_facts_add_host(self):
action_base = _action_base()
rc = 0
stdout = """{"ansible_facts": {"foo": "bar", "ansible_blip": "blip_value"},
"add_host": {"host_vars": {"some_key": ["whatever the add_host object is"]}
}
}\n"""
err = ''
returned_data = {'rc': rc,
'stdout': stdout,
'stdout_lines': stdout.splitlines(),
'stderr': err}
res = action_base._parse_returned_data(returned_data, 'legacy')
self.assertTrue(res['ansible_facts'])
self.assertIn('ansible_blip', res['ansible_facts'])
self.assertIn('add_host', res)
| TestActionBaseParseReturnedData |
python | huggingface__transformers | src/transformers/models/doge/modeling_doge.py | {
"start": 24485,
"end": 32433
class ____(DogePreTrainedModel):
    """Doge decoder-only backbone: token embedding, a stack of
    DogeDecoderLayer blocks, and a final RMSNorm.  forward() returns a
    MoeModelOutputWithPast (hidden states plus the updated KV cache)."""
    def __init__(self, config: DogeConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size
        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [DogeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = DogeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        # Rotary embeddings are computed once per forward and shared by all layers.
        self.rotary_emb = DogeRotaryEmbedding(config=config)
        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()
    @check_model_inputs()
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> MoeModelOutputWithPast:
        # Exactly one of input_ids / inputs_embeds must be supplied (XOR check).
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        if cache_position is None:
            # Positions of the new tokens, offset by what the cache already holds.
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)
        # Choose the mask builder based on whether sliding-window attention is configured.
        mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask
        causal_mask = mask_function(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
        # Run the decoder stack; each layer updates hidden_states in sequence.
        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )
        hidden_states = self.norm(hidden_states)
        return MoeModelOutputWithPast( # only diff with Mistral is the output type, we need MoE
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )
def load_balancing_loss_func(
    gate_logits: Union[torch.Tensor, tuple[torch.Tensor], None],
    num_experts: Optional[int] = None,
    num_keys: Optional[int] = None,
    top_k: int = 2,
    attention_mask: Optional[torch.Tensor] = None,
) -> Union[torch.Tensor, int]:
    r"""
    Switch-Transformer-style auxiliary load-balancing loss (equations (4)-(6) of
    https://huggingface.co/papers/2101.03961), adapted to a product-key router:
    each layer contributes logits of shape [2, batch_size * sequence_length, num_keys]
    whose pairwise score sums index num_keys**2 experts.

    Args:
        gate_logits:
            Tuple with one entry per hidden layer, each of shape
            [2, batch_size * sequence_length, num_keys].
        num_experts:
            Total number of experts.
        num_keys:
            Number of keys per score tensor.
        top_k:
            Number of experts routed per token.
        attention_mask (`torch.Tensor`, *optional*):
            [batch_size, sequence_length]; when given, padding tokens are
            excluded from the statistics.

    Returns:
        The scalar auxiliary loss, or 0 when no logits are provided.
    """
    if gate_logits is None or not isinstance(gate_logits, tuple):
        return 0
    dtype = gate_logits[0].dtype
    device = gate_logits[0].device
    per_layer_experts = []
    per_layer_weights = []
    for layer_logits in gate_logits:
        layer_logits = layer_logits.to(device)
        # Full sort along both key dimensions, then form every pairwise
        # combination: scores add, indices combine base-num_keys.
        (scores_x, scores_y), (indices_x, indices_y) = layer_logits.topk(num_keys, dim=-1)
        pair_scores = (scores_x.unsqueeze(-1) + scores_y.unsqueeze(-2)).flatten(-2)
        pair_indices = (indices_x.unsqueeze(-1) * num_keys + indices_y.unsqueeze(-2)).flatten(-2)
        # Experts actually selected for each token: top_k of the combined scores.
        top_positions = pair_scores.topk(top_k, dim=-1).indices
        per_layer_experts.append(pair_indices.gather(-1, top_positions))
        per_layer_weights.append(F.softmax(pair_scores, dim=-1))
    chosen_experts = torch.cat(per_layer_experts, dim=0)
    routing_weights = torch.cat(per_layer_weights, dim=0)
    if attention_mask is None:
        # Fraction of routed slots landing on each expert.
        flat_experts = chosen_experts.reshape(-1)
        ones = torch.ones_like(flat_experts, dtype=dtype, device=device)
        tokens_per_expert = (
            torch.zeros(num_experts, dtype=dtype, device=device).scatter_add_(0, flat_experts, ones)
            / flat_experts.shape[0]
        )
        # Average routing probability assigned to each expert.
        router_prob_per_expert = routing_weights.mean(dim=0)
    else:
        batch_size, sequence_length = attention_mask.shape
        num_hidden_layers = len(gate_logits)
        # Mask aligned with the flattened per-token expert selections.
        token_mask = (
            attention_mask[None, :, :, None]
            .expand((num_hidden_layers, batch_size, sequence_length, top_k))
            .reshape(-1)
            .to(device)
        )
        flat_experts = chosen_experts.reshape(-1)[token_mask.bool()]
        ones = torch.ones_like(flat_experts, dtype=dtype, device=device)
        tokens_per_expert = (
            torch.zeros(num_experts, dtype=dtype, device=device).scatter_add_(0, flat_experts, ones)
            / token_mask.sum()
        )
        # Mask aligned with the routing-probability matrix.
        prob_mask = (
            attention_mask[None, :, :, None]
            .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
            .reshape(-1, num_experts)
            .to(device)
        )
        router_prob_per_expert = (routing_weights * prob_mask).sum(dim=0) / prob_mask.sum(dim=0)
    return torch.sum(tokens_per_expert * router_prob_per_expert) * num_experts
@auto_docstring
| DogeModel |
python | coleifer__peewee | tests/models.py | {
"start": 155411,
"end": 155536
class ____(TestModel):
    """Test model: one row per file, keyed by filename."""
    # Natural primary key -- the file's name/path.
    filename = CharField(primary_key=True)
    # File contents stored as text.
    data = TextField()
    # NOTE(review): whether this is a created or modified time is not
    # visible here -- confirm against callers.
    timestamp = TimestampField()
| CFile |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.