language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | apache__airflow | providers/google/tests/unit/google/cloud/utils/test_credentials_provider.py | {
"start": 4226,
"end": 5310
} | class ____:
@mock.patch.dict(os.environ, {CREDENTIALS: ENV_VALUE})
@mock.patch("tempfile.NamedTemporaryFile")
def test_provide_gcp_credentials_key_content(self, mock_file):
file_dict = {"foo": "bar"}
string_file = StringIO()
file_content = json.dumps(file_dict)
file_name = "/test/mock-file"
mock_file_handler = mock_file.return_value.__enter__.return_value
mock_file_handler.name = file_name
mock_file_handler.write = string_file.write
with provide_gcp_credentials(key_file_dict=file_dict):
assert os.environ[CREDENTIALS] == file_name
assert file_content == string_file.getvalue()
assert os.environ[CREDENTIALS] == ENV_VALUE
@mock.patch.dict(os.environ, {CREDENTIALS: ENV_VALUE})
def test_provide_gcp_credentials_keep_environment(self):
key_path = "/test/key-path"
with provide_gcp_credentials(key_file_path=key_path):
assert os.environ[CREDENTIALS] == key_path
assert os.environ[CREDENTIALS] == ENV_VALUE
| TestProvideGcpCredentials |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams/streams.py | {
"start": 13993,
"end": 14111
} | class ____(AdsInsights):
breakdowns = ["gender"]
action_breakdowns = ["action_type"]
| AdsInsightsDemographicsGender |
python | imageio__imageio | imageio/plugins/_tifffile.py | {
"start": 24257,
"end": 25001
} | class ____(object):
"""Attribute whose value is computed on first access."""
# TODO: help() doesn't work
__slots__ = ("func",)
def __init__(self, func):
self.func = func
# self.__name__ = func.__name__
# self.__doc__ = func.__doc__
# self.lock = threading.RLock()
def __get__(self, instance, owner):
# with self.lock:
if instance is None:
return self
try:
value = self.func(instance)
except AttributeError as e:
raise RuntimeError(e)
if value is NotImplemented:
return getattr(super(owner, instance), self.func.__name__)
setattr(instance, self.func.__name__, value)
return value
| lazyattr |
python | spack__spack | lib/spack/spack/cmd/commands.py | {
"start": 5182,
"end": 13193
} | class ____(ArgparseWriter):
"""Write argparse output as bash programmable tab completion."""
def format(self, cmd: Command) -> str:
"""Return the string representation of a single node in the parser tree.
Args:
cmd: Parsed information about a command or subcommand.
Returns:
String representation of this subcommand.
"""
assert cmd.optionals # we should always at least have -h, --help
assert not (cmd.positionals and cmd.subcommands) # one or the other
# We only care about the arguments/flags, not the help messages
positionals: Tuple[str, ...] = ()
if cmd.positionals:
positionals, _, _, _ = zip(*cmd.positionals)
optionals, _, _, _, _ = zip(*cmd.optionals)
subcommands: Tuple[str, ...] = ()
if cmd.subcommands:
_, subcommands, _ = zip(*cmd.subcommands)
# Flatten lists of lists
optionals = [x for xx in optionals for x in xx]
return (
self.start_function(cmd.prog)
+ self.body(positionals, optionals, subcommands)
+ self.end_function(cmd.prog)
)
def start_function(self, prog: str) -> str:
"""Return the syntax needed to begin a function definition.
Args:
prog: Program name.
Returns:
Function definition beginning.
"""
name = prog.replace("-", "_").replace(" ", "_")
return "\n_{0}() {{".format(name)
def end_function(self, prog: str) -> str:
"""Return the syntax needed to end a function definition.
Args:
prog: Program name
Returns:
Function definition ending.
"""
return "}\n"
def body(
self, positionals: Sequence[str], optionals: Sequence[str], subcommands: Sequence[str]
) -> str:
"""Return the body of the function.
Args:
positionals: List of positional arguments.
optionals: List of optional arguments.
subcommands: List of subcommand parsers.
Returns:
Function body.
"""
if positionals:
return f"""
if $list_options
then
{self.optionals(optionals)}
else
{self.positionals(positionals)}
fi
"""
elif subcommands:
return f"""
if $list_options
then
{self.optionals(optionals)}
else
{self.subcommands(subcommands)}
fi
"""
else:
return f"""
{self.optionals(optionals)}
"""
def positionals(self, positionals: Sequence[str]) -> str:
"""Return the syntax for reporting positional arguments.
Args:
positionals: List of positional arguments.
Returns:
Syntax for positional arguments.
"""
# If match found, return function name
for positional in positionals:
for key, value in _positional_to_subroutine.items():
if positional.startswith(key):
return value
# If no matches found, return empty list
return 'SPACK_COMPREPLY=""'
def optionals(self, optionals: Sequence[str]) -> str:
"""Return the syntax for reporting optional flags.
Args:
optionals: List of optional arguments.
Returns:
Syntax for optional flags.
"""
return f'SPACK_COMPREPLY="{" ".join(optionals)}"'
def subcommands(self, subcommands: Sequence[str]) -> str:
"""Return the syntax for reporting subcommands.
Args:
subcommands: List of subcommand parsers.
Returns:
Syntax for subcommand parsers
"""
return f'SPACK_COMPREPLY="{" ".join(subcommands)}"'
# Map argument destination names to their complete commands
# Earlier items in the list have higher precedence
_dest_to_fish_complete = {
("activate", "view"): "-f -a '(__fish_complete_directories)'",
("bootstrap root", "path"): "-f -a '(__fish_complete_directories)'",
("mirror add", "mirror"): "-f",
("repo add", "path"): "-f -a '(__fish_complete_directories)'",
("test find", "filter"): "-f -a '(__fish_spack_tests)'",
("bootstrap", "name"): "-f -a '(__fish_spack_bootstrap_names)'",
("buildcache create", "key"): "-f -a '(__fish_spack_gpg_keys)'",
("build-env", r"spec \[--\].*"): "-f -a '(__fish_spack_build_env_spec)'",
("checksum", "package"): "-f -a '(__fish_spack_packages)'",
(
"checksum",
"versions",
): "-f -a '(__fish_spack_package_versions $__fish_spack_argparse_argv[1])'",
("config", "path"): "-f -a '(__fish_spack_colon_path)'",
("config", "section"): "-f -a '(__fish_spack_config_sections)'",
("develop", "specs?"): "-f -k -a '(__fish_spack_specs_or_id)'",
("diff", "specs?"): "-f -a '(__fish_spack_installed_specs)'",
("gpg sign", "output"): "-f -a '(__fish_complete_directories)'",
("gpg", "keys?"): "-f -a '(__fish_spack_gpg_keys)'",
("graph", "specs?"): "-f -k -a '(__fish_spack_specs_or_id)'",
("help", "help_command"): "-f -a '(__fish_spack_commands)'",
("list", "filter"): "-f -a '(__fish_spack_packages)'",
("mirror", "mirror"): "-f -a '(__fish_spack_mirrors)'",
("pkg", "package"): "-f -a '(__fish_spack_pkg_packages)'",
("remove", "specs?"): "-f -a '(__fish_spack_installed_specs)'",
("repo", "namespace_or_path"): "$__fish_spack_force_files -a '(__fish_spack_repos)'",
("restage", "specs?"): "-f -k -a '(__fish_spack_specs_or_id)'",
("rm", "specs?"): "-f -a '(__fish_spack_installed_specs)'",
("solve", "specs?"): "-f -k -a '(__fish_spack_specs_or_id)'",
("spec", "specs?"): "-f -k -a '(__fish_spack_specs_or_id)'",
("stage", "specs?"): "-f -k -a '(__fish_spack_specs_or_id)'",
("test-env", r"spec \[--\].*"): "-f -a '(__fish_spack_build_env_spec)'",
("test", r"\[?name.*"): "-f -a '(__fish_spack_tests)'",
("undevelop", "specs?"): "-f -k -a '(__fish_spack_specs_or_id)'",
("verify", "specs_or_files"): "$__fish_spack_force_files -a '(__fish_spack_installed_specs)'",
("view", "path"): "-f -a '(__fish_complete_directories)'",
("", "comment"): "-f",
("", "compiler_spec"): "-f -a '(__fish_spack_installed_compilers)'",
("", "config_scopes"): "-f -a '(__fish_complete_directories)'",
("", "extendable"): "-f -a '(__fish_spack_extensions)'",
("", "installed_specs?"): "-f -a '(__fish_spack_installed_specs)'",
("", "job_url"): "-f",
("", "location_env"): "-f -a '(__fish_complete_directories)'",
("", "pytest_args"): "-f -a '(__fish_spack_unit_tests)'",
("", "package_or_file"): "$__fish_spack_force_files -a '(__fish_spack_packages)'",
("", "package_or_user"): "-f -a '(__fish_spack_packages)'",
("", "package"): "-f -a '(__fish_spack_packages)'",
("", "PKG"): "-f -a '(__fish_spack_packages)'",
("", "prefix"): "-f -a '(__fish_complete_directories)'",
("", r"rev\d?"): "-f -a '(__fish_spack_git_rev)'",
("", "specs?"): "-f -k -a '(__fish_spack_specs)'",
("", "tags?"): "-f -a '(__fish_spack_tags)'",
("", "virtual_package"): "-f -a '(__fish_spack_providers)'",
("", "working_dir"): "-f -a '(__fish_complete_directories)'",
("", r"(\w*_)?env"): "-f -a '(__fish_spack_environments)'",
("", r"(\w*_)?dir(ectory)?"): "-f -a '(__fish_spack_environments)'",
("", r"(\w*_)?mirror_name"): "-f -a '(__fish_spack_mirrors)'",
}
def _fish_dest_get_complete(prog: str, dest: str) -> Optional[str]:
"""Map from subcommand to autocompletion argument.
Args:
prog: Program name.
dest: Destination.
Returns:
Autocompletion argument.
"""
s = prog.split(None, 1)
subcmd = s[1] if len(s) == 2 else ""
for (prog_key, pos_key), value in _dest_to_fish_complete.items():
if subcmd.startswith(prog_key) and re.match(f"^{pos_key}$", dest):
return value
return None
| BashCompletionWriter |
python | huggingface__transformers | src/transformers/models/aimv2/modular_aimv2.py | {
"start": 6137,
"end": 10651
} | class ____(SiglipTextConfig):
r"""
This is the configuration class to store the configuration of a [`Aimv2TextModel`]. It is used to instantiate a
AIMv2 text encoder according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the text encoder of the AIMv2
[apple/aimv2-large-patch14-224-lit](https://huggingface.co/apple/aimv2-large-patch14-224-lit) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 49408):
Vocabulary size of the AIMv2 text model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`Aimv2Model`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 2048):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 6):
Number of attention heads for each attention layer in the Transformer encoder.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
qkv_bias (`bool`, *optional*, defaults to `False`):
Whether to add a bias to the queries, keys and values.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to add a bias to the Linear layers or Not.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
pad_token_id (`int`, *optional*, defaults to 1):
The id of the padding token in the vocabulary.
bos_token_id (`int`, *optional*, defaults to 49406):
The id of the beginning-of-sequence token in the vocabulary.
eos_token_id (`int`, *optional*, defaults to 49407):
The id of the end-of-sequence token in the vocabulary.
max_position_embeddings (`int`, *optional*, defaults to 77):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the for initializing all weight matrices.
"""
def __init__(
self,
vocab_size: int = 49408,
hidden_size: int = 768,
intermediate_size: int = 2048,
num_hidden_layers: int = 12,
num_attention_heads: int = 6,
rms_norm_eps: float = 1e-5,
attention_dropout: float = 0.0,
qkv_bias: bool = False,
mlp_bias: bool = False,
hidden_act: str = "silu",
pad_token_id: Optional[int] = None,
bos_token_id: Optional[int] = None,
eos_token_id: int = 49407,
max_position_embeddings: int = 77,
initializer_range: bool = 0.02,
**kwargs,
):
super().__init__(
vocab_size=vocab_size,
hidden_size=hidden_size,
intermediate_size=intermediate_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
hidden_act=hidden_act,
max_position_embeddings=max_position_embeddings,
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
**kwargs,
)
self.initializer_range = initializer_range
self.attention_dropout = attention_dropout
self.mlp_bias = mlp_bias
self.qkv_bias = qkv_bias
self.rms_norm_eps = rms_norm_eps
del self.bos_token_id
del self.pad_token_id
del self.projection_size
del self.layer_norm_eps
| Aimv2TextConfig |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1155235,
"end": 1161908
} | class ____(VegaLiteSchema):
"""
SchemeParams schema wrapper.
Parameters
----------
name : :class:`Cyclical`, :class:`Diverging`, :class:`Categorical`, :class:`ColorScheme`, :class:`SequentialMultiHue`, :class:`SequentialSingleHue`, Literal['accent', 'category10', 'category20', 'category20b', 'category20c', 'dark2', 'paired', 'pastel1', 'pastel2', 'set1', 'set2', 'set3', 'tableau10', 'tableau20', 'observable10', 'blueorange', 'blueorange-3', 'blueorange-4', 'blueorange-5', 'blueorange-6', 'blueorange-7', 'blueorange-8', 'blueorange-9', 'blueorange-10', 'blueorange-11', 'brownbluegreen', 'brownbluegreen-3', 'brownbluegreen-4', 'brownbluegreen-5', 'brownbluegreen-6', 'brownbluegreen-7', 'brownbluegreen-8', 'brownbluegreen-9', 'brownbluegreen-10', 'brownbluegreen-11', 'purplegreen', 'purplegreen-3', 'purplegreen-4', 'purplegreen-5', 'purplegreen-6', 'purplegreen-7', 'purplegreen-8', 'purplegreen-9', 'purplegreen-10', 'purplegreen-11', 'pinkyellowgreen', 'pinkyellowgreen-3', 'pinkyellowgreen-4', 'pinkyellowgreen-5', 'pinkyellowgreen-6', 'pinkyellowgreen-7', 'pinkyellowgreen-8', 'pinkyellowgreen-9', 'pinkyellowgreen-10', 'pinkyellowgreen-11', 'purpleorange', 'purpleorange-3', 'purpleorange-4', 'purpleorange-5', 'purpleorange-6', 'purpleorange-7', 'purpleorange-8', 'purpleorange-9', 'purpleorange-10', 'purpleorange-11', 'redblue', 'redblue-3', 'redblue-4', 'redblue-5', 'redblue-6', 'redblue-7', 'redblue-8', 'redblue-9', 'redblue-10', 'redblue-11', 'redgrey', 'redgrey-3', 'redgrey-4', 'redgrey-5', 'redgrey-6', 'redgrey-7', 'redgrey-8', 'redgrey-9', 'redgrey-10', 'redgrey-11', 'redyellowblue', 'redyellowblue-3', 'redyellowblue-4', 'redyellowblue-5', 'redyellowblue-6', 'redyellowblue-7', 'redyellowblue-8', 'redyellowblue-9', 'redyellowblue-10', 'redyellowblue-11', 'redyellowgreen', 'redyellowgreen-3', 'redyellowgreen-4', 'redyellowgreen-5', 'redyellowgreen-6', 'redyellowgreen-7', 'redyellowgreen-8', 'redyellowgreen-9', 'redyellowgreen-10', 'redyellowgreen-11', 'spectral', 'spectral-3', 'spectral-4', 'spectral-5', 'spectral-6', 'spectral-7', 'spectral-8', 
'spectral-9', 'spectral-10', 'spectral-11', 'blues', 'tealblues', 'teals', 'greens', 'browns', 'greys', 'purples', 'warmgreys', 'reds', 'oranges', 'rainbow', 'sinebow', 'turbo', 'viridis', 'inferno', 'magma', 'plasma', 'cividis', 'bluegreen', 'bluegreen-3', 'bluegreen-4', 'bluegreen-5', 'bluegreen-6', 'bluegreen-7', 'bluegreen-8', 'bluegreen-9', 'bluepurple', 'bluepurple-3', 'bluepurple-4', 'bluepurple-5', 'bluepurple-6', 'bluepurple-7', 'bluepurple-8', 'bluepurple-9', 'goldgreen', 'goldgreen-3', 'goldgreen-4', 'goldgreen-5', 'goldgreen-6', 'goldgreen-7', 'goldgreen-8', 'goldgreen-9', 'goldorange', 'goldorange-3', 'goldorange-4', 'goldorange-5', 'goldorange-6', 'goldorange-7', 'goldorange-8', 'goldorange-9', 'goldred', 'goldred-3', 'goldred-4', 'goldred-5', 'goldred-6', 'goldred-7', 'goldred-8', 'goldred-9', 'greenblue', 'greenblue-3', 'greenblue-4', 'greenblue-5', 'greenblue-6', 'greenblue-7', 'greenblue-8', 'greenblue-9', 'orangered', 'orangered-3', 'orangered-4', 'orangered-5', 'orangered-6', 'orangered-7', 'orangered-8', 'orangered-9', 'purplebluegreen', 'purplebluegreen-3', 'purplebluegreen-4', 'purplebluegreen-5', 'purplebluegreen-6', 'purplebluegreen-7', 'purplebluegreen-8', 'purplebluegreen-9', 'purpleblue', 'purpleblue-3', 'purpleblue-4', 'purpleblue-5', 'purpleblue-6', 'purpleblue-7', 'purpleblue-8', 'purpleblue-9', 'purplered', 'purplered-3', 'purplered-4', 'purplered-5', 'purplered-6', 'purplered-7', 'purplered-8', 'purplered-9', 'redpurple', 'redpurple-3', 'redpurple-4', 'redpurple-5', 'redpurple-6', 'redpurple-7', 'redpurple-8', 'redpurple-9', 'yellowgreenblue', 'yellowgreenblue-3', 'yellowgreenblue-4', 'yellowgreenblue-5', 'yellowgreenblue-6', 'yellowgreenblue-7', 'yellowgreenblue-8', 'yellowgreenblue-9', 'yellowgreen', 'yellowgreen-3', 'yellowgreen-4', 'yellowgreen-5', 'yellowgreen-6', 'yellowgreen-7', 'yellowgreen-8', 'yellowgreen-9', 'yelloworangebrown', 'yelloworangebrown-3', 'yelloworangebrown-4', 'yelloworangebrown-5', 'yelloworangebrown-6', 
'yelloworangebrown-7', 'yelloworangebrown-8', 'yelloworangebrown-9', 'yelloworangered', 'yelloworangered-3', 'yelloworangered-4', 'yelloworangered-5', 'yelloworangered-6', 'yelloworangered-7', 'yelloworangered-8', 'yelloworangered-9', 'darkblue', 'darkblue-3', 'darkblue-4', 'darkblue-5', 'darkblue-6', 'darkblue-7', 'darkblue-8', 'darkblue-9', 'darkgold', 'darkgold-3', 'darkgold-4', 'darkgold-5', 'darkgold-6', 'darkgold-7', 'darkgold-8', 'darkgold-9', 'darkgreen', 'darkgreen-3', 'darkgreen-4', 'darkgreen-5', 'darkgreen-6', 'darkgreen-7', 'darkgreen-8', 'darkgreen-9', 'darkmulti', 'darkmulti-3', 'darkmulti-4', 'darkmulti-5', 'darkmulti-6', 'darkmulti-7', 'darkmulti-8', 'darkmulti-9', 'darkred', 'darkred-3', 'darkred-4', 'darkred-5', 'darkred-6', 'darkred-7', 'darkred-8', 'darkred-9', 'lightgreyred', 'lightgreyred-3', 'lightgreyred-4', 'lightgreyred-5', 'lightgreyred-6', 'lightgreyred-7', 'lightgreyred-8', 'lightgreyred-9', 'lightgreyteal', 'lightgreyteal-3', 'lightgreyteal-4', 'lightgreyteal-5', 'lightgreyteal-6', 'lightgreyteal-7', 'lightgreyteal-8', 'lightgreyteal-9', 'lightmulti', 'lightmulti-3', 'lightmulti-4', 'lightmulti-5', 'lightmulti-6', 'lightmulti-7', 'lightmulti-8', 'lightmulti-9', 'lightorange', 'lightorange-3', 'lightorange-4', 'lightorange-5', 'lightorange-6', 'lightorange-7', 'lightorange-8', 'lightorange-9', 'lighttealblue', 'lighttealblue-3', 'lighttealblue-4', 'lighttealblue-5', 'lighttealblue-6', 'lighttealblue-7', 'lighttealblue-8', 'lighttealblue-9']
A color scheme name for ordinal scales (e.g., ``"category10"`` or ``"blues"``).
For the full list of supported schemes, please refer to the `Vega Scheme
<https://vega.github.io/vega/docs/schemes/#reference>`__ reference.
count : float
The number of colors to use in the scheme. This can be useful for scale types such
as ``"quantize"``, which use the length of the scale range to determine the number
of discrete bins for the scale domain.
extent : Sequence[float]
The extent of the color range to use. For example ``[0.2, 1]`` will rescale the
color scheme such that color values in the range *[0, 0.2)* are excluded from the
scheme.
"""
_schema = {"$ref": "#/definitions/SchemeParams"}
def __init__(
self,
name: Optional[SchemaBase | ColorScheme_T] = Undefined,
count: Optional[float] = Undefined,
extent: Optional[Sequence[float]] = Undefined,
**kwds,
):
super().__init__(name=name, count=count, extent=extent, **kwds)
| SchemeParams |
python | pytorch__pytorch | torch/testing/_internal/common_methods_invocations.py | {
"start": 424396,
"end": 1217754
} | class ____(foreach_inputs_sample_func):
def __init__(
self,
arity: int = 3,
rightmost_supports_scalar: bool = False,
rightmost_supports_scalarlist: bool = False,
):
super().__init__(arity, rightmost_supports_scalar, rightmost_supports_scalarlist)
def _should_disable_fastpath(self, opinfo, rightmost_arg, rightmost_arg_type, dtype):
return dtype in integral_types_and(torch.bool) and opinfo.ref == torch.addcmul
def sample_zero_size_tensor_inputs(self, opinfo, device, dtype, requires_grad, **kwargs):
assert "num_input_tensors" not in kwargs
_foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()}
_foreach_inputs_kwargs["requires_grad"] = requires_grad
# zero_size tensor
input = sample_inputs_foreach(None, device, dtype, NUM_SIZE0_TENSORS, zero_size=True, **_foreach_inputs_kwargs)
args = [
sample_inputs_foreach(None, device, dtype, NUM_SIZE0_TENSORS, zero_size=True, **_foreach_inputs_kwargs)
for _ in range(2)
]
kwargs.pop("scalars", None)
kwargs.update(self._sample_kwargs(opinfo, args[-1], ForeachRightmostArgType.TensorList, dtype))
yield ForeachSampleInput(input, *args, **kwargs)
def __call__(self, opinfo, device, dtype, requires_grad, **kwargs):
num_input_tensors_specified = "num_input_tensors" in kwargs
num_input_tensors = kwargs.pop("num_input_tensors") if num_input_tensors_specified else foreach_num_tensors
assert isinstance(num_input_tensors, list)
_foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()}
_foreach_inputs_kwargs["requires_grad"] = requires_grad
allow_higher_dtype_scalars = kwargs.pop("allow_higher_dtype_scalars", False)
for num_tensors, rightmost_arg_type, intersperse_empty_tensors in itertools.product(
num_input_tensors, self._rightmost_arg_types, (True, False)):
_foreach_inputs_kwargs["intersperse_empty_tensors"] = intersperse_empty_tensors
input = sample_inputs_foreach(None, device, dtype, num_tensors, zero_size=False, **_foreach_inputs_kwargs)
args = [
sample_inputs_foreach(None, device, dtype, num_tensors, zero_size=False, **_foreach_inputs_kwargs)
for _ in range(2 - int(rightmost_arg_type == ForeachRightmostArgType.TensorList))
]
rightmost_arg_list = self._sample_rightmost_arg(
opinfo,
rightmost_arg_type,
device,
dtype,
num_tensors,
zero_size=False,
allow_higher_dtype_scalars=False if intersperse_empty_tensors else allow_higher_dtype_scalars,
**_foreach_inputs_kwargs,
)
for rightmost_arg in rightmost_arg_list:
kwargs = {}
if rightmost_arg_type == ForeachRightmostArgType.TensorList:
args.append(rightmost_arg)
elif rightmost_arg_type in [ForeachRightmostArgType.Tensor, ForeachRightmostArgType.ScalarList]:
kwargs["scalars"] = rightmost_arg
else:
kwargs["value"] = rightmost_arg
kwargs.update(self._sample_kwargs(opinfo, rightmost_arg, rightmost_arg_type, dtype))
assert len(args) == 2, f"{len(args)=}"
sample = ForeachSampleInput(input, *args, **kwargs)
yield sample
if rightmost_arg_type == ForeachRightmostArgType.TensorList:
args.pop()
foreach_unary_op_db: list[OpInfo] = [
ForeachFuncInfo(
'exp',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypesIfHpu=custom_types(torch.float32),
backward_requires_result=True,
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
supports_sparse=True,
decorators=(
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
),
),
ForeachFuncInfo(
'acos',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
decorators=(
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
),
),
ForeachFuncInfo(
'asin',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
decorators=(
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
),
),
ForeachFuncInfo(
'atan',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
decorators=(
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
),
),
ForeachFuncInfo(
'cos',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
decorators=(
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
),
),
ForeachFuncInfo(
'cosh',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
decorators=(
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
),
),
ForeachFuncInfo(
'log',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
decorators=(
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
),
),
ForeachFuncInfo(
'log10',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
decorators=(
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
),
),
ForeachFuncInfo(
'log2',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
decorators=(
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
),
),
ForeachFuncInfo(
'tan',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
backward_requires_result=True,
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
decorators=(
# due to https://github.com/pytorch/pytorch/pull/102427 enabling jiterator for complex
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
DecorateInfo(
toleranceOverride(
{
torch.complex64: tol(atol=3e-04, rtol=2e-05)
}
),
'TestForeach',
'test_parity',
device_type='cuda'
),
),
),
ForeachFuncInfo(
'tanh',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
backward_requires_result=True,
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
decorators=(
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
DecorateInfo(
toleranceOverride(
{torch.complex64: tol(atol=5e-03, rtol=1e-04)}
),
'TestForeach',
'test_parity',
device_type='cuda'
),
),
),
ForeachFuncInfo(
'sin',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
decorators=(
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_inplace",
dtypes=integral_types_and(torch.bool,),
),
),
),
ForeachFuncInfo(
'sinh',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
decorators=(
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
dtypes=integral_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
dtypes=integral_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_inplace",
dtypes=integral_types_and(torch.bool),
),
),
),
ForeachFuncInfo(
'neg',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
decorators=(
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
dtypes=(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_outplace",
dtypes=(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
dtypes=(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_outplace",
dtypes=(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_inplace",
dtypes=(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_outplace",
dtypes=(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestForeach",
"test_unary_op_tensors_on_different_devices",
device_type="cuda",
dtypes=(torch.bool,),
),
),
),
ForeachFuncInfo(
'sqrt',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
backward_requires_result=True,
decorators=(
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
dtypes=integral_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
dtypes=integral_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_inplace",
dtypes=integral_types_and(torch.bool),
),
),
),
ForeachFuncInfo(
'rsqrt',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
backward_requires_result=True,
decorators=(
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
dtypes=integral_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
dtypes=integral_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_inplace",
dtypes=integral_types_and(torch.bool),
),
),
),
ForeachFuncInfo(
'ceil',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
decorators=(
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
dtypes=complex_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_outplace",
dtypes=complex_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
dtypes=complex_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_outplace",
dtypes=complex_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_inplace",
dtypes=complex_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_outplace",
dtypes=complex_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestForeach",
"test_autodiff",
device_type="cuda",
dtypes=(torch.complex128,),
),
),
),
ForeachFuncInfo(
'erf',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
decorators=(
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
dtypes=integral_types_and(torch.bool) + complex_types(),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_outplace",
dtypes=complex_types(),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
dtypes=integral_types_and(torch.bool) + complex_types(),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_outplace",
dtypes=complex_types(),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_inplace",
dtypes=integral_types_and(torch.bool) + complex_types(),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_outplace",
dtypes=complex_types(),
),
DecorateInfo(
unittest.expectedFailure,
"TestForeach",
"test_autodiff",
device_type="cuda",
dtypes=(torch.complex128,),
),
),
),
ForeachFuncInfo(
'erfc',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
decorators=(
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
dtypes=integral_types_and(torch.bool) + complex_types(),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_outplace",
dtypes=complex_types(),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
dtypes=integral_types_and(torch.bool) + complex_types(),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_outplace",
dtypes=complex_types(),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_inplace",
dtypes=integral_types_and(torch.bool) + complex_types(),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_outplace",
dtypes=complex_types(),
),
DecorateInfo(
unittest.expectedFailure,
"TestForeach",
"test_autodiff",
device_type="cuda",
dtypes=(torch.complex128,),
),
),
),
ForeachFuncInfo(
'expm1',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypesIfHpu=custom_types(torch.float32),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
backward_requires_result=True,
decorators=(
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
dtypes=integral_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
dtypes=integral_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_inplace",
dtypes=integral_types_and(torch.bool),
),
),
),
ForeachFuncInfo(
'floor',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
decorators=(
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
dtypes=complex_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_outplace",
dtypes=complex_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
dtypes=complex_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_outplace",
dtypes=complex_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_inplace",
dtypes=complex_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_outplace",
dtypes=complex_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestForeach",
"test_autodiff",
device_type="cuda",
dtypes=(torch.complex128,),
),
),
),
ForeachFuncInfo(
'log1p',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
decorators=(
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
dtypes=integral_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
dtypes=integral_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_inplace",
dtypes=integral_types_and(torch.bool),
),
),
),
ForeachFuncInfo(
'round',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
decorators=(
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
dtypes=complex_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_outplace",
dtypes=complex_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
dtypes=complex_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_outplace",
dtypes=complex_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_inplace",
dtypes=complex_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_outplace",
dtypes=complex_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestForeach",
"test_autodiff",
device_type="cuda",
dtypes=(torch.complex128,),
),
),
),
ForeachFuncInfo(
'frac',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
decorators=(
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
dtypes=integral_types_and(torch.bool) + complex_types(),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_outplace",
dtypes=integral_types_and(torch.bool) + complex_types(),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
dtypes=integral_types_and(torch.bool) + complex_types(),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_outplace",
dtypes=integral_types_and(torch.bool) + complex_types(),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_inplace",
dtypes=integral_types_and(torch.bool) + complex_types(),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_outplace",
dtypes=integral_types_and(torch.bool) + complex_types(),
),
DecorateInfo(
unittest.expectedFailure,
"TestForeach",
"test_autodiff",
device_type="cuda",
dtypes=(torch.complex128,),
),
),
),
ForeachFuncInfo(
'reciprocal',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
backward_requires_result=True,
decorators=(
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
dtypes=integral_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
dtypes=integral_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_inplace",
dtypes=integral_types_and(torch.bool),
),
),
),
ForeachFuncInfo(
'sigmoid',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.float16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
backward_requires_result=True,
decorators=(
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
dtypes=integral_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
dtypes=integral_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_inplace",
dtypes=integral_types_and(torch.bool),
),
),
),
ForeachFuncInfo(
'trunc',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
decorators=(
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
dtypes=complex_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_outplace",
dtypes=complex_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
dtypes=complex_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_outplace",
dtypes=complex_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_inplace",
dtypes=complex_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_outplace",
dtypes=complex_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestForeach",
"test_autodiff",
device_type="cuda",
dtypes=(torch.complex128,),
),
),
),
ForeachFuncInfo(
'abs',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=(
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
dtypes=complex_types(),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
dtypes=complex_types(),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_outplace",
device_type="cpu",
dtypes=(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
device_type="cpu",
dtypes=(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_outplace",
device_type="cpu",
dtypes=(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_inplace",
device_type="cpu",
dtypes=(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_outplace",
device_type="cpu",
dtypes=(torch.bool,),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
device_type="cpu",
dtypes=(torch.bool,),
),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=complex_types()),
),
),
ForeachFuncInfo(
'zero',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
supports_out=False,
),
ForeachFuncInfo(
'sign',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
decorators=(
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
dtypes=complex_types(),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_outplace",
dtypes=complex_types(),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
dtypes=complex_types(),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_outplace",
dtypes=complex_types(),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_inplace",
dtypes=complex_types(),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_outplace",
dtypes=complex_types(),
),
DecorateInfo(
unittest.expectedFailure,
"TestForeach",
"test_autodiff",
device_type="cuda",
dtypes=(torch.complex128,),
),
),
),
ForeachFuncInfo(
'lgamma',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
decorators=(
DecorateInfo(unittest.skip("In-place lgamma not supported for integral tensors"), "TestMeta",
"test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool)),
# DecorateInfo(unittest.skip("In-place lgamma not supported for integral tensors"), "TestMeta",
# "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool)),
DecorateInfo(unittest.skip("In-place lgamma not supported for integral tensors"), "TestMeta",
"test_meta_inplace", dtypes=integral_types_and(torch.bool)),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_inplace",
dtypes=complex_types() + integral_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_meta_outplace",
dtypes=complex_types(),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_inplace",
dtypes=complex_types() + integral_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_dispatch_symbolic_meta_outplace",
dtypes=complex_types(),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_inplace",
dtypes=complex_types() + integral_types_and(torch.bool),
),
DecorateInfo(
unittest.expectedFailure,
"TestMeta",
"test_meta_outplace",
dtypes=complex_types(),
),
DecorateInfo(
unittest.expectedFailure,
"TestForeach",
"test_autodiff",
device_type="cuda",
dtypes=(torch.complex128,),
),
),
),
]
# Database of ForeachFuncInfo entries for binary _foreach_* ops
# (torch._foreach_add & co.).  Each entry records the op's sample-input
# generator, autograd/forward-AD support flags, and per-test expected
# failures or skips via DecorateInfo.
foreach_binary_op_db: list[OpInfo] = [
    ForeachFuncInfo(
        "add",
        sample_inputs_func=foreach_inputs_sample_func(2, True, True, True),
        dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.float16, torch.int32),
        supports_alpha_param=True,
        supports_autograd=True,
        supports_inplace_autograd=True,
        supports_forward_ad=True,
        decorators=(
            # These tests fail with aten._local_scalar_dense not being implemented.
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace",
                         dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16)),
            # Samples have complex types and inplace only works if the dtype is complex.
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace",
                         dtypes=(torch.bool,)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace",
                         dtypes=(torch.bool,)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides",
                         dtypes=integral_types() + complex_types_and(torch.bool, torch.bfloat16, torch.float16, torch.float64)),
        ),
    ),
    ForeachFuncInfo(
        "sub",
        sample_inputs_func=foreach_inputs_sample_func(2, True, True),
        dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
        supports_alpha_param=True,
        supports_autograd=True,
        supports_inplace_autograd=True,
        supports_forward_ad=True,
        decorators=(
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"),
            DecorateInfo(unittest.skip("consistently fails internally and causes other tests to appear flaky"),
                         "TestForeach", "test_parity", dtypes=(torch.complex128,),
                         active_if=lambda kwargs: IS_FBCODE and not kwargs["noncontiguous"]),
        ),
    ),
    ForeachFuncInfo(
        "mul",
        sample_inputs_func=foreach_inputs_sample_func(2, True, True, True),
        dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
        supports_autograd=True,
        supports_inplace_autograd=True,
        supports_forward_ad=True,
        decorators=(
            # Samples have complex types and inplace only works if the dtype is complex.
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace",
                         dtypes=(torch.bool,)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace",
                         dtypes=(torch.bool,)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=(torch.bool,)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides",
                         dtypes=(torch.bool,)),
            DecorateInfo(unittest.skip("consistently fails internally and causes other tests to appear flaky"),
                         "TestForeach", "test_parity", dtypes=(torch.complex128,),
                         active_if=lambda kwargs: IS_FBCODE and not kwargs["noncontiguous"]),
        ),
    ),
    ForeachFuncInfo(
        "div",
        sample_inputs_func=foreach_inputs_sample_func(2, True, True, True),
        dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.float16, torch.int32, torch.int8),
        supports_autograd=True,
        supports_inplace_autograd=True,
        supports_forward_ad=True,
        decorators=(
            # Samples have complex types and inplace only works if the dtype is complex.
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace",
                         dtypes=integral_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace",
                         dtypes=integral_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace",
                         dtypes=integral_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides",
                         dtypes=integral_types_and(torch.bool)),
        ),
    ),
    ForeachFuncInfo(
        "clamp_min",
        sample_inputs_func=foreach_inputs_sample_func(2, True, True),
        dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.float16, torch.int64, torch.int32, torch.int8, torch.bool),
        supports_autograd=True,
        supports_inplace_autograd=True,
        supports_forward_ad=True,
        decorators=(
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(
                unittest.expectedFailure,
                "TestForeach",
                "test_autodiff",
                device_type="cuda",
                dtypes=(torch.complex128,),
            ),
            DecorateInfo(
                unittest.expectedFailure,
                "TestForeach",
                "test_binary_op_scalar_with_overlapping_tensors",
                dtypes=complex_types(),
            ),
        ),
    ),
    ForeachFuncInfo(
        "clamp_max",
        sample_inputs_func=foreach_inputs_sample_func(2, True, True),
        dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.float16, torch.int64, torch.int32, torch.int8, torch.bool),
        supports_autograd=True,
        supports_inplace_autograd=True,
        supports_forward_ad=True,
        decorators=(
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(
                unittest.expectedFailure,
                "TestForeach",
                "test_autodiff",
                device_type="cuda",
                dtypes=(torch.complex128,),
            ),
            DecorateInfo(
                unittest.expectedFailure,
                "TestForeach",
                "test_binary_op_scalar_with_overlapping_tensors",
                dtypes=complex_types(),
            ),
        ),
    ),
    # note(crcrpar): forward ad not implemented.
    ForeachFuncInfo(
        "minimum",
        sample_inputs_func=foreach_inputs_sample_func(2, True, True),
        dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32),
        supports_autograd=True,
        supports_inplace_autograd=False,
        supports_forward_ad=False,
        decorators=(
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(
                unittest.expectedFailure,
                "TestForeach",
                "test_autodiff",
                device_type="cuda",
                dtypes=(torch.complex128,),
            ),
            DecorateInfo(
                unittest.expectedFailure,
                "TestForeach",
                "test_binary_op_scalar_with_overlapping_tensors",
                dtypes=complex_types(),
            ),
        ),
    ),
    # note(crcrpar): forward ad not implemented.
    ForeachFuncInfo(
        "maximum",
        sample_inputs_func=foreach_inputs_sample_func(2, True, True),
        dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32),
        supports_autograd=True,
        supports_forward_ad=False,
        supports_inplace_autograd=False,
        decorators=(
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides",
                         dtypes=complex_types_and(torch.bool)),
            DecorateInfo(
                unittest.expectedFailure,
                "TestForeach",
                "test_autodiff",
                device_type="cuda",
                dtypes=(torch.complex128,),
            ),
            DecorateInfo(
                unittest.expectedFailure,
                "TestForeach",
                "test_binary_op_scalar_with_overlapping_tensors",
                dtypes=complex_types(),
            ),
        ),
    ),
    ForeachFuncInfo(
        "pow",
        supports_alpha_param=False,
        supports_scalar_self_arg=True,
        sample_inputs_func=foreach_inputs_sample_func(2, True, True),
        dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.float16, torch.int32, torch.int8, torch.bool),
        supports_autograd=True,
        supports_inplace_autograd=True,
        supports_forward_ad=True,
        decorators=(
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=(torch.bool,)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace",
                         dtypes=(torch.bool,)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=(torch.bool,)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace", dtypes=(torch.bool,)),
            # NOTE(review): the two outplace entries below previously appeared twice
            # verbatim; the redundant duplicates were removed.
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace",
                         dtypes=(torch.bool,)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace",
                         dtypes=(torch.bool,)),
            DecorateInfo(unittest.skip("flaky"), "TestForeach", "test_parity", device_type="cpu", dtypes=(torch.complex64,)),
            DecorateInfo(
                unittest.skip("failed starting on ROCm 6.2"),
                "TestForeach",
                "test_parity",
                device_type="cuda",
                dtypes=(torch.complex64,),
                active_if=TEST_WITH_ROCM),
            DecorateInfo(
                unittest.expectedFailure,
                "TestForeach",
                "test_binary_op_with_scalar_self_support",
                device_type="cuda",
                dtypes=(torch.bool,),
                active_if=lambda kwargs: kwargs["is_fastpath"],
            ),
        ),
        backward_requires_result=True,
    ),
    ForeachFuncInfo(
        "copy",
        sample_inputs_func=foreach_inputs_sample_func(2, False, False),
        supports_out=False,
        supports_forward_ad=False,
        supports_autograd=False,
        supports_inplace_autograd=False,
    )
]
# Database of ForeachFuncInfo entries for pointwise ternary _foreach_* ops
# (torch._foreach_addcmul / _foreach_addcdiv).  Each entry records the op's
# sample-input generator, autograd support flags, and per-test expected
# failures via DecorateInfo.
foreach_pointwise_op_db: list[ForeachFuncInfo] = [
    ForeachFuncInfo(
        "addcmul",
        sample_inputs_func=foreach_pointwise_sample_func(4, True, True),
        dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
        supports_autograd=True,
        supports_inplace_autograd=True,
        supports_forward_ad=True,
        decorators=(
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", dtypes=(torch.bool,)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace",
                         dtypes=(torch.bool,)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace", dtypes=(torch.bool,)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides",
                         dtypes=(torch.bool,)),
            # # Samples have complex types and inplace only works if the dtype is complex.
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=(torch.bool,)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace",
                         dtypes=(torch.bool,)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=(torch.bool,)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides",
                         dtypes=integral_types() + complex_types_and(torch.bool)),
        ),
    ),
    ForeachFuncInfo(
        "addcdiv",
        sample_inputs_func=foreach_pointwise_sample_func(4, True, True),
        dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
        supports_autograd=True,
        supports_inplace_autograd=True,
        supports_forward_ad=True,
        decorators=(
            # Samples have complex types and inplace only works if the dtype is complex.
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace",
                         dtypes=integral_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace",
                         dtypes=integral_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace",
                         dtypes=integral_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides",
                         dtypes=integral_types() + complex_types_and(torch.bool)),
            # fails with div_cpu is not implemented with ComplexHalf
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace",
                         dtypes=integral_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace",
                         dtypes=integral_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace",
                         dtypes=integral_types_and(torch.bool)),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides",
                         dtypes=integral_types() + complex_types_and(torch.bool)),
        ),
    ),
]
# Database of ForeachFuncInfo entries for reduction _foreach_* ops
# (torch._foreach_max / _foreach_norm), which reduce each tensor in the
# list to a scalar-like result.
foreach_reduce_op_db: list[ForeachFuncInfo] = [
    ForeachFuncInfo(
        "max",
        sample_inputs_func=foreach_max_sample_func(1, False, False),
        dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32),
        supports_autograd=True,
        supports_inplace_autograd=True,
        supports_forward_ad=True,
        decorators=(
            # no complex support for ordering ops like max
            DecorateInfo(
                unittest.expectedFailure,
                "TestForeach",
                "test_autodiff",
                dtypes=(torch.complex128, torch.complex64),
            ),
            DecorateInfo(
                unittest.expectedFailure,
                "TestForeach",
                "test_foreach_reduce_large_input",
                dtypes=(torch.complex128, torch.complex64),
            ),
            DecorateInfo(
                unittest.expectedFailure,
                "TestMeta",
                "test_dispatch_symbolic_meta_outplace",
                dtypes=(torch.complex128, torch.complex64),
            ),
            DecorateInfo(
                unittest.expectedFailure,
                "TestMeta",
                "test_meta_outplace",
                dtypes=(torch.complex128, torch.complex64),
            ),
            DecorateInfo(
                unittest.expectedFailure,
                "TestMeta",
                "test_dispatch_meta_outplace",
                dtypes=(torch.complex128, torch.complex64),
            ),
        ),
    ),
    ForeachFuncInfo(
        "norm",
        sample_inputs_func=foreach_norm_sample_func(1, False, False),
        dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
        supports_autograd=True,
        supports_inplace_autograd=True,
        supports_forward_ad=True,
        decorators=(
            # norm has no in-place variant, so every in-place meta test is expected to fail.
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"),
            DecorateInfo(
                unittest.expectedFailure,
                "TestMeta",
                "test_dispatch_meta_outplace",
                dtypes=integral_types_and(torch.bool),
            ),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"),
            DecorateInfo(
                unittest.expectedFailure,
                "TestMeta",
                "test_dispatch_symbolic_meta_outplace",
                dtypes=integral_types_and(torch.bool),
            ),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"),
            DecorateInfo(
                unittest.expectedFailure,
                "TestMeta",
                "test_meta_outplace",
                dtypes=integral_types_and(torch.bool),
            ),
            DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"),
            DecorateInfo(
                unittest.expectedFailure,
                "TestForeach",
                "test_foreach_reduce_large_input",
                device_type="cuda",
                dtypes=integral_types_and(torch.bool),
            ),
        ),
    ),
]
# Database of ForeachFuncInfo entries for remaining _foreach_* ops that fit
# neither the unary/binary/pointwise/reduce buckets (currently only lerp).
foreach_other_op_db: list[ForeachFuncInfo] = [
    ForeachFuncInfo(
        "lerp",
        sample_inputs_func=foreach_inputs_sample_func(3, True, True),
        dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
        supports_autograd=True,
        supports_inplace_autograd=True,
        supports_forward_ad=True,
        decorators=(
            # lerp is a floating-point op, so all integral/bool meta tests are expected to fail.
            DecorateInfo(
                unittest.expectedFailure,
                "TestMeta",
                "test_dispatch_meta_inplace",
                dtypes=integral_types_and(torch.bool),
            ),
            DecorateInfo(
                unittest.expectedFailure,
                "TestMeta",
                "test_dispatch_meta_outplace",
                dtypes=integral_types_and(torch.bool),
            ),
            DecorateInfo(
                unittest.expectedFailure,
                "TestMeta",
                "test_dispatch_symbolic_meta_outplace",
                dtypes=integral_types_and(torch.bool),
            ),
            DecorateInfo(
                unittest.expectedFailure,
                "TestMeta",
                "test_dispatch_symbolic_meta_inplace",
                dtypes=integral_types_and(torch.bool),
            ),
            DecorateInfo(
                unittest.expectedFailure,
                "TestMeta",
                "test_meta_inplace",
                dtypes=integral_types_and(torch.bool),
            ),
            DecorateInfo(
                unittest.expectedFailure,
                "TestMeta",
                "test_meta_outplace",
                dtypes=integral_types_and(torch.bool),
            ),
            DecorateInfo(
                unittest.expectedFailure,
                "TestMeta",
                "test_dispatch_symbolic_meta_inplace_all_strides",
                dtypes=integral_types_and(torch.bool),
            ),
            DecorateInfo(
                unittest.expectedFailure,
                "TestMeta",
                "test_dispatch_symbolic_meta_outplace_all_strides",
                dtypes=integral_types_and(torch.bool),
            ),
        ),
    ),
]
def reference_sign(x):
    """NumPy reference for torch.sign, working around np.sign's missing bool loop."""
    if x.dtype != np.bool_:
        return np.sign(x)
    # np.sign has no loop for bool (ufunc 'sign' did not contain a matching
    # signature), so compute in uint8 and cast the result back.
    return np.sign(x, dtype=np.uint8).astype(np.bool_)
def reference_sgn(x):
    """NumPy reference for torch.sgn.

    For complex inputs torch.sgn returns x / |x|, with 0 where |x| == 0
    (np.sign instead uses sign(x.real)/sign(x.imag), so it can't be reused).
    Real inputs fall through to reference_sign.
    """
    if x.dtype not in (np.complex64, np.complex128):
        return reference_sgn(x) if False else reference_sign(x)
    out = x / np.abs(x)
    zero = complex(0, 0)
    if out.ndim == 0:
        # 0-d arrays don't support masked assignment; rebuild the scalar.
        if x == 0:
            return np.array(zero, dtype=x.dtype)
        return out
    # Replace the NaNs produced by 0/0 with the defined value 0.
    out[x == 0] = zero
    return out
def reference_sigmoid(x):
    """NumPy/SciPy reference for torch.sigmoid."""
    # scipy.special.expit has no loop for complex dtypes; use the definition.
    if x.dtype in (np.complex64, np.complex128):
        return 1 / (1 + np.exp(-x))
    return scipy.special.expit(x)
def reference_logsigmoid(x):
    """Numerically-stable NumPy reference for torch.nn.functional.logsigmoid."""
    negative_branch = x - np.log1p(np.exp(x))   # stable when x < 0
    positive_branch = -np.log1p(np.exp(-x))     # stable when x >= 0
    return np.where(x < 0, negative_branch, positive_branch)
def reference_hardsigmoid(x):
    """NumPy reference for torch.nn.functional.hardsigmoid: clamp(x/6 + 0.5, 0, 1)."""
    shifted = x / 6 + 0.5
    lower_clamped = np.clip(shifted, 0, None)
    return np.where(lower_clamped > 1, 1, lower_clamped).astype(x.dtype)
def reference_lgamma(x):
    """SciPy-backed reference for torch.lgamma.

    scipy.special.gammaln returns -inf for a -inf input, while torch (like the
    C/C++ lgamma functions, see cppreference) returns +inf, so -inf inputs are
    swapped to +inf before calling SciPy.
    """
    if x.dtype.kind == 'f':
        x = np.where(x == float('-inf'), np.array(float('inf'), dtype=x.dtype), x)
    result = scipy.special.gammaln(x)
    if x.dtype == np.float16:
        # gammaln promotes float16 input to float32, but torch.lgamma keeps
        # float16; cast back so out-of-range values overflow to inf as torch's do.
        result = result.astype(np.float16)
    return result
def reference_mvlgamma(x, d):
    """SciPy reference for torch.mvlgamma, keeping float16 inputs at float16."""
    result = scipy.special.multigammaln(x, d)
    # multigammaln promotes half inputs; cast back to match torch's dtype.
    return result.astype(np.float16) if x.dtype == np.float16 else result
def reference_softplus(input, beta=1, threshold=20):
    """NumPy reference for torch.nn.functional.softplus.

    Where beta * x > threshold the op is the identity (for numerical safety);
    elsewhere it is log(1 + exp(beta * x)) / beta.
    """
    result = input.copy()
    soft_region = input * beta <= threshold
    result[soft_region] = np.log(1 + np.exp(beta * input[soft_region])) / beta
    return result
def reference_gelu(X, *, approximate='none'):
    """NumPy/SciPy reference for torch.nn.functional.gelu (exact and tanh forms)."""
    if approximate == 'tanh':
        # 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
        inner = math.sqrt(2 / math.pi) * (X + 0.044715 * np.power(X, 3.0))
        return 0.5 * X * (1.0 + np.tanh(inner))
    # Exact form: x * Phi(x), where Phi is the standard normal CDF.
    return X * stats.norm.cdf(X)
def reference_one_hot(a: npt.NDArray, num_classes: int = -1) -> npt.NDArray:
    """NumPy reference for torch.nn.functional.one_hot."""
    if num_classes == -1:
        # Infer the class count from the largest label present.
        num_classes = int(np.amax(a) + 1)
    labels = a.reshape(-1)
    encoded = np.zeros((labels.size, num_classes), dtype=a.dtype)
    encoded[np.arange(labels.size), labels] = 1
    return encoded.reshape(*a.shape, -1)
def reference_mse_loss(input, target, reduction="mean"):
    """NumPy reference for torch.nn.functional.mse_loss."""
    squared_error = np.square(input - target)
    if reduction == "sum":
        return np.sum(squared_error)
    if reduction == "mean":
        return np.mean(squared_error)
    # reduction == "none": elementwise loss
    return squared_error
def reference_layer_norm(inp: npt.NDArray, normalized_shape: tuple[int, ...], weight=None, bias=None, eps=1e-5):
    # layer_norm is just the first output (Y) of native_layer_norm.
    return reference_native_layer_norm(inp, normalized_shape, weight, bias, eps)[0]
def reference_native_layer_norm(inp: npt.NDArray, normalized_shape: tuple[int, ...], weight, bias, eps):
    """NumPy reference for torch.native_layer_norm.

    Returns (output, mean, rstd); the mean/rstd keep singleton dims over the
    normalized axes.
    """
    feature_size = np.prod(normalized_shape)
    flat = inp.reshape(-1, feature_size)  # type: ignore[call-overload]
    mean = flat.mean(axis=-1, keepdims=True)
    var = flat.var(axis=-1, ddof=0, keepdims=True)
    denom = np.sqrt(var + eps)
    Y = (flat - mean) / denom
    if weight is not None:
        Y = Y * weight.reshape(-1)
    if bias is not None:
        Y = Y + bias.reshape(-1)
    axis = inp.ndim - len(normalized_shape)
    stat_shape = inp.shape[:axis] + (1,) * len(normalized_shape)
    return Y.reshape(*inp.shape), mean.reshape(stat_shape), (1.0 / denom).reshape(stat_shape)
def reference_rms_norm(inp: npt.NDArray, normalized_shape: tuple[int, ...], weight=None, eps=None):
    """NumPy reference for torch.nn.functional.rms_norm."""
    if eps is None:
        # Default eps matches the machine epsilon of the input dtype.
        eps = torch.finfo(numpy_to_torch_dtype(inp.dtype)).eps
    feature_size = np.prod(normalized_shape)
    flat = inp.reshape(-1, feature_size)  # type: ignore[call-overload]
    rms = np.sqrt((flat**2).mean(axis=-1, keepdims=True) + eps)
    normalized = flat / rms
    if weight is not None:
        normalized = normalized * weight.reshape(-1)
    return normalized.reshape(*inp.shape)
def reference_group_norm(inp: npt.NDArray, num_groups: int, weight=None, bias=None, eps=1e-5):
    """NumPy reference for torch.nn.functional.group_norm."""
    grouped = inp
    if np.prod(inp.shape) != 0:
        # Fold channels into num_groups so statistics are per (sample, group).
        grouped = inp.reshape((inp.shape[0], num_groups, -1))
    mean = grouped.mean(axis=-1, keepdims=True)
    var = grouped.var(axis=-1, ddof=0, keepdims=True)
    Y = ((grouped - mean) / np.sqrt(var + eps)).reshape(inp.shape)
    def _align_channelwise(vec):
        # weight/bias are per-channel vectors; broadcast them along dim 1.
        if len(Y.shape) > 2:
            return np.expand_dims(vec, [0] + [idx + 2 for idx in range(inp.ndim - 2)])
        return vec
    if weight is not None:
        Y = Y * _align_channelwise(weight)
    if bias is not None:
        Y = Y + _align_channelwise(bias)
    return Y
# using a custom reference function since numpy only has a string side arg (instead of right and side) and doesn't
# have an out_int32 arg. Additionally, numpy doesn't support searchsorted with ND arrays, so this splits those into
# stacked 1D cases
def reference_searchsorted(sorted_sequence, boundary, out_int32=False, right=False, side='left', sorter=None):
    """NumPy reference for torch.searchsorted.

    np.searchsorted only takes a string `side` (no `right` or `out_int32`
    kwargs) and only handles 1-D sequences, so ND inputs are split into
    stacked 1-D calls.
    """
    # torch accepts either right=True or side='right'; normalize to the string.
    side = 'right' if (right or side == 'right') else 'left'
    if len(sorted_sequence.shape) == 1 :
        ret = np.searchsorted(sorted_sequence, boundary, side=side, sorter=sorter)
        return ret.astype(np.int32) if out_int32 else ret
    elif sorted_sequence.shape[0] == 0:
        # ND sequence with an empty leading dim: every search is against an
        # empty row, so one flattened call covers all of them.
        if sorter is not None:
            sorter = sorter.flatten()
        ret = np.searchsorted(sorted_sequence.flatten(), boundary.flatten(), side=side, sorter=sorter)
        ret = ret.astype(np.int32) if out_int32 else ret
        return ret.reshape(boundary.shape)
    else:
        # numpy searchsorted only supports 1D inputs so we split up ND inputs
        # into one 1-D call per leading row and restack the results.
        orig_shape = boundary.shape
        num_splits = np.prod(sorted_sequence.shape[:-1])
        splits = range(num_splits)
        sorted_sequence, boundary = sorted_sequence.reshape(num_splits, -1), boundary.reshape(num_splits, -1)
        if sorter is not None:
            sorter = sorter.reshape(num_splits, -1)
        split_sequence = [sorted_sequence[i] for i in splits]
        split_boundary = [boundary[i] for i in splits]
        split_sorter = [sorter[i] if (sorter is not None) else None for i in splits]
        split_ret = [np.searchsorted(s_seq, b, side=side, sorter=s_sort)
                     for (s_seq, b, s_sort) in zip(split_sequence, split_boundary, split_sorter, strict=True)]
        split_ret = [i.astype(np.int32) for i in split_ret] if out_int32 else split_ret
        return np.stack(split_ret).reshape(orig_shape)
def reference_hash_tensor(tensor, dim=(), keepdim=False, mode=0):
    """NumPy reference for hash_tensor mode 0: xor-reduce 64-bit bit patterns."""
    assert mode == 0, "Only mode=0 (xor_sum) is supported right now"
    if tensor.dtype.kind == 'f':
        # Hash the float64 bit pattern, not the numeric value.
        bits = tensor.astype(np.float64).view(np.uint64)
    else:
        bits = tensor.astype(np.uint64)
    if dim == ():
        # No dim given: xor over every element.
        return np.bitwise_xor.reduce(bits.flatten(), keepdims=keepdim)
    axes = tuple(dim) if isinstance(dim, list) else dim
    return np.bitwise_xor.reduce(bits, axis=axes, keepdims=keepdim)
def loss_reference_reduction_wrapper(fn):
    """Wrap a per-element loss reference so it honors torch's `reduction` kwarg."""
    def wrapper(input, target, *, size_average=None, reduce=None, reduction="mean", **other_kwargs):
        # The legacy size_average/reduce spellings are rejected outright.
        if size_average is not None or reduce is not None:
            raise RuntimeError(
                "The keyword arguments 'size_average' and 'reduce' are deprecated and not supported by this wrapper"
            )
        elementwise = fn(input, target, **other_kwargs)
        if reduction == "sum":
            return np.sum(elementwise)
        if reduction == "mean":
            return np.mean(elementwise)
        # reduction == "none"
        return elementwise
    return wrapper
@loss_reference_reduction_wrapper
def reference_smooth_l1_loss(input, target, beta=1.0):
    """Per-element smooth L1 loss: quadratic for |x - y| < beta, linear above."""
    delta = input - target
    magnitude = np.abs(delta)
    linear_region = magnitude >= beta
    loss = np.empty_like(input)
    loss[linear_region] = magnitude[linear_region] - 0.5 * beta
    loss[~linear_region] = delta[~linear_region] ** 2 / (2 * beta)
    return loss
def reference_std_var(f):
    """Forwards unbiased/correction kwargs as NumPy's equivalent ddof"""
    g = reference_reduction_numpy(f)
    @wraps(g)
    def wrapper(x: npt.NDArray, *args, **kwargs):
        # torch accepts at most one of the two spellings per call.
        assert not ('unbiased' in kwargs and 'correction' in kwargs)
        if 'unbiased' in kwargs:
            # unbiased=True maps to ddof=1, unbiased=False to ddof=0.
            kwargs['ddof'] = int(kwargs.pop('unbiased'))
        elif 'correction' in kwargs:
            kwargs['ddof'] = kwargs.pop('correction')
        return g(x, *args, **kwargs)
    return wrapper
def generate_std_var_kwargs(t: torch.Tensor, **kwargs):
    """Yield (args, kwargs) variants exercising std/var's unbiased/correction kwargs."""
    for flag in (True, False):
        yield ((), {'unbiased': flag})
    # Calling std/var with `correction` is currently only enabled when both
    # dim and keepdim are provided.
    if 'dim' in kwargs and 'keepdim' in kwargs:
        yield ((), {'correction': 0})
        yield ((), {'correction': 1})
        reduced_numel = torch.tensor(t.shape)[kwargs.get('dim')].prod()
        yield ((), {'correction': reduced_numel // 2})
def error_inputs_mean(op_info, device, is_ref=False, **kwargs):
    """Yield inputs that must make mean() raise dtype-inference errors.

    The reference (prims) path spells dtypes like torch.int64 while the eager
    path spells them Long, so the expected regex depends on is_ref.
    """
    dtype_name = "torch.int64" if is_ref else "Long"
    # Integral inputs can't infer a floating/complex output dtype.
    yield ErrorInput(
        SampleInput(make_tensor((3, 4, 5), dtype=torch.int64, device=device), []),
        error_regex=(r"mean\(\): could not infer output dtype. "
                     r"Input dtype must be either a floating point or complex dtype. "
                     r"Got: " + dtype_name),
    )
    # An explicit integral dtype= override is equally invalid.
    yield ErrorInput(
        SampleInput(
            make_tensor((3, 4, 5), dtype=torch.float32, device=device),
            [],
            dtype=torch.int64),
        error_regex=(r"mean\(\): could not infer output dtype. "
                     r"Optional dtype must be either a floating point or complex dtype. "
                     r"Got: " + dtype_name),
    )
# numpy implementation of torch.flatten
# unfortunately there's no np.flatten. we figure out the desired shape and call np.reshape
def reference_flatten(input, start_dim=0, end_dim=-1):
in_shape = input.shape
in_rank = len(in_shape)
for d in start_dim, end_dim:
if not ((in_rank == 0 and d in (-1, 0)) or -in_rank <= d < in_rank):
raise IndexError(f"Dimension out of range (expected to be in range of [{-in_rank}, {in_rank - 1}], but got {d}")
end_dim = end_dim if end_dim >= 0 else in_rank + end_dim
start_dim = start_dim if start_dim >= 0 else in_rank + start_dim
if in_rank == 0:
end_dim = start_dim
if end_dim < start_dim:
raise RuntimeError("flatten() has invalid args: start_dim cannot come after end_dim")
flatten_bit_dim = functools.reduce(operator.mul, in_shape[start_dim:end_dim + 1], 1)
out_shape = in_shape[:start_dim] + (flatten_bit_dim,) + in_shape[end_dim + 1:]
return np.reshape(input, out_shape)
def sample_inputs_alias_copy(op_info, device, dtype, requires_grad, **kwargs):
    # Cover a 1-D tensor and a 0-d (scalar) tensor.
    yield SampleInput(make_tensor((S,), dtype=dtype, device=device, requires_grad=requires_grad))
    yield SampleInput(make_tensor((), dtype=dtype, device=device, requires_grad=requires_grad))
# Operator database (sorted alphabetically)
op_db: list[OpInfo] = [
UnaryUfuncInfo('abs',
aliases=('absolute', ),
ref=np.abs,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
skips=(
DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), 'TestBwdGradients',
'test_inplace_grad', dtypes=(torch.cdouble,)),
DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), 'TestBwdGradients',
'test_inplace_gradgrad', dtypes=(torch.cdouble,)),
DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), 'TestFwdGradients',
'test_inplace_forward_mode_AD', dtypes=(torch.cdouble,)),
DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), "TestSparseUnaryUfuncs",
"test_inplace", dtypes=(torch.cdouble, torch.cfloat, torch.chalf)),
# Reference: https://github.com/pytorch/pytorch/issues/49224
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
dtypes=[torch.int8], active_if=TEST_WITH_ASAN),
# TODO: Fix test_out_arg_all_dtypes as torch.empty_like(expected_output) where expected_output=op(input)
# We can break the logic of the loop over all possible types but it is OK.
# https://github.com/pytorch/pytorch/blob/master/test/test_unary_ufuncs.py#L440-L449
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_out_arg_all_dtypes',
dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_meta_inplace',
dtypes=(torch.cdouble, torch.cfloat, torch.chalf)),
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_inplace',
dtypes=(torch.cdouble, torch.cfloat, torch.chalf)),
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_inplace',
dtypes=(torch.cdouble, torch.cfloat, torch.chalf)),
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_inplace_all_strides',
dtypes=(torch.cdouble, torch.cfloat, torch.chalf)),
),
supports_fwgrad_bwgrad=True,
assert_autodiffed=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
supports_forward_ad=True),
# NOTE: CPU complex acos produces incorrect outputs (https://github.com/pytorch/pytorch/issues/42952)
UnaryUfuncInfo('acos',
aliases=('arccos', ),
ref=np.arccos,
domain=(-1, 1),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_float=True,
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-1,
torch.complex64: 1e-2}),),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),
# Failing with wrong imaginary sign on at least some Windows jobs
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
# Failing with wrong imaginary sign on at least some Windows jobs
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_method_grad',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_inplace_grad',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_inplace_forward_mode_AD',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),)),
# NOTE: the derivative for inplace acosh is not implemented
UnaryUfuncInfo('acosh',
aliases=('arccosh', ),
ref=np.arccosh,
domain=(1, None),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
supports_inplace_autograd=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_float=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
# Failing with wrong imaginary sign on at least some Windows jobs
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
),
# acosh is not defined at x < 1 (real)
reference_numerics_filter=NumericsFilter(
condition=lambda x: (x < 1 if not x.is_complex() else torch.zeros_like(x, dtype=torch.bool)),
safe_val=2)),
BinaryUfuncInfo('add',
# NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate
ref=lambda input, other, *, alpha=1: (
np.add(input, other)
if alpha == 1
else np.add(input, np.multiply(alpha, other))
),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16,
torch.float16, torch.chalf),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32),
assert_autodiffed=True,
sample_inputs_func=sample_inputs_add_sub,
supports_fwgrad_bwgrad=True,
supports_forward_ad=True,
supports_two_python_scalars=True,
decorators=(
DecorateInfo(
toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}),
'TestBinaryUfuncs', 'test_reference_numerics'),
),
skips=(
# boolean alpha not handled properly
DecorateInfo(unittest.expectedFailure,
'TestNNCOpInfo',
'test_nnc_correctness',
dtypes=(torch.bool,)),
DecorateInfo(unittest.skip("Skipped!"),
'TestCommon',
'test_numpy_refs',
dtypes=(torch.complex128,)),
DecorateInfo(unittest.skip("Skipped!"),
'TestBinaryUfuncs',
'test_reference_numerics_extremal_values',
dtypes=(torch.complex64, torch.complex128)),
)),
OpInfo('item',
op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.item, inp, *args, **kwargs),
ref=np.ndarray.item,
method_variant=None,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.chalf, torch.bool),
dtypesIfHpu=custom_types(torch.float32),
supports_out=False,
supports_autograd=False,
error_inputs_func=error_inputs_item,
sample_inputs_func=sample_inputs_item,
skips=(
# Error testing item function variant
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',
dtypes=(torch.float32, torch.complex64)),
# FX failed to normalize op - add the op to the op_skip list.
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# RuntimeError: Composite compliance check failed with the above error.
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'),
# Booleans mismatch: AssertionError: False is not true
DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake_autocast'),
# Booleans mismatch: AssertionError: False is not true
DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake'),
)),
OpInfo('arange',
dtypes=all_types_and(torch.bfloat16, torch.float16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8),
supports_out=True,
supports_autograd=False,
is_factory_function=True,
error_inputs_func=error_inputs_arange,
sample_inputs_func=sample_inputs_arange,
skips=(
# https://github.com/pytorch/pytorch/issues/81774
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# Tests that assume input is a tensor or sequence of tensors
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
# Lazy tensor failures
DecorateInfo(unittest.expectedFailure, 'TestLazyOpInfo', 'test_dispatched_to_lazy'),
DecorateInfo(unittest.skip("Skipped!"), 'TestLazyOpInfo', 'test_correctness'),
DecorateInfo(unittest.skip("Skipped!"), 'TestLazyOpInfo', 'test_correctness_with_reusing_ir'),
# Exception raised from analyzeImpl at ../torch/csrc/jit/ir/alias_analysis.cpp:608
# We don't have an op for aten::arange but it isn't a special case.
# Argument types: bool, bool, bool, int, int, Device, boo
DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'),
# Captured graph does not contain aten::arange (succeeds on complex!)
# g: graph():
# %25 : Long(1, strides=[1], requires_grad=0, device=cpu) = prim::Constant[value={1}]()
# return (%25)
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
# UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
)),
OpInfo('cauchy',
op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.cauchy_, inp, *args, **kwargs),
inplace_variant=torch.Tensor.cauchy_,
dtypes=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_autograd=False,
allow_cow_input_materialize_forward=[0],
sample_inputs_func=sample_inputs_cauchy,
error_inputs_func=error_inputs_cauchy,
skips=(
# Tests that assume input tensor has a meaningful effect on output tensor
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# AssertionError: Tensor-likes are not close!
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
# FX failed to normalize op - add the op to the op_skip list.
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# vmap: calling random operator not supported
DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"),
DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"),
DecorateInfo(unittest.skip("make_traced() doesn't set seed properly!"), 'TestCommon', 'test_python_ref_executor'),
DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'),
)),
OpInfo('exponential',
op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.exponential_, inp, *args, **kwargs),
inplace_variant=torch.Tensor.exponential_,
dtypes=floating_types_and(torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_out=False,
supports_autograd=False,
allow_cow_input_materialize_forward=[0],
sample_inputs_func=sample_inputs_exponential,
error_inputs_func=error_inputs_exponential,
skips=(
# Tests that assume input tensor has a meaningful effect on output tensor
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# AssertionError: Tensor-likes are not close!
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
# FX failed to normalize op - add the op to the op_skip list.
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# vmap: calling random operator not supported
DecorateInfo(unittest.expectedFailure, "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"),
DecorateInfo(unittest.expectedFailure, "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"),
DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
)),
OpInfo('geometric',
op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.geometric_, inp, *args, **kwargs),
inplace_variant=torch.Tensor.geometric_,
dtypes=floating_types_and(torch.float16, torch.bfloat16, torch.int8, torch.int16, torch.int32, torch.int64, torch.uint8),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_out=False,
supports_autograd=False,
allow_cow_input_materialize_forward=[0],
sample_inputs_func=sample_inputs_geometric,
error_inputs_func=error_inputs_geometric,
skips=(
# Tests that assume input tensor has a meaningful effect on output tensor
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# AssertionError: Tensor-likes are not close!
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
# FX failed to normalize op - add the op to the op_skip list.
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# vmap: calling random operator not supported
DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"),
DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"),
DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'),
)),
OpInfo('log_normal',
op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.log_normal_, inp, *args, **kwargs),
inplace_variant=torch.Tensor.log_normal_,
dtypes=floating_types_and(torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_out=False,
supports_autograd=False,
allow_cow_input_materialize_forward=[0],
sample_inputs_func=sample_inputs_log_normal,
error_inputs_func=error_inputs_log_normal,
skips=(
# Tests that assume input tensor has a meaningful effect on output tensor
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# AssertionError: Tensor-likes are not close!
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
# FX failed to normalize op - add the op to the op_skip list.
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# vmap: calling random operator not supported
DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"),
DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"),
DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'),
)),
OpInfo('normal',
variant_test_name='in_place',
op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.normal_, inp, *args, **kwargs),
inplace_variant=torch.Tensor.normal_,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_out=False,
supports_autograd=False,
allow_cow_input_materialize_forward=[0],
sample_inputs_func=sample_inputs_normal,
error_inputs_func=error_inputs_normal,
skips=(
# Tests that assume input is a tensor or sequence of tensors
DecorateInfo(unittest.skip("Test expects tensor input"), "TestCommon", "test_noncontiguous_samples"),
# Tests that assume input tensor has a meaningful effect on output tensor
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'),
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# AssertionError: Tensor-likes are not close!
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
# FX failed to normalize op - add the op to the op_skip list.
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# vmap: calling random operator not supported
DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"),
DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"),
)),
OpInfo('uniform',
op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.uniform_, inp, *args, **kwargs),
method_variant=None,
inplace_variant=torch.Tensor.uniform_,
dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_out=False,
supports_autograd=False,
is_factory_function=False,
allow_cow_input_materialize_forward=[0],
sample_inputs_func=sample_inputs_uniform,
error_inputs_func=error_inputs_uniform,
skips=(
# FX failed to normalize op - add the op to the op_skip list.
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# Tests that assume input tensor has a meaningful effect on output tensor
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# aten.uniform was not decomposed
DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
)),
BinaryUfuncInfo('clamp_max',
ref=_clamp_max_numpy,
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8),
supports_forward_ad=True,
supports_rhs_python_scalar=False,
supports_fwgrad_bwgrad=True,
rhs_make_tensor_kwargs=dict(exclude_zero=False),
skips=(
# RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat'
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion',
device_type='cuda'),
# dispatch to lazy test failed
DecorateInfo(unittest.expectedFailure, 'TestLazyOpInfo', 'test_dispatched_to_lazy'),
# test error disabled since rhs non-tensor python scalar is supported
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_errors'),
)),
BinaryUfuncInfo('clamp_min',
ref=_clamp_min_numpy,
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8),
supports_forward_ad=True,
supports_rhs_python_scalar=False,
supports_fwgrad_bwgrad=True,
rhs_make_tensor_kwargs=dict(exclude_zero=False),
skips=(
# RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat'
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion',
device_type='cuda'),
# dispatch to lazy test failed
DecorateInfo(unittest.expectedFailure, 'TestLazyOpInfo', 'test_dispatched_to_lazy'),
# test error disabled since rhs non-tensor python scalar is supported
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_errors'),
)),
BinaryUfuncInfo('mul',
aliases=('multiply',),
dtypes=all_types_and_complex_and(torch.chalf, torch.float16, torch.bfloat16, torch.bool),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_two_python_scalars=True,
error_inputs_sparse_func=error_inputs_sparse_mul,
sample_inputs_sparse_coo_func=partial(sample_inputs_sparse_mul, layout=torch.sparse_coo),
sample_inputs_sparse_csr_func=partial(sample_inputs_sparse_mul, layout=torch.sparse_csr),
sample_inputs_sparse_csc_func=partial(sample_inputs_sparse_mul, layout=torch.sparse_csc),
sample_inputs_sparse_bsr_func=partial(sample_inputs_sparse_mul, layout=torch.sparse_bsr),
sample_inputs_sparse_bsc_func=partial(sample_inputs_sparse_mul, layout=torch.sparse_bsc)),
BinaryUfuncInfo('sub',
# NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate
ref=lambda input, other, *, alpha=1: np.subtract(input, np.multiply(alpha, other)),
aliases=('subtract',),
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.chalf),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_add_sub,
supports_two_python_scalars=True,
decorators=(
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-2, rtol=0),
torch.bfloat16: tol(atol=1e-5, rtol=5e-3),
torch.complex32: tol(atol=1e-5, rtol=1e-3)}),
'TestBinaryUfuncs', 'test_reference_numerics'),
DecorateInfo(
toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}),
'TestCommon', 'test_complex_half_reference_testing', device_type='cpu'),
DecorateInfo(
toleranceOverride({torch.chalf: tol(atol=5e-3, rtol=0)}),
'TestDecomp', 'test_comprehensive', device_type='cpu'),
DecorateInfo(
toleranceOverride({torch.chalf: tol(atol=5e-3, rtol=0)}),
'TestDecomp', 'test_quick', device_type='cpu'),
),
skips=(
DecorateInfo(unittest.skip("Skipped!"),
'TestBinaryUfuncs',
'test_reference_numerics',
dtypes=(torch.uint8,)),
DecorateInfo(unittest.skip("Skipped!"),
'TestBinaryUfuncs',
'test_reference_numerics_small_values',
dtypes=(torch.uint8,)),
)),
OpInfo('addmm',
# This addmm OpInfo is for when alpha and beta are not both equal to 1.
# alpha=beta=1 is tested in the following opinfo, because that special case will
# trigger addmm being decomposed by a jit pass.
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_addmm,
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
'TestSchemaCheckModeOpInfo',
'test_schema_correctness',
dtypes=(torch.complex64, torch.complex128)),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-3, rtol=2e-3)}),
"TestConsistency", "test_output_grad_match", device_type="mps"),
)),
OpInfo('addmm',
# When alpha=beta=1 as compile-time constants, JIT will decompose addmm into mm and add.
variant_test_name='decomposed',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
autodiff_nonfusible_nodes=['aten::add', 'aten::mm'],
sample_inputs_func=partial(sample_inputs_addmm, alpha=1, beta=1),
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
'TestSchemaCheckModeOpInfo',
'test_schema_correctness',
dtypes=(torch.complex64, torch.complex128)),
# https://github.com/pytorch/pytorch/issues/71784
DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness',
device_type='cpu', dtypes=(torch.float16,)),
)),
OpInfo('addmv',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128,
torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=[
DecorateInfo(
toleranceOverride({torch.half: tol(atol=1e-5, rtol=3e-3)}),
'TestInductorOpInfo', 'test_comprehensive', device_type='cpu'),
DecorateInfo(toleranceOverride({torch.float32: tol(atol=2e-5, rtol=3e-6)}),
"TestConsistency", "test_output_match", device_type="mps"),
DecorateInfo(toleranceOverride({torch.float32: tol(atol=2e-5, rtol=3e-6)}),
"TestConsistency", "test_output_grad_match", device_type="mps"),
],
sample_inputs_func=sample_inputs_addmv),
OpInfo('addbmm',
ref=lambda M, batch1, batch2, beta=1, alpha=1: np.add(np.multiply(np.asarray(beta, dtype=M.dtype), M),
np.multiply(np.asarray(alpha, dtype=batch1.dtype),
np.sum(np.matmul(batch1, batch2), axis=0))),
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16,
*[torch.bfloat16]
if SM53OrLater or TEST_WITH_ROCM else []),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=[
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1.3e-05, rtol=1.3e-05),
torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),
'TestCommon', 'test_numpy_refs'),
# MPS has slightly worse precision. Is this acceptable?
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1.3e-04, rtol=1.3e-04),
torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),
'TestCommon', 'test_numpy_ref_mps'),
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-5),
torch.bfloat16: tol(atol=2e-1, rtol=6e-1)}),
'TestConsistency',
'test_output_match',
),
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1.5e-05, rtol=1e-05)}),
'TestCommon', 'test_out'),
DecorateInfo(
toleranceOverride({torch.half: tol(atol=6e-3, rtol=1e-2)}),
'TestInductorOpInfo', 'test_comprehensive', device_type='cpu'),
],
skips=(
# NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater),
# addbmm does not correctly warn when resizing out= inputs
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# https://github.com/pytorch/pytorch/issues/55907
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),
),
sample_inputs_func=sample_inputs_addbmm),
OpInfo('baddbmm',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128,
torch.bfloat16),
backward_dtypesIfCUDA=floating_types_and(torch.float16,
*[torch.bfloat16] if SM53OrLater or TEST_WITH_ROCM else [],
torch.complex64, torch.complex128),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=[
DecorateInfo(
toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),
'TestCommon', 'test_variant_consistency_eager', device_type='cuda'),
# Higher differences starting with Zen3 or Alder Lake
DecorateInfo(
toleranceOverride({torch.complex64: tol(atol=4e-05, rtol=4e-06)}),
'TestDecomp', 'test_quick', device_type='cpu'),
DecorateInfo(
toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),
'TestMathBits', 'test_conj_view', device_type='cuda'),
],
sample_inputs_func=sample_inputs_baddbmm,
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
'TestSchemaCheckModeOpInfo',
'test_schema_correctness',
dtypes=(torch.complex64, torch.complex128)),
)),
OpInfo('dot',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
assert_autodiffed=True,
sample_inputs_func=sample_inputs_dot_vdot,
error_inputs_func=error_inputs_dot_vdot,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
'TestSchemaCheckModeOpInfo',
'test_schema_correctness',
dtypes=(torch.complex64, torch.complex128)),
)),
OpInfo('vdot',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
sample_inputs_func=sample_inputs_dot_vdot,
error_inputs_func=error_inputs_dot_vdot,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
'TestSchemaCheckModeOpInfo',
'test_schema_correctness',
dtypes=(torch.complex64, torch.complex128)),
)),
OpInfo('bmm',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16,
*[torch.bfloat16]
if SM53OrLater or TEST_WITH_ROCM else []),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
assert_autodiffed=True,
assert_jit_shape_analysis=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater),
DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-5)}),
"TestCommon", "test_out"),
# Fast math on MacOS-13?
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=2e-5, rtol=5e-6)}),
'TestConsistency',
'test_output_match',
active_if=lambda _: MACOS_VERSION < 14.0,
device_type='mps',
dtypes=(torch.float32,)),
),
sample_inputs_func=sample_inputs_bmm),
OpInfo('mv',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_mv),
OpInfo('addr',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
# Reference: https://github.com/pytorch/pytorch/issues/50747
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/50747
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16)),
),
sample_inputs_func=sample_inputs_addr,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('addcmul',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# TODO: update sample inputs with for_inplace_variant kwarg to support this test
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),
),
sample_inputs_func=sample_inputs_addcmul_addcdiv,
reference_inputs_func=partial(
reference_inputs_elementwise_ternary, sample_inputs_func=reference_inputs_addcmul_addcdiv)),
OpInfo('addcdiv',
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# TODO: update sample inputs with for_inplace_variant kwarg to support this test
DecorateInfo(unittest.expectedFailure,
'TestCommon',
'test_variant_consistency_eager'),
),
sample_inputs_func=sample_inputs_addcmul_addcdiv,
reference_inputs_func=partial(
reference_inputs_elementwise_ternary, sample_inputs_func=reference_inputs_addcmul_addcdiv)),
UnaryUfuncInfo('asin',
aliases=('arcsin', ),
ref=np.arcsin,
domain=(-1, 1),
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_float=True,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
assert_autodiffed=True,
decorators=[
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-05, rtol=1e-03)}),
'TestUnaryUfuncs', device_type='cuda'
),
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=8e-5, rtol=4e-5)}),
'TestInductorOpInfo', 'test_comprehensive', device_type='cuda'
),
DecorateInfo(
toleranceOverride({torch.complex64: tol(atol=5e-05, rtol=2e-05)}),
'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu'
),
precisionOverride({torch.bfloat16: 1e-2}),
],
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
)),
# NOTE: derivative for inplace asinh is not implemented
UnaryUfuncInfo('asinh',
aliases=('arcsinh', ),
ref=np.arcsinh,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
supports_inplace_autograd=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
promotes_int_to_float=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
)),
UnaryUfuncInfo('atan',
aliases=('arctan', ),
ref=np.arctan,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
promotes_int_to_float=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.cfloat, torch.cdouble], active_if=(IS_MACOS or IS_WINDOWS)),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=[torch.cfloat, torch.cdouble], active_if=(IS_MACOS or IS_WINDOWS)),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
dtypes=[torch.cfloat, torch.cdouble], active_if=(IS_MACOS or IS_WINDOWS)),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
)),
BinaryUfuncInfo('atan2',
aliases=('arctan2',),
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_float=True,
supports_rhs_python_scalar=False,
skips=(
# Incorrectly attempts to use a scalar for the second argument
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),
)),
UnaryUfuncInfo('atanh',
aliases=('arctanh', ),
ref=np.arctanh,
domain=(-1, 1),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
decorators=[
precisionOverride({torch.bfloat16: 1e-2}),
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=9e-3, rtol=8e-5)}),
"TestInductorOpInfo",
"test_comprehensive",
device_type="cuda"
),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-3, rtol=2e-3)}),
"TestConsistency", "test_output_grad_match", device_type="mps"),
],
supports_inplace_autograd=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
promotes_int_to_float=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cuda', dtypes=[torch.cfloat],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
)),
OpInfo('allclose',
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
ref=np.allclose,
supports_autograd=False,
supports_forward_ad=False,
sample_inputs_func=sample_inputs_allclose,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'),
),
supports_out=False),
OpInfo('broadcast_to',
ref=np.broadcast_to,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_broadcast_to),
OpInfo('broadcast_shapes',
op=torch.broadcast_shapes,
ref=np.broadcast_shapes if np.lib.NumpyVersion(np.__version__) >= '1.20.0' else None,
dtypes=_dispatch_dtypes((torch.float32,)),
supports_out=False,
supports_gradgrad=False,
assert_autodiffed=False,
supports_autograd=False,
supports_scripting=False,
sample_inputs_func=sample_inputs_broadcast_shapes,
skips=(
# https://github.com/pytorch/pytorch/issues/64997
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# skip dtype tests since broadcast_shape is not device dependent.
# having dtypes limited to torch.float32 would cause test_dtypes to report unexpected success
DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_dtypes'),
# skip these tests since we have non tensor input
DecorateInfo(unittest.skip('Skipped!'), "TestCommon", "test_noncontiguous_samples"),
DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('broadcast_tensors',
ref=np.broadcast_arrays,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
sample_inputs_func=sample_inputs_broadcast_tensors,
reference_inputs_func=reference_inputs_broadcast_tensors,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
skips=(
# https://github.com/pytorch/pytorch/issues/64997
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# JIT does not support variadic tensors.
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252,
# please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]),
)),
OpInfo('block_diag',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# Default batching rule in core doesn't work for ops with TensorList args
check_batched_forward_grad=False,
skips=(
# https://github.com/pytorch/pytorch/issues/64997
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# JIT does not support variadic tensors.
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252,
# please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]),
),
sample_inputs_func=sample_inputs_block_diag),
UnaryUfuncInfo('bitwise_not',
ref=np.bitwise_not,
dtypes=integral_types_and(torch.bool),
dtypesIfHpu=custom_types(torch.bool),
operator_variant=operator.invert,
supports_autograd=False),
BinaryUfuncInfo('bitwise_left_shift',
op=torch.bitwise_left_shift,
dtypes=integral_types(),
dtypesIfCUDA=integral_types(),
dtypesIfHpu=custom_types(torch.int32, torch.int8, torch.bool),
operator_variant=operator.lshift,
inplace_operator_variant=operator.ilshift,
supports_autograd=False,
supports_one_python_scalar=True,
rhs_make_tensor_kwargs=dict(low=0),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'),
# https://github.com/pytorch/pytorch/issues/70904
DecorateInfo(unittest.skip("Some inputs produce undefined outputs"), 'TestCommon', 'test_compare_cpu'),
)),
BinaryUfuncInfo('bitwise_right_shift',
op=torch.bitwise_right_shift,
dtypes=integral_types(),
dtypesIfCUDA=integral_types(),
dtypesIfHpu=custom_types(torch.int32, torch.int8, torch.bool),
operator_variant=operator.rshift,
inplace_operator_variant=operator.irshift,
supports_autograd=False,
supports_one_python_scalar=True,
rhs_make_tensor_kwargs=dict(low=0),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'),
# https://github.com/pytorch/pytorch/issues/70904
DecorateInfo(unittest.skip("Some inputs produce undefined outputs"), 'TestCommon', 'test_compare_cpu'),
)),
OpInfo('combinations',
op=torch.combinations,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
supports_out=False,
sample_inputs_func=sample_inputs_combinations),
OpInfo('cartesian_prod',
op=torch.cartesian_prod,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_cartesian_prod,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270
DecorateInfo(unittest.expectedFailure,
'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
)),
OpInfo('cdist',
dtypes=floating_types(),
supports_out=False,
supports_gradgrad=False,
assert_autodiffed=False,
sample_inputs_func=sample_inputs_cdist),
UnaryUfuncInfo('ceil',
ref=np.ceil,
dtypes=all_types_and(torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
DecorateInfo(unittest.expectedFailure,
'TestNNCOpInfo',
'test_nnc_correctness',
dtypes=tuple(t for t in integral_types() if t != torch.uint8)),
),
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
assert_autodiffed=True),
OpInfo('cholesky',
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_cholesky,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],),
OpInfo('cholesky_inverse',
dtypes=floating_and_complex_types(),
backward_dtypes=floating_and_complex_types(),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_fwgrad_bwgrad=True,
supports_forward_ad=True,
check_batched_gradgrad=True,
sample_inputs_func=sample_inputs_linalg_cholesky_inverse,
gradcheck_wrapper=gradcheck_wrapper_triangular_input_real_positive_diagonal,
decorators=[
skipCUDAIfNoMagma,
skipCPUIfNoLapack,
DecorateInfo(
toleranceOverride({
torch.float32: tol(atol=5e-03, rtol=1e-04)
}),
'TestCommon', device_type='cpu',
),
DecorateInfo(
toleranceOverride({
torch.float32: tol(atol=5e-03, rtol=1e-04)
}),
'TestEagerFusionOpInfo', device_type='cpu',
),
],
skips=(
# Strides are not the same! Original strides were ((4, 2, 1),) and strides are now ((4, 1, 2),)
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),),
),
OpInfo('cholesky_solve',
op=torch.cholesky_solve,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_cholesky_solve,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
gradcheck_wrapper=lambda *args, **kwargs: gradcheck_wrapper_triangular_input(*args, idx=1, **kwargs),
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack]),
OpInfo('chunk',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32),
sample_inputs_func=sample_inputs_chunk,
reference_inputs_func=reference_inputs_chunk,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False),
OpInfo('unsafe_chunk',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf),
sample_inputs_func=sample_inputs_chunk,
check_batched_forward_grad=False,
reference_inputs_func=reference_inputs_chunk,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False),
OpInfo('clone',
ref=np.copy,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8, torch.bool),
sample_inputs_func=sample_inputs_clone_contiguous,
reference_inputs_func=reference_inputs_clone_contiguous,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
skips=(
# TypeError: _copy_dispatcher() got an unexpected keyword argument 'memory_format'
# (NumPy reference needs to be extended with memory_format)
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref'),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref_mps'),
),),
OpInfo('contiguous',
op=lambda x, *args, **kwargs: x.contiguous(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf),
sample_inputs_func=sample_inputs_clone_contiguous,
reference_inputs_func=reference_inputs_clone_contiguous,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
autodiff_fusible_nodes=['aten::contiguous'],
assert_jit_shape_analysis=True,
supports_out=False,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
)),
OpInfo('sum_to_size',
op=lambda x, *args, **kwargs: x.sum_to_size(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_sum_to_size,
error_inputs_func=error_inputs_sum_to_size,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float,)),
)),
OpInfo('clamp',
aliases=('clip',),
ref=_clamp_numpy,
dtypes=all_types_and(torch.bfloat16, torch.half),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8, torch.bool),
sample_inputs_func=sample_inputs_clamp,
reference_inputs_func=partial(reference_inputs_elementwise_ternary, sample_inputs_func=sample_inputs_clamp),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# NNC appear to not handle boolean clamp
DecorateInfo(unittest.expectedFailure,
'TestNNCOpInfo',
'test_nnc_correctness',
dtypes=(torch.bool,)),
# MPS does not support float64, while numpy does internal computations in float64.
# See https://github.com/pytorch/pytorch/blob/3c1cf03fde145bdbe1f5ffb81765d076c10b4c04/test/test_ops.py#L260-L264
DecorateInfo(unittest.expectedFailure,
'TestCommon',
'test_numpy_ref_mps'),
)),
UnaryUfuncInfo('positive',
ref=np.positive,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
),
UnaryUfuncInfo('conj',
ref=np.conj,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16,
torch.half, torch.chalf),
dtypesIfHpu=custom_types(torch.float32, torch.int32),
supports_sparse=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
supports_out=False),
UnaryUfuncInfo('conj_physical',
decomp_aten_name='_conj_physical',
ref=np.conj,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16,
torch.half, torch.chalf),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
skips=(
# RuntimeError: inputSet && outputSet
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":118,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, )),
DecorateInfo(unittest.skip("Skipped! conj_physical_ not implemented for sparse"),
'TestSparseUnaryUfuncs', 'test_inplace'),
)),
OpInfo('resolve_conj',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
sample_inputs_func=sample_inputs_view_as_real,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
),
OpInfo('resolve_neg',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
sample_inputs_func=sample_inputs_view_as_real,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
),
OpInfo('view_as_real',
dtypes=complex_types(),
supports_forward_ad=True,
supports_out=False,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_view_as_real,
test_conjugated_samples=False,
),
OpInfo('view_as_complex',
dtypes=floating_types_and(torch.half),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
test_neg_view=False,
sample_inputs_func=sample_inputs_view_as_complex,
skips=(
# RuntimeError: Tensor must have a last dimension with stride 1
DecorateInfo(unittest.expectedFailure, "TestCommon", "test_noncontiguous_samples"),
# RuntimeError: "eq_cpu" not implemented for 'ComplexHalf'
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.half,)),
# RuntimeError: view size is not compatible with input tensor's size and stride
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"),
)),
BinaryUfuncInfo('complex',
dtypes=floating_types_and(torch.half),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_rhs_python_scalar=False,
error_inputs_func=error_inputs_complex,
skips=(
# Tests don't account for complex's type promotion semantics
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps'),
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'),)),
BinaryUfuncInfo('copysign',
sample_inputs_func=sample_inputs_copysign,
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
promotes_int_to_float=True,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True),
OpInfo('corrcoef',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_corrcoef,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
'TestSchemaCheckModeOpInfo',
'test_schema_correctness',
dtypes=(torch.complex64, torch.complex128)),
),
supports_out=False),
UnaryUfuncInfo('cos',
ref=np.cos,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
assert_autodiffed=True,
handles_large_floats=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_float=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', active_if=IS_WINDOWS),
# This fails on CUDA but passes on ROCm
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=(torch.cdouble,), device_type='cuda'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
# AssertionError: Tensor-likes are not close!
# Greatest absolute difference: nan at index (700,) (up to 1e-05 allowed)
# Greatest relative difference: nan at index (700,) (up to 0.001 allowed)
DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cuda',
dtypes=(torch.chalf,), active_if=IS_WINDOWS),
)),
UnaryUfuncInfo('cosh',
ref=np_unary_ufunc_integer_promotion_wrapper(np.cosh),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_float=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/48641
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.int8]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=[torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cpu',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
# AssertionError: Tensor-likes are not close!
# Greatest absolute difference: nan at index (6000,) (up to 1e-05 allowed)
# Greatest relative difference: nan at index (6000,) (up to 0.001 allowed)
DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cuda',
dtypes=(torch.chalf,), active_if=IS_WINDOWS),
)),
OpInfo('cov',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_cov,
error_inputs_func=error_inputs_cov,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
'TestSchemaCheckModeOpInfo',
'test_schema_correctness',
dtypes=(torch.complex64, torch.complex128)),
# Float did not match double
DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_grad'),
# Jacobian mismatch
DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_gradgrad'),
DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD'),
DecorateInfo(unittest.skip("Barely fails"), 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'),
# JIT test not working for tensor kwargs (https://github.com/pytorch/pytorch/issues/58507)
# RuntimeError:
# undefined value tensor:
# File "<string>", line 3
# def the_method(i0):
# return torch.cov(i0, correction=0, fweights=None, aweights=tensor([0.0518, 0.4681], dtype=torch.float32, requires_grad=True)) # noqa: B950
# ~~~~~~ <--- HERE
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=8e-3, rtol=1.4e-3)}),
"TestInductorOpInfo", "test_comprehensive", device_type="cpu"),
DecorateInfo(toleranceOverride({torch.float32: tol(atol=3e-4, rtol=1e-4)}),
"TestConsistency", "test_output_grad_match", device_type="mps"),
)),
OpInfo('cross',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32),
sample_inputs_func=sample_inputs_cross,
supports_fwgrad_bwgrad=True,
supports_out=True,
supports_forward_ad=True),
OpInfo('cumsum',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# cumsum does not handle correctly out= dtypes
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
),
sample_inputs_func=sample_inputs_cumulative_ops),
OpInfo('cumprod',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# cumprod does not handle correctly out= dtypes
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
),
# gradgradcheck fails in fast_mode=True: #56275
sample_inputs_func=sample_inputs_cumprod,
gradcheck_fast_mode=False),
OpInfo('cummax',
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('cummin',
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
UnaryUfuncInfo('deg2rad',
ref=np.radians,
decorators=(precisionOverride({torch.bfloat16: 7e-1,
torch.float16: 7e-1}),),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
promotes_int_to_float=True),
OpInfo('diff',
op=torch.diff,
# np.diff has np._NoValue as default values for prepend and append, compare_with_reference breaks if prepend/append
# are set as None when converting to numpy
ref=lambda input, n=1, dim=-1, prepend=np._NoValue, append=np._NoValue: (
np.diff(input, n, dim, np._NoValue if prepend is None else prepend, np._NoValue if append is None else append)
),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_diff,
error_inputs_func=error_inputs_diff,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
skips=(
)),
BinaryUfuncInfo('div',
aliases=('divide',),
variant_test_name='no_rounding_mode',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
promotes_int_to_float=True,
supports_fwgrad_bwgrad=True,
supports_two_python_scalars=True,
assert_autodiffed=True,
rhs_make_tensor_kwargs=dict(exclude_zero=True),),
BinaryUfuncInfo('div',
aliases=('divide',),
variant_test_name='trunc_rounding',
dtypes=all_types_and(torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8),
sample_kwargs=lambda device, dtype, input:
({"rounding_mode": "trunc"}, {"rounding_mode": "trunc"}),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_two_python_scalars=True,
assert_autodiffed=True,
rhs_make_tensor_kwargs=dict(exclude_zero=True),
decorators=(
# See https://github.com/pytorch/pytorch/issues/111126
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'),
),
skips=(
# RuntimeError: MALFORMED INPUT: Unhandled node kind (in computeValue): aten::div
DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_working'),
# FIXME:
# torch.autograd.gradcheck.GradcheckError: Jacobian mismatch for
# output 0 with respect to input 1,
# numerical:tensor(-17746.9307, dtype=torch.float64)
# analytical:tensor(0., dtype=torch.float64)
DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients',
'test_fn_grad', device_type='cpu',
dtypes=(torch.float64,)),
)),
BinaryUfuncInfo('div',
aliases=('divide',),
variant_test_name='floor_rounding',
dtypes=all_types_and(torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8),
sample_kwargs=lambda device, dtype, input:
({"rounding_mode": "floor"}, {"rounding_mode": "floor"}),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_two_python_scalars=True,
assert_autodiffed=True,
rhs_make_tensor_kwargs=dict(exclude_zero=True),
decorators=(
# See https://github.com/pytorch/pytorch/issues/111126
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'),
),
skips=(
# RuntimeError: MALFORMED INPUT: Unhandled node kind (in computeValue): aten::div
DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_working'),
# FIXME:
# torch.autograd.gradcheck.GradcheckError: Jacobian mismatch for
# output 0 with respect to input 1,
# numerical:tensor(-17746.9307, dtype=torch.float64)
# analytical:tensor(0., dtype=torch.float64)
DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients',
'test_fn_grad',
dtypes=(torch.float64,),
device_type='cpu'),
DecorateInfo(unittest.skip("Broken on MacOS13"),
'TestConsistency',
'test_output_match',
device_type='mps',
dtypes=(torch.float16,),
active_if=lambda _: MACOS_VERSION < 14.0),
)),
BinaryUfuncInfo('true_divide',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_forward_ad=True,
promotes_int_to_float=True,
supports_fwgrad_bwgrad=True,
supports_two_python_scalars=True,
rhs_make_tensor_kwargs=dict(exclude_zero=True)),
OpInfo('equal',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8, torch.bool),
ref=lambda input, other: (input == other).all(),
sample_inputs_func=sample_inputs_equal,
supports_autograd=False,
supports_tracing=False,
skips=(
)),
UnaryUfuncInfo('exp',
ref=np_unary_ufunc_integer_promotion_wrapper(np.exp),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/48010
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_float=True),
OpInfo('expand',
op=lambda self, shape: self.expand(shape),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32),
sample_inputs_func=sample_inputs_expand,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
supports_out=False,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
)),
OpInfo('expand_as',
op=lambda self, other: self.expand_as(other),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8, torch.bool),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_expand_as,
supports_out=False,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),),
),
OpInfo('expand_copy',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_expand,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
supports_out=True,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
)),
OpInfo('diag',
ref=np.diag,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_diag,
error_inputs_func=error_inputs_diag),
OpInfo('diag_embed',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf),
supports_out=False,
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_diagonal_diag_embed,
reference_inputs_func=reference_inputs_diagonal_diag_embed,
error_inputs_func=error_inputs_diagonal_diag_embed),
OpInfo('diagonal',
aten_backward_name='diagonal_backward',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_diagonal_diag_embed,
reference_inputs_func=reference_inputs_diagonal_diag_embed,
error_inputs_func=error_inputs_diagonal_diag_embed),
OpInfo('diagonal_copy',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_diagonal_diag_embed,
reference_inputs_func=reference_inputs_diagonal_diag_embed,
error_inputs_func=error_inputs_diagonal_diag_embed),
OpInfo('diagonal_scatter',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_diagonal_scatter),
OpInfo('alias_copy',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf),
sample_inputs_func=sample_inputs_alias_copy,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=True),
BinaryUfuncInfo('eq',
ref=np.equal,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8, torch.bool),
always_returns_bool=True,
supports_autograd=False,
sample_inputs_func=sample_inputs_comparison_ops,
skips=(
)),
BinaryUfuncInfo('fmax',
op=torch.fmax,
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8, torch.bool),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_rhs_python_scalar=False,
skips=(
# RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat'
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'),
)),
BinaryUfuncInfo('fmin',
op=torch.fmin,
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8, torch.bool),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_rhs_python_scalar=False,
skips=(
# RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat'
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'),
)),
BinaryUfuncInfo('fmod',
ref=np.fmod,
dtypes=all_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=None,
rhs_make_tensor_kwargs={'exclude_zero': True},
decorators=(
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
'test_contig_vs_every_other',
dtypes=(torch.bfloat16,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
'test_non_contig',
dtypes=(torch.bfloat16,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
'test_reference_numerics',
dtypes=(torch.bfloat16,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
'test_reference_numerics_small_values',
dtypes=(torch.uint8,)),
# FIXME:
# torch.autograd.gradcheck.GradcheckError: Jacobian mismatch for
# output 0 with respect to input 1,
# numerical:tensor(101.6283, dtype=torch.float64)
# analytical:tensor(-18.3575, dtype=torch.float64)
DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients',
'test_fn_grad',
dtypes=(torch.float64,),
device_type='cpu'),
)),
BinaryUfuncInfo('remainder',
ref=np.remainder,
dtypes=all_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8, torch.bool),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=None,
operator_variant=operator.mod,
inplace_operator_variant=operator.imod,
supports_one_python_scalar=True,
rhs_make_tensor_kwargs={'exclude_zero': True},
decorators=(
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
'test_contig_vs_every_other',
dtypes=(torch.bfloat16,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
'test_non_contig',
dtypes=(torch.bfloat16,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
'test_reference_numerics',
dtypes=(torch.bfloat16,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
'test_reference_numerics_small_values',
dtypes=(torch.uint8,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo',
'test_nnc_correctness',
dtypes=(torch.bfloat16,)),
# Fails on XLA
# False is not true : Tensors failed to compare as equal!
# Attempted to compare equality of tensors with different dtypes
DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla', dtypes=(torch.long,)),
# FIXME:
# torch.autograd.gradcheck.GradcheckError: Jacobian mismatch for
# output 0 with respect to input 1,
# numerical:tensor(102.4676, dtype=torch.float64)
# analytical:tensor(-17.5182, dtype=torch.float64)
DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients',
'test_fn_grad', device_type='cpu',
dtypes=(torch.float64,)),
DecorateInfo(
toleranceOverride({
torch.float16: tol(atol=5e-4, rtol=3e-3),
}),
"TestInductorOpInfo",
"test_comprehensive",
device_type="cuda"
),
DecorateInfo(unittest.skip("Broken on MacOS13"),
'TestConsistency',
'test_output_match',
device_type='mps',
dtypes=(torch.float16,),
active_if=lambda _: MACOS_VERSION < 14.0),
)),
UnaryUfuncInfo('frac',
ref=lambda x: np.modf(x)[0],
dtypes=floating_types_and(torch.bfloat16, torch.float16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=(torch.bfloat16, torch.float16, torch.float32, torch.float64)),
                       # See https://github.com/pytorch/pytorch/issues/76047
DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness',
dtypes=(torch.bfloat16, torch.float32, torch.float64)),
)),
OpInfo('stft',
decorators=[
skipCPUIfNoFFT,
DecorateInfo(unittest.skip("Skipped! stft does not match the native function"),
'TestJit', 'test_variant_consistency_jit'),
],
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_stft,
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_forward_grad=False,
check_batched_grad=False,
check_batched_gradgrad=False,
supports_out=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
),
OpInfo('istft',
dtypes=complex_types(),
sample_inputs_func=sample_inputs_istft,
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_forward_grad=False,
check_batched_grad=False,
check_batched_gradgrad=False,
supports_out=False,
decorators=(
DecorateInfo(unittest.skip("Skipped! istft does not match the native function"),
'TestJit', 'test_variant_consistency_jit'),
),
skips=(
skipCPUIfNoFFT,
# gradcheck fails on ROCm (gh-68429)
# grad is computed improperly (probably for weights tensor)
DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_grad'),
# Pre-existing condition (calls .item); needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'),
)),
UnaryUfuncInfo('floor',
ref=np.floor,
dtypes=all_types_and(torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
DecorateInfo(unittest.expectedFailure,
'TestNNCOpInfo',
'test_nnc_correctness',
dtypes=tuple(t for t in integral_types() if t != torch.uint8)),
),
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
assert_autodiffed=True),
OpInfo('flip',
op=torch.flip,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8, torch.bool),
sample_inputs_func=sample_inputs_flip,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False),
OpInfo('fliplr',
op=torch.fliplr,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_fliplr_flipud,
error_inputs_func=error_inputs_fliplr,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False),
OpInfo('flipud',
op=torch.flipud,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_fliplr_flipud,
error_inputs_func=error_inputs_flipud,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False),
OpInfo('sparse.sampled_addmm',
dtypes=floating_and_complex_types(),
supports_autograd=True,
sample_inputs_func=sample_inputs_sparse_sampled_addmm,
decorators=[
skipCUDAIf(not ((_get_torch_cuda_version() >= (11, 3))
or (_get_torch_rocm_version() >= (5, 2))),
"cusparseSDDMM was added in 11.2.1"),
skipCPUIfNoMklSparse,
skipXPU],
skips=(
# NotImplementedError: Tensors of type SparseCsrTensorImpl do not have is_contiguous
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'),
# RuntimeError: Sparse CSR tensors do not have strides.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
DecorateInfo(unittest.skip("Skipped!"), 'TestTags', 'test_tags'),
# RuntimeError: sampled_addmm: Expected result to have sparse csr layout, but got Strided
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out_warning'),
# RuntimeError: Sparse CSR tensors do not have strides
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),
# RuntimeError: Sparse CSR tensors do not have strides
DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_operator'),
# RuntimeError: Sparse CSR tensors do not have strides
DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward'),
# RuntimeError: Sparse CSR tensors do not have strides
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'),
# RuntimeError: Sparse CSR tensors do not have strides
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'),
# RuntimeError: Sparse CSR tensors do not have strides
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'),
# RuntimeError: Sparse CSR tensors do not have strides
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# RuntimeError: unsupported memory format option Preserve
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# RuntimeError: sparse_mask does not support automatic differentiation for outputs with complex dtype
# RuntimeError: Sparse CSR tensors do not have strides
DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'),
# ValueError: Sparse output is not supported at gradcheck yet. Please call to_dense(masked_grad=...) ...
DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad'),
# RuntimeError: sparse_mask does not support automatic differentiation for outputs with complex dtype.
# RuntimeError: Sparse CSR tensors do not have is_contiguous
DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad'),
# ValueError: Sparse output is not supported at gradcheck yet. Please call to_dense(masked_grad=...) ...
DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'),
# NotImplementedError: Could not run 'aten::sparse_sampled_addmm' with arguments from the 'SparseCsrMeta' backend.
DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_meta_outplace'),
DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace'),
DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_meta_outplace'),
DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides'),
DecorateInfo(unittest.skip("Skipped!"), 'TestFakeTensor', 'test_fake_crossref_backward_no_amp'),
)),
OpInfo('sparse.mm',
dtypes=floating_types_and(torch.bfloat16, torch.float16),
variant_test_name='reduce',
supports_autograd=True,
supports_out=False,
supports_gradgrad=False,
supports_forward_ad=False,
sample_inputs_func=sample_inputs_sparse_mm_reduce,
decorators=[onlyCPU],
skips=(
# NotImplementedError: Tensors of type SparseCsrTensorImpl do not have is_contiguous
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'),
# RuntimeError: Sparse CSR tensors do not have strides.
DecorateInfo(unittest.skip("Skipped!"), 'TestTags', 'test_tags'),
# RuntimeError: Sparse CSR tensors do not have strides
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),
# RuntimeError: Sparse CSR tensors do not have strides
DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_operator'),
# RuntimeError: Sparse CSR tensors do not have strides
DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward'),
# RuntimeError: Sparse CSR tensors do not have strides
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'),
# RuntimeError: Sparse CSR tensors do not have strides
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'),
# RuntimeError: Sparse CSR tensors do not have strides
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'),
# RuntimeError: Sparse CSR tensors do not have strides
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# RuntimeError: unsupported memory format option Preserve
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# ValueError: Sparse output is not supported at gradcheck yet. Please call to_dense(masked_grad=...) ...
DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'),
# RuntimeError: Sparse CSR tensors do not have is_contiguou
DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad'),
# ValueError: Sparse output is not supported at gradcheck yet. Please call to_dense(masked_grad=...) ...
DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad'),
# RuntimeError: Sparse CSR tensors do not have strides
DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'),
# ValueError: Sparse output is not supported at gradcheck yet. Please call to_dense(masked_grad=...) ...
DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_fail_gradgrad'),
# NotImplementedError: Could not run 'aten::_sparse_mm_reduce_impl' with arguments from the 'SparseCsrMeta' backend
DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_meta_outplace'),
DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace'),
DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_meta_outplace'),
)),
UnaryUfuncInfo('i0',
ref=np_unary_ufunc_integer_promotion_wrapper(
scipy.special.i0) if TEST_SCIPY else None,
aliases=('special.i0',),
decorators=(precisionOverride({torch.bfloat16: 3e-1,
torch.float16: 5e-1}),),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_float=True,
sample_inputs_func=sample_inputs_i0_i1,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=(torch.int8,)),
)),
BinaryUfuncInfo('floor_divide',
ref=_floor_divide_np,
dtypes=all_types_and(torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8, torch.bool),
supports_autograd=False,
rhs_make_tensor_kwargs=dict(exclude_zero=True),
supports_two_python_scalars=True,
skips=(
# AssertionError: Results of original model and exported/imported version of model differed
DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'),
# bfloat16 floor_divide compared with a float32 reference works inconsistently
DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs',
dtypes=(torch.bfloat16,)),
# int8 floor divide has different results for -128 // -1 vs. NumPy
DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', 'test_reference_numerics_small_values',
dtypes=(torch.int8,)),
# The following tests fails on some jobs
DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values',
dtypes=(torch.float16,)),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-3, rtol=5e-3)}),
'TestBinaryUfuncs', 'test_reference_numerics'),
)),
UnaryUfuncInfo('frexp',
op=torch.frexp,
ref=np.frexp,
dtypes=floating_types_and(torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
# skip testing torch.frexp as it is not supported by ROCm platform yet
decorators=[],
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# skips below tests as torch.frexp returns tuple-like (mantissa, exponent) as outputs,
# while these tests currently requires output to a single tensor.
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_batch_vs_slicing'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_contig_vs_every_other'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_contig_vs_transposed'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_non_contig_expand'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_variant_consistency'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'),
# skips test_reference_numerics due to error in Windows CI.
# The np.frexp returns exponent as np.intc dtype on Windows platform,
# and np.intc does not have the correspond torch dtype
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
active_if=IS_WINDOWS),
)),
UnaryUfuncInfo('log1p',
ref=np.log1p,
aliases=('special.log1p',),
domain=(-1, None),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
decorators=(precisionOverride({torch.bfloat16: 1e-1}),),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
assert_autodiffed=True,
promotes_int_to_float=True),
BinaryUfuncInfo('ge',
ref=np.greater_equal,
aliases=('greater_equal',),
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32),
always_returns_bool=True,
supports_autograd=False,
skips=(
)),
OpInfo('geqrf',
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_qr_geqrf,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
supports_autograd=False,
skips=(
# FIXME: geqrf can't forward with complex inputs that require grad
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'),
# Strides are not the same!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
)),
BinaryUfuncInfo('gt',
ref=np.greater,
aliases=('greater',),
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8),
always_returns_bool=True,
supports_autograd=False,
skips=(
)),
UnaryUfuncInfo('imag',
ref=np.imag,
dtypes=complex_types_and(torch.chalf),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/issues/66357
# RuntimeError: view_as_real doesn't work on unresolved conjugated tensors.
check_batched_forward_grad=False,
skips=(
# Skip since real and imag don't have out variants.
DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'),
)),
OpInfo('gradient',
dtypes=floating_and_complex_types_and(torch.int8, torch.int16,
torch.int32, torch.int64,
torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# following tests give a runtime error with undefined value tensor
# see discussion : https://github.com/pytorch/pytorch/issues/56660
# RuntimeError:
# Arguments for call are not valid.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, torch.complex64)), # noqa: B950
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'),
),
supports_inplace_autograd=False,
sample_inputs_func=sample_inputs_gradient,
error_inputs_func=error_inputs_gradient),
OpInfo('isin',
dtypes=all_types_and(torch.bfloat16, torch.half),
supports_autograd=False,
sample_inputs_func=sample_inputs_isin),
OpInfo('kthvalue',
dtypes=all_types_and(torch.bfloat16, torch.float16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_kthvalue,
error_inputs_func=error_inputs_kthvalue),
BinaryUfuncInfo('le',
ref=np.less_equal,
aliases=('less_equal',),
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
always_returns_bool=True,
supports_autograd=False,
skips=(
)),
OpInfo('linspace',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32),
is_factory_function=True,
supports_out=True,
supports_autograd=False,
error_inputs_func=error_inputs_linspace,
sample_inputs_func=sample_inputs_linspace,
skips=(
# FX failed to normalize op - add the op to the op_skip list.
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# Tests that assume input is a tensor or sequence of tensors
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
# Same failure as arange: cannot find linspace in captured graph
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
# UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# UserWarning: CUDA caching allocator reports a memory leak not verified by the driver API
# in __main__.TestJitCUDA.test_variant_consistency_jit_logspace_cuda_complex64!
# Caching allocator allocated memory was 0 and is now reported as 307200 on device 0.
# CUDA driver allocated memory was 1254555648 and is now 1242955776.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
dtypes=(torch.cfloat,), device_type="cuda"),
)),
OpInfo('linspace',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32),
is_factory_function=True,
supports_out=True,
supports_autograd=False,
error_inputs_func=error_inputs_linspace,
sample_inputs_func=sample_inputs_linspace_tensor_overload,
variant_test_name="tensor_overload",
skips=(
# FX failed to normalize op - add the op to the op_skip list.
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# TypeError: 'int' object is not subscriptable
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
# Same failure as arange: cannot find linspace in captured graph
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
# UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# UserWarning: CUDA caching allocator reports a memory leak not verified by the driver API
# in __main__.TestJitCUDA.test_variant_consistency_jit_logspace_cuda_complex64!
# Caching allocator allocated memory was 0 and is now reported as 307200 on device 0.
# CUDA driver allocated memory was 1254555648 and is now 1242955776.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
dtypes=(torch.cfloat,), device_type="cuda"),
)),
OpInfo('logspace',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
is_factory_function=True,
supports_out=True,
supports_autograd=False,
error_inputs_func=error_inputs_linspace,
sample_inputs_func=sample_inputs_logspace,
skips=(
# FX failed to normalize op - add the op to the op_skip list.
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# Tests that assume input is a tensor or sequence of tensors
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
# Same failure as arange: cannot find linspace in captured graph
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
# UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# Off-by-one issue when casting floats to ints
DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick',
dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"),
DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_comprehensive',
dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"),
# UserWarning: CUDA caching allocator reports a memory leak not verified by the driver API
# in __main__.TestJitCUDA.test_variant_consistency_jit_logspace_cuda_complex64!
# Caching allocator allocated memory was 0 and is now reported as 307200 on device 0.
# CUDA driver allocated memory was 1254555648 and is now 1242955776.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
dtypes=(torch.cfloat,), device_type="cuda"),
)),
OpInfo('logspace',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
is_factory_function=True,
supports_out=True,
supports_autograd=False,
error_inputs_func=error_inputs_linspace,
sample_inputs_func=sample_inputs_logspace_tensor_overload,
variant_test_name="tensor_overload",
skips=(
# FX failed to normalize op - add the op to the op_skip list.
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# TypeError: 'int' object is not subscriptable
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
# Same failure as arange: cannot find linspace in captured graph
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
# UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# Off-by-one issue when casting floats to ints
DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick',
dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"),
DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_comprehensive',
dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"),
# UserWarning: CUDA caching allocator reports a memory leak not verified by the driver API
# in __main__.TestJitCUDA.test_variant_consistency_jit_logspace_cuda_complex64!
# Caching allocator allocated memory was 0 and is now reported as 307200 on device 0.
# CUDA driver allocated memory was 1254555648 and is now 1242955776.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
dtypes=(torch.cfloat,), device_type="cuda"),
)),
UnaryUfuncInfo('log',
ref=np.log,
domain=(0, None),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16, torch.chalf),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_float=True,
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
),
# log(z)->-inf for |z|->0
reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)),
UnaryUfuncInfo('log10',
ref=np.log10,
domain=(0, None),
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_float=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
),
# log10(z)->-inf for |z|->0
reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)),
UnaryUfuncInfo('log2',
ref=np.log2,
domain=(0, None),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_float=True,
decorators=(precisionOverride({torch.bfloat16: 1e-1}),),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.cfloat, torch.cdouble]),
),
# log2(z)->-inf for |z|->0
reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)),
BinaryUfuncInfo('ldexp',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_inplace_autograd=False,
promotes_int_to_float=True,
supports_out=True,
supports_rhs_python_scalar=False,
skips=(
# RuntimeError: mul(): functions with out=... arguments don't support
# automatic differentiation, but one of the arguments requires grad
# https://github.com/pytorch/pytorch/issues/68966
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
),
decorators=[
DecorateInfo(
toleranceOverride({
torch.complex64: tol(atol=1e-05, rtol=1e-05)
}),
'TestCommon', device_type='cpu',
),
], ),
BinaryUfuncInfo('logaddexp',
dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16),
dtypesIfCUDA=floating_and_complex_types_and(torch.bfloat16, torch.float16, torch.complex32),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_rhs_python_scalar=False),
OpInfo('logaddexp2',
dtypes=floating_types_and(torch.bfloat16, torch.half),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_logaddexp),
UnaryUfuncInfo('logical_not',
ref=np.logical_not,
decorators=(precisionOverride({torch.bfloat16: 7e-1,
torch.float16: 5e-1}),),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int8, torch.bool),
supports_autograd=False,
skips=(
# The function variant always returns BoolTensor
# while the inplace variant preserves the input dtype.
# >>> t = torch.randn(3)
# >>> torch.logical_not(t)
# tensor([False, False, False])
# >>> torch.logical_not(t).dtype
# torch.bool
# >>> t.logical_not_().dtype
# torch.float32
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_variant_consistency',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)),
)),
BinaryUfuncInfo('lt',
ref=np.less,
aliases=('less',),
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int8, torch.int32),
always_returns_bool=True,
supports_autograd=False,
skips=(
)),
OpInfo('lu_unpack',
op=torch.lu_unpack,
dtypes=floating_and_complex_types(),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(skipCPUIfNoLapack,),
sample_inputs_func=sample_inputs_lu_unpack),
OpInfo('lu',
op=torch.lu,
dtypes=floating_and_complex_types(),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_lu,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
skips=(
# we skip jit tests because `lu` is a torch function
# RuntimeError:
# 'Tensor (inferred)' object has no attribute or method 'lu'.:
# File "<string>", line 3
# def the_method(i0):
# return i0.lu(True, True)
# ~~~~~ <--- HERE
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# RuntimeError not raised: Expected RuntimeError when calling with input.device=cpu and out.device=cuda
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
# UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
)),
OpInfo('lu_solve',
op=torch.lu_solve,
dtypes=floating_and_complex_types(),
supports_forward_ad=True,
# See https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_lu_solve,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Tests different backward paths"),
"TestCommon", "test_floating_inputs_are_differentiable"),),
decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver]),
OpInfo('masked_fill',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int8, torch.bool, torch.int32),
sample_inputs_func=sample_inputs_masked_fill,
error_inputs_func=error_inputs_masked_fill,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_forward_grad=False,
supports_out=False),
OpInfo('masked_scatter',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int8, torch.bool, torch.int32),
sample_inputs_func=sample_inputs_masked_scatter,
error_inputs_func=error_inputs_masked_scatter,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
supports_out=False,
skips=(
# Compiler issue on ROCm. Regression started in ROCm 6.4.
DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_non_standard_bool_values',
dtypes=[torch.bool], active_if=TEST_WITH_ROCM),
)),
OpInfo('masked_select',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_masked_select,
error_inputs_func=error_inputs_masked_select,
skips=(
# Compiler issue on ROCm. Might need to skip until ROCm5.5
DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_non_standard_bool_values',
dtypes=[torch.bool], active_if=TEST_WITH_ROCM),
)),
OpInfo('matrix_exp',
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
aliases=('linalg.matrix_exp',),
sample_inputs_func=sample_inputs_matrix_exp,
# Needs to construct a 2nx2n matrix by copy_ ing into it
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
skips=(
# mexp does not support bf16 and fp16
DecorateInfo(unittest.skip('Skipped!'), 'TestInductorOpInfo', 'test_comprehensive',
dtypes=[torch.half], device_type="cpu"),
),
supports_out=False,
),
OpInfo('matmul',
aliases=('linalg.matmul',),
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16,
*[torch.bfloat16]
if SM53OrLater or TEST_WITH_ROCM else []),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
assert_autodiffed=True,
assert_jit_shape_analysis=True,
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_forward_grad=False,
sample_inputs_func=partial(sample_inputs_matmul, is_rmatmul=False),
decorators=[
# NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater),
# ROCm intermittently fails the test with standard atol/rtol
DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=0)}),
'TestCommon', 'test_noncontiguous_samples', device_type='cuda',
active_if=TEST_WITH_ROCM),
DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=0)}),
'TestCommon', 'test_out', device_type='cuda',
active_if=TEST_WITH_ROCM),
# mv for the sample with shapes (S, S, M, M), (M,) has some variance in the
# backward on CPU
DecorateInfo(toleranceOverride({torch.float32: tol(atol=0, rtol=1e-5)}),
'TestCommon', 'test_noncontiguous_samples',
device_type='cpu'),
DecorateInfo(
toleranceOverride({
torch.float32: tol(atol=1e-5, rtol=1e-5),
torch.complex64: tol(atol=1e-5, rtol=1e-5),
}),
"TestDecomp", "test_comprehensive", device_type="cuda",
),
],
skips=(
# Strides are not the same!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
# https://github.com/pytorch/pytorch/issues/67470
DecorateInfo(unittest.skip("67470!"),
'TestCommon', 'test_noncontiguous_samples',
device_type='cpu', dtypes=(torch.long,)),
# AssertionError: False is not true : Tensors failed to compare as equal!
DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo',
device_type='xla', dtypes=(torch.long,)),
# https://github.com/pytorch/pytorch/issues/71774
DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness',
device_type='cpu', dtypes=(torch.long,)),
)),
OpInfo('max',
variant_test_name='reduction_with_dim',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32),
sample_inputs_func=sample_inputs_max_min_reduction_with_dim,
supports_fwgrad_bwgrad=True,
skips=(
),
supports_forward_ad=True),
OpInfo('max',
variant_test_name='reduction_no_dim',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32),
supports_out=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_max_min_reduction_no_dim,
skips=(
)),
OpInfo('median',
dtypes=all_types_and(torch.bfloat16, torch.float16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32),
# TODO: some signatures of median do support out
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
error_inputs_func=error_inputs_median,
sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False)),
OpInfo('nanmedian',
dtypes=all_types_and(torch.bfloat16, torch.float16),
# TODO: some signatures of nanmedian do support out
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False)),
OpInfo('var_mean',
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
sample_inputs_func=sample_inputs_std_var,
# TODO: some signatures of var_mean do support out
supports_out=False,
supports_forward_ad=True,
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
decorators=(
DecorateInfo(toleranceOverride({torch.float64: tol(atol=2e-7, rtol=2e-7)}),
"TestDecomp", "test_comprehensive", device_type="cuda"),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-3, rtol=2e-3)}),
"TestInductorOpInfo", "test_comprehensive", device_type="cuda"),
)),
OpInfo('var_mean',
variant_test_name='unbiased',
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
sample_inputs_func=sample_inputs_std_var_unbiased,
# TODO: some signatures of var_mean do support out
supports_out=False,
supports_forward_ad=True,
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
decorators=(
DecorateInfo(toleranceOverride({torch.float64: tol(atol=2e-7, rtol=2e-7)}),
"TestDecomp", "test_comprehensive", device_type="cuda"),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-3, rtol=2e-3)}),
"TestInductorOpInfo", "test_comprehensive", device_type="cuda"),
)),
OpInfo('std_mean',
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
sample_inputs_func=sample_inputs_std_var,
# TODO: some signatures of std_mean do support out
supports_out=False,
supports_forward_ad=True,
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
decorators=(
DecorateInfo(toleranceOverride({torch.float64: tol(atol=2e-7, rtol=2e-7)}),
"TestDecomp", "test_comprehensive", device_type="cuda"),
)),
OpInfo('std_mean',
variant_test_name='unbiased',
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
sample_inputs_func=sample_inputs_std_var_unbiased,
# TODO: some signatures of var_mean do support out
supports_out=False,
supports_forward_ad=True,
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
decorators=(
DecorateInfo(
toleranceOverride({
torch.float16: tol(atol=4e-5, rtol=9e-3),
torch.float64: tol(atol=2e-7, rtol=2e-7),
}),
"TestDecomp",
"test_comprehensive",
device_type="cuda"
),
DecorateInfo(
toleranceOverride({
torch.float16: tol(atol=4e-5, rtol=9e-3),
torch.float64: tol(atol=2e-7, rtol=2e-7),
}),
"TestInductorOpInfo",
"test_comprehensive",
device_type="cuda"
),
)),
OpInfo('meshgrid',
variant_test_name='variadic_tensors',
ref=np.meshgrid,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.float16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_meshgrid, variant='variadic'),
skips=[
# JIT does not support variadic tensors.
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# meshgrid is defined in torch.functional to take a
# variadic list of tensors. Variadic parameters are not
# compatible with the normalize operator tests.
DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# Skip operator schema test because this is a functional and not an operator
DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
],
supports_out=False,
supports_fwgrad_bwgrad=True,
supports_forward_ad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,),
OpInfo('meshgrid',
variant_test_name='list_of_tensors',
# Unlike the variant above, we do not use np.meshgrid as a
# ref since it does not officially support list of numpy
# arrays.
dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.float16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_meshgrid, variant='list'),
skips=[
# meshgrid is defined in torch.functional to take a
# variadic list of tensors. Variadic parameters are not
# compatible with the normalize operator tests.
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
],
assert_autodiffed=True,
supports_out=False,
autodiff_nonfusible_nodes=[],
supports_fwgrad_bwgrad=True,
supports_forward_ad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,),
OpInfo('min',
variant_test_name='reduction_with_dim',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32),
sample_inputs_func=sample_inputs_max_min_reduction_with_dim,
supports_fwgrad_bwgrad=True,
supports_forward_ad=True,
skips=(
)),
OpInfo('min',
variant_test_name='reduction_no_dim',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_out=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_max_min_reduction_no_dim,
skips=(
)),
OpInfo('quantile',
dtypes=floating_types(),
sample_inputs_func=sample_inputs_reduction_quantile,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/issues/66357
# Relies on copy_ to broadcast, but the forward AD path calls broadcast_to which
# does not have a batching rule in core
check_batched_forward_grad=False),
OpInfo('nanquantile',
dtypes=floating_types(),
sample_inputs_func=sample_inputs_reduction_quantile,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/issues/66357
# Relies on copy_ to broadcast, but the forward AD path calls broadcast_to which
# does not have a batching rule in core
check_batched_forward_grad=False),
BinaryUfuncInfo(
'max',
aliases=('maximum',),
variant_test_name='binary',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=True,
ref=np.maximum,
supports_rhs_python_scalar=False,
skips=(
# Incorrectly attempts to use a scalar for the second argument
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),
# TODO: FIXME: RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat'
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'),
)),
BinaryUfuncInfo(
'maximum',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
ref=np.maximum,
supports_rhs_python_scalar=False,
skips=(
# TODO: FIXME: RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat'
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'),
)),
BinaryUfuncInfo(
'min',
aliases=('minimum',),
variant_test_name='binary',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=True,
ref=np.minimum,
supports_rhs_python_scalar=False,
skips=(
# Incorrectly attempts to use a scalar for the second argument
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),
# TODO: FIXME: RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat'
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion',
device_type='cuda'),
)),
BinaryUfuncInfo(
'minimum',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
ref=np.minimum,
supports_rhs_python_scalar=False,
skips=(
# TODO: FIXME: RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat'
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion',
device_type='cuda'),
),
),
BinaryUfuncInfo('logical_and',
ref=np.logical_and,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8, torch.bool),
supports_autograd=False,
always_returns_bool=True,
supports_rhs_python_scalar=False),
BinaryUfuncInfo('logical_or',
ref=np.logical_or,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int8, torch.bool),
supports_autograd=False,
always_returns_bool=True,
supports_rhs_python_scalar=False),
BinaryUfuncInfo('logical_xor',
ref=np.logical_xor,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int8, torch.bool),
supports_autograd=False,
always_returns_bool=True,
supports_rhs_python_scalar=False,
skips=(
)),
BinaryUfuncInfo('bitwise_and',
ref=np.bitwise_and,
dtypes=integral_types_and(torch.bool),
dtypesIfHpu=custom_types(torch.bool),
operator_variant=operator.and_,
inplace_operator_variant=operator.iand,
supports_autograd=False,
supports_one_python_scalar=True,
skips=(
# RuntimeError: "bitwise_and_cuda" not implemented for 'Half'
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs',
'test_type_promotion', device_type='cuda'),
)),
BinaryUfuncInfo('bitwise_or',
ref=np.bitwise_or,
dtypes=integral_types_and(torch.bool),
dtypesIfHpu=custom_types(torch.bool),
operator_variant=operator.or_,
inplace_operator_variant=operator.ior,
supports_autograd=False,
supports_one_python_scalar=True,
skips=(
# TODO: FIXME: RuntimeError: "bitwise_or_cuda" not implemented for 'Half'
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion',
device_type='cuda'),
)),
BinaryUfuncInfo('bitwise_xor',
ref=np.bitwise_xor,
dtypes=integral_types_and(torch.bool),
dtypesIfHpu=custom_types(torch.bool),
operator_variant=operator.xor,
inplace_operator_variant=operator.ixor,
supports_autograd=False,
supports_one_python_scalar=True,
skips=(
# TODO: FIXME: RuntimeError: "bitwise_xor_cuda" not implemented for 'Half'
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion',
device_type='cuda'),
)),
BinaryUfuncInfo('heaviside',
ref=lambda a, b: (
# necessary because np.heaviside incorrectly returns float64 when passed args of dtype int64
np.int64(np.heaviside(a, b)) if a.dtype == np.int64 and b.dtype == np.int64 else np.heaviside(a, b)
),
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32),
supports_autograd=False,
supports_rhs_python_scalar=False,
skips=(
# RuntimeError: heaviside is not yet implemented for tensors with different dtypes.
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion'),
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'),
# PyTorch's heaviside does not appear to propagate NaNs
DecorateInfo(unittest.skip("Skipped!"),
'TestBinaryUfuncs',
'test_reference_numerics_extremal_values'),
)),
BinaryUfuncInfo('lcm',
ref=np.lcm,
dtypes=integral_types_and(),
supports_autograd=False,
supports_rhs_python_scalar=False),
BinaryUfuncInfo('gcd',
ref=np.gcd,
dtypes=integral_types_and(),
supports_autograd=False,
supports_rhs_python_scalar=False,
skips=(
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_reference_numerics_small_values',
dtypes=(torch.int8,)),)),
BinaryUfuncInfo('isclose',
ref=np.isclose,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_isclose,
error_inputs_func=error_inputs_isclose,
supports_autograd=False,
supports_out=False,
supports_rhs_python_scalar=False,
skips=(
DecorateInfo(unittest.expectedFailure,
'TestCommon',
'test_numpy_refs', dtypes=(torch.complex128,)),
# RuntimeError: Short did not match Int
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion'),
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'),
DecorateInfo(unittest.skip("Skipped!"),
'TestBinaryUfuncs',
'test_reference_numerics_extremal_values'),
)),
# `softmax` supports different dtypes based on whether `dtype` argument,
# is passed or not. Hence two OpInfo entries, one with dtype and other without.
# https://github.com/pytorch/pytorch/issues/68752
OpInfo('softmax',
aliases=('special.softmax', 'nn.functional.softmax',),
aten_name='softmax',
aten_backward_name='_softmax_backward_data',
dtypes=floating_types_and(torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
sample_inputs_func=sample_inputs_softmax_variant,
assert_jit_shape_analysis=True,
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=True),
OpInfo('softmax',
aliases=('special.softmax', 'nn.functional.softmax',),
variant_test_name="with_dtype",
aten_name='softmax',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=True),
OpInfo(
'_softmax_backward_data',
op=torch.ops.aten._softmax_backward_data,
aten_name='_softmax_backward_data',
dtypes=floating_types_and(torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_softmax_backward_data,
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
),
),
# `softmin` supports different dtypes based on whether `dtype` argument,
# is passed or not. Hence two OpInfo entries, one with dtype and other without.
# https://github.com/pytorch/pytorch/issues/68752
OpInfo('nn.functional.softmin',
aten_name='softmin',
dtypes=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_softmax_variant,
assert_jit_shape_analysis=False,
assert_autodiffed=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False),
OpInfo('nn.functional.softmin',
variant_test_name="with_dtype",
aten_name='softmin',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True),
assert_autodiffed=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False),
OpInfo(
"nn.functional.cross_entropy",
dtypes=floating_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_cross_entropy,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=(
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=3e-3, rtol=1e-3)}),
"TestJit",
"test_variant_consistency_jit",
device_type="cpu",
),
),
skips=(
# AssertionError: False is not true : Scalars failed to compare as equal! 0 != 1536
# test_ops.TestJitCUDA.test_variant_consistency_jit_nn_functional_cross_entropy_cuda_float32 leaked
# 1536 bytes CUDA memory on device 0
DecorateInfo(
unittest.expectedFailure,
"TestJit",
"test_variant_consistency_jit",
device_type="cuda",
),
DecorateInfo(unittest.skip("FP16 corss_entropy cases have not been enabled on MPS yet"),
dtypes=(torch.half,), device_type="mps"),
)
),
OpInfo('nn.functional.normalize',
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_normalize,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True),
OpInfo('aminmax',
ref=lambda x, dim=None, keepdim=False: (np.amin(x, axis=dim, keepdims=keepdim), np.amax(x, axis=dim, keepdims=keepdim)),
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8),
decorators=(onlyNativeDeviceTypes,),
supports_autograd=False,
sample_inputs_func=sample_inputs_aminmax,
error_inputs_func=error_inputs_aminmax_amax_amin),
OpInfo('as_strided',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8, torch.bool),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# vmap does not support inplace views
check_inplace_batched_forward_grad=False,
sample_inputs_func=sample_inputs_as_strided,
skips=(
# Note: This xfail is fine -- it's inherent to how as_strided works
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'),
# AssertionError: False is not true : Scalars failed to compare as equal!
DecorateInfo(unittest.skip("Errors when storage_offset is included"),
'TestCommon', 'test_variant_consistency_eager'),
# Not close
DecorateInfo(unittest.skip("Errors when storage_offset is included"),
'TestCommon', 'test_complex_half_reference_testing'),
# Not close
DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.skip("Numerous errors"), 'TestFwdGradients'),
DecorateInfo(unittest.skip("Numerous errors"), 'TestBwdGradients'),
)),
OpInfo('as_strided',
variant_test_name='partial_views',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8, torch.bool),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# vmap does not support inplace views
check_inplace_batched_forward_grad=False,
sample_inputs_func=sample_inputs_as_strided_partial_views,
skips=(
# Note: This xfail is fine -- it's inherent to how as_strided works
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'),
# These fail because the test changes the input's in-memory layout
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_complex_half_reference_testing'),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu'),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_fn_fwgrad_bwgrad',
dtypes=(torch.complex64, torch.complex128)),
DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD'),
DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_inplace_forward_mode_AD'),
DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_inplace_grad'),
DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_inplace_gradgrad'),
DecorateInfo(unittest.expectedFailure, 'TestProxyTensorOpInfo',
'test_make_fx_symbolic_exhaustive_inplace'),
DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'),
# Fail but are also flaky
DecorateInfo(unittest.skip("Test changes in memory layout"), 'TestMathBits'),
DecorateInfo(unittest.skip("Modifies input strides and storage_offset"), 'TestCommon',
'test_non_standard_bool_values'),
# RuntimeError: setStorage: sizes [2, 2], strides [1, 2], storage offset 10, and itemsize 2 requiring a
# storage size of 28 are out of bounds for storage of size 20
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_meta_inplace'),
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_inplace'),
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_inplace'),
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_inplace_all_strides'),
)),
OpInfo('as_strided_copy',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
supports_out=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# vmap does not support inplace views
check_inplace_batched_forward_grad=False,
sample_inputs_func=sample_inputs_as_strided,
skips=(
# Note: This xfail is fine -- it's inherent to how as_strided works
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'),
# AssertionError: False is not true : Scalars failed to compare as equal!
DecorateInfo(unittest.skip("Errors when storage_offset is included"),
'TestCommon', 'test_variant_consistency_eager'),
# Not close
DecorateInfo(unittest.skip("Errors when storage_offset is included"),
'TestCommon', 'test_complex_half_reference_testing'),
# Not close
DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.skip("Numerous errors"), 'TestFwdGradients'),
DecorateInfo(unittest.skip("Numerous errors"), 'TestBwdGradients'),
DecorateInfo(unittest.expectedFailure, 'TestDTensorOps', 'test_dtensor_op_db'),
)),
OpInfo('as_strided_scatter',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# vmap does not support inplace views
check_inplace_batched_forward_grad=False,
sample_inputs_func=sample_inputs_as_strided_scatter,
error_inputs_func=error_inputs_as_strided_scatter,
skips=(
DecorateInfo(unittest.skip('Works for int64, fails for everything else'), 'TestCommon', 'test_noncontiguous_samples'), # noqa: B950
DecorateInfo(unittest.skip('Fails in most cases, passes on LAZY for some reason'), 'TestCommon', 'test_variant_consistency_eager'), # noqa: B950
DecorateInfo(unittest.skip('Fails on cuda'), 'TestCommon', 'test_complex_half_reference_testing',
active_if=not TEST_WITH_ROCM),
DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_grad'),
DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD'),
DecorateInfo(unittest.skip('Passes on complex128 and float64 only'), 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'),
# AssertionError: Tensor-likes are not close! (new_empty_strided.default)
DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), 'TestDecomp', 'test_comprehensive'),)),
OpInfo('native_layer_norm',
aten_name='native_layer_norm',
ref=reference_native_layer_norm,
dtypes=floating_types_and(torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_out=False,
assert_jit_shape_analysis=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_native_layer_norm,
error_inputs_func=error_inputs_native_layer_norm,
skips=(
# IndexError: tuple index out of range
DecorateInfo(unittest.skip('Skipped!'), 'TestFwdGradients', 'test_forward_mode_AD'),
# Tests fail when weight=None and bias is defined
# https://github.com/pytorch/pytorch/issues/79705
DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_gradgrad'),
# JIT test also tries to compute double backward, which fails
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'),
DecorateInfo(toleranceOverride({torch.float32: tol(atol=2e-03, rtol=5e-03)}),
"TestDecomp", "test_comprehensive", device_type="cpu"),
)),
OpInfo('native_batch_norm',
aten_name='native_batch_norm',
dtypes=floating_types_and(torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
allow_cow_input_materialize_forward=[3, 4],
allow_cow_input_materialize_backward=[3, 4],
sample_inputs_func=sample_inputs_native_batch_norm,
skips=(
# NotImplementedError: Could not run
# 'aten::native_batch_norm.out' with arguments from the 'CPU' backend.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type="cpu"),
# RuntimeError: out_invstd.dim() == 1 && out_invstd.is_contiguous() && out_invstd.sizes()[0]
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type="cuda"),
# Problem with _get_numerical_jacobian
# IndexError: tuple index out of range
DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'),
# RuntimeError: deepEquals(input.iValue, deepCopiedInput) INTERNAL ASSERT FAILED
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# https://github.com/pytorch/pytorch/issues/85960
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu'),
# AssertionError: Booleans mismatch: True is not False
DecorateInfo(unittest.skip("Skipped!"), 'TestFakeTensor', 'test_fake_autocast'),
DecorateInfo(unittest.skip("Skipped!"), 'TestFakeTensor', 'test_fake'),
DecorateInfo(toleranceOverride({torch.float32: tol(atol=5e-5, rtol=5e-5)}),
"TestCompositeCompliance", "test_forward_ad"),
)
),
OpInfo('_native_batch_norm_legit',
aten_name='_native_batch_norm_legit',
dtypes=floating_types_and(torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
allow_cow_input_materialize_forward=[3, 4],
allow_cow_input_materialize_backward=[3, 4],
sample_inputs_func=sample_inputs__native_batch_norm_legit,
skips=(
# NotImplementedError: Could not run
# 'aten::native_batch_norm.out' with arguments from the 'CPU' backend.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type="cpu"),
# RuntimeError: out_invstd.dim() == 1 && out_invstd.is_contiguous() && out_invstd.sizes()[0]
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type="cuda"),
# Problem with _get_numerical_jacobian
# IndexError: tuple index out of range
DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'),
# RuntimeError: deepEquals(input.iValue, deepCopiedInput) INTERNAL ASSERT FAILED
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# https://github.com/pytorch/pytorch/issues/85960
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu'),
DecorateInfo(toleranceOverride({torch.float32: tol(atol=5e-5, rtol=5e-5)}),
"TestCompositeCompliance", "test_forward_ad"),
)
),
OpInfo('_batch_norm_with_update',
op=torch.ops.aten._batch_norm_with_update,
aten_name='_batch_norm_with_update',
dtypes=floating_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
allow_cow_input_materialize_forward=[3, 4],
allow_cow_input_materialize_backward=[3, 4],
sample_inputs_func=sample_inputs__batch_norm_with_update,
skips=(
# NotImplementedError: Could not run
# 'aten::native_batch_norm.out' with arguments from the 'CPU' backend.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type="cpu"),
# RuntimeError: out_invstd.dim() == 1 && out_invstd.is_contiguous() && out_invstd.sizes()[0]
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type="cuda"),
# Problem with _get_numerical_jacobian
# IndexError: tuple index out of range
DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'),
# RuntimeError: deepEquals(input.iValue, deepCopiedInput) INTERNAL ASSERT FAILED
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(toleranceOverride({torch.float32: tol(atol=5e-5, rtol=5e-5)}),
"TestCompositeCompliance", "test_forward_ad"),
# _batch_norm_with_update expects contiguous inputs for cudnn and miopen
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples', device_type="cuda"),
DecorateInfo(unittest.expectedFailure,
'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides', device_type="cuda"),
# _batch_norm_with_update does not have python bindings
DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# aten out variants do not accept out= kwarg, only python out variants
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
)
),
OpInfo('nn.functional.cosine_similarity',
aten_name="cosine_similarity",
dtypes=floating_types_and(torch.half, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=[
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1.3e-5, rtol=2e-2)}),
"TestInductorOpInfo",
"test_comprehensive",
device_type="cuda"
),
],
sample_inputs_func=sample_inputs_cosine_similarity),
OpInfo('nn.functional.adaptive_avg_pool1d',
dtypes=floating_types_and(torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
error_inputs_func=error_inputs_adaptive_avg_pool1d,
sample_inputs_func=sample_inputs_adaptive_avg_pool1d),
OpInfo('nn.functional.adaptive_avg_pool2d',
dtypes=floating_types_and(torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.float16),
decorators=(
# RuntimeError:
# adaptive_avg_pool2d(Tensor input, int[2] output_size) -> (Tensor):
# Expected a value of type 'List[int]' for argument 'output_size' but
# instead found type 'Tuple[NoneType, int]'. :
# File "<string>", line 3
# def the_method(i0):
# return torch.nn.functional.adaptive_avg_pool2d(i0, (None, 7))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
error_inputs_func=error_inputs_adaptive_avg_pool2d,
sample_inputs_func=sample_inputs_adaptive_avg_pool2d),
OpInfo('nn.functional.adaptive_avg_pool3d',
dtypes=floating_types_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.float16),
decorators=(
# RuntimeError:
# adaptive_avg_pool3d(Tensor input, int[3] output_size) -> (Tensor):
# Expected a value of type 'List[int]' for argument 'output_size' but
# instead found type 'Tuple[NoneType, NoneType, NoneType]'. :
# File "<string>", line 3
#
# def the_method(i0):
# return torch.nn.functional.adaptive_avg_pool3d(i0, (None, None, None))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
#
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
error_inputs_func=error_inputs_adaptive_avg_pool3d,
sample_inputs_func=sample_inputs_adaptive_avg_pool3d),
OpInfo('nn.functional.adaptive_max_pool1d',
dtypes=floating_types_and(torch.half, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# got: Batching rule not implemented for aten::flatten.using_ints
check_batched_forward_grad=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
error_inputs_func=error_inputs_adaptive_max_pool1d,
sample_inputs_func=sample_inputs_adaptive_max_pool1d),
OpInfo('nn.functional.adaptive_max_pool2d',
dtypes=floating_types_and(torch.half, torch.bfloat16),
decorators=(
# RuntimeError:
# adaptive_max_pool2d(Tensor input, int[2] output_size) -> (Tensor):
# Expected a value of type 'List[int]' for argument 'output_size' but
# instead found type 'Tuple[NoneType, int]'. :
# File "<string>", line 3
# def the_method(i0):
# return torch.nn.functional.adaptive_max_pool2d(i0, (None, 7))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# got: Batching rule not implemented for aten::flatten.using_ints
check_batched_forward_grad=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
error_inputs_func=error_inputs_adaptive_max_pool2d,
sample_inputs_func=sample_inputs_adaptive_max_pool2d),
OpInfo('nn.functional.adaptive_max_pool3d',
dtypes=floating_types_and(torch.bfloat16, torch.half),
decorators=(
# RuntimeError:
# adaptive_max_pool3d(Tensor input, int[3] output_size) -> (Tensor):
# Expected a value of type 'List[int]' for argument 'output_size' but
# instead found type 'Tuple[NoneType, NoneType, NoneType]'. :
# File "<string>", line 3
#
# def the_method(i0):
# return torch.nn.functional.adaptive_max_pool3d(i0, (None, None, None))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
#
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# got: Batching rule not implemented for aten::flatten.using_ints
check_batched_forward_grad=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
error_inputs_func=error_inputs_adaptive_max_pool3d,
sample_inputs_func=sample_inputs_adaptive_max_pool3d),
OpInfo('nn.functional.avg_pool1d',
aten_name='avg_pool1d',
supports_autograd=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=floating_types_and(torch.int64, torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.float16),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
error_inputs_func=error_inputs_avg_pool1d,
sample_inputs_func=sample_inputs_avgpool1d),
OpInfo('nn.functional.avg_pool3d',
aten_name='avg_pool3d',
supports_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=floating_types_and(torch.int64),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.float16),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
error_inputs_func=error_inputs_avg_pool3d,
sample_inputs_func=sample_inputs_avgpool3d,
skips=(
# AssertionError: Tensor-likes are not close!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cpu'),
)),
OpInfo(
"nn.functional.binary_cross_entropy_with_logits",
aten_name="binary_cross_entropy_with_logits",
supports_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
dtypes=floating_types_and(torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.float16),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_binary_cross_entropy_with_logits,
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
'TestJit',
'test_variant_consistency_jit',
dtypes=(torch.float32,)
),
DecorateInfo(toleranceOverride({torch.float32: tol(atol=2e-5, rtol=3e-6)}),
"TestConsistency", "test_output_match", device_type="mps"),
),
),
UnaryUfuncInfo(
'nn.functional.relu',
aten_name="relu",
ref=lambda a: np.where(a <= 0, 0, a),
supports_autograd=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
dtypes=all_types_and(torch.half, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_nn_activation_relu,
supports_out=False,
supports_fwgrad_bwgrad=True,
supports_forward_ad=True),
OpInfo('nn.functional.conv_transpose1d',
# `ref` for this function is backward of
# corresponding `conv*d`
ref=partial(conv_transpose_ref, fn=torch.nn.functional.conv_transpose1d),
aten_name='conv_transpose1d',
aliases=('conv_transpose1d',),
dtypes=floating_and_complex_types_and(torch.int64, torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf,
torch.bfloat16),
sample_inputs_func=sample_inputs_conv_transpose1d,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=(
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }),
'TestCommon', 'test_variant_consistency_eager', device_type='cuda'),
DecorateInfo(
toleranceOverride({torch.chalf: tol(atol=5e-2, rtol=5e-2), }),
'TestCommon', 'test_complex_half_reference_testing'),
DecorateInfo(
toleranceOverride({torch.float: tol(atol=1.5e-5, rtol=1.5e-5), }),
'TestCommon', 'test_numpy_ref_mps'),
DecorateInfo(
toleranceOverride({torch.half: tol(atol=1e-3, rtol=5e-3), }),
'TestInductorOpInfo', 'test_comprehensive', device_type='cpu'),
),
skips=(
# Reason for Skip: https://github.com/pytorch/pytorch/pull/79694#issuecomment-1186949486
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
dtypes=(torch.complex64,)),
# RuntimeError: UNSUPPORTED DTYPE: complex
DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness',
dtypes=(torch.complex64, torch.complex128)),
# RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',
dtypes=(torch.float,)),
# RuntimeError: "slow_conv2d_cpu_grad_input" not implemented for 'Long'
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref',
dtypes=(torch.int64,)),
),
supports_out=False,),
OpInfo('nn.functional.conv_transpose2d',
aten_name='conv_transpose2d',
aliases=('conv_transpose2d',),
# `ref` for this function is backward of
# corresponding `conv*d`
ref=partial(conv_transpose_ref, fn=torch.nn.functional.conv_transpose2d),
dtypes=floating_and_complex_types_and(torch.int64, torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf,
torch.bfloat16),
sample_inputs_func=sample_inputs_conv_transpose2d,
# Runs very slowly on slow-gradcheck for complex.
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=[
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }),
'TestCommon', 'test_variant_consistency_eager', device_type='cuda'),
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=2e-05, rtol=5e-05), }),
'TestCommon', 'test_noncontiguous_samples', device_type='cuda'),
DecorateInfo(
toleranceOverride({torch.chalf: tol(atol=8e-2, rtol=8e-2), }),
'TestCommon', 'test_complex_half_reference_testing'),
DecorateInfo(
toleranceOverride({torch.half: tol(atol=1e-3, rtol=4e-3), }),
'TestInductorOpInfo', 'test_comprehensive', device_type='cpu')],
skips=(
# RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# RuntimeError: UNSUPPORTED DTYPE: complex
DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness',
dtypes=(torch.complex64, torch.complex128)),
# RuntimeError: "slow_conv2d_cpu_grad_input" not implemented for 'Long'
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref',
dtypes=(torch.int64,)),
# Reference: https://github.com/pytorch/pytorch/issues/86356
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref',
dtypes=(torch.double, torch.cdouble)),
DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'),
# AssertionError: None mismatch: torch.complex64 is not None
DecorateInfo(unittest.expectedFailure, 'TestDtypeCustomRules', 'test_custom_rules',
dtypes=(torch.complex64, torch.complex128)),
),
supports_out=False,),
OpInfo('nn.functional.conv_transpose3d',
aten_name='conv_transpose3d',
aliases=('conv_transpose3d',),
# `ref` for this function is backward of
# corresponding `conv*d`
ref=partial(conv_transpose_ref, fn=torch.nn.functional.conv_transpose3d),
dtypes=floating_and_complex_types_and(torch.int64, torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(
torch.float16, torch.chalf, torch.bfloat16),
sample_inputs_func=sample_inputs_conv_transpose3d,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
# Runs very slowly on slow-gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=[
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=5e-2, rtol=5e-2), }),
'TestInductorOpInfo', 'test_comprehensive', device_type='cuda'),
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06),
torch.complex64: tol(atol=1.3e-04, rtol=1.3e-05)}),
'TestCommon', 'test_variant_consistency_eager', device_type='cuda'),
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=2e-04, rtol=2e-04), }),
'TestCompositeCompliance', 'test_operator', device_type='cuda'),
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1.3e-04, rtol=1.3e-06),
torch.complex64: tol(atol=1.3e-04, rtol=1.3e-05)}),
'TestCommon', 'test_noncontiguous_samples', device_type='cuda'),
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-04, rtol=2e-05), }),
'TestCompositeCompliance', 'test_forward_ad', device_type='cuda',
active_if=TEST_CUDNN),
DecorateInfo(
toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1e-4)}),
"TestMathBits", "test_conj_view", device_type='cuda'),
DecorateInfo(
toleranceOverride({torch.chalf: tol(atol=9e-2, rtol=9e-2), }),
'TestCommon', 'test_complex_half_reference_testing'),
DecorateInfo(
toleranceOverride({torch.half: tol(atol=9e-3, rtol=2e-1), }),
'TestInductorOpInfo', 'test_comprehensive', device_type='cpu')],
skips=(
# RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# RuntimeError: "slow_conv3d_cpu_grad_input" not implemented for 'Long'
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref',
dtypes=(torch.int64,)),
# Reference: https://github.com/pytorch/pytorch/issues/86356
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref',
dtypes=(torch.double, torch.cdouble)),
DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'),
# RuntimeError: UNSUPPORTED DTYPE: complex
DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness',
dtypes=(torch.complex64, torch.complex128)),
DecorateInfo(unittest.skip('Skipped for ROCm!'), 'TestCommon', 'test_complex_half_reference_testing',
dtypes=[torch.complex32], active_if=TEST_WITH_ROCM),
),
supports_out=False,),
OpInfo('nn.functional.conv1d',
aliases=('conv1d',),
aten_name='conv1d',
dtypes=floating_and_complex_types_and(torch.int64, torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf,
torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
sample_inputs_func=sample_inputs_conv1d,
error_inputs_func=error_inputs_conv1d,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=(
DecorateInfo(
toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=5e-2)}),
'TestCommon', 'test_complex_half_reference_testing'
),
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=2e-3, rtol=1e-3)}),
'TestInductorOpInfo', 'test_comprehensive', device_type='cuda',
),
),
skips=(
# RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":103, please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# Ref: https://github.com/pytorch/pytorch/issues/75309
# AssertionError: None mismatch: torch.complex128 is not None
DecorateInfo(unittest.expectedFailure, 'TestDtypeCustomRules',
'test_custom_rules', dtypes=(torch.complex64, torch.complex128)),
# Ref: https://github.com/pytorch/pytorch/issues/75309
# RuntimeError: UNSUPPORTED DTYPE: complex
DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo',
'test_nnc_correctness', dtypes=(torch.complex64, torch.complex128)),
),
supports_expanded_weight=True,
supports_out=False,),
OpInfo('nn.functional.conv2d',
aliases=('conv2d',),
aten_name='conv2d',
dtypes=floating_and_complex_types_and(torch.int64, torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf,
torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_conv2d),
error_inputs_func=error_inputs_conv2d,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
decorators=(
DecorateInfo(
toleranceOverride({torch.chalf: tol(atol=6e-2, rtol=5e-2)}),
'TestCommon', 'test_complex_half_reference_testing',
),
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=5e-3, rtol=1e-3)}),
'TestInductorOpInfo', 'test_comprehensive',
),
),
skips=(
# RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":103, please report a bug to PyTorch.
DecorateInfo(unittest.skip("Works on some configs!"), 'TestJit', 'test_variant_consistency_jit'),
# Ref: https://github.com/pytorch/pytorch/issues/75309
# AssertionError: None mismatch: torch.complex128 is not None
DecorateInfo(unittest.expectedFailure, 'TestDtypeCustomRules',
'test_custom_rules', dtypes=(torch.complex64, torch.complex128)),
# RuntimeError: UNSUPPORTED DTYPE: complex
DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo',
'test_nnc_correctness', dtypes=(torch.complex64, torch.complex128)),
),
supports_expanded_weight=True,
supports_out=False,),
OpInfo('nn.functional.conv3d',
aliases=('conv3d',),
aten_name='conv3d',
dtypes=floating_and_complex_types_and(torch.int64, torch.bfloat16, torch.float16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
sample_inputs_func=sample_inputs_conv3d,
error_inputs_func=error_inputs_conv3d,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=(
DecorateInfo(
toleranceOverride({torch.chalf: tol(atol=6e-2, rtol=5e-2)}),
'TestCommon', 'test_complex_half_reference_testing',
),
# TF32
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=5e-3, rtol=1e-3),
torch.complex64: tol(atol=5e-3, rtol=1e-3)}),
'TestCommon', 'test_noncontiguous_samples',
),
DecorateInfo(
toleranceOverride({torch.complex64: tol(atol=2e-5, rtol=3e-6)}),
'TestCommon', 'test_variant_consistency_eager',
),
DecorateInfo(
toleranceOverride({torch.complex64: tol(atol=5e-5, rtol=5e-6)}),
'TestMathBits', 'test_conj_view',
),
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=5e-5, rtol=5e-6)}),
'TestOperators', 'test_vjpvmap',
),
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=5e-3, rtol=1e-3)}),
'TestInductorOpInfo', 'test_comprehensive',
),
),
skips=(
# RuntimeError: !lhs.isAliasOf(rhs) INTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":103, please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# RuntimeError: UNSUPPORTED DTYPE: complex
DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo',
'test_nnc_correctness', dtypes=(torch.complex64, torch.complex128)),
# AssertionError: Tensor-likes are not close!
# break slow tests
DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_compare_cpu'),
),
supports_expanded_weight=True,
supports_out=False,),
OpInfo('nn.functional.group_norm',
aten_name='group_norm',
aliases=('group_norm',),
ref=reference_group_norm,
dtypes=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
error_inputs_func=error_inputs_group_norm,
decorators=[
# RuntimeError: Cannot insert a Tensor that requires grad as a constant.
# Consider making it a parameter or input, or detaching the gradient
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=5e-05, rtol=3e-03)}),
"TestDecomp",
"test_comprehensive",
device_type="cpu"
),
],
sample_inputs_func=sample_inputs_group_norm,
reference_inputs_func=reference_inputs_group_norm,
supports_expanded_weight=True,),
OpInfo('nn.functional.instance_norm',
# no ref because instance_norm will often have numerical instability (large numbers or nan)
dtypes=floating_types_and(torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
allow_cow_input_materialize_forward=['running_mean', 'running_var'],
decorators=[
# RuntimeError: Cannot insert a Tensor that requires grad as a constant.
# Consider making it a parameter or input, or detaching the gradient
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
],
sample_inputs_func=sample_inputs_instance_norm,
supports_expanded_weight=True,),
OpInfo('nn.functional.layer_norm',
aten_name='layer_norm',
aten_backward_name='layer_norm_backward',
aliases=('layer_norm',),
ref=reference_layer_norm,
dtypes=floating_types_and(torch.half, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
decorators=[
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1e-03)}),
'TestCommon', 'test_numpy_refs'
),
DecorateInfo(unittest.skip("Bug in MPS backend!"), 'TestCommon', 'test_numpy_ref_mps'),
],
sample_inputs_func=sample_inputs_layer_norm,
supports_expanded_weight=True,),
OpInfo('nn.functional.rms_norm',
aten_name='rms_norm',
aliases=('rms_norm',),
ref=reference_rms_norm,
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_rms_norm,
error_inputs_func=error_inputs_rms_norm,),
OpInfo('nn.functional.local_response_norm',
dtypes=floating_types_and(torch.int64, torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=[
# RuntimeError: falseINTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
],
sample_inputs_func=sample_inputs_local_response_norm,),
OpInfo('constant_pad_nd',
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
sample_inputs_func=sample_inputs_constant_pad_nd,
supports_out=False,
skips=(
# bool can't be passed to Scalar arguments in JIT tracer because
# BoolType is not a subtype of ScalarType.
DecorateInfo(
unittest.expectedFailure, 'TestNNCOpInfo',
'test_nnc_correctness', dtypes=(torch.bool,)),
)),
OpInfo('nn.functional.pad',
variant_test_name='constant',
aten_name='constant_pad_nd',
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
sample_inputs_func=partial(sample_inputs_nn_pad, mode='constant'),
supports_out=False),
OpInfo('nn.functional.pad',
variant_test_name='reflect',
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
sample_inputs_func=partial(sample_inputs_nn_pad, mode='reflect'),
skips=(
# Doesn't have a corresponding aten operator.
# RuntimeError: falseINTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_out=False),
OpInfo('nn.functional.pad',
variant_test_name='replicate',
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_nn_pad, mode='replicate'),
skips=(
# Doesn't have a corresponding aten operator.
# RuntimeError: falseINTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_out=False),
OpInfo('nn.functional.pad',
variant_test_name='replicate_negative',
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_nn_pad_replicate_negative,
skips=(
# Doesn't have a corresponding aten operator.
# RuntimeError: falseINTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
# Some negative padding cases cause a segfault on MPS
DecorateInfo(unittest.skip("Not fully supported on MPS"), 'TestConsistency'),
),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_out=False),
OpInfo('nn.functional.pad',
variant_test_name='circular',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
sample_inputs_func=partial(sample_inputs_nn_pad, mode='circular'),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_grad=False,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
skips=(
# Doesn't have a corresponding aten operator.
# RuntimeError: falseINTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
# Difference from <type> is larger with decomposition new_empty_strided.default than original on output 0
DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), 'TestDecomp', 'test_comprehensive'),
),
supports_out=False),
OpInfo('nn.functional.hardswish',
aten_name="hardswish",
aten_backward_name='hardswish_backward',
supports_autograd=True,
assert_autodiffed=True,
sample_inputs_func=sample_inputs_hardswish,
dtypes=floating_types_and(torch.bfloat16, torch.half),
supports_gradgrad=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
autodiff_nonfusible_nodes=["aten::hardswish"]),
OpInfo('nn.functional.unfold',
aten_name='im2col',
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16, torch.bool),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16, torch.bool),
sample_inputs_func=sample_inputs_nn_unfold,
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
skips=(
# NOTE: this failure may not reproduce consistently on different systems
# false INTERNAL ASSERT FAILED at "...torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185
DecorateInfo(unittest.skip("Internal assert failed!"), 'TestJit', 'test_variant_consistency_jit'),
# Compiler issue on ROCm. Regression started in ROCm 6.4.
DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_non_standard_bool_values',
dtypes=[torch.bool], active_if=TEST_WITH_ROCM),
)),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='nearest',
supports_autograd=True,
supports_fwgrad_bwgrad=True,
supports_forward_ad=True,
dtypes=floating_types_and(torch.uint8, torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_interpolate, 'nearest'),
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='nearest-exact',
supports_autograd=True,
supports_fwgrad_bwgrad=True,
supports_forward_ad=True,
dtypes=floating_types_and(torch.half, torch.bfloat16, torch.uint8),
sample_inputs_func=partial(sample_inputs_interpolate, 'nearest-exact'),
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# RuntimeError: aten::_upsample_nearest_exact*d hit the vmap fallback which is currently disabled
DecorateInfo(unittest.expectedFailure, 'TestOperators', 'test_vmapjvpall_has_batch_rule'),
DecorateInfo(unittest.expectedFailure, 'TestOperators', 'test_vmapvjp_has_batch_rule'),
DecorateInfo(unittest.expectedFailure, 'TestVmapOperatorsOpInfo', 'test_op_has_batch_rule'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='linear',
supports_autograd=True,
supports_fwgrad_bwgrad=True,
supports_forward_ad=True,
dtypes=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_interpolate, 'linear'),
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='bilinear',
supports_fwgrad_bwgrad=True,
supports_autograd=True,
supports_forward_ad=True,
dtypes=floating_types_and(torch.uint8, torch.half, torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=partial(sample_inputs_interpolate, 'bilinear'),
reference_inputs_func=partial(reference_inputs_interpolate, 'bilinear'),
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='bicubic',
supports_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=floating_types_and(torch.uint8, torch.half, torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_interpolate, 'bicubic'),
reference_inputs_func=partial(reference_inputs_interpolate, 'bicubic'),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='trilinear',
supports_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=floating_types_and(torch.half, torch.bfloat16),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=partial(sample_inputs_interpolate, 'trilinear'),
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='area',
supports_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=floating_types_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_interpolate, 'area'),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.upsample_bilinear',
supports_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=floating_types_and(torch.uint8, torch.half, torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=partial(sample_inputs_upsample, 'bilinear'),
reference_inputs_func=partial(reference_inputs_upsample, 'bilinear'),
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('_upsample_bilinear2d_aa',
op=torch.ops.aten._upsample_bilinear2d_aa,
aten_name='_upsample_bilinear2d_aa',
supports_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=floating_types_and(torch.uint8),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=partial(sample_inputs_upsample_aa, 'bilinear'),
supports_out=False,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.expectedFailure, 'TestDTensorOps', 'test_dtensor_op_db'),
DecorateInfo(unittest.expectedFailure, 'TestInductorOpInfo', 'test_comprehensive'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
)),
OpInfo(
"nn.functional.soft_margin_loss",
dtypes=floating_types_and(torch.half, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
# doesn't support grad on target
sample_inputs_func=partial(sample_inputs_loss, rhs_requires_grad=False),
error_inputs_func=error_inputs_soft_margin_loss,
),
OpInfo('nn.functional.upsample_nearest',
supports_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=floating_types_and(torch.uint8, torch.half, torch.bfloat16),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=partial(sample_inputs_upsample, 'nearest'),
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo(
"nn.functional.margin_ranking_loss",
dtypes=all_types_and(torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_margin_ranking_loss,
error_inputs_func=error_inputs_margin_ranking_loss,
reference_inputs_func=reference_inputs_margin_ranking_loss,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True),
OpInfo(
"nn.functional.multi_margin_loss",
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
supports_out=False,
supports_gradgrad=False,
sample_inputs_func=sample_inputs_multi_margin_loss,
reference_inputs_func=reference_inputs_multi_margin_loss,
error_inputs_func=error_inputs_multi_margin_loss,
decorators=(
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}),
"TestJit",
"test_variant_consistency_jit",
),
),
),
OpInfo(
"nn.functional.multilabel_margin_loss",
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
supports_out=False,
supports_gradgrad=False,
sample_inputs_func=sample_inputs_multilabel_margin_loss,
reference_inputs_func=reference_inputs_multilabel_margin_loss,
error_inputs_func=error_inputs_multilabel_margin_loss,
),
OpInfo('nn.functional.leaky_relu',
aliases=None,
aten_name="leaky_relu",
aten_backward_name='leaky_relu_backward',
sample_inputs_func=sample_inputs_leaky_relu,
dtypes=floating_types_and(torch.bfloat16, torch.float16),
inplace_variant=lambda x, negative_slope=0.01:
torch.nn.functional.leaky_relu(x, negative_slope, inplace=True),
supports_autograd=True,
assert_autodiffed=True,
supports_gradgrad=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
autodiff_nonfusible_nodes=["aten::leaky_relu"]),
OpInfo(
"nn.functional.multilabel_soft_margin_loss",
supports_out=False,
dtypes=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_multilabel_soft_margin_loss,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=(
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}),
"TestJit",
"test_variant_consistency_jit",
),
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=4e-3, rtol=1.3e-3)}),
"TestInductorOpInfo",
"test_comprehensive",
device_type="cuda"
),
),
skips=(
# AssertionError: False is not true : Scalars failed to compare as equal! 0 != 4096
# __main__.TestJitCUDA.test_variant_consistency_jit_nn_functional_multilabel_soft_margin_loss_cuda_float32
# leaked 4096 bytes CUDA memory on device 0
DecorateInfo(
# Skip instead of expectedFailure because this fails
# locally for me but passes in CI.
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
device_type="cuda",
),
),
),
OpInfo('nn.functional.avg_pool2d',
aten_name='avg_pool2d',
supports_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=floating_types_and(torch.int64, torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
error_inputs_func=error_inputs_avg_pool2d,
sample_inputs_func=sample_inputs_avgpool2d,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cuda'),
)),
OpInfo('nn.functional.fractional_max_pool2d',
supports_autograd=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
op=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.fractional_max_pool2d, input, *args, **kwargs),
# vmap does not support random operations
check_batched_forward_grad=False,
dtypes=floating_types_and(torch.bfloat16, torch.float16),
test_neg_view=False,
sample_inputs_func=sample_inputs_fractional_max_pool2d,
decorators=(
# FIXME: AssertionError: False is not true : Tensors failed to compare as equal!
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit')),
skips=(
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),)),
OpInfo('nn.functional.fractional_max_pool3d',
supports_autograd=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
op=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.fractional_max_pool3d, input, *args, **kwargs),
# vmap does not support random operations
check_batched_forward_grad=False,
dtypes=floating_types_and(torch.bfloat16, torch.float16),
test_neg_view=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_fractional_max_pool3d,
decorators=(
# FIXME: both derivatives are implemented incorrectly
# https://github.com/pytorch/pytorch/issues/69322
# FIXME: AssertionError: False is not true : Tensors failed to compare as equal!
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit')),
skips=(
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),)),
OpInfo('nn.functional.max_pool1d',
aten_name='max_pool1d',
supports_autograd=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# got: Batching rule not implemented for aten::flatten.using_ints
check_batched_forward_grad=False,
# TODO: add shape checks
assert_jit_shape_analysis=False,
dtypes=floating_types_and(torch.bfloat16, torch.float16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.skip("Works on some configs"), 'TestNNCOpInfo',
'test_nnc_correctness', dtypes=(torch.bfloat16,)),
# RuntimeError: The tensor has a non-zero number of elements, but its data is not allocated yet.
# Caffe2 uses a lazy allocation, so you will need to call mutable_data() or raw_mutable_data()
# to actually allocate memory
DecorateInfo(unittest.skip("Skipped!"), 'TestTags', 'test_tags'),
),
error_inputs_func=error_inputs_max_pool1d,
sample_inputs_func=sample_inputs_max_pool),
OpInfo('nn.functional.max_pool2d',
aten_name='max_pool2d',
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
# Vmap is not happy with non-contiguous (channels_last) inputs
check_batched_gradgrad=False,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# got: Batching rule not implemented for aten::flatten.using_ints
check_batched_forward_grad=False,
assert_jit_shape_analysis=True,
dtypes=all_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
error_inputs_func=error_inputs_max_pool2d,
sample_inputs_func=sample_inputs_max_pool),
OpInfo('max_pool2d_with_indices_backward',
op=max_pool2d_backward,
# We've defined a custom op, so there's no corresponding aten op
aten_name=None,
method_variant=None,
inplace_variant=None,
operator_variant=None,
inplace_operator_variant=None,
check_batched_gradgrad=False,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_forward_grad=False,
assert_jit_shape_analysis=False,
dtypes=floating_types_and(torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_max_pool,
skips=(
# We've defined a custom op here, and we don't handle the case where we receive an out kwarg
DecorateInfo(unittest.skip("Skipped!"), "TestCommon", "test_out"),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# FX failed to normalize op - add the op to the op_skip list.
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# object has no attribute max_pool2d_with_indices_backward (It's not available on torch -- so expected)
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit')
)),
OpInfo('nn.functional.max_pool3d',
aten_name='max_pool3d',
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# got: Batching rule not implemented for aten::flatten.using_ints
check_batched_forward_grad=False,
# TODO: add shape checks
assert_jit_shape_analysis=False,
dtypes=all_types_and(torch.bfloat16, torch.float16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
# TODO: investigate nondeterminism
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
error_inputs_func=error_inputs_max_pool3d,
sample_inputs_func=sample_inputs_max_pool),
OpInfo('nn.functional.max_unpool1d',
aten_name='max_unpool1d',
supports_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
assert_jit_shape_analysis=False,
dtypes=floating_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_max_unpool,
skips=(
# Gradients are tested in `variant_test_name=grad` below.
# We skip tests here because there is non-determinism in backward
# with gather, when there are writes into the same memory location,
# and if there are several indices pointing to the same memory,
# gradcheck is oblivious about that and cannot perturb them all at once
# (see sample_inputs_max_unpool_grad to find out more).
DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD',
active_if=(not IS_MACOS)),
DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_forward_ad',
device_type='cpu'),
DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_quick_core_backward'),
)),
OpInfo('nn.functional.max_unpool1d',
variant_test_name='grad',
aten_name='max_unpool1d',
supports_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
assert_jit_shape_analysis=False,
dtypes=floating_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_max_unpool_grad),
OpInfo('nn.functional.max_unpool2d',
aten_name='max_unpool2d',
supports_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
assert_jit_shape_analysis=False,
dtypes=floating_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_max_unpool,
skips=(
# Gradients are tested in `variant_test_name=grad` below.
# We skip tests here because there is non-determinism in backward
# with gather, when there are writes into the same memory location,
# and if there are several indices pointing to the same memory,
# gradcheck is oblivious about that and cannot perturb them all at once
# (see sample_inputs_max_unpool_grad to find out more).
DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD',
active_if=(not IS_MACOS)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_forward_ad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_quick_core_backward'),
)),
OpInfo('nn.functional.max_unpool2d',
variant_test_name='grad',
aten_name='max_unpool2d',
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# Vmap is not happy with non-contiguous (channels_last) inputs
check_batched_grad=False,
supports_out=False,
assert_jit_shape_analysis=False,
dtypes=floating_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_max_unpool_grad),
OpInfo('nn.functional.max_unpool3d',
aten_name='max_unpool3d',
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
assert_jit_shape_analysis=False,
dtypes=floating_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_max_unpool,
skips=(
# Gradients are tested in `variant_test_name=grad` below.
# We skip tests here because there is non-determinism in backward
# with gather, when there are writes into the same memory location,
# and if there are several indices pointing to the same memory,
# gradcheck is oblivious about that and cannot perturb them all at once
# (see sample_inputs_max_unpool_grad to find out more).
DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD',
active_if=(not IS_MACOS)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_forward_ad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_quick_core_backward'),
)),
OpInfo('nn.functional.max_unpool3d',
variant_test_name='grad',
aten_name='max_unpool3d',
supports_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
assert_jit_shape_analysis=False,
dtypes=floating_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_max_unpool_grad),
OpInfo('nn.functional.linear',
aten_name='linear',
supports_autograd=True,
supports_gradgrad=True,
sample_inputs_func=sample_inputs_linear,
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
# linear calls mm under the hood which is nondeterministic on CUDA
# https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html#torch.use_deterministic_algorithms
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
supports_expanded_weight=True,
decorators=(
# Strides are not the same!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
)),
OpInfo('nn.functional.bilinear',
aten_name='bilinear',
supports_autograd=True,
sample_inputs_func=sample_inputs_bilinear,
dtypes=all_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16,
*[torch.bfloat16] if SM53OrLater or TEST_WITH_ROCM else []),
decorators=(
DecorateInfo(toleranceOverride({torch.float16: tol(atol=2e-03, rtol=1.3e-03)}),
'TestInductorOpInfo', 'test_comprehensive', device_type='cpu'),
),
skips=(
# NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater),
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bfloat16,)),
),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False),
OpInfo('nn.functional.glu',
aten_name='glu',
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
sample_inputs_func=sample_inputs_glu,
dtypes=floating_types_and(torch.bfloat16, torch.float16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False),
UnaryUfuncInfo(
'nn.functional.elu',
aten_backward_name='elu_backward',
ref=lambda x, alpha=1.0, inplace=False:
np.maximum(0., x) + np.minimum(0., alpha * (np.exp(x) - 1)),
dtypes=floating_types_and(torch.bfloat16, torch.float16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
sample_kwargs=lambda device, dtype, input:
({'alpha': 0.8}, {'alpha': 0.8}),
inplace_variant=lambda x, alpha=1.0:
torch.nn.functional.elu(x, alpha, inplace=True),
decorators=[
DecorateInfo(
toleranceOverride({
torch.float16: tol(atol=1e-03, rtol=1.2e-03),
torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03)
}),
'TestUnaryUfuncs', device_type='cuda',
), ],
),
# Marked as a Unary function because it has some rather odd broadcasting semantics in its
# second argument
UnaryUfuncInfo(
'nn.functional.prelu',
aten_backward_name='_prelu_kernel_backward',
ref=lambda x, weight:
np.maximum(0., x) + np.minimum(0., x) *
(weight if x.ndim == 1 else weight.reshape([weight.size if i == 1 else 1 for i in range(x.ndim)])),
dtypes=floating_types_and(torch.bfloat16, torch.float16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
# test_reference_numerics only tests the case when the weight tensor is a scalar
sample_kwargs=sample_kwargs_prelu_scalar_weight,
error_inputs_func=error_inputs_prelu,
sample_inputs_func=sample_inputs_prelu,
reference_inputs_func=reference_inputs_prelu,
decorators=[
# RuntimeError: Cannot insert a Tensor that requires grad as a constant.
# Consider making it a parameter or input, or detaching the gradient
# https://github.com/pytorch/pytorch/issues/68752
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ],
),
UnaryUfuncInfo(
'nn.functional.celu',
ref=lambda x, alpha=1.0, inplace=False:
np.maximum(0., x) + np.minimum(0., alpha * (np.exp(x / alpha) - 1)),
dtypes=floating_types_and(torch.bfloat16, torch.float16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
sample_kwargs=lambda device, dtype, input:
({'alpha': 0.8}, {'alpha': 0.8}),
inplace_variant=lambda x, alpha=1.0:
torch.nn.functional.celu(x, alpha, inplace=True),
decorators=[
DecorateInfo(
toleranceOverride({
torch.float16: tol(atol=1e-03, rtol=1.2e-03),
torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03)
}),
'TestUnaryUfuncs', device_type='cuda',
), ],
),
UnaryUfuncInfo(
'nn.functional.rrelu',
aten_backward_name='rrelu_with_noise_backward',
op=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.rrelu, input, *args, **kwargs),
inplace_variant=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.rrelu, input, *args, inplace=True, **kwargs),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
gradcheck_wrapper=wrapper_set_seed,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
sample_kwargs=lambda device, dtype, input:
(dict(lower=0., upper=1., training=True), dict(lower=0., upper=1., training=True)),
sample_inputs_func=sample_inputs_rrelu,
error_inputs_func=error_inputs_rrelu,
decorators=(
DecorateInfo(
toleranceOverride({
torch.float16: tol(atol=1e-03, rtol=1.2e-03),
torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03)
}),
'TestUnaryUfuncs', device_type='cuda',
),),
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# lambda impl
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# In-place operations do not play well with forward AD
# https://github.com/pytorch/pytorch/issues/77447
DecorateInfo(unittest.expectedFailure, 'TestFwdGradients',
'test_inplace_forward_mode_AD'),
# The noise vector that's generated in these tests is not the same elementwise
DecorateInfo(unittest.skip("Different noise"), 'TestUnaryUfuncs', 'test_batch_vs_slicing'),
DecorateInfo(unittest.skip("Different noise"), 'TestUnaryUfuncs', 'test_contig_vs_every_other'),
DecorateInfo(unittest.skip("Different noise"), 'TestUnaryUfuncs', 'test_non_contig_expand'),
DecorateInfo(unittest.skip("Different noise"), 'TestUnaryUfuncs', 'test_contig_vs_transposed'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')),
skip_correctness_check_compile_vs_eager=True,
),
UnaryUfuncInfo(
'nn.functional.selu',
ref=lambda x, inplace=False:
1.0507009873554804934193349852946 * (
np.maximum(0., x) + np.minimum(0., 1.6732632423543772848170429916717 * (np.exp(x) - 1))
),
dtypes=floating_types_and(torch.bfloat16, torch.float16),
supports_forward_ad=True, # depends on 'elu'
supports_fwgrad_bwgrad=True,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
inplace_variant=lambda x: torch.nn.functional.selu(x, inplace=True),
decorators=[
DecorateInfo(
toleranceOverride({
torch.float16: tol(atol=1e-2, rtol=1.8e-2),
torch.bfloat16: tol(atol=1e-2, rtol=1.8e-2)
}),
'TestUnaryUfuncs', device_type='cuda',
), ],
),
OpInfo(
'torch._scaled_mm_v2',
sample_inputs_func=sample_inputs_scaled_mm_v2,
dtypes=float8_types(),
dtypesIfCUDA=empty_types() + (torch.float8_e4m3fn,),
supports_out=True,
supports_forward_ad=False,
supports_autograd=False,
decorators=[onlyCUDA, skipCUDAIf(not SM89OrLater or TEST_WITH_ROCM, 'Requires CUDA SM >= 8.9')],
skips=(
# Sample inputs isn't really parametrized on dtype
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'),
# "add_stub" not implemented for 'Float8_e4m3fn'
# "ufunc_add_CUDA" not implemented for 'Float8_e4m3fn'
# https://github.com/pytorch/pytorch/issues/107256
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
# "mul_cuda" not implemented for float8_e4m3fn
# "mul_cpu_reduced_float" not implemented for 'Float8_e4m3fn'
# https://github.com/pytorch/pytorch/issues/107256
DecorateInfo(unittest.skip("Skipped!"), 'TestSchemaCheckModeOpInfo', 'test_schema_correctness'),
# aten::_scaled_mm hit the vmap fallback which is currently disabled
DecorateInfo(unittest.skip("Skipped!"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"),
DecorateInfo(unittest.skip("Skipped!"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"),
DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness',
dtypes=(torch.float8_e4m3fn, torch.float8_e4m3fnuz, torch.float8_e5m2, torch.float8_e5m2fnuz)),
)
),
OpInfo(
'torch._scaled_mm',
sample_inputs_func=sample_inputs_scaled_mm,
dtypes=float8_types(),
dtypesIfCUDA=empty_types() + (torch.float8_e4m3fn,),
supports_out=True,
supports_forward_ad=False,
supports_autograd=False,
decorators=[skipXPU, skipCUDAIf(not SM89OrLater or TEST_WITH_ROCM, 'Requires CUDA SM >= 8.9')],
skips=(
# Sample inputs isn't really parametrized on dtype
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'),
# "add_stub" not implemented for 'Float8_e4m3fn'
# "ufunc_add_CUDA" not implemented for 'Float8_e4m3fn'
# https://github.com/pytorch/pytorch/issues/107256
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
# "mul_cuda" not implemented for float8_e4m3fn
# "mul_cpu_reduced_float" not implemented for 'Float8_e4m3fn'
# https://github.com/pytorch/pytorch/issues/107256
DecorateInfo(unittest.skip("Skipped!"), 'TestSchemaCheckModeOpInfo', 'test_schema_correctness'),
# aten::_scaled_mm hit the vmap fallback which is currently disabled
DecorateInfo(unittest.skip("Skipped!"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"),
DecorateInfo(unittest.skip("Skipped!"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"),
DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness',
dtypes=(torch.float8_e4m3fn, torch.float8_e4m3fnuz, torch.float8_e5m2, torch.float8_e5m2fnuz)),
)
),
OpInfo(
'torch.ops.aten._safe_softmax.default',
dtypes=all_types_and(torch.half, torch.bfloat16, torch.bool),
sample_inputs_func=sample_inputs_safe_softmax,
assert_jit_shape_analysis=True,
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
supports_cow_input_no_materialize_backward=False,
decorators=[],
skips=(
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
),
OpInfo(
'nn.functional.scaled_dot_product_attention',
op=lambda *args, **kwargs:
wrapper_set_seed(torch.nn.functional.scaled_dot_product_attention, *args, **kwargs),
sample_inputs_func=sample_inputs_scaled_dot_product_attention,
dtypes=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=False,
supports_fwgrad_bwgrad=True,
check_batched_forward_grad=False,
decorators=[DecorateInfo(toleranceOverride(
{torch.float32: tol(atol=5e-05, rtol=5e-6)}), 'TestCommon',), ],
skips=(
# When attn mask is a composite tensor this fails backward by returning a none
DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward', device_type='cuda'),
# This is only failing on Linux Bionic 3.10 Cuda 11.6
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes',
device_type='cuda', active_if=_get_torch_cuda_version() >= (11, 6)),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples',
dtypes=(torch.float32,)),
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# Forward works for dtype=float64 which is the math path
DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'),
# Not implemented for Forward AD
DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_fn_fwgrad_bwgrad',
device_type='cpu'),
# Not implemented for backward derivative
DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad',
device_type='cpu'),
# CPU and CUDA have inconsistencies for intermediate outputs
DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_meta_outplace',
device_type='cpu'),
DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace',
device_type='cpu'),
# When changing input from Tensor to CompositeCompliantTensor, input.requires_grad() changes from true to false
DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward',
device_type='cpu'),
# OpInfo was implemented with a lambda
DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# TODO Need to understand what this is testing and why it doesn't work
DecorateInfo(unittest.skip("Skipped"), 'TestDecomp', 'test_comprehensive'),
DecorateInfo(unittest.skip('output is non-deterministic (when dropout_p > 0)'), 'TestCommon', 'test_compare_cpu'),
# TODO skip this for now since we can't skip on runtime arch support
DecorateInfo(unittest.skip('This is '), 'TestInductorOpInfo', 'test_comprehensive'),
# skip for sm < 80
DecorateInfo(unittest.skip("Skipped!"), 'TestSchemaCheckModeOpInfo', 'test_schema_correctness',
device_type='cuda', dtypes=(torch.bfloat16,), active_if=not SM80OrLater),
# FIXME
DecorateInfo(unittest.skip('test_cow_input does not work with efficient attention on ROCM'),
'TestCompositeCompliance', 'test_cow_input',
device_type='cuda', dtypes=(torch.bfloat16, torch.float16, torch.float32),
active_if=TEST_WITH_ROCM and PLATFORM_SUPPORTS_MEM_EFF_ATTENTION),),
),
OpInfo(
'torch.ops.aten._flash_attention_forward',
sample_inputs_func=sample_inputs_flash_attention_forward,
dtypes=empty_types(),
dtypesIfCUDA=custom_types(torch.float16)
if not SM80OrLater
else custom_types(torch.float16, torch.bfloat16),
supports_out=False,
supports_autograd=True,
supports_fwgrad_bwgrad=False,
supports_forward_ad=False,
check_batched_forward_grad=False,
decorators=[skipCUDAIf(not PLATFORM_SUPPORTS_FLASH_ATTENTION, "This platform doesn't support Flash Attention")],
skips=(
# Checking the scalar value of the philox seed and offset
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator', device_type='cuda'),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples', device_type='cuda'),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', device_type='cuda'),
# None Mismatch Tensor
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward', device_type='cuda'),
)
),
OpInfo(
'torch.ops.aten._efficient_attention_forward',
sample_inputs_func=sample_inputs_efficient_attention_forward,
dtypes=empty_types(),
dtypesIfCUDA=custom_types(torch.float16, torch.float32)
if not SM80OrLater
else custom_types(torch.float16, torch.float32, torch.bfloat16),
supports_out=False,
supports_autograd=True,
supports_fwgrad_bwgrad=False,
supports_forward_ad=False,
check_batched_forward_grad=False,
# TODO: Skip because it produces a CUDA illegal memory access for some reason
skip_cow_input_backward=True,
# FIXME: mask_type == 2 (LowerRight)
decorators=[
skipCUDAIf(not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, "This platform doesn't support efficient attention"),
skipCUDAIf(TEST_WITH_ROCM, "Efficient attention on ROCM doesn't support custom_mask_type==2"),
skipXPU],
skips=(
# Checking the scaler value of the philox seed and offset
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator', device_type='cuda'),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples', device_type='cuda'),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', device_type='cuda'),
# None Mismatch Tensor
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward', device_type='cuda'),
)
),
UnaryUfuncInfo(
'nn.functional.silu',
aten_backward_name='silu_backward',
ref=lambda x, inplace=False: x / (1 + np.exp(-x)),
dtypes=floating_types_and(torch.bfloat16, torch.float16),
supports_forward_ad=True,
supports_autograd=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=True,
supports_out=False,
inplace_variant=lambda x: torch.nn.functional.silu(x, inplace=True),
decorators=[
DecorateInfo(
toleranceOverride({
torch.float16: tol(atol=1e-3, rtol=1e-3),
torch.bfloat16: tol(atol=1e-4, rtol=1e-4)
}),
'TestUnaryUfuncs', device_type='cuda',
), ],
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
dtypes=(torch.cfloat,), device_type='cpu'),
),
autodiff_nonfusible_nodes=["aten::silu"],
),
# TODO: combine this with the nn.functional.silu OpInfo when
# complex autodiff for silu is supported or when
# the forward bug is fixed
# Note: silu errors when given inputs that require grad
# but it doesn't support grad in their dtype
# This is why the dtypes list above passes test_dtypes,
# because it's getting lucky and failing in forward
# because test_dtypes sets requires_grad to True
# THIS IS A BUG
UnaryUfuncInfo(
'nn.functional.silu',
variant_test_name='complex',
ref=lambda x, inplace=False:
x / (1 + np.exp(-x)),
dtypes=complex_types(),
dtypesIfCUDA=complex_types(),
supports_forward_ad=False,
supports_autograd=False,
assert_autodiffed=False,
supports_out=False,
inplace_variant=lambda x: torch.nn.functional.silu(x, inplace=True),
decorators=[
DecorateInfo(
toleranceOverride({
torch.float16: tol(atol=1e-3, rtol=1e-3),
torch.bfloat16: tol(atol=1e-4, rtol=1e-4)
}),
'TestUnaryUfuncs', device_type='cuda',
), ],
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
dtypes=(torch.cfloat,)),
# FIXME: intentionally misreports dtypes
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'),
# FIXME: numpy reference diverges: Comparing (nan+nanj) and (-0+0j)
DecorateInfo(unittest.skip("Skipped!"),
'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=(torch.complex64, torch.cdouble)),
DecorateInfo(unittest.skip("Skipped!"),
'TestUnaryUfuncs', 'test_reference_numerics_small',
dtypes=(torch.complex64,)),
DecorateInfo(unittest.skip("Skipped!"),
'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=(torch.complex64,)))),
UnaryUfuncInfo(
'nn.functional.hardsigmoid',
aten_backward_name='hardsigmoid_backward',
ref=reference_hardsigmoid,
dtypes=floating_types_and(torch.bfloat16, torch.float16),
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=False,
supports_forward_ad=True,
supports_out=False,
inplace_variant=partial(torch.nn.functional.hardsigmoid, inplace=True),
decorators=[
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-04, rtol=0.001)}), 'TestUnaryUfuncs', device_type='cuda',), ],
skips=[
# still want to test that first derivative works though second derivative isn't supported
DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', "test_inplace_gradgrad")]
),
UnaryUfuncInfo(
'nn.functional.logsigmoid',
aten_name="log_sigmoid",
aten_backward_name='log_sigmoid_backward',
ref=reference_logsigmoid,
dtypes=floating_types_and(torch.half, torch.bfloat16),
supports_autograd=True,
assert_autodiffed=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_gradgrad=True,
# autodiff_nonfusible_nodes=["aten::log_sigmoid"],
decorators=[
DecorateInfo(
precisionOverride({torch.float16: 1e-2, torch.bfloat16: 5e-3}),
'TestUnaryUfuncs', 'test_reference_numerics_small'),
DecorateInfo(
precisionOverride({torch.float16: 1e-2, torch.bfloat16: 5e-3}),
'TestUnaryUfuncs', 'test_reference_numerics_large'),
DecorateInfo(
precisionOverride({torch.float16: 1e-2, torch.bfloat16: 5e-3}),
'TestUnaryUfuncs', 'test_reference_numerics_extremal'),
],
skips=(
# Resized a non-empty tensor but did not warn about it.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type='cpu'),
),
),
UnaryUfuncInfo(
'nn.functional.mish',
aten_backward_name='mish_backward',
ref=lambda x: x * np.tanh(reference_softplus(x)),
dtypes=floating_types_and(torch.bfloat16, torch.float16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
inplace_variant=partial(torch.nn.functional.mish, inplace=True),
decorators=[
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}), 'TestUnaryUfuncs',), ],
),
UnaryUfuncInfo(
'nn.functional.softsign',
ref=lambda x: x / (np.abs(x) + 1),
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
decorators=[
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1.3e-04)}), 'TestUnaryUfuncs',), ],
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
dtypes=(torch.int, torch.int8)),),
),
UnaryUfuncInfo(
'nn.functional.tanhshrink',
ref=lambda x: x - np.tanh(x),
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
decorators=[
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(
toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1.6e-02)}), 'TestUnaryUfuncs',),
DecorateInfo(toleranceOverride({torch.complex64: tol(atol=6e-04, rtol=1e-05),
torch.bfloat16: tol(atol=1e-02, rtol=1.6e-02)}),
'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda'),
],
skips=(
# in each case, pytorch will produce a nan while numpy will not
DecorateInfo(unittest.skip("Fails on some jobs works on others!"),
'TestUnaryUfuncs', "test_reference_numerics_large",
dtypes=(torch.complex64, torch.complex128), active_if=(IS_MACOS)),
DecorateInfo(unittest.skip("Fails on some jobs works on others!"),
'TestUnaryUfuncs', "test_reference_numerics_extremal",
dtypes=(torch.complex64, torch.complex128), device_type='cpu',
active_if=(IS_MACOS or IS_WINDOWS)),
),
# tan(j * pi/2 * odd_number) is nan which also make tanhshrink nan.
reference_numerics_filter=NumericsFilter(
condition=lambda x: (close_to_int(x / (math.pi * 0.5j))
if x.is_complex() else x.new_tensor(False, dtype=torch.bool)),
safe_val=0)
),
UnaryUfuncInfo(
'nn.functional.threshold',
ref=lambda x, threshold, value: np.where(x <= threshold, value, x).astype(x.dtype),
dtypes=all_types_and(torch.half, torch.bfloat16),
inplace_variant=lambda x, threshold, value:
torch.nn.functional.threshold(x, threshold, value, inplace=True),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
sample_kwargs=lambda device, dtype, input: ({'threshold': float.fromhex('0x1.3ap-3'),
'value': -9},
{'threshold': float.fromhex('0x1.3ap-3'),
'value': -9}),
# TODO(whc) should not need sample_inputs_func, but without it
# kwargs aren't being hooked up properly
sample_inputs_func=sample_inputs_threshold,
),
OpInfo(
"nn.functional.triplet_margin_loss",
sample_inputs_func=sample_inputs_triplet_margin_loss,
error_inputs_func=error_inputs_triplet_margin_loss,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
),
OpInfo(
"nn.functional.triplet_margin_with_distance_loss",
sample_inputs_func=partial(sample_inputs_triplet_margin_loss, with_distance=True),
error_inputs_func=error_inputs_triplet_margin_loss,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# This test cannot handle a callable passed to `distance_function`. If we would use
# `distance_function=None`, the test would pass fine.
DecorateInfo(
unittest.expectedFailure,
"TestJit",
"test_variant_consistency_jit",
),
DecorateInfo(
unittest.expectedFailure,
"TestNormalizeOperators",
"test_normalize_operator_exhaustive",
),
),
),
BinaryUfuncInfo('nextafter',
dtypes=floating_types_and(torch.bfloat16, torch.half),
supports_autograd=False,
supports_rhs_python_scalar=False),
OpInfo(
"to",
op=lambda x, *args, **kwargs: x.to(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
sample_inputs_func=sample_inputs_to,
skips=(
# RuntimeError: undefined value cpu
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
device_type="cpu",
),
# NotImplementedError: Cannot copy out of meta tensor; no data!
DecorateInfo(
unittest.skip("Skipped!"),
"TestMeta",
"test_meta_outplace",
),
# https://github.com/pytorch/pytorch/issues/84335
DecorateInfo(
unittest.skip("Skipped!"),
"TestProxyTensorOpInfo",
"test_make_fx_symbolic_exhaustive",
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestNormalizeOperators",
"test_normalize_operator_exhaustive",
),
),
),
OpInfo('topk',
dtypes=all_types_and(torch.bfloat16, torch.float16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
sample_inputs_func=sample_inputs_topk),
# Multiple variants for batch_norm to test with and without cuDNN disabled
# See https://github.com/pytorch/pytorch/pull/63218#discussion_r688549391 for more details
OpInfo('nn.functional.batch_norm',
aten_name='batch_norm',
dtypes=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
allow_cow_input_materialize_forward=[1, 2],
allow_cow_input_materialize_backward=[1, 2],
sample_inputs_func=sample_inputs_batch_norm,
skips=(
# see https://github.com/pytorch/pytorch/issues/71286
DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'),
DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness',
device_type='cpu', dtypes=(torch.bfloat16, torch.float16)),
DecorateInfo(toleranceOverride({torch.float32: tol(atol=5e-05, rtol=1e-05)}),
'TestCompositeCompliance', 'test_forward_ad', device_type="cpu"),
)),
# This variant tests batch_norm with cuDNN disabled only on CUDA devices
OpInfo('nn.functional.batch_norm',
variant_test_name='without_cudnn',
aten_name='batch_norm',
dtypes=empty_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
allow_cow_input_materialize_forward=[1, 2],
allow_cow_input_materialize_backward=[1, 2],
decorators=[onlyCUDA, disablecuDNN],
skips=(
DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-04)}),
'TestJit', 'test_variant_consistency_jit'),
),
sample_inputs_func=sample_inputs_batch_norm),
OpInfo(
"nn.functional.binary_cross_entropy",
aten_backward_name='binary_cross_entropy_backward',
sample_inputs_func=sample_inputs_binary_cross_entropy,
dtypes=floating_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
gradcheck_fast_mode=False,
supports_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=(
# RuntimeError: expected int at position 0, but got: Tensor
DecorateInfo(
unittest.skip("Skipped!"),
"TestCudaFuserOpInfo",
),
# RuntimeError: expected int at position 0, but got: Tensor
DecorateInfo(
unittest.skip("Skipped!"),
"TestNNCOpInfo",
"test_nnc_correctness",
),
# Fails for unknown reason: https://github.com/pytorch/pytorch/issues/120783
DecorateInfo(
unittest.skip("Skipped!"),
"TestCompositeCompliance",
"test_cow_input",
device_type='cuda',
),
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-3, rtol=1e-3)}),
"TestJit",
"test_variant_consistency_jit",
),
# RuntimeError: output with shape [] doesn't match the broadcast shape [5, 5]
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_outplace'),
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace'),
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides'),
),
skips=(
# RuntimeError: expected int at position 0, but got: Tensor
DecorateInfo(
unittest.expectedFailure,
"TestJit",
"test_variant_consistency_jit",
),
),
),
    # We have to add 2 OpInfo entries for `igamma` and `igammac`. The first is the
    # standard entry; the second is to run gradcheck tests on the second argument.
BinaryUfuncInfo('igamma',
dtypes=floating_types_and(torch.bfloat16, torch.float16),
aliases=('torch.special.gammainc',),
dtypesIfCUDA=floating_types(),
# TODO: FIXME
supports_rhs_python_scalar=False,
supports_autograd=False,
skips=(
# FIXME: incorrectly tries to pass a rhs scalar
DecorateInfo(unittest.expectedFailure, 'TestJit',
'test_jit_alias_remapping'),
)),
    # TODO: FIXME, ideally by implementing grad for both inputs
# BinaryUfuncInfo('igamma',
# variant_test_name='grad_other',
# # Since autograd formula is implemented only for other and
# # gradcheck test verifies the formula for input in SampleInput,
# # we permute the arguments.
# op=lambda self, other, **kwargs: torch.igamma(other, self, **kwargs),
# inplace_variant=None,
# method_variant=None,
# supports_rhs_python_scalar=False,
# rhs_make_tensor_kwargs=dict(requires_grad=False),
# dtypes=floating_types_and(torch.bfloat16, torch.float16),
# backward_dtypesIfCPU=floating_types_and(torch.bfloat16),
# dtypesIfCUDA=floating_types(),
# backward_dtypesIfCUDA=floating_types(),
# supports_inplace_autograd=False,
# skips=(
# # Derivative wrt first tensor not implemented
# DecorateInfo(unittest.expectedFailure, "TestCommon",
    #                      "test_floating_inputs_are_differentiable"),
# # test does not work with passing lambda for op
# # AssertionError: False is not true : Tensors failed to compare as equal!
# DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# # test fails are we permute the arguments function variant
# # but not for inplace or method.
# DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),
# # TypeError: igamma(): argument 'input' (position 1) must be Tensor, not float
# DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs'),
# )),
BinaryUfuncInfo('igammac',
dtypes=floating_types_and(torch.bfloat16, torch.float16),
aliases=('torch.special.gammaincc',),
dtypesIfCUDA=floating_types(),
supports_autograd=False,
supports_rhs_python_scalar=False,
skips=(
# FIXME: incorrectly tries to pass a rhs scalar
DecorateInfo(unittest.expectedFailure, 'TestJit',
'test_jit_alias_remapping'),
)),
# TODO: FIXME, ideally by implementing grad for both inputs
# BinaryUfuncInfo('igammac',
# variant_test_name='grad_other',
# # Since autograd formula is implemented only for other and
# # gradcheck test verifies the formula for input in SampleInput,
# # we permute the arguments
# op=lambda self, other, **kwargs: torch.igammac(other, self, **kwargs),
# inplace_variant=None,
# method_variant=None,
# supports_rhs_python_scalar=False,
# rhs_make_tensor_kwargs=dict(requires_grad=False),
# dtypes=floating_types_and(torch.bfloat16, torch.float16),
# backward_dtypesIfCPU=floating_types_and(torch.bfloat16),
# dtypesIfCUDA=floating_types(),
# backward_dtypesIfCUDA=floating_types(),
# supports_inplace_autograd=False,
# decorators=[
# # Derivative wrt first tensor not implemented
# DecorateInfo(unittest.expectedFailure, "TestCommon",
# "test_floating_inputs_are_differentiable"),
# ],
# skips=(
# # test does not work with passing lambda for op
# # AssertionError: False is not true : Tensors failed to compare as equal!
# DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# # test fails are we permute the arguments function variant
# # but not for inplace or method.
# DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),
# # TypeError: igammac(): argument 'input' (position 1) must be Tensor, not float
# DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs'),
# )),
UnaryUfuncInfo('nn.functional.softshrink',
aten_name="softshrink",
aten_backward_name='softshrink_backward',
dtypes=floating_types_and(torch.bfloat16, torch.float16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=False,
sample_inputs_func=sample_inputs_softshrink,
error_inputs_func=error_inputs_softshrink),
UnaryUfuncInfo('nn.functional.hardshrink',
aten_name="hardshrink",
aten_backward_name='hardshrink_backward',
dtypes=floating_types_and(torch.bfloat16, torch.float16),
assert_autodiffed=True,
sample_inputs_func=sample_inputs_hardshrink,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
autodiff_nonfusible_nodes=["aten::hardshrink"]),
UnaryUfuncInfo('nn.functional.hardtanh',
aten_name="hardtanh",
aten_backward_name='hardtanh_backward',
dtypes=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64, torch.half, torch.bfloat16),
backward_dtypes=all_types_and(torch.half, torch.bfloat16),
backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
assert_autodiffed=True,
sample_inputs_func=sample_inputs_hardtanh,
error_inputs_func=error_inputs_hardtanh,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
autodiff_nonfusible_nodes=["aten::hardtanh"]),
OpInfo('nn.functional.gelu',
aten_name="gelu",
aten_backward_name='gelu_backward',
ref=reference_gelu if TEST_SCIPY else None,
error_inputs_func=error_inputs_gelu,
supports_autograd=True,
assert_autodiffed=True,
sample_inputs_func=sample_inputs_gelu,
dtypes=floating_types_and(torch.bfloat16, torch.half),
supports_gradgrad=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
autodiff_nonfusible_nodes=["aten::gelu"],
skips=(
# AssertionError: Tensor-likes are not close!
# May not replicate in CI
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'),
)),
UnaryUfuncInfo('nn.functional.relu6',
aten_name="relu6",
dtypes=all_types_and(torch.half, torch.bfloat16),
backward_dtypes=floating_types_and(torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
autodiff_nonfusible_nodes=["aten::relu6"]),
OpInfo('mm',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_mm,
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
'TestSchemaCheckModeOpInfo',
'test_schema_correctness',
dtypes=(torch.complex64, torch.complex128)),
# Fast math on MacOS-13?
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=2e-5, rtol=5e-6)}),
'TestConsistency',
'test_output_match',
active_if=lambda _: MACOS_VERSION < 14.0,
device_type='mps',
dtypes=(torch.float32,)),
)),
OpInfo('mode',
op=torch.mode,
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# Resized a non-empty tensor but did not warn about it
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# FIXME:
# Expected 2114 but got 1123.
# Absolute difference: 991 (up to 0.001 allowed)
# Relative difference: 0.46877956480605487 (up to 0.001 allowed)
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_compare_cpu",
dtypes=(torch.float32,),
device_type="cuda",
),
),
sample_inputs_func=sample_inputs_mode,),
make_mvlgamma_opinfo(variant_test_name='mvlgamma_p_1',
domain=(1, None),
skips=skips_mvlgamma(),
sample_kwargs=lambda device, dtype, input: ({'p': 1}, {'d': 1})),
make_mvlgamma_opinfo(variant_test_name='mvlgamma_p_3',
domain=(2, None),
skips=skips_mvlgamma(),
sample_kwargs=lambda device, dtype, input: ({'p': 3}, {'d': 3})),
make_mvlgamma_opinfo(variant_test_name='mvlgamma_p_5',
domain=(3, None),
skips=skips_mvlgamma(),
sample_kwargs=lambda device, dtype, input: ({'p': 5}, {'d': 5})),
BinaryUfuncInfo('ne',
ref=np.not_equal,
aliases=('not_equal',),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
always_returns_bool=True,
supports_autograd=False,
skips=(
)),
OpInfo('narrow',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=partial(sample_inputs_narrow_narrow_copy, is_narrow=True),
reference_inputs_func=partial(reference_inputs_narrow_narrow_copy, is_narrow=True),
error_inputs_func=partial(error_inputs_narrow_narrow_copy, is_narrow=True, is_ref=False),
skips=(
# Use of .item()
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'),
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'),
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
)),
OpInfo('narrow_copy',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf),
supports_out=True,
supports_forward_ad=False,
supports_fwgrad_bwgrad=False,
supports_autograd=False,
# https://github.com/pytorch/pytorch/issues/86931
sample_inputs_func=partial(sample_inputs_narrow_narrow_copy, is_narrow=False),
reference_inputs_func=partial(reference_inputs_narrow_narrow_copy, is_narrow=False),
error_inputs_func=partial(error_inputs_narrow_narrow_copy, is_narrow=False, is_ref=False),
skips=(
# https://github.com/pytorch/pytorch/issues/84577
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# Could not run 'aten::narrow_copy.out' with arguments from the 'CUDA' backend
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_meta_outplace',
device_type='cuda'),
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_outplace',
device_type='cuda'),
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace',
device_type='cuda'),
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides'),
)),
OpInfo('view_copy',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
ref=lambda x, newshape: np.reshape(x, newshape).copy(),
supports_out=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_autograd=True,
sample_inputs_func=sample_inputs_view_reshape,
error_inputs_func=error_inputs_view_reshape,
skips=(
# RuntimeError: view size is not compatible with input tensor's size and stride
# (at least one dimension spans across two contiguous subspaces). Use .reshape(...) instead.
DecorateInfo(
unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"
),
)),
UnaryUfuncInfo('neg',
aliases=('negative', ),
ref=np.negative,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf),
error_inputs_func=error_inputs_neg,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
assert_autodiffed=True),
OpInfo('dist',
op=torch.dist,
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
# torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got:
# Could not allocate memory to change Tensor SizesAndStrides!
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_dist),
OpInfo('outer',
op=torch.outer,
aliases=('ger', ),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_outer,),
OpInfo('ormqr',
op=torch.ormqr,
dtypes=floating_and_complex_types(),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=False,
supports_fwgrad_bwgrad=False,
sample_inputs_func=sample_inputs_ormqr,
error_inputs_func=error_inputs_ormqr,
decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack],
skips=(
# Strides are not the same!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
)),
OpInfo('permute',
ref=np.transpose,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
supports_out=False,
assert_autodiffed=True,
autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused
autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused
assert_jit_shape_analysis=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_varargs=True,
sample_inputs_func=sample_inputs_permute,
reference_inputs_func=reference_inputs_permute),
OpInfo('permute_copy',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
supports_out=True,
assert_autodiffed=True,
assert_jit_shape_analysis=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_varargs=False, # torch.permute is also not varargs
sample_inputs_func=sample_inputs_permute,
reference_inputs_func=reference_inputs_permute,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
)),
BinaryUfuncInfo('pow',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf),
ref=np.power,
# Due to AVX2 currently not being fully supported for Float16, log_vml_cpu can't be enabled
# for Float16, causing this test to fail. pow's autograd for Float16 is thus currently
# unsupported on CPU.
backward_dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.bfloat16, torch.half, torch.chalf),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_inplace_autograd=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=True,
supports_one_python_scalar=True,
                    # Integer types do not support negative exponents
rhs_make_tensor_kwargs=dict(low=0),
# Raising negative real numbers to fractional powers is not supported
lhs_make_tensor_kwargs=dict(low=0),
decorators=(
DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05)}),
'TestBinaryUfuncs', 'test_reference_numerics'),
DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05),
torch.complex128: tol(atol=1e-4, rtol=1.3e-05)}),
'TestBinaryUfuncs', 'test_scalar_support'),
),
skips=(
# Skipping integers because they are being raised to negative powers causing an error
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_reference_numerics_small_values',
dtypes=[torch.int8, torch.int16, torch.int32, torch.int64]),
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_reference_numerics_large_values',
dtypes=[torch.int16, torch.int32, torch.int64]),
# FIXME Complex values error with: Greatest absolute difference: nan at index
# Ref: https://github.com/pytorch/pytorch/issues/76853
# For `chalf`, reference computation in `numpy` is computed in `cfloat`.
# Output of `chalf` saturates to `inf` quicker than reference due to its small range
# which leads to failure of this test.
DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_quick',
dtypes=(torch.complex32,), active_if=TEST_WITH_ROCM),
# FIXME:
# Mismatched elements: 1 / 500 (0.2%)
# Greatest absolute difference: nan at index (7, 9, 0) (up to 1e-05 allowed)
# Greatest relative difference: nan at index (7, 9, 0) (up to 0.001 allowed)
DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_comprehensive',
dtypes=(torch.complex32,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_complex_half_reference_testing',
dtypes=(torch.complex32,), active_if=TEST_WITH_ROCM),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_batch_vs_slicing',
dtypes=(torch.complex32,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_non_contig',
dtypes=(torch.complex32,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics',
dtypes=(torch.complex32,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values',
dtypes=(torch.complex32, torch.complex64, torch.complex128)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_large_values',
dtypes=(torch.complex32, torch.complex64, torch.complex128)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values',
dtypes=(torch.complex32, torch.complex64, torch.complex128)),
)),
BinaryUfuncInfo('float_power',
ref=np.float_power,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
promotes_int_to_float=True,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_one_python_scalar=True,
                    # Integer types do not support negative exponents
rhs_make_tensor_kwargs=dict(low=0),
# Raising negative real numbers to fractional powers is not supported
lhs_make_tensor_kwargs=dict(low=0),
decorators=(
DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05),
torch.complex128: tol(atol=1e-4, rtol=1.3e-05)}),
'TestBinaryUfuncs', 'test_scalar_support'),
),
skips=(
# FIXME
# AssertionError: Object comparison failed: torch.float64 != torch.float32
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'),
# -3.43399e+38 is outside the range of representable values of type 'float'
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# Complex values error with: Greatest absolute difference: nan at index
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values',
dtypes=[torch.complex64, torch.complex128]),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_large_values',
dtypes=[torch.complex64, torch.complex128]),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values',
dtypes=[torch.complex64, torch.complex128]),
# Inplace always promotes to double and thus other floating dtypes are not supported
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_meta_inplace',
dtypes=[torch.bfloat16, torch.float16, torch.float32]),
)),
OpInfo('qr',
op=torch.qr,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_qr_geqrf,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# In-place ops
check_batched_gradgrad=False,
decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack]),
UnaryUfuncInfo('rad2deg',
ref=np.degrees,
decorators=(precisionOverride({torch.bfloat16: 7e-1,
torch.float16: 7e-1}),),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
promotes_int_to_float=True),
UnaryUfuncInfo('real',
ref=np.real,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
skips=(
# Skip since real and imag don't have out variants.
DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'),
)),
OpInfo(
"roll",
ref=np.roll,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf),
error_inputs_func=error_inputs_roll,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_roll,
decorators=(onlyNativeDeviceTypes,),
),
OpInfo(
"rot90",
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
error_inputs_func=error_inputs_rot90,
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_rot90,
),
    # To test reference numerics against multiple values of argument `decimals`,
    # we make multiple OpInfo entries, each corresponding to a different value of decimals.
UnaryUfuncInfo('round',
ref=np.round,
aliases=('special.round',),
dtypes=all_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
DecorateInfo(unittest.expectedFailure,
'TestNNCOpInfo',
'test_nnc_correctness',
dtypes=tuple(t for t in integral_types() if t != torch.uint8)),
DecorateInfo(unittest.skip("Skipped!"),
'TestNNCOpInfo',
'test_nnc_correctness',
dtypes=(torch.bfloat16,)),
),
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
assert_autodiffed=True,
),
UnaryUfuncInfo('round',
ref=np.round,
variant_test_name='decimals_0',
aliases=('special.round',),
dtypes=floating_types_and(torch.half, torch.bfloat16),
sample_kwargs=lambda device, dtype, input: ({'decimals': 0}, {'decimals': 0}),
sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'decimals': 0}),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=False,
supports_sparse_csr=False),
UnaryUfuncInfo('round',
ref=np.round,
variant_test_name='decimals_3',
aliases=('special.round',),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
sample_kwargs=lambda device, dtype, input: ({'decimals': 3}, {'decimals': 3}),
sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'decimals': 3}),
skips=(
# test_ops already tested for this overload with `decimals_0` opinfo entry
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'),
DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits'),
DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-3, rtol=0.016)}),
"TestUnaryUfuncs", "test_reference_numerics_extremal",
device_type="cuda"),
DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-3, rtol=0.016)}),
"TestUnaryUfuncs", "test_reference_numerics_normal",
device_type="cuda"),
),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=False,
supports_sparse_csr=False),
UnaryUfuncInfo('round',
ref=np.round,
variant_test_name='decimals_neg_3',
aliases=('special.round',),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
sample_kwargs=lambda device, dtype, input: ({'decimals': -3}, {'decimals': -3}),
sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'decimals': -3}),
skips=(
# test_ops already tested for this overload with `decimals_0` opinfo entry
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'),
DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits'),
),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=False,
supports_sparse_csr=False),
UnaryUfuncInfo('sin',
ref=np.sin,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
handles_large_floats=False,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_float=True,
skips=(
# Fails on CUDA but passes on ROCm
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=(torch.cdouble,), device_type='cuda'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-3, rtol=2e-3)}),
"TestConsistency", "test_output_grad_match", device_type="mps"),
),
decorators=(precisionOverride({torch.bfloat16: 1e-2}),)),
UnaryUfuncInfo('sinc',
ref=np_sinc_with_fp16_as_fp32,
aliases=('special.sinc',),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
handles_large_floats=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_float=True),
UnaryUfuncInfo('sinh',
ref=np_unary_ufunc_integer_promotion_wrapper(np.sinh),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
promotes_int_to_float=True,
decorators=(precisionOverride({torch.float16: 1e-2}),),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=(torch.cdouble,)),
# Reference: https://github.com/pytorch/pytorch/issues/48641
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.int8]),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
)),
UnaryUfuncInfo('sign',
ref=reference_sign,
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.half),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/41245
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]),
)),
UnaryUfuncInfo('sgn',
ref=reference_sgn,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf),
backward_dtypes=floating_and_complex_types_and(torch.bfloat16, torch.half),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.bfloat16, torch.half, torch.chalf),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/41245
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
)),
OpInfo('split',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf),
sample_inputs_func=partial(sample_inputs_split, list_args=False),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused
autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused
assert_autodiffed=True),
OpInfo('split',
# Cannot declare this aten_name because of
# test_variant_consistency_jit_split_list_args_cpu_float32
decomp_aten_name='split_with_sizes',
variant_test_name='list_args',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=partial(sample_inputs_split, list_args=True),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False),
# `unsafe_split` supports only `int` for split_size argument
OpInfo('unsafe_split',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf),
sample_inputs_func=partial(sample_inputs_split, list_args=False),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused
autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused
assert_autodiffed=True,
check_batched_forward_grad=False),
OpInfo('split_with_sizes',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf),
sample_inputs_func=sample_inputs_split_with_sizes,
autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused
autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=True),
OpInfo('split_with_sizes_copy',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf),
sample_inputs_func=sample_inputs_split_with_sizes,
supports_out=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# No error raised
DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out_requires_grad_error"),
)),
BinaryUfuncInfo('__radd__',
op=torch.Tensor.__radd__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
supports_out=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',),
),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
autodiff_nonfusible_nodes=['aten::add'],),
BinaryUfuncInfo('__rdiv__',
op=torch.Tensor.__rdiv__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
promotes_int_to_float=True,
lhs_make_tensor_kwargs={'exclude_zero': True},
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
skips=(
# https://github.com/pytorch/pytorch/issues/76806
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'),
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',),
),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=True,
autodiff_nonfusible_nodes=['aten::mul', 'aten::reciprocal'],),
BinaryUfuncInfo('__rmul__',
op=torch.Tensor.__rmul__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
supports_out=False,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',),
),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
autodiff_nonfusible_nodes=['aten::mul'],),
BinaryUfuncInfo('__rand__',
op=torch.Tensor.__rand__,
dtypes=integral_types_and(torch.bool),
supports_out=False,
supports_autograd=False,
supports_forward_ad=True,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
)),
BinaryUfuncInfo('__ror__',
op=torch.Tensor.__ror__,
dtypes=integral_types_and(torch.bool),
supports_out=False,
supports_autograd=False,
supports_forward_ad=True,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
)),
BinaryUfuncInfo('__rxor__',
op=torch.Tensor.__rxor__,
dtypes=integral_types_and(torch.bool),
supports_out=False,
supports_autograd=False,
supports_forward_ad=True,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
)),
OpInfo('__rmatmul__',
op=torch.Tensor.__rmatmul__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16,
*[torch.bfloat16]
if SM53OrLater or TEST_WITH_ROCM else []),
assert_autodiffed=True,
sample_inputs_func=partial(sample_inputs_matmul, is_rmatmul=True),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_forward_grad=False,
decorators=(
# NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater),
DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),
'TestMathBits', 'test_conj_view'),
DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1.2e-03)}),
'TestCommon', 'test_noncontiguous_samples'),
DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1e-05)}),
"TestDecomp", "test_comprehensive", device_type="cuda",
active_if=TEST_WITH_ROCM),
),
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',),
# https://github.com/pytorch/pytorch/issues/67470
DecorateInfo(unittest.skip("67470!"),
'TestCommon', 'test_noncontiguous_samples',
device_type='cpu', dtypes=(torch.long,)),
# Fails on XLA.
# AssertionError: False is not true : Tensors failed to compare as equal
DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla', dtypes=(torch.long,)),
# https://github.com/pytorch/pytorch/issues/71774
DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness',
device_type='cpu', dtypes=(torch.long,)),
)),
BinaryUfuncInfo('__rmod__',
op=torch.Tensor.__rmod__,
dtypes=floating_types_and(torch.bfloat16, torch.half,),
dtypesIfCUDA=all_types_and(torch.bfloat16, torch.half),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_one_python_scalar=True,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',),
),
# Support autograd after torch.remainder(Tensor, Tensor) supports
# autograd of the second argument.
# https://github.com/pytorch/pytorch/pull/58476/files#r637167630
# supports_autograd=False,
assert_autodiffed=True,
autodiff_nonfusible_nodes=['aten::remainder'],),
BinaryUfuncInfo('__rpow__',
op=torch.Tensor.__rpow__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
# Reference: https://github.com/pytorch/pytorch/issues/54774
# "log2" "_vml_cpu" not implemented for Half
backward_dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_one_python_scalar=True,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',),
# TODO: FIXME tolerance is too high
DecorateInfo(unittest.skip('Skipped!'), 'TestFwdGradients'),
DecorateInfo(unittest.skip('Skipped!'), 'TestBwdGradients'),
),
assert_autodiffed=True,
autodiff_nonfusible_nodes=['aten::pow'],),
BinaryUfuncInfo('__rsub__',
op=torch.Tensor.__rsub__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
supports_one_python_scalar=True,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',),
),
assert_autodiffed=True,
autodiff_nonfusible_nodes=['aten::rsub'],),
BinaryUfuncInfo('rsub',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
supports_inplace_autograd=False,
assert_autodiffed=None,
sample_inputs_func=sample_inputs_add_sub),
OpInfo('select',
aten_backward_name='select_backward',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf),
sample_inputs_func=sample_inputs_select,
assert_jit_shape_analysis=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False),
OpInfo('select_scatter',
dtypes=all_types_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_select_scatter,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False),
OpInfo('slice',
op=torch.ops.aten.slice.Tensor,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf),
sample_inputs_func=sample_inputs_slice,
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_scripting=False,
supports_inplace_autograd=False,
supports_out=False),
OpInfo('slice_scatter',
dtypes=all_types_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_slice_scatter,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=True),
UnaryUfuncInfo('signbit',
ref=np.signbit,
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half),
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
supports_autograd=False,),
UnaryUfuncInfo('tan',
ref=np.tan,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
decorators=(DecorateInfo(
toleranceOverride({torch.complex64: tol(atol=1e-04, rtol=1e-05)}),
'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda'),),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
promotes_int_to_float=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
# FIXME:
# Mismatched elements: 2 / 400 (0.5%)
# Greatest absolute difference: inf at index (7, 16) (up to 1e-05 allowed)
# Greatest relative difference: nan at index (7, 16) (up to 0.001 allowed)
DecorateInfo(
unittest.skip("Skipped!"),
"TestInductorOpInfo",
"test_comprehensive",
dtypes=(torch.float16,),
device_type="cuda",
),
DecorateInfo(toleranceOverride({torch.complex64: tol(atol=3e-5, rtol=7e-6)}),
"TestConsistency", "test_output_match", device_type="mps"),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-3, rtol=2e-3)}),
"TestConsistency", "test_output_grad_match", device_type="mps"),
),
# tan(pi/2 * odd_number) is nan
reference_numerics_filter=NumericsFilter(
condition=lambda x: close_to_int(x / (math.pi * 0.5)), safe_val=math.pi)),
UnaryUfuncInfo('tanh',
ref=np.tanh,
aten_backward_name='tanh_backward',
aliases=('nn.functional.tanh',),
decorators=(precisionOverride({torch.bfloat16: 1e-2}),
DecorateInfo(
toleranceOverride({torch.complex64: tol(atol=1e-04, rtol=2e-05)}),
'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda'),),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
assert_jit_shape_analysis=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
promotes_int_to_float=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
DecorateInfo(toleranceOverride({torch.complex64: tol(atol=3e-5, rtol=7e-6)}),
"TestConsistency", "test_output_match", device_type="mps"),
),
                       # tanh(j * pi/2 * odd_number) is nan (poles of tanh on the imaginary axis)
reference_numerics_filter=NumericsFilter(
condition=lambda x: (close_to_int(x / (math.pi * 0.5j))
if x.is_complex() else x.new_tensor(False, dtype=torch.bool)),
safe_val=0)),
OpInfo('tensor_split',
ref=np.array_split,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'),
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'),
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'),
),
sample_inputs_func=sample_inputs_tensor_split,),
OpInfo('hsplit',
dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_hsplit,
error_inputs_func=error_inputs_hsplit,),
OpInfo('vsplit',
dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_vsplit,
error_inputs_func=error_inputs_vsplit,),
OpInfo('dsplit',
dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_dsplit,
error_inputs_func=error_inputs_dsplit,),
OpInfo('triangular_solve',
op=torch.triangular_solve,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_legacy_solve,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
gradcheck_wrapper=lambda *args, **kwargs: gradcheck_wrapper_triangular_input(*args, idx=1, **kwargs),
decorators=[
skipCUDAIfNoMagma,
skipCPUIfNoLapack,
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=3e-5, rtol=3e-6)}),
'TestConsistency', 'test_output_match', device_type='cpu',
),
],
skips=(
# AssertionError: Scalars are not equal!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
# Gradcheck fails
DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_fn_fwgrad_bwgrad',
dtypes=floating_and_complex_types()),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
device_type='mps', dtypes=[torch.float32]),
)),
UnaryUfuncInfo('trunc',
aliases=('fix', ),
ref=np.trunc,
dtypes=all_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
skips=(
DecorateInfo(unittest.expectedFailure,
'TestNNCOpInfo',
'test_nnc_correctness',
dtypes=tuple(t for t in integral_types() if t != torch.uint8)),
),
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
assert_autodiffed=True),
UnaryUfuncInfo('exp2',
aliases=('special.exp2', ),
ref=np_unary_ufunc_integer_promotion_wrapper(np.exp2),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_float=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=[torch.cdouble]),
# Reference: https://github.com/pytorch/pytorch/issues/48010
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
)),
UnaryUfuncInfo('expm1',
aliases=('special.expm1', ),
ref=np_unary_ufunc_integer_promotion_wrapper(np.expm1),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
promotes_int_to_float=True,
assert_autodiffed=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cuda', dtypes=[torch.complex128]),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
)),
UnaryUfuncInfo('nan_to_num',
ref=np.nan_to_num,
dtypes=all_types_and(torch.half, torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bool, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
skips=(
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
),
# Passing numpy_kwargs via sample_kwargs, as numpy does comparison
# with BFloat16 in float, since it currently doesn't support BFloat16.
# Ref: https://github.com/pytorch/pytorch/issues/57982#issuecomment-839150556
sample_kwargs=lambda device, dtype, input: ({},
{'posinf': torch.finfo(torch.bfloat16).max,
'neginf': torch.finfo(torch.bfloat16).min})
if dtype is torch.bfloat16 else ({}, {})),
UnaryUfuncInfo('reciprocal',
ref=np_unary_ufunc_integer_promotion_wrapper(np.reciprocal),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_float=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/45690
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.cfloat, torch.cdouble]),
)),
UnaryUfuncInfo('rsqrt',
ref=lambda x: np.reciprocal(np.sqrt(x)),
domain=(0, None),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
decorators=(precisionOverride({torch.half: 5e-2}),),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_float=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=(torch.cfloat, torch.cdouble)),
# AssertionError: Tensor-likes are not close!
# Greatest absolute difference: nan at index (700,) (up to 0.01 allowed)
# Greatest relative difference: nan at index (700,) (up to 0.001 allowed)
DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=(torch.chalf,)),
)),
UnaryUfuncInfo('sqrt',
ref=np.sqrt,
supports_sparse=True,
domain=(0, None),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_float=True,
decorators=(
precisionOverride({torch.bfloat16: 7e-2}),
DecorateInfo(
toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}),
'TestUnaryUfuncs', 'test_reference_numerics_large'),
),
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/47358
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cpu', dtypes=(torch.cfloat, torch.cdouble),
active_if=IS_MACOS),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
DecorateInfo(toleranceOverride({torch.complex64: tol(atol=2e-5, rtol=3e-6)}),
"TestConsistency", "test_output_match", device_type="mps"),
)),
UnaryUfuncInfo('square',
ref=np.square,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
decorators=(precisionOverride({torch.complex64: 3e-4, torch.bfloat16: 3e-1}),),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/52549
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=[torch.cfloat, torch.cdouble]),
# >>> t = torch.tensor(complex(-0.01, float("inf")))
# >>> np.square(t.numpy())
# (-inf-infj)
# >>> t.square()
# tensor(-inf-infj)
# >>> t.cuda().square()
# tensor(inf+nanj, device='cuda:0')
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_meta_inplace',
dtypes=[torch.bool]),
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_inplace',
dtypes=[torch.bool]),
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_inplace',
dtypes=[torch.bool]),
),),
OpInfo('lerp',
dtypes=floating_and_complex_types_and(torch.bfloat16, torch.half),
dtypesIfCUDA=floating_and_complex_types_and(torch.chalf, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_lerp,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=True),
UnaryUfuncInfo('angle',
ref=np.angle,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool),
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-2}),),
backward_dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.chalf),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
supports_complex_to_float=True,
skips=(
# Ref: https://github.com/pytorch/pytorch/issues/78413
DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_small',
dtypes=(torch.bfloat16, torch.float16, torch.float32, torch.float64),),
)),
UnaryUfuncInfo('isfinite',
ref=np.isfinite,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf),
supports_out=False,
supports_autograd=False),
UnaryUfuncInfo('isinf',
ref=np.isinf,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf),
supports_out=False,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
supports_autograd=False),
UnaryUfuncInfo('isposinf',
ref=np.isposinf,
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
supports_autograd=False),
UnaryUfuncInfo('isneginf',
ref=np.isneginf,
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
supports_autograd=False),
UnaryUfuncInfo('isreal',
ref=np.isreal,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf),
supports_out=False,
supports_autograd=False),
UnaryUfuncInfo('isnan',
ref=np.isnan,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
supports_autograd=False),
OpInfo('einsum',
# we need this lambda because SampleInput expects tensor input as the first argument
# TODO(@heitorschueroff) update SampleInput to handle such cases
op=lambda tensors, equation: torch.einsum(equation, tensors),
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_forward_grad=False,
# See https://github.com/pytorch/pytorch/issues/66357
sample_inputs_func=sample_inputs_einsum,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# test does not work with passing lambda for op
# there's a test `test_einsum` in `test_jit.py` to handle this case
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('svd',
op=torch.svd,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_svd,
# Runs very slowly on slow-gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_forward_grad=False,
# We're using at::allclose, which does not have a batching rule
check_batched_grad=False,
check_batched_gradgrad=False,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off],
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
'TestSchemaCheckModeOpInfo',
'test_schema_correctness',
dtypes=(torch.complex64, torch.complex128)),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
device_type='mps', dtypes=[torch.float32]),
)),
OpInfo('svd_lowrank',
op=lambda *args, **kwargs: wrapper_set_seed(
lambda a, b, **kwargs: torch.svd_lowrank(a @ b.mT, **kwargs),
*args, **kwargs
),
dtypes=floating_and_complex_types(),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
# Due to the use of randomness
check_batched_grad=False,
check_batched_gradgrad=False,
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_svd_lowrank,
decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack, with_tf32_off,
DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-03),
torch.complex64: tol(atol=1e-02, rtol=1e-02)}),
'TestCommon', 'test_noncontiguous_samples'),
# FIXME This should be the following, but the toleranceOverride does not seem to do anything!
# DecorateInfo(toleranceOverride({torch.complex128: tol(atol=1e-04, rtol=1e-04)}),
# 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'),
DecorateInfo(unittest.skip("See comment above"),
'TestFwdGradients',
'test_fn_fwgrad_bwgrad',
dtypes=[torch.complex128]),
],
skips=(
# test does not work with passing lambda for op
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(unittest.expectedFailure, 'TestSchemaCheckModeOpInfo', 'test_schema_correctness',
dtypes=(torch.complex64, torch.complex128)),
DecorateInfo(slowTest, 'TestCompositeCompliance', 'test_forward_ad'),
)),
OpInfo('pca_lowrank',
op=lambda *args, **kwargs: wrapper_set_seed(
lambda a, b, **kwargs: torch.pca_lowrank(a @ b.mT, **kwargs),
*args, **kwargs
),
dtypes=floating_and_complex_types(),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
check_batched_forward_grad=False,
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_pca_lowrank,
decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack, with_tf32_off,
DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-03),
torch.complex64: tol(atol=4e-02, rtol=4e-02)}),
'TestCommon', 'test_noncontiguous_samples'),
DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-05, rtol=5e-05)}),
'TestOperators', 'test_grad'),
# FIXME This should be the following, but the toleranceOverride does not seem to do anything!
# DecorateInfo(toleranceOverride({torch.complex128: tol(atol=1e-04, rtol=1e-04)}),
# 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'),
DecorateInfo(unittest.skip("See comment above"),
'TestFwdGradients',
'test_fn_fwgrad_bwgrad',
dtypes=[torch.complex128]),
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=3e-5, rtol=1e-3)}),
'TestInductorOpInfo', 'test_comprehensive', device_type='cuda'),
],
skips=(
# test does not work with passing lambda for op
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(unittest.expectedFailure, 'TestSchemaCheckModeOpInfo', 'test_schema_correctness',
dtypes=(torch.complex64, torch.complex128)),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
)),
BinaryUfuncInfo('polar',
dtypes=floating_types(),
# this function is undefined if 'abs' values are <0
supports_forward_ad=True,
lhs_make_tensor_kwargs=dict(low=0),
supports_rhs_python_scalar=False,
skips=(
# RuntimeError: Expected object of scalar type Float but got scalar type Double for second argument
DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', 'test_type_promotion'),
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'),
# GradcheckError: Jacobian computed with forward mode mismatch for output 0 with respect to input 0
# Numerical:
# tensor([[0.]], dtype=torch.float64)
# Analytical:
# tensor([[-0.0047]], dtype=torch.float64, grad_fn=<CopySlices>)
DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'),
)),
# TODO(@kshitij12345): Refactor similar to `mvlgamma` entries.
# To test reference numerics against multiple values of argument `n`,
# we make multiple OpInfo entries with each entry corresponding to different value of n (currently 0 to 4).
# We run the op tests from test_ops.py only for `n=0` to avoid redundancy in testing.
UnaryUfuncInfo('polygamma',
op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
variant_test_name='polygamma_n_0',
ref=reference_polygamma if TEST_SCIPY else None,
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_float=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
),
sample_kwargs=lambda device, dtype, input: ({'n': 0}, {'n': 0}),
# polygamma functions have multiple singularities at x having non-positive integer value
reference_numerics_filter=NumericsFilter(condition=lambda x: (x < 0.1) & ((x - x.round()).abs() < 1e-4),
safe_val=1)),
*(UnaryUfuncInfo('polygamma',
op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
variant_test_name=f'polygamma_n_{n_}',
ref=reference_polygamma if TEST_SCIPY else None,
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_float=True,
sample_inputs_func=sample_inputs_polygamma,
decorators=(
DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-3)}), 'TestUnaryUfuncs'),
DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e1, rtol=1e-1),
torch.float32: tol(atol=1e-4, rtol=1e-2)}),
'TestUnaryUfuncs', 'test_reference_numerics_normal',
active_if=IS_WINDOWS),
),
skips=(
# Redundant tests
DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'),
DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
# Mismatch: https://github.com/pytorch/pytorch/issues/55357
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large'),
),
sample_kwargs=lambda device, dtype, input: ({'n': n_}, {'n': n_}),
# polygamma functions have multiple singularities at x having non-positive integer value
reference_numerics_filter=NumericsFilter(condition=lambda x: (x < 0.1) & ((x - x.round()).abs() < 1e-4),
safe_val=1))
for n_ in (1, 2, 3, 4)),
OpInfo('ravel',
ref=np.ravel,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_ravel,
),
OpInfo('unravel_index',
ref=np.unravel_index,
dtypes=integral_types_and(),
supports_out=False,
supports_autograd=False,
sample_inputs_func=sample_inputs_unravel_index,
),
OpInfo('reshape',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
sample_inputs_func=sample_inputs_view_reshape,
reference_inputs_func=reference_inputs_view_reshape,
error_inputs_func=error_inputs_view_reshape,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
),
OpInfo('reshape_as',
op=lambda x, other: x.reshape_as(other),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
sample_inputs_func=partial(sample_inputs_view_reshape, tensor_arg=True),
reference_inputs_func=partial(reference_inputs_view_reshape, tensor_arg=True),
error_inputs_func=partial(error_inputs_view_reshape, tensor_arg=True),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
)),
OpInfo('view',
op=lambda x, shape: x.view(shape),
dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
sample_inputs_func=sample_inputs_view_reshape,
reference_inputs_func=reference_inputs_view_reshape,
error_inputs_func=error_inputs_view_reshape,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: view size is not compatible with input tensor's size and stride
# (at least one dimension spans across two contiguous subspaces). Use .reshape(...) instead.
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"),
)),
OpInfo('view_as',
op=lambda x, other: x.view_as(other),
dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=partial(sample_inputs_view_reshape, tensor_arg=True),
reference_inputs_func=partial(reference_inputs_view_reshape, tensor_arg=True),
error_inputs_func=partial(error_inputs_view_reshape, tensor_arg=True),
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: view size is not compatible with input tensor's size and stride
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides")
)),
OpInfo('atleast_1d',
dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_atleast1d2d3d,
skips=(
# JIT does not support variadic tensors.
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252,
# please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]),
),
),
OpInfo('atleast_2d',
dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]),
),
sample_inputs_func=sample_inputs_atleast1d2d3d,
),
OpInfo('atleast_3d',
dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]),
),
sample_inputs_func=sample_inputs_atleast1d2d3d,
),
OpInfo('flatten',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
ref=reference_flatten,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_flatten,
reference_inputs_func=reference_inputs_flatten,
),
OpInfo('unflatten',
op=torch.unflatten,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_unflatten,
),
OpInfo('column_stack',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_column_stack,),
OpInfo('pinverse',
op=torch.pinverse,
dtypes=floating_and_complex_types(),
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_out=False,
sample_inputs_func=sample_inputs_linalg_invertible,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
device_type='mps', dtypes=[torch.float32]),
)),
OpInfo('gather',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_gather,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
error_inputs_func=error_inputs_gather,
),
OpInfo('index_fill',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.complex32),
inplace_variant=torch.Tensor.index_fill_,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
skips=(
# RuntimeError: Mismatch on aten._unique.default: Shapes torch.Size([2]) and torch.Size([1]) are not equal!
DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake_crossref_backward_no_amp'),
# RuntimeError: Mismatch on aten._unique.default: Shapes torch.Size([2]) and torch.Size([1]) are not equal!
DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake_crossref_backward_amp'),
),
sample_inputs_func=sample_inputs_index,
reference_inputs_func=partial(sample_inputs_index, reference=True)),
OpInfo('index_copy',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.complex32),
supports_out=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_index,
reference_inputs_func=partial(sample_inputs_index, reference=True),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('index_select',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16, torch.chalf),
sample_inputs_func=sample_inputs_index,
reference_inputs_func=partial(sample_inputs_index, reference=True),
error_inputs_func=error_inputs_index_select,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('index_add',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
inplace_variant=torch.Tensor.index_add_,
supports_out=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_index,
reference_inputs_func=partial(sample_inputs_index, reference=True),
error_inputs_func=error_inputs_index_add,
skips=(
# boolean alpha not handled properly
DecorateInfo(unittest.expectedFailure,
'TestNNCOpInfo',
'test_nnc_correctness',
dtypes=(torch.bool,)),
),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
*(OpInfo('index_reduce',
variant_test_name=reduction_type,
dtypes=all_types_and(torch.float16, torch.bfloat16),
skips=(
DecorateInfo(toleranceOverride({torch.float16: tol(atol=2e-3, rtol=3e-3)}),
'TestInductorOpInfo', 'test_comprehensive'),
),
supports_out=True,
sample_inputs_func=sample_inputs_index_reduce,
) for reduction_type in ('mean', 'prod', 'amin', 'amax')),
OpInfo('_unsafe_masked_index',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool),
supports_out=False,
supports_inplace_autograd=False,
supports_scripting=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs__unsafe_masked_index,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'),
DecorateInfo(slowTest, 'TestDecomp', 'test_quick_core_backward',
dtypes=(torch.float64,), active_if=IS_WINDOWS),
),),
OpInfo('_unsafe_masked_index_put_accumulate',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool),
supports_out=False,
supports_inplace_autograd=False,
supports_scripting=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=(
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=2e-3, rtol=3e-2)}),
'TestInductorOpInfo', 'test_comprehensive', device_type='cpu'
),
),
sample_inputs_func=sample_inputs__unsafe_masked_index_put_accumulate,
skips=(
DecorateInfo(slowTest, 'TestDecomp', 'test_quick_core_backward',
dtypes=(torch.float64,), active_if=IS_WINDOWS),
),),
OpInfo('__getitem__',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_inplace_autograd=False,
supports_scripting=False,
op=torch.Tensor.__getitem__,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# AssertionError: False is not true : Scalars failed to compare as equal! 0 != 104448
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='cuda'),),
sample_inputs_func=sample_inputs_getitem),
OpInfo('index_put',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
supports_out=False,
supports_inplace_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
test_neg_view=False,
sample_inputs_func=sample_inputs_index_put,
skips=(
DecorateInfo(unittest.skip("Skipped"), 'TestBwdGradients', 'test_fn_grad', dtypes=[torch.float64],
device_type='cuda', active_if=(TEST_WITH_ROCM and TEST_WITH_TORCHINDUCTOR)),
)),
OpInfo('sort',
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_sort,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_non_standard_bool_values',
dtypes=[torch.bool], device_type='cuda', active_if=not TEST_WITH_ROCM),
)),
OpInfo('unique',
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16, torch.uint16, torch.uint32, torch.uint64),
sample_inputs_func=sample_inputs_unique,
supports_out=False,
supports_autograd=False,
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip('Output order is undefined when sorted=False'), 'TestCommon', 'test_compare_cpu'),
)),
OpInfo('unique_consecutive',
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_unique_consecutive,
supports_out=False,
supports_autograd=False,
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('put',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_forward_grad=False,
check_batched_gradgrad=False, # vmap complains of the sizes
sample_inputs_func=sample_inputs_put),
OpInfo('take',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
check_batched_grad=False, # vmap complains of the sizes
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_take,
error_inputs_func=error_inputs_take),
OpInfo('scatter',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_scatter,
error_inputs_func=error_inputs_scatter_and_scatter_add,
skips=(
# Compiler issue on ROCm. Regression started in ROCm 6.4.
DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_non_standard_bool_values',
dtypes=[torch.bool], active_if=TEST_WITH_ROCM),
)),
UnaryUfuncInfo(
'bfloat16',
op=lambda x, *args, **kwargs: x.bfloat16(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
skips=(
# autograd tests don't handle operators that change dtype
DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'),
DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'),
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'),
)),
UnaryUfuncInfo(
'bool',
op=lambda x, *args, **kwargs: x.bool(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
                       # RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
UnaryUfuncInfo(
'byte',
op=lambda x, *args, **kwargs: x.byte(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_byte,
# The autograd test runner cannot handle functions that change dtype
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'),
)),
UnaryUfuncInfo(
'char',
op=lambda x, *args, **kwargs: x.char(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
# The autograd test runner cannot handle functions that change dtype
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'),
)),
UnaryUfuncInfo(
'double',
op=lambda x, *args, **kwargs: x.double(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
UnaryUfuncInfo(
'float',
op=lambda x, *args, **kwargs: x.float(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
skips=(
# autograd tests don't handle operators that change dtype
DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'),
DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'),
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
UnaryUfuncInfo(
'half',
op=lambda x, *args, **kwargs: x.half(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
supports_autograd=True,
skips=(
# autograd tests don't handle operators that change dtype
DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'),
DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'),
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
UnaryUfuncInfo(
'int',
op=lambda x, *args, **kwargs: x.int(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'),
)),
UnaryUfuncInfo(
'long',
op=lambda x, *args, **kwargs: x.long(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'),
)),
UnaryUfuncInfo(
'short',
op=lambda x, *args, **kwargs: x.short(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'),
)),
UnaryUfuncInfo(
'cdouble',
op=torch.Tensor.cdouble,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'),
)),
UnaryUfuncInfo(
'cfloat',
op=torch.Tensor.cfloat,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
skips=(
# autograd tests don't handle operators that change dtype
DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'),
DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'),
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'),
)),
UnaryUfuncInfo(
'chalf',
op=lambda x, *args, **kwargs: x.chalf(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
skips=(
# autograd tests don't handle operators that change dtype
DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'),
DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'),
# use of lambda doesn't work with test_normalize_operator_exhaustive
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# RuntimeError: "sum_cpu" not implemented for 'ComplexHalf'
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager',
device_type='cpu'),
# TypeError: 'int' object is not iterable
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# RuntimeError: "sum_cpu" not implemented for 'ComplexHalf'
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view',
device_type='cpu'),
# RuntimeError: "sum_cpu" not implemented for 'ComplexHalf'
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view',
device_type='cpu'),
# RuntimeError: "sum_cpu" not implemented for 'ComplexHalf'
# RuntimeError: "neg_conj_cuda" not implemented for 'ComplexHalf'
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
)
),
OpInfo('empty_like',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=sample_inputs_like_fns,
reference_inputs_func=reference_inputs_like_fns,
supports_autograd=False,
skips=(
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"),
"TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_complex_half_reference_testing'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'),
DecorateInfo(unittest.skip("Expected: empty_like is not comparable"), 'TestCompositeCompliance',
'test_operator'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
)),
OpInfo('zeros_like',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=sample_inputs_like_fns,
supports_autograd=False,
error_inputs_sparse_func=error_inputs_sparse_like_fns,
sample_inputs_sparse_coo_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_coo),
sample_inputs_sparse_csr_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_csr),
sample_inputs_sparse_csc_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_csc),
sample_inputs_sparse_bsr_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_bsr),
sample_inputs_sparse_bsc_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_bsc),
skips=(
)),
OpInfo('ones_like',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=sample_inputs_like_fns,
supports_autograd=False,
skips=(
)),
OpInfo('randn',
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16, torch.complex32),
op=lambda *args, **kwargs: wrapper_set_seed(torch.randn, *args, **kwargs),
supports_out=True,
sample_inputs_func=sample_inputs_randn,
supports_autograd=False,
skips=(
# Tests that assume input is a tensor or sequence of tensors
DecorateInfo(unittest.skip("Test expects tensor input"), "TestCommon", "test_noncontiguous_samples"),
DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"),
DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"),
# CPU randn generates different values based on the strides of out tensor
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cpu'),
# randn fails to warn when resizing its out tensor
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# FX failed to normalize op - add the op to the op_skip list.
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# Tests that assume input tensor has a meaningful effect on output tensor
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'),
)),
OpInfo('randn_like',
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16, torch.complex32),
op=lambda inp, *args, **kwargs:
wrapper_set_seed(torch.randn_like, inp, *args, **kwargs),
supports_out=False,
sample_inputs_func=sample_inputs_like_fns,
supports_autograd=False,
error_inputs_sparse_func=error_inputs_sparse_like_fns,
sample_inputs_sparse_coo_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_coo),
sample_inputs_sparse_csr_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_csr),
sample_inputs_sparse_csc_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_csc),
sample_inputs_sparse_bsr_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_bsr),
sample_inputs_sparse_bsc_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_bsc),
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip("Expected: randn_like is not comparable between dtypes"),
'TestCommon', 'test_complex_half_reference_testing'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
)),
OpInfo('rand_like',
dtypes=floating_types_and(torch.half, torch.bfloat16, torch.complex32, torch.complex64, torch.complex128),
op=lambda inp, *args, **kwargs:
wrapper_set_seed(torch.randn_like, inp, *args, **kwargs),
supports_out=False,
sample_inputs_func=sample_inputs_like_fns,
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip("Expected: randn_like is not comparable between dtypes"),
'TestCommon', 'test_complex_half_reference_testing'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
)),
OpInfo('randint',
dtypes=all_types_and(torch.half, torch.bfloat16),
op=lambda *args, **kwargs:
wrapper_set_seed(torch.randint, *args, **kwargs),
supports_out=False,
sample_inputs_func=sample_inputs_randint,
supports_autograd=False,
skips=(
# Tests that assume input is a tensor or sequence of tensors
DecorateInfo(unittest.skip("Test expects tensor input"), "TestCommon", "test_noncontiguous_samples"),
DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"),
DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"),
# CPU randint generates different values based on the strides of out tensor
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
# randint fails to warn when resizing its out tensor
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# FX failed to normalize op - add the op to the op_skip list.
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# Tests that assume input tensor has a meaningful effect on output tensor
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# Might need to skip until ROCm5.5
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_multiple_devices',
dtypes=[torch.float32, torch.int64], active_if=TEST_WITH_ROCM),
)),
OpInfo('randint_like',
dtypes=all_types_and(torch.half, torch.bfloat16),
op=lambda inp, *args, **kwargs:
wrapper_set_seed(torch.randint_like, inp, *args, **kwargs),
supports_out=False,
sample_inputs_func=sample_inputs_randint_like,
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
)),
OpInfo('full_like',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16,
torch.uint16, torch.uint32),
supports_out=False,
sample_inputs_func=sample_inputs_full_like,
supports_autograd=False,
),
OpInfo('new_zeros',
op=lambda x, *args, **kwargs: x.new_zeros(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=sample_inputs_new_fns,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
),
supports_autograd=False),
OpInfo('new_ones',
op=lambda x, *args, **kwargs: x.new_ones(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=sample_inputs_new_fns,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
),
supports_autograd=False),
OpInfo('ones',
op=torch.ones,
supports_autograd=False,
supports_varargs=True,
is_factory_function=True,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=True,
sample_inputs_func=sample_inputs_ones_zeros,
skips=(
# Tests that assume input is a tensor or sequence of tensors
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
# Same failure as arange: cannot find linspace in captured graph
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
)),
OpInfo('zeros',
op=torch.zeros,
supports_autograd=False,
is_factory_function=True,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=True,
sample_inputs_func=sample_inputs_ones_zeros,
skips=(
# Tests that assume input is a tensor or sequence of tensors
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
# Same failure as arange: cannot find linspace in captured graph
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
)),
OpInfo('full',
op=torch.full,
supports_autograd=False,
is_factory_function=True,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=True,
sample_inputs_func=sample_inputs_full,
skips=(
# Tests that assume input is a tensor or sequence of tensors
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
# Same failure as arange: cannot find linspace in captured graph
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# RuntimeError: UNSUPPORTED DTYPE: bool
DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bool,)),
)),
OpInfo('new_empty',
op=lambda x, *args, **kwargs: x.new_empty(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=sample_inputs_new_fns,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'),
DecorateInfo(unittest.skip("Expected: new_empty is not comparable"), 'TestCompositeCompliance',
'test_operator'),
DecorateInfo(unittest.skip("Expected: new_empty is not comparable"),
'TestCommon', 'test_complex_half_reference_testing'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
),
supports_autograd=False),
OpInfo('new_empty_strided',
op=lambda x, *args, **kwargs: x.new_empty_strided(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=partial(sample_inputs_new_fns, is_strided=True),
supports_autograd=False,
skips=(
# FX failed to normalize op
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# Lazy tensor failures
DecorateInfo(unittest.skip("Skipped!"), 'TestLazyOpInfo', 'test_correctness'),
DecorateInfo(unittest.skip("Skipped!"), 'TestLazyOpInfo', 'test_correctness_with_reusing_ir'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"),
'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"),
'TestCommon', 'test_noncontiguous_samples'),
DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"),
'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"),
'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"),
'TestMathBits', 'test_neg_conj_view'),
DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"),
'TestCommon', 'test_non_standard_bool_values'),
DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"),
'TestCommon', 'test_complex_half_reference_testing'),
DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"),
'TestCompositeCompliance', 'test_operator'),
DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"),
'TestDecomp', 'test_comprehensive'),
DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"),
'TestDecomp', 'test_quick'),
DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"),
'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"),
'TestProxyTensorOpInfo', 'test_make_fx_exhaustive'),
DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"),
'TestProxyTensorOpInfo', 'test_make_fx_fake_exhaustive'),
DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"),
'TestProxyTensorOpInfo', 'test_make_fx_symbolic_exhaustive'),
DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"),
'TestNNCOpInfo', 'test_nnc_correctness'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
)),
OpInfo('empty_strided',
op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.empty_strided, inp, *args, **kwargs),
dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.half),
supports_out=False,
supports_autograd=False,
sample_inputs_func=sample_inputs_empty_strided,
skips=(
# FX failed to normalize op - add the op to the op_skip list.
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'),
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'),
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_compare_cpu'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCompositeCompliance', 'test_operator'),
# Lazy tensor failures
DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestLazyOpInfo'),
# RuntimeError: unsupported operation: more than one element of the written-to tensor refers to a single
# memory location. Please clone() the tensor before performing the operation.
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_outplace'),
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace'),
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides'),
)),
OpInfo('empty',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
sample_inputs_func=sample_inputs_empty,
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCompositeCompliance',
'test_operator'),
# requires_grad doesn't exist in the jit schema
DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestCommon',
'test_out'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestCommon',
'test_out_warning'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestLazyOpInfo'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestCommon', 'test_complex_half_reference_testing'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
)),
OpInfo('eye',
dtypes=all_types_complex_float8_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_eye,
error_inputs_func=error_inputs_eye,
supports_out=True,
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# TODO: same as this?
# https://github.com/pytorch/pytorch/issues/81774
# also see: arange, new_full
# fails to match any schemas despite working in the interpreter
DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
# fails to match any schemas despite working in the interpreter
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# skip these tests since we have non tensor input
DecorateInfo(unittest.skip('Skipped!'), "TestCommon", "test_noncontiguous_samples"),
DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'),
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'),
# UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# "mul_cpu_reduced_float" not implemented for 'Float8_e4m3fn'
DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness',
dtypes=(torch.float8_e4m3fn, torch.float8_e4m3fnuz, torch.float8_e5m2, torch.float8_e5m2fnuz)),
)),
OpInfo('empty_permuted',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
sample_inputs_func=sample_inputs_empty_permuted,
error_inputs_func=error_inputs_empty_permuted,
supports_out=False,
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'),
DecorateInfo(unittest.skip("Expected: empty_permuted is not comparable"), 'TestCompositeCompliance',
'test_operator'),
# requires_grad doesn't exist in the jit schema
DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
DecorateInfo(unittest.skip("Expected: empty_permuted is not comparable"),
'TestCommon',
'test_out'),
DecorateInfo(unittest.skip("Expected: empty_permuted is not comparable"),
'TestCommon',
'test_out_warning'),
DecorateInfo(unittest.skip("Expected: empty_permuted is not comparable"),
'TestLazyOpInfo'),
DecorateInfo(unittest.skip("Expected: empty_permuted is not comparable"),
'TestCommon', 'test_complex_half_reference_testing'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
)),
OpInfo('scalar_tensor',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
sample_inputs_func=sample_inputs_scalar_tensor,
supports_autograd=False,
supports_out=False,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# fails to match any schemas despite working in the interpreter
DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
# fails to match any schemas despite working in the interpreter
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# skip these tests since we have non tensor input
DecorateInfo(unittest.skip('Skipped!'), "TestCommon", "test_noncontiguous_samples"),
DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'),
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'),
)),
OpInfo('new_full',
op=lambda x, *args, **kwargs: x.new_full(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=sample_inputs_new_full,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
),
supports_autograd=False),
OpInfo('multinomial',
op=lambda inp, *args, **kwargs:
wrapper_set_seed(torch.multinomial, inp, *args, **kwargs),
method_variant=lambda inp, *args, **kwargs:
wrapper_set_seed(torch.Tensor.multinomial, inp, *args, **kwargs),
dtypes=floating_types_and(torch.bfloat16, torch.half),
supports_out=True,
sample_inputs_func=sample_inputs_multinomial,
error_inputs_func=error_inputs_multinomial,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# Strides are not the same!
# This may not be reproducible in CI
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')),
supports_autograd=False),
OpInfo('normal',
op=lambda inp, *args, **kwargs:
wrapper_set_seed(torch.normal, inp, *args, **kwargs),
# The inplace variant (Tensor.normal_) is different from torch.normal
inplace_variant=None,
dtypes=floating_types_and(torch.bfloat16, torch.half),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.half),
supports_out=True,
sample_inputs_func=sample_inputs_normal_tensor_first,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# Tensor-likes are not close!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# Computed gradient is incorrect -- would be an exfail but gradgrad somehow passes
DecorateInfo(unittest.skip("Gradients are incorrect!"), 'TestFwdGradients'),
DecorateInfo(unittest.skip("Gradients are incorrect!"), 'TestBwdGradients'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
# RuntimeError: Difference from {dtype} is larger with decomposition
DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_comprehensive'),
DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_quick'),
# The inplace variant (Tensor.normal_) is different from torch.normal
# inplace variant Tensor.normal_ is decomposed using randn_like()
DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides'))),
OpInfo('normal',
# This has its own variant b/c OpInfos assume the first arg is a Tensor but it is not here
variant_test_name='number_mean',
op=lambda std, mean, *args, **kwargs:
wrapper_set_seed(torch.normal, mean, std, *args, **kwargs),
# The inplace variant (Tensor.normal_) is different from torch.normal
inplace_variant=None,
dtypes=floating_types_and(torch.bfloat16, torch.half),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.half),
supports_out=True,
sample_inputs_func=sample_inputs_normal_tensor_second,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out_warning'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward'),
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'),
DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_compare_cpu'),
DecorateInfo(unittest.skip("Skipped!"), 'TestEagerFusionOpInfo'),
DecorateInfo(unittest.skip("Skipped!"), 'TestOperators'),
# AssertionError
DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_comprehensive'),
# AssertionError
DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_quick'),
# AssertionError in CUDA variant
DecorateInfo(unittest.skip("Skipped!"), 'TestFakeTensor', device_type='cuda'),
DecorateInfo(unittest.skip("Skipped!"), 'TestDeviceUtils', 'test_device_mode_ops'))),
OpInfo('bernoulli',
op=lambda inp, *args, **kwargs:
wrapper_set_seed(torch.bernoulli, inp, *args, **kwargs),
# The inplace variant (Tensor.bernoulli_) is different from torch.bernoulli
inplace_variant=None,
method_variant=lambda inp, *args, **kwargs:
wrapper_set_seed(torch.Tensor.bernoulli, inp, *args, **kwargs),
dtypes=floating_types_and(torch.bfloat16, torch.half),
supports_out=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_bernoulli,
error_inputs_func=error_inputs_bernoulli,
skips=(
# vmap: We do not yet support calling random operations inside of vmap
DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD'),
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# Expected RuntimeError when doing an unsafe cast from a result of
# dtype torch.float32 into an out= with dtype torch.lon
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
# UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'))),
OpInfo('scatter_add',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
inplace_variant=torch.Tensor.scatter_add_,
sample_inputs_func=sample_inputs_scatter_add,
error_inputs_func=error_inputs_scatter_and_scatter_add,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# Compiler issue on ROCm. Regression started in ROCm 6.4.
DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_non_standard_bool_values',
dtypes=[torch.bool], active_if=TEST_WITH_ROCM),
)),
OpInfo('stack',
dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_stack,
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# https://github.com/pytorch/pytorch/issues/77046
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
)),
OpInfo('_chunk_cat',
dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_chunk_cat,
error_inputs_func=error_inputs_chunk_cat,
supports_autograd=False,
supports_out=True,
),
OpInfo('hstack',
dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_hstack_dstack_vstack,
error_inputs_func=error_inputs_hstack_dstack_vstack,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
),
BinaryUfuncInfo('hypot',
dtypes=floating_types_and(torch.bfloat16, torch.half),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_rhs_python_scalar=False),
OpInfo('histogram',
dtypes=floating_types(),
dtypesIfCUDA=_dispatch_dtypes(), # histogram is only implemented on CPU
sample_inputs_func=sample_inputs_histogram,
supports_autograd=False,
skips=(
# JIT tests don't work with Tensor keyword arguments
# https://github.com/pytorch/pytorch/issues/58507
# RuntimeError:
# undefined value tensor:
# File "<string>", line 3
# def the_method(i0):
# return torch.histogram(i0, 1, weight=tensor(-0.5735, dtype=torch.float32), density=False)
# ~~~~~~ <--- HERE
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# Not Implemented on XLA.
DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla'),
)),
OpInfo('histogramdd',
dtypes=floating_types(),
dtypesIfCUDA=_dispatch_dtypes(), # histogramdd is only implemented on CPU
sample_inputs_func=sample_inputs_histogramdd,
error_inputs_func=error_inputs_histogramdd,
supports_autograd=False,
skips=(
# Not implemented on CUDA
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_errors', device_type='cuda'),
# JIT tests don't work with Tensor keyword arguments
# https://github.com/pytorch/pytorch/issues/58507
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('histc',
dtypes=floating_types_and(torch.bfloat16, torch.float16),
dtypesIfCUDA=floating_types_and(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64),
sample_inputs_func=sample_inputs_histc,
supports_out=True,
supports_autograd=False,
skips=(
# CUDA histc returns a float tensor but does not correctly warn when passed an integral out tensor
# "AssertionError: RuntimeError not raised : Expected RuntimeError when doing an unsafe cast
# from a result of dtype torch.float32 into an out= with dtype torch.long"
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cuda'),
)),
OpInfo('bincount',
dtypes=integral_types_and(),
sample_inputs_func=sample_inputs_bincount,
supports_out=False,
supports_autograd=False,
skips=(
# JIT tests don't work with Tensor keyword arguments
# https://github.com/pytorch/pytorch/issues/58507
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('bucketize',
dtypes=all_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_bucketize,
reference_inputs_func=reference_inputs_bucketize,
error_inputs_func=error_inputs_bucketize,
supports_autograd=False,
skips=(
# JIT tests don't work with Tensor keyword arguments
DecorateInfo(unittest.skip("Expected failure!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('searchsorted',
dtypes=all_types_and(torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_searchsorted,
supports_autograd=False,
ref=reference_searchsorted,
skips=(
# JIT tests don't work with Tensor keyword arguments
# https://github.com/pytorch/pytorch/issues/58507
DecorateInfo(unittest.skip("Expected failure!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('cat',
ref=_cat_np,
aliases=('concat', 'concatenate'),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.complex32),
sample_inputs_func=sample_inputs_cat_concat,
reference_inputs_func=reference_inputs_cat,
error_inputs_func=error_inputs_cat,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
assert_autodiffed=True,
skips=(
# https://github.com/pytorch/pytorch/issues/89353
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref_mps'),
# RuntimeError: Arguments for call not valid.
# Expected a value of type 'List[Tensor]' for argument
# 'tensors' but instead found type 'Tensor (inferred)'.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),
# see https://github.com/pytorch/pytorch/issues/71286
DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'),
# see https://github.com/pytorch/pytorch/issues/99806
# RuntimeError: The size of tensor a (25) must match the size of tensor b (0) at non-singleton dimension 0.
DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_gradgrad'),
)),
OpInfo('unbind',
dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16),
ref=reference_unbind,
sample_inputs_func=sample_inputs_unbind,
error_inputs_func=error_inputs_unbind,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_gradgrad=True,
supports_out=False,
),
OpInfo('unbind_copy',
dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16),
ref=reference_unbind,
sample_inputs_func=sample_inputs_unbind,
error_inputs_func=error_inputs_unbind,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_gradgrad=True,
supports_out=True,
check_batched_grad=False,
),
OpInfo('vstack',
aliases=('row_stack',),
dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_hstack_dstack_vstack,
error_inputs_func=error_inputs_hstack_dstack_vstack,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# RuntimeError: _fn() Expected a value of type
# 'Tensor (inferred)' for argument 't0' but instead found type 'tuple'.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),)),
OpInfo('dstack',
dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_hstack_dstack_vstack,
error_inputs_func=error_inputs_hstack_dstack_vstack,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
),
OpInfo('unfold',
op=lambda x, *args: x.unfold(*args),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
backward_dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_gradgrad=False,
# See https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# Skip operator schema test because this is a functional and not an operator
DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
),
sample_inputs_func=sample_inputs_unfold),
OpInfo('unfold_copy',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
backward_dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_gradgrad=False,
# See https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_unfold),
OpInfo('msort',
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.float16, torch.bfloat16),
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_msort),
OpInfo('movedim',
aliases=('moveaxis',),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_movedim_moveaxis,
reference_inputs_func=reference_movedim_moveaxis,
error_inputs_func=error_movedim_moveaxis),
OpInfo('renorm',
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_renorm,
error_inputs_func=error_inputs_renorm,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# RuntimeError: Difference from float64 is larger with decomposition
# linalg_vector_norm.default than original on output 0.
# Original max diff: 2.560596747969157e-07,
# Decomp max diff: 1.8187482915266173e-06
DecorateInfo(unittest.skip("Inconsistent accuracy"), 'TestDecomp', 'test_comprehensive',
device_type='cpu', dtypes=(torch.float16,)),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=3e-4, rtol=3e-6)}),
"TestConsistency", "test_output_match", device_type="mps"),
)),
ShapeFuncInfo('repeat',
op=lambda x, dims: x.repeat(dims),
ref=np.tile,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_repeat_tile,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
)),
OpInfo('squeeze',
ref=_squeeze_ref,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
supports_out=False,
assert_autodiffed=True,
autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused
autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused
assert_jit_shape_analysis=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# vmap does not support inplace views
check_inplace_batched_forward_grad=False,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_squeeze),
OpInfo('squeeze',
ref=_squeeze_ref,
variant_test_name="multiple",
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
supports_out=False,
assert_autodiffed=True,
autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused
autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# vmap does not support inplace views
check_inplace_batched_forward_grad=False,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_squeeze_multiple),
OpInfo('squeeze_copy',
ref=_squeeze_ref,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
supports_out=True,
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# vmap does not support inplace views
check_inplace_batched_forward_grad=False,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_squeeze,
skips=(
DecorateInfo(
unittest.expectedFailure,
'TestJit',
'test_variant_consistency_jit',
dtypes=(torch.float32,),
),
)),
UnaryUfuncInfo(
'fill',
ref=_fill_np,
method_variant=None,
sample_kwargs=_fill_sample_kwargs,
sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'value': True}),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
skips=(
# JIT has issue when op is passed as lambda
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip("No fill_ op"), 'TestCudaFuserOpInfo'),
DecorateInfo(unittest.skip("No fill_ op"), 'TestNNCOpInfo'),
)),
OpInfo('resize_',
op=lambda x, shape: x.clone().resize_(shape),
method_variant=None,
inplace_variant=torch.Tensor.resize_,
# the test fails because resize_ doesn't work with imag views as expected by the test
# https://github.com/pytorch/pytorch/issues/65945
test_neg_view=False,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_autograd=False,
skips=(
# Cannot resize variables that require grad
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'),
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
DecorateInfo(unittest.skip("Allowed exception"), 'TestCompositeCompliance', 'test_operator'),
),
sample_inputs_func=sample_inputs_resize_ops),
OpInfo('resize_as_',
op=lambda x, other: torch.resize_as_(x.clone(), other),
method_variant=None,
inplace_variant=torch.Tensor.resize_as_,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_autograd=False,
skips=(
# Cannot resize variables that require grad
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'),
DecorateInfo(unittest.skip('Allowed exemption'), 'TestCompositeCompliance', 'test_operator'),
),
sample_inputs_func=sample_inputs_resize_ops),
OpInfo('take_along_dim',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_inplace_autograd=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_take_along_dim,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=(
# RuntimeError: view size is not compatible with input tensor's size and stride
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"),
)),
ShapeFuncInfo('tile',
ref=np.tile,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_repeat_tile),
OpInfo('trapz', # TODO: in the future, 'trapz' should be made a proper alias of 'trapezoid'
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
decorators=[
DecorateInfo(
toleranceOverride({torch.half: tol(atol=9e-4, rtol=4.3e-3)}),
'TestInductorOpInfo', 'test_comprehensive', device_type='cuda'
),
],
sample_inputs_func=sample_trapezoid),
OpInfo('trapezoid',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
decorators=[
DecorateInfo(
toleranceOverride({torch.half: tol(atol=9e-4, rtol=4.3e-3)}),
'TestInductorOpInfo', 'test_comprehensive', device_type='cuda'
),
],
sample_inputs_func=sample_trapezoid),
OpInfo('cumulative_trapezoid',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
supports_out=False,
decorators=(
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=4e-3, rtol=4e-3)}),
'TestInductorOpInfo', 'test_comprehensive',
),
),
sample_inputs_func=sample_cumulative_trapezoid,),
OpInfo('unsqueeze',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
# vmap does not support inplace views
check_inplace_batched_forward_grad=False,
assert_jit_shape_analysis=True,
assert_autodiffed=True,
autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused
autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused
sample_inputs_func=sample_unsqueeze),
OpInfo('unsqueeze_copy',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
supports_out=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
# vmap does not support inplace views
check_inplace_batched_forward_grad=False,
assert_jit_shape_analysis=True,
assert_autodiffed=True,
autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused
autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused
sample_inputs_func=sample_unsqueeze,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestDTensorOps', 'test_dtensor_op_db'),
DecorateInfo(
unittest.expectedFailure,
'TestJit',
'test_variant_consistency_jit',
dtypes=(torch.float32,),
),
)),
BinaryUfuncInfo('xlogy',
aliases=('special.xlogy',),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
promotes_int_to_float=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_one_python_scalar=True,
# We don't test 0 as the gradient will be NaN and it'll break
rhs_make_tensor_kwargs=dict(low=0.01)),
OpInfo('zero_',
op=lambda x: torch.zero_(x.clone()),
method_variant=None,
inplace_variant=torch.Tensor.zero_,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_gradgrad=True,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
sample_inputs_func=sample_inputs_zero_),
OpInfo('logsumexp',
aliases=('special.logsumexp',),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
gradcheck_fast_mode=False,
sample_inputs_func=sample_inputs_logsumexp,
reference_inputs_func=reference_inputs_logsumexp),
OpInfo('trace',
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
error_inputs_func=error_inputs_trace,
supports_inplace_autograd=False,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_trace),
OpInfo('transpose',
ref=_numpy_ref_transpose,
aliases=('swapdims', 'swapaxes'),
assert_jit_shape_analysis=True,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# vmap does not support inplace views
check_inplace_batched_forward_grad=False,
sample_inputs_func=sample_inputs_transpose_swapdims),
OpInfo('transpose_copy',
assert_jit_shape_analysis=True,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf),
supports_out=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# vmap does not support inplace views
check_inplace_batched_forward_grad=False,
sample_inputs_func=sample_inputs_transpose_swapdims,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestDTensorOps', 'test_dtensor_op_db'),
DecorateInfo(
unittest.expectedFailure,
'TestJit',
'test_variant_consistency_jit',
dtypes=(torch.float32,)
),
)),
OpInfo('T',
op=lambda x: x.T,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),),
sample_inputs_func=sample_inputs_T,
error_inputs_func=error_inputs_T),
OpInfo('H',
op=lambda x: x.H,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),),
sample_inputs_func=sample_inputs_T),
OpInfo('mT',
op=lambda x: x.mT,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),),
sample_inputs_func=sample_inputs_adjoint),
OpInfo('mH',
op=lambda x: x.mH,
aliases=('adjoint',),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),),
sample_inputs_func=sample_inputs_adjoint),
OpInfo('tril',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
error_inputs_func=error_inputs_tril_triu,
sample_inputs_func=sample_inputs_tril_triu,
skips=(
# Compiler issue on ROCm. Regression started in ROCm 6.4.
DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_non_standard_bool_values',
dtypes=[torch.bool], active_if=TEST_WITH_ROCM),
)),
OpInfo('triu',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
error_inputs_func=error_inputs_tril_triu,
sample_inputs_func=sample_inputs_tril_triu,
skips=(
# Compiler issue on ROCm. Regression started in ROCm 6.4.
DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_non_standard_bool_values',
dtypes=[torch.bool], active_if=TEST_WITH_ROCM),
)),
OpInfo('triu_indices',
dtypes=_dispatch_dtypes((torch.int32, torch.int64)),
sample_inputs_func=sample_inputs_trilu_indices,
ref=lambda h, w, ofs=0, dtype=torch.long, device='cpu' : np.array(np.triu_indices(h, ofs, w), dtype=dtype),
supports_out=False,
supports_autograd=False,
skips=(
# skip these tests since we have non tensor input
DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_noncontiguous_samples'),
DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip('Skipped!'), 'TestMathBits', 'test_neg_view'),
)),
OpInfo('tril_indices',
dtypes=_dispatch_dtypes((torch.int32, torch.int64)),
sample_inputs_func=sample_inputs_trilu_indices,
ref=lambda h, w, ofs=0, dtype=torch.long, device='cpu' : np.array(np.tril_indices(h, ofs, w), dtype=dtype),
supports_out=False,
supports_autograd=False,
skips=(
# skip these tests since we have non tensor input
DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_noncontiguous_samples'),
DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip('Skipped!'), 'TestMathBits', 'test_neg_view'),
)),
OpInfo('kron',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_inplace_autograd=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_kron,
decorators=(
# RuntimeError: view size is not compatible with input tensor's size and stride
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"),
)),
OpInfo('inner',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_inner,
),
OpInfo('tensordot',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_tensordot,
skips=(
# Skip operator schema test because this is a functional and not an operator.
# Reference: https://github.com/pytorch/pytorch/issues/54574
DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
)
),
OpInfo('to_sparse',
op=lambda x, *args: x.to_sparse(*args),
sample_inputs_func=sample_inputs_to_sparse,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
backward_dtypes=floating_types(),
backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_sparse_csr=True,
supports_sparse_csc=True,
check_batched_grad=False,
check_batched_gradgrad=False,
skips=(
# NotImplementedError: Could not run 'aten::normal_' with arguments from the 'SparseCPU' backend
DecorateInfo(unittest.skip(""), 'TestCommon', 'test_noncontiguous_samples'),
# TODO: FIXME: complex inputs requiring grad error in forward
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'),
# lambda impl
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# Allowed exception: sparse tensors don't have strides
DecorateInfo(unittest.skip("Allowed exception"), 'TestCompositeCompliance', 'test_operator'),
DecorateInfo(unittest.skip("Allowed exception"), 'TestCompositeCompliance', 'test_backward'),
DecorateInfo(unittest.skip("Allowed exception"), 'TestTags', 'test_tags'),
# TODO: implement csr.to_sparse(sample_dim) where sampled_dim is 1.
DecorateInfo(unittest.skip("csr.to_sparse(1) not implemented. Skipped!"),
'TestSparseCSR', 'test_sparse_csr_consistency'),
# Compiler issue on ROCm. Might need to skip until ROCm5.5
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values',
dtypes=[torch.bool], active_if=TEST_WITH_ROCM),
)
),
OpInfo('logcumsumexp',
dtypes=floating_and_complex_types_and(torch.bfloat16, torch.half),
backward_dtypes=floating_and_complex_types_and(torch.bfloat16, torch.half),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# AssertionError: UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type='cuda'),
# RuntimeError: "max_values_cpu" not implemented for 'ComplexDouble'
# Falling back to non-numerically stabilized exp, causing nan in the results.
DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD', dtypes=[torch.complex128]),
DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_fn_fwgrad_bwgrad', dtypes=[torch.complex128]),
DecorateInfo(
toleranceOverride({
torch.float16: tol(atol=7e-5, rtol=6e-3),
}),
"TestInductorOpInfo",
"test_comprehensive",
device_type="cuda"
),
),
sample_inputs_func=sample_inputs_logcumsumexp,
error_inputs_func=error_inputs_logcumsumexp),
UnaryUfuncInfo('sigmoid',
aliases=('special.expit', 'nn.functional.sigmoid'),
aten_backward_name='sigmoid_backward',
ref=reference_sigmoid if TEST_SCIPY else None,
decorators=(precisionOverride({torch.float16: 1e-2,
torch.complex64: 1e-1,
torch.bfloat16: 1e-2}),),
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/56012
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.complex64, torch.cdouble], device_type='cuda'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=[torch.chalf, torch.complex64, torch.cdouble], device_type='cuda')),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.complex32, torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_float=True,
assert_autodiffed=True,
# sigmoid(z) = 1 / (1 + exp(-z)), at z = j * pi * odd_number, the denominator is zero
reference_numerics_filter=NumericsFilter(
condition=lambda x: (close_to_int(x / (math.pi * 1j))
if x.is_complex() else x.new_tensor(False, dtype=torch.bool)),
safe_val=0)),
UnaryUfuncInfo('digamma',
ref=scipy.special.digamma if TEST_SCIPY else None,
aliases=('special.psi', 'special.digamma',),
decorators=(precisionOverride({torch.float16: 5e-1}),),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_float=True),
UnaryUfuncInfo('erf',
ref=scipy.special.erf if TEST_SCIPY else None,
aliases=('special.erf', ),
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-2}),),
skips=(
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
assert_jit_shape_analysis=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_float=True),
UnaryUfuncInfo('erfc',
ref=scipy.special.erfc if TEST_SCIPY else None,
aliases=('special.erfc', ),
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-2}),),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_float=True),
UnaryUfuncInfo('erfinv',
ref=scipy.special.erfinv if TEST_SCIPY else None,
aliases=('special.erfinv', ),
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-2,
torch.float32: 1e-4}),),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_float=True,
domain=(-1, 1),
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/49155#issuecomment-742664611
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")),
)),
OpInfo("nn.functional.smooth_l1_loss",
ref=reference_smooth_l1_loss,
sample_inputs_func=sample_inputs_smooth_l1_loss,
dtypes=floating_types_and(torch.float16, torch.bfloat16),
backward_dtypes=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# RuntimeError: input->type()->kind() == TypeKind::OptionalTypeINTERNAL ASSERT FAILED
# at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),)),
OpInfo(
"nn.functional.l1_loss",
ref=loss_reference_reduction_wrapper(lambda input, target: np.abs(input - target)),
sample_inputs_func=sample_inputs_l1_loss,
error_inputs_func=error_inputs_l1_loss,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# RuntimeError: input->type()->kind() == TypeKind::OptionalTypeINTERNAL ASSERT FAILED
# at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, please report a bug to PyTorch.
DecorateInfo(
unittest.expectedFailure,
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32,),
),
),
),
UnaryUfuncInfo('lgamma',
ref=reference_lgamma if TEST_SCIPY else None,
aliases=('special.gammaln', ),
decorators=(precisionOverride({torch.float16: 7e-1}),),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_float=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/50140#issuecomment-756150214
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),
),
# lgamma have multiple singularities at x <= 0
reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),
OpInfo(
'logdet',
dtypes=floating_and_complex_types(),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_linalg_det_logdet_slogdet,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack]),
# `log_softmax` supports different dtypes based on whether `dtype` argument,
# is passed or not. Hence two OpInfo entries, one with dtype and other without.
OpInfo(
'log_softmax',
aliases=('special.log_softmax', 'nn.functional.log_softmax'),
supports_out=True,
aten_backward_name='_log_softmax_backward_data',
dtypes=floating_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_softmax_variant,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=True),
OpInfo(
'log_softmax',
variant_test_name='with_dtype',
aliases=('special.log_softmax', 'nn.functional.log_softmax'),
supports_out=True,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=True),
UnaryUfuncInfo('logit',
aten_backward_name='logit_backward',
ref=scipy.special.logit if TEST_SCIPY else None,
domain=(0, 1),
aliases=('special.logit', ),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_float=True,
decorators=(precisionOverride({torch.bfloat16: 5e-1,
torch.float16: 5e-1}),),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_logit),
OpInfo('where',
# Currently only the `input` is tested in gradcheck.
# If we pass `condition` first, none of the input which supports
# autograd will be tested. Hence the following lambda.
op=lambda self, condition, other, **kwargs: torch.where(condition, self, other, **kwargs),
ref=lambda self, condition, other: np.where(condition, self, other),
sample_inputs_func=sample_inputs_where,
reference_inputs_func=reference_inputs_where,
error_inputs_func=error_inputs_where,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=(
DecorateInfo(onlyCUDA, "TestCommon", 'test_errors'),),
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf)),
OpInfo('nonzero',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf),
sample_inputs_func=sample_inputs_nonzero,
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# nonzero(): argument 'out' must be Tensor, not tuple
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
# https://github.com/pytorch/pytorch/issues/67458
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# nonzero is not raising a warning when the out is resized
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# Can't find schemas for this operator for some reason
DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
# Compiler issue on ROCm. Might need to skip until ROCm5.5
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values',
dtypes=[torch.bool], active_if=TEST_WITH_ROCM),
)),
OpInfo('nonzero_static',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf),
sample_inputs_func=sample_inputs_nonzero_static,
supports_out=False,
supports_autograd=False,
decorators=[onlyCPU],
skips=(
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
DecorateInfo(unittest.expectedFailure, 'TestDTensorOps', 'test_dtensor_op_db'),
DecorateInfo(unittest.expectedFailure, 'TestInductorOpInfo', 'test_comprehensive'),
DecorateInfo(unittest.expectedFailure, 'TestVmapOperatorsOpInfo', 'test_op_has_batch_rule'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values',
dtypes=[torch.bool], active_if=TEST_WITH_ROCM),
)),
# Following tests are for jiterator's python interface
# Jiterator can be used to author elementwise CUDA kernel
# jiterator._create_jit_fn returns a callable that behaves like a regular pytorch op
# See create_jit_fn in jiterator.py for more information
UnaryUfuncInfo(
'jiterator_unary',
op=torch.cuda.jiterator._create_jit_fn("template <typename T> T unary(T x) { return x * x + x; }"),
ref=lambda x: x * x + x,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool),
supports_out=False,
supports_autograd=False, # jiterator ops doesn't have backward defined
decorators=[
onlyCUDA,
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
'TestUnaryUfuncs', 'test_reference_numerics_extremal'),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
'TestUnaryUfuncs', 'test_reference_numerics_hard'),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
'TestUnaryUfuncs', 'test_reference_numerics_normal'),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
'TestUnaryUfuncs', 'test_reference_numerics_small'),
],
skips=(
# Jiterator ops doesn't support neg or conj view
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
# Jiterator ops doesn't support CompositeCompliantTensor
# Following test should expectedFailure, but it's causing cascading failures in CUDA, thus skipped
DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'),
# Skip reference_numerics tests for bool type, as the defined function doesn't work for bool
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bool]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.bool]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
dtypes=[torch.bool]),
# ROCm generates -inf+infj instead of nan+infj for complex64 for some of the results
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=[torch.complex64], active_if=TEST_WITH_ROCM),
# Newer numpy generates -inf+infj instead of nan+infj for complex64 for some of the results
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=[torch.complex64], device_type='cuda'),
# Expected failure: torch.jiterator_unary is not a valid op
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# Skip Nvfuser
DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'),
)
),
BinaryUfuncInfo(
'jiterator_binary',
op=torch.cuda.jiterator._create_jit_fn(
"template <typename T> T binary(T x, T y, T alpha) { return x + alpha * y; }", alpha=1),
ref=lambda input, other, *, alpha=1: (
np.add(input, other)
if alpha == 1
else np.add(input, np.multiply(alpha, other))
),
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool),
sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=2, alpha=-3.14),
supports_out=False,
supports_autograd=False, # jiterator ops doesn't have backward defined
supports_rhs_python_scalar=False,
decorators=[onlyCUDA],
skips=(
# Jiterator ops doesn't support neg or conj view
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
# Jiterator ops doesn't support CompositeCompliantTensor
# Following test should expectedFailure, but it's causing cascading failures in CUDA, thus skipped
DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'),
# Expected failure: torch.jiterator_binary is not a valid op
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# Skip Nvfuser
DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'),
)
),
OpInfo(
'jiterator_4inputs_with_extra_args',
op=torch.cuda.jiterator._create_jit_fn(
"template <typename T> T binary(T i0, T i1, T i2, T i3, T alpha, T beta) { return alpha * i0 + beta * i1 + i2 + i3; }",
alpha=1, beta=1),
ref=lambda i0, i1, i2, i3, *, alpha=1, beta=1: alpha * i0 + beta * i1 + i2 + i3,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool),
sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=4, alpha=3.14, beta=-4.20),
supports_out=False,
supports_autograd=False, # jiterator ops doesn't have backward defined
decorators=[onlyCUDA],
skips=(
# Jiterator ops doesn't support neg or conj view
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
# Jiterator ops doesn't support CompositeCompliantTensor
# Following test should expectedFailure, but it's causing cascading failures in CUDA, thus skipped
DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'),
# Expected failure: torch.jiterator_4inputs_with_extra_args is not a valid op
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# Skip Nvfuser
DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'),
)
),
BinaryUfuncInfo(
'jiterator_binary_return_by_ref',
op=torch.cuda.jiterator._create_multi_output_jit_fn(
"""
template <typename T>
void binary_return_by_ref(T i0, T i1, T& out0) {
out0 = i0 + i1;
}
""",
num_outputs=1),
ref=operator.add,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool),
sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=2, alpha=-0.42),
supports_out=False,
supports_autograd=False, # jiterator ops doesn't have backward defined
supports_rhs_python_scalar=False,
decorators=[onlyCUDA],
skips=(
# Jiterator ops doesn't support neg or conj view
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
# Jiterator ops doesn't support CompositeCompliantTensor
# Following test should expectedFailure, but it's causing cascading failures in CUDA, thus skipped
DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'),
# Expected failure: torch.jiterator_4inputs_with_extra_args is not a valid op
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# Skip Nvfuser
DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'),
)
),
OpInfo(
'jiterator_2inputs_2outputs',
op=torch.cuda.jiterator._create_multi_output_jit_fn(
"""
template <typename T>
void binary_2outputs(T i0, T i1, T& out0, T& out1) {
out0 = i0 + i1;
out1 = i0 - i1;
}
""",
num_outputs=2),
ref=lambda i0, i1, *, alpha=1: (i0 + i1, i0 - i1),
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool),
sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=2),
supports_out=False,
supports_autograd=False, # jiterator ops doesn't have backward defined
decorators=[onlyCUDA],
skips=(
# Jiterator ops doesn't support neg or conj view
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
# Jiterator ops doesn't support CompositeCompliantTensor
# Following test should expectedFailure, but it's causing cascading failures in CUDA, thus skipped
DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'),
# Expected failure: torch.jiterator_4inputs_with_extra_args is not a valid op
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# Skip Nvfuser
DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'),
)
),
# `torch.norm` has multiple code paths depending on the value of `p`.
# These paths have different dtype support. Also JIT supports,
# most variants but not all of them. So we split the OpInfo entries,
# for `norm` based on the code-paths and JIT support.
OpInfo(
"norm",
sample_inputs_func=sample_inputs_norm,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16, torch.chalf),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
# TODO Benchmark again with the new implementation
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
check_batched_forward_grad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# Dispatches in Python to vector_norm. Not sure how to make this test happy
# Happens to pass on complex64. Also a mystery
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',
dtypes=(torch.float32,)),)
),
OpInfo('norm',
variant_test_name='nuc',
sample_inputs_func=sample_inputs_norm_nuc,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
check_batched_gradgrad=False,
# torch.autograd.gradcheck.GradcheckError: While computing batched gradients
# got: Could not allocate memory to change Tensor SizesAndStrides!
check_batched_forward_grad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types(),
skips=(
# Dispatches in Python to matrix_norm. Not sure how to make this test happy
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',
dtypes=(torch.complex64, torch.float32,)),)
),
OpInfo('norm',
variant_test_name='fro',
sample_inputs_func=sample_inputs_norm_fro,
dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
# torch.autograd.gradcheck.GradcheckError: While computing batched gradients
# got: Could not allocate memory to change Tensor SizesAndStrides!
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
skips=(
# MPS has some mild accuracy issues for float16. We divide the tolerances by 10
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-4, rtol=0.01)}),
'TestConsistency',
'test_output_match',
),
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
'TestSchemaCheckModeOpInfo',
'test_schema_correctness',
dtypes=(torch.complex64, torch.complex128)),
# Dispatches in Python to vector_norm. Not sure how to make this test happy
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',
dtypes=(torch.complex64, torch.float32,)),)
),
OpInfo(
"norm",
variant_test_name="inf",
sample_inputs_func=sample_inputs_norm_inf,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16, torch.chalf),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
# fast gradcheck produces NaNs
gradcheck_fast_mode=False,
skips=(
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=2e-3, rtol=1e-3)}),
'TestInductorOpInfo', 'test_comprehensive', device_type='cuda',
),
# Dispatches in Python to vector_norm. Not sure how to make this test happy
# Happens to pass on complex64. Also a mystery
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',
dtypes=(torch.float32,))
),
),
OpInfo('t',
sample_inputs_func=sample_inputs_t,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
# vmap does not support inplace views
check_inplace_batched_forward_grad=False,
autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused
autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
assert_autodiffed=True,
error_inputs_func=error_inputs_t),
OpInfo('t_copy',
sample_inputs_func=sample_inputs_t,
supports_out=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
# vmap does not support inplace views
check_inplace_batched_forward_grad=False,
autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused
autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
assert_autodiffed=True,
error_inputs_func=error_inputs_t),
OpInfo(
"nn.functional.dropout",
op=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.dropout, input, *args, **kwargs),
dtypes=floating_types_and(torch.float16, torch.bfloat16),
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# Probably because we have used lambda for the op here
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# inplace variant dispatches to dropout kernel, while on CUDA
# the op dispatches to _fused_dropout (with a few more conditions)
# hence, different values and this skip here
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view', device_type='cuda'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
supports_out=False,
sample_inputs_func=sample_inputs_dropout,
inplace_variant=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.dropout, input, *args, **kwargs, inplace=True)),
OpInfo(
"native_dropout_backward",
op=torch.ops.aten.native_dropout_backward.default,
aten_name="native_dropout_backward",
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_dropout_backward,
skips=(
DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'),
# Lazy tensor failures
DecorateInfo(unittest.skip('Skipped!'), 'TestLazyOpInfo', 'test_dispatched_to_lazy'),
# These tests fail only when built with ASAN
DecorateInfo(unittest.skip("Fails with ASAN"), 'TestLazyOpInfo', 'test_correctness', active_if=TEST_WITH_ASAN),
DecorateInfo(
unittest.skip("Fails with ASAN"),
'TestLazyOpInfo',
'test_correctness_with_reusing_ir',
active_if=TEST_WITH_ASAN
),
),
),
OpInfo(
"nn.functional.dropout2d",
op=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.dropout2d, input, *args, **kwargs),
dtypes=floating_types_and(torch.float16, torch.bfloat16),
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
check_batched_forward_grad=False,
# As per the docs, valid input dims are (3, 4)
sample_inputs_func=partial(sample_inputs_dropout, valid_input_dim=(3, 4)),
inplace_variant=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.dropout2d, input, *args, **kwargs, inplace=True)),
OpInfo(
"nn.functional.dropout3d",
op=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.dropout3d, input, *args, **kwargs),
dtypes=floating_types_and(torch.float16, torch.bfloat16),
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
check_batched_forward_grad=False,
# As per the docs, valid input dims are (4, 5)
sample_inputs_func=partial(sample_inputs_dropout, valid_input_dim=(4, 5)),
inplace_variant=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.dropout3d, input, *args, **kwargs, inplace=True)),
OpInfo(
"nn.functional.alpha_dropout",
op=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.alpha_dropout, input, *args, **kwargs),
dtypes=floating_types_and(torch.float16, torch.bfloat16),
gradcheck_wrapper=wrapper_set_seed,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
sample_inputs_func=sample_inputs_dropout,
check_batched_forward_grad=False,
inplace_variant=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.alpha_dropout, input, *args, **kwargs, inplace=True),
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# AssertionError: Tensor-likes are not close!
# Fails in cuda11.7
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu', device_type='cuda'),
# AssertionError: Tensor-likes are not close!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu', device_type='xpu'),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),),),
# In training mode, feature_alpha_dropout currently doesn't support inputs of complex dtype
# unlike when `train=False`, it supports complex inputs, hence 2 OpInfos to cover all cases
OpInfo(
"nn.functional.feature_alpha_dropout",
op=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs),
variant_test_name="with_train",
dtypes=floating_types_and(torch.float16, torch.bfloat16),
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got:
# vmap: We do not yet support calling random operations inside of vmap.
# Please perform random operations outside of vmap as a workaround
DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', "test_forward_mode_AD"),
DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', "test_inplace_forward_mode_AD"),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
# As per the docs, valid input dims are (4, 5)
sample_inputs_func=partial(sample_inputs_dropout, train=True, valid_input_dim=(4, 5)),
inplace_variant=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs, inplace=True)),
OpInfo(
"nn.functional.feature_alpha_dropout",
op=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs),
variant_test_name="without_train",
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),),
gradcheck_wrapper=wrapper_set_seed,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
sample_inputs_func=partial(sample_inputs_dropout, train=False),
inplace_variant=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs, inplace=True)),
OpInfo(
"nn.functional.one_hot",
ref=reference_one_hot,
supports_out=False,
dtypes=_dispatch_dtypes((torch.int64,)),
sample_inputs_func=sample_inputs_one_hot,
),
OpInfo(
"nn.functional.embedding",
aten_backward_name="embedding_dense_backward",
# We use lambda to reshuffle the positional arguments.
# This is because currently only the `input` field of SampleInput
# is tested in gradient tests.
op=lambda weight, idx, **kwargs: torch.nn.functional.embedding(idx, weight, **kwargs),
dtypes=floating_types_and(torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_embedding,
allow_cow_input_materialize_forward=[0],
error_inputs_func=error_inputs_embedding,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# Fails on CI https://github.com/pytorch/pytorch/issues/85377
DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_compare_cpu'),
# Reference: https://github.com/pytorch/pytorch/issues/67084
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view', device_type='cuda'),
# Not a problem: embedding does weird stuff to its input (it renormalizes)
DecorateInfo(unittest.skip('Allowed exemption'), 'TestCompositeCompliance', 'test_operator'),
# Fails due to non-determinism (see issue #74679)
# TODO: Investigate why more granular skips in the test don't work in CI
DecorateInfo(unittest.skip('Skipped!'),
'TestExpandedWeightFunctional',
'test_expanded_weight_forward'),
),
supports_expanded_weight=True,
supports_out=False,
),
OpInfo(
"nn.functional.embedding_bag",
# We use lambda to reshuffle the positional arguments.
# This is because currently only the `input` field of SampleInput
# is tested in gradient tests.
op=lambda weight, idx, **kwargs: torch.nn.functional.embedding_bag(idx, weight, **kwargs),
dtypes=floating_types_and(torch.bfloat16, torch.float16),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
# backward is not supported for mode `max` and dtype `bfloat16`
backward_dtypesIfCUDA=floating_types_and(torch.float16),
sample_inputs_func=sample_inputs_embedding_bag,
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# Not a problem: embedding_bag does weird stuff to its input (it renormalizes)
DecorateInfo(unittest.skip('Allowed exemption'), 'TestCompositeCompliance', 'test_operator'),
),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_out=False,
supports_gradgrad=False,
allow_cow_input_materialize_forward=[0],
),
OpInfo(
"nn.functional.multi_head_attention_forward",
op=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.multi_head_attention_forward, input, *args, **kwargs),
dtypes=floating_types_and(torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_multi_head_attention_forward,
skips=(
# Tensor-likes are not close
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples', dtypes=(torch.float32,)),
DecorateInfo(toleranceOverride({torch.float32: tol(atol=5e-3, rtol=0)}), 'TestDecomp', 'test_comprehensive'),
# TODO skip this for now since we can't skip on runtime arch support (taken from scaled_dot_product_attention)
DecorateInfo(unittest.skip("Skipped!"), 'TestInductorOpInfo', 'test_comprehensive'),
# randomness
DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
# lambda impl
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# tests running very slowly break slow tests, so we skip them instead of using `slowTest`.
DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_forward_ad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_operator'),
DecorateInfo(
unittest.skip("Skipped - baddbmm decomp does not have enough precision for 16-bit float"),
'TestDecomp',
'test_comprehensive',
dtypes=(torch.bfloat16, torch.float16),
),
DecorateInfo(
unittest.skip("Skipped - baddbmm decomp does not have enough precision for 16-bit float"),
'TestDecomp',
'test_quick',
dtypes=(torch.bfloat16, torch.float16))),
supports_out=False,
supports_gradgrad=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
),
UnaryUfuncInfo(
"nn.functional.softplus",
aten_backward_name='softplus_backward',
ref=reference_softplus,
sample_kwargs=lambda device, dtype, input: ({'beta': 3, 'threshold': .2}, {'beta': 3, 'threshold': .2}),
sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'beta': 3, 'threshold': .2}),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=floating_types_and(torch.bfloat16, torch.float16),
decorators=(
DecorateInfo(
toleranceOverride
({
torch.half: tol(atol=1e-2, rtol=1e-2),
torch.bfloat16: tol(atol=1e-2, rtol=1e-2),
}),
'TestUnaryUfuncs'),
),
),
OpInfo(
"nn.functional.mse_loss",
aten_backward_name='mse_loss_backward',
ref=loss_reference_reduction_wrapper(lambda input, target: (input - target) ** 2),
sample_inputs_func=sample_inputs_loss,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=floating_types_and(torch.float16, torch.bfloat16),
skips=(
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252,
# please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),),
),
),
OpInfo(
"nn.functional.grid_sample",
dtypes=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_grid_sample,
reference_inputs_func=reference_inputs_grid_sample,
supports_gradgrad=False,
gradcheck_nondet_tol=1e-15),
# TODO: delete this OpInfo once we add meta support for grid_sampler_3d
OpInfo(
"grid_sampler_2d",
dtypes=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_grid_sampler_2d,
supports_gradgrad=False,
gradcheck_nondet_tol=1e-15,
skips=(
DecorateInfo(slowTest, 'TestDecomp', 'test_comprehensive', dtypes=(torch.float32, torch.float64),
active_if=IS_WINDOWS),
),),
# TODO: Remove grid_sampler_3d tests once `nn.functional.grid_sample` has
# MPS support for all cases.
OpInfo(
"grid_sampler_3d",
dtypes=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_grid_sampler_3d,
supports_gradgrad=False,
gradcheck_nondet_tol=1e-15,
skips=(
# NOTE: Only run on MPS
DecorateInfo(unittest.skip('Skipped!'), device_type='cpu'),
DecorateInfo(unittest.skip('Skipped!'), device_type='cuda'),
DecorateInfo(unittest.skip('Skipped!'), device_type='xpu'),
DecorateInfo(unittest.skip('Skipped!'), device_type='meta'),
),),
OpInfo(
"argwhere",
ref=np.argwhere,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_autograd=False,
sample_inputs_func=sample_inputs_argwhere,
skips=(
# Compiler issue on ROCm. Might need to skip until ROCm5.5
DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_non_standard_bool_values',
dtypes=[torch.bool], active_if=TEST_WITH_ROCM),
),
),
ReductionOpInfo(
'all',
identity=True,
supports_autograd=False,
result_dtype=torch.bool,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.all),
skips=(
# FIXME: uint8 input returns uint8 instead of bool
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_result_dtype', dtypes=[torch.uint8]),
),
),
ReductionOpInfo(
'any',
identity=False,
supports_autograd=False,
result_dtype=torch.bool,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.any),
skips=(
# FIXME: uint8 input returns uint8 instead of bool
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_result_dtype', dtypes=[torch.uint8]),
),
),
ReductionOpInfo(
'amax',
nan_policy='propagate',
supports_forward_ad=True,
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
ref=reference_reduction_numpy(np.amax),
skips=(
# FIXME: reduces all dimensions when dim=[]
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'),
),
error_inputs_func=error_inputs_aminmax_amax_amin,
),
ReductionOpInfo(
'amin',
nan_policy='propagate',
supports_forward_ad=True,
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
ref=reference_reduction_numpy(np.amin),
skips=(
# FIXME: reduces all dimensions when dim=[]
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'),
),
error_inputs_func=error_inputs_aminmax_amax_amin,
),
ReductionOpInfo(
'argmax',
supports_multiple_dims=False,
supports_autograd=False,
assert_jit_shape_analysis=True,
result_dtype=torch.int64,
dtypes=all_types_and(torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.argmax, supports_keepdims=False),
),
ReductionOpInfo(
'argmin',
supports_multiple_dims=False,
supports_autograd=False,
result_dtype=torch.int64,
dtypes=all_types_and(torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.argmin, supports_keepdims=False),
),
ReductionOpInfo(
'count_nonzero',
identity=0,
supports_out=False,
supports_autograd=False,
result_dtype=torch.int64,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_reduction_count_nonzero,
ref=reference_reduction_numpy(np.count_nonzero),
skips=(
# FIXME: count_nonzero does not accept keepdim kwarg
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_single_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_unsorted_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_offbounds_keepdim'),
# FIXME: dim=[] reduces all dimensions
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
),
),
ReductionOpInfo(
'mean',
nan_policy='propagate',
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# FIXME: mean needs 'dim' parameter when using the 'out' overload.
# Adding it with 'generate_args_kwargs' does not work, since these also get passed
# onto the reference implementations.
supports_out=True,
assert_autodiffed=True,
assert_jit_shape_analysis=True,
promotes_int_to_float=True,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.mean),
error_inputs_func=error_inputs_mean,
skips=(
# AssertionError: RuntimeError not raised : Expected RuntimeError when doing an unsafe cast from a result
# of dtype torch.float32 into an out= with dtype torch.long
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='cuda', dtypes=[torch.float32]),
# FIXME: mean does not support passing keepdim without passing dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: mean reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: improve precision
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input',
dtypes=[torch.float16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_extremal_values',
device_type='cuda', dtypes=[torch.complex64]),
),
),
ReductionOpInfo(
'nanmean',
nan_policy='omit',
assert_autodiffed=True,
promotes_int_to_float=True,
supports_forward_ad=True,
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
dtypes=floating_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16, torch.chalf),
sample_inputs_func=sample_inputs_nan_reduction(supports_multiple_dims=True),
ref=reference_reduction_numpy(np.nanmean),
skips=(
# AssertionError: False is not true :
# Failure in testing nodes' autodifferentiation.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# FIXME: prod reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: improve precision
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input',
dtypes=[torch.float16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values',
device_type='cuda', dtypes=[torch.float16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_extremal_values',
device_type='cuda', dtypes=[torch.complex64]),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=2e-5, rtol=4e-2)}),
"TestConsistency", "test_output_match", device_type="mps"),
),
),
ReductionOpInfo(
'std',
nan_policy='propagate',
supports_out=True,
complex_to_real=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=True,
promotes_int_to_float=True,
check_batched_forward_grad=False,
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_std_var,
ref=reference_std_var(np.std),
generate_args_kwargs=generate_std_var_kwargs,
skips=(
# FIXME: cannot specify keepdim without dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: dim=[] reduces all dimensions
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: improve precision
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input',
dtypes=(torch.float16,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values',
dtypes=(torch.float16,)),
),
),
ReductionOpInfo(
'std',
variant_test_name='unbiased',
nan_policy='propagate',
supports_out=False,
complex_to_real=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=True,
promotes_int_to_float=True,
check_batched_forward_grad=False,
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_std_var_unbiased,
skips=(
# FIXME: dim=[] reduces all dimensions
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
),
),
ReductionOpInfo(
'var',
nan_policy='propagate',
supports_out=True,
assert_autodiffed=True,
promotes_int_to_float=True,
complex_to_real=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_forward_grad=False,
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_std_var,
ref=reference_std_var(np.var),
generate_args_kwargs=generate_std_var_kwargs,
skips=(
# FIXME: cannot specify keepdim without dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: dim=[] reduces all dimensions
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: improve precision
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values'),
# NumPy is giving NaN for this
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_large_input'),
),
),
ReductionOpInfo(
'var',
variant_test_name='unbiased',
nan_policy='propagate',
supports_out=False,
complex_to_real=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=True,
promotes_int_to_float=True,
check_batched_forward_grad=False,
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_std_var_unbiased,
skips=(
# FIXME: dim=[] reduces all dimensions
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
),
),
ReductionOpInfo(
'prod',
identity=1,
nan_policy='propagate',
supports_multiple_dims=False,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_int64=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
sample_inputs_func=sample_inputs_prod,
ref=prod_numpy,
skips=(
# FIXME: prod does not support passing keepdim without passing dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: prod reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: prod does not support passing None to dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input',
dtypes=[torch.float16, torch.complex64]),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values',
dtypes=[torch.uint8, torch.float16, torch.complex64]),
# FIXME: ValueError: The data in MaskedTensor a and Tensor b do not match
DecorateInfo(unittest.skip("Skipped!"), 'TestOperators', 'test_reduction_all',
dtypes=[torch.float16]),
),
),
ReductionOpInfo(
'sum',
identity=0,
nan_policy='propagate',
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_int64=True,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
ref=reference_reduction_numpy(np.sum),
error_inputs_sparse_func=error_inputs_sparse_reduction_sum,
sample_inputs_sparse_coo_func=partial(sample_inputs_sparse_reduction_sum, layout=torch.sparse_coo),
sample_inputs_sparse_csr_func=partial(sample_inputs_sparse_reduction_sum, layout=torch.sparse_csr),
sample_inputs_sparse_csc_func=partial(sample_inputs_sparse_reduction_sum, layout=torch.sparse_csc),
sample_inputs_sparse_bsr_func=partial(sample_inputs_sparse_reduction_sum, layout=torch.sparse_bsr),
sample_inputs_sparse_bsc_func=partial(sample_inputs_sparse_reduction_sum, layout=torch.sparse_bsc),
skips=(
# FIXME: sum does not support passing keepdim without passing dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: sum reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: improve precision
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input',
dtypes=[torch.float16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values',
dtypes=[torch.float16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestOperators', 'test_reduction_all',
dtypes=[torch.float32]),
),
),
ReductionOpInfo(
'nansum',
identity=0,
nan_policy='omit',
supports_out=True,
promotes_int_to_int64=True,
supports_forward_ad=True,
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
sample_inputs_func=sample_inputs_nan_reduction(supports_multiple_dims=True),
ref=reference_reduction_numpy(np.nansum),
skips=(
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# FIXME: nansum reduces all dimensions when dim=[]
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: flaky test so skipped instead of xfailed
# possibly bad low precision reference in numpy
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input',
dtypes=[torch.float16]),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=3e-3, rtol=4e-2)}),
"TestConsistency", "test_output_match", device_type="mps"),
),
),
ReductionOpInfo(
'hash_tensor',
result_dtype=torch.uint64,
supports_autograd=False,
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.float16, torch.bfloat16),
ref=reference_hash_tensor,
skips=(
# hash_tensor reduces all dimensions when dim=[] (as do sum, prod etc.)
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'),
# aten::hash_tensor hit the vmap fallback which is currently disabled
DecorateInfo(unittest.skip("Skipped!"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"),
DecorateInfo(unittest.skip("Skipped!"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"),
# NYI
DecorateInfo(unittest.expectedFailure, 'TestInductorOpInfo', 'test_comprehensive'),
# Sharding strategy NYI
DecorateInfo(unittest.expectedFailure, 'TestDTensorOps', 'test_dtensor_op_db'),
)
),
OpInfo(
"nn.functional.ctc_loss",
dtypes=floating_types(),
supports_out=False,
sample_inputs_func=sample_inputs_ctc_loss,
# gradcheck_wrapper, see https://github.com/pytorch/pytorch/issues/52241
gradcheck_wrapper=gradcheck_wrapper_ctc_loss,
skips=(
# RuntimeError: derivative for aten::_ctc_loss_backward is not implemented
DecorateInfo(
unittest.expectedFailure,
"TestBwdGradients",
"test_fn_gradgrad",
dtypes=(torch.float64,),
),
# RuntimeError: derivative for aten::_ctc_loss_backward is not implemented
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32,),
),
# Ref: https://github.com/pytorch/pytorch/issues/85231
DecorateInfo(unittest.skip("Fails with ASAN"),
'TestProxyTensorOpInfo',
'test_make_fx_fake_exhaustive', active_if=TEST_WITH_ASAN),
),
),
OpInfo(
"nn.functional.cosine_embedding_loss",
dtypes=all_types_and(torch.half, torch.bfloat16, torch.bool),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=[
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-4, rtol=2e-3)}),
'TestInductorOpInfo', 'test_comprehensive', device_type="cuda",
),
],
sample_inputs_func=sample_inputs_cosine_embedding_loss,
),
OpInfo(
"nn.functional.nll_loss",
dtypes=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_nll_loss,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
skips=(
# RuntimeError:
# undefined value tensor:
# File "<string>", line 3
# def the_method(i0, i1):
# return torch.nn.functional.nll_loss(i0, i1, weight=tensor([8.4784, 1.7658, 4.3228], dtype=torch.float32))
# ~~~~~~ <--- HERE
DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),),
# Fails for unknown reason: https://github.com/pytorch/pytorch/issues/120782
DecorateInfo(
unittest.skip("Skipped!"),
"TestCompositeCompliance",
"test_cow_input",
device_type='cuda',
),
DecorateInfo(unittest.skip("FP16 nll_loss cases have not been enabled on MPS yet"),
dtypes=(torch.half,), device_type="mps"),
),
),
OpInfo(
"nn.functional.gaussian_nll_loss",
dtypes=floating_types_and(torch.half, torch.bfloat16),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_gaussian_nll_loss,
error_inputs_func=error_inputs_gaussian_nll_loss,
skips=(
# Pre-existing condition (calls .item); needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'),
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'),
# Pre-existing condition (calls .item); needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'),
# JIT does not support variadic tensors.
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=8e-3, rtol=2e-3)}),
"TestConsistency", "test_output_match", device_type="mps"),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=8e-3, rtol=2e-3)}),
"TestConsistency", "test_output_grad_match", device_type="mps"),
),
),
OpInfo(
"nn.functional.hinge_embedding_loss",
dtypes=floating_types_and(torch.half, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_hinge_embedding_loss,
error_inputs_func=error_inputs_hinge_embedding_loss,
reference_inputs_func=reference_inputs_hinge_embedding_loss,
),
OpInfo(
"nn.functional.huber_loss",
aten_backward_name='huber_loss_backward',
dtypes=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_huber_loss,
error_inputs_func=error_inputs_huber_loss,
skips=(
# JIT does not support variadic tensors.
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),),
)
),
OpInfo(
"nn.functional.pdist",
ref=reference_pdist,
sample_inputs_func=sample_inputs_pdist,
dtypes=floating_types(),
supports_out=False,
supports_gradgrad=False,
skips=(
DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'),
)
),
OpInfo(
"nn.functional.poisson_nll_loss",
dtypes=all_types_and(torch.half, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_poisson_nll_loss,
error_inputs_func=error_inputs_poisson_nll_loss,
),
OpInfo(
"argsort",
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_sort,
supports_out=False,
supports_autograd=False,
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32,),
),
DecorateInfo(
unittest.expectedFailure,
"TestCommon",
"test_non_standard_bool_values",
dtypes=[torch.bool],
device_type='cuda',
active_if=not TEST_WITH_ROCM
),
),
),
OpInfo(
"repeat_interleave",
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16, torch.chalf),
sample_inputs_func=sample_inputs_repeat_interleave,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32, torch.complex64),
),
),
),
OpInfo(
"nn.functional.pairwise_distance",
ref=lambda a, b, p=2.0, eps=1e-6, keepdim=False: (
np.sum(np.abs(a - b + eps) ** p, axis=-1, keepdims=keepdim) ** (1 / p)
),
sample_inputs_func=sample_inputs_pairwise_distance,
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32, torch.complex64),
),
),
),
OpInfo(
"nn.functional.pixel_shuffle",
sample_inputs_func=sample_inputs_pixel_shuffle,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32, torch.complex64),
),
),
),
OpInfo(
"nn.functional.pixel_unshuffle",
sample_inputs_func=sample_inputs_pixel_unshuffle,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32, torch.complex64),
),
),
),
OpInfo(
"nn.functional.channel_shuffle",
sample_inputs_func=sample_inputs_channel_shuffle,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
allow_cow_input_materialize_forward=[0],
allow_cow_input_materialize_backward=[0, 'output grad 0'],
skips=(
# Skip due to NotImplementedError for MPS device.
DecorateInfo(unittest.expectedFailure, 'TestConsistency'),
DecorateInfo(unittest.expectedFailure, 'TestDTensorOps', 'test_dtensor_op_db'),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"),
),
),
OpInfo(
"nn.functional.kl_div",
sample_inputs_func=sample_inputs_kl_div,
dtypes=floating_types_and(torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
),
OpInfo(
"diagflat",
ref=lambda input, offset=0: np.diagflat(input, k=offset),
sample_inputs_func=sample_inputs_diagflat,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
),
OpInfo(
'scatter_reduce',
variant_test_name='sum',
inplace_variant=torch.Tensor.scatter_reduce_,
# complex not added to dtypes as complex gradients are not properly handled
# and scatter_reduce hasn't been added to the whitelist in gen_variable_type yet
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_scatter_reduce,
skips=(
# Compiler issue on ROCm. Regression started in ROCm 6.4.
DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_non_standard_bool_values',
dtypes=[torch.bool], active_if=TEST_WITH_ROCM),
),
),
OpInfo(
'scatter_reduce',
variant_test_name='prod',
# complex not added to dtypes as complex gradients are not properly handled
# and scatter_reduce hasn't been added to the whitelist in gen_variable_type yet
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
sample_inputs_func=sample_inputs_scatter_reduce,
skips=(
# Not implemented
DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD'),
DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_inplace_forward_mode_AD'),
DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'),
),
),
OpInfo(
'scatter_reduce',
variant_test_name='mean',
# complex not added to dtypes as complex gradients are not properly handled
# and scatter_reduce hasn't been added to the whitelist in gen_variable_type yet
dtypes=all_types_and(torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_scatter_reduce,
),
OpInfo(
'scatter_reduce',
variant_test_name='amin',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_forward_ad=True,
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_scatter_reduce,
),
OpInfo(
'scatter_reduce',
variant_test_name='amax',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
dtypesIfHpu=custom_types(torch.float32, torch.bfloat16),
supports_forward_ad=True,
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_scatter_reduce,
),
OpInfo(
'_segment_reduce',
aten_name='segment_reduce',
variant_test_name='lengths',
dtypes=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
# RuntimeError: derivative for aten::_segment_reduce_backward is not implemented
supports_gradgrad=False,
sample_inputs_func=sample_inputs_segment_reduce,
skips=(
# FIXME: CUDA driver API confirmed a leak in
# __main__.TestJitCUDA.test_variant_consistency_jit_segment_reduce_cuda_float32
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
device_type="cuda",
),
),
),
OpInfo(
'_segment_reduce',
aten_name='segment_reduce',
variant_test_name='offsets',
dtypes=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
# RuntimeError: derivative for aten::_segment_reduce_backward is not implemented
supports_gradgrad=False,
sample_inputs_func=partial(sample_inputs_segment_reduce, mode='offsets'),
skips=(
# FIXME: CUDA driver API confirmed a leak in
# __main__.TestJitCUDA.test_variant_consistency_jit_segment_reduce_cuda_float32
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
device_type="cuda",
),
),
),
]
# Fold in the OpInfo entries registered in the opinfo.definitions subpackage
# (in-place extend; identical effect to `op_db += ...` on a list).
op_db.extend(opinfo.definitions.op_db)
# Separate registry for experimental Python Reference OpInfos.
python_ref_db = [
#
# Elementwise Unary OpInfos
#
ElementwiseUnaryPythonRefInfo(
"_refs.abs",
torch_opinfo_name="abs",
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/49224
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_small',
dtypes=[torch.int8], active_if=TEST_WITH_ASAN),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.acos",
torch_opinfo_name="acos",
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_normal',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
# Failing with wrong imaginary sign on at least some Windows jobs
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_small',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
# Failing with wrong imaginary sign on at least some Windows jobs
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_large',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
)
),
ElementwiseUnaryPythonRefInfo(
"_refs.acosh",
torch_opinfo_name="acosh",
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_normal',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_large',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
# Failing with wrong imaginary sign on at least some Windows jobs
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_small',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.asin",
torch_opinfo_name="asin",
decorators=[
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-05, rtol=1e-03)}),
'TestUnaryUfuncs', device_type='cuda'),
DecorateInfo(
toleranceOverride({torch.complex64: tol(atol=5e-05, rtol=2e-05)}),
'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu'
),
precisionOverride({torch.bfloat16: 1e-2}),
],
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_large',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.asinh",
torch_opinfo_name="asinh",
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_small',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_large',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
),
),
PythonRefInfo(
"_refs.lerp",
torch_opinfo_name="lerp",
),
PythonRefInfo(
"_refs.ones",
torch_opinfo_name="ones",
skips=(
# Tests that assume input is a tensor or sequence of tensors
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
),
),
PythonRefInfo(
"_refs.zeros",
torch_opinfo_name="zeros",
skips=(
# Tests that assume input is a tensor or sequence of tensors
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
),
),
PythonRefInfo(
"_refs.cauchy",
torch_opinfo_name="cauchy",
decorators=(
# TODO: RuntimeError: no _refs support for torch.rand_like
DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"),
'TestCommon',
'test_python_ref'),
# AssertionError: Tensor-likes are not close!
DecorateInfo(unittest.skip("Expected: cauchy is not comparable"),
'TestCommon',
'test_out'),
DecorateInfo(unittest.skip("Expected: cauchy is not comparable"),
'TestCommon',
'test_out_warning'),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor'),
DecorateInfo(unittest.skip("Expected: cauchy is not comparable"),
'TestCommon',
'test_python_ref_torch_fallback'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
)
),
PythonRefInfo(
"_refs.exponential",
torch_opinfo_name="exponential",
supports_out=True,
decorators=(
# dtypes that do not support check_uniform_bounds of rand_like
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta',
dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64)),
DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_dtypes'),
# TODO: RuntimeError: no _refs support for torch.rand_like
DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"),
'TestCommon',
'test_python_ref'),
# AssertionError: Tensor-likes are not close!
DecorateInfo(unittest.skip("Expected: exponential is not comparable"),
'TestCommon',
'test_out'),
DecorateInfo(unittest.skip("Expected: exponential is not comparable"),
'TestCommon',
'test_out_warning'),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor'),
DecorateInfo(unittest.skip("Expected: exponential is not comparable"),
'TestCommon',
'test_python_ref_torch_fallback'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
)
),
PythonRefInfo(
"_refs.geometric",
torch_opinfo_name="geometric",
supports_out=True,
decorators=(
# dtypes that do not support check_uniform_bounds of rand_like
DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_dtypes'),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta',
dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64)),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback',
dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64)),
# TODO: RuntimeError: no _refs support for torch.rand_like
DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"),
'TestCommon',
'test_python_ref'),
DecorateInfo(unittest.skip("Expected: geometric is not comparable"),
'TestCommon',
'test_python_ref_executor', device_type='cuda'),
# AssertionError: Tensor-likes are not close!
DecorateInfo(unittest.skip("Expected: geometric is not comparable"),
'TestCommon',
'test_out'),
DecorateInfo(unittest.skip("Expected: geometric is not comparable"),
'TestCommon',
'test_out_warning'),
DecorateInfo(unittest.skip("Expected: geometric is not comparable"),
'TestCommon',
'test_python_ref_torch_fallback'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
)
),
PythonRefInfo(
"_refs.log_normal",
torch_opinfo_name="log_normal",
supports_out=True,
decorators=(
# TODO: RuntimeError: no _refs support for torch.rand_like
DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"),
'TestCommon',
'test_python_ref'),
DecorateInfo(unittest.skip("Expected: log_normal is not comparable"),
'TestCommon',
'test_python_ref_executor', device_type='cuda'),
# AssertionError: Tensor-likes are not close!
DecorateInfo(unittest.skip("Expected: log_normal is not comparable"),
'TestCommon',
'test_out'),
DecorateInfo(unittest.skip("Expected: log_normal is not comparable"),
'TestCommon',
'test_out_warning'),
DecorateInfo(unittest.skip("Expected: log_normal is not comparable"),
'TestCommon',
'test_python_ref_torch_fallback'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
)
),
PythonRefInfo(
"_refs.normal",
torch_opinfo_name="normal",
supports_out=True,
decorators=(
# TODO: RuntimeError: no _refs support for torch.rand_like
DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"),
'TestCommon',
'test_python_ref'),
# AssertionError: Tensor-likes are not close!
DecorateInfo(unittest.skip("Expected: normal is not comparable"),
'TestCommon',
'test_out'),
DecorateInfo(unittest.skip("Expected: normal is not comparable"),
'TestCommon',
'test_out_warning'),
DecorateInfo(unittest.skip("Expected: normal is not comparable"),
'TestCommon',
'test_python_ref_torch_fallback'),
DecorateInfo(unittest.skip("Expected: normal is not comparable"), 'TestDecomp', 'test_comprehensive'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
DecorateInfo(unittest.skip("make_traced() doesn't set seed properly!"), 'TestCommon', 'test_python_ref_executor'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
)
),
PythonRefInfo(
"_refs.normal",
torch_opinfo_name="normal",
torch_opinfo_variant_name="number_mean",
supports_out=True,
decorators=(
# TODO: RuntimeError: no _refs support for torch.rand_like
DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"),
'TestCommon',
'test_python_ref'),
# AssertionError: Tensor-likes are not close!
DecorateInfo(unittest.skip("Expected: normal is not comparable"),
'TestCommon',
'test_out'),
DecorateInfo(unittest.skip("Expected: normal is not comparable"),
'TestCommon',
'test_out_warning'),
DecorateInfo(unittest.skip("Expected: normal is not comparable"),
'TestCommon',
'test_python_ref_torch_fallback'),
DecorateInfo(unittest.skip("Expected: normal is not comparable"), 'TestDecomp', 'test_comprehensive'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
DecorateInfo(unittest.skip("make_traced() doesn't set seed properly!"), 'TestCommon', 'test_python_ref_executor'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
)
),
PythonRefInfo(
"_refs.normal_",
op=torch.Tensor.normal_,
torch_opinfo_name="normal",
torch_opinfo_variant_name="in_place",
supports_out=False,
decorators=(
# TODO: RuntimeError: no _refs support for torch.rand_like
DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"),
'TestCommon',
'test_python_ref'),
# AssertionError: Tensor-likes are not close!
DecorateInfo(unittest.skip("Expected: normal is not comparable"),
'TestCommon',
'test_out'),
DecorateInfo(unittest.skip("Expected: normal is not comparable"),
'TestCommon',
'test_out_warning'),
DecorateInfo(unittest.skip("Expected: normal is not comparable"),
'TestCommon',
'test_python_ref_torch_fallback'),
DecorateInfo(unittest.skip("Expected: normal is not comparable"), 'TestDecomp', 'test_comprehensive'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
DecorateInfo(unittest.skip("make_traced() doesn't set seed properly!"), 'TestCommon', 'test_python_ref_executor'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
)
),
PythonRefInfo(
"_refs.arange",
torch_opinfo_name="arange",
skips=(
# Tests that assume input is a tensor or sequence of tensors
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
),
),
PythonRefInfo(
"_refs.linspace",
torch_opinfo_name="linspace",
skips=(
# Tests that assume input is a tensor or sequence of tensors
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
# cpu implementation is wrong on some integral types
# https://github.com/pytorch/pytorch/issues/81996
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback',
dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64), device_type="cpu"),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref',
dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64), device_type="cpu"),
# cuda implementation is off-by-one on some inputs due to precision issues
# https://github.com/pytorch/pytorch/issues/82230
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback',
dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64),
device_type="cuda"),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref',
dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64),
device_type="cuda"),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor',
dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64),
device_type="cuda"),
),
),
PythonRefInfo(
"_refs.linspace",
torch_opinfo_name="linspace",
torch_opinfo_variant_name="tensor_overload",
skips=(
# TypeError: 'int' object is not subscriptable
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
# cpu implementation is wrong on some integral types
# https://github.com/pytorch/pytorch/issues/81996
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback',
dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64), device_type="cpu"),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref',
dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64), device_type="cpu"),
# cuda implementation is off-by-one on some inputs due to precision issues
# https://github.com/pytorch/pytorch/issues/82230
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback',
dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64),
device_type="cuda"),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref',
dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64),
device_type="cuda"),
# TODO torch.ops.aten.copy is not in _refs
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref',
dtypes=(torch.float32, torch.float64, torch.float16, torch.complex64, torch.complex128, torch.bfloat16),
device_type="cuda"),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref',
dtypes=(torch.float32, torch.float64, torch.float16, torch.complex64, torch.complex128, torch.bfloat16),
device_type="cpu"),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor',
dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64),
device_type="cuda"),
),
),
PythonRefInfo(
"_refs.logspace",
torch_opinfo_name="logspace",
skips=(
# Tests that assume input is a tensor or sequence of tensors
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
# Off-by-one issue when casting floats to ints
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback',
dtypes=(torch.int16, torch.int32, torch.int64),
device_type="cuda"),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref',
dtypes=(torch.int16, torch.int32, torch.int64),
device_type="cuda"),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor',
dtypes=(torch.int16, torch.int32, torch.int64),
device_type="cuda"),
),
),
PythonRefInfo(
"_refs.logspace",
torch_opinfo_name="logspace",
torch_opinfo_variant_name="tensor_overload",
skips=(
# TypeError: 'int' object is not subscriptable
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
# Off-by-one issue when casting floats to ints
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback',
dtypes=(torch.int16, torch.int32, torch.int64),
device_type="cuda"),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref',
dtypes=(torch.int16, torch.int32, torch.int64),
device_type="cuda"),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor',
dtypes=(torch.int16, torch.int32, torch.int64),
device_type="cuda"),
# TODO copy doesn't have prim refs
DecorateInfo(
unittest.expectedFailure, 'TestCommon', 'test_python_ref',
dtypes=(
torch.float32, torch.float64, torch.float16, torch.complex64,
torch.complex128, torch.bfloat16, torch.int8, torch.uint8
),
device_type="cuda"
),
DecorateInfo(
unittest.expectedFailure, 'TestCommon', 'test_python_ref',
dtypes=(
torch.float32, torch.float64, torch.float16,
torch.complex64, torch.complex128, torch.bfloat16,
torch.int16, torch.int32, torch.int64, torch.int8, torch.uint8
),
device_type="cpu"),
),
),
PythonRefInfo(
"_refs.meshgrid",
torch_opinfo_name="meshgrid",
torch_opinfo_variant_name="variadic_tensors",
),
PythonRefInfo(
"_refs.take_along_dim",
torch_opinfo_name="take_along_dim",
skips=(
DecorateInfo(unittest.expectedFailure,
'TestCommon',
'test_python_ref'),
),
),
PythonRefInfo(
"_refs.to",
torch_opinfo_name="to",
),
PythonRefInfo(
"_refs.triu",
torch_opinfo_name="triu",
),
PythonRefInfo(
"_refs.tril",
torch_opinfo_name="tril",
),
PythonRefInfo(
"_refs.triu_indices",
torch_opinfo_name="triu_indices",
# the implementation uses torch.stack that violates view consistency
validate_view_consistency=False,
skips=(
# skip these tests since we have non tensor input
DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_noncontiguous_samples'),
DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip('Skipped!'), 'TestMathBits', 'test_neg_view'),
)),
PythonRefInfo(
"_refs.tril_indices",
torch_opinfo_name="tril_indices",
# the implementation uses torch.stack that violates view consistency
validate_view_consistency=False,
skips=(
# skip these tests since we have non tensor input
DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_noncontiguous_samples'),
DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip('Skipped!'), 'TestMathBits', 'test_neg_view'),
)),
PythonRefInfo(
"_refs.meshgrid",
torch_opinfo_name="meshgrid",
torch_opinfo_variant_name="list_of_tensors",
),
PythonRefInfo(
"_refs.movedim",
aliases=('moveaxis',),
torch_opinfo_name="movedim",
),
PythonRefInfo(
"_refs.bucketize",
torch_opinfo_name="bucketize",
skips=(
# RuntimeError: It appears that you're trying to get value out of a tracing tensor with
# aten._local_scalar_dense.default - erroring out! [...]
# triggered by mid_val = boundaries[mid]
DecorateInfo(unittest.expectedFailure, "TestCommon", "test_python_ref_executor"),
)
),
PythonRefInfo(
"_refs.equal",
torch_opinfo_name="equal",
skips=(
# RuntimeError: Cannot cast FakeTensor to number
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta',),
)
),
ElementwiseUnaryPythonRefInfo(
"_refs.atan",
torch_opinfo_name="atan",
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_small',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_large',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.atanh",
torch_opinfo_name="atanh",
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_small',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_large',
device_type='cuda', dtypes=[torch.cfloat],
active_if=IS_WINDOWS),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.bitwise_not",
torch_opinfo_name="bitwise_not",
),
ElementwiseUnaryPythonRefInfo(
"_refs.ceil",
torch_opinfo_name="ceil",
# Fails on int32
# https://github.com/pytorch/pytorch/issues/85258
),
PythonRefInfo(
"_refs.item",
torch_opinfo_name="item",
skips=(
# RuntimeError: Cannot cast FakeTensor(FakeTensor(..., device='meta', size=()), cpu) to number
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta'),
# ValueError: Can't convert a tensor with 10 elements to a number!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'),),
),
ElementwiseUnaryPythonRefInfo(
"_refs.conj_physical",
torch_opinfo_name="conj_physical",
),
ElementwiseUnaryPythonRefInfo(
"_refs.cos",
torch_opinfo_name="cos",
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_large',
dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu',
active_if=IS_WINDOWS),
# This fails on CUDA but passes on ROCm
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_large',
dtypes=(torch.cdouble,), device_type='cuda'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
device_type='cpu',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
# AssertionError: Tensor-likes are not close!
# Greatest absolute difference: nan at index (700,) (up to 1e-05 allowed)
# Greatest relative difference: nan at index (700,) (up to 0.001 allowed)
DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs',
'test_reference_numerics_large',
device_type='cuda',
dtypes=(torch.chalf,), active_if=IS_WINDOWS),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.cosh",
torch_opinfo_name="cosh",
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/48641
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.int8]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_large',
dtypes=[torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_large',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
device_type='cpu',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_large',
device_type='cpu',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
# AssertionError: Tensor-likes are not close!
# Greatest absolute difference: nan at index (6000,) (up to 1e-05 allowed)
# Greatest relative difference: nan at index (6000,) (up to 0.001 allowed)
DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs',
'test_reference_numerics_large',
device_type='cuda',
dtypes=(torch.chalf,), active_if=IS_WINDOWS),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.digamma",
torch_opinfo_name="digamma",
),
ElementwiseUnaryPythonRefInfo(
"_refs.erf",
torch_opinfo_name="erf",
),
ElementwiseUnaryPythonRefInfo(
"_refs.erfinv",
torch_opinfo_name="erfinv",
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-2,
torch.float32: 1e-4}),),
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/49155#issuecomment-742664611
DecorateInfo(
unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")),
DecorateInfo(
unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_large',
active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")),
DecorateInfo(
unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_small',
active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.erfc",
torch_opinfo_name="erfc",
),
ElementwiseUnaryPythonRefInfo(
"_refs.exp",
torch_opinfo_name="exp",
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/48010
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.expm1",
torch_opinfo_name="expm1",
),
ElementwiseUnaryPythonRefInfo(
"_refs.exp2",
torch_opinfo_name="exp2",
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_large',
dtypes=[torch.cdouble]),
# Reference: https://github.com/pytorch/pytorch/issues/48010
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.fill",
torch_opinfo_name="fill",
supports_out=True,
),
ElementwiseUnaryPythonRefInfo(
"_refs.floor",
torch_opinfo_name="floor",
# Fails on int32
# https://github.com/pytorch/pytorch/issues/85258
),
ElementwiseUnaryPythonRefInfo(
"_refs.frexp",
torch_opinfo_name="frexp",
# Skipped due to numerical failures on Windows CI.
# This is also skipped in frexp earlier in the file.
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
active_if=IS_WINDOWS),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.frac",
torch_opinfo_name="frac",
skips=(
DecorateInfo(
unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
dtypes=(torch.bfloat16, torch.float16, torch.float32, torch.float64)),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.imag",
torch_opinfo_name="imag",
),
ElementwiseUnaryPythonRefInfo(
"_refs.isfinite",
torch_opinfo_name="isfinite",
supports_out=True,
),
ElementwiseUnaryPythonRefInfo(
"_refs.isinf",
torch_opinfo_name="isinf",
supports_out=True,
),
ElementwiseUnaryPythonRefInfo(
"_refs.isposinf",
torch_opinfo_name="isposinf",
supports_out=True,
),
ElementwiseUnaryPythonRefInfo(
"_refs.isneginf",
torch_opinfo_name="isneginf",
supports_out=True,
),
ElementwiseUnaryPythonRefInfo(
"_refs.isnan",
torch_opinfo_name="isnan",
supports_out=True,
),
ElementwiseUnaryPythonRefInfo(
"_refs.isreal",
torch_opinfo_name="isreal",
supports_out=True,
),
ElementwiseUnaryPythonRefInfo(
"_refs.i0",
torch_opinfo_name="i0",
decorators=(precisionOverride({torch.bfloat16: 3e-1,
torch.float16: 5e-1}),),
skips=(
DecorateInfo(unittest.skip("Skipped!"),
'TestUnaryUfuncs',
'test_reference_numerics_large',
dtypes=(torch.int8,)),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.lgamma",
torch_opinfo_name="lgamma",
decorators=(precisionOverride({torch.float16: 7e-1}),),
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/50140#issuecomment-756150214
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_large',
dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.special.multigammaln",
torch_opinfo_name="mvlgamma",
torch_opinfo_variant_name="mvlgamma_p_1",
skips=skips_mvlgamma(),
decorators=(
DecorateInfo(torch.testing._internal.common_utils.markDynamoStrictTest, 'TestUnaryUfuncs',
'test_reference_numerics_large'),
DecorateInfo(torch.testing._internal.common_utils.xfailIfTorchDynamo, 'TestUnaryUfuncs',
'test_reference_numerics_large'),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.special.multigammaln",
torch_opinfo_name="mvlgamma",
torch_opinfo_variant_name="mvlgamma_p_3",
skips=skips_mvlgamma(),
),
ElementwiseUnaryPythonRefInfo(
"_refs.special.multigammaln",
torch_opinfo_name="mvlgamma",
torch_opinfo_variant_name="mvlgamma_p_5",
skips=skips_mvlgamma(),
),
ElementwiseUnaryPythonRefInfo(
"_refs.log",
torch_opinfo_name="log",
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.log1p",
torch_opinfo_name="log1p",
),
ElementwiseUnaryPythonRefInfo(
"_refs.log10",
torch_opinfo_name="log10",
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.log2",
torch_opinfo_name="log2",
decorators=(precisionOverride({torch.bfloat16: 1e-1}),),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
dtypes=[torch.cfloat, torch.cdouble]),
),
),
PythonRefInfo(
"_refs.logsumexp",
torch_opinfo_name="logsumexp",
# When keepdim=False logsumexp function uses squeeze operation
# that is not yet exposed in nvFuser's Python API.
),
PythonRefInfo(
"_refs.log_softmax",
torch_opinfo_name="log_softmax",
torch_opinfo_variant_name="with_dtype",
),
ElementwiseUnaryPythonRefInfo(
"_refs.nan_to_num",
torch_opinfo_name="nan_to_num",
),
ElementwiseUnaryPythonRefInfo(
"_refs.neg",
torch_opinfo_name="neg",
),
ElementwiseUnaryPythonRefInfo(
"_refs.positive",
torch_opinfo_name="positive",
),
ElementwiseUnaryPythonRefInfo(
"_refs.real",
torch_opinfo_name="real",
),
ElementwiseUnaryPythonRefInfo(
"_refs.reciprocal",
torch_opinfo_name="reciprocal",
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/45690
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
dtypes=[torch.cfloat, torch.cdouble]),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.round",
torch_opinfo_name="round",
# Fails on int32
# https://github.com/pytorch/pytorch/issues/85258
skips=(
DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-3, rtol=0.016)}),
"TestUnaryUfuncs", "test_reference_numerics_extremal",
device_type="cuda"),
DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-3, rtol=0.016)}),
"TestUnaryUfuncs", "test_reference_numerics_normal",
device_type="cuda"),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.rsqrt",
torch_opinfo_name="rsqrt",
decorators=(precisionOverride({torch.half: 5e-2}),),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
dtypes=(torch.cfloat, torch.cdouble)),
# AssertionError: Tensor-likes are not close!
# Greatest absolute difference: nan at index (700,) (up to 0.01 allowed)
# Greatest relative difference: nan at index (700,) (up to 0.001 allowed)
DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs',
'test_reference_numerics_large',
dtypes=(torch.chalf,)),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.sigmoid",
torch_opinfo_name="sigmoid",
aliases=('_refs.special.expit',),
# Reference: https://github.com/pytorch/pytorch/issues/56012
handles_complex_extremal_values=False,
handles_large_floats=False,
decorators=(precisionOverride({torch.float16: 1e-2,
torch.complex64: 1e-1,
torch.bfloat16: 1e-2}),),
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/56012
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
dtypes=[torch.complex64, torch.cdouble], device_type='cuda'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_large',
dtypes=[torch.chalf, torch.complex64, torch.cdouble], device_type='cuda')
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.sign",
torch_opinfo_name="sign",
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/41245
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
dtypes=[torch.bfloat16, torch.float16, torch.float32,
torch.float64]),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.sgn",
torch_opinfo_name="sgn",
# This is an issue with the vectorised abs on CPU
handles_complex_extremal_values=False,
handles_large_floats=False,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/41245
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
dtypes=[torch.bfloat16, torch.float16, torch.float32,
torch.float64]),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.signbit",
torch_opinfo_name="signbit",
),
ElementwiseUnaryPythonRefInfo(
"_refs.sin",
torch_opinfo_name="sin",
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
skips=(
# Fails on CUDA but passes on ROCm
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_large',
dtypes=(torch.cdouble,), device_type='cuda'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu',
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_large',
dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu',
active_if=IS_WINDOWS),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.sinc",
torch_opinfo_name="sinc",
decorators=(precisionOverride({torch.bfloat16: 1e-2,
torch.float16: 1e-2}),),
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/49133
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_small',
dtypes=[torch.cfloat]),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.sinh",
torch_opinfo_name="sinh",
decorators=(precisionOverride({torch.float16: 1e-2}),),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_large',
dtypes=(torch.cdouble,)),
# Reference: https://github.com/pytorch/pytorch/issues/48641
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.int8]),
),
),
PythonRefInfo(
"_refs.softmax",
torch_opinfo_name="softmax",
torch_opinfo_variant_name="with_dtype",
),
ElementwiseUnaryPythonRefInfo(
"_refs.sqrt",
torch_opinfo_name="sqrt",
decorators=(
precisionOverride({torch.bfloat16: 7e-2}),
DecorateInfo(
toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}),
'TestUnaryUfuncs', 'test_reference_numerics_large'),
),
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/47358
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_large',
device_type='cpu', dtypes=(torch.cfloat, torch.cdouble),
active_if=IS_MACOS),
# Reference: https://github.com/pytorch/pytorch/pull/47293#issuecomment-721774436
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_large',
dtypes=(torch.bfloat16,)),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.square",
torch_opinfo_name="square",
decorators=(precisionOverride({torch.complex64: 3e-4, torch.bfloat16: 3e-1}),),
skips=(
# AssertionError: Reference result was farther (2.2417024338305655e-07) from the precise computation
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_executor', dtypes=(torch.complex64,)),
# Reference: https://github.com/pytorch/pytorch/issues/52549
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_large',
dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.tan",
torch_opinfo_name="tan",
decorators=[
DecorateInfo(
toleranceOverride({torch.complex64: tol(atol=1e-04, rtol=1e-05)}),
'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda'),
],
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
)
),
ElementwiseUnaryPythonRefInfo(
"_refs.tanh",
torch_opinfo_name="tanh",
decorators=[
DecorateInfo(
toleranceOverride({torch.complex64: tol(atol=1e-04, rtol=2e-05)}),
'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda'),
],
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.trunc",
torch_opinfo_name="trunc",
# Fails on int32
# https://github.com/pytorch/pytorch/issues/85258
),
PythonRefInfo(
"_refs.special.log_softmax",
torch_opinfo_name="log_softmax", # alias
torch_opinfo_variant_name="with_dtype",
supports_out=False,
),
PythonRefInfo(
"_refs.special.softmax",
torch_opinfo_name="softmax", # alias
torch_opinfo_variant_name="with_dtype",
supports_out=False,
),
#
# Elementwise Unary Special OpInfos
#
ElementwiseUnaryPythonRefInfo(
"_refs.special.logit",
torch_opinfo_name="logit",
),
#
# Elementwise Unary nn.functional OpInfos
#
PythonRefInfo(
"_refs.nn.functional.alpha_dropout",
torch_opinfo_name="nn.functional.alpha_dropout",
decorators=(
DecorateInfo(unittest.skip("Expected: dropout is not comparable"),
'TestCommon',
'test_python_ref'),
# AssertionError: Tensor-likes are not close!
DecorateInfo(unittest.skip("Expected: dropout is not comparable"),
'TestCommon',
'test_python_ref_torch_fallback'),
DecorateInfo(unittest.skip("Expected: dropout is not comparable"),
'TestCommon',
'test_python_ref_executor', device_type='cuda'),
# AssertionError: Tensor-likes are not close!
DecorateInfo(unittest.skip("Expected: dropout is not comparable"),
'TestMathBits',
'test_neg_view'),
# AssertionError: Tensor-likes are not close!
DecorateInfo(unittest.skip("Expected: dropout is not comparable"),
'TestCommon',
'test_compare_cpu'),
)
),
ElementwiseUnaryPythonRefInfo(
"_refs.nn.functional.celu",
torch_opinfo_name="nn.functional.celu",
supports_out=True,
),
PythonRefInfo(
"_refs.nn.functional.channel_shuffle",
torch_opinfo_name="nn.functional.channel_shuffle",
supports_out=True,
),
ElementwiseUnaryPythonRefInfo(
"_refs.nn.functional.threshold",
torch_opinfo_name="nn.functional.threshold",
supports_out=True,
),
PythonRefInfo(
"_refs.nn.functional.dropout",
torch_opinfo_name="nn.functional.dropout",
decorators=(
DecorateInfo(unittest.skip("Expected: dropout is not comparable"),
'TestCommon',
'test_python_ref'),
DecorateInfo(unittest.skip("Expected: dropout is not comparable"),
'TestCommon',
'test_python_ref_torch_fallback'),
DecorateInfo(unittest.skip("Expected: dropout is not comparable"),
'TestCommon',
'test_out'),
DecorateInfo(unittest.skip("Expected: dropout is not comparable"),
'TestCommon',
'test_out_warning'),
DecorateInfo(unittest.skip("Expected: dropout is not comparable"),
'TestMathBits',
'test_conj_view'),
DecorateInfo(unittest.skip("Expected: dropout is not comparable"),
'TestMathBits',
'test_neg_conj_view'),
DecorateInfo(unittest.skip("Expected: dropout is not comparable"),
'TestMathBits',
'test_neg_view'),
# dropout is not comparable
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
)
),
ElementwiseUnaryPythonRefInfo(
"_refs.nn.functional.elu",
torch_opinfo_name="nn.functional.elu",
supports_out=True,
decorators=[
DecorateInfo(
toleranceOverride({
torch.float16: tol(atol=1e-03, rtol=1.2e-03),
torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03)
}),
'TestUnaryUfuncs', device_type='cuda',
), ],
),
ElementwiseUnaryPythonRefInfo(
"_refs.nn.functional.hardtanh",
torch_opinfo_name="nn.functional.hardtanh",
supports_out=True,
),
PythonRefInfo( # TODO: Port this to an UnaryOpInfo
"_refs.nn.functional.gelu",
torch_opinfo_name="nn.functional.gelu",
),
PythonRefInfo(
"_refs.nn.functional.layer_norm",
torch_opinfo_name="nn.functional.layer_norm",
skips=(
# Reference result was farther (3.5762786809723224e-07) from the precise computation
# than the torch result was (2.5068410824946596e-07)!
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref',
dtypes=(torch.float32,), device_type='cpu'),
),
),
PythonRefInfo(
"_refs.nn.functional.glu",
torch_opinfo_name="nn.functional.glu",
supports_out=True,
),
PythonRefInfo(
"_refs.nn.functional.pairwise_distance",
torch_opinfo_name="nn.functional.pairwise_distance",
supports_out=True,
),
PythonRefInfo(
"_refs.nn.functional.pdist",
torch_opinfo_name="nn.functional.pdist",
supports_out=True,
skips=(
# RunTimeError: no _refs support for torch.Tensor.index_select
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'),
# Reference result was farther (1.946091651916504e-05) from the precise
# computation than the torch result was (1.1920928955078125e-06)!
DecorateInfo(
unittest.expectedFailure,
'TestCommon',
'test_python_ref_torch_fallback',
dtypes=(torch.float32,),
device_type='cpu',
),
)),
PythonRefInfo(
"_refs.nn.functional.leaky_relu",
torch_opinfo_name="nn.functional.leaky_relu",
supports_out=True,
),
PythonRefInfo(
"_refs.nn.functional.log_softmax",
torch_opinfo_name="log_softmax", # alias
torch_opinfo_variant_name="with_dtype",
supports_out=False,
),
PythonRefInfo(
"_refs.nn.functional.pixel_shuffle",
torch_opinfo_name="nn.functional.pixel_shuffle",
),
PythonRefInfo(
"_refs.nn.functional.pixel_unshuffle",
torch_opinfo_name="nn.functional.pixel_unshuffle",
),
PythonRefInfo(
"_refs.nn.functional.poisson_nll_loss",
torch_opinfo_name="nn.functional.poisson_nll_loss",
),
ElementwiseUnaryPythonRefInfo(
"_refs.nn.functional.prelu",
torch_opinfo_name="nn.functional.prelu",
),
ElementwiseUnaryPythonRefInfo(
"_refs.nn.functional.relu",
torch_opinfo_name="nn.functional.relu",
supports_out=True,
),
ElementwiseUnaryPythonRefInfo(
"_refs.nn.functional.relu6",
torch_opinfo_name="nn.functional.relu6",
supports_out=True,
),
ElementwiseUnaryPythonRefInfo(
"_refs.nn.functional.mish",
torch_opinfo_name="nn.functional.mish",
supports_out=True,
decorators=[
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}),
'TestUnaryUfuncs',), ],
),
ElementwiseUnaryPythonRefInfo(
"_refs.nn.functional.selu",
torch_opinfo_name="nn.functional.selu",
supports_out=True,
decorators=[
DecorateInfo(
toleranceOverride({
torch.float16: tol(atol=1e-2, rtol=1.8e-2),
torch.bfloat16: tol(atol=1e-2, rtol=1.8e-2)
}),
'TestUnaryUfuncs', device_type='cuda',
), ],
),
PythonRefInfo(
"_refs.nn.functional.softmax",
torch_opinfo_name="softmax", # alias
torch_opinfo_variant_name="with_dtype",
supports_out=False,
),
PythonRefInfo(
"_refs.nn.functional.softmin",
torch_opinfo_name="nn.functional.softmin",
torch_opinfo_variant_name="with_dtype",
supports_out=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs.nn.functional.softplus",
torch_opinfo_name="nn.functional.softplus",
),
PythonRefInfo(
"_refs.nn.functional.l1_loss",
torch_opinfo_name="nn.functional.l1_loss",
),
PythonRefInfo(
"_refs.nn.functional.margin_ranking_loss",
torch_opinfo_name="nn.functional.margin_ranking_loss",
),
PythonRefInfo(
"_refs.nn.functional.mse_loss",
torch_opinfo_name="nn.functional.mse_loss",
),
PythonRefInfo(
"_refs.nn.functional.smooth_l1_loss",
torch_opinfo_name="nn.functional.smooth_l1_loss",
),
PythonRefInfo(
"_refs.nn.functional.hinge_embedding_loss",
torch_opinfo_name="nn.functional.hinge_embedding_loss"
),
PythonRefInfo(
"_refs.nn.functional.nll_loss",
torch_opinfo_name="nn.functional.nll_loss",
# The corresponding PyTorch op doesn't support out. But the ref is
# registered as a decomp and ATen has an out variant.
supports_out=True,
# For simpler indexing, we flatten target indices, then reshape the result tensor.
# This creates inconsistent view state with reference impl.
validate_view_consistency=False,
skips=(
# RuntimeError: It appears that you're trying to get value out of a tracing tensor - erroring out!
DecorateInfo(
unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', device_type="cuda"
),
),
),
PythonRefInfo(
"_refs.nn.functional.huber_loss",
torch_opinfo_name="nn.functional.huber_loss",
# The corresponding PyTorch op doesn't support out. But the ref is
# registered as a decomp and ATen has an out variant.
supports_out=True,
),
ElementwiseUnaryPythonRefInfo(
"_refs.nn.functional.tanhshrink",
torch_opinfo_name="nn.functional.tanhshrink",
decorators=[
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(
toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1.6e-02),
torch.complex64: tol(atol=6e-04, rtol=1e-05)}),
'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda'),
],
skips=(
# in each case, pytorch will produce a nan while numpy will not
DecorateInfo(unittest.skip("Fails on some jobs works on others!"),
'TestUnaryUfuncs', "test_reference_numerics_large",
dtypes=(torch.complex64, torch.complex128),
active_if=(IS_MACOS)),
DecorateInfo(unittest.skip("Fails on some jobs works on others!"),
'TestUnaryUfuncs', "test_reference_numerics_extremal",
dtypes=(torch.complex64, torch.complex128),
device_type='cpu',
active_if=(IS_MACOS or IS_WINDOWS)),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.nn.functional.hardshrink",
torch_opinfo_name="nn.functional.hardshrink",
),
ElementwiseUnaryPythonRefInfo(
"_refs.nn.functional.softshrink",
torch_opinfo_name="nn.functional.softshrink",
),
#
# Elementwise Binary Reference OpInfos
#
ElementwiseBinaryPythonRefInfo(
"_refs.add",
torch_opinfo_name="add",
# https://github.com/pytorch/pytorch/issues/76944
supports_two_python_scalars=True,
supports_one_python_scalar=True,
decorators=(
DecorateInfo(
toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}),
'TestBinaryUfuncs', 'test_reference_numerics'),
),
skips=(
DecorateInfo(unittest.skip("Skipped!"),
'TestBinaryUfuncs',
'test_reference_numerics_extremal_values',
dtypes=(torch.complex64, torch.complex128)),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.atan2",
torch_opinfo_name="atan2",
),
ElementwiseBinaryPythonRefInfo(
"_refs.bitwise_and",
torch_opinfo_name="bitwise_and",
),
ElementwiseBinaryPythonRefInfo(
"_refs.bitwise_left_shift",
torch_opinfo_name="bitwise_left_shift",
skips=(
# https://github.com/pytorch/pytorch/issues/70904
DecorateInfo(unittest.skip("Some inputs produce undefined outputs"), 'TestCommon', 'test_compare_cpu'),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.bitwise_right_shift",
torch_opinfo_name="bitwise_right_shift",
skips=(
            # https://github.com/pytorch/pytorch/issues/70904
DecorateInfo(unittest.skip("Skipped some inputs produce undefined outputs"), 'TestCommon', 'test_compare_cpu'),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.bitwise_or",
torch_opinfo_name="bitwise_or",
),
ElementwiseBinaryPythonRefInfo(
"_refs.bitwise_xor",
torch_opinfo_name="bitwise_xor",
),
ElementwiseBinaryPythonRefInfo(
"_refs.copysign",
torch_opinfo_name="copysign",
skips=(
# RuntimeError: Expected divisor (b) to be on the same device (cuda:0) as dividend (a), but it is found on cpu!
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'),
# FIXME output 0: meta disagrees with real impl
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'),
)
),
ElementwiseBinaryPythonRefInfo(
"_refs.div",
torch_opinfo_name="div",
torch_opinfo_variant_name="no_rounding_mode",
# https://github.com/pytorch/pytorch/issues/76944
supports_two_python_scalars=True,
supports_one_python_scalar=True,
skips=(
# NotImplementedError: argument of type: <class 'complex'>
DecorateInfo(
unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_executor',
dtypes=(torch.complex32, torch.complex64, torch.complex128,)
),
# Reference result was farther (0.7433461727239705) from the precise
# computation than the torch result was (nan)!
DecorateInfo(
unittest.expectedFailure, 'TestCommon', 'test_python_ref',
dtypes=(torch.complex32,), device_type="cuda"
),
# Reference result was farther (0.7433461727239705) from the precise
# computation than the torch result was (nan)!
DecorateInfo(
unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback',
dtypes=(torch.complex32,), device_type="cuda"
),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.div",
torch_opinfo_name="div",
torch_opinfo_variant_name="trunc_rounding",
# https://github.com/pytorch/pytorch/issues/76944
supports_two_python_scalars=True,
supports_one_python_scalar=True,
decorators=(
# See https://github.com/pytorch/pytorch/issues/111126
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.div",
torch_opinfo_name="div",
torch_opinfo_variant_name="floor_rounding",
# https://github.com/pytorch/pytorch/issues/76944
supports_two_python_scalars=True,
supports_one_python_scalar=True,
decorators=(
# See https://github.com/pytorch/pytorch/issues/111126
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'),
# Reference result was farther (nan) from the precise computation than the
# torch result was (inf)!
DecorateInfo(
unittest.expectedFailure,
"TestCommon",
"test_python_ref",
dtypes=(torch.bfloat16,),
device_type="cpu",
active_if=not IS_S390X,
),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.eq",
torch_opinfo_name="eq",
),
ElementwiseBinaryPythonRefInfo(
"_refs.float_power",
torch_opinfo_name="float_power",
skips=(
# Test doesn't account for float -> double type promotion
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'),
# Complex values error with: Greatest absolute difference: nan at index
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
'test_reference_numerics_small_values',
dtypes=[torch.complex64, torch.complex128]),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
'test_reference_numerics_large_values',
dtypes=[torch.complex64, torch.complex128]),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
'test_reference_numerics_extremal_values',
dtypes=[torch.complex64, torch.complex128]),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.logaddexp",
torch_opinfo_name="logaddexp",
skips=(
# failure due to mismatch in edge cases, which boils down to what torch.exp(inf + infj) should be
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref',
dtypes=(torch.complex32, torch.complex64, torch.complex128)),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback',
dtypes=(torch.complex32, torch.complex64, torch.complex128)),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor',
dtypes=(torch.complex32, torch.complex64, torch.complex128)),
),
),
PythonRefInfo(
"_refs.logaddexp2",
torch_opinfo_name="logaddexp2",
),
ElementwiseBinaryPythonRefInfo(
"_refs.floor_divide",
torch_opinfo_name="floor_divide",
rhs_make_tensor_kwargs=dict(exclude_zero=True),
# https://github.com/pytorch/pytorch/issues/76944
supports_two_python_scalars=True,
supports_one_python_scalar=True,
# bfloat16 floor_divide compared with a float32 reference works inconsistently
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref',
dtypes=(torch.bfloat16,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_torch_fallback',
dtypes=(torch.bfloat16,)),
# bfloat16 floor_divide compared with a float32 reference works inconsistently
DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs',
dtypes=(torch.bfloat16,)),
# int8 floor divide has different results for -128 // -1 vs. NumPy
DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs',
'test_reference_numerics_small_values',
dtypes=(torch.int8,)),
            # The following tests fail on some jobs
DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs',
'test_reference_numerics_extremal_values',
dtypes=(torch.float16,)),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-3, rtol=5e-3)}),
'TestBinaryUfuncs', 'test_reference_numerics'),
# FIXME output 0: meta disagrees with real impl
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.fmax",
torch_opinfo_name="fmax",
supports_rhs_python_scalar=False,
),
ElementwiseBinaryPythonRefInfo(
"_refs.fmin",
torch_opinfo_name="fmin",
supports_rhs_python_scalar=False,
),
ElementwiseBinaryPythonRefInfo(
"_refs.fmod",
torch_opinfo_name="fmod",
rhs_make_tensor_kwargs={'exclude_zero': True},
supports_rhs_python_scalar=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref',
dtypes=(torch.bfloat16,), device_type='cpu'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_torch_fallback',
dtypes=(torch.bfloat16,), device_type='cpu'),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
'test_contig_vs_every_other',
dtypes=(torch.bfloat16,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
'test_non_contig',
dtypes=(torch.bfloat16,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
'test_reference_numerics',
dtypes=(torch.bfloat16,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
'test_reference_numerics_small_values',
dtypes=(torch.uint8,)),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.gcd",
torch_opinfo_name="gcd",
skips=(
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_reference_numerics_small_values',
dtypes=(torch.int8,)),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.ge",
torch_opinfo_name="ge",
),
ElementwiseBinaryPythonRefInfo(
"_refs.gt",
torch_opinfo_name="gt",
),
ElementwiseBinaryPythonRefInfo(
"_refs.heaviside",
torch_opinfo_name="heaviside",
supports_rhs_python_scalar=False,
skips=(
# PyTorch's heaviside does not appear to propagate NaNs
DecorateInfo(unittest.skip("Skipped!"),
'TestBinaryUfuncs',
'test_reference_numerics_extremal_values'),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.hypot",
torch_opinfo_name="hypot",
supports_rhs_python_scalar=False,
),
ElementwiseBinaryPythonRefInfo(
"_refs.igamma",
torch_opinfo_name="igamma",
),
ElementwiseBinaryPythonRefInfo(
"_refs.igammac",
torch_opinfo_name="igammac",
),
ElementwiseBinaryPythonRefInfo(
"_refs.isclose",
torch_opinfo_name="isclose",
skips=(
# Intentional xfail -- isclose does not type promote
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'),
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'),
DecorateInfo(unittest.skip("Skipped!"),
'TestBinaryUfuncs',
'test_reference_numerics_extremal_values'),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.lcm",
torch_opinfo_name="lcm",
),
ElementwiseBinaryPythonRefInfo(
"_refs.le",
torch_opinfo_name="le",
),
ElementwiseBinaryPythonRefInfo(
"_refs.logical_and",
torch_opinfo_name="logical_and",
),
ElementwiseUnaryPythonRefInfo(
"_refs.logical_not",
torch_opinfo_name="logical_not",
),
ElementwiseBinaryPythonRefInfo(
"_refs.logical_or",
torch_opinfo_name="logical_or",
),
ElementwiseBinaryPythonRefInfo(
"_refs.logical_xor",
torch_opinfo_name="logical_xor",
),
ElementwiseBinaryPythonRefInfo(
"_refs.lt",
torch_opinfo_name="lt",
),
ElementwiseBinaryPythonRefInfo(
"_refs.maximum",
torch_opinfo_name="maximum",
skips=(
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.minimum",
torch_opinfo_name="minimum",
skips=(
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.mul",
torch_opinfo_name="mul",
# https://github.com/pytorch/pytorch/issues/76944
supports_two_python_scalars=True,
supports_one_python_scalar=True,
skips=(
# Reference result was farther (0.0) from the precise computation
# than the torch result was (nan)!
DecorateInfo(
unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor',
dtypes=(torch.complex32,),
),
# Reference result was farther (0.0) from the precise computation
# than the torch result was (nan)!
DecorateInfo(
unittest.expectedFailure, 'TestCommon', 'test_python_ref',
dtypes=(torch.complex32,), device_type='cuda'
),
# Reference result was farther (0.0) from the precise computation
# than the torch result was (nan)!
DecorateInfo(
unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback',
dtypes=(torch.complex32,), device_type='cuda'
),
)
),
ElementwiseBinaryPythonRefInfo(
"_refs.ne",
torch_opinfo_name="ne",
),
ElementwiseBinaryPythonRefInfo(
"_refs.nextafter",
torch_opinfo_name="nextafter",
),
ElementwiseBinaryPythonRefInfo(
"_refs.pow",
torch_opinfo_name="pow",
decorators=(
DecorateInfo(
toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05)}),
'TestBinaryUfuncs', 'test_reference_numerics'),
DecorateInfo(
toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05),
torch.complex128: tol(atol=1e-4, rtol=1.3e-05)}),
'TestBinaryUfuncs', 'test_scalar_support'),
),
skips=(
# Reference result was farther (inf) from the precise
# computation than the torch result was (nan)!
DecorateInfo(
unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor',
dtypes=(torch.complex32,),
),
# Reference result was farther (inf) from the precise
# computation than the torch result was (nan)!
DecorateInfo(
unittest.expectedFailure, 'TestCommon', 'test_python_ref',
dtypes=(torch.complex32,), device_type="cuda"
),
# Reference result was farther (inf) from the precise
# computation than the torch result was (nan)!
DecorateInfo(
unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback',
dtypes=(torch.complex32,), device_type="cuda"
),
# Skipping integers because they are being raised to negative powers causing an error
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs',
'test_reference_numerics_small_values',
dtypes=[torch.int8, torch.int16, torch.int32, torch.int64]),
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs',
'test_reference_numerics_large_values',
dtypes=[torch.int16, torch.int32, torch.int64]),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
'test_reference_numerics',
dtypes=(torch.complex32,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
'test_reference_numerics_small_values',
dtypes=(torch.complex32, torch.complex64, torch.complex128)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
'test_reference_numerics_large_values',
dtypes=(torch.complex32, torch.complex64, torch.complex128)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
'test_reference_numerics_extremal_values',
dtypes=(torch.complex32, torch.complex64, torch.complex128)),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.remainder",
torch_opinfo_name="remainder",
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref',
dtypes=(torch.bfloat16,), device_type='cpu'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_torch_fallback',
dtypes=(torch.bfloat16,), device_type='cpu'),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
'test_reference_numerics',
dtypes=(torch.bfloat16,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
'test_reference_numerics_small_values',
dtypes=(torch.uint8,)),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.rsub",
torch_opinfo_name="rsub",
# https://github.com/pytorch/pytorch/issues/76944
skips=(
# Reference result was farther (nan) from the precise computation than
# the torch result was (nan)!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref',
dtypes=(torch.chalf,), device_type='cpu'),
# Reference result was farther (nan) from the precise computation than
# the torch result was (nan)!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback',
dtypes=(torch.chalf,), device_type='cpu'),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.sub",
torch_opinfo_name="sub",
# https://github.com/pytorch/pytorch/issues/76944
supports_two_python_scalars=True,
supports_one_python_scalar=True,
decorators=(
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-2, rtol=0),
torch.bfloat16: tol(atol=1e-5, rtol=5e-3),
torch.complex32: tol(atol=1e-5, rtol=1e-3)}),
'TestBinaryUfuncs', 'test_reference_numerics'),
DecorateInfo(
toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}),
'TestCommon', 'test_complex_half_reference_testing', device_type='cpu'),
DecorateInfo(
toleranceOverride({torch.chalf: tol(atol=5e-3, rtol=0)}),
'TestDecomp', 'test_comprehensive', device_type='cpu'),
DecorateInfo(
toleranceOverride({torch.chalf: tol(atol=5e-3, rtol=0)}),
'TestDecomp', 'test_quick', device_type='cpu'),
),
skips=(
DecorateInfo(unittest.skip("Skipped!"),
'TestBinaryUfuncs',
'test_reference_numerics',
dtypes=(torch.uint8,)),
DecorateInfo(unittest.skip("Skipped!"),
'TestBinaryUfuncs',
'test_reference_numerics_small_values',
dtypes=(torch.uint8,)),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.true_divide",
torch_opinfo_name="true_divide",
# https://github.com/pytorch/pytorch/issues/76944
supports_two_python_scalars=True,
supports_one_python_scalar=True,
skips=(
# Reference result was farther (0.7433461727239705) from the precise
# computation than the torch result was (nan)!
DecorateInfo(
unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor',
dtypes=(torch.complex32,),
),
# Reference result was farther (0.7433461727239705) from the precise
# computation than the torch result was (nan)!
DecorateInfo(
unittest.expectedFailure, 'TestCommon', 'test_python_ref',
dtypes=(torch.complex32,), device_type="cuda"
),
# Reference result was farther (0.7433461727239705) from the precise
# computation than the torch result was (nan)!
DecorateInfo(
unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback',
dtypes=(torch.complex32,), device_type="cuda"
),
),
),
#
# Elementwise Ternary Reference OpInfos
#
PythonRefInfo(
"_refs.addcdiv",
torch_opinfo_name="addcdiv",
),
PythonRefInfo(
"_refs.addcmul",
torch_opinfo_name="addcmul",
skips=(
# Reference result was farther (1.3343989849090576e-05)
# from the precise computation than the torch result
# was (9.592622518539429e-06)!
# FIXME: enable dtype-based tolerances in test_ops.py:TestCommon._ref_test_helper
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref',
dtypes=(torch.float16,), device_type="cpu"),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_torch_fallback',
dtypes=(torch.float16,), device_type="cpu"),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.clamp_min",
torch_opinfo_name="clamp_min",
skips=(
# test error disabled since rhs non-tensor python scalar is supported
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.clamp_max",
torch_opinfo_name="clamp_max",
skips=(
# test error disabled since rhs non-tensor python scalar is supported
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'),
),
),
PythonRefInfo(
"_refs.clamp",
torch_opinfo_name="clamp",
),
PythonRefInfo(
"_refs.nn.functional.triplet_margin_loss",
torch_opinfo_name="nn.functional.triplet_margin_loss",
supports_out=False,
# TODO: Uses minimum and clamp
skips=(
# AssertionError: Tensor-likes are not close!
# Greatest absolute difference: 6.103515625e-05 at index (4,) (up to 1e-05 allowed)
# Greatest relative difference: 8.519846983548175e-06 at index (4,) (up to 1.3e-06 allowed)
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref',
dtypes=(torch.uint8,), device_type="cpu"),
)
),
ElementwiseBinaryPythonRefInfo(
"_refs.xlogy",
torch_opinfo_name="xlogy",
supports_one_python_scalar=True,
),
#
# Elementwise Binary Special OpInfos
#
ElementwiseBinaryPythonRefInfo(
"_refs.special.xlog1py",
torch_opinfo_name="special.xlog1py",
supports_one_python_scalar=True,
),
#
# Data Conversion & Data Movement Opinfos
#
ElementwiseUnaryPythonRefInfo(
"_refs._conversions.bfloat16",
torch_opinfo_name="bfloat16",
# TODO: If self already has the correct dtype and device, then self is
# returned ignoring memory_format.
# https://github.com/pytorch/pytorch/issues/86558
validate_view_consistency=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs._conversions.bool",
torch_opinfo_name="bool",
# TODO: If self already has the correct dtype and device, then self is
# returned ignoring memory_format.
# https://github.com/pytorch/pytorch/issues/86558
validate_view_consistency=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs._conversions.byte",
torch_opinfo_name="byte",
# TODO: If self already has the correct dtype and device, then self is
# returned ignoring memory_format.
# https://github.com/pytorch/pytorch/issues/86558
validate_view_consistency=False,
skips=(
DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'),
)
),
ElementwiseUnaryPythonRefInfo(
"_refs._conversions.char",
torch_opinfo_name="char",
# TODO: If self already has the correct dtype and device, then self is
# returned ignoring memory_format.
# https://github.com/pytorch/pytorch/issues/86558
validate_view_consistency=False,
skips=(
DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'),
)
),
ElementwiseBinaryPythonRefInfo(
"_refs._conversions.complex",
torch_opinfo_name="complex",
error_inputs_func=partial(error_inputs_complex, is_ref=True),
skips=(
# Tests don't account for complex's type promotion semantics
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'),
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'),
)
),
ElementwiseBinaryPythonRefInfo(
"_refs._conversions.polar",
torch_opinfo_name="polar",
skips=(
# Tests don't account for complex's type promotion semantics
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'),
DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'),
)
),
ElementwiseUnaryPythonRefInfo(
"_refs._conversions.double",
torch_opinfo_name="double",
# TODO: If self already has the correct dtype and device, then self is
# returned ignoring memory_format.
# https://github.com/pytorch/pytorch/issues/86558
validate_view_consistency=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs._conversions.float",
torch_opinfo_name="float",
# TODO: If self already has the correct dtype and device, then self is
# returned ignoring memory_format.
# https://github.com/pytorch/pytorch/issues/86558
validate_view_consistency=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs._conversions.half",
torch_opinfo_name="half",
# TODO: If self already has the correct dtype and device, then self is
# returned ignoring memory_format.
# https://github.com/pytorch/pytorch/issues/86558
validate_view_consistency=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs._conversions.int",
torch_opinfo_name="int",
# TODO: If self already has the correct dtype and device, then self is
# returned ignoring memory_format.
# https://github.com/pytorch/pytorch/issues/86558
validate_view_consistency=False,
skips=(
DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'),
)
),
ElementwiseUnaryPythonRefInfo(
"_refs._conversions.long",
torch_opinfo_name="long",
# TODO: If self already has the correct dtype and device, then self is
# returned ignoring memory_format.
# https://github.com/pytorch/pytorch/issues/86558
validate_view_consistency=False,
skips=(
DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'),
)
),
ElementwiseUnaryPythonRefInfo(
"_refs._conversions.short",
torch_opinfo_name="short",
# TODO: If self already has the correct dtype and device, then self is
# returned ignoring memory_format.
# https://github.com/pytorch/pytorch/issues/86558
validate_view_consistency=False,
skips=(
DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'),
)
),
ElementwiseUnaryPythonRefInfo(
"_refs._conversions.chalf",
torch_opinfo_name="chalf",
# TODO: If self already has the correct dtype and device, then self is
# returned ignoring memory_format.
# https://github.com/pytorch/pytorch/issues/86558
validate_view_consistency=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs._conversions.cfloat",
torch_opinfo_name="cfloat",
# TODO: If self already has the correct dtype and device, then self is
# returned ignoring memory_format.
# https://github.com/pytorch/pytorch/issues/86558
validate_view_consistency=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs._conversions.cdouble",
torch_opinfo_name="cdouble",
# TODO: If self already has the correct dtype and device, then self is
# returned ignoring memory_format.
# https://github.com/pytorch/pytorch/issues/86558
validate_view_consistency=False,
),
PythonRefInfo(
"_refs.clone",
torch_opinfo_name="clone",
),
#
# View & Shape OpInfos
#
PythonRefInfo(
"_refs.alias_copy",
torch_opinfo_name="alias_copy",
supports_out=True,
),
PythonRefInfo(
"_refs.atleast_1d",
torch_opinfo_name="atleast_1d",
validate_view_consistency=False,
),
PythonRefInfo(
"_refs.atleast_2d",
torch_opinfo_name="atleast_2d",
validate_view_consistency=False,
),
PythonRefInfo(
"_refs.atleast_3d",
torch_opinfo_name="atleast_3d",
validate_view_consistency=False,
),
PythonRefInfo(
"_refs.as_strided",
torch_opinfo_name="as_strided",
# FIXME: doesn't support chalf
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
skips=(
# cloned_mutable_input.is_same(returned_output) INTERNAL ASSERT FAILED
DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_conj_view'),
),
),
PythonRefInfo(
"_refs.as_strided_copy",
torch_opinfo_name="as_strided_copy",
supports_out=True,
# FIXME: doesn't support chalf
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
skips=(
# cloned_mutable_input.is_same(returned_output) INTERNAL ASSERT FAILED
DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_conj_view'),
# The view function this decompose into does not have a ref
DecorateInfo(unittest.expectedFailure, "TestCommon", "test_python_ref"),
),
),
PythonRefInfo(
"_refs.as_strided",
torch_opinfo_name="as_strided",
torch_opinfo_variant_name="partial_views",
# FIXME: doesn't support chalf
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
skips=(
# cloned_mutable_input.is_same(returned_output) INTERNAL ASSERT FAILED
DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu'),
),
),
PythonRefInfo(
"_refs.as_strided_scatter",
torch_opinfo_name="as_strided_scatter",
# returns a view of an intermediate tensor (as_strided)
validate_view_consistency=False,
),
PythonRefInfo(
"_refs.block_diag",
torch_opinfo_name="block_diag",
),
PythonRefInfo(
"_refs.broadcast_shapes",
torch_opinfo_name="broadcast_shapes",
),
PythonRefInfo(
"_refs.broadcast_tensors",
torch_opinfo_name="broadcast_tensors",
),
PythonRefInfo(
"_refs.broadcast_to",
torch_opinfo_name="broadcast_to",
),
PythonRefInfo(
"_refs.cat",
torch_opinfo_name="cat",
skips=(
# FIXME: AssertionError: RuntimeError not raised
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'),
),
),
PythonRefInfo(
"_refs.chunk",
torch_opinfo_name="chunk",
),
PythonRefInfo(
"_refs.column_stack",
torch_opinfo_name="column_stack",
),
ElementwiseUnaryPythonRefInfo(
"_refs.conj",
torch_opinfo_name="conj",
),
PythonRefInfo(
"_refs.constant_pad_nd",
torch_opinfo_name="constant_pad_nd",
),
PythonRefInfo(
"_refs.contiguous",
torch_opinfo_name="contiguous",
),
ElementwiseUnaryPythonRefInfo(
"_refs.deg2rad",
torch_opinfo_name="deg2rad",
decorators=(precisionOverride({torch.bfloat16: 7e-1,
torch.float16: 7e-1}),),
),
PythonRefInfo(
"_refs.dsplit",
torch_opinfo_name="dsplit",
),
PythonRefInfo(
"_refs.diag",
torch_opinfo_name="diag",
),
PythonRefInfo(
"_refs.diagonal",
torch_opinfo_name="diagonal",
),
PythonRefInfo(
"_refs.diagonal_copy",
torch_opinfo_name="diagonal_copy",
supports_out=True,
),
PythonRefInfo(
"_refs.diagonal_scatter",
torch_opinfo_name="diagonal_scatter",
supports_out=True,
# returns a view of an intermediate tensor (as_strided)
validate_view_consistency=False,
),
PythonRefInfo(
"_refs.diag_embed",
torch_opinfo_name="diag_embed",
supports_out=True,
),
PythonRefInfo(
"_refs.dstack",
torch_opinfo_name="dstack",
skips=(
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'),
),
),
PythonRefInfo(
"_refs.expand",
torch_opinfo_name="expand",
),
PythonRefInfo(
"_refs.expand_as",
torch_opinfo_name="expand_as",
),
PythonRefInfo(
"_refs.expand_copy",
torch_opinfo_name="expand_copy",
supports_out=True,
),
PythonRefInfo(
"_refs.flatten",
torch_opinfo_name="flatten",
),
PythonRefInfo(
"_refs.flip",
torch_opinfo_name="flip",
),
PythonRefInfo(
"_refs.fliplr",
torch_opinfo_name="fliplr",
),
PythonRefInfo(
"_refs.flipud",
torch_opinfo_name="flipud",
),
PythonRefInfo(
"_refs.hstack",
torch_opinfo_name="hstack",
skips=(
# https://github.com/pytorch/pytorch/issues/78613
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'),
),
),
PythonRefInfo(
"_refs.narrow",
torch_opinfo_name="narrow",
error_inputs_func=partial(error_inputs_narrow_narrow_copy, is_narrow=True, is_ref=True),
),
PythonRefInfo(
"_refs.narrow_copy",
torch_opinfo_name="narrow_copy",
supports_out=True,
error_inputs_func=partial(error_inputs_narrow_narrow_copy, is_narrow=False, is_ref=True),
skips=(
# The view function this decompose into does not have a ref
DecorateInfo(unittest.expectedFailure, "TestCommon", "test_python_ref"),
),
),
PythonRefInfo(
"_refs.nn.functional.group_norm",
torch_opinfo_name="nn.functional.group_norm",
validate_view_consistency=False,
),
PythonRefInfo(
"_refs.native_layer_norm",
torch_opinfo_name="native_layer_norm",
skips=(
DecorateInfo(unittest.skip("Skipped!"), "TestCommon", "test_python_ref",
device_type="cpu", dtypes=(torch.float32,)),
DecorateInfo(unittest.skip("Skipped!"), "TestCommon", "test_python_ref_torch_fallback",
device_type="cpu", dtypes=(torch.float32,)),
),
),
PythonRefInfo(
"_refs.permute",
torch_opinfo_name="permute",
),
PythonRefInfo(
"_refs.permute_copy",
torch_opinfo_name="permute_copy",
supports_out=True,
),
ElementwiseUnaryPythonRefInfo(
"_refs.rad2deg",
torch_opinfo_name="rad2deg",
decorators=(precisionOverride({torch.bfloat16: 7e-1,
torch.float16: 7e-1}),),
),
PythonRefInfo(
"_refs.ravel",
torch_opinfo_name="ravel",
),
PythonRefInfo(
"_refs.renorm",
torch_opinfo_name="renorm",
),
PythonRefInfo(
"_refs.repeat",
torch_opinfo_name="repeat",
validate_view_consistency=False,
),
PythonRefInfo(
"_refs.reshape",
torch_opinfo_name="reshape",
),
PythonRefInfo(
"_refs.reshape_as",
torch_opinfo_name="reshape_as",
),
PythonRefInfo(
"_refs.roll",
torch_opinfo_name="roll",
validate_view_consistency=False,
),
PythonRefInfo(
"_refs.rot90",
torch_opinfo_name="rot90",
validate_view_consistency=False,
),
PythonRefInfo(
"_refs.select_scatter",
torch_opinfo_name="select_scatter",
),
PythonRefInfo(
"_refs.stack",
torch_opinfo_name="stack",
validate_view_consistency=False,
),
PythonRefInfo(
"_refs.squeeze",
torch_opinfo_name="squeeze",
),
PythonRefInfo(
"_refs.squeeze_copy",
torch_opinfo_name="squeeze_copy",
supports_out=True,
),
PythonRefInfo(
"_refs.squeeze",
torch_opinfo_name="squeeze",
torch_opinfo_variant_name="multiple",
),
PythonRefInfo(
"_refs.tensor_split",
torch_opinfo_name="tensor_split",
skips=(
# RuntimeError: no _refs support for torch.Tensor.tolist
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'),
),
),
PythonRefInfo(
"_refs.hsplit",
torch_opinfo_name="hsplit",
),
PythonRefInfo(
"_refs.vsplit",
torch_opinfo_name="vsplit",
),
PythonRefInfo(
"_refs.dot",
torch_opinfo_name="dot",
error_inputs_func=partial(error_inputs_dot_vdot, is_ref=True),
# .conj() does not set ._is_view() correctly in ATen
validate_view_consistency=False,
skips=(
# RuntimeError: no _refs support for torch.Tensor.is_conj
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', dtypes=[torch.complex64, torch.complex128]),
),
),
PythonRefInfo(
"_refs.vdot",
torch_opinfo_name="vdot",
error_inputs_func=partial(error_inputs_dot_vdot, is_ref=True),
# .conj() does not set ._is_view() correctly in ATen
validate_view_consistency=False,
skips=(
# RuntimeError: no _refs support for torch.Tensor.is_conj
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', dtypes=[torch.complex64, torch.complex128]),
),
),
PythonRefInfo(
"_refs.transpose",
torch_opinfo_name="transpose",
),
PythonRefInfo(
"_refs.transpose_copy",
torch_opinfo_name="transpose_copy",
supports_out=True,
),
PythonRefInfo(
"_refs.t",
torch_opinfo_name="t",
),
PythonRefInfo(
"_refs.t_copy",
torch_opinfo_name="t_copy",
supports_out=True,
),
PythonRefInfo(
"_refs.T",
torch_opinfo_name="T",
error_inputs_func=partial(error_inputs_T, has_ndims_error=True),
),
PythonRefInfo(
"_refs.unbind_copy",
torch_opinfo_name="unbind_copy",
),
PythonRefInfo(
"_refs.unfold",
torch_opinfo_name="unfold",
),
PythonRefInfo(
"_refs.unfold_copy",
torch_opinfo_name="unfold_copy",
supports_out=True,
),
PythonRefInfo(
"_refs.unsqueeze",
torch_opinfo_name="unsqueeze",
),
PythonRefInfo(
"_refs.unsqueeze_copy",
torch_opinfo_name="unsqueeze_copy",
supports_out=True,
),
PythonRefInfo(
"_refs.view",
torch_opinfo_name="view",
),
PythonRefInfo(
"_refs.view_as",
torch_opinfo_name="view_as",
),
PythonRefInfo(
"_refs.view_copy",
torch_opinfo_name="view_copy",
supports_out=True,
),
PythonRefInfo(
"_refs.vstack",
torch_opinfo_name="vstack",
skips=(
# https://github.com/pytorch/pytorch/issues/78613
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'),
),
),
PythonRefInfo(
"_refs.unflatten",
torch_opinfo_name="unflatten",
),
PythonRefInfo(
"_refs.unbind",
torch_opinfo_name="unbind",
),
#
# Reduction Reference OpInfos
#
ReductionPythonRefInfo(
"_refs.all",
torch_opinfo_name="all",
skips=(
# FIXME: uint8 input returns uint8 instead of bool
DecorateInfo(
unittest.expectedFailure, 'TestReductions', 'test_result_dtype',
dtypes=[torch.uint8]),
),
),
ReductionPythonRefInfo(
"_refs.amax",
torch_opinfo_name="amax",
error_inputs_func=partial(error_inputs_aminmax_amax_amin, is_ref=True),
skips=(
# FIXME: reduces all dimensions when dim=[]
DecorateInfo(
unittest.expectedFailure, 'TestReductions', 'test_dim_empty'),
DecorateInfo(
unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'),
),
),
ReductionPythonRefInfo(
"_refs.amin",
torch_opinfo_name="amin",
error_inputs_func=partial(error_inputs_aminmax_amax_amin, is_ref=True),
skips=(
# FIXME: reduces all dimensions when dim=[]
DecorateInfo(
unittest.expectedFailure, 'TestReductions', 'test_dim_empty'),
DecorateInfo(
unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'),
),
),
ReductionPythonRefInfo(
"_refs.any",
torch_opinfo_name="any",
skips=(
# FIXME: uint8 input returns uint8 instead of bool
DecorateInfo(
unittest.expectedFailure, 'TestReductions', 'test_result_dtype',
dtypes=[torch.uint8]),
),
),
ReductionPythonRefInfo(
"_refs.count_nonzero",
torch_opinfo_name="count_nonzero",
skips=(
# FIXME: count_nonzero does not accept keepdim kwarg
DecorateInfo(
unittest.skip("Skipped!"), 'TestReductions',
'test_dim_default_keepdim'),
DecorateInfo(
unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
DecorateInfo(
unittest.skip("Skipped!"), 'TestReductions', 'test_dim_single_keepdim'),
DecorateInfo(
unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
DecorateInfo(
unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_keepdim'),
DecorateInfo(
unittest.skip("Skipped!"), 'TestReductions',
'test_dim_multi_unsorted_keepdim'),
# FIXME: dim=[] reduces all dimensions
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
),
),
ReductionPythonRefInfo(
"_refs.mean",
torch_opinfo_name="mean",
supports_out=True,
error_inputs_func=partial(error_inputs_mean, is_ref=True),
skips=(
# FIXME: reduces all dimensions when dim=[]
DecorateInfo(
unittest.expectedFailure, 'TestReductions', 'test_dim_empty'),
DecorateInfo(
unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'),
),
),
ReductionPythonRefInfo(
"_refs.std",
torch_opinfo_name="std",
supports_out=True,
skips=(
# FIXME: reduces all dimensions when dim=[]
DecorateInfo(
unittest.expectedFailure, 'TestReductions', 'test_dim_empty'),
DecorateInfo(
unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: improve precision
DecorateInfo(
unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input',
dtypes=(torch.float16,)),
DecorateInfo(
unittest.skip("Skipped!"), 'TestReductions',
'test_ref_duplicate_values',
dtypes=(torch.float16,)),
),
),
# std_mean and var_mean are not ReductionInfos
PythonRefInfo(
"_refs.std_mean",
torch_opinfo_name="std_mean",
),
ReductionPythonRefInfo(
"_refs.sum",
torch_opinfo_name="sum",
supports_out=True,
skips=(
# FIXME: doesn't test out behavior properly for this operator
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
# FIXME: mean reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(
unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: improve precision
DecorateInfo(
unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input',
dtypes=[torch.float16]),
DecorateInfo(
unittest.skip("Skipped!"), 'TestReductions',
'test_ref_duplicate_values',
dtypes=[torch.float16]),
DecorateInfo(
unittest.skip("Skipped!"), 'TestOperators', 'test_reduction_all',
dtypes=[torch.float32]),
),
),
PythonRefInfo(
"_refs.cumsum",
torch_opinfo_name="cumsum",
supports_out=True,
skips=(
# doesn't test out behavior properly for this operator
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
),
),
PythonRefInfo(
"_refs.cumprod",
torch_opinfo_name="cumprod",
supports_out=True,
skips=(
# doesn't test out behavior properly for this operator
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
),
),
PythonRefInfo(
"_refs.sum_to_size",
torch_opinfo_name="sum_to_size",
validate_view_consistency=False,
),
ReductionPythonRefInfo(
"_refs.prod",
torch_opinfo_name="prod",
supports_out=True,
supports_multiple_dims=True,
skips=(
# FIXME: doesn't test out behavior properly for this operator
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
# FIXME: reduces all dimensions when dim=[]
DecorateInfo(
unittest.expectedFailure, 'TestReductions', 'test_dim_empty'),
DecorateInfo(
unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: improve precision
DecorateInfo(
unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input',
dtypes=[torch.float16, torch.complex64]),
),
),
ReductionPythonRefInfo(
"_refs.var",
torch_opinfo_name="var",
supports_out=True,
skips=(
# FIXME: reduces all dimensions when dim=[]
DecorateInfo(
unittest.expectedFailure, 'TestReductions', 'test_dim_empty'),
DecorateInfo(
unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: improve precision
DecorateInfo(
unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input'),
),
),
PythonRefInfo(
"_refs.var_mean",
torch_opinfo_name="var_mean",
validate_view_consistency=False,
),
#
# Linear Algebra Operators
#
PythonRefInfo(
"_refs.addr",
torch_opinfo_name="addr",
decorators=(
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref',),
),
),
PythonRefInfo(
"_refs.trace",
torch_opinfo_name="trace",
),
PythonRefInfo(
"_refs.norm",
torch_opinfo_name="norm",
supports_out=True,
# Uses vector_norm inside and vector_norm is affected by
# https://github.com/pytorch/pytorch/issues/77216
validate_view_consistency=False,
),
#
# Tensor Creation Reference OpInfos
#
PythonRefInfo(
"_refs.empty",
torch_opinfo_name="empty",
skips=(
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestCommon',
'test_python_ref'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestCommon',
'test_python_ref_torch_fallback'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestCommon',
'test_out'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestCommon',
'test_out_warning'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestMathBits',
'test_conj_view'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestMathBits',
'test_neg_conj_view'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestMathBits',
'test_neg_view'),
# FIXME: shouldn't check empty results
DecorateInfo(unittest.skip("Can't check result for empty"), 'TestCommon', 'test_python_ref_executor'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
),
),
PythonRefInfo(
"_refs.empty_like",
torch_opinfo_name="empty_like",
skips=(
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestCommon',
'test_python_ref'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestCommon',
'test_python_ref_torch_fallback'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestCommon',
'test_out'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestCommon',
'test_out_warning'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestMathBits',
'test_conj_view'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestMathBits',
'test_neg_conj_view'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestMathBits',
'test_neg_view'),
# FIXME: should not compare results of empty_like
DecorateInfo(unittest.skip("Can't check result for empty_like"), 'TestCommon', 'test_python_ref_executor'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
),
),
PythonRefInfo(
"_refs.randn",
torch_opinfo_name="randn",
op=lambda *args, **kwargs: wrapper_set_seed(refs.randn, *args, **kwargs),
skips=(
# see https://github.com/pytorch/pytorch/issues/85121
DecorateInfo(unittest.skip("make_traced() doesn't set seed properly!"),
'TestCommon',
'test_python_ref_executor'),
# These tests expect the input to be a tensor or a sequence of tensors
DecorateInfo(unittest.skip("Test expects tensor input"), "TestCommon", "test_noncontiguous_samples"),
DecorateInfo(unittest.skip("Test expects tensor input"), 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.skip("Test expects tensor input"), 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.skip("Test expects tensor input"), 'TestMathBits', 'test_neg_conj_view'),
),
),
PythonRefInfo(
"_refs.eye",
torch_opinfo_name="eye",
skips=(
# skip these tests since we have non tensor input
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'),
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'),
),
),
PythonRefInfo(
"_refs.new_empty",
torch_opinfo_name="new_empty",
skips=(
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestCommon',
'test_python_ref'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestCommon',
'test_python_ref_torch_fallback'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestCommon',
'test_out'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestCommon',
'test_out_warning'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestMathBits',
'test_conj_view'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestMathBits',
'test_neg_conj_view'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestMathBits',
'test_neg_view'),
# FIXME: should not compare results of empty_like
DecorateInfo(unittest.skip("Can't check result for new_empty"), 'TestCommon', 'test_python_ref_executor'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
),
),
PythonRefInfo(
"_refs.new_empty_strided",
torch_opinfo_name="new_empty_strided",
skips=(
DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"),
'TestCommon',
'test_python_ref'),
DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"),
'TestCommon',
'test_python_ref_torch_fallback'),
DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"),
'TestMathBits',
'test_conj_view'),
DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"),
'TestMathBits',
'test_neg_conj_view'),
DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"),
'TestMathBits',
'test_neg_view'),
DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"),
'TestCommon',
'test_python_ref_executor'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
),
),
PythonRefInfo(
"_refs.empty_strided",
torch_opinfo_name="empty_strided",
skips=(
DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"),
'TestCommon',
'test_python_ref'),
DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"),
'TestCommon',
'test_python_ref_torch_fallback'),
DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"),
'TestMathBits',
'test_conj_view'),
DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"),
'TestMathBits',
'test_neg_conj_view'),
DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"),
'TestMathBits',
'test_neg_view'),
DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"),
'TestCommon',
'test_python_ref_executor'),
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
),
),
PythonRefInfo(
"_refs.new_full",
torch_opinfo_name="new_full",
),
PythonRefInfo(
"_refs.new_ones",
torch_opinfo_name="new_ones",
),
PythonRefInfo(
"_refs.new_zeros",
torch_opinfo_name="new_zeros",
),
#
# Conditional Reference OpInfos
#
PythonRefInfo(
"_refs.masked_fill",
torch_opinfo_name="masked_fill",
skips=(
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'),
),
),
PythonRefInfo(
"_refs.where",
torch_opinfo_name="where",
op=lambda self, condition, other: refs.where(condition, self, other),
supports_out=False,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors', device_type='cuda'),
),
),
PythonRefInfo(
"_refs.index_select",
torch_opinfo_name="index_select",
# empty_strided
skips=(
# no _refs support for Tensor.__setitem__
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'),
# Sample out= with a stride of zero. This _out operation checks that the input has no
# inner overlap
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'),)
),
PythonRefInfo(
"_refs.index_copy",
torch_opinfo_name="index_copy",
# empty_strided
skips=(
# no _refs support for Tensor.__setitem__
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'),
),
),
PythonRefInfo(
"_refs.index_add",
torch_opinfo_name="index_add",
# empty_strided
skips=(
# no _refs support for Tensor.__setitem__
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'),
),
),
PythonRefInfo(
"_refs.index_fill",
torch_opinfo_name="index_fill",
# empty_strided
skips=(
# no _refs support for Tensor.__setitem__
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'),)
),
#
# Test-related functions
#
PythonRefInfo(
"_refs.allclose",
torch_opinfo_name="allclose",
),
#
# Misc functions
#
PythonRefInfo(
"_refs.stft",
torch_opinfo_name="stft",
skips=[
# RuntimeError: no _refs support for aten.pad
DecorateInfo(
unittest.expectedFailure, 'TestCommon', 'test_python_ref'
),
],
),
PythonRefInfo(
"_refs.istft",
torch_opinfo_name="istft",
skips=[
# RuntimeError: no _refs support for aten.unfold_backward
DecorateInfo(
unittest.expectedFailure, 'TestCommon', 'test_python_ref'
),
DecorateInfo(
unittest.skip("Expected: unfold_backward() got an unexpected keyword argument 'input_sizes'"),
'TestCommon',
'test_python_ref_executor',
dtypes=(torch.complex64, torch.complex128),
),
],
),
PythonRefInfo(
"_refs.view_as_complex",
torch_opinfo_name="view_as_complex",
),
PythonRefInfo(
"_refs.split_with_sizes",
torch_opinfo_name="split_with_sizes",
),
]
# Fold in the reference-op entries defined under torch.testing._internal.opinfo.
python_ref_db += opinfo.definitions.python_ref_db

# Common operator groupings derived from the OpInfo databases above.
ops_and_refs = op_db + python_ref_db

unary_ufuncs = [info for info in ops_and_refs if isinstance(info, UnaryUfuncInfo)]
binary_ufuncs = [info for info in ops_and_refs if isinstance(info, BinaryUfuncInfo)]
binary_ufuncs_and_refs = tuple(info for info in ops_and_refs if isinstance(info, BinaryUfuncInfo))
spectral_funcs = [info for info in ops_and_refs if isinstance(info, SpectralFuncInfo)]
shape_funcs = [info for info in ops_and_refs if isinstance(info, ShapeFuncInfo)]
reduction_ops = [info for info in ops_and_refs if isinstance(info, ReductionOpInfo)]

# Sparse groupings are drawn from op_db only.
sparse_unary_ufuncs = [info for info in op_db
                       if isinstance(info, UnaryUfuncInfo) and info.supports_sparse]
sparse_csr_unary_ufuncs = [info for info in op_db
                           if isinstance(info, UnaryUfuncInfo) and info.supports_sparse_csr]
sparse_reduction_ops = [info for info in op_db
                        if isinstance(info, ReductionOpInfo) and info.supports_sparse]

# Reductions that carry a reference implementation, and the masked subsets.
reference_filtered_ops = [info for info in reduction_ops if info.ref is not None]
reference_masked_ops = [info for info in reference_filtered_ops
                        if info.name.startswith('masked.')]
sparse_masked_reduction_ops = [info for info in sparse_reduction_ops
                               if info.name.startswith('masked.')]
def index_variable(shape, max_indices, device=torch.device('cpu')):
    """Return a random ``torch.long`` tensor of ``shape`` with values in ``[0, max_indices)``.

    ``shape`` may be a single int, which is treated as a 1-D shape.
    """
    dims = shape if isinstance(shape, tuple) else (shape,)
    return torch.testing.make_tensor(
        *dims, dtype=torch.long, device=device, low=0, high=max_indices
    )
def gather_variable(shape, index_dim, max_indices, duplicate=False, device=torch.device('cpu')):
    """Build a 2-D ``torch.long`` index tensor suitable for ``torch.gather``.

    Each slice taken along ``index_dim`` holds distinct values drawn from
    ``range(max_indices)`` (a prefix of a random permutation).  With
    ``duplicate=True`` the first slice along the other (batch) dimension is
    overwritten with a copy of the second one.
    """
    assert len(shape) == 2
    assert index_dim < 2
    other_dim = 1 - index_dim
    slice_len = shape[other_dim]
    result = torch.zeros(*shape, dtype=torch.long, device=device)
    for pos in range(shape[index_dim]):
        perm = torch.randperm(max_indices, device=device)
        result.select(index_dim, pos).copy_(perm[:slice_len])
    if duplicate:
        result.select(other_dim, 0).copy_(result.select(other_dim, 1))
    return result
def bernoulli_scalar():
    """Return a random 0-dim bool tensor, sampled in place via ``bernoulli_``."""
    coin = torch.tensor(0, dtype=torch.bool)
    return coin.bernoulli_()
def mask_not_all_zeros(shape):
    """Return a random bool mask of ``shape`` guaranteed to contain at least one True."""
    assert len(shape) > 0
    # Resample until the mask has at least one set element.
    while True:
        candidate = torch.randn(shape).gt(0)
        if candidate.any():
            return candidate
# Copied from functorch
def xfail(op_name, variant_name='', *, device_type=None, dtypes=None):
    """Describe an OpInfo entry that ``skipOps`` should mark as an expected failure."""
    expected_failure = True
    return op_name, variant_name, device_type, dtypes, expected_failure
def skip(op_name, variant_name='', *, device_type=None, dtypes=None):
    """Describe an OpInfo entry that ``skipOps`` should skip outright."""
    expected_failure = False
    return op_name, variant_name, device_type, dtypes, expected_failure
def skipOps(test_case_name, base_test_name, to_skip):
    """Attach skip/xfail ``DecorateInfo`` markers to matching OpInfos in ``op_db``.

    Args:
        test_case_name: name of the test class the decoration applies to.
        base_test_name: name of the test method the decoration applies to.
        to_skip: iterable of tuples produced by :func:`xfail` / :func:`skip`:
            ``(op_name, variant_name, device_type, dtypes, expected_failure)``.

    Returns:
        A no-op decorator, so this can be applied to the test class/function.

    Raises:
        AssertionError: if an entry does not match any OpInfo in ``op_db``.
    """
    all_opinfos = op_db
    # NOTE: don't name the loop variable ``xfail`` -- that would shadow the
    # helper function of the same name defined above.
    for entry in to_skip:
        op_name, variant_name, device_type, dtypes, expected_failure = entry
        matching_opinfos = [o for o in all_opinfos
                            if o.name == op_name and o.variant_test_name == variant_name]
        assert len(matching_opinfos) >= 1, f"Couldn't find OpInfo for {entry}"
        # expectedFailure and skip differ only in the marker handed to DecorateInfo,
        # so build the marker once instead of duplicating the whole branch.
        marker = (unittest.expectedFailure if expected_failure
                  else unittest.skip("Skipped!"))
        for op in matching_opinfos:
            decorator = DecorateInfo(marker,
                                     test_case_name, base_test_name,
                                     device_type=device_type, dtypes=dtypes)
            op.decorators = (*op.decorators, decorator)

    # This decorator doesn't modify fn in any way
    def wrapped(fn):
        return fn
    return wrapped
| foreach_pointwise_sample_func |
python | pandas-dev__pandas | pandas/tests/frame/test_stack_unstack.py | {
"start": 60001,
"end": 100501
} | class ____:
def test_unstack(self, multiindex_year_month_day_dataframe_random_data):
# just check that it works for now
ymd = multiindex_year_month_day_dataframe_random_data
unstacked = ymd.unstack()
unstacked.unstack()
# test that ints work
ymd.astype(int).unstack()
# test that int32 work
ymd.astype(np.int32).unstack()
@pytest.mark.parametrize(
"result_rows,result_columns,index_product,expected_row",
[
(
[[1, 1, None, None, 30.0, None], [2, 2, None, None, 30.0, None]],
["ix1", "ix2", "col1", "col2", "col3", "col4"],
2,
[None, None, 30.0, None],
),
(
[[1, 1, None, None, 30.0], [2, 2, None, None, 30.0]],
["ix1", "ix2", "col1", "col2", "col3"],
2,
[None, None, 30.0],
),
(
[[1, 1, None, None, 30.0], [2, None, None, None, 30.0]],
["ix1", "ix2", "col1", "col2", "col3"],
None,
[None, None, 30.0],
),
],
)
def test_unstack_partial(
self, result_rows, result_columns, index_product, expected_row
):
# check for regressions on this issue:
# https://github.com/pandas-dev/pandas/issues/19351
# make sure DataFrame.unstack() works when its run on a subset of the DataFrame
# and the Index levels contain values that are not present in the subset
result = DataFrame(result_rows, columns=result_columns).set_index(
["ix1", "ix2"]
)
result = result.iloc[1:2].unstack("ix2")
expected = DataFrame(
[expected_row],
columns=MultiIndex.from_product(
[result_columns[2:], [index_product]], names=[None, "ix2"]
),
index=Index([2], name="ix1"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_multiple_no_empty_columns(self):
index = MultiIndex.from_tuples(
[(0, "foo", 0), (0, "bar", 0), (1, "baz", 1), (1, "qux", 1)]
)
s = Series(np.random.default_rng(2).standard_normal(4), index=index)
unstacked = s.unstack([1, 2])
expected = unstacked.dropna(axis=1, how="all")
tm.assert_frame_equal(unstacked, expected)
@pytest.mark.filterwarnings(
"ignore:The previous implementation of stack is deprecated"
)
def test_stack(self, multiindex_year_month_day_dataframe_random_data, future_stack):
ymd = multiindex_year_month_day_dataframe_random_data
# regular roundtrip
unstacked = ymd.unstack()
restacked = unstacked.stack(future_stack=future_stack)
if future_stack:
# NA values in unstacked persist to restacked in version 3
restacked = restacked.dropna(how="all")
tm.assert_frame_equal(restacked, ymd)
unlexsorted = ymd.sort_index(level=2)
unstacked = unlexsorted.unstack(2)
restacked = unstacked.stack(future_stack=future_stack)
if future_stack:
# NA values in unstacked persist to restacked in version 3
restacked = restacked.dropna(how="all")
tm.assert_frame_equal(restacked.sort_index(level=0), ymd)
unlexsorted = unlexsorted[::-1]
unstacked = unlexsorted.unstack(1)
restacked = unstacked.stack(future_stack=future_stack).swaplevel(1, 2)
if future_stack:
# NA values in unstacked persist to restacked in version 3
restacked = restacked.dropna(how="all")
tm.assert_frame_equal(restacked.sort_index(level=0), ymd)
unlexsorted = unlexsorted.swaplevel(0, 1)
unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1)
restacked = unstacked.stack(0, future_stack=future_stack).swaplevel(1, 2)
if future_stack:
# NA values in unstacked persist to restacked in version 3
restacked = restacked.dropna(how="all")
tm.assert_frame_equal(restacked.sort_index(level=0), ymd)
# columns unsorted
unstacked = ymd.unstack()
restacked = unstacked.stack(future_stack=future_stack)
if future_stack:
# NA values in unstacked persist to restacked in version 3
restacked = restacked.dropna(how="all")
tm.assert_frame_equal(restacked, ymd)
# more than 2 levels in the columns
unstacked = ymd.unstack(1).unstack(1)
result = unstacked.stack(1, future_stack=future_stack)
expected = ymd.unstack()
tm.assert_frame_equal(result, expected)
result = unstacked.stack(2, future_stack=future_stack)
expected = ymd.unstack(1)
tm.assert_frame_equal(result, expected)
result = unstacked.stack(0, future_stack=future_stack)
expected = ymd.stack(future_stack=future_stack).unstack(1).unstack(1)
tm.assert_frame_equal(result, expected)
# not all levels present in each echelon
unstacked = ymd.unstack(2).loc[:, ::3]
stacked = unstacked.stack(future_stack=future_stack).stack(
future_stack=future_stack
)
ymd_stacked = ymd.stack(future_stack=future_stack)
if future_stack:
# NA values in unstacked persist to restacked in version 3
stacked = stacked.dropna(how="all")
ymd_stacked = ymd_stacked.dropna(how="all")
tm.assert_series_equal(stacked, ymd_stacked.reindex(stacked.index))
# stack with negative number
result = ymd.unstack(0).stack(-2, future_stack=future_stack)
expected = ymd.unstack(0).stack(0, future_stack=future_stack)
tm.assert_equal(result, expected)
@pytest.mark.filterwarnings(
"ignore:The previous implementation of stack is deprecated"
)
@pytest.mark.parametrize(
"idx, exp_idx",
[
[
list("abab"),
MultiIndex(
levels=[["a", "b"], ["1st", "2nd"]],
codes=[np.tile(np.arange(2).repeat(3), 2), np.tile([0, 1, 0], 4)],
),
],
[
MultiIndex.from_tuples((("a", 2), ("b", 1), ("a", 1), ("b", 2))),
MultiIndex(
levels=[["a", "b"], [1, 2], ["1st", "2nd"]],
codes=[
np.tile(np.arange(2).repeat(3), 2),
np.repeat([1, 0, 1], [3, 6, 3]),
np.tile([0, 1, 0], 4),
],
),
],
],
)
def test_stack_duplicate_index(self, idx, exp_idx, future_stack):
# GH10417
df = DataFrame(
np.arange(12).reshape(4, 3),
index=idx,
columns=["1st", "2nd", "1st"],
)
if future_stack:
msg = "Columns with duplicate values are not supported in stack"
with pytest.raises(ValueError, match=msg):
df.stack(future_stack=future_stack)
else:
result = df.stack(future_stack=future_stack)
expected = Series(np.arange(12), index=exp_idx)
tm.assert_series_equal(result, expected)
assert result.index.is_unique is False
li, ri = result.index, expected.index
tm.assert_index_equal(li, ri)
@pytest.mark.filterwarnings(
"ignore:The previous implementation of stack is deprecated"
)
def test_unstack_odd_failure(self, future_stack):
mi = MultiIndex.from_arrays(
[
["Fri"] * 4 + ["Sat"] * 2 + ["Sun"] * 2 + ["Thu"] * 3,
["Dinner"] * 2 + ["Lunch"] * 2 + ["Dinner"] * 5 + ["Lunch"] * 2,
["No", "Yes"] * 4 + ["No", "No", "Yes"],
],
names=["day", "time", "smoker"],
)
df = DataFrame(
{
"sum": np.arange(11, dtype="float64"),
"len": np.arange(11, dtype="float64"),
},
index=mi,
)
# it works, #2100
result = df.unstack(2)
recons = result.stack(future_stack=future_stack)
if future_stack:
# NA values in unstacked persist to restacked in version 3
recons = recons.dropna(how="all")
tm.assert_frame_equal(recons, df)
@pytest.mark.filterwarnings(
"ignore:The previous implementation of stack is deprecated"
)
def test_stack_mixed_dtype(self, multiindex_dataframe_random_data, future_stack):
frame = multiindex_dataframe_random_data
df = frame.T
df["foo", "four"] = "foo"
df = df.sort_index(level=1, axis=1)
stacked = df.stack(future_stack=future_stack)
result = df["foo"].stack(future_stack=future_stack).sort_index()
tm.assert_series_equal(stacked["foo"], result, check_names=False)
assert result.name is None
assert stacked["bar"].dtype == np.float64
@pytest.mark.filterwarnings(
"ignore:The previous implementation of stack is deprecated"
)
def test_unstack_bug(self, future_stack):
df = DataFrame(
{
"state": ["naive", "naive", "naive", "active", "active", "active"],
"exp": ["a", "b", "b", "b", "a", "a"],
"barcode": [1, 2, 3, 4, 1, 3],
"v": ["hi", "hi", "bye", "bye", "bye", "peace"],
"extra": np.arange(6.0),
}
)
result = df.groupby(["state", "exp", "barcode", "v"]).apply(len)
unstacked = result.unstack()
restacked = unstacked.stack(future_stack=future_stack)
tm.assert_series_equal(restacked, result.reindex(restacked.index).astype(float))
@pytest.mark.filterwarnings(
"ignore:The previous implementation of stack is deprecated"
)
def test_stack_unstack_preserve_names(
self, multiindex_dataframe_random_data, future_stack
):
frame = multiindex_dataframe_random_data
unstacked = frame.unstack()
assert unstacked.index.name == "first"
assert unstacked.columns.names == ["exp", "second"]
restacked = unstacked.stack(future_stack=future_stack)
assert restacked.index.names == frame.index.names
@pytest.mark.filterwarnings(
"ignore:The previous implementation of stack is deprecated"
)
@pytest.mark.parametrize("method", ["stack", "unstack"])
def test_stack_unstack_wrong_level_name(
self, method, multiindex_dataframe_random_data, future_stack
):
# GH 18303 - wrong level name should raise
frame = multiindex_dataframe_random_data
# A DataFrame with flat axes:
df = frame.loc["foo"]
kwargs = {"future_stack": future_stack} if method == "stack" else {}
with pytest.raises(KeyError, match="does not match index name"):
getattr(df, method)("mistake", **kwargs)
if method == "unstack":
# Same on a Series:
s = df.iloc[:, 0]
with pytest.raises(KeyError, match="does not match index name"):
getattr(s, method)("mistake", **kwargs)
def test_unstack_level_name(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
result = frame.unstack("second")
expected = frame.unstack(level=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings(
"ignore:The previous implementation of stack is deprecated"
)
def test_stack_level_name(self, multiindex_dataframe_random_data, future_stack):
frame = multiindex_dataframe_random_data
unstacked = frame.unstack("second")
result = unstacked.stack("exp", future_stack=future_stack)
expected = frame.unstack().stack(0, future_stack=future_stack)
tm.assert_frame_equal(result, expected)
result = frame.stack("exp", future_stack=future_stack)
expected = frame.stack(future_stack=future_stack)
tm.assert_series_equal(result, expected)
@pytest.mark.filterwarnings(
"ignore:The previous implementation of stack is deprecated"
)
def test_stack_unstack_multiple(
self, multiindex_year_month_day_dataframe_random_data, future_stack
):
ymd = multiindex_year_month_day_dataframe_random_data
unstacked = ymd.unstack(["year", "month"])
expected = ymd.unstack("year").unstack("month")
tm.assert_frame_equal(unstacked, expected)
assert unstacked.columns.names == expected.columns.names
# series
s = ymd["A"]
s_unstacked = s.unstack(["year", "month"])
tm.assert_frame_equal(s_unstacked, expected["A"])
restacked = unstacked.stack(["year", "month"], future_stack=future_stack)
if future_stack:
# NA values in unstacked persist to restacked in version 3
restacked = restacked.dropna(how="all")
restacked = restacked.swaplevel(0, 1).swaplevel(1, 2)
restacked = restacked.sort_index(level=0)
tm.assert_frame_equal(restacked, ymd)
assert restacked.index.names == ymd.index.names
# GH #451
unstacked = ymd.unstack([1, 2])
expected = ymd.unstack(1).unstack(1).dropna(axis=1, how="all")
tm.assert_frame_equal(unstacked, expected)
unstacked = ymd.unstack([2, 1])
expected = ymd.unstack(2).unstack(1).dropna(axis=1, how="all")
tm.assert_frame_equal(unstacked, expected.loc[:, unstacked.columns])
@pytest.mark.filterwarnings(
"ignore:The previous implementation of stack is deprecated"
)
def test_stack_names_and_numbers(
self, multiindex_year_month_day_dataframe_random_data, future_stack
):
ymd = multiindex_year_month_day_dataframe_random_data
unstacked = ymd.unstack(["year", "month"])
# Can't use mixture of names and numbers to stack
with pytest.raises(ValueError, match="level should contain"):
unstacked.stack([0, "month"], future_stack=future_stack)
@pytest.mark.filterwarnings(
"ignore:The previous implementation of stack is deprecated"
)
def test_stack_multiple_out_of_bounds(
self, multiindex_year_month_day_dataframe_random_data, future_stack
):
# nlevels == 3
ymd = multiindex_year_month_day_dataframe_random_data
unstacked = ymd.unstack(["year", "month"])
with pytest.raises(IndexError, match="Too many levels"):
unstacked.stack([2, 3], future_stack=future_stack)
with pytest.raises(IndexError, match="not a valid level number"):
unstacked.stack([-4, -3], future_stack=future_stack)
def test_unstack_period_series(self):
# GH4342
idx1 = pd.PeriodIndex(
["2013-01", "2013-01", "2013-02", "2013-02", "2013-03", "2013-03"],
freq="M",
name="period",
)
idx2 = Index(["A", "B"] * 3, name="str")
value = [1, 2, 3, 4, 5, 6]
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(
["2013-01", "2013-02", "2013-03"], freq="M", name="period"
)
expected = DataFrame(
{"A": [1, 3, 5], "B": [2, 4, 6]}, index=e_idx, columns=["A", "B"]
)
expected.columns.name = "str"
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected.T)
idx1 = pd.PeriodIndex(
["2013-01", "2013-01", "2013-02", "2013-02", "2013-03", "2013-03"],
freq="M",
name="period1",
)
idx2 = pd.PeriodIndex(
["2013-12", "2013-11", "2013-10", "2013-09", "2013-08", "2013-07"],
freq="M",
name="period2",
)
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(
["2013-01", "2013-02", "2013-03"], freq="M", name="period1"
)
e_cols = pd.PeriodIndex(
["2013-07", "2013-08", "2013-09", "2013-10", "2013-11", "2013-12"],
freq="M",
name="period2",
)
expected = DataFrame(
[
[np.nan, np.nan, np.nan, np.nan, 2, 1],
[np.nan, np.nan, 4, 3, np.nan, np.nan],
[6, 5, np.nan, np.nan, np.nan, np.nan],
],
index=e_idx,
columns=e_cols,
)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected.T)
def test_unstack_period_frame(self):
# GH4342
idx1 = pd.PeriodIndex(
["2014-01", "2014-02", "2014-02", "2014-02", "2014-01", "2014-01"],
freq="M",
name="period1",
)
idx2 = pd.PeriodIndex(
["2013-12", "2013-12", "2014-02", "2013-10", "2013-10", "2014-02"],
freq="M",
name="period2",
)
value = {"A": [1, 2, 3, 4, 5, 6], "B": [6, 5, 4, 3, 2, 1]}
idx = MultiIndex.from_arrays([idx1, idx2])
df = DataFrame(value, index=idx)
result1 = df.unstack()
result2 = df.unstack(level=1)
result3 = df.unstack(level=0)
e_1 = pd.PeriodIndex(["2014-01", "2014-02"], freq="M", name="period1")
e_2 = pd.PeriodIndex(
["2013-10", "2013-12", "2014-02", "2013-10", "2013-12", "2014-02"],
freq="M",
name="period2",
)
e_cols = MultiIndex.from_arrays(["A A A B B B".split(), e_2])
expected = DataFrame(
[[5, 1, 6, 2, 6, 1], [4, 2, 3, 3, 5, 4]], index=e_1, columns=e_cols
)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
e_1 = pd.PeriodIndex(
["2014-01", "2014-02", "2014-01", "2014-02"], freq="M", name="period1"
)
e_2 = pd.PeriodIndex(
["2013-10", "2013-12", "2014-02"], freq="M", name="period2"
)
e_cols = MultiIndex.from_arrays(["A A B B".split(), e_1])
expected = DataFrame(
[[5, 4, 2, 3], [1, 2, 6, 5], [6, 3, 1, 4]], index=e_2, columns=e_cols
)
tm.assert_frame_equal(result3, expected)
@pytest.mark.filterwarnings(
"ignore:The previous implementation of stack is deprecated"
)
def test_stack_multiple_bug(self, future_stack, using_infer_string):
# bug when some uniques are not present in the data GH#3170
id_col = ([1] * 3) + ([2] * 3)
name = (["a"] * 3) + (["b"] * 3)
date = pd.to_datetime(["2013-01-03", "2013-01-04", "2013-01-05"] * 2)
var1 = np.random.default_rng(2).integers(0, 100, 6)
df = DataFrame({"ID": id_col, "NAME": name, "DATE": date, "VAR1": var1})
multi = df.set_index(["DATE", "ID"])
multi.columns.name = "Params"
unst = multi.unstack("ID")
msg = re.escape("agg function failed [how->mean,dtype->")
if using_infer_string:
msg = "dtype 'str' does not support operation 'mean'"
with pytest.raises(TypeError, match=msg):
unst.resample("W-THU").mean()
down = unst.resample("W-THU").mean(numeric_only=True)
rs = down.stack("ID", future_stack=future_stack)
xp = (
unst.loc[:, ["VAR1"]]
.resample("W-THU")
.mean()
.stack("ID", future_stack=future_stack)
)
xp.columns.name = "Params"
tm.assert_frame_equal(rs, xp)
@pytest.mark.filterwarnings(
"ignore:The previous implementation of stack is deprecated"
)
def test_stack_dropna(self, future_stack):
# GH#3997
df = DataFrame({"A": ["a1", "a2"], "B": ["b1", "b2"], "C": [1, 1]})
df = df.set_index(["A", "B"])
dropna = False if not future_stack else lib.no_default
stacked = df.unstack().stack(dropna=dropna, future_stack=future_stack)
assert len(stacked) > len(stacked.dropna())
if future_stack:
with pytest.raises(ValueError, match="dropna must be unspecified"):
df.unstack().stack(dropna=True, future_stack=future_stack)
else:
stacked = df.unstack().stack(dropna=True, future_stack=future_stack)
tm.assert_frame_equal(stacked, stacked.dropna())
def test_unstack_multiple_hierarchical(self, future_stack):
df = DataFrame(
index=[
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1],
],
columns=[[0, 0, 1, 1], [0, 1, 0, 1]],
)
df.index.names = ["a", "b", "c"]
df.columns.names = ["d", "e"]
# it works!
df.unstack(["b", "c"])
def test_unstack_sparse_keyspace(self):
# memory problems with naive impl GH#2278
# Generate Long File & Test Pivot
NUM_ROWS = 1000
df = DataFrame(
{
"A": np.random.default_rng(2).integers(100, size=NUM_ROWS),
"B": np.random.default_rng(3).integers(300, size=NUM_ROWS),
"C": np.random.default_rng(4).integers(-7, 7, size=NUM_ROWS),
"D": np.random.default_rng(5).integers(-19, 19, size=NUM_ROWS),
"E": np.random.default_rng(6).integers(3000, size=NUM_ROWS),
"F": np.random.default_rng(7).standard_normal(NUM_ROWS),
}
)
idf = df.set_index(["A", "B", "C", "D", "E"])
# it works! is sufficient
idf.unstack("E")
@pytest.mark.filterwarnings(
"ignore:The previous implementation of stack is deprecated"
)
def test_unstack_unobserved_keys(self, future_stack):
# related to GH#2278 refactoring
levels = [[0, 1], [0, 1, 2, 3]]
codes = [[0, 0, 1, 1], [0, 2, 0, 2]]
index = MultiIndex(levels, codes)
df = DataFrame(np.random.default_rng(2).standard_normal((4, 2)), index=index)
result = df.unstack()
assert len(result.columns) == 4
recons = result.stack(future_stack=future_stack)
tm.assert_frame_equal(recons, df)
@pytest.mark.slow
def test_unstack_number_of_levels_larger_than_int32_warns(
self, performance_warning, monkeypatch
):
# GH#20601
# GH 26314: Change ValueError to PerformanceWarning
class MockUnstacker(reshape_lib._Unstacker):
def __init__(self, *args, **kwargs) -> None:
# __init__ will raise the warning
super().__init__(*args, **kwargs)
raise Exception("Don't compute final result.")
def _make_selectors(self) -> None:
pass
with monkeypatch.context() as m:
m.setattr(reshape_lib, "_Unstacker", MockUnstacker)
df = DataFrame(
np.zeros((2**16, 2)),
index=[np.arange(2**16), np.arange(2**16)],
)
msg = "The following operation may generate"
with tm.assert_produces_warning(performance_warning, match=msg):
with pytest.raises(Exception, match="Don't compute final result."):
df.unstack()
@pytest.mark.filterwarnings(
"ignore:The previous implementation of stack is deprecated"
)
@pytest.mark.parametrize(
"levels",
itertools.chain.from_iterable(
itertools.product(itertools.permutations([0, 1, 2], width), repeat=2)
for width in [2, 3]
),
)
@pytest.mark.parametrize("stack_lev", range(2))
def test_stack_order_with_unsorted_levels(
self, levels, stack_lev, sort, future_stack
):
# GH#16323
# deep check for 1-row case
columns = MultiIndex(levels=levels, codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
df = DataFrame(columns=columns, data=[range(4)])
kwargs = {} if future_stack else {"sort": sort}
df_stacked = df.stack(stack_lev, future_stack=future_stack, **kwargs)
for row in df.index:
for col in df.columns:
expected = df.loc[row, col]
result_row = row, col[stack_lev]
result_col = col[1 - stack_lev]
result = df_stacked.loc[result_row, result_col]
assert result == expected
@pytest.mark.filterwarnings(
"ignore:The previous implementation of stack is deprecated"
)
def test_stack_order_with_unsorted_levels_multi_row(self, future_stack):
# GH#16323
# check multi-row case
mi = MultiIndex(
levels=[["A", "C", "B"], ["B", "A", "C"]],
codes=[np.repeat(range(3), 3), np.tile(range(3), 3)],
)
df = DataFrame(
columns=mi, index=range(5), data=np.arange(5 * len(mi)).reshape(5, -1)
)
assert all(
df.loc[row, col]
== df.stack(0, future_stack=future_stack).loc[(row, col[0]), col[1]]
for row in df.index
for col in df.columns
)
@pytest.mark.filterwarnings(
"ignore:The previous implementation of stack is deprecated"
)
def test_stack_order_with_unsorted_levels_multi_row_2(self, future_stack):
# GH#53636
levels = ((0, 1), (1, 0))
stack_lev = 1
columns = MultiIndex(levels=levels, codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
df = DataFrame(columns=columns, data=[range(4)], index=[1, 0, 2, 3])
kwargs = {} if future_stack else {"sort": True}
result = df.stack(stack_lev, future_stack=future_stack, **kwargs)
expected_index = MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]],
codes=[[1, 1, 0, 0, 2, 2, 3, 3], [1, 0, 1, 0, 1, 0, 1, 0]],
)
expected = DataFrame(
{
0: [0, 1, 0, 1, 0, 1, 0, 1],
1: [2, 3, 2, 3, 2, 3, 2, 3],
},
index=expected_index,
)
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings(
"ignore:The previous implementation of stack is deprecated"
)
def test_stack_unstack_unordered_multiindex(self, future_stack):
# GH# 18265
values = np.arange(5)
data = np.vstack(
[
[f"b{x}" for x in values], # b0, b1, ..
[f"a{x}" for x in values], # a0, a1, ..
]
)
df = DataFrame(data.T, columns=["b", "a"])
df.columns.name = "first"
second_level_dict = {"x": df}
multi_level_df = pd.concat(second_level_dict, axis=1)
multi_level_df.columns.names = ["second", "first"]
df = multi_level_df.reindex(sorted(multi_level_df.columns), axis=1)
result = df.stack(["first", "second"], future_stack=future_stack).unstack(
["first", "second"]
)
expected = DataFrame(
[["a0", "b0"], ["a1", "b1"], ["a2", "b2"], ["a3", "b3"], ["a4", "b4"]],
index=range(5),
columns=MultiIndex.from_tuples(
[("a", "x"), ("b", "x")], names=["first", "second"]
),
)
tm.assert_frame_equal(result, expected)
def test_unstack_preserve_types(
self, multiindex_year_month_day_dataframe_random_data, using_infer_string
):
# GH#403
ymd = multiindex_year_month_day_dataframe_random_data
ymd["E"] = "foo"
ymd["F"] = 2
unstacked = ymd.unstack("month")
assert unstacked["A", 1].dtype == np.float64
assert (
unstacked["E", 1].dtype == np.object_
if not using_infer_string
else "string"
)
assert unstacked["F", 1].dtype == np.float64
@pytest.mark.filterwarnings(
"ignore:The previous implementation of stack is deprecated"
)
def test_unstack_group_index_overflow(self, future_stack):
codes = np.tile(np.arange(500), 2)
level = np.arange(500)
index = MultiIndex(
levels=[level] * 8 + [[0, 1]],
codes=[codes] * 8 + [np.arange(2).repeat(500)],
)
s = Series(np.arange(1000), index=index)
result = s.unstack()
assert result.shape == (500, 2)
# test roundtrip
stacked = result.stack(future_stack=future_stack)
tm.assert_series_equal(s, stacked.reindex(s.index))
# put it at beginning
index = MultiIndex(
levels=[[0, 1]] + [level] * 8,
codes=[np.arange(2).repeat(500)] + [codes] * 8,
)
s = Series(np.arange(1000), index=index)
result = s.unstack(0)
assert result.shape == (500, 2)
# put it in middle
index = MultiIndex(
levels=[level] * 4 + [[0, 1]] + [level] * 4,
codes=([codes] * 4 + [np.arange(2).repeat(500)] + [codes] * 4),
)
s = Series(np.arange(1000), index=index)
result = s.unstack(4)
assert result.shape == (500, 2)
def test_unstack_with_missing_int_cast_to_float(self):
# https://github.com/pandas-dev/pandas/issues/37115
df = DataFrame(
{
"a": ["A", "A", "B"],
"b": ["ca", "cb", "cb"],
"v": [10] * 3,
}
).set_index(["a", "b"])
# add another int column to get 2 blocks
df["is_"] = 1
assert len(df._mgr.blocks) == 2
result = df.unstack("b")
result[("is_", "ca")] = result[("is_", "ca")].fillna(0)
expected = DataFrame(
[[10.0, 10.0, 1.0, 1.0], [np.nan, 10.0, 0.0, 1.0]],
index=Index(["A", "B"], name="a"),
columns=MultiIndex.from_tuples(
[("v", "ca"), ("v", "cb"), ("is_", "ca"), ("is_", "cb")],
names=[None, "b"],
),
)
tm.assert_frame_equal(result, expected)
def test_unstack_with_level_has_nan(self):
# GH 37510
df1 = DataFrame(
{
"L1": [1, 2, 3, 4],
"L2": [3, 4, 1, 2],
"L3": [1, 1, 1, 1],
"x": [1, 2, 3, 4],
}
)
df1 = df1.set_index(["L1", "L2", "L3"])
new_levels = ["n1", "n2", "n3", None]
df1.index = df1.index.set_levels(levels=new_levels, level="L1")
df1.index = df1.index.set_levels(levels=new_levels, level="L2")
result = df1.unstack("L3")[("x", 1)].sort_index().index
expected = MultiIndex(
levels=[["n1", "n2", "n3", None], ["n1", "n2", "n3", None]],
codes=[[0, 1, 2, 3], [2, 3, 0, 1]],
names=["L1", "L2"],
)
tm.assert_index_equal(result, expected)
@pytest.mark.filterwarnings(
"ignore:The previous implementation of stack is deprecated"
)
def test_stack_nan_in_multiindex_columns(self, future_stack):
# GH#39481
df = DataFrame(
np.zeros([1, 5]),
columns=MultiIndex.from_tuples(
[
(0, None, None),
(0, 2, 0),
(0, 2, 1),
(0, 3, 0),
(0, 3, 1),
],
),
)
result = df.stack(2, future_stack=future_stack)
if future_stack:
index = MultiIndex(levels=[[0], [0.0, 1.0]], codes=[[0, 0, 0], [-1, 0, 1]])
columns = MultiIndex(levels=[[0], [2, 3]], codes=[[0, 0, 0], [-1, 0, 1]])
else:
index = Index([(0, None), (0, 0), (0, 1)])
columns = Index([(0, None), (0, 2), (0, 3)])
expected = DataFrame(
[[0.0, np.nan, np.nan], [np.nan, 0.0, 0.0], [np.nan, 0.0, 0.0]],
index=index,
columns=columns,
)
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings(
"ignore:The previous implementation of stack is deprecated"
)
def test_multi_level_stack_categorical(self, future_stack):
# GH 15239
midx = MultiIndex.from_arrays(
[
["A"] * 2 + ["B"] * 2,
pd.Categorical(list("abab")),
pd.Categorical(list("ccdd")),
]
)
df = DataFrame(np.arange(8).reshape(2, 4), columns=midx)
result = df.stack([1, 2], future_stack=future_stack)
if future_stack:
expected = DataFrame(
[
[0, np.nan],
[1, np.nan],
[np.nan, 2],
[np.nan, 3],
[4, np.nan],
[5, np.nan],
[np.nan, 6],
[np.nan, 7],
],
columns=["A", "B"],
index=MultiIndex.from_arrays(
[
[0] * 4 + [1] * 4,
pd.Categorical(list("abababab")),
pd.Categorical(list("ccddccdd")),
]
),
)
else:
expected = DataFrame(
[
[0, np.nan],
[np.nan, 2],
[1, np.nan],
[np.nan, 3],
[4, np.nan],
[np.nan, 6],
[5, np.nan],
[np.nan, 7],
],
columns=["A", "B"],
index=MultiIndex.from_arrays(
[
[0] * 4 + [1] * 4,
pd.Categorical(list("aabbaabb")),
pd.Categorical(list("cdcdcdcd")),
]
),
)
tm.assert_frame_equal(result, expected, check_index_type=False)
@pytest.mark.filterwarnings(
"ignore:The previous implementation of stack is deprecated"
)
def test_stack_nan_level(self, future_stack):
# GH 9406
df_nan = DataFrame(
np.arange(4).reshape(2, 2),
columns=MultiIndex.from_tuples(
[("A", np.nan), ("B", "b")], names=["Upper", "Lower"]
),
index=Index([0, 1], name="Num"),
dtype=np.float64,
)
result = df_nan.stack(future_stack=future_stack)
if future_stack:
index = MultiIndex(
levels=[[0, 1], [np.nan, "b"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=["Num", "Lower"],
)
else:
index = MultiIndex.from_tuples(
[(0, np.nan), (0, "b"), (1, np.nan), (1, "b")], names=["Num", "Lower"]
)
expected = DataFrame(
[[0.0, np.nan], [np.nan, 1], [2.0, np.nan], [np.nan, 3.0]],
columns=Index(["A", "B"], name="Upper"),
index=index,
)
tm.assert_frame_equal(result, expected)
def test_unstack_categorical_columns(self):
# GH 14018
idx = MultiIndex.from_product([["A"], [0, 1]])
df = DataFrame({"cat": pd.Categorical(["a", "b"])}, index=idx)
result = df.unstack()
expected = DataFrame(
{
0: pd.Categorical(["a"], categories=["a", "b"]),
1: pd.Categorical(["b"], categories=["a", "b"]),
},
index=["A"],
)
expected.columns = MultiIndex.from_tuples([("cat", 0), ("cat", 1)])
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings(
"ignore:The previous implementation of stack is deprecated"
)
def test_stack_unsorted(self, future_stack):
# GH 16925
PAE = ["ITA", "FRA"]
VAR = ["A1", "A2"]
TYP = ["CRT", "DBT", "NET"]
MI = MultiIndex.from_product([PAE, VAR, TYP], names=["PAE", "VAR", "TYP"])
V = list(range(len(MI)))
DF = DataFrame(data=V, index=MI, columns=["VALUE"])
DF = DF.unstack(["VAR", "TYP"])
DF.columns = DF.columns.droplevel(0)
DF.loc[:, ("A0", "NET")] = 9999
result = DF.stack(["VAR", "TYP"], future_stack=future_stack).sort_index()
expected = (
DF.sort_index(axis=1)
.stack(["VAR", "TYP"], future_stack=future_stack)
.sort_index()
)
tm.assert_series_equal(result, expected)
@pytest.mark.filterwarnings(
"ignore:The previous implementation of stack is deprecated"
)
def test_stack_nullable_dtype(self, future_stack):
# GH#43561
columns = MultiIndex.from_product(
[["54511", "54515"], ["r", "t_mean"]], names=["station", "element"]
)
index = Index([1, 2, 3], name="time")
arr = np.array([[50, 226, 10, 215], [10, 215, 9, 220], [305, 232, 111, 220]])
df = DataFrame(arr, columns=columns, index=index, dtype=pd.Int64Dtype())
result = df.stack("station", future_stack=future_stack)
expected = (
df.astype(np.int64)
.stack("station", future_stack=future_stack)
.astype(pd.Int64Dtype())
)
tm.assert_frame_equal(result, expected)
# non-homogeneous case
df[df.columns[0]] = df[df.columns[0]].astype(pd.Float64Dtype())
result = df.stack("station", future_stack=future_stack)
expected = DataFrame(
{
"r": pd.array(
[50.0, 10.0, 10.0, 9.0, 305.0, 111.0], dtype=pd.Float64Dtype()
),
"t_mean": pd.array(
[226, 215, 215, 220, 232, 220], dtype=pd.Int64Dtype()
),
},
index=MultiIndex.from_product([index, columns.levels[0]]),
)
expected.columns.name = "element"
tm.assert_frame_equal(result, expected)
def test_unstack_mixed_level_names(self):
# GH#48763
arrays = [["a", "a"], [1, 2], ["red", "blue"]]
idx = MultiIndex.from_arrays(arrays, names=("x", 0, "y"))
df = DataFrame({"m": [1, 2]}, index=idx)
result = df.unstack("x")
expected = DataFrame(
[[1], [2]],
columns=MultiIndex.from_tuples([("m", "a")], names=[None, "x"]),
index=MultiIndex.from_tuples([(1, "red"), (2, "blue")], names=[0, "y"]),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:The previous implementation of stack is deprecated")
def test_stack_tuple_columns(future_stack):
# GH#54948 - test stack when the input has a non-MultiIndex with tuples
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=[("a", 1), ("a", 2), ("b", 1)]
)
result = df.stack(future_stack=future_stack)
expected = Series(
[1, 2, 3, 4, 5, 6, 7, 8, 9],
index=MultiIndex(
levels=[range(3), [("a", 1), ("a", 2), ("b", 1)]],
codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
),
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dtype, na_value",
[
("float64", np.nan),
("Float64", np.nan),
("Float64", pd.NA),
("Int64", pd.NA),
],
)
@pytest.mark.parametrize("test_multiindex", [True, False])
def test_stack_preserves_na(dtype, na_value, test_multiindex):
# GH#56573
if test_multiindex:
index = MultiIndex.from_arrays(2 * [Index([na_value], dtype=dtype)])
else:
index = Index([na_value], dtype=dtype)
df = DataFrame({"a": [1]}, index=index)
result = df.stack()
if test_multiindex:
expected_index = MultiIndex.from_arrays(
[
Index([na_value], dtype=dtype),
Index([na_value], dtype=dtype),
Index(["a"]),
]
)
else:
expected_index = MultiIndex.from_arrays(
[
Index([na_value], dtype=dtype),
Index(["a"]),
]
)
expected = Series(1, index=expected_index)
tm.assert_series_equal(result, expected)
| TestStackUnstackMultiLevel |
python | pytorch__pytorch | torch/_dynamo/variables/lists.py | {
"start": 21819,
"end": 29780
} | class ____(BaseListVariable):
"""
Implement methods common to List and other List-like things
"""
def call_method(
self,
tx: "InstructionTranslator",
name: str,
args: list[VariableTracker],
kwargs: dict[str, VariableTracker],
) -> VariableTracker:
from .tensor import SymNodeVariable
if name == "append" and self.is_mutable():
if kwargs or len(args) != 1:
raise_args_mismatch(
tx,
name,
"1 args and 0 kwargs",
f"{len(args)} args and {len(kwargs)} kwargs",
)
(arg,) = args
tx.output.side_effects.mutation(self)
self.items.append(arg)
return ConstantVariable.create(None)
elif name == "extend" and self.is_mutable():
if kwargs or len(args) != 1:
raise_args_mismatch(
tx,
name,
"1 args and 0 kwargs",
f"{len(args)} args and {len(kwargs)} kwargs",
)
if not args[0].has_force_unpack_var_sequence(tx):
msg = ConstantVariable.create(f"{type(args[0])} object is not iterable")
raise_observed_exception(TypeError, tx, args=[msg])
(arg,) = args
arg.force_apply_to_var_sequence(
tx, lambda item: self.call_method(tx, "append", [item], {})
)
return ConstantVariable.create(None)
elif name == "insert" and self.is_mutable():
if kwargs or len(args) != 2:
raise_args_mismatch(
tx,
name,
"2 args and 0 kwargs",
f"{len(args)} args and {len(kwargs)} kwargs",
)
idx, value = args
if isinstance(idx, SymNodeVariable):
const_idx = idx.evaluate_expr()
else:
const_idx = idx.as_python_constant()
tx.output.side_effects.mutation(self)
self.items.insert(const_idx, value)
return ConstantVariable.create(None)
elif name == "pop" and self.is_mutable():
if kwargs or len(args) > 1:
raise_args_mismatch(
tx,
name,
"at most 1 args and 0 kwargs",
f"{len(args)} args and {len(kwargs)} kwargs",
)
if len(self.items) == 0:
msg = ConstantVariable.create("pop from empty list")
raise_observed_exception(IndexError, tx, args=[msg])
if len(args):
idx = args[0].as_python_constant()
if idx > len(self.items):
msg = ConstantVariable.create("pop index out of range")
raise_observed_exception(IndexError, tx, args=[msg])
tx.output.side_effects.mutation(self)
return self.items.pop(*[a.as_python_constant() for a in args])
elif name == "clear" and self.is_mutable():
if args or kwargs:
raise_args_mismatch(
tx,
name,
"0 args and 0 kwargs",
f"{len(args)} args and {len(kwargs)} kwargs",
)
tx.output.side_effects.mutation(self)
self.items.clear()
return ConstantVariable.create(None)
elif (
name == "__setitem__"
and self.is_mutable()
and args
and (
args[0].is_python_constant()
or isinstance(args[0], SymNodeVariable)
or (
isinstance(args[0], SliceVariable)
and all(
s.is_python_constant() or isinstance(s, SymNodeVariable)
for s in args[0].items
)
)
)
):
if kwargs:
raise_args_mismatch(tx, name, "0 kwargs", f"{len(kwargs)} kwargs")
key, value = args
tx.output.side_effects.mutation(self)
if isinstance(key, SymNodeVariable):
self.items[key.evaluate_expr()] = value
elif isinstance(key, SliceVariable):
if key.is_python_constant():
self.items[key.as_python_constant()] = list(value.items) # type: ignore[attr-defined]
else:
items_slice = slice(
*[
(
s.evaluate_expr()
if isinstance(s, SymNodeVariable)
else s.as_python_constant()
)
for s in key.items
]
)
self.items[items_slice] = list(value.items) # type: ignore[attr-defined]
else:
self.items[key.as_python_constant()] = value
return ConstantVariable.create(None)
elif name == "__delitem__" and self.is_mutable():
if kwargs or len(args) != 1:
raise_args_mismatch(
tx,
name,
"1 args and 0 kwargs",
f"{len(args)} args and {len(kwargs)} kwargs",
)
tx.output.side_effects.mutation(self)
if args[0].is_python_constant() and isinstance(
args[0].as_python_constant(), (int, slice)
):
if isinstance(args[0], SymNodeVariable):
idx = args[0].evaluate_expr()
else:
idx = args[0].as_python_constant()
try:
self.items.__delitem__(idx)
except (IndexError, ValueError) as exc:
raise_observed_exception(
type(exc),
tx,
args=list(map(ConstantVariable.create, exc.args)),
)
else:
msg = ConstantVariable.create(
f"list indices must be integers or slices, not {args[0].python_type_name()}"
)
raise_observed_exception(TypeError, tx, args=[msg])
return ConstantVariable.create(None)
elif name == "copy":
# List copy() doesn't have args and kwargs
if args or kwargs:
raise_args_mismatch(
tx,
name,
"0 args and 0 kwargs",
f"{len(args)} args and {len(kwargs)} kwargs",
)
items_lst: list[VariableTracker] = list(self.items)
return self.modified(items_lst, mutation_type=ValueMutationNew())
elif name == "reverse" and self.is_mutable():
if args or kwargs:
raise_args_mismatch(
tx,
name,
"0 args and 0 kwargs",
f"{len(args)} args and {len(kwargs)} kwargs",
)
self.items.reverse()
tx.output.side_effects.mutation(self)
return ConstantVariable.create(None)
elif name == "remove" and self.is_mutable():
if kwargs or len(args) != 1:
raise_args_mismatch(
tx,
name,
"1 args and 0 kwargs",
f"{len(args)} args and {len(kwargs)} kwargs",
)
idx = self.call_method(tx, "index", args, kwargs)
self.call_method(tx, "pop", [idx], {})
return ConstantVariable.create(None)
else:
return super().call_method(tx, name, args, kwargs)
| CommonListMethodsVariable |
python | tornadoweb__tornado | tornado/wsgi.py | {
"start": 2013,
"end": 10799
} | class ____:
r"""Makes a WSGI-compatible application runnable on Tornado's HTTP server.
.. warning::
WSGI is a *synchronous* interface, while Tornado's concurrency model
is based on single-threaded *asynchronous* execution. Many of Tornado's
distinguishing features are not available in WSGI mode, including efficient
long-polling and websockets. The primary purpose of `WSGIContainer` is
to support both WSGI applications and native Tornado ``RequestHandlers`` in
a single process. WSGI-only applications are likely to be better off
with a dedicated WSGI server such as ``gunicorn`` or ``uwsgi``.
Wrap a WSGI application in a `WSGIContainer` to make it implement the Tornado
`.HTTPServer` ``request_callback`` interface. The `WSGIContainer` object can
then be passed to classes from the `tornado.routing` module,
`tornado.web.FallbackHandler`, or to `.HTTPServer` directly.
This class is intended to let other frameworks (Django, Flask, etc)
run on the Tornado HTTP server and I/O loop.
Realistic usage will be more complicated, but the simplest possible example uses a
hand-written WSGI application with `.HTTPServer`::
def simple_app(environ, start_response):
status = "200 OK"
response_headers = [("Content-type", "text/plain")]
start_response(status, response_headers)
return [b"Hello world!\n"]
async def main():
container = tornado.wsgi.WSGIContainer(simple_app)
http_server = tornado.httpserver.HTTPServer(container)
http_server.listen(8888)
await asyncio.Event().wait()
asyncio.run(main())
The recommended pattern is to use the `tornado.routing` module to set up routing
rules between your WSGI application and, typically, a `tornado.web.Application`.
Alternatively, `tornado.web.Application` can be used as the top-level router
and `tornado.web.FallbackHandler` can embed a `WSGIContainer` within it.
If the ``executor`` argument is provided, the WSGI application will be executed
on that executor. This must be an instance of `concurrent.futures.Executor`,
typically a ``ThreadPoolExecutor`` (``ProcessPoolExecutor`` is not supported).
If no ``executor`` is given, the application will run on the event loop thread in
Tornado 6.3; this will change to use an internal thread pool by default in
Tornado 7.0.
.. warning::
By default, the WSGI application is executed on the event loop's thread. This
limits the server to one request at a time (per process), making it less scalable
than most other WSGI servers. It is therefore highly recommended that you pass
a ``ThreadPoolExecutor`` when constructing the `WSGIContainer`, after verifying
that your application is thread-safe. The default will change to use a
``ThreadPoolExecutor`` in Tornado 7.0.
.. versionadded:: 6.3
The ``executor`` parameter.
.. deprecated:: 6.3
The default behavior of running the WSGI application on the event loop thread
is deprecated and will change in Tornado 7.0 to use a thread pool by default.
"""
def __init__(
self,
wsgi_application: "WSGIAppType",
executor: Optional[concurrent.futures.Executor] = None,
) -> None:
self.wsgi_application = wsgi_application
self.executor = dummy_executor if executor is None else executor
def __call__(self, request: httputil.HTTPServerRequest) -> None:
IOLoop.current().spawn_callback(self.handle_request, request)
async def handle_request(self, request: httputil.HTTPServerRequest) -> None:
data = {} # type: Dict[str, Any]
response = [] # type: List[bytes]
def start_response(
status: str,
headers: List[Tuple[str, str]],
exc_info: Optional[
Tuple[
"Optional[Type[BaseException]]",
Optional[BaseException],
Optional[TracebackType],
]
] = None,
) -> Callable[[bytes], Any]:
data["status"] = status
data["headers"] = headers
return response.append
loop = IOLoop.current()
app_response = await loop.run_in_executor(
self.executor,
self.wsgi_application,
self.environ(request),
start_response,
)
try:
app_response_iter = iter(app_response)
def next_chunk() -> Optional[bytes]:
try:
return next(app_response_iter)
except StopIteration:
# StopIteration is special and is not allowed to pass through
# coroutines normally.
return None
while True:
chunk = await loop.run_in_executor(self.executor, next_chunk)
if chunk is None:
break
response.append(chunk)
finally:
if hasattr(app_response, "close"):
app_response.close() # type: ignore
body = b"".join(response)
if not data:
raise Exception("WSGI app did not call start_response")
status_code_str, reason = data["status"].split(" ", 1)
status_code = int(status_code_str)
headers = data["headers"] # type: List[Tuple[str, str]]
header_set = {k.lower() for (k, v) in headers}
body = escape.utf8(body)
if status_code != 304:
if "content-length" not in header_set:
headers.append(("Content-Length", str(len(body))))
if "content-type" not in header_set:
headers.append(("Content-Type", "text/html; charset=UTF-8"))
if "server" not in header_set:
headers.append(("Server", "TornadoServer/%s" % tornado.version))
start_line = httputil.ResponseStartLine("HTTP/1.1", status_code, reason)
header_obj = httputil.HTTPHeaders()
for key, value in headers:
header_obj.add(key, value)
assert request.connection is not None
request.connection.write_headers(start_line, header_obj, chunk=body)
request.connection.finish()
self._log(status_code, request)
def environ(self, request: httputil.HTTPServerRequest) -> Dict[str, Any]:
"""Converts a `tornado.httputil.HTTPServerRequest` to a WSGI environment.
.. versionchanged:: 6.3
No longer a static method.
"""
hostport = request.host.split(":")
if len(hostport) == 2:
host = hostport[0]
port = int(hostport[1])
else:
host = request.host
port = 443 if request.protocol == "https" else 80
environ = {
"REQUEST_METHOD": request.method,
"SCRIPT_NAME": "",
"PATH_INFO": to_wsgi_str(
escape.url_unescape(request.path, encoding=None, plus=False)
),
"QUERY_STRING": request.query,
"REMOTE_ADDR": request.remote_ip,
"SERVER_NAME": host,
"SERVER_PORT": str(port),
"SERVER_PROTOCOL": request.version,
"wsgi.version": (1, 0),
"wsgi.url_scheme": request.protocol,
"wsgi.input": BytesIO(escape.utf8(request.body)),
"wsgi.errors": sys.stderr,
"wsgi.multithread": self.executor is not dummy_executor,
"wsgi.multiprocess": True,
"wsgi.run_once": False,
}
if "Content-Type" in request.headers:
environ["CONTENT_TYPE"] = request.headers.pop("Content-Type")
if "Content-Length" in request.headers:
environ["CONTENT_LENGTH"] = request.headers.pop("Content-Length")
for key, value in request.headers.items():
environ["HTTP_" + key.replace("-", "_").upper()] = value
return environ
def _log(self, status_code: int, request: httputil.HTTPServerRequest) -> None:
if status_code < 400:
log_method = access_log.info
elif status_code < 500:
log_method = access_log.warning
else:
log_method = access_log.error
request_time = 1000.0 * request.request_time()
assert request.method is not None
assert request.uri is not None
summary = (
request.method # type: ignore[operator]
+ " "
+ request.uri
+ " ("
+ request.remote_ip
+ ")"
)
log_method("%d %s %.2fms", status_code, summary, request_time)
HTTPRequest = httputil.HTTPServerRequest
| WSGIContainer |
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_cond_format14.py | {
"start": 345,
"end": 4433
} | class ____(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with conditional formatting."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.select()
worksheet.write("A1", 1)
worksheet.write("A2", 2)
worksheet.write("A3", 3)
worksheet.write("A4", 4)
worksheet.write("A5", 5)
worksheet.write("A6", 6)
worksheet.write("A7", 7)
worksheet.write("A8", 8)
worksheet.write("A9", 9)
worksheet.write("A10", 10)
worksheet.write("A11", 11)
worksheet.write("A12", 12)
worksheet.conditional_format("A1:A12", {"type": "data_bar"})
worksheet._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="A1:A12"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="1" spans="1:1">
<c r="A1">
<v>1</v>
</c>
</row>
<row r="2" spans="1:1">
<c r="A2">
<v>2</v>
</c>
</row>
<row r="3" spans="1:1">
<c r="A3">
<v>3</v>
</c>
</row>
<row r="4" spans="1:1">
<c r="A4">
<v>4</v>
</c>
</row>
<row r="5" spans="1:1">
<c r="A5">
<v>5</v>
</c>
</row>
<row r="6" spans="1:1">
<c r="A6">
<v>6</v>
</c>
</row>
<row r="7" spans="1:1">
<c r="A7">
<v>7</v>
</c>
</row>
<row r="8" spans="1:1">
<c r="A8">
<v>8</v>
</c>
</row>
<row r="9" spans="1:1">
<c r="A9">
<v>9</v>
</c>
</row>
<row r="10" spans="1:1">
<c r="A10">
<v>10</v>
</c>
</row>
<row r="11" spans="1:1">
<c r="A11">
<v>11</v>
</c>
</row>
<row r="12" spans="1:1">
<c r="A12">
<v>12</v>
</c>
</row>
</sheetData>
<conditionalFormatting sqref="A1:A12">
<cfRule type="dataBar" priority="1">
<dataBar>
<cfvo type="min" val="0"/>
<cfvo type="max" val="0"/>
<color rgb="FF638EC6"/>
</dataBar>
</cfRule>
</conditionalFormatting>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleWorksheet |
python | apache__airflow | helm-tests/tests/helm_tests/airflow_aux/test_annotations.py | {
"start": 1259,
"end": 16271
} | class ____:
"""Tests Service Account Annotations."""
@pytest.mark.parametrize(
("values", "show_only", "expected_annotations"),
[
(
{
"cleanup": {
"enabled": True,
"serviceAccount": {
"annotations": {
"example": "cleanup",
},
},
},
},
"templates/cleanup/cleanup-serviceaccount.yaml",
{
"example": "cleanup",
},
),
(
{
"databaseCleanup": {
"enabled": True,
"serviceAccount": {
"annotations": {
"example": "database-cleanup",
},
},
},
},
"templates/database-cleanup/database-cleanup-serviceaccount.yaml",
{
"example": "database-cleanup",
},
),
(
{
"scheduler": {
"serviceAccount": {
"annotations": {
"example": "scheduler",
},
},
},
},
"templates/scheduler/scheduler-serviceaccount.yaml",
{
"example": "scheduler",
},
),
(
{
"apiServer": {
"serviceAccount": {
"annotations": {
"example": "api-server",
},
},
},
},
"templates/api-server/api-server-serviceaccount.yaml",
{
"example": "api-server",
},
),
(
{
"workers": {
"serviceAccount": {
"annotations": {
"example": "worker",
},
},
},
},
"templates/workers/worker-serviceaccount.yaml",
{
"example": "worker",
},
),
(
{
"workers": {
"useWorkerDedicatedServiceAccounts": True,
"celery": {
"serviceAccount": {
"annotations": {
"example": "worker",
},
},
},
},
},
"templates/workers/worker-celery-serviceaccount.yaml",
{
"example": "worker",
},
),
(
{
"workers": {
"useWorkerDedicatedServiceAccounts": True,
"serviceAccount": {
"annotations": {
"example": "missing",
},
},
"celery": {
"serviceAccount": {
"annotations": {
"example": "worker",
},
},
},
},
},
"templates/workers/worker-celery-serviceaccount.yaml",
{
"example": "worker",
},
),
(
{
"executor": "KubernetesExecutor",
"workers": {
"useWorkerDedicatedServiceAccounts": True,
"kubernetes": {
"serviceAccount": {
"annotations": {
"example": "worker",
},
},
},
},
},
"templates/workers/worker-kubernetes-serviceaccount.yaml",
{
"example": "worker",
},
),
(
{
"executor": "KubernetesExecutor",
"workers": {
"useWorkerDedicatedServiceAccounts": True,
"serviceAccount": {
"annotations": {
"example": "missing",
},
},
"kubernetes": {
"serviceAccount": {
"annotations": {
"example": "worker",
},
},
},
},
},
"templates/workers/worker-kubernetes-serviceaccount.yaml",
{
"example": "worker",
},
),
(
{
"flower": {
"enabled": True,
"serviceAccount": {
"annotations": {
"example": "flower",
},
},
},
},
"templates/flower/flower-serviceaccount.yaml",
{
"example": "flower",
},
),
(
{
"statsd": {
"serviceAccount": {
"annotations": {
"example": "statsd",
},
},
},
},
"templates/statsd/statsd-serviceaccount.yaml",
{
"example": "statsd",
},
),
(
{
"redis": {
"serviceAccount": {
"annotations": {
"example": "redis",
},
},
},
},
"templates/redis/redis-serviceaccount.yaml",
{
"example": "redis",
},
),
(
{
"pgbouncer": {
"enabled": True,
"serviceAccount": {
"annotations": {
"example": "pgbouncer",
},
},
},
},
"templates/pgbouncer/pgbouncer-serviceaccount.yaml",
{
"example": "pgbouncer",
},
),
(
{
"createUserJob": {
"serviceAccount": {
"annotations": {
"example": "createuser",
},
},
},
},
"templates/jobs/create-user-job-serviceaccount.yaml",
{
"example": "createuser",
},
),
(
{
"migrateDatabaseJob": {
"serviceAccount": {
"annotations": {
"example": "migratedb",
},
},
},
},
"templates/jobs/migrate-database-job-serviceaccount.yaml",
{
"example": "migratedb",
},
),
(
{
"triggerer": {
"serviceAccount": {
"annotations": {
"example": "triggerer",
},
},
},
},
"templates/triggerer/triggerer-serviceaccount.yaml",
{
"example": "triggerer",
},
),
(
{
"dagProcessor": {
"enabled": True,
"serviceAccount": {
"annotations": {
"example": "dag-processor",
},
},
},
},
"templates/dag-processor/dag-processor-serviceaccount.yaml",
{
"example": "dag-processor",
},
),
],
)
def test_annotations_are_added(self, values, show_only, expected_annotations):
k8s_objects = render_chart(
values=values,
show_only=[show_only],
)
# This test relies on the convention that the helm chart puts a single
# ServiceAccount in its own .yaml file, so by specifying `show_only`,
# we should only get a single k8s_object here - the target object that
# we hope to test on.
assert len(k8s_objects) == 1
obj = k8s_objects[0]
for k, v in expected_annotations.items():
assert k in obj["metadata"]["annotations"]
assert v == obj["metadata"]["annotations"][k]
def test_annotations_on_webserver(self):
"""Test annotations are added on webserver for Airflow 1 & 2"""
k8s_objects = render_chart(
values={
"airflowVersion": "2.10.0",
"webserver": {
"serviceAccount": {
"annotations": {
"example": "webserver",
},
},
},
},
show_only=["templates/webserver/webserver-serviceaccount.yaml"],
)
assert len(k8s_objects) == 1
obj = k8s_objects[0]
assert obj["metadata"]["annotations"] == {"example": "webserver"}
@pytest.mark.parametrize(
("values", "show_only", "expected_annotations"),
[
(
{
"scheduler": {
"podAnnotations": {
"example": "scheduler",
},
},
},
"templates/scheduler/scheduler-deployment.yaml",
{
"example": "scheduler",
},
),
(
{
"apiServer": {
"podAnnotations": {
"example": "api-server",
},
},
},
"templates/api-server/api-server-deployment.yaml",
{
"example": "api-server",
},
),
(
{
"workers": {
"podAnnotations": {
"example": "worker",
},
},
},
"templates/workers/worker-deployment.yaml",
{
"example": "worker",
},
),
(
{
"flower": {
"enabled": True,
"podAnnotations": {
"example": "flower",
},
},
},
"templates/flower/flower-deployment.yaml",
{
"example": "flower",
},
),
(
{
"triggerer": {
"podAnnotations": {
"example": "triggerer",
},
},
},
"templates/triggerer/triggerer-deployment.yaml",
{
"example": "triggerer",
},
),
(
{
"dagProcessor": {
"enabled": True,
"podAnnotations": {
"example": "dag-processor",
},
},
},
"templates/dag-processor/dag-processor-deployment.yaml",
{
"example": "dag-processor",
},
),
(
{
"cleanup": {
"enabled": True,
"podAnnotations": {
"example": "cleanup",
},
}
},
"templates/cleanup/cleanup-cronjob.yaml",
{
"example": "cleanup",
},
),
(
{
"databaseCleanup": {
"enabled": True,
"podAnnotations": {
"example": "database-cleanup",
},
}
},
"templates/database-cleanup/database-cleanup-cronjob.yaml",
{
"example": "database-cleanup",
},
),
(
{
"redis": {
"podAnnotations": {
"example": "redis",
},
},
},
"templates/redis/redis-statefulset.yaml",
{
"example": "redis",
},
),
(
{
"statsd": {
"podAnnotations": {
"example": "statsd",
},
},
},
"templates/statsd/statsd-deployment.yaml",
{
"example": "statsd",
},
),
(
{
"pgbouncer": {
"enabled": True,
"podAnnotations": {
"example": "pgbouncer",
},
},
},
"templates/pgbouncer/pgbouncer-deployment.yaml",
{
"example": "pgbouncer",
},
),
],
)
| TestServiceAccountAnnotations |
python | mlflow__mlflow | mlflow/tracking/context/default_context.py | {
"start": 771,
"end": 1128
} | class ____(RunContextProvider):
def in_context(self):
return True
def tags(self):
creds = read_mlflow_creds()
return {
MLFLOW_USER: creds.username or _get_user(),
MLFLOW_SOURCE_NAME: _get_source_name(),
MLFLOW_SOURCE_TYPE: SourceType.to_string(_get_source_type()),
}
| DefaultRunContext |
python | getsentry__sentry | src/sentry/notifications/notification_action/issue_alert_registry/handlers/webhook_issue_alert_handler.py | {
"start": 389,
"end": 707
} | class ____(BaseIssueAlertHandler):
@classmethod
def get_integration_id(cls, action: Action, mapping: ActionFieldMapping) -> dict[str, Any]:
return {}
@classmethod
def get_target_display(cls, action: Action, mapping: ActionFieldMapping) -> dict[str, Any]:
return {}
| WebhookIssueAlertHandler |
python | MongoEngine__mongoengine | tests/fields/test_decimal_field.py | {
"start": 150,
"end": 4695
} | class ____(MongoDBTestCase):
def test_storage(self):
class Person(Document):
float_value = DecimalField(precision=4)
string_value = DecimalField(precision=4, force_string=True)
Person.drop_collection()
values_to_store = [
10,
10.1,
10.11,
"10.111",
Decimal("10.1111"),
Decimal("10.11111"),
]
for store_at_creation in [True, False]:
for value in values_to_store:
# to_python is called explicitly if values were sent in the kwargs of __init__
if store_at_creation:
Person(float_value=value, string_value=value).save()
else:
person = Person.objects.create()
person.float_value = value
person.string_value = value
person.save()
# How its stored
expected = [
{"float_value": 10.0, "string_value": "10.0000"},
{"float_value": 10.1, "string_value": "10.1000"},
{"float_value": 10.11, "string_value": "10.1100"},
{"float_value": 10.111, "string_value": "10.1110"},
{"float_value": 10.1111, "string_value": "10.1111"},
{"float_value": 10.1111, "string_value": "10.1111"},
]
expected.extend(expected)
actual = list(Person.objects.exclude("id").as_pymongo())
assert expected == actual
# How it comes out locally
expected = [
Decimal("10.0000"),
Decimal("10.1000"),
Decimal("10.1100"),
Decimal("10.1110"),
Decimal("10.1111"),
Decimal("10.1111"),
]
expected.extend(expected)
for field_name in ["float_value", "string_value"]:
actual = list(Person.objects().scalar(field_name))
assert expected == actual
def test_save_none(self):
class Person(Document):
value = DecimalField()
Person.drop_collection()
person = Person(value=None)
assert person.value is None
person.save()
fetched_person = Person.objects.first()
fetched_person.value is None
assert Person.objects(value=None).first() is not None
def test_validation(self):
"""Ensure that invalid values cannot be assigned to decimal fields."""
class Person(Document):
height = DecimalField(min_value=Decimal("0.1"), max_value=Decimal("3.5"))
Person.drop_collection()
Person(height=Decimal("1.89")).save()
person = Person.objects.first()
assert person.height == Decimal("1.89")
person.height = "2.0"
person.save()
person.height = 0.01
with pytest.raises(ValidationError):
person.validate()
person.height = Decimal("0.01")
with pytest.raises(ValidationError):
person.validate()
person.height = Decimal("4.0")
with pytest.raises(ValidationError):
person.validate()
person.height = "something invalid"
with pytest.raises(ValidationError):
person.validate()
person_2 = Person(height="something invalid")
with pytest.raises(ValidationError):
person_2.validate()
def test_comparison(self):
class Person(Document):
money = DecimalField()
Person.drop_collection()
Person(money=6).save()
Person(money=7).save()
Person(money=8).save()
Person(money=10).save()
assert 2 == Person.objects(money__gt=Decimal("7")).count()
assert 2 == Person.objects(money__gt=7).count()
assert 2 == Person.objects(money__gt="7").count()
assert 3 == Person.objects(money__gte="7").count()
def test_precision_0(self):
"""prevent regression of a bug that was raising an exception when using precision=0"""
class TestDoc(Document):
d = DecimalField(precision=0)
TestDoc.drop_collection()
td = TestDoc(d=Decimal("12.00032678131263"))
assert td.d == Decimal("12")
def test_precision_negative_raise(self):
"""prevent regression of a bug that was raising an exception when using precision=0"""
with pytest.raises(
ValidationError, match="precision must be a positive integer"
):
class TestDoc(Document):
dneg = DecimalField(precision=-1)
| TestDecimalField |
python | cython__cython | Cython/Build/Tests/TestInline.py | {
"start": 302,
"end": 3489
} | class ____(CythonTest):
def setUp(self):
CythonTest.setUp(self)
self._call_kwds = dict(test_kwds)
if os.path.isdir('TEST_TMP'):
lib_dir = os.path.join('TEST_TMP','inline')
else:
lib_dir = tempfile.mkdtemp(prefix='cython_inline_')
self._call_kwds['lib_dir'] = lib_dir
def test_simple(self):
self.assertEqual(inline("return 1+2", **self._call_kwds), 3)
def test_types(self):
self.assertEqual(inline("""
cimport cython
return cython.typeof(a), cython.typeof(b)
""", a=1.0, b=[], **self._call_kwds), ('double', 'list object'))
def test_locals(self):
a = 1
b = 2
self.assertEqual(inline("return a+b", **self._call_kwds), 3)
def test_globals(self):
self.assertEqual(inline("return global_value + 1", **self._call_kwds), global_value + 1)
def test_no_return(self):
self.assertEqual(inline("""
a = 1
cdef double b = 2
cdef c = []
""", **self._call_kwds), dict(a=1, b=2.0, c=[]))
def test_def_node(self):
foo = inline("def foo(x): return x * x", **self._call_kwds)['foo']
self.assertEqual(foo(7), 49)
def test_class_ref(self):
class Type:
pass
tp = inline("Type")['Type']
self.assertEqual(tp, Type)
def test_pure(self):
import cython as cy
b = inline("""
b = cy.declare(float, a)
c = cy.declare(cy.pointer(cy.float), &b)
return b
""", a=3, **self._call_kwds)
self.assertEqual(type(b), float)
def test_compiler_directives(self):
self.assertEqual(
inline('return sum(x)',
x=[1, 2, 3],
cython_compiler_directives={'boundscheck': False}),
6
)
def test_lang_version(self):
# GH-3419. Caching for inline code didn't always respect compiler directives.
inline_divcode = "def f(int a, int b): return a/b"
self.assertEqual(
inline(inline_divcode, language_level=2)['f'](5,2),
2
)
self.assertEqual(
inline(inline_divcode, language_level=3)['f'](5,2),
2.5
)
self.assertEqual(
inline(inline_divcode, language_level=2)['f'](5,2),
2
)
def test_repeated_use(self):
inline_mulcode = "def f(int a, int b): return a * b"
self.assertEqual(inline(inline_mulcode)['f'](5, 2), 10)
self.assertEqual(inline(inline_mulcode)['f'](5, 3), 15)
self.assertEqual(inline(inline_mulcode)['f'](6, 2), 12)
self.assertEqual(inline(inline_mulcode)['f'](5, 2), 10)
f = inline(inline_mulcode)['f']
self.assertEqual(f(5, 2), 10)
self.assertEqual(f(5, 3), 15)
@unittest.skipIf(not has_numpy, "NumPy is not available")
def test_numpy(self):
import numpy
a = numpy.ndarray((10, 20))
a[0,0] = 10
self.assertEqual(safe_type(a), 'numpy.ndarray[numpy.float64_t, ndim=2]')
self.assertEqual(inline("return a[0,0]", a=a, **self._call_kwds), 10.0)
| TestInline |
python | kamyu104__LeetCode-Solutions | Python/minimum-initial-energy-to-finish-tasks.py | {
"start": 632,
"end": 1079
} | class ____(object):
def minimumEffort(self, tasks):
"""
:type tasks: List[List[int]]
:rtype: int
"""
tasks.sort(key=lambda x: x[0]-x[1]) # sort by save in desc
result = curr = 0
for a, m in tasks: # we need to pick all the saves, so greedily to pick the most save first is always better
result += max(m-curr, 0)
curr = max(curr, m)-a
return result
| Solution2 |
python | google__flatbuffers | tests/monster_test_generated.py | {
"start": 11343,
"end": 12688
} | class ____(object):
# Vec3T
def __init__(
self,
x = 0.0,
y = 0.0,
z = 0.0,
test1 = 0.0,
test2 = 0,
test3 = None,
):
self.x = x # type: float
self.y = y # type: float
self.z = z # type: float
self.test1 = test1 # type: float
self.test2 = test2 # type: int
self.test3 = test3 # type: Optional[TestT]
@classmethod
def InitFromBuf(cls, buf, pos):
vec3 = Vec3()
vec3.Init(buf, pos)
return cls.InitFromObj(vec3)
@classmethod
def InitFromPackedBuf(cls, buf, pos=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
return cls.InitFromBuf(buf, pos+n)
@classmethod
def InitFromObj(cls, vec3):
x = Vec3T()
x._UnPack(vec3)
return x
# Vec3T
def _UnPack(self, vec3):
if vec3 is None:
return
self.x = vec3.X()
self.y = vec3.Y()
self.z = vec3.Z()
self.test1 = vec3.Test1()
self.test2 = vec3.Test2()
if vec3.Test3(Test()) is not None:
self.test3 = TestT.InitFromObj(vec3.Test3(Test()))
# Vec3T
def Pack(self, builder):
return CreateVec3(builder, self.x, self.y, self.z, self.test1, self.test2, self.test3.a, self.test3.b)
| Vec3T |
python | ray-project__ray | python/ray/llm/_internal/serve/serving_patterns/data_parallel/dp_rank_assigner.py | {
"start": 167,
"end": 4768
} | class ____:
"""
Data Parallel Rank Assigner.
This class is used to assign a rank to each replica in the data parallel
deployment.
"""
def __init__(self, dp_size: int, dp_size_per_node: Optional[int] = None):
self.dp_size: int = dp_size
self.dp_size_per_node: Optional[int] = dp_size_per_node
self.lock: asyncio.Lock = asyncio.Lock()
self.dp_address: Optional[str] = None
self.dp_rpc_port: Optional[int] = None
self.master_info_event: asyncio.Event = asyncio.Event()
# Fields for _register_random_placement():
# Next rank to assign
self.next_rank: Optional[int] = None
# Fields for _register_node_pack_placement():
# Number of nodes to assign to
self.num_nodes: Optional[int] = None
# Map from node id to available ranks
self.node_to_avail_ranks: Dict[str, List[int]] = {}
if dp_size_per_node is None:
self.next_rank = 0
logger.info(
f"Using random placement rank assigner for DP size {self.dp_size}"
)
else:
if self.dp_size_per_node <= 0:
raise ValueError(
f"dp_size_per_node {self.dp_size_per_node} must be greater than 0"
)
if self.dp_size % self.dp_size_per_node != 0:
raise ValueError(
f"dp_size {self.dp_size} must be divisible by dp_size_per_node {self.dp_size_per_node}"
)
self.num_nodes = self.dp_size // self.dp_size_per_node
logger.info(
f"Using node pack placement rank assigner for DP size {self.dp_size}"
f"with dp_size_per_node {self.dp_size_per_node}"
)
async def register(self, node_id: Optional[str] = None):
"""
Register a replica and assign a rank to it.
Args:
node_id: The node id of the replica.
Returns:
The rank of the replica.
"""
if self.dp_size_per_node is None:
return await self._register_random_placement()
else:
if node_id is None:
raise ValueError("node_id is required for node pack placement")
return await self._register_node_pack_placement(node_id)
async def _register_random_placement(self):
"""
Assign a rank based on random placement.
The ranks are assigned in a random order, regardless of its node id.
"""
async with self.lock:
if self.next_rank >= self.dp_size:
raise ValueError(
f"Attempted to assign rank {self.next_rank} but dp_size is {self.dp_size}"
)
# TODO(rui): instead of using the naive increment approach,
# we should use the Ray Serve Replica Rank API to assign ranks.
rank = self.next_rank
self.next_rank += 1
return rank
async def _register_node_pack_placement(self, node_id: str):
"""
Assign a rank based on node pack placement.
This should be used for DeepEP which assumes that the ranks ranging from
[dp_rank_per_node * node_rank, dp_rank_per_node * (node_rank + 1) - 1] are
assigned to the same node.
For example, if dp_size_per_node is 8, and there are 16 ranks in total, then
the ranks [0, 7] should be assigned to one node, and ranks [8, 15] should be
assigned to another node.
"""
async with self.lock:
if not self.node_to_avail_ranks:
self.node_to_avail_ranks[node_id] = list(
range(1, self.dp_size_per_node)
)
return 0
elif node_id not in self.node_to_avail_ranks:
node_rank = len(self.node_to_avail_ranks)
assert node_rank < self.num_nodes
rank = node_rank * self.dp_size_per_node
self.node_to_avail_ranks[node_id] = list(
range(rank + 1, rank + self.dp_size_per_node)
)
return rank
else:
rank = self.node_to_avail_ranks[node_id].pop(0)
return rank
async def set_dp_master_info(self, dp_address: str, dp_rpc_port: int):
self.dp_address = dp_address
self.dp_rpc_port = dp_rpc_port
self.master_info_event.set()
async def get_dp_master_info(self):
await self.master_info_event.wait()
return self.dp_address, self.dp_rpc_port
| _DPRankAssigner |
python | openai__openai-python | src/openai/types/realtime/response_mcp_call_in_progress.py | {
"start": 202,
"end": 569
} | class ____(BaseModel):
event_id: str
"""The unique ID of the server event."""
item_id: str
"""The ID of the MCP tool call item."""
output_index: int
"""The index of the output item in the response."""
type: Literal["response.mcp_call.in_progress"]
"""The event type, must be `response.mcp_call.in_progress`."""
| ResponseMcpCallInProgress |
python | plotly__plotly.py | plotly/graph_objs/cone/_colorbar.py | {
"start": 233,
"end": 61389
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "cone"
_path_str = "cone.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"labelalias",
"len",
"lenmode",
"minexponent",
"nticks",
"orientation",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklabeloverflow",
"ticklabelposition",
"ticklabelstep",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"x",
"xanchor",
"xpad",
"xref",
"y",
"yanchor",
"ypad",
"yref",
}
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B. "SI" uses prefixes from "femto" f (10^-15) to "tera" T
(10^12). *SI extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or *SI
extended* is used and the exponent is beyond the above ranges,
the formatting rule will automatically be switched to the power
notation.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B', 'SI extended']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
@property
def labelalias(self):
"""
Replacement text for specific tick or hover labels. For example
using {US: 'USA', CA: 'Canada'} changes US to USA and CA to
Canada. The labels we would have shown must match the keys
exactly, after adding any tickprefix or ticksuffix. For
negative numbers the minus sign symbol used (U+2212) is wider
than the regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis type, and
both keys (if needed) and values (if desired) can include html-
like tags or MathJax.
The 'labelalias' property accepts values of any type
Returns
-------
Any
"""
return self["labelalias"]
@labelalias.setter
def labelalias(self, val):
self["labelalias"] = val
@property
def len(self):
"""
Sets the length of the color bar This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number. This only
has an effect when `tickformat` is "SI" or "B".
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
@property
def orientation(self):
"""
Sets the orientation of the colorbar.
The 'orientation' property is an enumeration that may be specified as:
- One of the following enumeration values:
['h', 'v']
Returns
-------
Any
"""
return self["orientation"]
@orientation.setter
def orientation(self, val):
self["orientation"] = val
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
@property
def thickness(self):
"""
Sets the thickness of the color bar This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["thicknessmode"]
@thicknessmode.setter
def thicknessmode(self, val):
self["thicknessmode"] = val
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180.
Numeric values outside this range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.cone.colorbar.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Returns
-------
plotly.graph_objs.cone.colorbar.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.cone.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Returns
-------
tuple[plotly.graph_objs.cone.colorbar.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
@property
def tickformatstopdefaults(self):
"""
When used in a template (as
layout.template.data.cone.colorbar.tickformatstopdefaults),
sets the default property values to use for elements of
cone.colorbar.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.cone.colorbar.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Returns
-------
plotly.graph_objs.cone.colorbar.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
@property
def ticklabeloverflow(self):
"""
Determines how we handle tick labels that would overflow either
the graph div or the domain of the axis. The default value for
inside tick labels is *hide past domain*. In other cases the
default is *hide past div*.
The 'ticklabeloverflow' property is an enumeration that may be specified as:
- One of the following enumeration values:
['allow', 'hide past div', 'hide past domain']
Returns
-------
Any
"""
return self["ticklabeloverflow"]
@ticklabeloverflow.setter
def ticklabeloverflow(self, val):
self["ticklabeloverflow"] = val
@property
def ticklabelposition(self):
"""
Determines where tick labels are drawn relative to the ticks.
Left and right options are used when `orientation` is "h", top
and bottom when `orientation` is "v".
The 'ticklabelposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', 'outside top', 'inside top',
'outside left', 'inside left', 'outside right', 'inside
right', 'outside bottom', 'inside bottom']
Returns
-------
Any
"""
return self["ticklabelposition"]
@ticklabelposition.setter
def ticklabelposition(self, val):
self["ticklabelposition"] = val
@property
def ticklabelstep(self):
"""
Sets the spacing between tick labels as compared to the spacing
between ticks. A value of 1 (default) means each tick gets a
label. A value of 2 means shows every 2nd label. A larger value
n means only every nth tick is labeled. `tick0` determines
which labels are shown. Not implemented for axes with `type`
"log" or "multicategory", or when `tickmode` is "array".
The 'ticklabelstep' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 9223372036854775807]
Returns
-------
int
"""
return self["ticklabelstep"]
@ticklabelstep.setter
def ticklabelstep(self, val):
self["ticklabelstep"] = val
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ticktext`.
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `tickvals`.
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`plotly.graph_objs.cone.colorbar.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Returns
-------
plotly.graph_objs.cone.colorbar.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
@property
def x(self):
"""
Sets the x position with respect to `xref` of the color bar (in
plot fraction). When `xref` is "paper", defaults to 1.02 when
`orientation` is "v" and 0.5 when `orientation` is "h". When
`xref` is "container", defaults to 1 when `orientation` is "v"
and 0.5 when `orientation` is "h". Must be between 0 and 1 if
`xref` is "container" and between "-2" and 3 if `xref` is
"paper".
The 'x' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def xanchor(self):
"""
Sets this color bar's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of
the color bar. Defaults to "left" when `orientation` is "v" and
"center" when `orientation` is "h".
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
@property
def xpad(self):
"""
Sets the amount of padding (in px) along the x direction.
The 'xpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["xpad"]
@xpad.setter
def xpad(self, val):
self["xpad"] = val
@property
def xref(self):
"""
Sets the container `x` refers to. "container" spans the entire
`width` of the plot. "paper" refers to the width of the
plotting area only.
The 'xref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['container', 'paper']
Returns
-------
Any
"""
return self["xref"]
@xref.setter
def xref(self, val):
self["xref"] = val
@property
def y(self):
"""
Sets the y position with respect to `yref` of the color bar (in
plot fraction). When `yref` is "paper", defaults to 0.5 when
`orientation` is "v" and 1.02 when `orientation` is "h". When
`yref` is "container", defaults to 0.5 when `orientation` is
"v" and 1 when `orientation` is "h". Must be between 0 and 1 if
`yref` is "container" and between "-2" and 3 if `yref` is
"paper".
The 'y' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
@property
def yanchor(self):
"""
Sets this color bar's vertical position anchor This anchor
binds the `y` position to the "top", "middle" or "bottom" of
the color bar. Defaults to "middle" when `orientation` is "v"
and "bottom" when `orientation` is "h".
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
@property
def ypad(self):
"""
Sets the amount of padding (in px) along the y direction.
The 'ypad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ypad"]
@ypad.setter
def ypad(self, val):
self["ypad"] = val
@property
def yref(self):
"""
Sets the container `y` refers to. "container" spans the entire
`height` of the plot. "paper" refers to the height of the
plotting area only.
The 'yref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['container', 'paper']
Returns
-------
Any
"""
return self["yref"]
@yref.setter
def yref(self, val):
self["yref"] = val
@property
def _prop_descriptions(self):
return """\
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
from "femto" f (10^-15) to "tera" T (10^12). *SI
extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
*SI extended* is used and the exponent is beyond the
above ranges, the formatting rule will automatically be
switched to the power notation.
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
orientation
Sets the orientation of the colorbar.
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.cone.colorbar.T
ickformatstop` instances or dicts with compatible
properties
tickformatstopdefaults
When used in a template (as layout.template.data.cone.c
olorbar.tickformatstopdefaults), sets the default
property values to use for elements of
cone.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn relative to the
ticks. Left and right options are used when
`orientation` is "h", top and bottom when `orientation`
is "v".
ticklabelstep
Sets the spacing between tick labels as compared to the
spacing between ticks. A value of 1 (default) means
each tick gets a label. A value of 2 means shows every
2nd label. A larger value n means only every nth tick
is labeled. `tick0` determines which labels are shown.
Not implemented for axes with `type` "log" or
"multicategory", or when `tickmode` is "array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.cone.colorbar.Title`
instance or dict with compatible properties
x
Sets the x position with respect to `xref` of the color
bar (in plot fraction). When `xref` is "paper",
defaults to 1.02 when `orientation` is "v" and 0.5 when
`orientation` is "h". When `xref` is "container",
defaults to 1 when `orientation` is "v" and 0.5 when
`orientation` is "h". Must be between 0 and 1 if `xref`
is "container" and between "-2" and 3 if `xref` is
"paper".
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar. Defaults to "left" when
`orientation` is "v" and "center" when `orientation` is
"h".
xpad
Sets the amount of padding (in px) along the x
direction.
xref
Sets the container `x` refers to. "container" spans the
entire `width` of the plot. "paper" refers to the width
of the plotting area only.
y
Sets the y position with respect to `yref` of the color
bar (in plot fraction). When `yref` is "paper",
defaults to 0.5 when `orientation` is "v" and 1.02 when
`orientation` is "h". When `yref` is "container",
defaults to 0.5 when `orientation` is "v" and 1 when
`orientation` is "h". Must be between 0 and 1 if `yref`
is "container" and between "-2" and 3 if `yref` is
"paper".
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar. Defaults to "middle" when
`orientation` is "v" and "bottom" when `orientation` is
"h".
ypad
Sets the amount of padding (in px) along the y
direction.
yref
Sets the container `y` refers to. "container" spans the
entire `height` of the plot. "paper" refers to the
height of the plotting area only.
"""
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
labelalias=None,
len=None,
lenmode=None,
minexponent=None,
nticks=None,
orientation=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklabeloverflow=None,
ticklabelposition=None,
ticklabelstep=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
x=None,
xanchor=None,
xpad=None,
xref=None,
y=None,
yanchor=None,
ypad=None,
yref=None,
**kwargs,
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.cone.ColorBar`
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
from "femto" f (10^-15) to "tera" T (10^12). *SI
extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
*SI extended* is used and the exponent is beyond the
above ranges, the formatting rule will automatically be
switched to the power notation.
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
orientation
Sets the orientation of the colorbar.
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.cone.colorbar.T
ickformatstop` instances or dicts with compatible
properties
tickformatstopdefaults
When used in a template (as layout.template.data.cone.c
olorbar.tickformatstopdefaults), sets the default
property values to use for elements of
cone.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn relative to the
ticks. Left and right options are used when
`orientation` is "h", top and bottom when `orientation`
is "v".
ticklabelstep
Sets the spacing between tick labels as compared to the
spacing between ticks. A value of 1 (default) means
each tick gets a label. A value of 2 means shows every
2nd label. A larger value n means only every nth tick
is labeled. `tick0` determines which labels are shown.
Not implemented for axes with `type` "log" or
"multicategory", or when `tickmode` is "array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.cone.colorbar.Title`
instance or dict with compatible properties
x
Sets the x position with respect to `xref` of the color
bar (in plot fraction). When `xref` is "paper",
defaults to 1.02 when `orientation` is "v" and 0.5 when
`orientation` is "h". When `xref` is "container",
defaults to 1 when `orientation` is "v" and 0.5 when
`orientation` is "h". Must be between 0 and 1 if `xref`
is "container" and between "-2" and 3 if `xref` is
"paper".
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar. Defaults to "left" when
`orientation` is "v" and "center" when `orientation` is
"h".
xpad
Sets the amount of padding (in px) along the x
direction.
xref
Sets the container `x` refers to. "container" spans the
entire `width` of the plot. "paper" refers to the width
of the plotting area only.
y
Sets the y position with respect to `yref` of the color
bar (in plot fraction). When `yref` is "paper",
defaults to 0.5 when `orientation` is "v" and 1.02 when
`orientation` is "h". When `yref` is "container",
defaults to 0.5 when `orientation` is "v" and 1 when
`orientation` is "h". Must be between 0 and 1 if `yref`
is "container" and between "-2" and 3 if `yref` is
"paper".
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar. Defaults to "middle" when
`orientation` is "v" and "bottom" when `orientation` is
"h".
ypad
Sets the amount of padding (in px) along the y
direction.
yref
Sets the container `y` refers to. "container" spans the
entire `height` of the plot. "paper" refers to the
height of the plotting area only.
Returns
-------
ColorBar
"""
super().__init__("colorbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.cone.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.cone.ColorBar`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("borderwidth", arg, borderwidth)
self._set_property("dtick", arg, dtick)
self._set_property("exponentformat", arg, exponentformat)
self._set_property("labelalias", arg, labelalias)
self._set_property("len", arg, len)
self._set_property("lenmode", arg, lenmode)
self._set_property("minexponent", arg, minexponent)
self._set_property("nticks", arg, nticks)
self._set_property("orientation", arg, orientation)
self._set_property("outlinecolor", arg, outlinecolor)
self._set_property("outlinewidth", arg, outlinewidth)
self._set_property("separatethousands", arg, separatethousands)
self._set_property("showexponent", arg, showexponent)
self._set_property("showticklabels", arg, showticklabels)
self._set_property("showtickprefix", arg, showtickprefix)
self._set_property("showticksuffix", arg, showticksuffix)
self._set_property("thickness", arg, thickness)
self._set_property("thicknessmode", arg, thicknessmode)
self._set_property("tick0", arg, tick0)
self._set_property("tickangle", arg, tickangle)
self._set_property("tickcolor", arg, tickcolor)
self._set_property("tickfont", arg, tickfont)
self._set_property("tickformat", arg, tickformat)
self._set_property("tickformatstops", arg, tickformatstops)
self._set_property("tickformatstopdefaults", arg, tickformatstopdefaults)
self._set_property("ticklabeloverflow", arg, ticklabeloverflow)
self._set_property("ticklabelposition", arg, ticklabelposition)
self._set_property("ticklabelstep", arg, ticklabelstep)
self._set_property("ticklen", arg, ticklen)
self._set_property("tickmode", arg, tickmode)
self._set_property("tickprefix", arg, tickprefix)
self._set_property("ticks", arg, ticks)
self._set_property("ticksuffix", arg, ticksuffix)
self._set_property("ticktext", arg, ticktext)
self._set_property("ticktextsrc", arg, ticktextsrc)
self._set_property("tickvals", arg, tickvals)
self._set_property("tickvalssrc", arg, tickvalssrc)
self._set_property("tickwidth", arg, tickwidth)
self._set_property("title", arg, title)
self._set_property("x", arg, x)
self._set_property("xanchor", arg, xanchor)
self._set_property("xpad", arg, xpad)
self._set_property("xref", arg, xref)
self._set_property("y", arg, y)
self._set_property("yanchor", arg, yanchor)
self._set_property("ypad", arg, ypad)
self._set_property("yref", arg, yref)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| ColorBar |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclassConverter1.py | {
"start": 1022,
"end": 2259
} | class ____(ModelBase):
field0: int = model_field(converter=converter_simple)
field1: int = model_field(converter=converter_with_param_before_args)
field2: int = model_field(converter=converter_with_args)
field3: int = model_field(converter=converter_with_extra_defaulted_params)
field4: int = model_field(converter=converter_with_default_for_first_param)
field5: int | str = model_field(
converter=converter_with_more_specialized_return_type
)
field6: ConverterClass = model_field(converter=ConverterClass)
reveal_type(
DC1.__init__,
expected_text="(self: DC1, field0: str, field1: str, field2: str, field3: str, field4: str, field5: str, field6: str | bytes) -> None",
)
# This overload will be ignored because it has too many arguments.
@overload
def overloaded_converter(s: float, secondParam: str, /) -> int: ...
# This overload will be ignored because its return type doesn't match the field type.
@overload
def overloaded_converter(s: float) -> str: ...
@overload
def overloaded_converter(s: str) -> int: ...
@overload
def overloaded_converter(s: list[str]) -> int: ...
def overloaded_converter(s: float | str | list[str], *args: str) -> int | float | str:
return 0
| DC1 |
python | kamyu104__LeetCode-Solutions | Python/find-sum-of-array-product-of-magical-sequences.py | {
"start": 1723,
"end": 3431
} | class ____(object):
def magicalSum(self, m, k, nums):
"""
:type m: int
:type k: int
:type nums: List[int]
:rtype: int
"""
def popcount(x):
return bin(x).count('1')
MOD = 10**9+7
fact, inv, inv_fact = [[1]*2 for _ in xrange(3)]
def nCr(n, k):
while len(inv) <= n: # lazy initialization
fact.append(fact[-1]*len(inv) % MOD)
inv.append(inv[MOD%len(inv)]*(MOD-MOD//len(inv)) % MOD) # https://cp-algorithms.com/algebra/module-inverse.html
inv_fact.append(inv_fact[-1]*inv[-1] % MOD)
return (fact[n]*inv_fact[n-k] % MOD) * inv_fact[k] % MOD
dp = [[[0]*(m+1) for _ in xrange(k+1)] for _ in xrange(m+1)] # dp[c][b][l]: sum of carry c with b set bits with remain size of l
dp[0][0][m] = 1
for x in nums:
new_dp = [[[0]*(m+1) for _ in xrange(k+1)] for _ in xrange(m+1)]
for c in xrange(m+1):
for b in xrange(k+1):
for l in xrange(m+1):
if not dp[c][b][l]:
continue
base = 1
for cnt in xrange(l+1):
nc, nb, nl = (c+cnt)>>1, b+((c+cnt)&1), l-cnt
if nb > k:
continue
new_dp[nc][nb][nl] = (new_dp[nc][nb][nl]+dp[c][b][l]*base*nCr(l, cnt)) % MOD
base = (base*x)%MOD
dp = new_dp
return reduce(lambda accu, x: (accu+x)%MOD, (dp[c][k-popcount(c)][0] for c in xrange(m+1) if k-popcount(c) >= 0), 0)
| Solution2 |
python | plotly__plotly.py | plotly/graph_objs/box/_line.py | {
"start": 233,
"end": 2932
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "box"
_path_str = "box.line"
_valid_props = {"color", "width"}
@property
def color(self):
"""
Sets the color of line bounding the box(es).
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def width(self):
"""
Sets the width (in px) of line bounding the box(es).
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the color of line bounding the box(es).
width
Sets the width (in px) of line bounding the box(es).
"""
def __init__(self, arg=None, color=None, width=None, **kwargs):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.box.Line`
color
Sets the color of line bounding the box(es).
width
Sets the width (in px) of line bounding the box(es).
Returns
-------
Line
"""
super().__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.box.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.box.Line`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("width", arg, width)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Line |
python | huggingface__transformers | src/transformers/models/x_clip/modeling_x_clip.py | {
"start": 48189,
"end": 63343
} | class ____(XCLIPPreTrainedModel):
config: XCLIPConfig
def __init__(self, config: XCLIPConfig):
super().__init__(config)
if not isinstance(config.text_config, XCLIPTextConfig):
raise TypeError(
"config.text_config is expected to be of type XCLIPTextConfig but is of type"
f" {type(config.text_config)}."
)
if not isinstance(config.vision_config, XCLIPVisionConfig):
raise TypeError(
"config.vision_config is expected to be of type XCLIPVisionConfig but is of type"
f" {type(config.vision_config)}."
)
text_config = config.text_config
vision_config = config.vision_config
# The module using it is not a PreTrainedModel subclass so we need this
text_config._attn_implementation = config._attn_implementation
# The module using it is not a PreTrainedModel subclass so we need this
vision_config._attn_implementation = config._attn_implementation
self.projection_dim = config.projection_dim
self.text_embed_dim = text_config.hidden_size
self.vision_embed_dim = vision_config.hidden_size
self.text_model = XCLIPTextTransformer(text_config)
self.vision_model = XCLIPVisionTransformer(vision_config)
self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
self.prompts_visual_layernorm = nn.LayerNorm(self.vision_embed_dim, eps=config.vision_config.layer_norm_eps)
self.prompts_visual_projection = nn.Parameter(torch.randn(self.vision_embed_dim, self.projection_dim))
mit_config = copy.copy(vision_config)
mit_config.hidden_size = vision_config.mit_hidden_size
mit_config.intermediate_size = vision_config.mit_intermediate_size
mit_config.num_hidden_layers = vision_config.mit_num_hidden_layers
mit_config.num_attention_heads = vision_config.mit_num_attention_heads
self.mit = XCLIPMultiframeIntegrationTransformer(mit_config)
self.prompts_generator = XCLIPPromptGenerator(config)
# Initialize weights and apply final processing
self.post_init()
@filter_out_non_signature_kwargs()
@auto_docstring
def get_text_features(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
) -> torch.FloatTensor:
r"""
Returns:
text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by
applying the projection layer to the pooled output of [`XCLIPTextModel`].
Examples:
```python
>>> import torch
>>> from transformers import AutoTokenizer, AutoModel
>>> tokenizer = AutoTokenizer.from_pretrained("microsoft/xclip-base-patch32")
>>> model = AutoModel.from_pretrained("microsoft/xclip-base-patch32")
>>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
>>> with torch.inference_mode():
... text_features = model.get_text_features(**inputs)
```"""
text_outputs: BaseModelOutputWithPooling = self.text_model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
)
text_features = self.text_projection(text_outputs.pooler_output)
return text_features
@filter_out_non_signature_kwargs()
@auto_docstring
def get_video_features(
self,
pixel_values: torch.Tensor,
) -> torch.FloatTensor:
r"""
Returns:
video_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The video embeddings obtained by
applying the projection layer to the pooled output of [`XCLIPVisionModel`] and
[`XCLIPMultiframeIntegrationTransformer`].
Examples:
```python
>>> import av
>>> import torch
>>> import numpy as np
>>> from transformers import AutoProcessor, AutoModel
>>> from huggingface_hub import hf_hub_download
>>> np.random.seed(0)
>>> def read_video_pyav(container, indices):
... '''
... Decode the video with PyAV decoder.
... Args:
... container (`av.container.input.InputContainer`): PyAV container.
... indices (`list[int]`): List of frame indices to decode.
... Returns:
... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
... '''
... frames = []
... container.seek(0)
... start_index = indices[0]
... end_index = indices[-1]
... for i, frame in enumerate(container.decode(video=0)):
... if i > end_index:
... break
... if i >= start_index and i in indices:
... frames.append(frame)
... return np.stack([x.to_ndarray(format="rgb24") for x in frames])
>>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
... '''
... Sample a given number of frame indices from the video.
... Args:
... clip_len (`int`): Total number of frames to sample.
... frame_sample_rate (`int`): Sample every n-th frame.
... seg_len (`int`): Maximum allowed index of sample's last frame.
... Returns:
... indices (`list[int]`): List of sampled frame indices
... '''
... converted_len = int(clip_len * frame_sample_rate)
... end_idx = np.random.randint(converted_len, seg_len)
... start_idx = end_idx - converted_len
... indices = np.linspace(start_idx, end_idx, num=clip_len)
... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
... return indices
>>> # video clip consists of 300 frames (10 seconds at 30 FPS)
>>> file_path = hf_hub_download(
... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
... )
>>> container = av.open(file_path)
>>> # sample 8 frames
>>> indices = sample_frame_indices(clip_len=8, frame_sample_rate=1, seg_len=container.streams.video[0].frames)
>>> video = read_video_pyav(container, indices)
>>> processor = AutoProcessor.from_pretrained("microsoft/xclip-base-patch32")
>>> model = AutoModel.from_pretrained("microsoft/xclip-base-patch32")
>>> inputs = processor(videos=list(video), return_tensors="pt")
>>> video_features = model.get_video_features(**inputs)
```"""
batch_size, num_frames, num_channels, height, width = pixel_values.shape
pixel_values = pixel_values.reshape(-1, num_channels, height, width)
vision_outputs: BaseModelOutputWithPooling = self.vision_model(pixel_values=pixel_values)
video_embeds = vision_outputs.pooler_output
video_embeds = self.visual_projection(video_embeds)
cls_features = video_embeds.view(batch_size, num_frames, -1)
mit_outputs: BaseModelOutputWithPooling = self.mit(cls_features)
video_embeds = mit_outputs.pooler_output
return video_embeds
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
return_loss: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
interpolate_pos_encoding: bool = False,
return_dict: Optional[bool] = None,
) -> Union[tuple, XCLIPOutput]:
r"""
return_loss (`bool`, *optional*):
Whether or not to return the contrastive loss.
Examples:
```python
>>> import av
>>> import torch
>>> import numpy as np
>>> from transformers import AutoProcessor, AutoModel
>>> from huggingface_hub import hf_hub_download
>>> np.random.seed(0)
>>> def read_video_pyav(container, indices):
... '''
... Decode the video with PyAV decoder.
... Args:
... container (`av.container.input.InputContainer`): PyAV container.
... indices (`list[int]`): List of frame indices to decode.
... Returns:
... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
... '''
... frames = []
... container.seek(0)
... start_index = indices[0]
... end_index = indices[-1]
... for i, frame in enumerate(container.decode(video=0)):
... if i > end_index:
... break
... if i >= start_index and i in indices:
... frames.append(frame)
... return np.stack([x.to_ndarray(format="rgb24") for x in frames])
>>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
... '''
... Sample a given number of frame indices from the video.
... Args:
... clip_len (`int`): Total number of frames to sample.
... frame_sample_rate (`int`): Sample every n-th frame.
... seg_len (`int`): Maximum allowed index of sample's last frame.
... Returns:
... indices (`list[int]`): List of sampled frame indices
... '''
... converted_len = int(clip_len * frame_sample_rate)
... end_idx = np.random.randint(converted_len, seg_len)
... start_idx = end_idx - converted_len
... indices = np.linspace(start_idx, end_idx, num=clip_len)
... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
... return indices
>>> # video clip consists of 300 frames (10 seconds at 30 FPS)
>>> file_path = hf_hub_download(
... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
... )
>>> container = av.open(file_path)
>>> # sample 8 frames
>>> indices = sample_frame_indices(clip_len=8, frame_sample_rate=1, seg_len=container.streams.video[0].frames)
>>> video = read_video_pyav(container, indices)
>>> processor = AutoProcessor.from_pretrained("microsoft/xclip-base-patch32")
>>> model = AutoModel.from_pretrained("microsoft/xclip-base-patch32")
>>> inputs = processor(
... text=["playing sports", "eating spaghetti", "go shopping"],
... videos=list(video),
... return_tensors="pt",
... padding=True,
... )
>>> # forward pass
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> logits_per_video = outputs.logits_per_video # this is the video-text similarity score
>>> probs = logits_per_video.softmax(dim=1) # we can take the softmax to get the label probabilities
>>> print(probs)
tensor([[1.9496e-04, 9.9960e-01, 2.0825e-04]])
```"""
# Use X_CLIP model's config for some fields (if specified) instead of those of vision & text components.
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
batch_size, num_frames, num_channels, height, width = pixel_values.shape
pixel_values = pixel_values.reshape(-1, num_channels, height, width)
vision_outputs = self.vision_model(
pixel_values=pixel_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
interpolate_pos_encoding=interpolate_pos_encoding,
return_dict=return_dict,
)
video_embeds = vision_outputs[1]
video_embeds = self.visual_projection(video_embeds)
cls_features = video_embeds.view(batch_size, num_frames, -1)
mit_outputs = self.mit(
cls_features,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
video_embeds = mit_outputs[1]
img_features = vision_outputs[0][:, 1:, :]
img_features = self.prompts_visual_layernorm(img_features)
img_features = img_features @ self.prompts_visual_projection
img_features = img_features.view(batch_size, num_frames, -1, video_embeds.shape[-1])
img_features = img_features.mean(dim=1, keepdim=False)
text_outputs = self.text_model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
text_embeds = text_outputs[1]
text_embeds = self.text_projection(text_embeds)
text_embeds = text_embeds.unsqueeze(0).expand(batch_size, -1, -1)
text_embeds = text_embeds + self.prompts_generator(text_embeds, img_features)
# normalized features
video_embeds = video_embeds / video_embeds.norm(p=2, dim=-1, keepdim=True)
text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
# cosine similarity as logits
logit_scale = self.logit_scale.exp()
logits_per_video = torch.einsum("bd,bkd->bk", video_embeds, logit_scale * text_embeds)
logits_per_text = logits_per_video.T
loss = None
if return_loss:
loss = x_clip_loss(logits_per_text)
if not return_dict:
output = (logits_per_video, logits_per_text, text_embeds, video_embeds, text_outputs, vision_outputs)
return ((loss,) + output) if loss is not None else output
return XCLIPOutput(
loss=loss,
logits_per_video=logits_per_video,
logits_per_text=logits_per_text,
text_embeds=text_embeds,
video_embeds=video_embeds,
text_model_output=text_outputs,
vision_model_output=vision_outputs,
mit_output=mit_outputs,
)
__all__ = ["XCLIPModel", "XCLIPPreTrainedModel", "XCLIPTextModel", "XCLIPVisionModel"]
| XCLIPModel |
python | miyuchina__mistletoe | test/test_block_token.py | {
"start": 26081,
"end": 26686
} | class ____(unittest.TestCase):
def setUp(self):
block_token.add_token(block_token.HtmlBlock)
self.addCleanup(block_token.reset_tokens)
def test_textarea_block_may_contain_blank_lines(self):
lines = ['<textarea>\n',
'\n',
'*foo*\n',
'\n',
'_bar_\n',
'\n',
'</textarea>\n']
document = block_token.Document(lines)
tokens = document.children
self.assertEqual(1, len(tokens))
self.assertIsInstance(tokens[0], block_token.HtmlBlock)
| TestHtmlBlock |
python | run-llama__llama_index | llama-index-core/llama_index/core/storage/docstore/registry.py | {
"start": 207,
"end": 648
} | class ____(str, Enum):
MONGO = "mongo"
SIMPLE = "simple"
DOCSTORE_TYPE_TO_CLASS: Dict[DocumentStoreType, Type[BaseDocumentStore]] = {
DocumentStoreType.SIMPLE: SimpleDocumentStore,
}
DOCSTORE_CLASS_TO_TYPE: Dict[Type[BaseDocumentStore], DocumentStoreType] = {
cls_: type_ for type_, cls_ in DOCSTORE_TYPE_TO_CLASS.items()
}
def get_default_docstore() -> BaseDocumentStore:
return SimpleDocumentStore()
| DocumentStoreType |
python | pydantic__pydantic | .github/actions/people/people.py | {
"start": 4584,
"end": 4677
} | class ____(BaseModel):
"""Container for issue edges."""
edges: list[IssuesEdge]
| Issues |
python | chroma-core__chroma | chromadb/test/ef/test_custom_ef.py | {
"start": 238,
"end": 427
} | class ____(EmbeddingFunction[Embeddable]):
def __call__(self, input: Embeddable) -> Embeddings:
return cast(Embeddings, np.array([1, 2, 3]).tolist())
| LegacyCustomEmbeddingFunction |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/execution_api/versions/head/test_task_instances.py | {
"start": 92015,
"end": 95465
} | class ____:
@pytest.mark.parametrize(
"logical_date",
[
datetime(2025, 6, 6, tzinfo=timezone.utc),
None,
],
)
def test_ti_inactive_inlets_and_outlets(self, logical_date, client, dag_maker):
"""Test the inactive assets in inlets and outlets can be found."""
with dag_maker("test_inlets_and_outlets"):
EmptyOperator(
task_id="task1",
inlets=[Asset(name="inlet-name"), Asset(name="inlet-name", uri="but-different-uri")],
outlets=[
Asset(name="outlet-name", uri="uri"),
Asset(name="outlet-name", uri="second-different-uri"),
],
)
dr = dag_maker.create_dagrun(logical_date=logical_date)
task1_ti = dr.get_task_instance("task1")
response = client.get(f"/execution/task-instances/{task1_ti.id}/validate-inlets-and-outlets")
assert response.status_code == 200
inactive_assets = response.json()["inactive_assets"]
expected_inactive_assets = (
{
"name": "inlet-name",
"type": "Asset",
"uri": "but-different-uri",
},
{
"name": "outlet-name",
"type": "Asset",
"uri": "second-different-uri",
},
)
for asset in expected_inactive_assets:
assert asset in inactive_assets
@pytest.mark.parametrize(
"logical_date",
[
datetime(2025, 6, 6, tzinfo=timezone.utc),
None,
],
)
def test_ti_inactive_inlets_and_outlets_without_inactive_assets(self, logical_date, client, dag_maker):
"""Test the task without inactive assets in its inlets or outlets returns empty list."""
with dag_maker("test_inlets_and_outlets_inactive"):
EmptyOperator(
task_id="inactive_task1",
inlets=[Asset(name="inlet-name")],
outlets=[Asset(name="outlet-name", uri="uri")],
)
dr = dag_maker.create_dagrun(logical_date=logical_date)
task1_ti = dr.get_task_instance("inactive_task1")
response = client.get(f"/execution/task-instances/{task1_ti.id}/validate-inlets-and-outlets")
assert response.status_code == 200
assert response.json() == {"inactive_assets": []}
def test_ti_run_with_null_conf(self, client, session, create_task_instance):
"""Test that task instances can start when dag_run.conf is NULL."""
ti = create_task_instance(
task_id="test_ti_run_with_null_conf",
state=State.QUEUED,
dagrun_state=DagRunState.RUNNING,
session=session,
)
# Set conf to NULL to simulate Airflow 2.x upgrade or offline migration
ti.dag_run.conf = None
session.commit()
response = client.patch(
f"/execution/task-instances/{ti.id}/run",
json={
"state": "running",
"pid": 100,
"hostname": "test-hostname",
"unixname": "test-user",
"start_date": timezone.utcnow().isoformat(),
},
)
assert response.status_code == 200, f"Response: {response.text}"
context = response.json()
assert context["dag_run"]["conf"] is None
| TestInvactiveInletsAndOutlets |
python | django__django | tests/model_meta/models.py | {
"start": 439,
"end": 1733
} | class ____(models.Model):
# DATA fields
data_abstract = models.CharField(max_length=10)
fk_abstract = models.ForeignKey(
Relation, models.CASCADE, related_name="fk_abstract_rel"
)
# M2M fields
m2m_abstract = models.ManyToManyField(Relation, related_name="m2m_abstract_rel")
friends_abstract = models.ManyToManyField("self", symmetrical=True)
following_abstract = models.ManyToManyField(
"self", related_name="followers_abstract", symmetrical=False
)
# VIRTUAL fields
data_not_concrete_abstract = models.ForeignObject(
Relation,
on_delete=models.CASCADE,
from_fields=["abstract_non_concrete_id"],
to_fields=["id"],
related_name="fo_abstract_rel",
)
# GFK fields
content_type_abstract = models.ForeignKey(
ContentType, models.CASCADE, related_name="+"
)
object_id_abstract = models.PositiveIntegerField()
content_object_abstract = GenericForeignKey(
"content_type_abstract", "object_id_abstract"
)
# GR fields
generic_relation_abstract = GenericRelation(Relation)
class Meta:
abstract = True
@property
def test_property(self):
return 1
test_instance_only_descriptor = InstanceOnlyDescriptor()
| AbstractPerson |
python | openai__openai-python | src/openai/types/beta/threads/run.py | {
"start": 1228,
"end": 1378
} | class ____(BaseModel):
tool_calls: List[RequiredActionFunctionToolCall]
"""A list of the relevant tool calls."""
| RequiredActionSubmitToolOutputs |
python | ray-project__ray | python/ray/train/v2/_internal/callbacks/working_dir_setup.py | {
"start": 281,
"end": 933
} | class ____(WorkerGroupCallback):
def after_worker_group_start(self, worker_group: WorkerGroup):
def chdir_to_working_dir() -> None:
"""Create the local working directory for the experiment."""
local_working_directory = (
get_train_context().get_storage().local_working_directory
)
os.makedirs(local_working_directory, exist_ok=True)
logger.debug(
f"Changing the working directory to: {local_working_directory}"
)
os.chdir(local_working_directory)
worker_group.execute(chdir_to_working_dir)
| WorkingDirectorySetupCallback |
python | pytorch__pytorch | torch/fx/proxy.py | {
"start": 26692,
"end": 27511
} | class ____(Proxy):
@compatibility(is_backward_compatible=True)
def __init__(self, root: Proxy, attr: str):
self.root = root
self.attr = attr
self.tracer = root.tracer
self._node: Optional[Node] = None
@property
def node(self):
# the node for attributes is added lazily, since most will just be method calls
# which do not rely on the getitem call
if self._node is None:
self._node = self.tracer.create_proxy(
"call_function", getattr, (self.root, self.attr), {}
).node
return self._node
def __call__(self, *args, **kwargs):
return self.tracer.create_proxy(
"call_method", self.attr, (self.root,) + args, kwargs
)
@compatibility(is_backward_compatible=False)
| Attribute |
python | readthedocs__readthedocs.org | readthedocs/search/api/v3/executor.py | {
"start": 293,
"end": 8532
} | class ____:
"""
Parse the query, search, and return the projects used in the search.
:param arguments_required: If `True` and the user didn't provide
any arguments in the query, we don't perform the search.
:param default_all: If `True` and `arguments_required` is `False`
we search all projects by default, otherwise we search all projects
the user has access to.
:param max_projects: The maximum number of projects used in the search.
This limit is only applied for projects given explicitly,
not when we default to search all projects.
"""
def __init__(
self, *, request, query, arguments_required=True, default_all=False, max_projects=100
):
self.request = request
self.query = query
self.arguments_required = arguments_required
self.default_all = default_all
self.max_projects = max_projects
@cached_property
def projects(self):
"""
Return all projects used in this search.
If empty, it will search all projects.
:returns: A list of tuples (project, version).
"""
projects = islice(self._get_projects_to_search(), self.max_projects)
# Make sure we are using just one version per-project,
# searching multiple versions of the same projects isn't supported yet.
projects_dict = dict(projects)
return list(projects_dict.items())
def search(self, **kwargs):
"""
Perform the search.
:param kwargs: All kwargs are passed to the `PageSearch` constructor.
"""
if not self._has_arguments and self.arguments_required:
return None
projects = {project.slug: version.slug for project, version in self.projects}
# If the search is done without projects, ES will search on all projects.
# If we don't have projects and the user provided arguments,
# it means we don't have anything to search on (no results).
# Or if we don't have projects and we don't allow searching all,
# we also just return.
if not projects and (self._has_arguments or not self.default_all):
return None
search = PageSearch(
query=self.parser.query,
projects=projects,
**kwargs,
)
return search
def _get_projects_to_search(self):
"""
Return an iterator of (project, version) used in this search.
An iterator (yield syntax) is used so we can stop at
``self.max_projects``, this way we avoid fetching projects
that we won't use.
"""
if not self._has_arguments:
if self.arguments_required:
return None
yield from self._get_default_projects()
return None
for value in self.parser.arguments["project"]:
project, version = self._get_project_and_version(value)
if version and self._has_permission(self.request, version):
yield project, version
for value in self.parser.arguments["subprojects"]:
project, version = self._get_project_and_version(value)
# Add the project itself.
if version and self._has_permission(self.request, version):
yield project, version
if project:
# If the user didn't provide a version, version_slug will be `None`,
# and we add all subprojects with their default version,
# otherwise we will add all projects that match the given version.
_, version_slug = self._split_project_and_version(value)
yield from self._get_subprojects(
project=project,
version_slug=version_slug,
)
# Add all projects the user has access to.
if self.parser.arguments["user"] == "@me":
yield from self._get_projects_from_user()
def _get_projects_from_user(self):
for project in Project.objects.for_user(user=self.request.user):
version = self._get_project_version(
project=project,
version_slug=project.default_version,
include_hidden=False,
)
if version and self._has_permission(self.request, version):
yield project, version
def _get_subprojects(self, project, version_slug=None):
"""
Get a tuple (project, version) of all subprojects of `project`.
If `version_slug` doesn't match a version of the subproject,
the default version will be used.
If `version_slug` is None, we will always use the default version.
"""
relationships = project.subprojects.select_related("child")
for relationship in relationships:
subproject = relationship.child
# NOTE: Since we already have the superproject relationship,
# we can set it here to avoid an extra query later
# when using Project.parent_relationship property.
# The superproject instannce is also shared among all subprojects.
subproject._superprojects = [relationship]
version = None
if version_slug:
version = self._get_project_version(
project=subproject,
version_slug=version_slug,
include_hidden=False,
)
# Fallback to the default version of the subproject.
if not version and subproject.default_version:
version = self._get_project_version(
project=subproject,
version_slug=subproject.default_version,
include_hidden=False,
)
if version and self._has_permission(self.request, version):
yield subproject, version
def _has_permission(self, request, version):
"""
Check if `user` is authorized to access `version`.
The queryset from `_get_project_version` already filters public
projects. This is mainly to be overridden in .com to make use of
the auth backends in the proxied API.
"""
return True
def _get_project_version(self, project, version_slug, include_hidden=True):
"""
Get a version from a given project.
:param project: A `Project` object.
:param version_slug: The version slug.
:param include_hidden: If hidden versions should be considered.
"""
return (
project.versions(manager=INTERNAL)
.public(
user=self.request.user,
only_built=True,
include_hidden=include_hidden,
)
.filter(slug=version_slug)
.first()
)
@cached_property
def _has_arguments(self):
return any(self.parser.arguments.values())
def _get_default_projects(self):
if self.default_all:
# Default to search all.
return []
return self._get_projects_from_user()
@cached_property
def parser(self):
parser = SearchQueryParser(self.query)
parser.parse()
return parser
def _split_project_and_version(self, term):
"""
Split a term of the form ``{project}/{version}``.
:returns: A tuple of project and version.
If the version part isn't found, `None` will be returned in its place.
"""
parts = term.split("/", maxsplit=1)
if len(parts) > 1:
return parts
return parts[0], None
def _get_project_and_version(self, value):
project_slug, version_slug = self._split_project_and_version(value)
project = Project.objects.filter(slug=project_slug).first()
if not project:
return None, None
if not version_slug:
version_slug = project.default_version
if version_slug:
version = self._get_project_version(
project=project,
version_slug=version_slug,
)
return project, version
return None, None
| SearchExecutor |
python | ZoranPandovski__al-go-rithms | machine_learning/python/astar.py | {
"start": 608,
"end": 1421
} | class ____:
"""
Class cell represents a cell in the world which have the property
position : The position of the represented by tupleof x and y
coordinates initially set to (0,0)
parent : This contains the parent cell object which we visited
before arrinving this cell
g,h,f : The parameters for constructing the heuristic function
which can be any function. for simplicity used line
distance
"""
def __init__(self):
self.position = (0, 0)
self.parent = None
self.g = 0
self.h = 0
self.f = 0
"""
overrides equals method because otherwise cell assign will give
wrong results
"""
def __eq__(self, cell):
return self.position == cell.position
def showcell(self):
print(self.position)
| Cell |
python | pytorch__pytorch | torch/_inductor/index_propagation.py | {
"start": 5873,
"end": 6386
} | class ____:
value: Any # Either an IR value, or TypedExpr if is_symbolic is true
is_symbolic: bool = False
@staticmethod
def new_symbolic(expr: TypedExpr) -> "IndexPropVar":
return IndexPropVar(expr, is_symbolic=True)
def __post_init__(self):
assert not self.is_symbolic or isinstance(self.value, TypedExpr), (
"Symbolic IndexPropVar must contain a TypedExpr"
)
IndexPropResult: TypeAlias = Union[IndexPropVar, tuple["IndexPropResult", ...]]
| IndexPropVar |
python | mahmoud__boltons | boltons/urlutils.py | {
"start": 5176,
"end": 14740
} | class ____(ValueError):
"""Exception inheriting from :exc:`ValueError`, raised when failing to
parse a URL. Mostly raised on invalid ports and IPv6 addresses.
"""
pass
DEFAULT_ENCODING = 'utf8'
def to_unicode(obj):
try:
return str(obj)
except UnicodeDecodeError:
return str(obj, encoding=DEFAULT_ENCODING)
# regex from gruber via tornado
# doesn't support ipv6
# doesn't support mailto (netloc-less schemes)
_FIND_ALL_URL_RE = re.compile(r"""\b((?:([\w-]+):(/{1,3})|www[.])(?:(?:(?:[^\s&()<>]|&|")*(?:[^!"#$%'()*+,.:;<=>?@\[\]^`{|}~\s]))|(?:\((?:[^\s&()]|&|")*\)))+)""")
def find_all_links(text, with_text=False, default_scheme='https', schemes=()):
"""This function uses heuristics to searches plain text for strings
that look like URLs, returning a :class:`list` of :class:`URL`
objects. It supports limiting the accepted schemes, and returning
interleaved text as well.
>>> find_all_links('Visit https://boltons.rtfd.org!')
[URL(u'https://boltons.rtfd.org')]
>>> find_all_links('Visit https://boltons.rtfd.org!', with_text=True)
[u'Visit ', URL(u'https://boltons.rtfd.org'), u'!']
Args:
text (str): The text to search.
with_text (bool): Whether or not to interleave plaintext blocks
with the returned URL objects. Having all tokens can be
useful for transforming the text, e.g., replacing links with
HTML equivalents. Defaults to ``False``.
default_scheme (str): Many URLs are written without the scheme
component. This function can match a reasonable subset of
those, provided *default_scheme* is set to a string. Set to
``False`` to disable matching scheme-less URLs. Defaults to
``'https'``.
schemes (list): A list of strings that a URL's scheme must
match in order to be included in the results. Defaults to
empty, which matches all schemes.
.. note:: Currently this function does not support finding IPv6
addresses or URLs with netloc-less schemes, like mailto.
"""
text = to_unicode(text)
prev_end, start, end = 0, None, None
ret = []
_add = ret.append
def _add_text(t):
if ret and isinstance(ret[-1], str):
ret[-1] += t
else:
_add(t)
for match in _FIND_ALL_URL_RE.finditer(text):
start, end = match.start(1), match.end(1)
if prev_end < start and with_text:
_add(text[prev_end:start])
prev_end = end
try:
cur_url_text = match.group(0)
cur_url = URL(cur_url_text)
if not cur_url.scheme:
if default_scheme:
cur_url = URL(default_scheme + '://' + cur_url_text)
else:
_add_text(text[start:end])
continue
if schemes and cur_url.scheme not in schemes:
_add_text(text[start:end])
else:
_add(cur_url)
except URLParseError:
# currently this should only be hit with broken port
# strings. the regex above doesn't support ipv6 addresses
if with_text:
_add_text(text[start:end])
if with_text:
tail = text[prev_end:]
if tail:
_add_text(tail)
return ret
def _make_quote_map(safe_chars):
ret = {}
# v is included in the dict for py3 mostly, because bytestrings
# are iterables of ints, of course!
for i, v in zip(range(256), range(256)):
c = chr(v)
if c in safe_chars:
ret[c] = ret[v] = c
else:
ret[c] = ret[v] = f'%{i:02X}'
return ret
_USERINFO_PART_QUOTE_MAP = _make_quote_map(_USERINFO_SAFE)
_PATH_PART_QUOTE_MAP = _make_quote_map(_PATH_SAFE)
_QUERY_PART_QUOTE_MAP = _make_quote_map(_QUERY_SAFE)
_FRAGMENT_QUOTE_MAP = _make_quote_map(_FRAGMENT_SAFE)
def quote_path_part(text, full_quote=True):
"""
Percent-encode a single segment of a URL path.
"""
if full_quote:
bytestr = normalize('NFC', to_unicode(text)).encode('utf8')
return ''.join([_PATH_PART_QUOTE_MAP[b] for b in bytestr])
return ''.join([_PATH_PART_QUOTE_MAP[t] if t in _PATH_DELIMS else t
for t in text])
def quote_query_part(text, full_quote=True):
"""
Percent-encode a single query string key or value.
"""
if full_quote:
bytestr = normalize('NFC', to_unicode(text)).encode('utf8')
return ''.join([_QUERY_PART_QUOTE_MAP[b] for b in bytestr])
return ''.join([_QUERY_PART_QUOTE_MAP[t] if t in _QUERY_DELIMS else t
for t in text])
def quote_fragment_part(text, full_quote=True):
"""Quote the fragment part of the URL. Fragments don't have
subdelimiters, so the whole URL fragment can be passed.
"""
if full_quote:
bytestr = normalize('NFC', to_unicode(text)).encode('utf8')
return ''.join([_FRAGMENT_QUOTE_MAP[b] for b in bytestr])
return ''.join([_FRAGMENT_QUOTE_MAP[t] if t in _FRAGMENT_DELIMS else t
for t in text])
def quote_userinfo_part(text, full_quote=True):
"""Quote special characters in either the username or password
section of the URL. Note that userinfo in URLs is considered
deprecated in many circles (especially browsers), and support for
percent-encoded userinfo can be spotty.
"""
if full_quote:
bytestr = normalize('NFC', to_unicode(text)).encode('utf8')
return ''.join([_USERINFO_PART_QUOTE_MAP[b] for b in bytestr])
return ''.join([_USERINFO_PART_QUOTE_MAP[t] if t in _USERINFO_DELIMS
else t for t in text])
def unquote(string, encoding='utf-8', errors='replace'):
"""Percent-decode a string, by replacing %xx escapes with their
single-character equivalent. The optional *encoding* and *errors*
parameters specify how to decode percent-encoded sequences into
Unicode characters, as accepted by the :meth:`bytes.decode()` method. By
default, percent-encoded sequences are decoded with UTF-8, and
invalid sequences are replaced by a placeholder character.
>>> unquote(u'abc%20def')
u'abc def'
"""
if '%' not in string:
string.split
return string
if encoding is None:
encoding = 'utf-8'
if errors is None:
errors = 'replace'
bits = _ASCII_RE.split(string)
res = [bits[0]]
append = res.append
for i in range(1, len(bits), 2):
append(unquote_to_bytes(bits[i]).decode(encoding, errors))
append(bits[i + 1])
return ''.join(res)
def unquote_to_bytes(string):
"""unquote_to_bytes('abc%20def') -> b'abc def'."""
# Note: strings are encoded as UTF-8. This is only an issue if it contains
# unescaped non-ASCII characters, which URIs should not.
if not string:
# Is it a string-like object?
string.split
return b''
if isinstance(string, str):
string = string.encode('utf-8')
bits = string.split(b'%')
if len(bits) == 1:
return string
# import pdb;pdb.set_trace()
res = [bits[0]]
append = res.append
for item in bits[1:]:
try:
append(_HEX_CHAR_MAP[item[:2]])
append(item[2:])
except KeyError:
append(b'%')
append(item)
return b''.join(res)
def register_scheme(text, uses_netloc=None, default_port=None):
"""Registers new scheme information, resulting in correct port and
slash behavior from the URL object. There are dozens of standard
schemes preregistered, so this function is mostly meant for
proprietary internal customizations or stopgaps on missing
standards information. If a scheme seems to be missing, please
`file an issue`_!
Args:
text (str): Text representing the scheme.
(the 'http' in 'http://hatnote.com')
uses_netloc (bool): Does the scheme support specifying a
network host? For instance, "http" does, "mailto" does not.
default_port (int): The default port, if any, for netloc-using
schemes.
.. _file an issue: https://github.com/mahmoud/boltons/issues
"""
text = text.lower()
if default_port is not None:
try:
default_port = int(default_port)
except ValueError:
raise ValueError('default_port expected integer or None, not %r'
% (default_port,))
if uses_netloc is True:
SCHEME_PORT_MAP[text] = default_port
elif uses_netloc is False:
if default_port is not None:
raise ValueError('unexpected default port while specifying'
' non-netloc scheme: %r' % default_port)
NO_NETLOC_SCHEMES.add(text)
elif uses_netloc is not None:
raise ValueError('uses_netloc expected True, False, or None')
return
def resolve_path_parts(path_parts):
"""Normalize the URL path by resolving segments of '.' and '..',
resulting in a dot-free path. See RFC 3986 section 5.2.4, Remove
Dot Segments.
"""
# TODO: what to do with multiple slashes
ret = []
for part in path_parts:
if part == '.':
pass
elif part == '..':
if ret and (len(ret) > 1 or ret[0]): # prevent unrooting
ret.pop()
else:
ret.append(part)
if list(path_parts[-1:]) in (['.'], ['..']):
ret.append('')
return ret
| URLParseError |
python | django__django | tests/model_regress/tests.py | {
"start": 9069,
"end": 9472
} | class ____(TestCase):
"""
Regression test for #13640: cannot filter by objects with 'evaluate' attr
"""
def test_model_with_evaluate_method(self):
"""
You can filter by objects that have an 'evaluate' attr
"""
dept = Department.objects.create(pk=1, name="abc")
dept.evaluate = "abc"
Worker.objects.filter(department=dept)
| EvaluateMethodTest |
python | getsentry__sentry | tests/sentry_plugins/heroku/test_plugin.py | {
"start": 4680,
"end": 8460
} | class ____(TestCase):
@pytest.fixture(autouse=True)
def patch_is_valid_signature(self) -> Generator[None]:
with patch.object(HerokuReleaseHook, "is_valid_signature"):
yield
@patch.object(HerokuReleaseHook, "set_refs")
def test_user_success(self, set_refs_mock: MagicMock) -> None:
user = self.create_user()
organization = self.create_organization(owner=user)
project = self.create_project(organization=organization)
hook = HerokuReleaseHook(project)
req = Mock()
body: dict[str, Any] = {
"data": {
"user": {"email": user.email},
"slug": {"commit": "abcd123"},
"app": {"name": "example"},
},
"action": "update",
}
req.body = orjson.dumps(body)
hook.handle(req)
assert Release.objects.filter(version=body["data"]["slug"]["commit"]).exists()
assert set_refs_mock.call_count == 1
@patch.object(HerokuReleaseHook, "set_refs")
def test_only_run_on_update(self, set_refs_mock: MagicMock) -> None:
user = self.create_user()
organization = self.create_organization(owner=user)
project = self.create_project(organization=organization)
hook = HerokuReleaseHook(project)
req = Mock()
body: dict[str, Any] = {
"data": {
"user": {"email": user.email},
"slug": {"commit": "abcd123"},
"app": {"name": "example"},
},
"action": "create",
}
req.body = orjson.dumps(body)
hook.handle(req)
assert not Release.objects.filter(version=body["data"]["slug"]["commit"]).exists()
assert set_refs_mock.call_count == 0
@patch.object(HerokuReleaseHook, "set_refs")
def test_actor_email_success(self, set_refs_mock: MagicMock) -> None:
user = self.create_user()
organization = self.create_organization(owner=user)
project = self.create_project(organization=organization)
hook = HerokuReleaseHook(project)
req = Mock()
body: dict[str, Any] = {
"data": {
"actor": {"email": user.email},
"slug": {"commit": "abcd123"},
"app": {"name": "example"},
},
"action": "update",
}
req.body = orjson.dumps(body)
hook.handle(req)
assert Release.objects.filter(version=body["data"]["slug"]["commit"]).exists()
assert set_refs_mock.call_count == 1
def test_email_mismatch(self) -> None:
user = self.create_user()
organization = self.create_organization(owner=user)
project = self.create_project(organization=organization)
hook = HerokuReleaseHook(project)
req = Mock()
body: dict[str, Any] = {
"data": {
"user": {"email": "wrong@example.com"},
"slug": {"commit": "v999"},
"app": {"name": "example"},
},
"action": "update",
}
req.body = orjson.dumps(body)
hook.handle(req)
assert Release.objects.filter(version=body["data"]["slug"]["commit"]).exists()
def test_bad_version(self) -> None:
project = self.create_project()
user = self.create_user()
hook = HerokuReleaseHook(project)
req = Mock()
body = {
"data": {
"actor": {"email": user.email},
"slug": {"commit": ""},
"app": {"name": "example"},
},
"action": "update",
}
req.body = orjson.dumps(body)
with pytest.raises(HookValidationError):
hook.handle(req)
| HookHandleTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDict14.py | {
"start": 181,
"end": 486
} | class ____(TypedDict):
v3: Thing | None
v4: list[str | int] | None
thing2: Thing2 = {"v3": None, "v4": None}
thing2["v3"] = {"v1": False, "v2": "a"}
thing2["v4"] = []
thing2["v4"] = [3]
thing2["v4"] = ["hi"]
thing2["v4"] = ["hi", 4]
# This should generate an error
thing2["v4"] = ["hi", 4.0]
| Thing2 |
python | getsentry__sentry | src/sentry/exceptions.py | {
"start": 962,
"end": 1098
} | class ____(Exception):
# Tried to build a metrics enhanced performance query but it was incompatible
pass
| IncompatibleMetricsQuery |
python | kamyu104__LeetCode-Solutions | Python/single-number-iii.py | {
"start": 66,
"end": 369
} | class ____(object):
# @param {integer[]} nums
# @return {integer[]}
def singleNumber(self, nums):
x_xor_y = reduce(operator.xor, nums)
bit = x_xor_y & -x_xor_y
result = [0, 0]
for i in nums:
result[bool(i & bit)] ^= i
return result
| Solution |
python | django-haystack__django-haystack | test_haystack/test_fields.py | {
"start": 9994,
"end": 10929
} | class ____(TestCase):
def test_init(self):
try:
foo = IntegerField(model_attr="foo")
except:
self.fail()
def test_prepare(self):
mock = MockModel()
mock.pk = 1
pk = IntegerField(model_attr="pk")
self.assertEqual(pk.prepare(mock), 1)
# Simulate failed lookups.
mock_tag = MockTag.objects.create(name="primary")
mock = MockModel()
mock.tag = mock_tag
tag_count = IntegerField(model_attr="tag__count")
self.assertRaises(SearchFieldError, tag_count.prepare, mock)
# Simulate default=1.
mock = MockModel()
default = IntegerField(default=1)
self.assertEqual(default.prepare(mock), 1)
# Simulate null=True.
mock = MockModel()
pk_none = IntegerField(model_attr="pk", null=True)
self.assertEqual(pk_none.prepare(mock), None)
| IntegerFieldTestCase |
python | wandb__wandb | wandb/sdk/artifacts/_generated/fragments.py | {
"start": 7471,
"end": 7873
} | class ____(GQLResult):
typename__: Typename[Literal["Member"]] = "Member"
id: Optional[str]
role: Optional[str]
pending: Optional[bool]
email: Optional[str]
username: Optional[str]
name: str
photo_url: Optional[str] = Field(alias="photoUrl")
account_type: Optional[str] = Field(alias="accountType")
api_key: Optional[str] = Field(alias="apiKey")
| TeamMemberFragment |
python | catalyst-team__catalyst | catalyst/contrib/layers/arcface.py | {
"start": 3285,
"end": 6988
} | class ____(nn.Module):
"""Implementation of
`Sub-center ArcFace: Boosting Face Recognition
by Large-scale Noisy Web Faces`_.
.. _Sub-center ArcFace\: Boosting Face Recognition \
by Large-scale Noisy Web Faces:
https://ibug.doc.ic.ac.uk/media/uploads/documents/eccv_1445.pdf
Args:
in_features: size of each input sample.
out_features: size of each output sample.
s: norm of input feature,
Default: ``64.0``.
m: margin.
Default: ``0.5``.
k: number of possible class centroids.
Default: ``3``.
eps (float, optional): operation accuracy.
Default: ``1e-6``.
Shape:
- Input: :math:`(batch, H_{in})` where
:math:`H_{in} = in\_features`.
- Output: :math:`(batch, H_{out})` where
:math:`H_{out} = out\_features`.
Example:
>>> layer = SubCenterArcFace(5, 10, s=1.31, m=0.35, k=2)
>>> loss_fn = nn.CrosEntropyLoss()
>>> embedding = torch.randn(3, 5, requires_grad=True)
>>> target = torch.empty(3, dtype=torch.long).random_(10)
>>> output = layer(embedding, target)
>>> loss = loss_fn(output, target)
>>> self.engine.backward(loss)
"""
def __init__( # noqa: D107
self,
in_features: int,
out_features: int,
s: float = 64.0,
m: float = 0.5,
k: int = 3,
eps: float = 1e-6,
):
super(SubCenterArcFace, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.s = s
self.m = m
self.k = k
self.eps = eps
self.weight = nn.Parameter(torch.FloatTensor(k, in_features, out_features))
nn.init.xavier_uniform_(self.weight)
self.threshold = math.pi - self.m
def __repr__(self) -> str:
"""Object representation."""
rep = (
"SubCenterArcFace("
f"in_features={self.in_features},"
f"out_features={self.out_features},"
f"s={self.s},"
f"m={self.m},"
f"k={self.k},"
f"eps={self.eps}"
")"
)
return rep
def forward(
self, input: torch.Tensor, target: torch.LongTensor = None
) -> torch.Tensor:
"""
Args:
input: input features,
expected shapes ``BxF`` where ``B``
is batch dimension and ``F`` is an
input feature dimension.
target: target classes,
expected shapes ``B`` where
``B`` is batch dimension.
If `None` then will be returned
projection on centroids.
Default is `None`.
Returns:
tensor (logits) with shapes ``BxC``
where ``C`` is a number of classes.
"""
feats = F.normalize(input).unsqueeze(0).expand(self.k, *input.shape) # k*b*f
wght = F.normalize(self.weight, dim=1) # k*f*c
cos_theta = torch.bmm(feats, wght) # k*b*f
cos_theta = torch.max(cos_theta, dim=0)[0] # b*f
theta = torch.acos(torch.clamp(cos_theta, -1.0 + self.eps, 1.0 - self.eps))
if target is None:
return cos_theta
one_hot = torch.zeros_like(cos_theta)
one_hot.scatter_(1, target.view(-1, 1).long(), 1)
selected = torch.where(
theta > self.threshold, torch.zeros_like(one_hot), one_hot
)
logits = torch.cos(torch.where(selected.bool(), theta + self.m, theta))
logits *= self.s
return logits
__all__ = ["ArcFace", "SubCenterArcFace"]
| SubCenterArcFace |
python | tensorflow__tensorflow | tensorflow/python/ops/lookup_ops.py | {
"start": 9812,
"end": 10089
} | class ____(InitializableLookupTableBase):
@property
def initializer(self):
return self._init_op
@registration.register_tf_serializable(
predicate=lambda obj: isinstance(obj, StaticHashTable))
@tf_export("lookup.StaticHashTable", v1=[])
| InitializableLookupTableBaseV1 |
python | celery__celery | t/unit/backends/test_base.py | {
"start": 1265,
"end": 1584
} | class ____:
class Nested(Exception):
pass
Oldstyle = None
Unpickleable = subclass_exception(
'Unpickleable', KeyError, 'foo.module',
)
Impossible = subclass_exception(
'Impossible', object, 'foo.module',
)
Lookalike = subclass_exception(
'Lookalike', wrapobject, 'foo.module',
)
| objectexception |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/static_analysis/activity_test.py | {
"start": 3585,
"end": 4835
} | class ____(test.TestCase):
def _parse_and_analyze(self, test_fn):
# TODO(mdan): Use a custom FunctionTransformer here.
node, source = parser.parse_entity(test_fn, future_features=())
entity_info = transformer.EntityInfo(
name=test_fn.__name__,
source_code=source,
source_file=None,
future_features=(),
namespace={})
node = qual_names.resolve(node)
namer = naming.Namer({})
ctx = transformer.Context(entity_info, namer, None)
node = activity.resolve(node, ctx)
return node, entity_info
def assertSymbolSetsAre(self, expected, actual, name):
expected = set(expected)
actual = set(str(s) for s in actual)
self.assertSetEqual(
expected, actual, 'for symbol set: %s\n'
' Expected: %s\n'
' Got: %s\n'
' Missing: %s\n'
' Extra: %s\n' % (name.upper(), expected, actual,
expected - actual, actual - expected))
def assertScopeIs(self, scope, used, modified):
"""Assert the scope contains specific used, modified & created variables."""
self.assertSymbolSetsAre(used, scope.read, 'read')
self.assertSymbolSetsAre(modified, scope.modified, 'modified')
| ActivityAnalyzerTestBase |
python | sympy__sympy | sympy/physics/secondquant.py | {
"start": 1901,
"end": 1971
} | class ____(SecondQuantizationError):
pass
| AppliesOnlyToSymbolicIndex |
python | readthedocs__readthedocs.org | readthedocs/search/tests/test_views.py | {
"start": 480,
"end": 4759
} | class ____:
@pytest.fixture(autouse=True)
def setup(self):
self.url = reverse("search")
def _get_search_result(self, url, client, search_params):
resp = client.get(url, search_params)
assert resp.status_code == 200
results = resp.context["results"]
facets = resp.context["facets"]
return results, facets
def test_search_by_project_name(self, client, project, all_projects):
results, _ = self._get_search_result(
url=self.url,
client=client,
search_params={"q": project.name, "type": "project"},
)
assert len(results) == 1
assert project.name == results[0]["name"]
for proj in all_projects[1:]:
assert proj.name != results[0]["name"]
def test_search_project_have_correct_language_facets(self, client, project):
"""Test that searching project should have correct language facets in the results"""
# Create a project in bn and add it as a translation
get(Project, language="bn", name=project.name)
results, facets = self._get_search_result(
url=self.url,
client=client,
search_params={"q": project.name, "type": "project"},
)
lang_facets = facets["language"]
lang_facets_str = [facet[0] for facet in lang_facets]
# There should be 2 languages
assert len(lang_facets) == 2
assert sorted(lang_facets_str) == sorted(["en", "bn"])
for facet in lang_facets:
assert facet[2] == False # because none of the facets are applied
def test_search_project_filter_language(self, client, project):
"""Test that searching project filtered according to language."""
# Create a project in bn and add it as a translation
translate = get(Project, language="bn", name=project.name)
search_params = {"q": project.name, "language": "bn", "type": "project"}
results, facets = self._get_search_result(
url=self.url,
client=client,
search_params=search_params,
)
# There should be only 1 result
assert len(results) == 1
lang_facets = facets["language"]
lang_facets_str = [facet[0] for facet in lang_facets]
# There should be 2 languages because both `en` and `bn` should show there
assert len(lang_facets) == 2
assert sorted(lang_facets_str) == sorted(["en", "bn"])
@override_settings(ALLOW_PRIVATE_REPOS=True)
def test_search_only_projects_owned_by_the_user(self, client, all_projects):
project = Project.objects.get(slug="docs")
user = get(User)
user.projects.add(project)
client.force_login(user)
results, _ = self._get_search_result(
url=self.url,
client=client,
search_params={
# Search for all projects.
"q": " ".join(project.slug for project in all_projects),
"type": "project",
},
)
assert len(results) > 0
other_projects = [
project.slug for project in all_projects if project.slug != "docs"
]
for result in results:
assert result["name"] == "docs"
assert result["name"] not in other_projects
@override_settings(ALLOW_PRIVATE_REPOS=True)
def test_search_no_owned_projects(self, client, all_projects):
user = get(User)
assert user.projects.all().count() == 0
client.force_login(user)
results, _ = self._get_search_result(
url=self.url,
client=client,
search_params={
# Search for all projects.
"q": " ".join(project.slug for project in all_projects),
"type": "project",
},
)
assert len(results) == 0
def test_search_empty_query(self, client):
results, facets = self._get_search_result(
url=self.url,
client=client,
search_params={"q": "", "type": "project"},
)
assert results == []
assert facets == {}
@pytest.mark.django_db
@pytest.mark.search
@pytest.mark.usefixtures("all_projects")
| TestProjectSearch |
python | spyder-ide__spyder | spyder/config/manager.py | {
"start": 1081,
"end": 31525
} | class ____(object):
"""
Configuration manager to provide access to user/site/project config.
"""
def __init__(self, parent=None, active_project_callback=None,
conf_path=None):
"""
Configuration manager to provide access to user/site/project config.
"""
path = conf_path if conf_path else self.get_user_config_path()
if not osp.isdir(path):
os.makedirs(path)
# Site configuration defines the system defaults if a file
# is found in the site location
conf_paths = get_conf_paths()
site_defaults = DEFAULTS
for conf_path in reversed(conf_paths):
conf_fpath = os.path.join(conf_path, 'spyder.ini')
if os.path.isfile(conf_fpath):
site_config = UserConfig(
'spyder',
path=conf_path,
defaults=site_defaults,
load=False,
version=CONF_VERSION,
backup=False,
raw_mode=True,
remove_obsolete=False,
)
site_defaults = site_config.to_list()
self._parent = parent
self._active_project_callback = active_project_callback
self._user_config = MultiUserConfig(
NAME_MAP,
path=path,
defaults=site_defaults,
load=True,
version=CONF_VERSION,
backup=True,
raw_mode=True,
remove_obsolete=False,
)
# This is useful to know in order to execute certain operations when
# bumping CONF_VERSION
self.old_spyder_version = (
self._user_config._configs_map['spyder']._old_version)
# Store plugin configurations when CONF_FILE = True
self._plugin_configs = {}
# TODO: To be implemented in following PR
self._project_configs = {} # Cache project configurations
# Object observer map
# This dict maps from a configuration key (str/tuple) to a set
# of objects that should be notified on changes to the corresponding
# subscription key per section. The observer objects must be hashable.
self._observers: Dict[
ConfigurationKey, Dict[str, Set[ConfigurationObserver]]
] = {}
# Set of suscription keys per observer object
# This dict maps from a observer object to the set of configuration
# keys that the object is subscribed to per section.
self._observer_map_keys: Dict[
ConfigurationObserver, Dict[str, Set[ConfigurationKey]]
] = weakref.WeakKeyDictionary()
# List of options with disabled notifications.
# This holds a list of (section, option) options that won't be notified
# to observers. It can be used to temporarily disable notifications for
# some options.
self._disabled_options: List[Tuple(str, ConfigurationKey)] = []
# Mapping for shortcuts that need to be notified
self._shortcuts_to_notify: Dict[(str, str), Optional[str]] = {}
# Setup
self.remove_deprecated_config_locations()
def unregister_plugin(self, plugin_instance):
conf_section = plugin_instance.CONF_SECTION
if conf_section in self._plugin_configs:
self._plugin_configs.pop(conf_section, None)
def register_plugin(self, plugin_class):
"""Register plugin configuration."""
conf_section = plugin_class.CONF_SECTION
if plugin_class.CONF_FILE and conf_section:
path = self.get_plugin_config_path(conf_section)
version = plugin_class.CONF_VERSION
version = version if version else '0.0.0'
name_map = plugin_class._CONF_NAME_MAP
name_map = name_map if name_map else {'spyder': []}
defaults = plugin_class.CONF_DEFAULTS
if conf_section in self._plugin_configs:
raise RuntimeError('A plugin with section "{}" already '
'exists!'.format(conf_section))
plugin_config = MultiUserConfig(
name_map,
path=path,
defaults=defaults,
load=True,
version=version,
backup=True,
raw_mode=True,
remove_obsolete=False,
external_plugin=True
)
# Recreate external plugin configs to deal with part two
# (the shortcut conflicts) of spyder-ide/spyder#11132
if check_version(self.old_spyder_version, '54.0.0', '<'):
# Remove all previous .ini files
try:
plugin_config.cleanup()
except EnvironmentError:
pass
# Recreate config
plugin_config = MultiUserConfig(
name_map,
path=path,
defaults=defaults,
load=True,
version=version,
backup=True,
raw_mode=True,
remove_obsolete=False,
external_plugin=True
)
self._plugin_configs[conf_section] = (plugin_class, plugin_config)
def remove_deprecated_config_locations(self):
"""Removing old .spyder.ini location."""
old_location = osp.join(get_home_dir(), '.spyder.ini')
if osp.isfile(old_location):
os.remove(old_location)
def get_active_conf(self, section=None):
"""
Return the active user or project configuration for plugin.
"""
# Add a check for shortcuts!
if section is None:
config = self._user_config
elif section in self._plugin_configs:
_, config = self._plugin_configs[section]
else:
# TODO: implement project configuration on the following PR
config = self._user_config
return config
def get_user_config_path(self):
"""Return the user configuration path."""
base_path = get_conf_path()
path = osp.join(base_path, 'config')
if not osp.isdir(path):
os.makedirs(path)
return path
def get_plugin_config_path(self, plugin_folder):
"""Return the plugin configuration path."""
base_path = get_conf_path()
path = osp.join(base_path, 'plugins')
if plugin_folder is None:
raise RuntimeError('Plugin needs to define `CONF_SECTION`!')
path = osp.join(base_path, 'plugins', plugin_folder)
if not osp.isdir(path):
os.makedirs(path)
return path
# --- Observer pattern
# ------------------------------------------------------------------------
def observe_configuration(self,
observer: ConfigurationObserver,
section: str,
option: Optional[ConfigurationKey] = None):
"""
Register an `observer` object to listen for changes in the option
`option` on the configuration `section`.
Parameters
----------
observer: ConfigurationObserver
Object that conforms to the `ConfigurationObserver` protocol.
section: str
Name of the configuration section that contains the option
:param:`option`
option: Optional[ConfigurationKey]
Name of the option on the configuration section :param:`section`
that the object is going to suscribe to. If None, the observer
will observe any changes on any of the options of the configuration
section.
"""
section_sets = self._observers.get(section, {})
option = option if option is not None else '__section'
option_set = section_sets.get(option, weakref.WeakSet())
option_set |= {observer}
section_sets[option] = option_set
self._observers[section] = section_sets
observer_section_sets = self._observer_map_keys.get(observer, {})
section_set = observer_section_sets.get(section, set({}))
section_set |= {option}
observer_section_sets[section] = section_set
self._observer_map_keys[observer] = observer_section_sets
def unobserve_configuration(self,
observer: ConfigurationObserver,
section: Optional[str] = None,
option: Optional[ConfigurationKey] = None):
"""
Remove an observer to prevent it to receive further changes
on the values of the option `option` of the configuration section
`section`.
Parameters
----------
observer: ConfigurationObserver
Object that conforms to the `ConfigurationObserver` protocol.
section: Optional[str]
Name of the configuration section that contains the option
:param:`option`. If None, the observer is unregistered from all
options for all sections that it has registered to.
option: Optional[ConfigurationKey]
Name of the configuration option on the configuration
:param:`section` that the observer is going to be unsubscribed
from. If None, the observer is unregistered from all the options of
the section `section`.
"""
if observer not in self._observer_map_keys:
return
observer_sections = self._observer_map_keys[observer]
if section is not None:
section_options = observer_sections[section]
section_observers = self._observers[section]
if option is None:
for option in section_options:
option_observers = section_observers[option]
option_observers.remove(observer)
observer_sections.pop(section)
else:
option_observers = section_observers[option]
option_observers.remove(observer)
else:
for section in observer_sections:
section_options = observer_sections[section]
section_observers = self._observers[section]
for option in section_options:
option_observers = section_observers[option]
option_observers.remove(observer)
self._observer_map_keys.pop(observer)
def notify_all_observers(self):
"""
Notify all the observers subscribed to all the sections and options.
"""
for section in self._observers:
self.notify_section_all_observers(section)
def notify_observers(
self,
section: str,
option: ConfigurationKey,
recursive_notification: bool = True,
secure: bool = False,
):
"""
Notify observers of a change in the option `option` of configuration
section `section`.
Parameters
----------
section: str
Name of the configuration section whose option did changed.
option: ConfigurationKey
Name/Path to the option that did changed.
recursive_notification: bool
If True, all objects that observe all changes on the
configuration section and objects that observe partial tuple paths
are notified. For example if the option `opt` of section `sec`
changes, then the observers for section `sec` are notified.
Likewise, if the option `(a, b, c)` changes, then observers for
`(a, b, c)`, `(a, b)` and a are notified as well.
secure: bool
Whether this is a secure option or not.
"""
if recursive_notification:
# Notify to section listeners
self._notify_section(section)
if isinstance(option, tuple) and recursive_notification:
# Notify to partial tuple observers
# e.g., If the option is (a, b, c), observers subscribed to
# (a, b, c), (a, b) and a are notified
option_list = list(option)
while option_list != []:
tuple_option = tuple(option_list)
if len(option_list) == 1:
tuple_option = tuple_option[0]
value = self.get(section, tuple_option)
self._notify_option(section, tuple_option, value)
option_list.pop(-1)
else:
if option == '__section':
self._notify_section(section)
else:
if section == "shortcuts":
self._notify_shortcut(option)
else:
value = self.get(section, option, secure=secure)
self._notify_option(section, option, value)
def _notify_option(self, section: str, option: ConfigurationKey,
value: Any):
section_observers = self._observers.get(section, {})
option_observers = section_observers.get(option, set({}))
if (section, option) in self._disabled_options:
logger.debug(
f"Don't send notification to observers of disabled option "
f"{option} in configuration section {section}"
)
return
elif len(option_observers) > 0:
logger.debug(
f"Sending notification to observers of {option} option in "
f"configuration section {section}"
)
for observer in list(option_observers):
try:
observer.on_configuration_change(option, section, value)
except RuntimeError:
# Prevent errors when Qt Objects are destroyed
self.unobserve_configuration(observer)
def _notify_section(self, section: str):
section_values = dict(self.items(section) or [])
self._notify_option(section, '__section', section_values)
def _notify_shortcut(self, option: str):
# We need this mapping for two reasons:
# 1. We don't need to notify changes for all shortcuts, only for
# widget shortcuts, which are the ones with associated observers
# (see SpyderShortcutsMixin.register_shortcut_for_widget).
# 2. Besides context and name, we need the plugin_name to correctly get
# the shortcut value to notify. That's not saved in our config
# system, but it is in SHORTCUTS_FOR_WIDGETS_DATA.
if not self._shortcuts_to_notify:
# Populate mapping only once
self._shortcuts_to_notify = {
(data.context, data.name): data.plugin_name
for data in SHORTCUTS_FOR_WIDGETS_DATA
}
context, name = option.split("/")
if (context, name) in self._shortcuts_to_notify:
plugin_name = self._shortcuts_to_notify[(context, name)]
value = self.get_shortcut(context, name, plugin_name)
self._notify_option("shortcuts", option, value)
def notify_section_all_observers(self, section: str):
"""Notify all the observers subscribed to any option of a section."""
option_observers = self._observers[section]
section_prefix = PrefixedTuple()
# Notify section observers
CONF.notify_observers(section, '__section')
for option in option_observers:
if isinstance(option, tuple):
section_prefix.add_path(option)
else:
try:
self.notify_observers(section, option)
except cp.NoOptionError:
# Skip notification if the option/section does not exist.
# This prevents unexpected errors in the test suite.
pass
# Notify prefixed observers
for prefix in section_prefix:
try:
self.notify_observers(section, prefix)
except cp.NoOptionError:
# See above explanation.
pass
def disable_notifications(self, section: str, option: ConfigurationKey):
"""Disable notitications for `option` in `section`."""
logger.debug(
f"Disable notifications for option {option} option in section "
f"{section}"
)
self._disabled_options.append((section, option))
def restore_notifications(self, section: str, option: ConfigurationKey):
"""Restore notitications for disabled `option` in `section`."""
logger.debug(
f"Restore notifications for option {option} option in section "
f"{section}"
)
try:
self._disabled_options.remove((section, option))
except ValueError:
pass
# --- Projects
# ------------------------------------------------------------------------
def register_config(self, root_path, config):
"""
Register configuration with `root_path`.
Useful for registering project configurations as they are opened.
"""
if self.is_project_root(root_path):
if root_path not in self._project_configs:
self._project_configs[root_path] = config
else:
# Validate which are valid site config locations
self._site_config = config
def get_active_project(self):
"""Return the `root_path` of the current active project."""
callback = self._active_project_callback
if self._active_project_callback:
return callback()
def is_project_root(self, root_path):
"""Check if `root_path` corresponds to a valid spyder project."""
return False
def get_project_config_path(self, project_root):
"""Return the project configuration path."""
path = osp.join(project_root, '.spyproj', 'config')
if not osp.isdir(path):
os.makedirs(path)
# MultiUserConf/UserConf interface
# ------------------------------------------------------------------------
def items(self, section):
"""Return all the items option/values for the given section."""
config = self.get_active_conf(section)
return config.items(section)
def options(self, section):
"""Return all the options for the given section."""
config = self.get_active_conf(section)
return config.options(section)
def get(self, section, option, default=NoDefault, secure=False):
"""
Get an `option` on a given `section`.
If section is None, the `option` is requested from default section.
"""
config = self.get_active_conf(section)
if isinstance(option, tuple) and len(option) == 1:
option = option[0]
if isinstance(option, tuple):
base_option = option[0]
intermediate_options = option[1:-1]
last_option = option[-1]
base_conf = config.get(
section=section, option=base_option, default={})
next_ptr = base_conf
for opt in intermediate_options:
next_ptr = next_ptr.get(opt, {})
value = next_ptr.get(last_option, None)
if value is None:
value = default
if default is NoDefault:
raise cp.NoOptionError(option, section)
else:
if secure:
logger.debug(
f"Retrieving option {option} with keyring because it "
f"was marked as secure."
)
value = keyring.get_password(section, option)
# This happens when `option` was not actually saved by keyring
if value is None:
value = ""
else:
value = config.get(
section=section, option=option, default=default
)
return value
def set(self, section, option, value, verbose=False, save=True,
recursive_notification=True, notification=True, secure=False):
"""
Set an `option` on a given `section`.
If section is None, the `option` is added to the default section.
"""
original_option = option
if isinstance(option, tuple):
base_option = option[0]
intermediate_options = option[1:-1]
last_option = option[-1]
base_conf = self.get(section, base_option, {})
conf_ptr = base_conf
for opt in intermediate_options:
next_ptr = conf_ptr.get(opt, {})
conf_ptr[opt] = next_ptr
conf_ptr = next_ptr
conf_ptr[last_option] = value
value = base_conf
option = base_option
config = self.get_active_conf(section)
if secure:
logger.debug(
f"Saving option {option} with keyring because it was marked "
f"as secure."
)
# Catch error when there's no keyring backend available.
# Fixes spyder-ide/spyder#22623
try:
keyring.set_password(section, option, value)
except NoKeyringError:
# This file must not have top-level Qt imports. This also
# prevents possible circular imports.
from qtpy.QtWidgets import QMessageBox
from spyder_kernels.utils.pythonenv import is_conda_env
pkg_manager = "conda" if is_conda_env(sys.prefix) else "pip"
msg = _(
"It was not possible to save a configuration setting "
"securely. A possible solution is to install the "
"<tt>keyrings.alt</tt> package with {}.<br><br>"
"<bb>Note</bb>: That package may have security risks or "
"other implications. Hence, it's not advised to use it in "
"general production or security-sensitive systems."
).format(pkg_manager)
QMessageBox.critical(
None,
_("Error"),
msg,
QMessageBox.Ok,
)
else:
config.set(
section=section,
option=option,
value=value,
verbose=verbose,
save=save,
)
if notification:
self.notify_observers(
section, original_option, recursive_notification, secure
)
def get_default(self, section, option):
"""
Get Default value for a given `section` and `option`.
This is useful for type checking in `get` method.
"""
config = self.get_active_conf(section)
if isinstance(option, tuple):
base_option = option[0]
intermediate_options = option[1:-1]
last_option = option[-1]
base_default = config.get_default(section, base_option)
conf_ptr = base_default
for opt in intermediate_options:
conf_ptr = conf_ptr[opt]
return conf_ptr[last_option]
return config.get_default(section, option)
def remove_section(self, section):
"""Remove `section` and all options within it."""
config = self.get_active_conf(section)
config.remove_section(section)
def remove_option(self, section, option, secure=False):
"""Remove `option` from `section`."""
config = self.get_active_conf(section)
if isinstance(option, tuple):
# The actual option saved in the config
base_option = option[0]
# Keys of the nested dicts where the option to remove is contained
intermediate_options = option[1:-1]
# Key of the option to remove
last_option = option[-1]
# Get config value (which is a dictionary)
base_conf = self.get(section, base_option)
# Get reference to the actual dictionary containing the option
# that needs to be removed
conf_ptr = base_conf
for opt in intermediate_options:
conf_ptr = conf_ptr[opt]
# Remove option and set updated config values for the actual option
# while checking that the option to be removed is actually a value
# available in the config.
# See spyder-ide/spyder#21161
if last_option in conf_ptr:
conf_ptr.pop(last_option)
self.set(section, base_option, base_conf)
self.notify_observers(section, base_option)
else:
if secure:
logger.debug(
f"Deleting option {option} with keyring because it was "
f"marked as secure."
)
try:
keyring.delete_password(section, option)
except Exception:
pass
else:
config.remove_option(section, option)
def reset_to_defaults(self, section=None, notification=True):
"""Reset config to Default values."""
config = self.get_active_conf(section)
config.reset_to_defaults(section=section)
if notification:
if section is not None:
self.notify_section_all_observers(section)
else:
self.notify_all_observers()
def reset_manager(self):
for observer in self._observer_map_keys.copy():
self.unobserve_configuration(observer)
self._plugin_configs = {}
# Shortcut configuration management
# ------------------------------------------------------------------------
def _get_shortcut_config(self, context, plugin_name=None):
"""
Return the shortcut configuration for global or plugin configs.
Context must be either '_' for global or the name of a plugin.
"""
context = context.lower()
config = self._user_config
if plugin_name in self._plugin_configs:
plugin_class, config = self._plugin_configs[plugin_name]
# Check if plugin has a separate file
if not plugin_class.CONF_FILE:
config = self._user_config
elif context in self._plugin_configs:
plugin_class, config = self._plugin_configs[context]
# Check if plugin has a separate file
if not plugin_class.CONF_FILE:
config = self._user_config
elif context in (self._user_config.sections()
+ EXTRA_VALID_SHORTCUT_CONTEXTS):
config = self._user_config
else:
raise ValueError(_("Shortcut context must match '_' or the "
"plugin `CONF_SECTION`!"))
return config
def get_shortcut(self, context, name, plugin_name=None):
"""
Get keyboard shortcut (key sequence string).
Context must be either '_' for global or the name of a plugin.
"""
config = self._get_shortcut_config(context, plugin_name)
return config.get('shortcuts', context + '/' + name.lower())
def set_shortcut(self, context, name, keystr, plugin_name=None):
"""
Set keyboard shortcut (key sequence string).
Context must be either '_' for global or the name of a plugin.
"""
config = self._get_shortcut_config(context, plugin_name)
option = f"{context}/{name}"
current_shortcut = config.get("shortcuts", option, default="")
if current_shortcut != keystr:
config.set('shortcuts', option, keystr)
self.notify_observers("shortcuts", option)
def iter_shortcuts(self):
"""Iterate over keyboard shortcuts."""
for context_name, keystr in self._user_config.items('shortcuts'):
if context_name == 'enable':
continue
if 'additional_configuration' not in context_name:
context, name = context_name.split('/', 1)
yield context, name, keystr
for __, (__, plugin_config) in self._plugin_configs.items():
items = plugin_config.items('shortcuts')
if items:
for context_name, keystr in items:
context, name = context_name.split('/', 1)
yield context, name, keystr
def reset_shortcuts(self):
"""Reset keyboard shortcuts to default values."""
self._user_config.reset_to_defaults(section='shortcuts')
for __, (__, plugin_config) in self._plugin_configs.items():
# TODO: check if the section exists?
plugin_config.reset_to_defaults(section='shortcuts')
# This necessary to notify the observers of widget shortcuts
self.notify_section_all_observers(section="shortcuts")
try:
CONF = ConfigurationManager()
except Exception:
from qtpy.QtWidgets import QApplication, QMessageBox
# Print traceback to show error in the terminal in case it's needed
print(traceback.format_exc()) # spyder: test-skip
# Check if there's an app already running
app = QApplication.instance()
# Create app, if there's none, in order to display the message below.
# NOTE: Don't use the functions we have to create a QApplication here
# because they could import CONF at some point, which would make this
# fallback fail.
# See issue spyder-ide/spyder#17889
if app is None:
app = QApplication(['Spyder', '--no-sandbox'])
app.setApplicationName('Spyder')
reset_reply = QMessageBox.critical(
None, 'Spyder',
_("There was an error while loading Spyder configuration options. "
"You need to reset them for Spyder to be able to launch.\n\n"
"Do you want to proceed?"),
QMessageBox.Yes, QMessageBox.No)
if reset_reply == QMessageBox.Yes:
reset_config_files()
QMessageBox.information(
None, 'Spyder',
_("Spyder configuration files resetted!"))
os._exit(0)
| ConfigurationManager |
python | tensorflow__tensorflow | tensorflow/python/data/ops/ignore_errors_op.py | {
"start": 998,
"end": 1605
} | class ____(dataset_ops.UnaryUnchangedStructureDataset):
"""A `Dataset` that drops erroneous elements from its input."""
def __init__(self, input_dataset, log_warning, name=None):
"""See `Dataset.ignore_errors` for details."""
self._input_dataset = input_dataset
self._name = name
variant_tensor = (
gen_experimental_dataset_ops.ignore_errors_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
log_warning=log_warning,
**self._flat_structure))
super().__init__(input_dataset, variant_tensor)
| _IgnoreErrorsDataset |
python | walkccc__LeetCode | solutions/1153. String Transforms Into Another String/1153.py | {
"start": 0,
"end": 399
} | class ____:
def canConvert(self, str1: str, str2: str) -> bool:
if str1 == str2:
return True
mappings = {}
for a, b in zip(str1, str2):
if mappings.get(a, b) != b:
return False
mappings[a] = b
# No letter in the str1 maps to > 1 letter in the str2 and there is at
# lest one temporary letter can break any loops.
return len(set(str2)) < 26
| Solution |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/waiters/test_kinesis_analytics.py | {
"start": 1162,
"end": 1416
} | class ____:
def test_service_waiters(self):
assert "application_start_complete" in KinesisAnalyticsV2Hook().list_waiters()
assert "application_stop_complete" in KinesisAnalyticsV2Hook().list_waiters()
| TestKinesisAnalyticsV2CustomWaiters |
python | mitsuhiko__rye | rye-devtools/src/rye_devtools/find_downloads.py | {
"start": 514,
"end": 606
} | class ____(StrEnum):
CPYTHON = "cpython"
PYPY = "pypy"
@dataclass
| PythonImplementation |
python | dask__dask | dask/dataframe/dask_expr/_cumulative.py | {
"start": 4147,
"end": 4340
} | class ____(CumulativeAggregations):
chunk_operation = M.cummin
aggregate_operation = staticmethod(methods.cummin_aggregate)
neutral_element = math.inf # type: ignore[assignment]
| CumMin |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/dataprep.py | {
"start": 1731,
"end": 1969
} | class ____(str, Enum):
"""Types of job group run statuses."""
CREATED = "Created"
UNDEFINED = "undefined"
IN_PROGRESS = "InProgress"
COMPLETE = "Complete"
FAILED = "Failed"
CANCELED = "Canceled"
| JobGroupStatuses |
python | sqlalchemy__sqlalchemy | test/sql/test_metadata.py | {
"start": 60783,
"end": 75060
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
@testing.requires.temporary_tables
@testing.skip_if("mssql", "different col format")
def test_prefixes(self):
from sqlalchemy import Table
table1 = Table(
"temporary_table_1",
MetaData(),
Column("col1", Integer),
prefixes=["TEMPORARY"],
)
self.assert_compile(
schema.CreateTable(table1),
"CREATE TEMPORARY TABLE temporary_table_1 (col1 INTEGER)",
)
table2 = Table(
"temporary_table_2",
MetaData(),
Column("col1", Integer),
prefixes=["VIRTUAL"],
)
self.assert_compile(
schema.CreateTable(table2),
"CREATE VIRTUAL TABLE temporary_table_2 (col1 INTEGER)",
)
@testing.combinations((None, []), ((), []), ([], []), (["foo"], ["foo"]))
def test_prefixes_parameter_parsing(self, arg, expected):
"""test #6685"""
table = Table("foo", MetaData(), Column("bar", Integer), prefixes=arg)
eq_(table._prefixes, expected)
def test_table_info(self):
metadata = MetaData()
t1 = Table("foo", metadata, info={"x": "y"})
t2 = Table("bar", metadata, info={})
t3 = Table("bat", metadata)
assert t1.info == {"x": "y"}
assert t2.info == {}
assert t3.info == {}
for t in (t1, t2, t3):
t.info["bar"] = "zip"
assert t.info["bar"] == "zip"
def test_invalid_objects(self):
assert_raises_message(
tsa.exc.ArgumentError,
"'SchemaItem' object, such as a 'Column' or a "
"'Constraint' expected, got <.*ColumnClause at .*; q>",
Table,
"asdf",
MetaData(),
tsa.column("q", Integer),
)
assert_raises_message(
tsa.exc.ArgumentError,
r"'SchemaItem' object, such as a 'Column' or a "
r"'Constraint' expected, got String\(\)",
Table,
"asdf",
MetaData(),
String(),
)
assert_raises_message(
tsa.exc.ArgumentError,
"'SchemaItem' object, such as a 'Column' or a "
"'Constraint' expected, got 12",
Table,
"asdf",
MetaData(),
12,
)
assert_raises_message(
tsa.exc.ArgumentError,
"'SchemaItem' object, such as a 'Column' or a "
"'Constraint' expected, got "
r"\(Column\('q', Integer\(\), table=None\), "
r"Column\('p', Integer\(\), table=None\)\)",
Table,
"asdf",
MetaData(),
(Column("q", Integer), Column("p", Integer)),
)
def test_reset_exported_passes(self):
m = MetaData()
t = Table("t", m, Column("foo", Integer))
eq_(list(t.c), [t.c.foo])
t._reset_exported()
eq_(list(t.c), [t.c.foo])
def test_foreign_key_constraints_collection(self):
metadata = MetaData()
t1 = Table("foo", metadata, Column("a", Integer))
eq_(t1.foreign_key_constraints, set())
fk1 = ForeignKey("q.id")
fk2 = ForeignKey("j.id")
fk3 = ForeignKeyConstraint(["b", "c"], ["r.x", "r.y"])
t1.append_column(Column("b", Integer, fk1))
eq_(t1.foreign_key_constraints, {fk1.constraint})
t1.append_column(Column("c", Integer, fk2))
eq_(t1.foreign_key_constraints, {fk1.constraint, fk2.constraint})
t1.append_constraint(fk3)
eq_(
t1.foreign_key_constraints,
{fk1.constraint, fk2.constraint, fk3},
)
def test_c_immutable(self):
m = MetaData()
t1 = Table("t", m, Column("x", Integer), Column("y", Integer))
assert_raises(TypeError, t1.c.extend, [Column("z", Integer)])
def assign():
t1.c["z"] = Column("z", Integer)
assert_raises(TypeError, assign)
def assign2():
t1.c.z = Column("z", Integer)
assert_raises(TypeError, assign2)
def test_c_mutate_after_unpickle(self):
m = MetaData()
y = Column("y", Integer)
t1 = Table("t", m, Column("x", Integer), y)
# note we are testing immutable column collection here
t2 = pickle.loads(pickle.dumps(t1))
z = Column("z", Integer)
g = Column("g", Integer)
t2.append_column(z)
is_(t1.c.contains_column(y), True)
is_(t2.c.contains_column(y), False)
y2 = t2.c.y
is_(t2.c.contains_column(y2), True)
is_(t2.c.contains_column(z), True)
is_(t2.c.contains_column(g), False)
def test_table_ctor_duplicated_column_name(self):
# when it will raise
with testing.expect_raises_message(
exc.ArgumentError,
"A column with name 'col' is already present in table 't'",
):
Table(
"t",
MetaData(),
Column("a", Integer),
Column("col", Integer),
Column("col", String),
)
@testing.combinations(
((0,),), ((0, 1),), ((1, 2),), ((3,),), ((-2,),), argnames="positions"
)
@testing.variation("add_to_pk", [True, False])
@testing.variation("existing_pk", [True, False])
def test_insert_column_table(self, positions, add_to_pk, existing_pk):
t = Table(
"t",
MetaData(),
Column("a", Integer, primary_key=bool(existing_pk)),
Column("b", Integer),
Column("c", Integer),
)
expected_cols = ["a", "b", "c"]
if existing_pk:
expected_pk_cols = ["a"]
else:
expected_pk_cols = []
for pos in positions:
t.insert_column(
Column(f"i{pos}", Integer, primary_key=bool(add_to_pk)), pos
)
expected_cols.insert(pos, f"i{pos}")
if add_to_pk:
expected_pk_cols.append(f"i{pos}")
eq_([c.key for c in t.c], expected_cols)
eq_([c.key for c in t.primary_key], expected_pk_cols)
@testing.combinations(-4, -3, -2, -1, 0, 1, 2, 3)
def test_replace_col_with_index(self, new_index):
t = Table(
"t",
MetaData(),
Column("a", Integer),
Column("b", Integer),
Column("c", Integer),
Column("d", Integer),
)
newcol = Column("b", String)
expected = ["a", "q", "c", "d"]
expected.insert(new_index, "b")
expected.remove("q")
t.insert_column(newcol, index=new_index, replace_existing=True)
is_(t.c.b, newcol)
is_(t.c.b.type._type_affinity, String)
eq_([c.key for c in t.c], expected)
effective_positive_index = (
new_index if new_index >= 0 else max(0, 4 + new_index)
)
if effective_positive_index > 1:
# because we replaced
effective_positive_index -= 1
is_(t.c[effective_positive_index], newcol)
@testing.combinations(
((0,),), ((0, 1),), ((1, 2),), ((3,),), argnames="positions"
)
def test_insert_column_tableclause(self, positions):
t = table(
"t",
column("a", Integer),
column("b", Integer),
column("c", Integer),
)
expected_cols = ["a", "b", "c"]
for pos in positions:
t.insert_column(column(f"i{pos}", Integer), pos)
expected_cols.insert(pos, f"i{pos}")
eq_([c.key for c in t.c], expected_cols)
def test_append_column_existing_name(self):
t = Table("t", MetaData(), Column("col", Integer))
with testing.expect_raises_message(
exc.DuplicateColumnError,
r"A column with name 'col' is already present in table 't'. "
r"Specify replace_existing=True to Table.append_column\(\) or "
r"Table.insert_column\(\) to "
r"replace an existing column.",
):
t.append_column(Column("col", String))
def test_append_column_existing_key(self):
t = Table("t", MetaData(), Column("col", Integer, key="c2"))
with testing.expect_raises_message(
exc.DuplicateColumnError,
r"A column with key 'c2' is already present in table 't'. "
r"Specify replace_existing=True to Table.append_column\(\) or "
r"Table.insert_column\(\) to "
r"replace an existing column.",
):
t.append_column(Column("col", String, key="c2"))
@testing.variation("field", ["name", "key"])
def test_append_column_replace_existing(self, field: Variation):
if field.name:
t = Table("t", MetaData(), Column("col", Integer))
t.append_column(Column("col", String), replace_existing=True)
is_true(isinstance(t.c.col.type, String))
elif field.key:
t = Table("t", MetaData(), Column("col", Integer, key="c2"))
t.append_column(
Column("col", String, key="c2"), replace_existing=True
)
is_true(isinstance(t.c.c2.type, String))
else:
field.fail()
def test_autoincrement_replace(self):
m = MetaData()
t = Table("t", m, Column("id", Integer, primary_key=True))
is_(t._autoincrement_column, t.c.id)
t = Table(
"t",
m,
Column("id", Integer, primary_key=True),
extend_existing=True,
)
is_(t._autoincrement_column, t.c.id)
def test_pk_args_standalone(self):
m = MetaData()
t = Table(
"t",
m,
Column("x", Integer, primary_key=True),
PrimaryKeyConstraint(mssql_clustered=True),
)
eq_(list(t.primary_key), [t.c.x])
eq_(t.primary_key.dialect_kwargs, {"mssql_clustered": True})
def test_pk_cols_sets_flags(self):
m = MetaData()
t = Table(
"t",
m,
Column("x", Integer),
Column("y", Integer),
Column("z", Integer),
PrimaryKeyConstraint("x", "y"),
)
eq_(t.c.x.primary_key, True)
eq_(t.c.y.primary_key, True)
eq_(t.c.z.primary_key, False)
def test_pk_col_mismatch_one(self):
m = MetaData()
with expect_warnings(
"Table 't' specifies columns 'x' as primary_key=True, "
"not matching locally specified columns 'q'"
):
Table(
"t",
m,
Column("x", Integer, primary_key=True),
Column("q", Integer),
PrimaryKeyConstraint("q"),
)
def test_pk_col_mismatch_two(self):
m = MetaData()
with expect_warnings(
"Table 't' specifies columns 'a', 'b', 'c' as primary_key=True, "
"not matching locally specified columns 'b', 'c'"
):
Table(
"t",
m,
Column("a", Integer, primary_key=True),
Column("b", Integer, primary_key=True),
Column("c", Integer, primary_key=True),
PrimaryKeyConstraint("b", "c"),
)
@testing.emits_warning("Table 't'")
def test_pk_col_mismatch_three(self):
m = MetaData()
t = Table(
"t",
m,
Column("x", Integer, primary_key=True),
Column("q", Integer),
PrimaryKeyConstraint("q"),
)
eq_(list(t.primary_key), [t.c.q])
@testing.emits_warning("Table 't'")
def test_pk_col_mismatch_four(self):
m = MetaData()
t = Table(
"t",
m,
Column("a", Integer, primary_key=True),
Column("b", Integer, primary_key=True),
Column("c", Integer, primary_key=True),
PrimaryKeyConstraint("b", "c"),
)
eq_(list(t.primary_key), [t.c.b, t.c.c])
def test_pk_always_flips_nullable(self):
m = MetaData()
t1 = Table("t1", m, Column("x", Integer), PrimaryKeyConstraint("x"))
t2 = Table("t2", m, Column("x", Integer, primary_key=True))
eq_(list(t1.primary_key), [t1.c.x])
eq_(list(t2.primary_key), [t2.c.x])
assert t1.c.x.primary_key
assert t2.c.x.primary_key
assert not t2.c.x.nullable
assert not t1.c.x.nullable
def test_pk_can_be_nullable(self):
m = MetaData()
t1 = Table(
"t1",
m,
Column("x", Integer, nullable=True),
PrimaryKeyConstraint("x"),
)
t2 = Table(
"t2", m, Column("x", Integer, primary_key=True, nullable=True)
)
eq_(list(t1.primary_key), [t1.c.x])
eq_(list(t2.primary_key), [t2.c.x])
assert t1.c.x.primary_key
assert t2.c.x.primary_key
assert t2.c.x.nullable
assert t1.c.x.nullable
def test_must_exist(self):
with testing.expect_raises_message(
exc.InvalidRequestError, "Table 'foo' not defined"
):
Table("foo", MetaData(), must_exist=True)
@testing.combinations(
("comment", ("A", "B", "A")),
("implicit_returning", (True, False, True)),
("info", ({"A": 1}, {"A": 2}, {"A": 1})),
)
def test_extend_attributes(self, attrib, attrib_values):
"""
ensure `extend_existing` is compatible with simple attributes
"""
metadata = MetaData()
for counter, _attrib_value in enumerate(attrib_values):
_extend_existing = True if (counter > 0) else False
_kwargs = {
"extend_existing": _extend_existing,
attrib: _attrib_value,
}
table_a = Table(
"a",
metadata,
Column("foo", String, primary_key=True),
**_kwargs,
)
eq_(getattr(table_a, attrib), _attrib_value)
eq_(getattr(metadata.tables["a"], attrib), _attrib_value)
| TableTest |
python | huggingface__transformers | src/transformers/models/mllama/modeling_mllama.py | {
"start": 31146,
"end": 34175
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: MllamaTextConfig, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[MllamaTextConfig] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
# Ignore copy
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
@auto_docstring
| MllamaRotaryEmbedding |
python | patrick-kidger__equinox | equinox/_ad.py | {
"start": 1009,
"end": 2410
} | class ____(Module):
_fun: Callable
_has_aux: bool
_gradkwargs: dict[str, Any]
@property
def __wrapped__(self):
return self._fun
def __call__(self, *args, **kwargs):
@ft.partial(jax.value_and_grad, has_aux=self._has_aux, **self._gradkwargs)
def fun_value_and_grad(_diff_x, _nondiff_x, *_args, **_kwargs):
_x = combine(_diff_x, _nondiff_x)
return self._fun(_x, *_args, **_kwargs)
if len(args) == 0:
if len(kwargs) == 0:
raise TypeError(
"Functions wrapped with `equinox.filter_{grad, value_and_grad}` "
"must have at least one positional argument. (This is the "
"argument that is differentiated.)"
)
else:
raise TypeError(
"Functions wrapped with `equinox.filter_{grad, value_and_grad}` "
"must have their first argument passed by position, not keyword. "
"(This is the argument that is differentiated.)"
)
x, *args = args
diff_x, nondiff_x = partition(x, is_inexact_array)
return fun_value_and_grad(diff_x, nondiff_x, *args, **kwargs)
def __get__(self, instance, owner):
if instance is None:
return self
return Partial(self, instance)
| _ValueAndGradWrapper |
python | catalyst-team__catalyst | catalyst/contrib/data/dataset.py | {
"start": 1692,
"end": 2845
} | class ____(Dataset):
"""Abstraction to merge several datasets into one dataset."""
def __init__(self, *datasets: Dataset, dict_transform: Optional[Callable] = None):
"""
Args:
datasets: params count of datasets to merge
dict_transform: transforms common for all datasets.
(for example normalize image, add blur, crop/resize/etc)
"""
self.length = len(datasets[0])
assert all(len(x) == self.length for x in datasets)
self.datasets = datasets
self.dict_transform = dict_transform
def __getitem__(self, index: int) -> Any:
"""Get item from all datasets.
Args:
index: index to value from all datasets
Returns:
list: list of value in every dataset
"""
dcts = [x[index] for x in self.datasets]
dct = merge_dicts(*dcts)
if self.dict_transform is not None:
dct = self.dict_transform(dct)
return dct
def __len__(self) -> int:
"""
Returns:
int: length of the dataset
"""
return self.length
| MergeDataset |
python | scrapy__scrapy | tests/test_utils_iterators.py | {
"start": 13486,
"end": 19427
} | class ____:
def test_csviter_defaults(self):
body = get_testdata("feeds", "feed-sample3.csv")
response = TextResponse(url="http://example.com/", body=body)
csv = csviter(response)
result = list(csv)
assert result == [
{"id": "1", "name": "alpha", "value": "foobar"},
{"id": "2", "name": "unicode", "value": "\xfan\xedc\xf3d\xe9\u203d"},
{"id": "3", "name": "multi", "value": "foo\nbar"},
{"id": "4", "name": "empty", "value": ""},
]
# explicit type check cuz' we no like stinkin' autocasting! yarrr
for result_row in result:
assert all(isinstance(k, str) for k in result_row)
assert all(isinstance(v, str) for v in result_row.values())
def test_csviter_delimiter(self):
body = get_testdata("feeds", "feed-sample3.csv").replace(b",", b"\t")
response = TextResponse(url="http://example.com/", body=body)
csv = csviter(response, delimiter="\t")
assert list(csv) == [
{"id": "1", "name": "alpha", "value": "foobar"},
{"id": "2", "name": "unicode", "value": "\xfan\xedc\xf3d\xe9\u203d"},
{"id": "3", "name": "multi", "value": "foo\nbar"},
{"id": "4", "name": "empty", "value": ""},
]
def test_csviter_quotechar(self):
body1 = get_testdata("feeds", "feed-sample6.csv")
body2 = get_testdata("feeds", "feed-sample6.csv").replace(b",", b"|")
response1 = TextResponse(url="http://example.com/", body=body1)
csv1 = csviter(response1, quotechar="'")
assert list(csv1) == [
{"id": "1", "name": "alpha", "value": "foobar"},
{"id": "2", "name": "unicode", "value": "\xfan\xedc\xf3d\xe9\u203d"},
{"id": "3", "name": "multi", "value": "foo\nbar"},
{"id": "4", "name": "empty", "value": ""},
]
response2 = TextResponse(url="http://example.com/", body=body2)
csv2 = csviter(response2, delimiter="|", quotechar="'")
assert list(csv2) == [
{"id": "1", "name": "alpha", "value": "foobar"},
{"id": "2", "name": "unicode", "value": "\xfan\xedc\xf3d\xe9\u203d"},
{"id": "3", "name": "multi", "value": "foo\nbar"},
{"id": "4", "name": "empty", "value": ""},
]
def test_csviter_wrong_quotechar(self):
body = get_testdata("feeds", "feed-sample6.csv")
response = TextResponse(url="http://example.com/", body=body)
csv = csviter(response)
assert list(csv) == [
{"'id'": "1", "'name'": "'alpha'", "'value'": "'foobar'"},
{
"'id'": "2",
"'name'": "'unicode'",
"'value'": "'\xfan\xedc\xf3d\xe9\u203d'",
},
{"'id'": "'3'", "'name'": "'multi'", "'value'": "'foo"},
{"'id'": "4", "'name'": "'empty'", "'value'": ""},
]
def test_csviter_delimiter_binary_response_assume_utf8_encoding(self):
body = get_testdata("feeds", "feed-sample3.csv").replace(b",", b"\t")
response = Response(url="http://example.com/", body=body)
csv = csviter(response, delimiter="\t")
assert list(csv) == [
{"id": "1", "name": "alpha", "value": "foobar"},
{"id": "2", "name": "unicode", "value": "\xfan\xedc\xf3d\xe9\u203d"},
{"id": "3", "name": "multi", "value": "foo\nbar"},
{"id": "4", "name": "empty", "value": ""},
]
def test_csviter_headers(self):
sample = get_testdata("feeds", "feed-sample3.csv").splitlines()
headers, body = sample[0].split(b","), b"\n".join(sample[1:])
response = TextResponse(url="http://example.com/", body=body)
csv = csviter(response, headers=[h.decode("utf-8") for h in headers])
assert list(csv) == [
{"id": "1", "name": "alpha", "value": "foobar"},
{"id": "2", "name": "unicode", "value": "\xfan\xedc\xf3d\xe9\u203d"},
{"id": "3", "name": "multi", "value": "foo\nbar"},
{"id": "4", "name": "empty", "value": ""},
]
def test_csviter_falserow(self):
body = get_testdata("feeds", "feed-sample3.csv")
body = b"\n".join((body, b"a,b", b"a,b,c,d"))
response = TextResponse(url="http://example.com/", body=body)
csv = csviter(response)
assert list(csv) == [
{"id": "1", "name": "alpha", "value": "foobar"},
{"id": "2", "name": "unicode", "value": "\xfan\xedc\xf3d\xe9\u203d"},
{"id": "3", "name": "multi", "value": "foo\nbar"},
{"id": "4", "name": "empty", "value": ""},
]
def test_csviter_exception(self):
body = get_testdata("feeds", "feed-sample3.csv")
response = TextResponse(url="http://example.com/", body=body)
my_iter = csviter(response)
next(my_iter)
next(my_iter)
next(my_iter)
next(my_iter)
with pytest.raises(StopIteration):
next(my_iter)
def test_csviter_encoding(self):
body1 = get_testdata("feeds", "feed-sample4.csv")
body2 = get_testdata("feeds", "feed-sample5.csv")
response = TextResponse(
url="http://example.com/", body=body1, encoding="latin1"
)
csv = csviter(response)
assert list(csv) == [
{"id": "1", "name": "latin1", "value": "test"},
{"id": "2", "name": "something", "value": "\xf1\xe1\xe9\xf3"},
]
response = TextResponse(url="http://example.com/", body=body2, encoding="cp852")
csv = csviter(response)
assert list(csv) == [
{"id": "1", "name": "cp852", "value": "test"},
{
"id": "2",
"name": "something",
"value": "\u255a\u2569\u2569\u2569\u2550\u2550\u2557",
},
]
| TestUtilsCsv |
python | encode__django-rest-framework | rest_framework/parsers.py | {
"start": 907,
"end": 1428
} | class ____:
"""
All parsers should extend `BaseParser`, specifying a `media_type`
attribute, and overriding the `.parse()` method.
"""
media_type = None
def parse(self, stream, media_type=None, parser_context=None):
"""
Given a stream to read from, return the parsed representation.
Should return parsed data, or a `DataAndFiles` object consisting of the
parsed data and files.
"""
raise NotImplementedError(".parse() must be overridden.")
| BaseParser |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 110839,
"end": 111003
} | class ____:
xlAlways = 1 # from enum XlRobustConnect
xlAsRequired = 0 # from enum XlRobustConnect
xlNever = 2 # from enum XlRobustConnect
| RobustConnect |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_column_to_have_no_months_missing.py | {
"start": 1631,
"end": 6116
} | class ____(ColumnAggregateExpectation):
"""
This metric expects data to include dates from consecutive months, with no months missing in
between (relative to the maximal and minimal month existing in the data).
User can define a threshold which allows for some months to be missing (equal or lower than the threshold).
Expectation fails if the number of missing months is over the threshold.
Keyword args:
- threshold (int)
"""
from datetime import datetime
today = datetime.now()
months_ago = {
1: today - relativedelta(months=1),
2: today - relativedelta(months=2),
3: today - relativedelta(months=3),
4: today - relativedelta(months=4),
}
examples = [
{
"data": {
"column_two_months_missing": [
today,
months_ago[1],
months_ago[1],
months_ago[4],
],
"column_one_month_missing": [
today,
months_ago[2],
months_ago[2],
months_ago[2],
],
"column_none_missing": [
today,
months_ago[1],
months_ago[1],
months_ago[2],
],
},
"suppress_test_for": ["mssql"],
"tests": [
{
"title": "negative_missing_two_months",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "column_two_months_missing", "threshold": 1},
"out": {"success": False},
},
{
"title": "positive_missing_two_months",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "column_two_months_missing", "threshold": 4},
"out": {"success": True},
},
{
"title": "negative_missing_one_month",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "column_one_month_missing", "threshold": 0},
"out": {"success": False},
},
{
"title": "positive_none_missing",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "column_none_missing", "threshold": 2},
"out": {"success": True},
},
],
}
]
# Setting necessary computation metric dependencies and defining kwargs, as well as assigning kwargs default values
metric_dependencies = ("column.distinct_months",)
success_keys = ("threshold",)
# Default values
default_kwarg_values = {}
library_metadata = {
"maturity": "experimental",
"contributors": [
"@hadasm",
],
"tags": ["date-column"],
}
def _validate(
self,
metrics: Dict,
runtime_configuration: dict = None,
execution_engine: ExecutionEngine = None,
):
from datetime import datetime
dist_months_as_str = metrics["column.distinct_months"]
distinct_months_sorted = sorted(
[datetime.strptime(month_str, MONTH_FORMAT) for month_str in dist_months_as_str]
)
min_month, max_month = distinct_months_sorted[0], distinct_months_sorted[-1]
months_diff = relativedelta(max_month, min_month).months
month_set = {min_month + relativedelta(months=n_month) for n_month in range(months_diff)}
n_missing_months = len(month_set - set(distinct_months_sorted))
threshold = self._get_success_kwargs().get("threshold")
success: bool = n_missing_months <= threshold
return {
"success": success,
"result": {
"Number of missing months": n_missing_months,
"Total unique months": len(distinct_months_sorted),
"Threshold": threshold,
"Min date": min_month,
"Max date": max_month,
},
}
if __name__ == "__main__":
ExpectColumnToHaveNoMonthsMissing().print_diagnostic_checklist()
| ExpectColumnToHaveNoMonthsMissing |
python | milvus-io__pymilvus | tests/test_types.py | {
"start": 2059,
"end": 2612
} | class ____:
def test_consistency_level_int(self):
for v in ConsistencyLevel.values():
assert v == get_consistency_level(v)
def test_consistency_level_str(self):
for k in ConsistencyLevel.keys():
assert ConsistencyLevel.Value(k) == get_consistency_level(k)
@pytest.mark.parametrize("invalid", [6, 100, "not supported", "中文", 1.0])
def test_consistency_level_invalid(self, invalid):
with pytest.raises(InvalidConsistencyLevel):
get_consistency_level(invalid)
| TestConsistencyLevel |
python | redis__redis-py | redis/commands/bf/info.py | {
"start": 1644,
"end": 1997
} | class ____:
width = None
depth = None
count = None
def __init__(self, args):
response = dict(zip(map(nativestr, args[::2]), args[1::2]))
self.width = response["width"]
self.depth = response["depth"]
self.count = response["count"]
def __getitem__(self, item):
return getattr(self, item)
| CMSInfo |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 208857,
"end": 209894
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of CreateTeamDiscussion"""
__schema__ = github_schema
__field_names__ = ("team_id", "title", "body", "private", "client_mutation_id")
team_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="teamId")
"""The ID of the team to which the discussion belongs."""
title = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="title")
"""The title of the discussion."""
body = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="body")
"""The content of the discussion."""
private = sgqlc.types.Field(Boolean, graphql_name="private")
"""If true, restricts the visibility of this discussion to team
members and organization admins. If false or not specified, allows
any organization member to view this discussion.
"""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| CreateTeamDiscussionInput |
python | skorch-dev__skorch | skorch/tests/test_dataset.py | {
"start": 7890,
"end": 9802
} | class ____:
@pytest.fixture(scope='module')
def module_cls(self):
"""Return a simple module that concatenates its 2 inputs in
forward step.
"""
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.dense = nn.Linear(20, 2)
# pylint: disable=arguments-differ
def forward(self, X0, X1):
X = torch.cat((X0, X1), 1)
X = F.softmax(self.dense(X), dim=-1)
return X
return MyModule
@pytest.fixture(scope='module')
def data(self):
X, y = make_classification(1000, 20, n_informative=10, random_state=0)
X = X.astype(np.float32)
return X[:, :10], X[:, 10:], y
@pytest.fixture(scope='module')
def net_cls(self):
from skorch import NeuralNetClassifier
return NeuralNetClassifier
@pytest.fixture(scope='module')
def net(self, net_cls, module_cls):
return net_cls(
module_cls,
max_epochs=2,
lr=0.1,
)
def test_fit_predict_proba(self, net, data):
X = {'X0': data[0], 'X1': data[1]}
y = data[2]
net.fit(X, y)
y_proba = net.predict_proba(X)
assert np.allclose(y_proba.sum(1), 1)
# Issue #142: check that all batch sizes are consistent with
# `net.batch_size`, even when the input type is a dictionary.
# Note that we allow for different batch sizes as the total
# number of samples may not be divisible by the batch size.
batch_sizes = lambda n: set(sum(net.history[:, 'batches', :, n], []))
train_batch_sizes = batch_sizes('train_batch_size')
valid_batch_sizes = batch_sizes('valid_batch_size')
assert net.batch_size in train_batch_sizes
assert net.batch_size in valid_batch_sizes
| TestNetWithDict |
python | pyca__cryptography | tests/hazmat/primitives/test_ec.py | {
"start": 8349,
"end": 25248
} | class ____:
def test_signing_with_example_keys(self, backend, subtests):
vectors = itertools.product(
load_vectors_from_file(
os.path.join(
"asymmetric", "ECDSA", "FIPS_186-3", "KeyPair.rsp"
),
load_fips_ecdsa_key_pair_vectors,
),
_HASH_TYPES.values(),
)
for vector, hash_type in vectors:
with subtests.test():
curve = ec._CURVE_TYPES[vector["curve"]]
_skip_ecdsa_vector(backend, curve, hash_type)
key = ec.EllipticCurvePrivateNumbers(
vector["d"],
ec.EllipticCurvePublicNumbers(
vector["x"], vector["y"], curve
),
).private_key(backend)
assert key
pkey = key.public_key()
assert pkey
signature = key.sign(
b"YELLOW SUBMARINE", ec.ECDSA(hash_type())
)
pkey.verify(
signature, b"YELLOW SUBMARINE", ec.ECDSA(hash_type())
)
@pytest.mark.parametrize("curve", ec._CURVE_TYPES.values())
def test_generate_vector_curves(self, backend, curve):
_skip_curve_unsupported(backend, curve)
key = ec.generate_private_key(curve, backend)
assert key
assert type(key.curve) is type(curve)
assert key.curve.key_size
pkey = key.public_key()
assert pkey
assert type(pkey.curve) is type(curve)
assert key.curve.key_size == pkey.curve.key_size
def test_generate_unknown_curve(self, backend):
with raises_unsupported_algorithm(
exceptions._Reasons.UNSUPPORTED_ELLIPTIC_CURVE
):
ec.generate_private_key(DummyCurve(), backend)
assert (
backend.elliptic_curve_signature_algorithm_supported(
ec.ECDSA(hashes.SHA256()), DummyCurve()
)
is False
)
@pytest.mark.skip_fips(
reason="Some FIPS curves aren't supported but work anyways"
)
@pytest.mark.parametrize("curve", ec._CURVE_TYPES.values())
def test_generate_unsupported_curve(
self, backend, curve: ec.EllipticCurve
):
if backend.elliptic_curve_supported(curve):
return
with raises_unsupported_algorithm(
exceptions._Reasons.UNSUPPORTED_ELLIPTIC_CURVE
):
ec.generate_private_key(curve)
def test_unknown_signature_algoritm(self, backend):
_skip_curve_unsupported(backend, ec.SECP192R1())
key = ec.generate_private_key(ec.SECP192R1(), backend)
with raises_unsupported_algorithm(
exceptions._Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM
):
key.sign(b"somedata", DummySignatureAlgorithm())
with raises_unsupported_algorithm(
exceptions._Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM
):
key.public_key().verify(
b"signature", b"data", DummySignatureAlgorithm()
)
assert (
backend.elliptic_curve_signature_algorithm_supported(
DummySignatureAlgorithm(), ec.SECP192R1()
)
is False
)
def test_load_invalid_ec_key_from_numbers(self, backend):
_skip_curve_unsupported(backend, ec.SECP256R1())
numbers = ec.EllipticCurvePrivateNumbers(
357646505660320080863666618182642070958081774038609089496899025506,
ec.EllipticCurvePublicNumbers(
47250808410327023131573602008345894927686381772325561185532964,
1120253292479243545483756778742719537373113335231773536789915,
ec.SECP256R1(),
),
)
with pytest.raises(ValueError):
numbers.private_key(backend)
numbers = ec.EllipticCurvePrivateNumbers(
357646505660320080863666618182642070958081774038609089496899025506,
ec.EllipticCurvePublicNumbers(
-4725080841032702313157360200834589492768638177232556118553296,
1120253292479243545483756778742719537373113335231773536789915,
ec.SECP256R1(),
),
)
with pytest.raises(ValueError):
numbers.private_key(backend)
numbers = ec.EllipticCurvePrivateNumbers(
357646505660320080863666618182642070958081774038609089496899025506,
ec.EllipticCurvePublicNumbers(
47250808410327023131573602008345894927686381772325561185532964,
-1120253292479243545483756778742719537373113335231773536789915,
ec.SECP256R1(),
),
)
with pytest.raises(ValueError):
numbers.private_key(backend)
def test_load_invalid_public_ec_key_from_numbers(self, backend):
_skip_curve_unsupported(backend, ec.SECP521R1())
# Bad X coordinate
numbers = ec.EllipticCurvePublicNumbers(
int(
"000003647356b91f8ace114c7247ecf4f4a622553fc025e04a178f179ef27"
"9090c184af678a4c78f635483bdd8aa544851c6ef291c1f0d6a241ebfd145"
"77d1d30d9903ce",
16,
),
int(
"000001499bc7e079322ea0fcfbd6b40103fa6a1536c2257b182db0df4b369"
"6ec643adf100eb4f2025d1b873f82e5a475d6e4400ba777090eeb4563a115"
"09e4c87319dc26",
16,
),
ec.SECP521R1(),
)
with pytest.raises(ValueError):
numbers.public_key(backend)
# Bad Y coordinate
numbers = ec.EllipticCurvePublicNumbers(
int(
"0000019aadc221cc0525118ab6d5aa1f64720603de0be128cbfea0b381ad8"
"02a2facc6370bb58cf88b3f0c692bc654ee19d6cad198f10d4b681b396f20"
"d2e40603fa945b",
16,
),
int(
"0000025da392803a320717a08d4cb3dea932039badff363b71bdb8064e726"
"6c7f4f4b748d4d425347fc33e3885d34b750fa7fcd5691f4d90c89522ce33"
"feff5db10088a5",
16,
),
ec.SECP521R1(),
)
with pytest.raises(ValueError):
numbers.public_key(backend)
def test_load_invalid_ec_key_from_pem(self, backend):
_skip_curve_unsupported(backend, ec.SECP256R1())
match = r"infinity|invalid form|Invalid key"
with pytest.raises(ValueError, match=match):
serialization.load_pem_public_key(
textwrap.dedent(
"""
-----BEGIN PUBLIC KEY-----
MBkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDAgAA
-----END PUBLIC KEY-----
"""
).encode(),
backend=backend,
)
with pytest.raises(ValueError, match=match):
serialization.load_pem_private_key(
textwrap.dedent(
"""
-----BEGIN PRIVATE KEY-----
MEECAQAwEwYHKoZIzj0CAQYIKoZIzj0DAQcEJzAlAgEBBCD/////AAAAAP//////
////vOb6racXnoTzucrC/GMlUQ==
-----END PRIVATE KEY-----
"""
).encode(),
password=None,
backend=backend,
)
def test_load_private_scalar_greater_than_order_pem(self, backend):
_skip_curve_unsupported(backend, ec.SECP256R1())
data = load_vectors_from_file(
os.path.join(
"asymmetric", "PKCS8", "ec-invalid-private-scalar.pem"
),
lambda pemfile: pemfile.read().encode(),
)
with pytest.raises(ValueError):
serialization.load_pem_private_key(data, password=None)
def test_signatures(self, backend, subtests):
vectors = itertools.chain(
load_vectors_from_file(
os.path.join(
"asymmetric", "ECDSA", "FIPS_186-3", "SigGen.txt"
),
load_fips_ecdsa_signing_vectors,
),
load_vectors_from_file(
os.path.join("asymmetric", "ECDSA", "SECP256K1", "SigGen.txt"),
load_fips_ecdsa_signing_vectors,
),
)
for vector in vectors:
with subtests.test():
hash_type = _HASH_TYPES[vector["digest_algorithm"]]
curve = ec._CURVE_TYPES[vector["curve"]]
_skip_ecdsa_vector(backend, curve, hash_type)
key = ec.EllipticCurvePublicNumbers(
vector["x"], vector["y"], curve
).public_key(backend)
signature = encode_dss_signature(vector["r"], vector["s"])
key.verify(signature, vector["message"], ec.ECDSA(hash_type()))
def test_signature_failures(self, backend, subtests):
vectors = load_vectors_from_file(
os.path.join("asymmetric", "ECDSA", "FIPS_186-3", "SigVer.rsp"),
load_fips_ecdsa_signing_vectors,
)
for vector in vectors:
with subtests.test():
hash_type = _HASH_TYPES[vector["digest_algorithm"]]
curve = ec._CURVE_TYPES[vector["curve"]]
_skip_ecdsa_vector(backend, curve, hash_type)
key = ec.EllipticCurvePublicNumbers(
vector["x"], vector["y"], curve
).public_key(backend)
signature = encode_dss_signature(vector["r"], vector["s"])
if vector["fail"] is True:
with pytest.raises(exceptions.InvalidSignature):
key.verify(
signature, vector["message"], ec.ECDSA(hash_type())
)
else:
key.verify(
signature, vector["message"], ec.ECDSA(hash_type())
)
def test_unsupported_deterministic_nonce(self, backend):
if backend.ecdsa_deterministic_supported():
pytest.skip(
f"ECDSA deterministic signing is supported by this"
f" backend {backend}"
)
with pytest.raises(exceptions.UnsupportedAlgorithm):
ec.ECDSA(hashes.SHA256(), deterministic_signing=True)
def test_deterministic_nonce(self, backend, subtests):
if not backend.ecdsa_deterministic_supported():
pytest.skip(
f"ECDSA deterministic signing is not supported by this"
f" backend {backend}"
)
supported_hash_algorithms = {
"SHA1": hashes.SHA1(),
"SHA224": hashes.SHA224(),
"SHA256": hashes.SHA256(),
"SHA384": hashes.SHA384(),
"SHA512": hashes.SHA512(),
}
curves = {
"B-163": ec.SECT163R2(),
"B-233": ec.SECT233R1(),
"B-283": ec.SECT283R1(),
"B-409": ec.SECT409R1(),
"B-571": ec.SECT571R1(),
"K-163": ec.SECT163K1(),
"K-233": ec.SECT233K1(),
"K-283": ec.SECT283K1(),
"K-409": ec.SECT409K1(),
"K-571": ec.SECT571K1(),
"P-192": ec.SECP192R1(),
"P-224": ec.SECP224R1(),
"P-256": ec.SECP256R1(),
"P-384": ec.SECP384R1(),
"P-521": ec.SECP521R1(),
}
vectors = load_vectors_from_file(
os.path.join(
"asymmetric", "ECDSA", "RFC6979", "evppkey_ecdsa_rfc6979.txt"
),
load_rfc6979_vectors,
)
for vector in vectors:
with subtests.test():
input = bytes(vector["input"], "utf-8")
output = bytes.fromhex(vector["output"])
key = bytes("\n".join(vector["key"]), "utf-8")
curve = curves[vector["key_name"].split("_")[0]]
_skip_curve_unsupported(backend, curve)
if "digest_sign" in vector:
algorithm = vector["digest_sign"]
hash_algorithm = supported_hash_algorithms[algorithm]
algorithm = ec.ECDSA(
hash_algorithm,
deterministic_signing=vector["deterministic_nonce"],
)
private_key = serialization.load_pem_private_key(
key, password=None
)
assert isinstance(private_key, EllipticCurvePrivateKey)
signature = private_key.sign(input, algorithm)
assert signature == output
else:
assert "digest_verify" in vector
algorithm = vector["digest_verify"]
assert algorithm in supported_hash_algorithms
hash_algorithm = supported_hash_algorithms[algorithm]
algorithm = ec.ECDSA(hash_algorithm)
public_key = serialization.load_pem_public_key(key)
assert isinstance(public_key, EllipticCurvePublicKey)
if vector["verify_error"]:
with pytest.raises(exceptions.InvalidSignature):
public_key.verify(output, input, algorithm)
else:
public_key.verify(output, input, algorithm)
def test_sign(self, backend):
_skip_curve_unsupported(backend, ec.SECP256R1())
message = b"one little message"
algorithm = ec.ECDSA(hashes.SHA256())
private_key = ec.generate_private_key(ec.SECP256R1(), backend)
signature = private_key.sign(message, algorithm)
public_key = private_key.public_key()
public_key.verify(signature, message, algorithm)
def test_sign_verify_buffers(self, backend):
_skip_curve_unsupported(backend, ec.SECP256R1())
message = bytearray(b"one little message")
algorithm = ec.ECDSA(hashes.SHA256())
private_key = ec.generate_private_key(ec.SECP256R1(), backend)
signature = private_key.sign(message, algorithm)
public_key = private_key.public_key()
public_key.verify(bytearray(signature), message, algorithm)
def test_sign_prehashed(self, backend):
_skip_curve_unsupported(backend, ec.SECP256R1())
message = b"one little message"
h = hashes.Hash(hashes.SHA256(), backend)
h.update(message)
data = h.finalize()
algorithm = ec.ECDSA(Prehashed(hashes.SHA256()))
private_key = ec.generate_private_key(ec.SECP256R1(), backend)
signature = private_key.sign(data, algorithm)
public_key = private_key.public_key()
public_key.verify(signature, message, ec.ECDSA(hashes.SHA256()))
def test_sign_prehashed_digest_mismatch(self, backend):
_skip_curve_unsupported(backend, ec.SECP256R1())
message = b"one little message"
h = hashes.Hash(hashes.SHA224(), backend)
h.update(message)
data = h.finalize()
algorithm = ec.ECDSA(Prehashed(hashes.SHA256()))
private_key = ec.generate_private_key(ec.SECP256R1(), backend)
with pytest.raises(ValueError):
private_key.sign(data, algorithm)
def test_verify(self, backend):
_skip_curve_unsupported(backend, ec.SECP256R1())
message = b"one little message"
algorithm = ec.ECDSA(hashes.SHA256())
private_key = ec.generate_private_key(ec.SECP256R1(), backend)
signature = private_key.sign(message, algorithm)
public_key = private_key.public_key()
public_key.verify(signature, message, algorithm)
def test_verify_prehashed(self, backend):
_skip_curve_unsupported(backend, ec.SECP256R1())
message = b"one little message"
algorithm = ec.ECDSA(hashes.SHA256())
private_key = ec.generate_private_key(ec.SECP256R1(), backend)
signature = private_key.sign(message, algorithm)
h = hashes.Hash(hashes.SHA256(), backend)
h.update(message)
data = h.finalize()
public_key = private_key.public_key()
public_key.verify(
signature, data, ec.ECDSA(Prehashed(hashes.SHA256()))
)
def test_verify_prehashed_digest_mismatch(self, backend):
_skip_curve_unsupported(backend, ec.SECP256R1())
message = b"one little message"
private_key = ec.generate_private_key(ec.SECP256R1(), backend)
h = hashes.Hash(hashes.SHA224(), backend)
h.update(message)
data = h.finalize()
public_key = private_key.public_key()
with pytest.raises(ValueError):
public_key.verify(
b"\x00" * 32, data, ec.ECDSA(Prehashed(hashes.SHA256()))
)
| TestECDSAVectors |
python | ansible__ansible | lib/ansible/plugins/doc_fragments/template_common.py | {
"start": 193,
"end": 4666
} | class ____(object):
# Standard template documentation fragment, use by template and win_template.
DOCUMENTATION = r"""
description:
- Templates are processed by the L(Jinja2 templating language,https://jinja.palletsprojects.com/en/stable/).
- Documentation on the template formatting can be found in the
L(Template Designer Documentation,https://jinja.palletsprojects.com/en/stable/templates/).
- Additional variables listed below can be used in templates.
- C(ansible_managed) (configurable via the C(defaults) section of C(ansible.cfg)) contains a string which can be used to
describe the template name, host, modification time of the template file and the owner uid.
- C(template_host) contains the node name of the template's machine.
- C(template_uid) is the numeric user id of the owner.
- C(template_path) is the path of the template.
- C(template_fullpath) is the absolute path of the template.
- C(template_destpath) is the path of the template on the remote system (added in 2.8).
- C(template_run_date) is the date that the template was rendered.
options:
src:
description:
- Path of a Jinja2 formatted template on the Ansible controller.
- This can be a relative or an absolute path.
- The file must be encoded with C(utf-8) but O(output_encoding) can be used to control the encoding of the output
template.
type: path
required: yes
dest:
description:
- Location to render the template to on the remote machine.
type: path
required: yes
newline_sequence:
description:
- Specify the newline sequence to use for templating files.
type: str
choices: [ '\n', '\r', '\r\n' ]
default: '\n'
version_added: '2.4'
block_start_string:
description:
- The string marking the beginning of a block.
type: str
default: '{%'
version_added: '2.4'
block_end_string:
description:
- The string marking the end of a block.
type: str
default: '%}'
version_added: '2.4'
variable_start_string:
description:
- The string marking the beginning of a print statement.
type: str
default: '{{'
version_added: '2.4'
variable_end_string:
description:
- The string marking the end of a print statement.
type: str
default: '}}'
version_added: '2.4'
comment_start_string:
description:
- The string marking the beginning of a comment statement.
type: str
version_added: '2.12'
comment_end_string:
description:
- The string marking the end of a comment statement.
type: str
version_added: '2.12'
trim_blocks:
description:
- Determine when newlines should be removed from blocks.
- When set to V(yes) the first newline after a block is removed (block, not variable tag!).
type: bool
default: yes
version_added: '2.4'
lstrip_blocks:
description:
- Determine when leading spaces and tabs should be stripped.
- When set to V(yes) leading spaces and tabs are stripped from the start of a line to a block.
type: bool
default: no
version_added: '2.6'
force:
description:
- Determine when the file is being transferred if the destination already exists.
- When set to C(yes), replace the remote file when contents are different than the source.
- When set to C(no), the file will only be transferred if the destination does not exist.
type: bool
default: yes
output_encoding:
description:
- Overrides the encoding used to write the template file defined by O(dest).
- It defaults to C(utf-8), but any encoding supported by python can be used.
- The source template file must always be encoded using C(utf-8), for homogeneity.
type: str
default: utf-8
version_added: '2.7'
notes:
- Including a string that uses a date in the template will result in the template being marked 'changed' each time.
- Since Ansible 0.9, templates are loaded with O(trim_blocks=True).
- >
Also, you can override jinja2 settings by adding a special header to template file.
that is C(#jinja2:variable_start_string:'[%', variable_end_string:'%]', trim_blocks: False)
which changes the variable interpolation markers to C([% var %]) instead of C({{ var }}).
This is the best way to prevent evaluation of things that look like, but should not be Jinja2.
- To find Byte Order Marks in files, use C(Format-Hex <file> -Count 16) on Windows, and use C(od -a -t x1 -N 16 <file>)
on Linux.
"""
| ModuleDocFragment |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 588457,
"end": 589459
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for OrganizationInvitation."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count", "total_unique_user_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("EnterpriseFailedInvitationEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("OrganizationInvitation"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
total_unique_user_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalUniqueUserCount")
"""Identifies the total count of unique users in the connection."""
| EnterpriseFailedInvitationConnection |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol53.py | {
"start": 359,
"end": 422
} | class ____:
def m(self) -> "Impl_CoRecurs": ...
| Impl_CoRecurs |
python | pydantic__pydantic | tests/mypy/outputs/mypy-default_ini/pydantic_settings.py | {
"start": 65,
"end": 867
} | class ____(BaseSettings):
foo: str
s = Settings()
# MYPY: error: Missing named argument "foo" for "Settings" [call-arg]
s = Settings(foo='test', _case_sensitive=True, _env_prefix='test__', _env_file='test')
# MYPY: error: Unexpected keyword argument "_case_sensitive" for "Settings" [call-arg]
# MYPY: error: Unexpected keyword argument "_env_prefix" for "Settings" [call-arg]
# MYPY: error: Unexpected keyword argument "_env_file" for "Settings" [call-arg]
s = Settings(foo='test', _case_sensitive=1, _env_prefix=2, _env_file=3)
# MYPY: error: Unexpected keyword argument "_case_sensitive" for "Settings" [call-arg]
# MYPY: error: Unexpected keyword argument "_env_prefix" for "Settings" [call-arg]
# MYPY: error: Unexpected keyword argument "_env_file" for "Settings" [call-arg]
| Settings |
python | sqlalchemy__sqlalchemy | test/sql/test_text.py | {
"start": 18084,
"end": 26812
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
def test_basic_toplevel_resultmap_positional(self):
t = text("select id, name from user").columns(
column("id", Integer), column("name")
)
col_pos = {col.name: idx for idx, col in enumerate(t.selected_columns)}
compiled = t.compile()
eq_(
compiled._create_result_map(),
{
"id": (
"id",
(t.selected_columns.id, "id", "id", "id"),
t.selected_columns.id.type,
col_pos["id"],
),
"name": (
"name",
(t.selected_columns.name, "name", "name", "name"),
t.selected_columns.name.type,
col_pos["name"],
),
},
)
def test_basic_toplevel_resultmap(self):
t = text("select id, name from user").columns(id=Integer, name=String)
col_pos = {col.name: idx for idx, col in enumerate(t.selected_columns)}
compiled = t.compile()
eq_(
compiled._create_result_map(),
{
"id": (
"id",
(t.selected_columns.id, "id", "id", "id"),
t.selected_columns.id.type,
col_pos["id"],
),
"name": (
"name",
(t.selected_columns.name, "name", "name", "name"),
t.selected_columns.name.type,
col_pos["name"],
),
},
)
def test_basic_subquery_resultmap(self):
t = (
text("select id, name from user")
.columns(id=Integer, name=String)
.subquery()
)
stmt = select(table1.c.myid).select_from(
table1.join(t, table1.c.myid == t.c.id)
)
compiled = stmt.compile()
eq_(
compiled._create_result_map(),
{
"myid": (
"myid",
(table1.c.myid, "myid", "myid", "mytable_myid"),
table1.c.myid.type,
0,
)
},
)
def test_column_collection_ordered(self):
t = text("select a, b, c from foo").columns(
column("a"), column("b"), column("c")
)
eq_(t.selected_columns.keys(), ["a", "b", "c"])
def test_column_collection_pos_plus_bykey(self):
# overlapping positional names + type names
t = text("select a, b, c from foo").columns(
column("a"), column("b"), b=Integer, c=String
)
eq_(t.selected_columns.keys(), ["a", "b", "c"])
eq_(t.selected_columns.b.type._type_affinity, Integer)
eq_(t.selected_columns.c.type._type_affinity, String)
def _xy_table_fixture(self):
m = MetaData()
t = Table("t", m, Column("x", Integer), Column("y", Integer))
return t
def _mapping(self, stmt):
compiled = stmt.compile()
return {
elem: key
for key, elements in compiled._create_result_map().items()
for elem in elements[1]
}
def test_select_label_alt_name(self):
t = self._xy_table_fixture()
l1, l2 = t.c.x.label("a"), t.c.y.label("b")
s = text("select x AS a, y AS b FROM t").columns(l1, l2)
mapping = self._mapping(s)
assert l1 in mapping
assert t.c.x not in mapping
def test_select_alias_label_alt_name(self):
t = self._xy_table_fixture()
l1, l2 = t.c.x.label("a"), t.c.y.label("b")
s = text("select x AS a, y AS b FROM t").columns(l1, l2).alias()
mapping = self._mapping(s)
assert l1 in mapping
assert t.c.x not in mapping
def test_select_column(self):
t = self._xy_table_fixture()
x, y = t.c.x, t.c.y
s = text("select x, y FROM t").columns(x, y)
mapping = self._mapping(s)
assert t.c.x in mapping
def test_select_alias_column(self):
t = self._xy_table_fixture()
x, y = t.c.x, t.c.y
s = text("select x, y FROM t").columns(x, y).alias()
mapping = self._mapping(s)
assert t.c.x in mapping
def test_select_table_alias_column(self):
t = self._xy_table_fixture()
x = t.c.x
ta = t.alias()
s = text("select ta.x, ta.y FROM t AS ta").columns(ta.c.x, ta.c.y)
mapping = self._mapping(s)
assert x not in mapping
def test_subquery_accessors(self):
t = self._xy_table_fixture()
s = text("SELECT x from t").columns(t.c.x)
self.assert_compile(
select(s.scalar_subquery()), "SELECT (SELECT x from t) AS anon_1"
)
self.assert_compile(
select(s.subquery()),
"SELECT anon_1.x FROM (SELECT x from t) AS anon_1",
)
def test_select_label_alt_name_table_alias_column(self):
t = self._xy_table_fixture()
x = t.c.x
ta = t.alias()
l1, l2 = ta.c.x.label("a"), ta.c.y.label("b")
s = text("SELECT ta.x AS a, ta.y AS b FROM t AS ta").columns(l1, l2)
mapping = self._mapping(s)
assert x not in mapping
assert l1 in mapping
assert ta.c.x not in mapping
def test_cte(self):
t = (
text("select id, name from user")
.columns(id=Integer, name=String)
.cte("t")
)
s = select(table1).where(table1.c.myid == t.c.id)
self.assert_compile(
s,
"WITH t AS (select id, name from user) "
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable, t WHERE mytable.myid = t.id",
)
def test_cte_recursive(self):
t = (
text("select id, name from user")
.columns(id=Integer, name=String)
.cte("t", recursive=True)
)
s = select(table1).where(table1.c.myid == t.c.id)
self.assert_compile(
s,
"WITH RECURSIVE t(id, name) AS (select id, name from user) "
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable, t WHERE mytable.myid = t.id",
)
def test_unions(self):
s1 = text("select id, name from user where id > 5").columns(
id=Integer, name=String
)
s2 = text("select id, name from user where id < 15").columns(
id=Integer, name=String
)
stmt = union(s1, s2)
eq_(stmt.selected_columns.keys(), ["id", "name"])
self.assert_compile(
stmt,
"select id, name from user where id > 5 UNION "
"select id, name from user where id < 15",
)
def test_subquery(self):
t = (
text("select id, name from user")
.columns(id=Integer, name=String)
.subquery()
)
stmt = (
select(table1.c.myid)
.select_from(table1.join(t, table1.c.myid == t.c.id))
.order_by(t.c.name)
)
self.assert_compile(
stmt,
"SELECT mytable.myid FROM mytable JOIN "
"(select id, name from user) AS anon_1 "
"ON mytable.myid = anon_1.id ORDER BY anon_1.name",
)
def test_alias(self):
t = (
text("select id, name from user")
.columns(id=Integer, name=String)
.alias("t")
)
s = select(table1).where(table1.c.myid == t.c.id)
self.assert_compile(
s,
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable, (select id, name from user) AS t "
"WHERE mytable.myid = t.id",
)
def test_scalar_subquery(self):
t = text("select id from user").columns(id=Integer)
subq = t.scalar_subquery()
assert subq.type._type_affinity is Integer()._type_affinity
s = select(table1.c.myid, subq).where(table1.c.myid == subq)
self.assert_compile(
s,
"SELECT mytable.myid, (select id from user) AS anon_1 "
"FROM mytable WHERE mytable.myid = (select id from user)",
)
def test_build_bindparams(self):
t = text("select id from user :foo :bar :bat")
t = t.bindparams(bindparam("foo", type_=Integer))
t = t.columns(id=Integer)
t = t.bindparams(bar=String)
t = t.bindparams(bindparam("bat", value="bat"))
eq_(set(t.element._bindparams), {"bat", "foo", "bar"})
| AsFromTest |
python | ansible__ansible | test/integration/targets/jinja_plugins/test_plugins/bad_test.py | {
"start": 180,
"end": 255
} | class ____:
def tests(self):
raise TypeError('bad_test')
| TestModule |
python | spyder-ide__spyder | spyder/plugins/editor/widgets/window.py | {
"start": 12423,
"end": 19744
} | class ____(QMainWindow, SpyderWidgetMixin):
CONF_SECTION = "editor"
sig_window_state_changed = Signal(object)
def __init__(self, main_widget, menu_actions, outline_plugin, parent=None):
# Parent needs to be `None` if the created widget is meant to be
# independent. See spyder-ide/spyder#17803
super().__init__(parent, class_parent=main_widget)
self.setAttribute(Qt.WA_DeleteOnClose)
# ---- Attributes
self.main_widget = main_widget
self.window_size = None
self.toolbars = []
# ---- Main widget
self.editorwidget = EditorWidget(
self,
main_widget,
menu_actions,
outline_plugin
)
self.sig_window_state_changed.connect(
self.editorwidget.on_window_state_changed
)
self.setCentralWidget(self.editorwidget)
# ---- Style
self.setStyleSheet(str(APP_STYLESHEET))
if not sys.platform == "darwin":
self.menuBar().setStyleSheet(str(MENUBAR_STYLESHEET))
# Give focus to current editor to update/show all status bar widgets
editorstack = self.editorwidget.editorsplitter.editorstack
editor = editorstack.get_current_editor()
if editor is not None:
editor.setFocus()
self.setWindowTitle("Spyder - %s" % main_widget.windowTitle())
self.setWindowIcon(main_widget.windowIcon())
# ---- Add toolbars
toolbar_list = [
ApplicationToolbars.File,
ApplicationToolbars.Run,
ApplicationToolbars.Debug
]
for toolbar_id in toolbar_list:
# This is necessary to run tests for this widget without Spyder's
# main window
try:
toolbar = self.get_toolbar(toolbar_id, plugin=Plugins.Toolbar)
except KeyError:
continue
new_toolbar = ApplicationToolbar(self, toolbar_id, toolbar._title)
for action in toolbar.actions():
new_toolbar.add_item(action)
new_toolbar.render()
new_toolbar.setMovable(False)
self.addToolBar(new_toolbar)
self.toolbars.append(new_toolbar)
# ---- Add menus
menu_list = [
ApplicationMenus.File,
ApplicationMenus.Edit,
ApplicationMenus.Search,
ApplicationMenus.Source,
ApplicationMenus.Run,
ApplicationMenus.Tools,
EditorMainWindowMenus.Window,
ApplicationMenus.Help
]
for menu_id in menu_list:
if menu_id == EditorMainWindowMenus.Window:
window_menu = self._create_window_menu()
self.menuBar().addMenu(window_menu)
else:
# This is necessary to run tests for this widget without
# Spyder's main window
try:
self.menuBar().addMenu(
self.get_menu(menu_id, plugin=Plugins.MainMenu)
)
except KeyError:
continue
# ---- Qt methods
# -------------------------------------------------------------------------
def resizeEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.isFullScreen():
self.window_size = self.size()
QMainWindow.resizeEvent(self, event)
def closeEvent(self, event):
"""Reimplement Qt method"""
self.editorwidget.unregister_all_editorstacks()
if self.main_widget.windowwidget is not None:
self.main_widget.dockwidget.setWidget(self.main_widget)
self.main_widget.dockwidget.setVisible(True)
self.main_widget.switch_to_plugin()
QMainWindow.closeEvent(self, event)
if self.main_widget.windowwidget is not None:
self.main_widget.windowwidget = None
def changeEvent(self, event):
"""
Override Qt method to emit a custom `sig_windowstate_changed` signal
when there's a change in the window state.
"""
if event.type() == QEvent.WindowStateChange:
self.sig_window_state_changed.emit(self.windowState())
super().changeEvent(event)
# ---- Public API
# -------------------------------------------------------------------------
def get_layout_settings(self):
"""Return layout state"""
splitsettings = self.editorwidget.editorsplitter.get_layout_settings()
return dict(size=(self.window_size.width(), self.window_size.height()),
pos=(self.pos().x(), self.pos().y()),
is_maximized=self.isMaximized(),
is_fullscreen=self.isFullScreen(),
hexstate=qbytearray_to_str(self.saveState()),
splitsettings=splitsettings)
def set_layout_settings(self, settings):
"""Restore layout state"""
size = settings.get('size')
if size is not None:
self.resize(QSize(*size))
self.window_size = self.size()
pos = settings.get('pos')
if pos is not None:
self.move(QPoint(*pos))
hexstate = settings.get('hexstate')
if hexstate is not None:
self.restoreState(
QByteArray().fromHex(str(hexstate).encode('utf-8'))
)
if settings.get('is_maximized'):
self.setWindowState(Qt.WindowMaximized)
if settings.get('is_fullscreen'):
self.setWindowState(Qt.WindowFullScreen)
splitsettings = settings.get('splitsettings')
if splitsettings is not None:
self.editorwidget.editorsplitter.set_layout_settings(splitsettings)
# ---- Private API
# -------------------------------------------------------------------------
def _create_window_menu(self):
# Create menu
window_menu = self._create_menu(
menu_id=EditorMainWindowMenus.Window,
parent=self,
title=_("&Window"),
register=False,
MenuClass=ApplicationMenu
)
# Create Outline action
self.toggle_outline_action = self.create_action(
EditorMainWindowActions.ToggleOutline,
_("Outline"),
toggled=True,
option="show_outline_in_editor_window"
)
window_menu.add_action(
self.toggle_outline_action,
section=WindowMenuSections.Outline
)
# Add toolbar toggle window actions
visible_toolbars = self.get_conf(
'last_visible_toolbars',
section='toolbar'
)
for toolbar in self.toolbars:
toolbar_action = toolbar.toggleViewAction()
toolbar_action.action_id = f'toolbar_{toolbar.ID}'
if toolbar.ID not in visible_toolbars:
toolbar_action.setChecked(False)
toolbar.setVisible(False)
else:
toolbar_action.setChecked(True)
toolbar.setVisible(True)
window_menu.add_action(
toolbar_action,
section=WindowMenuSections.Toolbars
)
return window_menu
| EditorMainWindow |
python | apache__airflow | providers/fab/src/airflow/providers/fab/www/api_connexion/exceptions.py | {
"start": 3201,
"end": 3700
} | class ____(ProblemException):
"""Raise when the server processes a bad request."""
def __init__(
self,
title: str = "Bad Request",
detail: str | None = None,
headers: dict | None = None,
**kwargs: Any,
) -> None:
super().__init__(
status=HTTPStatus.BAD_REQUEST,
type=EXCEPTIONS_LINK_MAP[400],
title=title,
detail=detail,
headers=headers,
**kwargs,
)
| BadRequest |
python | mwaskom__seaborn | seaborn/_core/properties.py | {
"start": 10909,
"end": 11226
} | class ____(IntervalProperty):
"""Thickness of the edges on a patch mark, in points."""
@property
def default_range(self) -> tuple[float, float]:
"""Min and max values used by default for semantic mapping."""
base = mpl.rcParams["patch.linewidth"]
return base * .5, base * 2
| EdgeWidth |
python | Pylons__pyramid | tests/test_settings.py | {
"start": 1014,
"end": 1557
} | class ____(unittest.TestCase):
def _callFUT(self, val):
from pyramid.settings import aslist_cronly
return aslist_cronly(val)
def test_with_list(self):
result = self._callFUT(['abc', 'def'])
self.assertEqual(result, ['abc', 'def'])
def test_with_string(self):
result = self._callFUT('abc def')
self.assertEqual(result, ['abc def'])
def test_with_string_crsep(self):
result = self._callFUT(' abc\n def')
self.assertEqual(result, ['abc', 'def'])
| Test_aslist_cronly |
python | openai__openai-python | src/openai/types/chat/chat_completion_message.py | {
"start": 424,
"end": 774
} | class ____(BaseModel):
end_index: int
"""The index of the last character of the URL citation in the message."""
start_index: int
"""The index of the first character of the URL citation in the message."""
title: str
"""The title of the web resource."""
url: str
"""The URL of the web resource."""
| AnnotationURLCitation |
python | apache__airflow | providers/google/src/airflow/providers/google/leveldb/hooks/leveldb.py | {
"start": 1315,
"end": 6652
} | class ____(BaseHook):
"""
Plyvel Wrapper to Interact With LevelDB Database.
`LevelDB Connection Documentation <https://plyvel.readthedocs.io/en/latest/>`__
"""
conn_name_attr = "leveldb_conn_id"
default_conn_name = "leveldb_default"
conn_type = "leveldb"
hook_name = "LevelDB"
@classmethod
def get_connection_form_widgets(cls) -> dict[str, Any]:
"""Return connection widgets to add to LevelDB connection form."""
from flask_babel import lazy_gettext
from wtforms import BooleanField
return {
"create_if_missing": BooleanField(
lazy_gettext("Create a database if it does not exist"), default=False
),
"error_if_exists": BooleanField(
lazy_gettext("Raise an exception if the database already exists"), default=False
),
}
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
"""Return custom UI field behaviour for LevelDB connection."""
return {
"hidden_fields": ["login", "password", "schema", "port"],
"relabeling": {},
}
def __init__(self, leveldb_conn_id: str = default_conn_name):
super().__init__()
self.leveldb_conn_id = leveldb_conn_id
self.connection = self.get_connection(leveldb_conn_id)
self.db: plyvel.DB | None = None
def get_conn(self, name: str = "/tmp/testdb/", create_if_missing: bool = False, **kwargs) -> DB:
"""
Create `Plyvel DB <https://plyvel.readthedocs.io/en/latest/api.html#DB>`__.
:param name: path to create database e.g. `/tmp/testdb/`)
:param create_if_missing: whether a new database should be created if needed
:param kwargs: other options of creation plyvel.DB. See more in the link above.
:returns: DB
"""
if self.db is not None:
return self.db
self.db = plyvel.DB(name=name, create_if_missing=create_if_missing, **kwargs)
return self.db
def close_conn(self) -> None:
"""Close connection."""
db = self.db
if db is not None:
db.close()
self.db = None
def run(
self,
command: str,
key: bytes,
value: bytes | None = None,
keys: list[bytes] | None = None,
values: list[bytes] | None = None,
) -> bytes | None:
"""
Execute operation with leveldb.
:param command: command of plyvel(python wrap for leveldb) for DB object e.g.
``"put"``, ``"get"``, ``"delete"``, ``"write_batch"``.
:param key: key for command(put,get,delete) execution(, e.g. ``b'key'``, ``b'another-key'``)
:param value: value for command(put) execution(bytes, e.g. ``b'value'``, ``b'another-value'``)
:param keys: keys for command(write_batch) execution(list[bytes], e.g. ``[b'key', b'another-key'])``
:param values: values for command(write_batch) execution e.g. ``[b'value'``, ``b'another-value']``
:returns: value from get or None
"""
if command == "put":
if not value:
raise ValueError("Please provide `value`!")
return self.put(key, value)
if command == "get":
return self.get(key)
if command == "delete":
return self.delete(key)
if command == "write_batch":
if not keys:
raise ValueError("Please provide `keys`!")
if not values:
raise ValueError("Please provide `values`!")
return self.write_batch(keys, values)
raise LevelDBHookException("Unknown command for LevelDB hook")
def put(self, key: bytes, value: bytes):
"""
Put a single value into a leveldb db by key.
:param key: key for put execution, e.g. ``b'key'``, ``b'another-key'``
:param value: value for put execution e.g. ``b'value'``, ``b'another-value'``
"""
if not self.db:
raise AirflowException(DB_NOT_INITIALIZED_BEFORE)
self.db.put(key, value)
def get(self, key: bytes) -> bytes:
"""
Get a single value into a leveldb db by key.
:param key: key for get execution, e.g. ``b'key'``, ``b'another-key'``
:returns: value of key from db.get
"""
if not self.db:
raise AirflowException(DB_NOT_INITIALIZED_BEFORE)
return self.db.get(key)
def delete(self, key: bytes):
"""
Delete a single value in a leveldb db by key.
:param key: key for delete execution, e.g. ``b'key'``, ``b'another-key'``
"""
if not self.db:
raise AirflowException(DB_NOT_INITIALIZED_BEFORE)
self.db.delete(key)
def write_batch(self, keys: list[bytes], values: list[bytes]):
"""
Write batch of values in a leveldb db by keys.
:param keys: keys for write_batch execution e.g. ``[b'key', b'another-key']``
:param values: values for write_batch execution e.g. ``[b'value', b'another-value']``
"""
if not self.db:
raise AirflowException(DB_NOT_INITIALIZED_BEFORE)
with self.db.write_batch() as batch:
for i, key in enumerate(keys):
batch.put(key, values[i])
| LevelDBHook |
python | takluyver__flit | flit_core/flit_core/common.py | {
"start": 3690,
"end": 3735
} | class ____(ProblemInModule): pass
| InvalidVersion |
python | kennethreitz__tablib | src/tablib/formats/_cli.py | {
"start": 195,
"end": 611
} | class ____:
""" Class responsible to export to CLI Format """
title = 'cli'
DEFAULT_FMT = 'plain'
@classmethod
def export_set(cls, dataset, **kwargs):
"""Returns CLI representation of a Dataset."""
if dataset.headers:
kwargs.setdefault('headers', dataset.headers)
kwargs.setdefault('tablefmt', cls.DEFAULT_FMT)
return Tabulate(dataset, **kwargs)
| CLIFormat |
python | python__mypy | mypyc/analysis/ircheck.py | {
"start": 5508,
"end": 13713
} | class ____(OpVisitor[None]):
def __init__(self, parent_fn: FuncIR) -> None:
self.parent_fn = parent_fn
self.errors: list[FnError] = []
def fail(self, source: Op, desc: str) -> None:
self.errors.append(FnError(source=source, desc=desc))
def check_control_op_targets(self, op: ControlOp) -> None:
for target in op.targets():
if target not in self.parent_fn.blocks:
self.fail(source=op, desc=f"Invalid control operation target: {target.label}")
def check_type_coercion(self, op: Op, src: RType, dest: RType) -> None:
if not can_coerce_to(src, dest):
self.fail(
source=op, desc=f"Cannot coerce source type {src.name} to dest type {dest.name}"
)
def check_compatibility(self, op: Op, t: RType, s: RType) -> None:
if not can_coerce_to(t, s) or not can_coerce_to(s, t):
self.fail(source=op, desc=f"{t.name} and {s.name} are not compatible")
def expect_float(self, op: Op, v: Value) -> None:
if not is_float_rprimitive(v.type):
self.fail(op, f"Float expected (actual type is {v.type})")
def expect_non_float(self, op: Op, v: Value) -> None:
if is_float_rprimitive(v.type):
self.fail(op, "Float not expected")
def visit_goto(self, op: Goto) -> None:
self.check_control_op_targets(op)
def visit_branch(self, op: Branch) -> None:
self.check_control_op_targets(op)
def visit_return(self, op: Return) -> None:
self.check_type_coercion(op, op.value.type, self.parent_fn.decl.sig.ret_type)
def visit_unreachable(self, op: Unreachable) -> None:
# Unreachables are checked at a higher level since validation
# requires access to the entire basic block.
pass
def visit_assign(self, op: Assign) -> None:
self.check_type_coercion(op, op.src.type, op.dest.type)
def visit_assign_multi(self, op: AssignMulti) -> None:
for src in op.src:
assert isinstance(op.dest.type, RArray)
self.check_type_coercion(op, src.type, op.dest.type.item_type)
def visit_load_error_value(self, op: LoadErrorValue) -> None:
# Currently it is assumed that all types have an error value.
# Once this is fixed we can validate that the rtype here actually
# has an error value.
pass
def check_tuple_items_valid_literals(self, op: LoadLiteral, t: tuple[object, ...]) -> None:
for x in t:
if x is not None and not isinstance(x, (str, bytes, bool, int, float, complex, tuple)):
self.fail(op, f"Invalid type for item of tuple literal: {type(x)})")
if isinstance(x, tuple):
self.check_tuple_items_valid_literals(op, x)
def check_frozenset_items_valid_literals(self, op: LoadLiteral, s: frozenset[object]) -> None:
for x in s:
if x is None or isinstance(x, (str, bytes, bool, int, float, complex)):
pass
elif isinstance(x, tuple):
self.check_tuple_items_valid_literals(op, x)
else:
self.fail(op, f"Invalid type for item of frozenset literal: {type(x)})")
def visit_load_literal(self, op: LoadLiteral) -> None:
expected_type = None
if op.value is None:
expected_type = "builtins.object"
elif isinstance(op.value, int):
expected_type = "builtins.int"
elif isinstance(op.value, str):
expected_type = "builtins.str"
elif isinstance(op.value, bytes):
expected_type = "builtins.bytes"
elif isinstance(op.value, bool):
expected_type = "builtins.object"
elif isinstance(op.value, float):
expected_type = "builtins.float"
elif isinstance(op.value, complex):
expected_type = "builtins.object"
elif isinstance(op.value, tuple):
expected_type = "builtins.tuple"
self.check_tuple_items_valid_literals(op, op.value)
elif isinstance(op.value, frozenset):
# There's no frozenset_rprimitive type since it'd be pretty useless so we just pretend
# it's a set (when it's really a frozenset).
expected_type = "builtins.set"
self.check_frozenset_items_valid_literals(op, op.value)
assert expected_type is not None, "Missed a case for LoadLiteral check"
if op.type.name not in [expected_type, "builtins.object"]:
self.fail(
op,
f"Invalid literal value for type: value has "
f"type {expected_type}, but op has type {op.type.name}",
)
def visit_get_attr(self, op: GetAttr) -> None:
# Nothing to do.
pass
def visit_set_attr(self, op: SetAttr) -> None:
# Nothing to do.
pass
# Static operations cannot be checked at the function level.
def visit_load_static(self, op: LoadStatic) -> None:
pass
def visit_init_static(self, op: InitStatic) -> None:
pass
def visit_tuple_get(self, op: TupleGet) -> None:
# Nothing to do.
pass
def visit_tuple_set(self, op: TupleSet) -> None:
# Nothing to do.
pass
def visit_inc_ref(self, op: IncRef) -> None:
# Nothing to do.
pass
def visit_dec_ref(self, op: DecRef) -> None:
# Nothing to do.
pass
def visit_call(self, op: Call) -> None:
# Length is checked in constructor, and return type is set
# in a way that can't be incorrect
for arg_value, arg_runtime in zip(op.args, op.fn.sig.args):
self.check_type_coercion(op, arg_value.type, arg_runtime.type)
def visit_method_call(self, op: MethodCall) -> None:
# Similar to above, but we must look up method first.
method_decl = op.receiver_type.class_ir.method_decl(op.method)
if method_decl.kind == FUNC_STATICMETHOD:
decl_index = 0
else:
decl_index = 1
if len(op.args) + decl_index != len(method_decl.sig.args):
self.fail(op, "Incorrect number of args for method call.")
# Skip the receiver argument (self)
for arg_value, arg_runtime in zip(op.args, method_decl.sig.args[decl_index:]):
self.check_type_coercion(op, arg_value.type, arg_runtime.type)
def visit_cast(self, op: Cast) -> None:
pass
def visit_box(self, op: Box) -> None:
pass
def visit_unbox(self, op: Unbox) -> None:
pass
def visit_raise_standard_error(self, op: RaiseStandardError) -> None:
pass
def visit_call_c(self, op: CallC) -> None:
pass
def visit_primitive_op(self, op: PrimitiveOp) -> None:
pass
def visit_truncate(self, op: Truncate) -> None:
pass
def visit_extend(self, op: Extend) -> None:
pass
def visit_load_global(self, op: LoadGlobal) -> None:
pass
def visit_int_op(self, op: IntOp) -> None:
self.expect_non_float(op, op.lhs)
self.expect_non_float(op, op.rhs)
def visit_comparison_op(self, op: ComparisonOp) -> None:
self.check_compatibility(op, op.lhs.type, op.rhs.type)
self.expect_non_float(op, op.lhs)
self.expect_non_float(op, op.rhs)
def visit_float_op(self, op: FloatOp) -> None:
self.expect_float(op, op.lhs)
self.expect_float(op, op.rhs)
def visit_float_neg(self, op: FloatNeg) -> None:
self.expect_float(op, op.src)
def visit_float_comparison_op(self, op: FloatComparisonOp) -> None:
self.expect_float(op, op.lhs)
self.expect_float(op, op.rhs)
def visit_load_mem(self, op: LoadMem) -> None:
pass
def visit_set_mem(self, op: SetMem) -> None:
pass
def visit_get_element_ptr(self, op: GetElementPtr) -> None:
pass
def visit_set_element(self, op: SetElement) -> None:
pass
def visit_load_address(self, op: LoadAddress) -> None:
pass
def visit_keep_alive(self, op: KeepAlive) -> None:
pass
def visit_unborrow(self, op: Unborrow) -> None:
pass
| OpChecker |
python | openai__openai-python | src/openai/lib/_realtime.py | {
"start": 752,
"end": 2344
} | class ____(Calls):
@override
def create(
self,
*,
sdp: str,
session: RealtimeSessionCreateRequestParam | Omit = omit,
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> _legacy_response.HttpxBinaryResponseContent:
if session is omit:
extra_headers = {"Accept": "application/sdp", "Content-Type": "application/sdp", **(extra_headers or {})}
return self._post(
"/realtime/calls",
body=sdp.encode("utf-8"),
options=make_request_options(extra_headers=extra_headers, extra_query=extra_query, timeout=timeout),
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
extra_headers = {"Accept": "application/sdp", "Content-Type": "multipart/form-data", **(extra_headers or {})}
session_payload = maybe_transform(session, RealtimeSessionCreateRequestParam)
files = [
("sdp", (None, sdp.encode("utf-8"), "application/sdp")),
("session", (None, json.dumps(session_payload).encode("utf-8"), "application/json")),
]
return self._post(
"/realtime/calls",
files=files,
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
| _Calls |
python | Textualize__textual | tests/animations/test_scrolling_animation.py | {
"start": 257,
"end": 2414
} | class ____(App[None]):
def compose(self) -> ComposeResult:
with VerticalScroll():
for _ in range(100):
yield Label()
async def test_scrolling_animates_on_full() -> None:
app = TallApp()
app.animation_level = "full"
async with app.run_test() as pilot:
vertical_scroll = app.query_one(VerticalScroll)
animator = app.animator
# Freeze time at 0 before triggering the animation.
animator._get_time = lambda *_: 0
vertical_scroll.scroll_end(duration=10000)
await pilot.pause()
# Freeze time after the animation start and before animation end.
animator._get_time = lambda *_: 0.01
# Move to the next frame.
animator()
assert animator.is_being_animated(vertical_scroll, "scroll_y")
async def test_scrolling_animates_on_basic() -> None:
app = TallApp()
app.animation_level = "basic"
async with app.run_test() as pilot:
vertical_scroll = app.query_one(VerticalScroll)
animator = app.animator
# Freeze time at 0 before triggering the animation.
animator._get_time = lambda *_: 0
vertical_scroll.scroll_end(duration=10000)
await pilot.pause()
# Freeze time after the animation start and before animation end.
animator._get_time = lambda *_: 0.01
# Move to the next frame.
animator()
assert animator.is_being_animated(vertical_scroll, "scroll_y")
async def test_scrolling_does_not_animate_on_none() -> None:
app = TallApp()
app.animation_level = "none"
async with app.run_test() as pilot:
vertical_scroll = app.query_one(VerticalScroll)
animator = app.animator
# Freeze time at 0 before triggering the animation.
animator._get_time = lambda *_: 0
vertical_scroll.scroll_end(duration=10000)
await pilot.pause()
# Freeze time after the animation start and before animation end.
animator._get_time = lambda *_: 0.01
# Move to the next frame.
animator()
assert not animator.is_being_animated(vertical_scroll, "scroll_y")
| TallApp |
python | redis__redis-py | redis/exceptions.py | {
"start": 176,
"end": 231
} | class ____(ConnectionError):
pass
| AuthenticationError |
python | PyCQA__pylint | pylint/pyreverse/utils.py | {
"start": 3201,
"end": 8346
} | class ____:
"""Visit a project by traversing the locals dictionary.
* visit_<class name> on entering a node, where class name is the class of
the node in lower case
* leave_<class name> on leaving a node, where class name is the class of
the node in lower case
"""
def __init__(self) -> None:
self._cache: dict[type[nodes.NodeNG], _CallbackTupleT] = {}
self._visited: set[nodes.NodeNG] = set()
def get_callbacks(self, node: nodes.NodeNG) -> _CallbackTupleT:
"""Get callbacks from handler for the visited node."""
klass = node.__class__
methods = self._cache.get(klass)
if methods is None:
kid = klass.__name__.lower()
e_method = getattr(
self, f"visit_{kid}", getattr(self, "visit_default", None)
)
l_method = getattr(
self, f"leave_{kid}", getattr(self, "leave_default", None)
)
self._cache[klass] = (e_method, l_method)
else:
e_method, l_method = methods
return e_method, l_method
def visit(self, node: nodes.NodeNG) -> Any:
"""Launch the visit starting from the given node."""
if node in self._visited:
return None
self._visited.add(node)
methods = self.get_callbacks(node)
if methods[0] is not None:
methods[0](node)
if hasattr(node, "locals"): # skip Instance and other proxy
for local_node in node.values():
self.visit(local_node)
if methods[1] is not None:
return methods[1](node)
return None
def get_annotation_label(ann: nodes.Name | nodes.NodeNG) -> str:
if isinstance(ann, nodes.Name) and ann.name is not None:
return ann.name # type: ignore[no-any-return]
if isinstance(ann, nodes.NodeNG):
return ann.as_string() # type: ignore[no-any-return]
return ""
def get_annotation(
node: nodes.AssignAttr | nodes.AssignName,
) -> nodes.Name | nodes.Subscript | None:
"""Return the annotation for `node`."""
ann = None
if isinstance(node.parent, nodes.AnnAssign):
ann = node.parent.annotation
elif isinstance(node, nodes.AssignAttr):
init_method = node.parent.parent
try:
annotations = dict(zip(init_method.locals, init_method.args.annotations))
ann = annotations.get(node.parent.value.name)
except AttributeError:
pass
else:
return ann
try:
default, *_ = node.infer()
except astroid.InferenceError:
default = ""
label = get_annotation_label(ann)
if (
ann
and getattr(default, "value", "value") is None
and not label.startswith("Optional")
and not (
isinstance(ann, nodes.BinOp)
and any(
isinstance(child, nodes.Const) and child.value is None
for child in ann.get_children()
)
)
):
label = rf"Optional[{label}]"
if label and ann:
ann.name = label
return ann
def infer_node(node: nodes.AssignAttr | nodes.AssignName) -> set[InferenceResult]:
"""Return a set containing the node annotation if it exists
otherwise return a set of the inferred types using the NodeNG.infer method.
"""
ann = get_annotation(node)
try:
if ann:
if isinstance(ann, nodes.Subscript) or (
isinstance(ann, nodes.BinOp) and ann.op == "|"
):
return {ann}
return set(ann.infer())
return set(node.infer())
except astroid.InferenceError:
return {ann} if ann else set()
def check_graphviz_availability() -> None:
"""Check if the ``dot`` command is available on the machine.
This is needed if image output is desired and ``dot`` is used to convert
from *.dot or *.gv into the final output format.
"""
if shutil.which("dot") is None:
print("'Graphviz' needs to be installed for your chosen output format.")
sys.exit(32)
def check_if_graphviz_supports_format(output_format: str) -> None:
"""Check if the ``dot`` command supports the requested output format.
This is needed if image output is desired and ``dot`` is used to convert
from *.gv into the final output format.
"""
dot_output = subprocess.run(
["dot", "-T?"], capture_output=True, check=False, encoding="utf-8"
)
match = re.match(
pattern=r".*Use one of: (?P<formats>(\S*\s?)+)",
string=dot_output.stderr.strip(),
)
if not match:
print(
"Unable to determine Graphviz supported output formats. "
"Pyreverse will continue, but subsequent error messages "
"regarding the output format may come from Graphviz directly."
)
return
supported_formats = match.group("formats")
if output_format not in supported_formats.split():
print(
f"Format {output_format} is not supported by Graphviz. It supports: {supported_formats}"
)
sys.exit(32)
| LocalsVisitor |
python | django__django | tests/urlpatterns_reverse/utils.py | {
"start": 61,
"end": 714
} | class ____:
urlpatterns = [
path("inner/", views.empty_view, name="urlobject-view"),
re_path(
r"^inner/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$",
views.empty_view,
name="urlobject-view",
),
re_path(r"^inner/\+\\\$\*/$", views.empty_view, name="urlobject-special-view"),
]
def __init__(self, app_name, namespace=None):
self.app_name = app_name
self.namespace = namespace
@property
def urls(self):
return (self.urlpatterns, self.app_name), self.namespace
@property
def app_urls(self):
return self.urlpatterns, self.app_name
| URLObject |
python | apache__airflow | providers/opensearch/src/airflow/providers/opensearch/log/os_task_handler.py | {
"start": 3814,
"end": 26498
} | class ____(FileTaskHandler, ExternalLoggingMixin, LoggingMixin):
"""
OpensearchTaskHandler is a Python log handler that reads and writes logs to OpenSearch.
Like the ElasticsearchTaskHandler, Airflow itself does not handle the indexing of logs.
Instead, logs are flushed to local files, and additional software (e.g., Filebeat, Logstash)
may be required to ship logs to OpenSearch. This handler then enables fetching and displaying
logs from OpenSearch.
To efficiently query and sort Elasticsearch results, this handler assumes each
log message has a field `log_id` consists of ti primary keys:
`log_id = {dag_id}-{task_id}-{logical_date}-{try_number}`
Log messages with specific log_id are sorted based on `offset`,
which is a unique integer indicates log message's order.
Timestamps here are unreliable because multiple log messages
might have the same timestamp.
:param base_log_folder: Base folder to store logs locally.
:param end_of_log_mark: A marker string to signify the end of logs.
:param write_stdout: Whether to also write logs to stdout.
:param json_format: Whether to format logs as JSON.
:param json_fields: Comma-separated list of fields to include in the JSON log output.
:param host: OpenSearch host name.
:param port: OpenSearch port.
:param username: Username for OpenSearch authentication.
:param password: Password for OpenSearch authentication.
:param host_field: The field name for the host in the logs (default is "host").
:param offset_field: The field name for the log offset (default is "offset").
:param index_patterns: Index pattern or template for storing logs.
:param index_patterns_callable: Callable that dynamically generates index patterns based on context.
:param os_kwargs: Additional OpenSearch client options. This can be set to "default_os_kwargs" to
load the default configuration from Airflow's settings.
"""
PAGE = 0
MAX_LINE_PER_PAGE = 1000
LOG_NAME = "Opensearch"
trigger_should_wrap = True
def __init__(
self,
base_log_folder: str,
end_of_log_mark: str,
write_stdout: bool,
json_format: bool,
json_fields: str,
host: str,
port: int,
username: str,
password: str,
host_field: str = "host",
offset_field: str = "offset",
index_patterns: str = conf.get("opensearch", "index_patterns", fallback="_all"),
index_patterns_callable: str = conf.get("opensearch", "index_patterns_callable", fallback=""),
os_kwargs: dict | None | Literal["default_os_kwargs"] = "default_os_kwargs",
max_bytes: int = 0,
backup_count: int = 0,
delay: bool = False,
) -> None:
os_kwargs = os_kwargs or {}
if os_kwargs == "default_os_kwargs":
os_kwargs = get_os_kwargs_from_config()
# support log file size handling of FileTaskHandler
super().__init__(
base_log_folder=base_log_folder, max_bytes=max_bytes, backup_count=backup_count, delay=delay
)
self.closed = False
self.mark_end_on_close = True
self.end_of_log_mark = end_of_log_mark.strip()
self.write_stdout = write_stdout
self.json_format = json_format
self.json_fields = [label.strip() for label in json_fields.split(",")]
self.host = self.format_url(host)
self.host_field = host_field
self.offset_field = offset_field
self.index_patterns = index_patterns
self.index_patterns_callable = index_patterns_callable
self.context_set = False
self.client = OpenSearch(
hosts=[{"host": host, "port": port}],
http_auth=(username, password),
**os_kwargs,
)
self.formatter: logging.Formatter
self.handler: logging.FileHandler | logging.StreamHandler
self._doc_type_map: dict[Any, Any] = {}
self._doc_type: list[Any] = []
def set_context(self, ti: TaskInstance, *, identifier: str | None = None) -> None:
"""
Provide task_instance context to airflow task handler.
:param ti: task instance object
:param identifier: if set, identifies the Airflow component which is relaying logs from
exceptional scenarios related to the task instance
"""
is_trigger_log_context = getattr(ti, "is_trigger_log_context", None)
is_ti_raw = getattr(ti, "raw", None)
self.mark_end_on_close = not is_ti_raw and not is_trigger_log_context
date_key = "logical_date" if AIRFLOW_V_3_0_PLUS else "execution_date"
if self.json_format:
self.formatter = OpensearchJSONFormatter(
fmt=self.formatter._fmt,
json_fields=[*self.json_fields, self.offset_field],
extras={
"dag_id": str(ti.dag_id),
"task_id": str(ti.task_id),
date_key: (
self._clean_date(ti.logical_date)
if AIRFLOW_V_3_0_PLUS
else self._clean_date(ti.execution_date)
),
"try_number": str(ti.try_number),
"log_id": self._render_log_id(ti, ti.try_number),
},
)
if self.write_stdout:
if self.context_set:
# We don't want to re-set up the handler if this logger has
# already been initialized
return
self.handler = logging.StreamHandler(stream=sys.__stdout__)
self.handler.setLevel(self.level)
self.handler.setFormatter(self.formatter)
else:
super().set_context(ti, identifier=identifier)
self.context_set = True
def emit(self, record):
if self.handler:
setattr(record, self.offset_field, int(time.time() * (10**9)))
self.handler.emit(record)
def close(self) -> None:
# When application exit, system shuts down all handlers by
# calling close method. Here we check if logger is already
# closed to prevent uploading the log to remote storage multiple
# times when `logging.shutdown` is called.
if self.closed:
return
if not self.mark_end_on_close:
# when we're closing due to task deferral, don't mark end of log
self.closed = True
return
# Case which context of the handler was not set.
if self.handler is None:
self.closed = True
return
# Reopen the file stream, because FileHandler.close() would be called
# first in logging.shutdown() and the stream in it would be set to None.
if self.handler.stream is None or self.handler.stream.closed:
self.handler.stream = self.handler._open() # type: ignore[union-attr]
# Mark the end of file using end of log mark,
# so we know where to stop while auto-tailing.
self.emit(logging.makeLogRecord({"msg": self.end_of_log_mark}))
if self.write_stdout:
self.handler.close()
sys.stdout = sys.__stdout__
super().close()
self.closed = True
def _read_grouped_logs(self):
return True
@staticmethod
def _clean_date(value: datetime | None) -> str:
"""
Clean up a date value so that it is safe to query in elasticsearch by removing reserved characters.
https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html#_reserved_characters
"""
if value is None:
return ""
return value.strftime("%Y_%m_%dT%H_%M_%S_%f")
def _render_log_id(self, ti: TaskInstance | TaskInstanceKey, try_number: int) -> str:
from airflow.models.taskinstance import TaskInstanceKey
with create_session() as session:
if isinstance(ti, TaskInstanceKey):
ti = _ensure_ti(ti, session)
dag_run = ti.get_dagrun(session=session)
if USE_PER_RUN_LOG_ID:
log_id_template = dag_run.get_log_template(session=session).elasticsearch_id
if self.json_format:
data_interval_start = self._clean_date(dag_run.data_interval_start)
data_interval_end = self._clean_date(dag_run.data_interval_end)
logical_date = self._clean_date(dag_run.logical_date)
else:
data_interval_start = (
dag_run.data_interval_start.isoformat() if dag_run.data_interval_start else ""
)
data_interval_end = dag_run.data_interval_end.isoformat() if dag_run.data_interval_end else ""
logical_date = dag_run.logical_date.isoformat() if dag_run.logical_date else ""
return log_id_template.format(
dag_id=ti.dag_id,
task_id=ti.task_id,
run_id=getattr(ti, "run_id", ""),
data_interval_start=data_interval_start,
data_interval_end=data_interval_end,
logical_date=logical_date,
execution_date=logical_date,
try_number=try_number,
map_index=getattr(ti, "map_index", ""),
)
def _read(
self, ti: TaskInstance, try_number: int, metadata: LogMetadata | None = None
) -> tuple[OsLogMsgType, LogMetadata]:
"""
Endpoint for streaming log.
:param ti: task instance object
:param try_number: try_number of the task instance
:param metadata: log metadata,
can be used for steaming log reading and auto-tailing.
:return: a list of tuple with host and log documents, metadata.
"""
if not metadata:
# LogMetadata(TypedDict) is used as type annotation for log_reader; added ignore to suppress mypy error
metadata = {"offset": 0} # type: ignore[assignment]
metadata = cast("LogMetadata", metadata)
if "offset" not in metadata:
metadata["offset"] = 0
offset = metadata["offset"]
log_id = self._render_log_id(ti, try_number)
response = self._os_read(log_id, offset, ti)
if response is not None and response.hits:
logs_by_host = self._group_logs_by_host(response)
next_offset = attrgetter(self.offset_field)(response[-1])
else:
logs_by_host = None
next_offset = offset
# Ensure a string here. Large offset numbers will get JSON.parsed incorrectly
# on the client. Sending as a string prevents this issue.
# https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Number/MAX_SAFE_INTEGER
metadata["offset"] = str(next_offset)
# end_of_log_mark may contain characters like '\n' which is needed to
# have the log uploaded but will not be stored in elasticsearch.
metadata["end_of_log"] = False
if logs_by_host:
if any(x[-1].message == self.end_of_log_mark for x in logs_by_host.values()):
metadata["end_of_log"] = True
cur_ts = pendulum.now()
if "last_log_timestamp" in metadata:
last_log_ts = timezone.parse(metadata["last_log_timestamp"])
# if we are not getting any logs at all after more than N seconds of trying,
# assume logs do not exist
if int(next_offset) == 0 and cur_ts.diff(last_log_ts).in_seconds() > 5:
metadata["end_of_log"] = True
missing_log_message = (
f"*** Log {log_id} not found in Opensearch. "
"If your task started recently, please wait a moment and reload this page. "
"Otherwise, the logs for this task instance may have been removed."
)
if AIRFLOW_V_3_0_PLUS:
from airflow.utils.log.file_task_handler import StructuredLogMessage
# return list of StructuredLogMessage for Airflow 3.0+
return [StructuredLogMessage(event=missing_log_message)], metadata
return [("", missing_log_message)], metadata # type: ignore[list-item]
if (
# Assume end of log after not receiving new log for N min,
cur_ts.diff(last_log_ts).in_minutes() >= 5
# if max_offset specified, respect it
or ("max_offset" in metadata and int(offset) >= int(metadata["max_offset"]))
):
metadata["end_of_log"] = True
if int(offset) != int(next_offset) or "last_log_timestamp" not in metadata:
metadata["last_log_timestamp"] = str(cur_ts)
# If we hit the end of the log, remove the actual end_of_log message
# to prevent it from showing in the UI.
def concat_logs(hits: list[Hit]):
log_range = (len(hits) - 1) if hits[-1].message == self.end_of_log_mark else len(hits)
return "\n".join(self._format_msg(hits[i]) for i in range(log_range))
if logs_by_host:
if AIRFLOW_V_3_0_PLUS:
from airflow.utils.log.file_task_handler import StructuredLogMessage
header = [
StructuredLogMessage(
event="::group::Log message source details",
sources=[host for host in logs_by_host.keys()],
), # type: ignore[call-arg]
StructuredLogMessage(event="::endgroup::"),
]
# Flatten all hits, filter to only desired fields, and construct StructuredLogMessage objects
message = header + [
StructuredLogMessage(
**{k: v for k, v in hit.to_dict().items() if k.lower() in TASK_LOG_FIELDS}
)
for hits in logs_by_host.values()
for hit in hits
]
else:
message = [(host, concat_logs(hits)) for host, hits in logs_by_host.items()] # type: ignore[misc]
else:
message = []
return message, metadata
def _os_read(self, log_id: str, offset: int | str, ti: TaskInstance) -> OpensearchResponse | None:
"""
Return the logs matching log_id in Elasticsearch and next offset or ''.
:param log_id: the log_id of the log to read.
:param offset: the offset start to read log from.
:param ti: the task instance object
:meta private:
"""
query: dict[Any, Any] = {
"query": {
"bool": {
"filter": [{"range": {self.offset_field: {"gt": int(offset)}}}],
"must": [{"match_phrase": {"log_id": log_id}}],
}
}
}
index_patterns = self._get_index_patterns(ti)
try:
max_log_line = self.client.count(index=index_patterns, body=query)["count"]
except NotFoundError as e:
self.log.exception("The target index pattern %s does not exist", index_patterns)
raise e
if max_log_line != 0:
try:
res = self.client.search(
index=index_patterns,
body=query,
sort=[self.offset_field],
size=self.MAX_LINE_PER_PAGE,
from_=self.MAX_LINE_PER_PAGE * self.PAGE,
)
return OpensearchResponse(self, res)
except Exception as err:
self.log.exception("Could not read log with log_id: %s. Exception: %s", log_id, err)
return None
def _get_index_patterns(self, ti: TaskInstance | None) -> str:
"""
Get index patterns by calling index_patterns_callable, if provided, or the configured index_patterns.
:param ti: A TaskInstance object or None.
"""
if self.index_patterns_callable:
self.log.debug("Using index_patterns_callable: %s", self.index_patterns_callable)
index_pattern_callable_obj = import_string(self.index_patterns_callable)
return index_pattern_callable_obj(ti)
self.log.debug("Using index_patterns: %s", self.index_patterns)
return self.index_patterns
def _get_result(self, hit: dict[Any, Any], parent_class=None) -> Hit:
"""
Process a hit (i.e., a result) from an Elasticsearch response and transform it into a class instance.
The transformation depends on the contents of the hit. If the document in hit contains a nested field,
the '_resolve_nested' method is used to determine the appropriate class (based on the nested path).
If the hit has a document type that is present in the '_doc_type_map', the corresponding class is
used. If not, the method iterates over the '_doc_type' classes and uses the first one whose '_matches'
method returns True for the hit.
If the hit contains any 'inner_hits', these are also processed into 'ElasticSearchResponse' instances
using the determined class.
Finally, the transformed hit is returned. If the determined class has a 'from_es' method, this is
used to transform the hit
An example of the hit argument:
{'_id': 'jdeZT4kBjAZqZnexVUxk',
'_index': '.ds-filebeat-8.8.2-2023.07.09-000001',
'_score': 2.482621,
'_source': {'@timestamp': '2023-07-13T14:13:15.140Z',
'asctime': '2023-07-09T07:47:43.907+0000',
'container': {'id': 'airflow'},
'dag_id': 'example_bash_operator',
'ecs': {'version': '8.0.0'},
'logical_date': '2023_07_09T07_47_32_000000',
'filename': 'taskinstance.py',
'input': {'type': 'log'},
'levelname': 'INFO',
'lineno': 1144,
'log': {'file': {'path': "/opt/airflow/Documents/GitHub/airflow/logs/
dag_id=example_bash_operator'/run_id=owen_run_run/
task_id=run_after_loop/attempt=1.log"},
'offset': 0},
'log.offset': 1688888863907337472,
'log_id': 'example_bash_operator-run_after_loop-owen_run_run--1-1',
'message': 'Dependencies all met for dep_context=non-requeueable '
'deps ti=<TaskInstance: '
'example_bash_operator.run_after_loop owen_run_run '
'[queued]>',
'task_id': 'run_after_loop',
'try_number': '1'},
'_type': '_doc'}
"""
doc_class = Hit
dt = hit.get("_type")
if "_nested" in hit:
doc_class = self._resolve_nested(hit, parent_class)
elif dt in self._doc_type_map:
doc_class = self._doc_type_map[dt]
else:
for doc_type in self._doc_type:
if hasattr(doc_type, "_matches") and doc_type._matches(hit):
doc_class = doc_type
break
for t in hit.get("inner_hits", ()):
hit["inner_hits"][t] = OpensearchResponse(self, hit["inner_hits"][t], doc_class=doc_class)
# callback should get the Hit class if "from_es" is not defined
callback: type[Hit] | Callable[..., Any] = getattr(doc_class, "from_es", doc_class)
return callback(hit)
def _resolve_nested(self, hit: dict[Any, Any], parent_class=None) -> type[Hit]:
"""
Resolve nested hits from Elasticsearch by iteratively navigating the `_nested` field.
The result is used to fetch the appropriate document class to handle the hit.
This method can be used with nested Elasticsearch fields which are structured
as dictionaries with "field" and "_nested" keys.
"""
doc_class = Hit
nested_path: list[str] = []
nesting = hit["_nested"]
while nesting and "field" in nesting:
nested_path.append(nesting["field"])
nesting = nesting.get("_nested")
nested_path_str = ".".join(nested_path)
if hasattr(parent_class, "_index"):
nested_field = parent_class._index.resolve_field(nested_path_str)
if nested_field is not None:
return nested_field._doc_class
return doc_class
def _group_logs_by_host(self, response: OpensearchResponse) -> dict[str, list[Hit]]:
grouped_logs = defaultdict(list)
for hit in response:
key = getattr_nested(hit, self.host_field, None) or self.host
grouped_logs[key].append(hit)
return grouped_logs
def _format_msg(self, hit: Hit):
"""Format ES Record to match settings.LOG_FORMAT when used with json_format."""
# Using formatter._style.format makes it future proof i.e.
# if we change the formatter style from '%' to '{' or '$', this will still work
if self.json_format:
with contextlib.suppress(Exception):
return self.formatter._style.format(
logging.makeLogRecord({**LOG_LINE_DEFAULTS, **hit.to_dict()})
)
# Just a safe-guard to preserve backwards-compatibility
return hit.message
@property
def supports_external_link(self) -> bool:
"""
Whether we can support external links.
TODO: It should support frontend just like ElasticSearchTaskhandler.
"""
return False
def get_external_log_url(self, task_instance, try_number) -> str:
"""
Create an address for an external log collecting service.
TODO: It should support frontend just like ElasticSearchTaskhandler.
"""
return ""
@property
def log_name(self) -> str:
"""The log name."""
return self.LOG_NAME
@staticmethod
def format_url(host: str) -> str:
"""
Format the given host string to ensure it starts with 'http' and check if it represents a valid URL.
:params host: The host string to format and check.
"""
parsed_url = urlparse(host)
if parsed_url.scheme not in ("http", "https"):
host = "http://" + host
parsed_url = urlparse(host)
if not parsed_url.netloc:
raise ValueError(f"'{host}' is not a valid URL.")
return host
| OpensearchTaskHandler |
python | huggingface__transformers | src/transformers/models/bamba/modular_bamba.py | {
"start": 31943,
"end": 36268
} | class ____(JambaAttentionDecoderLayer):
def __init__(self, config: BambaConfig, layer_idx: int, layer_type: str = "mamba"):
super().__init__(config, layer_idx)
del self.self_attn
num_experts = 1
ffn_layer_class = BambaMLP if num_experts == 1 else None
self.feed_forward = ffn_layer_class(config)
self.layer_type = layer_type
if layer_type == "mamba":
self.mamba = BambaMixer(config=config, layer_idx=layer_idx)
elif layer_type == "attention":
self.self_attn = BambaAttention(config, layer_idx)
else:
raise ValueError("Invalid layer_type")
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[HybridMambaAttentionDynamicCache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[BambaFlashAttentionKwargs],
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
past_key_values (`HybridMambaAttentionDynamicCache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence.
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
with `head_dim` being the embedding dimension of each attention head.
kwargs (`dict`, *optional*):
Arbitrary kwargs. Can be used to provide `BambaFlashAttentionKwargs` for
padding-free training and/or improve torch.compile performance.
"""
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# this is a hybrid decoder layer
if self.layer_type == "mamba":
hidden_states = self.mamba(
hidden_states=hidden_states,
cache_params=past_key_values,
cache_position=cache_position,
attention_mask=attention_mask,
**kwargs,
)
self_attn_weights = None
elif self.layer_type == "attention":
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
# residual connection after attention
hidden_states = residual + hidden_states
# feed-forward
residual = hidden_states
hidden_states = self.pre_ff_layernorm(hidden_states)
hidden_states = self.feed_forward(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
return outputs
@auto_docstring
| BambaDecoderLayer |
python | huggingface__transformers | src/transformers/models/oneformer/modeling_oneformer.py | {
"start": 46349,
"end": 47965
} | class ____(nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed.
Copy-paste from torchvision.misc.ops with added eps before rqsrt, without which any other models than
torchvision.models.resnet[18,34,50,101] produce nans.
"""
def __init__(self, n):
super().__init__()
self.register_buffer("weight", torch.ones(n))
self.register_buffer("bias", torch.zeros(n))
self.register_buffer("running_mean", torch.zeros(n))
self.register_buffer("running_var", torch.ones(n))
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
num_batches_tracked_key = prefix + "num_batches_tracked"
if num_batches_tracked_key in state_dict:
del state_dict[num_batches_tracked_key]
super()._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
)
def forward(self, x):
weight = self.weight.reshape(1, -1, 1, 1)
bias = self.bias.reshape(1, -1, 1, 1)
running_var = self.running_var.reshape(1, -1, 1, 1)
running_mean = self.running_mean.reshape(1, -1, 1, 1)
epsilon = 1e-5
scale = weight * (running_var + epsilon).rsqrt()
bias = bias - running_mean * scale
return x * scale + bias
# Modified from transformers.models.detr.modeling_deformable_detr.DeformableDetrMultiscaleDeformableAttention with DeformableDetr->OneFormerPixelDecoderEncoder
| OneFormerPixelDecoderFrozenBatchNorm2d |
python | pyodide__pyodide | src/py/_pyodide/jsbind.py | {
"start": 1754,
"end": 1814
} | class ____:
js2py = js2py_deep
py2js = py2js_deep
| Deep |
python | getsentry__sentry | src/sentry/api/endpoints/project_overview.py | {
"start": 913,
"end": 1068
} | class ____(StaffPermissionMixin, RelaxedProjectPermission):
pass
@extend_schema(tags=["Projects"])
@region_silo_endpoint
| RelaxedProjectAndStaffPermission |
python | scrapy__scrapy | tests/test_loader.py | {
"start": 6033,
"end": 6120
} | class ____(InitializationTestMixin):
item_class = NameItem
| TestInitializationFromItem |
python | sqlalchemy__sqlalchemy | test/sql/test_operators.py | {
"start": 97970,
"end": 103883
} | class ____(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = "default"
table1 = table("mytable", column("myid", Integer), column("name", String))
@testing.combinations(
(~literal(5, NullType), "NOT :param_1"),
(~-literal(5, NullType), "NOT -:param_1"),
)
def test_nonsensical_negates(self, expr, expected):
"""exercise codepaths in the UnaryExpression._negate() method where the
type is not BOOLEAN"""
self.assert_compile(expr, expected)
def test_negate_operators_1(self):
for py_op, op in ((operator.neg, "-"), (operator.inv, "NOT ")):
for expr, expected in (
(self.table1.c.myid, "mytable.myid"),
(literal(5, Integer), ":param_1"),
):
self.assert_compile(py_op(expr), "%s%s" % (op, expected))
def test_negate_operators_2(self):
self.assert_compile(
self.table1.select().where(
(self.table1.c.myid != 12) & ~(self.table1.c.name == "john")
),
"SELECT mytable.myid, mytable.name FROM "
"mytable WHERE mytable.myid != :myid_1 "
"AND mytable.name != :name_1",
)
def test_negate_operators_3(self):
self.assert_compile(
self.table1.select().where(
(self.table1.c.myid != 12)
& ~(self.table1.c.name.between("jack", "john"))
),
"SELECT mytable.myid, mytable.name FROM "
"mytable WHERE mytable.myid != :myid_1 AND "
"mytable.name NOT BETWEEN :name_1 AND :name_2",
)
def test_negate_operators_4(self):
self.assert_compile(
self.table1.select().where(
(self.table1.c.myid != 12)
& ~and_(
self.table1.c.name == "john",
self.table1.c.name == "ed",
self.table1.c.name == "fred",
)
),
"SELECT mytable.myid, mytable.name FROM "
"mytable WHERE mytable.myid != :myid_1 AND "
"NOT (mytable.name = :name_1 AND mytable.name = :name_2 "
"AND mytable.name = :name_3)",
)
def test_negate_operators_5(self):
self.assert_compile(
self.table1.select().where(
(self.table1.c.myid != "12")
& ~and_(
literal("somethingboolean", Boolean), literal("q", Boolean)
)
),
"SELECT mytable.myid, mytable.name FROM "
"mytable WHERE mytable.myid != :myid_1 AND NOT "
"(:param_1 = 1 AND :param_2 = 1)",
)
def test_negate_operators_6(self):
self.assert_compile(
self.table1.select().where(
(self.table1.c.myid != "12")
& ~literal("somethingboolean", Boolean)
),
"SELECT mytable.myid, mytable.name FROM "
"mytable WHERE mytable.myid != :myid_1 AND NOT :param_1",
supports_native_boolean=True,
use_default_dialect=True,
)
def test_negate_operator_type(self):
is_((-self.table1.c.myid).type, self.table1.c.myid.type)
def test_negate_operator_label(self):
orig_expr = or_(
self.table1.c.myid == 1, self.table1.c.myid == 2
).label("foo")
expr = not_(orig_expr)
isinstance(expr, Label)
eq_(expr.name, "foo")
is_not(expr, orig_expr)
is_(expr._element.operator, operator.inv) # e.g. and not false_
self.assert_compile(
expr,
"NOT (mytable.myid = :myid_1 OR mytable.myid = :myid_2)",
dialect=default.DefaultDialect(supports_native_boolean=False),
)
def test_negate_operator_self_group(self):
orig_expr = or_(
self.table1.c.myid == 1, self.table1.c.myid == 2
).self_group()
expr = not_(orig_expr)
is_not(expr, orig_expr)
self.assert_compile(
expr,
"NOT (mytable.myid = :myid_1 OR mytable.myid = :myid_2)",
dialect=default.DefaultDialect(supports_native_boolean=False),
)
def test_implicitly_boolean(self):
# test for expressions that the database always considers as boolean
# even if there is no boolean datatype.
assert not self.table1.c.myid._is_implicitly_boolean
assert (self.table1.c.myid == 5)._is_implicitly_boolean
assert (self.table1.c.myid == 5).self_group()._is_implicitly_boolean
assert (self.table1.c.myid == 5).label("x")._is_implicitly_boolean
assert not_(self.table1.c.myid == 5)._is_implicitly_boolean
assert or_(
self.table1.c.myid == 5, self.table1.c.myid == 7
)._is_implicitly_boolean
assert not column("x", Boolean)._is_implicitly_boolean
assert not (self.table1.c.myid + 5)._is_implicitly_boolean
assert not not_(column("x", Boolean))._is_implicitly_boolean
assert (
not select(self.table1.c.myid)
.scalar_subquery()
._is_implicitly_boolean
)
assert not text("x = y")._is_implicitly_boolean
assert not literal_column("x = y")._is_implicitly_boolean
def test_scalar_select(self):
t = self.table1
expr = select(t.c.myid).where(t.c.myid > 5).scalar_subquery()
self.assert_compile(
not_(expr),
"NOT (SELECT mytable.myid FROM mytable "
"WHERE mytable.myid > :myid_1)",
params={"myid_1": 5},
)
def test_scalar_values(self):
t = self.table1
expr = values(t.c.myid).data([(7,), (42,)]).scalar_values()
self.assert_compile(
not_(expr),
"NOT (VALUES (:param_1), (:param_2))",
params={"param_1": 7, "param_2": 42},
)
| NegationTest |
python | mlflow__mlflow | mlflow/genai/judges/tools/search_trace_regex.py | {
"start": 806,
"end": 1029
} | class ____:
"""Result of searching a trace with a regex pattern."""
pattern: str
total_matches: int
matches: list[RegexMatch]
error: str | None = None
@experimental(version="3.4.0")
| SearchTraceRegexResult |
python | jd__tenacity | tests/test_asyncio.py | {
"start": 4851,
"end": 11666
} | class ____(unittest.TestCase):
@asynctest
async def test_do_max_attempts(self):
attempts = 0
retrying = tasyncio.AsyncRetrying(stop=stop_after_attempt(3))
try:
async for attempt in retrying:
with attempt:
attempts += 1
raise Exception
except RetryError:
pass
assert attempts == 3
@asynctest
async def test_reraise(self):
class CustomError(Exception):
pass
try:
async for attempt in tasyncio.AsyncRetrying(
stop=stop_after_attempt(1), reraise=True
):
with attempt:
raise CustomError()
except CustomError:
pass
else:
raise Exception
@asynctest
async def test_sleeps(self):
start = current_time_ms()
try:
async for attempt in tasyncio.AsyncRetrying(
stop=stop_after_attempt(1), wait=wait_fixed(1)
):
with attempt:
raise Exception()
except RetryError:
pass
t = current_time_ms() - start
self.assertLess(t, 1.1)
@asynctest
async def test_retry_with_result(self):
async def test():
attempts = 0
# mypy doesn't have great lambda support
def lt_3(x: float) -> bool:
return x < 3
async for attempt in tasyncio.AsyncRetrying(retry=retry_if_result(lt_3)):
with attempt:
attempts += 1
attempt.retry_state.set_result(attempts)
return attempts
result = await test()
self.assertEqual(3, result)
@asynctest
async def test_retry_with_async_result(self):
async def test():
attempts = 0
async def lt_3(x: float) -> bool:
return x < 3
async for attempt in tasyncio.AsyncRetrying(
retry=tasyncio.retry_if_result(lt_3)
):
with attempt:
attempts += 1
assert attempt.retry_state.outcome # help mypy
if not attempt.retry_state.outcome.failed:
attempt.retry_state.set_result(attempts)
return attempts
result = await test()
self.assertEqual(3, result)
@asynctest
async def test_retry_with_async_exc(self):
async def test():
attempts = 0
class CustomException(Exception):
pass
async def is_exc(e: BaseException) -> bool:
return isinstance(e, CustomException)
async for attempt in tasyncio.AsyncRetrying(
retry=tasyncio.retry_if_exception(is_exc)
):
with attempt:
attempts += 1
if attempts < 3:
raise CustomException()
assert attempt.retry_state.outcome # help mypy
if not attempt.retry_state.outcome.failed:
attempt.retry_state.set_result(attempts)
return attempts
result = await test()
self.assertEqual(3, result)
@asynctest
async def test_retry_with_async_result_or(self):
async def test():
attempts = 0
async def lt_3(x: float) -> bool:
return x < 3
class CustomException(Exception):
pass
def is_exc(e: BaseException) -> bool:
return isinstance(e, CustomException)
retry_strategy = tasyncio.retry_if_result(lt_3) | retry_if_exception(is_exc)
async for attempt in tasyncio.AsyncRetrying(retry=retry_strategy):
with attempt:
attempts += 1
if 2 < attempts < 4:
raise CustomException()
assert attempt.retry_state.outcome # help mypy
if not attempt.retry_state.outcome.failed:
attempt.retry_state.set_result(attempts)
return attempts
result = await test()
self.assertEqual(4, result)
@asynctest
async def test_retry_with_async_result_ror(self):
async def test():
attempts = 0
def lt_3(x: float) -> bool:
return x < 3
class CustomException(Exception):
pass
async def is_exc(e: BaseException) -> bool:
return isinstance(e, CustomException)
retry_strategy = retry_if_result(lt_3) | tasyncio.retry_if_exception(is_exc)
async for attempt in tasyncio.AsyncRetrying(retry=retry_strategy):
with attempt:
attempts += 1
if 2 < attempts < 4:
raise CustomException()
assert attempt.retry_state.outcome # help mypy
if not attempt.retry_state.outcome.failed:
attempt.retry_state.set_result(attempts)
return attempts
result = await test()
self.assertEqual(4, result)
@asynctest
async def test_retry_with_async_result_and(self):
async def test():
attempts = 0
async def lt_3(x: float) -> bool:
return x < 3
def gt_0(x: float) -> bool:
return x > 0
retry_strategy = tasyncio.retry_if_result(lt_3) & retry_if_result(gt_0)
async for attempt in tasyncio.AsyncRetrying(retry=retry_strategy):
with attempt:
attempts += 1
attempt.retry_state.set_result(attempts)
return attempts
result = await test()
self.assertEqual(3, result)
@asynctest
async def test_retry_with_async_result_rand(self):
async def test():
attempts = 0
async def lt_3(x: float) -> bool:
return x < 3
def gt_0(x: float) -> bool:
return x > 0
retry_strategy = retry_if_result(gt_0) & tasyncio.retry_if_result(lt_3)
async for attempt in tasyncio.AsyncRetrying(retry=retry_strategy):
with attempt:
attempts += 1
attempt.retry_state.set_result(attempts)
return attempts
result = await test()
self.assertEqual(3, result)
@asynctest
async def test_async_retying_iterator(self):
thing = NoIOErrorAfterCount(5)
with pytest.raises(TypeError):
for attempts in AsyncRetrying():
with attempts:
await _async_function(thing)
| TestContextManager |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.