Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- llava_video/lib/python3.10/site-packages/matplotlib/_fontconfig_pattern.py +111 -0
- llava_video/lib/python3.10/site-packages/matplotlib/font_manager.pyi +136 -0
- llava_video/lib/python3.10/site-packages/matplotlib/hatch.py +225 -0
- llava_video/lib/python3.10/site-packages/matplotlib/legend_handler.py +813 -0
- llava_video/lib/python3.10/site-packages/matplotlib/rcsetup.pyi +159 -0
- llava_video/lib/python3.10/site-packages/matplotlib/widgets.pyi +488 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_update_scale_meta_dispatch.h +23 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_no_update_ops.h +39 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_backward.h +39 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_copy.h +39 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_indices_from_coo_to_csr_meta.h +27 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_efficient_attention_backward_cuda_dispatch.h +24 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log1p_cuda_dispatch.h +24 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_mul.h +101 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_check_errors_native.h +21 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dual_compositeexplicitautograd_dispatch.h +23 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_ops.h +72 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer_copy.h +39 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_pad_packed_sequence_compositeimplicitautograd_dispatch.h +23 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel_cpu_dispatch.h +23 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_print_ops.h +28 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_fused_attention_overrideable_backward_ops.h +28 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_ff_compositeimplicitautograd_dispatch.h +23 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_bsr_tensor_unsafe_ops.h +28 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_semi_structured_linear_native.h +21 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_test_check_tensor_native.h +21 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_backward_meta_dispatch.h +28 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/acos_meta_dispatch.h +26 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/align_as_native.h +21 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/align_tensors_compositeimplicitautograd_dispatch.h +23 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/asin.h +44 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward_elemt_compositeexplicitautograd_dispatch.h +24 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_or_native.h +28 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/complex_native.h +22 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/concatenate_native.h +24 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_convolution_transpose.h +91 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/eq_compositeexplicitautogradnonfunctional_dispatch.h +26 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/exp_cuda_dispatch.h +26 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/fill_ops.h +83 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/flatten_dense_tensors_compositeimplicitautograd_dispatch.h +23 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/from_file_cpu_dispatch.h +24 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/glu.h +39 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward_native.h +21 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/is_nonzero_native.h +21 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_meta_dispatch.h +25 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_cuda_dispatch.h +25 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product_cpu_dispatch.h +25 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq_compositeexplicitautograd_dispatch.h +23 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_cpu_dispatch.h +25 -0
- pllava/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve_triangular_ops.h +39 -0
llava_video/lib/python3.10/site-packages/matplotlib/_fontconfig_pattern.py
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
A module for parsing and generating `fontconfig patterns`_.
|
| 3 |
+
|
| 4 |
+
.. _fontconfig patterns:
|
| 5 |
+
https://www.freedesktop.org/software/fontconfig/fontconfig-user.html
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
# This class logically belongs in `matplotlib.font_manager`, but placing it
|
| 9 |
+
# there would have created cyclical dependency problems, because it also needs
|
| 10 |
+
# to be available from `matplotlib.rcsetup` (for parsing matplotlibrc files).
|
| 11 |
+
|
| 12 |
+
from functools import lru_cache, partial
|
| 13 |
+
import re
|
| 14 |
+
|
| 15 |
+
from pyparsing import (
|
| 16 |
+
Group, Optional, ParseException, Regex, StringEnd, Suppress, ZeroOrMore, oneOf)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
_family_punc = r'\\\-:,'
|
| 20 |
+
_family_unescape = partial(re.compile(r'\\(?=[%s])' % _family_punc).sub, '')
|
| 21 |
+
_family_escape = partial(re.compile(r'(?=[%s])' % _family_punc).sub, r'\\')
|
| 22 |
+
_value_punc = r'\\=_:,'
|
| 23 |
+
_value_unescape = partial(re.compile(r'\\(?=[%s])' % _value_punc).sub, '')
|
| 24 |
+
_value_escape = partial(re.compile(r'(?=[%s])' % _value_punc).sub, r'\\')
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
_CONSTANTS = {
|
| 28 |
+
'thin': ('weight', 'light'),
|
| 29 |
+
'extralight': ('weight', 'light'),
|
| 30 |
+
'ultralight': ('weight', 'light'),
|
| 31 |
+
'light': ('weight', 'light'),
|
| 32 |
+
'book': ('weight', 'book'),
|
| 33 |
+
'regular': ('weight', 'regular'),
|
| 34 |
+
'normal': ('weight', 'normal'),
|
| 35 |
+
'medium': ('weight', 'medium'),
|
| 36 |
+
'demibold': ('weight', 'demibold'),
|
| 37 |
+
'semibold': ('weight', 'semibold'),
|
| 38 |
+
'bold': ('weight', 'bold'),
|
| 39 |
+
'extrabold': ('weight', 'extra bold'),
|
| 40 |
+
'black': ('weight', 'black'),
|
| 41 |
+
'heavy': ('weight', 'heavy'),
|
| 42 |
+
'roman': ('slant', 'normal'),
|
| 43 |
+
'italic': ('slant', 'italic'),
|
| 44 |
+
'oblique': ('slant', 'oblique'),
|
| 45 |
+
'ultracondensed': ('width', 'ultra-condensed'),
|
| 46 |
+
'extracondensed': ('width', 'extra-condensed'),
|
| 47 |
+
'condensed': ('width', 'condensed'),
|
| 48 |
+
'semicondensed': ('width', 'semi-condensed'),
|
| 49 |
+
'expanded': ('width', 'expanded'),
|
| 50 |
+
'extraexpanded': ('width', 'extra-expanded'),
|
| 51 |
+
'ultraexpanded': ('width', 'ultra-expanded'),
|
| 52 |
+
}
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
@lru_cache # The parser instance is a singleton.
|
| 56 |
+
def _make_fontconfig_parser():
|
| 57 |
+
def comma_separated(elem):
|
| 58 |
+
return elem + ZeroOrMore(Suppress(",") + elem)
|
| 59 |
+
|
| 60 |
+
family = Regex(fr"([^{_family_punc}]|(\\[{_family_punc}]))*")
|
| 61 |
+
size = Regex(r"([0-9]+\.?[0-9]*|\.[0-9]+)")
|
| 62 |
+
name = Regex(r"[a-z]+")
|
| 63 |
+
value = Regex(fr"([^{_value_punc}]|(\\[{_value_punc}]))*")
|
| 64 |
+
prop = Group((name + Suppress("=") + comma_separated(value)) | oneOf(_CONSTANTS))
|
| 65 |
+
return (
|
| 66 |
+
Optional(comma_separated(family)("families"))
|
| 67 |
+
+ Optional("-" + comma_separated(size)("sizes"))
|
| 68 |
+
+ ZeroOrMore(":" + prop("properties*"))
|
| 69 |
+
+ StringEnd()
|
| 70 |
+
)
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
# `parse_fontconfig_pattern` is a bottleneck during the tests because it is
|
| 74 |
+
# repeatedly called when the rcParams are reset (to validate the default
|
| 75 |
+
# fonts). In practice, the cache size doesn't grow beyond a few dozen entries
|
| 76 |
+
# during the test suite.
|
| 77 |
+
@lru_cache
|
| 78 |
+
def parse_fontconfig_pattern(pattern):
|
| 79 |
+
"""
|
| 80 |
+
Parse a fontconfig *pattern* into a dict that can initialize a
|
| 81 |
+
`.font_manager.FontProperties` object.
|
| 82 |
+
"""
|
| 83 |
+
parser = _make_fontconfig_parser()
|
| 84 |
+
try:
|
| 85 |
+
parse = parser.parseString(pattern)
|
| 86 |
+
except ParseException as err:
|
| 87 |
+
# explain becomes a plain method on pyparsing 3 (err.explain(0)).
|
| 88 |
+
raise ValueError("\n" + ParseException.explain(err, 0)) from None
|
| 89 |
+
parser.resetCache()
|
| 90 |
+
props = {}
|
| 91 |
+
if "families" in parse:
|
| 92 |
+
props["family"] = [*map(_family_unescape, parse["families"])]
|
| 93 |
+
if "sizes" in parse:
|
| 94 |
+
props["size"] = [*parse["sizes"]]
|
| 95 |
+
for prop in parse.get("properties", []):
|
| 96 |
+
if len(prop) == 1:
|
| 97 |
+
prop = _CONSTANTS[prop[0]]
|
| 98 |
+
k, *v = prop
|
| 99 |
+
props.setdefault(k, []).extend(map(_value_unescape, v))
|
| 100 |
+
return props
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def generate_fontconfig_pattern(d):
|
| 104 |
+
"""Convert a `.FontProperties` to a fontconfig pattern string."""
|
| 105 |
+
kvs = [(k, getattr(d, f"get_{k}")())
|
| 106 |
+
for k in ["style", "variant", "weight", "stretch", "file", "size"]]
|
| 107 |
+
# Families is given first without a leading keyword. Other entries (which
|
| 108 |
+
# are necessarily scalar) are given as key=value, skipping Nones.
|
| 109 |
+
return (",".join(_family_escape(f) for f in d.get_family())
|
| 110 |
+
+ "".join(f":{k}={_value_escape(str(v))}"
|
| 111 |
+
for k, v in kvs if v is not None))
|
llava_video/lib/python3.10/site-packages/matplotlib/font_manager.pyi
ADDED
|
@@ -0,0 +1,136 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dataclasses import dataclass
|
| 2 |
+
import os
|
| 3 |
+
|
| 4 |
+
from matplotlib._afm import AFM
|
| 5 |
+
from matplotlib import ft2font
|
| 6 |
+
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
|
| 9 |
+
from collections.abc import Iterable
|
| 10 |
+
from typing import Any, Literal
|
| 11 |
+
|
| 12 |
+
font_scalings: dict[str | None, float]
|
| 13 |
+
stretch_dict: dict[str, int]
|
| 14 |
+
weight_dict: dict[str, int]
|
| 15 |
+
font_family_aliases: set[str]
|
| 16 |
+
MSFolders: str
|
| 17 |
+
MSFontDirectories: list[str]
|
| 18 |
+
MSUserFontDirectories: list[str]
|
| 19 |
+
X11FontDirectories: list[str]
|
| 20 |
+
OSXFontDirectories: list[str]
|
| 21 |
+
|
| 22 |
+
def get_fontext_synonyms(fontext: str) -> list[str]: ...
|
| 23 |
+
def list_fonts(directory: str, extensions: Iterable[str]) -> list[str]: ...
|
| 24 |
+
def win32FontDirectory() -> str: ...
|
| 25 |
+
def _get_fontconfig_fonts() -> list[Path]: ...
|
| 26 |
+
def findSystemFonts(
|
| 27 |
+
fontpaths: Iterable[str | os.PathLike | Path] | None = ..., fontext: str = ...
|
| 28 |
+
) -> list[str]: ...
|
| 29 |
+
@dataclass
|
| 30 |
+
class FontEntry:
|
| 31 |
+
fname: str = ...
|
| 32 |
+
name: str = ...
|
| 33 |
+
style: str = ...
|
| 34 |
+
variant: str = ...
|
| 35 |
+
weight: str | int = ...
|
| 36 |
+
stretch: str = ...
|
| 37 |
+
size: str = ...
|
| 38 |
+
def _repr_html_(self) -> str: ...
|
| 39 |
+
def _repr_png_(self) -> bytes: ...
|
| 40 |
+
|
| 41 |
+
def ttfFontProperty(font: ft2font.FT2Font) -> FontEntry: ...
|
| 42 |
+
def afmFontProperty(fontpath: str, font: AFM) -> FontEntry: ...
|
| 43 |
+
|
| 44 |
+
class FontProperties:
|
| 45 |
+
def __init__(
|
| 46 |
+
self,
|
| 47 |
+
family: str | Iterable[str] | None = ...,
|
| 48 |
+
style: Literal["normal", "italic", "oblique"] | None = ...,
|
| 49 |
+
variant: Literal["normal", "small-caps"] | None = ...,
|
| 50 |
+
weight: int | str | None = ...,
|
| 51 |
+
stretch: int | str | None = ...,
|
| 52 |
+
size: float | str | None = ...,
|
| 53 |
+
fname: str | os.PathLike | Path | None = ...,
|
| 54 |
+
math_fontfamily: str | None = ...,
|
| 55 |
+
) -> None: ...
|
| 56 |
+
def __hash__(self) -> int: ...
|
| 57 |
+
def __eq__(self, other: object) -> bool: ...
|
| 58 |
+
def get_family(self) -> list[str]: ...
|
| 59 |
+
def get_name(self) -> str: ...
|
| 60 |
+
def get_style(self) -> Literal["normal", "italic", "oblique"]: ...
|
| 61 |
+
def get_variant(self) -> Literal["normal", "small-caps"]: ...
|
| 62 |
+
def get_weight(self) -> int | str: ...
|
| 63 |
+
def get_stretch(self) -> int | str: ...
|
| 64 |
+
def get_size(self) -> float: ...
|
| 65 |
+
def get_file(self) -> str | bytes | None: ...
|
| 66 |
+
def get_fontconfig_pattern(self) -> dict[str, list[Any]]: ...
|
| 67 |
+
def set_family(self, family: str | Iterable[str] | None) -> None: ...
|
| 68 |
+
def set_style(
|
| 69 |
+
self, style: Literal["normal", "italic", "oblique"] | None
|
| 70 |
+
) -> None: ...
|
| 71 |
+
def set_variant(self, variant: Literal["normal", "small-caps"] | None) -> None: ...
|
| 72 |
+
def set_weight(self, weight: int | str | None) -> None: ...
|
| 73 |
+
def set_stretch(self, stretch: int | str | None) -> None: ...
|
| 74 |
+
def set_size(self, size: float | str | None) -> None: ...
|
| 75 |
+
def set_file(self, file: str | os.PathLike | Path | None) -> None: ...
|
| 76 |
+
def set_fontconfig_pattern(self, pattern: str) -> None: ...
|
| 77 |
+
def get_math_fontfamily(self) -> str: ...
|
| 78 |
+
def set_math_fontfamily(self, fontfamily: str | None) -> None: ...
|
| 79 |
+
def copy(self) -> FontProperties: ...
|
| 80 |
+
# Aliases
|
| 81 |
+
set_name = set_family
|
| 82 |
+
get_slant = get_style
|
| 83 |
+
set_slant = set_style
|
| 84 |
+
get_size_in_points = get_size
|
| 85 |
+
|
| 86 |
+
def json_dump(data: FontManager, filename: str | Path | os.PathLike) -> None: ...
|
| 87 |
+
def json_load(filename: str | Path | os.PathLike) -> FontManager: ...
|
| 88 |
+
|
| 89 |
+
class FontManager:
|
| 90 |
+
__version__: int
|
| 91 |
+
default_size: float | None
|
| 92 |
+
defaultFamily: dict[str, str]
|
| 93 |
+
afmlist: list[FontEntry]
|
| 94 |
+
ttflist: list[FontEntry]
|
| 95 |
+
def __init__(self, size: float | None = ..., weight: str = ...) -> None: ...
|
| 96 |
+
def addfont(self, path: str | Path | os.PathLike) -> None: ...
|
| 97 |
+
@property
|
| 98 |
+
def defaultFont(self) -> dict[str, str]: ...
|
| 99 |
+
def get_default_weight(self) -> str: ...
|
| 100 |
+
@staticmethod
|
| 101 |
+
def get_default_size() -> float: ...
|
| 102 |
+
def set_default_weight(self, weight: str) -> None: ...
|
| 103 |
+
def score_family(
|
| 104 |
+
self, families: str | list[str] | tuple[str], family2: str
|
| 105 |
+
) -> float: ...
|
| 106 |
+
def score_style(self, style1: str, style2: str) -> float: ...
|
| 107 |
+
def score_variant(self, variant1: str, variant2: str) -> float: ...
|
| 108 |
+
def score_stretch(self, stretch1: str | int, stretch2: str | int) -> float: ...
|
| 109 |
+
def score_weight(self, weight1: str | float, weight2: str | float) -> float: ...
|
| 110 |
+
def score_size(self, size1: str | float, size2: str | float) -> float: ...
|
| 111 |
+
def findfont(
|
| 112 |
+
self,
|
| 113 |
+
prop: str | FontProperties,
|
| 114 |
+
fontext: Literal["ttf", "afm"] = ...,
|
| 115 |
+
directory: str | None = ...,
|
| 116 |
+
fallback_to_default: bool = ...,
|
| 117 |
+
rebuild_if_missing: bool = ...,
|
| 118 |
+
) -> str: ...
|
| 119 |
+
def get_font_names(self) -> list[str]: ...
|
| 120 |
+
|
| 121 |
+
def is_opentype_cff_font(filename: str) -> bool: ...
|
| 122 |
+
def get_font(
|
| 123 |
+
font_filepaths: Iterable[str | Path | bytes] | str | Path | bytes,
|
| 124 |
+
hinting_factor: int | None = ...,
|
| 125 |
+
) -> ft2font.FT2Font: ...
|
| 126 |
+
|
| 127 |
+
fontManager: FontManager
|
| 128 |
+
|
| 129 |
+
def findfont(
|
| 130 |
+
prop: str | FontProperties,
|
| 131 |
+
fontext: Literal["ttf", "afm"] = ...,
|
| 132 |
+
directory: str | None = ...,
|
| 133 |
+
fallback_to_default: bool = ...,
|
| 134 |
+
rebuild_if_missing: bool = ...,
|
| 135 |
+
) -> str: ...
|
| 136 |
+
def get_font_names() -> list[str]: ...
|
llava_video/lib/python3.10/site-packages/matplotlib/hatch.py
ADDED
|
@@ -0,0 +1,225 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Contains classes for generating hatch patterns."""
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
from matplotlib import _api
|
| 6 |
+
from matplotlib.path import Path
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class HatchPatternBase:
|
| 10 |
+
"""The base class for a hatch pattern."""
|
| 11 |
+
pass
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class HorizontalHatch(HatchPatternBase):
|
| 15 |
+
def __init__(self, hatch, density):
|
| 16 |
+
self.num_lines = int((hatch.count('-') + hatch.count('+')) * density)
|
| 17 |
+
self.num_vertices = self.num_lines * 2
|
| 18 |
+
|
| 19 |
+
def set_vertices_and_codes(self, vertices, codes):
|
| 20 |
+
steps, stepsize = np.linspace(0.0, 1.0, self.num_lines, False,
|
| 21 |
+
retstep=True)
|
| 22 |
+
steps += stepsize / 2.
|
| 23 |
+
vertices[0::2, 0] = 0.0
|
| 24 |
+
vertices[0::2, 1] = steps
|
| 25 |
+
vertices[1::2, 0] = 1.0
|
| 26 |
+
vertices[1::2, 1] = steps
|
| 27 |
+
codes[0::2] = Path.MOVETO
|
| 28 |
+
codes[1::2] = Path.LINETO
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class VerticalHatch(HatchPatternBase):
|
| 32 |
+
def __init__(self, hatch, density):
|
| 33 |
+
self.num_lines = int((hatch.count('|') + hatch.count('+')) * density)
|
| 34 |
+
self.num_vertices = self.num_lines * 2
|
| 35 |
+
|
| 36 |
+
def set_vertices_and_codes(self, vertices, codes):
|
| 37 |
+
steps, stepsize = np.linspace(0.0, 1.0, self.num_lines, False,
|
| 38 |
+
retstep=True)
|
| 39 |
+
steps += stepsize / 2.
|
| 40 |
+
vertices[0::2, 0] = steps
|
| 41 |
+
vertices[0::2, 1] = 0.0
|
| 42 |
+
vertices[1::2, 0] = steps
|
| 43 |
+
vertices[1::2, 1] = 1.0
|
| 44 |
+
codes[0::2] = Path.MOVETO
|
| 45 |
+
codes[1::2] = Path.LINETO
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
class NorthEastHatch(HatchPatternBase):
|
| 49 |
+
def __init__(self, hatch, density):
|
| 50 |
+
self.num_lines = int(
|
| 51 |
+
(hatch.count('/') + hatch.count('x') + hatch.count('X')) * density)
|
| 52 |
+
if self.num_lines:
|
| 53 |
+
self.num_vertices = (self.num_lines + 1) * 2
|
| 54 |
+
else:
|
| 55 |
+
self.num_vertices = 0
|
| 56 |
+
|
| 57 |
+
def set_vertices_and_codes(self, vertices, codes):
|
| 58 |
+
steps = np.linspace(-0.5, 0.5, self.num_lines + 1)
|
| 59 |
+
vertices[0::2, 0] = 0.0 + steps
|
| 60 |
+
vertices[0::2, 1] = 0.0 - steps
|
| 61 |
+
vertices[1::2, 0] = 1.0 + steps
|
| 62 |
+
vertices[1::2, 1] = 1.0 - steps
|
| 63 |
+
codes[0::2] = Path.MOVETO
|
| 64 |
+
codes[1::2] = Path.LINETO
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
class SouthEastHatch(HatchPatternBase):
|
| 68 |
+
def __init__(self, hatch, density):
|
| 69 |
+
self.num_lines = int(
|
| 70 |
+
(hatch.count('\\') + hatch.count('x') + hatch.count('X'))
|
| 71 |
+
* density)
|
| 72 |
+
if self.num_lines:
|
| 73 |
+
self.num_vertices = (self.num_lines + 1) * 2
|
| 74 |
+
else:
|
| 75 |
+
self.num_vertices = 0
|
| 76 |
+
|
| 77 |
+
def set_vertices_and_codes(self, vertices, codes):
|
| 78 |
+
steps = np.linspace(-0.5, 0.5, self.num_lines + 1)
|
| 79 |
+
vertices[0::2, 0] = 0.0 + steps
|
| 80 |
+
vertices[0::2, 1] = 1.0 + steps
|
| 81 |
+
vertices[1::2, 0] = 1.0 + steps
|
| 82 |
+
vertices[1::2, 1] = 0.0 + steps
|
| 83 |
+
codes[0::2] = Path.MOVETO
|
| 84 |
+
codes[1::2] = Path.LINETO
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
class Shapes(HatchPatternBase):
|
| 88 |
+
filled = False
|
| 89 |
+
|
| 90 |
+
def __init__(self, hatch, density):
|
| 91 |
+
if self.num_rows == 0:
|
| 92 |
+
self.num_shapes = 0
|
| 93 |
+
self.num_vertices = 0
|
| 94 |
+
else:
|
| 95 |
+
self.num_shapes = ((self.num_rows // 2 + 1) * (self.num_rows + 1) +
|
| 96 |
+
(self.num_rows // 2) * self.num_rows)
|
| 97 |
+
self.num_vertices = (self.num_shapes *
|
| 98 |
+
len(self.shape_vertices) *
|
| 99 |
+
(1 if self.filled else 2))
|
| 100 |
+
|
| 101 |
+
def set_vertices_and_codes(self, vertices, codes):
|
| 102 |
+
offset = 1.0 / self.num_rows
|
| 103 |
+
shape_vertices = self.shape_vertices * offset * self.size
|
| 104 |
+
shape_codes = self.shape_codes
|
| 105 |
+
if not self.filled:
|
| 106 |
+
shape_vertices = np.concatenate( # Forward, then backward.
|
| 107 |
+
[shape_vertices, shape_vertices[::-1] * 0.9])
|
| 108 |
+
shape_codes = np.concatenate([shape_codes, shape_codes])
|
| 109 |
+
vertices_parts = []
|
| 110 |
+
codes_parts = []
|
| 111 |
+
for row in range(self.num_rows + 1):
|
| 112 |
+
if row % 2 == 0:
|
| 113 |
+
cols = np.linspace(0, 1, self.num_rows + 1)
|
| 114 |
+
else:
|
| 115 |
+
cols = np.linspace(offset / 2, 1 - offset / 2, self.num_rows)
|
| 116 |
+
row_pos = row * offset
|
| 117 |
+
for col_pos in cols:
|
| 118 |
+
vertices_parts.append(shape_vertices + [col_pos, row_pos])
|
| 119 |
+
codes_parts.append(shape_codes)
|
| 120 |
+
np.concatenate(vertices_parts, out=vertices)
|
| 121 |
+
np.concatenate(codes_parts, out=codes)
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
class Circles(Shapes):
|
| 125 |
+
def __init__(self, hatch, density):
|
| 126 |
+
path = Path.unit_circle()
|
| 127 |
+
self.shape_vertices = path.vertices
|
| 128 |
+
self.shape_codes = path.codes
|
| 129 |
+
super().__init__(hatch, density)
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
class SmallCircles(Circles):
|
| 133 |
+
size = 0.2
|
| 134 |
+
|
| 135 |
+
def __init__(self, hatch, density):
|
| 136 |
+
self.num_rows = (hatch.count('o')) * density
|
| 137 |
+
super().__init__(hatch, density)
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
class LargeCircles(Circles):
|
| 141 |
+
size = 0.35
|
| 142 |
+
|
| 143 |
+
def __init__(self, hatch, density):
|
| 144 |
+
self.num_rows = (hatch.count('O')) * density
|
| 145 |
+
super().__init__(hatch, density)
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
class SmallFilledCircles(Circles):
|
| 149 |
+
size = 0.1
|
| 150 |
+
filled = True
|
| 151 |
+
|
| 152 |
+
def __init__(self, hatch, density):
|
| 153 |
+
self.num_rows = (hatch.count('.')) * density
|
| 154 |
+
super().__init__(hatch, density)
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
class Stars(Shapes):
|
| 158 |
+
size = 1.0 / 3.0
|
| 159 |
+
filled = True
|
| 160 |
+
|
| 161 |
+
def __init__(self, hatch, density):
|
| 162 |
+
self.num_rows = (hatch.count('*')) * density
|
| 163 |
+
path = Path.unit_regular_star(5)
|
| 164 |
+
self.shape_vertices = path.vertices
|
| 165 |
+
self.shape_codes = np.full(len(self.shape_vertices), Path.LINETO,
|
| 166 |
+
dtype=Path.code_type)
|
| 167 |
+
self.shape_codes[0] = Path.MOVETO
|
| 168 |
+
super().__init__(hatch, density)
|
| 169 |
+
|
| 170 |
+
_hatch_types = [
|
| 171 |
+
HorizontalHatch,
|
| 172 |
+
VerticalHatch,
|
| 173 |
+
NorthEastHatch,
|
| 174 |
+
SouthEastHatch,
|
| 175 |
+
SmallCircles,
|
| 176 |
+
LargeCircles,
|
| 177 |
+
SmallFilledCircles,
|
| 178 |
+
Stars
|
| 179 |
+
]
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
def _validate_hatch_pattern(hatch):
|
| 183 |
+
valid_hatch_patterns = set(r'-+|/\xXoO.*')
|
| 184 |
+
if hatch is not None:
|
| 185 |
+
invalids = set(hatch).difference(valid_hatch_patterns)
|
| 186 |
+
if invalids:
|
| 187 |
+
valid = ''.join(sorted(valid_hatch_patterns))
|
| 188 |
+
invalids = ''.join(sorted(invalids))
|
| 189 |
+
_api.warn_deprecated(
|
| 190 |
+
'3.4',
|
| 191 |
+
removal='3.11', # one release after custom hatches (#20690)
|
| 192 |
+
message=f'hatch must consist of a string of "{valid}" or '
|
| 193 |
+
'None, but found the following invalid values '
|
| 194 |
+
f'"{invalids}". Passing invalid values is deprecated '
|
| 195 |
+
'since %(since)s and will become an error in %(removal)s.'
|
| 196 |
+
)
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
def get_path(hatchpattern, density=6):
|
| 200 |
+
"""
|
| 201 |
+
Given a hatch specifier, *hatchpattern*, generates Path to render
|
| 202 |
+
the hatch in a unit square. *density* is the number of lines per
|
| 203 |
+
unit square.
|
| 204 |
+
"""
|
| 205 |
+
density = int(density)
|
| 206 |
+
|
| 207 |
+
patterns = [hatch_type(hatchpattern, density)
|
| 208 |
+
for hatch_type in _hatch_types]
|
| 209 |
+
num_vertices = sum([pattern.num_vertices for pattern in patterns])
|
| 210 |
+
|
| 211 |
+
if num_vertices == 0:
|
| 212 |
+
return Path(np.empty((0, 2)))
|
| 213 |
+
|
| 214 |
+
vertices = np.empty((num_vertices, 2))
|
| 215 |
+
codes = np.empty(num_vertices, Path.code_type)
|
| 216 |
+
|
| 217 |
+
cursor = 0
|
| 218 |
+
for pattern in patterns:
|
| 219 |
+
if pattern.num_vertices != 0:
|
| 220 |
+
vertices_chunk = vertices[cursor:cursor + pattern.num_vertices]
|
| 221 |
+
codes_chunk = codes[cursor:cursor + pattern.num_vertices]
|
| 222 |
+
pattern.set_vertices_and_codes(vertices_chunk, codes_chunk)
|
| 223 |
+
cursor += pattern.num_vertices
|
| 224 |
+
|
| 225 |
+
return Path(vertices, codes)
|
llava_video/lib/python3.10/site-packages/matplotlib/legend_handler.py
ADDED
|
@@ -0,0 +1,813 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Default legend handlers.
|
| 3 |
+
|
| 4 |
+
.. important::
|
| 5 |
+
|
| 6 |
+
This is a low-level legend API, which most end users do not need.
|
| 7 |
+
|
| 8 |
+
We recommend that you are familiar with the :ref:`legend guide
|
| 9 |
+
<legend_guide>` before reading this documentation.
|
| 10 |
+
|
| 11 |
+
Legend handlers are expected to be a callable object with a following
|
| 12 |
+
signature::
|
| 13 |
+
|
| 14 |
+
legend_handler(legend, orig_handle, fontsize, handlebox)
|
| 15 |
+
|
| 16 |
+
Where *legend* is the legend itself, *orig_handle* is the original
|
| 17 |
+
plot, *fontsize* is the fontsize in pixels, and *handlebox* is an
|
| 18 |
+
`.OffsetBox` instance. Within the call, you should create relevant
|
| 19 |
+
artists (using relevant properties from the *legend* and/or
|
| 20 |
+
*orig_handle*) and add them into the *handlebox*. The artists need to
|
| 21 |
+
be scaled according to the *fontsize* (note that the size is in pixels,
|
| 22 |
+
i.e., this is dpi-scaled value).
|
| 23 |
+
|
| 24 |
+
This module includes definition of several legend handler classes
|
| 25 |
+
derived from the base class (HandlerBase) with the following method::
|
| 26 |
+
|
| 27 |
+
def legend_artist(self, legend, orig_handle, fontsize, handlebox)
|
| 28 |
+
"""
|
| 29 |
+
|
| 30 |
+
from itertools import cycle
|
| 31 |
+
|
| 32 |
+
import numpy as np
|
| 33 |
+
|
| 34 |
+
from matplotlib import cbook
|
| 35 |
+
from matplotlib.lines import Line2D
|
| 36 |
+
from matplotlib.patches import Rectangle
|
| 37 |
+
import matplotlib.collections as mcoll
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def update_from_first_child(tgt, src):
    """Copy artist properties onto *tgt* from the first child of *src*.

    A no-op when *src* has no children.
    """
    children = iter(src.get_children())
    try:
        first_child = next(children)
    except StopIteration:
        return
    tgt.update_from(first_child)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class HandlerBase:
    """
    A base class for default legend handlers.

    The derived classes are meant to override *create_artists* method, which
    has the following signature::

        def create_artists(self, legend, orig_handle,
                           xdescent, ydescent, width, height, fontsize,
                           trans):

    The overridden method needs to create artists of the given
    transform that fits in the given dimension (xdescent, ydescent,
    width, height) that are scaled by fontsize if necessary.

    """
    def __init__(self, xpad=0., ypad=0., update_func=None):
        """
        Parameters
        ----------
        xpad : float, optional
            Padding in x-direction.
        ypad : float, optional
            Padding in y-direction.
        update_func : callable, optional
            Function for updating the legend handler properties from another
            legend handler, used by `~HandlerBase.update_prop`.
        """
        self._xpad, self._ypad = xpad, ypad
        self._update_prop_func = update_func

    def _update_prop(self, legend_handle, orig_handle):
        # Dispatch to the user-supplied update function when one was given;
        # otherwise fall back to the default property copy.
        if self._update_prop_func is None:
            self._default_update_prop(legend_handle, orig_handle)
        else:
            self._update_prop_func(legend_handle, orig_handle)

    def _default_update_prop(self, legend_handle, orig_handle):
        # Copy shareable artist properties from the original handle onto
        # the legend proxy artist.
        legend_handle.update_from(orig_handle)

    def update_prop(self, legend_handle, orig_handle, legend):
        # Update the proxy artist's properties, attach it to the legend,
        # and drop any clipping copied over from the original artist (the
        # legend box is not clipped to the axes).

        self._update_prop(legend_handle, orig_handle)

        legend._set_artist_props(legend_handle)
        legend_handle.set_clip_box(None)
        legend_handle.set_clip_path(None)

    def adjust_drawing_area(self, legend, orig_handle,
                            xdescent, ydescent, width, height, fontsize,
                            ):
        # Shrink the drawing area by this handler's padding; pads are given
        # as fractions of the fontsize.
        xdescent = xdescent - self._xpad * fontsize
        ydescent = ydescent - self._ypad * fontsize
        width = width - self._xpad * fontsize
        height = height - self._ypad * fontsize
        return xdescent, ydescent, width, height

    def legend_artist(self, legend, orig_handle,
                      fontsize, handlebox):
        """
        Return the artist that this HandlerBase generates for the given
        original artist/handle.

        Parameters
        ----------
        legend : `~matplotlib.legend.Legend`
            The legend for which these legend artists are being created.
        orig_handle : :class:`matplotlib.artist.Artist` or similar
            The object for which these legend artists are being created.
        fontsize : int
            The fontsize in pixels. The artists being created should
            be scaled according to the given fontsize.
        handlebox : `~matplotlib.offsetbox.OffsetBox`
            The box which has been created to hold this legend entry's
            artists. Artists created in the `legend_artist` method must
            be added to this handlebox inside this method.

        """
        xdescent, ydescent, width, height = self.adjust_drawing_area(
            legend, orig_handle,
            handlebox.xdescent, handlebox.ydescent,
            handlebox.width, handlebox.height,
            fontsize)
        artists = self.create_artists(legend, orig_handle,
                                      xdescent, ydescent, width, height,
                                      fontsize, handlebox.get_transform())

        # create_artists will return a list of artists.
        for a in artists:
            handlebox.add_artist(a)

        # we only return the first artist
        return artists[0]

    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize,
                       trans):
        """
        Return the legend artists generated.

        Parameters
        ----------
        legend : `~matplotlib.legend.Legend`
            The legend for which these legend artists are being created.
        orig_handle : `~matplotlib.artist.Artist` or similar
            The object for which these legend artists are being created.
        xdescent, ydescent, width, height : int
            The rectangle (*xdescent*, *ydescent*, *width*, *height*) that the
            legend artists being created should fit within.
        fontsize : int
            The fontsize in pixels. The legend artists being created should
            be scaled according to the given fontsize.
        trans : `~matplotlib.transforms.Transform`
            The transform that is applied to the legend artists being created.
            Typically from unit coordinates in the handler box to screen
            coordinates.
        """
        raise NotImplementedError('Derived must override')
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
class HandlerNpoints(HandlerBase):
    """
    A legend handler that shows *numpoints* points in the legend entry.
    """

    def __init__(self, marker_pad=0.3, numpoints=None, **kwargs):
        """
        Parameters
        ----------
        marker_pad : float
            Padding between points in legend entry.
        numpoints : int
            Number of points to show in legend entry.
        **kwargs
            Keyword arguments forwarded to `.HandlerBase`.
        """
        super().__init__(**kwargs)

        self._numpoints = numpoints
        self._marker_pad = marker_pad

    def get_numpoints(self, legend):
        # Per-handler setting wins; otherwise fall back to the legend-wide
        # *numpoints* value.
        if self._numpoints is None:
            return legend.numpoints
        else:
            return self._numpoints

    def get_xdata(self, legend, xdescent, ydescent, width, height, fontsize):
        # Return (xdata, xdata_marker): the x positions for the line span
        # and for the markers, respectively.
        numpoints = self.get_numpoints(legend)
        if numpoints > 1:
            # we put some pad here to compensate the size of the marker
            pad = self._marker_pad * fontsize
            xdata = np.linspace(-xdescent + pad,
                                -xdescent + width - pad,
                                numpoints)
            xdata_marker = xdata
        else:
            # Single point: line spans the full width, marker is centered.
            xdata = [-xdescent, -xdescent + width]
            xdata_marker = [-xdescent + 0.5 * width]
        return xdata, xdata_marker
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
class HandlerNpointsYoffsets(HandlerNpoints):
    """
    A legend handler that shows *numpoints* in the legend, and allows them to
    be individually offset in the y-direction.
    """

    def __init__(self, numpoints=None, yoffsets=None, **kwargs):
        """
        Parameters
        ----------
        numpoints : int
            Number of points to show in legend entry.
        yoffsets : array of floats
            Length *numpoints* list of y offsets for each point in
            legend entry.
        **kwargs
            Keyword arguments forwarded to `.HandlerNpoints`.
        """
        super().__init__(numpoints=numpoints, **kwargs)
        self._yoffsets = yoffsets

    def get_ydata(self, legend, xdescent, ydescent, width, height, fontsize):
        # Offsets are fractions of the handle-box height; use the legend's
        # scatter y-offsets unless explicit offsets were supplied.
        if self._yoffsets is None:
            ydata = height * legend._scatteryoffsets
        else:
            ydata = height * np.asarray(self._yoffsets)

        return ydata
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
class HandlerLine2DCompound(HandlerNpoints):
    """
    Original handler for `.Line2D` instances, that relies on combining
    a line-only with a marker-only artist.  May be deprecated in the future.
    """

    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize,
                       trans):
        # docstring inherited
        xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
                                             width, height, fontsize)

        # Vertically center the line within the handle box.
        ydata = np.full_like(xdata, ((height - ydescent) / 2))
        legline = Line2D(xdata, ydata)

        self.update_prop(legline, orig_handle, legend)
        # This artist draws only the line: strip the marker.
        legline.set_drawstyle('default')
        legline.set_marker("")

        # A second Line2D draws only the markers (no line).
        legline_marker = Line2D(xdata_marker, ydata[:len(xdata_marker)])
        self.update_prop(legline_marker, orig_handle, legend)
        legline_marker.set_linestyle('None')
        if legend.markerscale != 1:
            newsz = legline_marker.get_markersize() * legend.markerscale
            legline_marker.set_markersize(newsz)
        # we don't want to add this to the return list because
        # the texts and handles are assumed to be in one-to-one
        # correspondence.
        legline._legmarker = legline_marker

        legline.set_transform(trans)
        legline_marker.set_transform(trans)

        return [legline, legline_marker]
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
class HandlerLine2D(HandlerNpoints):
    """
    Handler for `.Line2D` instances.

    See Also
    --------
    HandlerLine2DCompound : An earlier handler implementation, which used one
                            artist for the line and another for the marker(s).
    """

    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize,
                       trans):
        # docstring inherited
        xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
                                             width, height, fontsize)

        markevery = None
        if self.get_numpoints(legend) == 1:
            # Special case: one wants a single marker in the center
            # and a line that extends on both sides. One will use a
            # 3 points line, but only mark the #1 (i.e. middle) point.
            xdata = np.linspace(xdata[0], xdata[-1], 3)
            markevery = [1]

        # Vertically center the line within the handle box.
        ydata = np.full_like(xdata, (height - ydescent) / 2)
        legline = Line2D(xdata, ydata, markevery=markevery)

        self.update_prop(legline, orig_handle, legend)

        if legend.markerscale != 1:
            newsz = legline.get_markersize() * legend.markerscale
            legline.set_markersize(newsz)

        legline.set_transform(trans)

        return [legline]
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
class HandlerPatch(HandlerBase):
    """
    Handler for `.Patch` instances.
    """

    def __init__(self, patch_func=None, **kwargs):
        """
        Parameters
        ----------
        patch_func : callable, optional
            The function that creates the legend key artist.
            *patch_func* should have the signature::

                def patch_func(legend=legend, orig_handle=orig_handle,
                               xdescent=xdescent, ydescent=ydescent,
                               width=width, height=height, fontsize=fontsize)

            Subsequently, the created artist will have its ``update_prop``
            method called and the appropriate transform will be applied.

        **kwargs
            Keyword arguments forwarded to `.HandlerBase`.
        """
        super().__init__(**kwargs)
        self._patch_func = patch_func

    def _create_patch(self, legend, orig_handle,
                      xdescent, ydescent, width, height, fontsize):
        # Default key artist: a plain rectangle filling the drawing area;
        # a user-supplied *patch_func* can substitute any patch.
        if self._patch_func is None:
            p = Rectangle(xy=(-xdescent, -ydescent),
                          width=width, height=height)
        else:
            p = self._patch_func(legend=legend, orig_handle=orig_handle,
                                 xdescent=xdescent, ydescent=ydescent,
                                 width=width, height=height, fontsize=fontsize)
        return p

    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize, trans):
        # docstring inherited
        p = self._create_patch(legend, orig_handle,
                               xdescent, ydescent, width, height, fontsize)
        self.update_prop(p, orig_handle, legend)
        p.set_transform(trans)
        return [p]
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
class HandlerStepPatch(HandlerBase):
    """
    Handler for `~.matplotlib.patches.StepPatch` instances.
    """

    @staticmethod
    def _create_patch(orig_handle, xdescent, ydescent, width, height):
        # Filled (or hatched) StepPatch is represented by a rectangle.
        return Rectangle(xy=(-xdescent, -ydescent), width=width,
                         height=height, color=orig_handle.get_facecolor())

    @staticmethod
    def _create_line(orig_handle, width, height):
        # Unfilled StepPatch should show as a line
        legline = Line2D([0, width], [height/2, height/2],
                         color=orig_handle.get_edgecolor(),
                         linestyle=orig_handle.get_linestyle(),
                         linewidth=orig_handle.get_linewidth(),
                         )

        # Overwrite manually because patch and line properties don't mix
        legline.set_drawstyle('default')
        legline.set_marker("")
        return legline

    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize, trans):
        # docstring inherited
        if orig_handle.get_fill() or (orig_handle.get_hatch() is not None):
            p = self._create_patch(orig_handle, xdescent, ydescent, width,
                                   height)
            self.update_prop(p, orig_handle, legend)
        else:
            p = self._create_line(orig_handle, width, height)
        p.set_transform(trans)
        return [p]
|
| 396 |
+
|
| 397 |
+
|
| 398 |
+
class HandlerLineCollection(HandlerLine2D):
    """
    Handler for `.LineCollection` instances.
    """
    def get_numpoints(self, legend):
        # Line collections use the legend's *scatterpoints* setting rather
        # than *numpoints*.
        if self._numpoints is None:
            return legend.scatterpoints
        else:
            return self._numpoints

    def _default_update_prop(self, legend_handle, orig_handle):
        # Copy the first line's width, (unscaled) linestyle and color from
        # the collection onto the Line2D proxy.
        lw = orig_handle.get_linewidths()[0]
        dashes = orig_handle._us_linestyles[0]
        color = orig_handle.get_colors()[0]
        legend_handle.set_color(color)
        legend_handle.set_linestyle(dashes)
        legend_handle.set_linewidth(lw)

    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize, trans):
        # docstring inherited
        xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
                                             width, height, fontsize)
        # Vertically center the proxy line within the handle box.
        ydata = np.full_like(xdata, (height - ydescent) / 2)
        legline = Line2D(xdata, ydata)

        self.update_prop(legline, orig_handle, legend)
        legline.set_transform(trans)

        return [legline]
|
| 428 |
+
|
| 429 |
+
|
| 430 |
+
class HandlerRegularPolyCollection(HandlerNpointsYoffsets):
    r"""Handler for `.RegularPolyCollection`\s."""

    def __init__(self, yoffsets=None, sizes=None, **kwargs):
        # *sizes*: optional explicit marker sizes for the legend entry; when
        # None, sizes are derived from the original handle in `get_sizes`.
        super().__init__(yoffsets=yoffsets, **kwargs)

        self._sizes = sizes

    def get_numpoints(self, legend):
        # Collections use the legend's *scatterpoints* setting.
        if self._numpoints is None:
            return legend.scatterpoints
        else:
            return self._numpoints

    def get_sizes(self, legend, orig_handle,
                  xdescent, ydescent, width, height, fontsize):
        # Build the list of marker sizes shown in the legend entry, scaled
        # by markerscale**2 (sizes are areas).
        if self._sizes is None:
            handle_sizes = orig_handle.get_sizes()
            if not len(handle_sizes):
                handle_sizes = [1]
            size_max = max(handle_sizes) * legend.markerscale ** 2
            size_min = min(handle_sizes) * legend.markerscale ** 2

            numpoints = self.get_numpoints(legend)
            if numpoints < 4:
                # Few points: show mean, max, min (truncated to numpoints).
                sizes = [.5 * (size_max + size_min), size_max,
                         size_min][:numpoints]
            else:
                # Many points: interpolate linearly between min and max.
                rng = (size_max - size_min)
                sizes = rng * np.linspace(0, 1, numpoints) + size_min
        else:
            sizes = self._sizes

        return sizes

    def update_prop(self, legend_handle, orig_handle, legend):

        self._update_prop(legend_handle, orig_handle)

        legend_handle.set_figure(legend.get_figure(root=False))
        # legend._set_artist_props(legend_handle)
        legend_handle.set_clip_box(None)
        legend_handle.set_clip_path(None)

    def create_collection(self, orig_handle, sizes, offsets, offset_transform):
        # Instantiate a new collection of the same concrete type as the
        # original handle.
        return type(orig_handle)(
            orig_handle.get_numsides(),
            rotation=orig_handle.get_rotation(), sizes=sizes,
            offsets=offsets, offset_transform=offset_transform,
        )

    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize,
                       trans):
        # docstring inherited
        xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
                                             width, height, fontsize)

        ydata = self.get_ydata(legend, xdescent, ydescent,
                               width, height, fontsize)

        sizes = self.get_sizes(legend, orig_handle, xdescent, ydescent,
                               width, height, fontsize)

        p = self.create_collection(
            orig_handle, sizes,
            offsets=list(zip(xdata_marker, ydata)), offset_transform=trans)

        self.update_prop(p, orig_handle, legend)
        p.set_offset_transform(trans)
        return [p]
|
| 501 |
+
|
| 502 |
+
|
| 503 |
+
class HandlerPathCollection(HandlerRegularPolyCollection):
    r"""Handler for `.PathCollection`\s, which are used by `~.Axes.scatter`."""

    def create_collection(self, orig_handle, sizes, offsets, offset_transform):
        # Reuse only the first path of the original collection for the
        # legend key.
        return type(orig_handle)(
            [orig_handle.get_paths()[0]], sizes=sizes,
            offsets=offsets, offset_transform=offset_transform,
            )
|
| 511 |
+
|
| 512 |
+
|
| 513 |
+
class HandlerCircleCollection(HandlerRegularPolyCollection):
    r"""Handler for `.CircleCollection`\s."""

    def create_collection(self, orig_handle, sizes, offsets, offset_transform):
        # CircleCollection takes sizes positionally (no numsides/rotation).
        return type(orig_handle)(
            sizes, offsets=offsets, offset_transform=offset_transform)
|
| 519 |
+
|
| 520 |
+
|
| 521 |
+
class HandlerErrorbar(HandlerLine2D):
    """Handler for Errorbars."""

    def __init__(self, xerr_size=0.5, yerr_size=None,
                 marker_pad=0.3, numpoints=None, **kwargs):
        # *xerr_size*/*yerr_size* are half-lengths of the error bars in
        # fractions of the fontsize (see `get_err_size`); *yerr_size*
        # defaults to *xerr_size* when None.

        self._xerr_size = xerr_size
        self._yerr_size = yerr_size

        super().__init__(marker_pad=marker_pad, numpoints=numpoints, **kwargs)

    def get_err_size(self, legend, xdescent, ydescent,
                     width, height, fontsize):
        # Convert the fractional error-bar sizes to pixels.
        xerr_size = self._xerr_size * fontsize

        if self._yerr_size is None:
            yerr_size = xerr_size
        else:
            yerr_size = self._yerr_size * fontsize

        return xerr_size, yerr_size

    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize,
                       trans):
        # docstring inherited
        # orig_handle is an ErrorbarContainer-like tuple of
        # (data line, cap lines, bar line collections).
        plotlines, caplines, barlinecols = orig_handle

        xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
                                             width, height, fontsize)

        ydata = np.full_like(xdata, (height - ydescent) / 2)
        legline = Line2D(xdata, ydata)

        xdata_marker = np.asarray(xdata_marker)
        ydata_marker = np.asarray(ydata[:len(xdata_marker)])

        xerr_size, yerr_size = self.get_err_size(legend, xdescent, ydescent,
                                                 width, height, fontsize)

        legline_marker = Line2D(xdata_marker, ydata_marker)

        # when plotlines are None (only errorbars are drawn), we just
        # make legline invisible.
        if plotlines is None:
            legline.set_visible(False)
            legline_marker.set_visible(False)
        else:
            self.update_prop(legline, plotlines, legend)

            # legline draws only the line; legline_marker only the markers.
            legline.set_drawstyle('default')
            legline.set_marker('none')

            self.update_prop(legline_marker, plotlines, legend)
            legline_marker.set_linestyle('None')

            if legend.markerscale != 1:
                newsz = legline_marker.get_markersize() * legend.markerscale
                legline_marker.set_markersize(newsz)

        handle_barlinecols = []
        handle_caplines = []

        if orig_handle.has_xerr:
            # Horizontal error bars plus "|" caps at each end.
            verts = [((x - xerr_size, y), (x + xerr_size, y))
                     for x, y in zip(xdata_marker, ydata_marker)]
            coll = mcoll.LineCollection(verts)
            self.update_prop(coll, barlinecols[0], legend)
            handle_barlinecols.append(coll)

            if caplines:
                capline_left = Line2D(xdata_marker - xerr_size, ydata_marker)
                capline_right = Line2D(xdata_marker + xerr_size, ydata_marker)
                self.update_prop(capline_left, caplines[0], legend)
                self.update_prop(capline_right, caplines[0], legend)
                capline_left.set_marker("|")
                capline_right.set_marker("|")

                handle_caplines.append(capline_left)
                handle_caplines.append(capline_right)

        if orig_handle.has_yerr:
            # Vertical error bars plus "_" caps at each end.
            verts = [((x, y - yerr_size), (x, y + yerr_size))
                     for x, y in zip(xdata_marker, ydata_marker)]
            coll = mcoll.LineCollection(verts)
            self.update_prop(coll, barlinecols[0], legend)
            handle_barlinecols.append(coll)

            if caplines:
                capline_left = Line2D(xdata_marker, ydata_marker - yerr_size)
                capline_right = Line2D(xdata_marker, ydata_marker + yerr_size)
                self.update_prop(capline_left, caplines[0], legend)
                self.update_prop(capline_right, caplines[0], legend)
                capline_left.set_marker("_")
                capline_right.set_marker("_")

                handle_caplines.append(capline_left)
                handle_caplines.append(capline_right)

        artists = [
            *handle_barlinecols, *handle_caplines, legline, legline_marker,
        ]
        for artist in artists:
            artist.set_transform(trans)
        return artists
|
| 626 |
+
|
| 627 |
+
|
| 628 |
+
class HandlerStem(HandlerNpointsYoffsets):
    """
    Handler for plots produced by `~.Axes.stem`.
    """

    def __init__(self, marker_pad=0.3, numpoints=None,
                 bottom=None, yoffsets=None, **kwargs):
        """
        Parameters
        ----------
        marker_pad : float, default: 0.3
            Padding between points in legend entry.
        numpoints : int, optional
            Number of points to show in legend entry.
        bottom : float, optional
            The y-position of the stem bases and the baseline in the legend
            entry; defaults to 0.
        yoffsets : array of floats, optional
            Length *numpoints* list of y offsets for each point in
            legend entry.
        **kwargs
            Keyword arguments forwarded to `.HandlerNpointsYoffsets`.
        """
        super().__init__(marker_pad=marker_pad, numpoints=numpoints,
                         yoffsets=yoffsets, **kwargs)
        self._bottom = bottom

    def get_ydata(self, legend, xdescent, ydescent, width, height, fontsize):
        # Remap the legend's scatter offsets into the upper half of the
        # handle box unless explicit offsets were given.
        if self._yoffsets is None:
            ydata = height * (0.5 * legend._scatteryoffsets + 0.5)
        else:
            ydata = height * np.asarray(self._yoffsets)

        return ydata

    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize,
                       trans):
        # docstring inherited
        markerline, stemlines, baseline = orig_handle
        # Check to see if the stemcontainer is storing lines as a list or a
        # LineCollection. Eventually using a list will be removed, and this
        # logic can also be removed.
        using_linecoll = isinstance(stemlines, mcoll.LineCollection)

        xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
                                             width, height, fontsize)

        ydata = self.get_ydata(legend, xdescent, ydescent,
                               width, height, fontsize)

        if self._bottom is None:
            bottom = 0.
        else:
            bottom = self._bottom

        leg_markerline = Line2D(xdata_marker, ydata[:len(xdata_marker)])
        self.update_prop(leg_markerline, markerline, legend)

        # One vertical stem per marker, from the bottom up to the point.
        leg_stemlines = [Line2D([x, x], [bottom, y])
                         for x, y in zip(xdata_marker, ydata)]

        if using_linecoll:
            # change the function used by update_prop() from the default
            # to one that handles LineCollection
            with cbook._setattr_cm(
                    self, _update_prop_func=self._copy_collection_props):
                for line in leg_stemlines:
                    self.update_prop(line, stemlines, legend)

        else:
            for lm, m in zip(leg_stemlines, stemlines):
                self.update_prop(lm, m, legend)

        leg_baseline = Line2D([np.min(xdata), np.max(xdata)],
                              [bottom, bottom])
        self.update_prop(leg_baseline, baseline, legend)

        artists = [*leg_stemlines, leg_baseline, leg_markerline]
        for artist in artists:
            artist.set_transform(trans)
        return artists

    def _copy_collection_props(self, legend_handle, orig_handle):
        """
        Copy properties from the `.LineCollection` *orig_handle* to the
        `.Line2D` *legend_handle*.
        """
        legend_handle.set_color(orig_handle.get_color()[0])
        legend_handle.set_linestyle(orig_handle.get_linestyle()[0])
|
| 717 |
+
|
| 718 |
+
|
| 719 |
+
class HandlerTuple(HandlerBase):
    """
    Handler for Tuple.
    """

    def __init__(self, ndivide=1, pad=None, **kwargs):
        """
        Parameters
        ----------
        ndivide : int or None, default: 1
            The number of sections to divide the legend area into.  If None,
            use the length of the input tuple.
        pad : float, default: :rc:`legend.borderpad`
            Padding in units of fraction of font size.
        **kwargs
            Keyword arguments forwarded to `.HandlerBase`.
        """
        self._ndivide = ndivide
        self._pad = pad
        super().__init__(**kwargs)

    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize,
                       trans):
        # docstring inherited
        handler_map = legend.get_legend_handler_map()

        if self._ndivide is None:
            ndivide = len(orig_handle)
        else:
            ndivide = self._ndivide

        if self._pad is None:
            pad = legend.borderpad * fontsize
        else:
            pad = self._pad * fontsize

        if ndivide > 1:
            # Split the width into ndivide padded sections.
            width = (width - pad * (ndivide - 1)) / ndivide

        # xdescent for each section; cycle so handles wrap around when
        # there are more handles than sections.
        xds_cycle = cycle(xdescent - (width + pad) * np.arange(ndivide))

        a_list = []
        for handle1 in orig_handle:
            # Delegate each sub-handle to its own registered handler.
            handler = legend.get_legend_handler(handler_map, handle1)
            _a_list = handler.create_artists(
                legend, handle1,
                next(xds_cycle), ydescent, width, height, fontsize, trans)
            a_list.extend(_a_list)

        return a_list
|
| 770 |
+
|
| 771 |
+
|
| 772 |
+
class HandlerPolyCollection(HandlerBase):
    """
    Handler for `.PolyCollection` used in `~.Axes.fill_between` and
    `~.Axes.stackplot`.
    """
    def _update_prop(self, legend_handle, orig_handle):
        def first_color(colors):
            # Empty color array -> fully transparent.
            if colors.size == 0:
                return (0, 0, 0, 0)
            return tuple(colors[0])

        def get_first(prop_array):
            # First element of a property array, or None when empty.
            if len(prop_array):
                return prop_array[0]
            else:
                return None

        # orig_handle is a PolyCollection and legend_handle is a Patch.
        # Directly set Patch color attributes (must be RGBA tuples).
        legend_handle._facecolor = first_color(orig_handle.get_facecolor())
        legend_handle._edgecolor = first_color(orig_handle.get_edgecolor())
        legend_handle._original_facecolor = orig_handle._original_facecolor
        legend_handle._original_edgecolor = orig_handle._original_edgecolor
        legend_handle._fill = orig_handle.get_fill()
        legend_handle._hatch = orig_handle.get_hatch()
        # Hatch color is anomalous in having no getters and setters.
        legend_handle._hatch_color = orig_handle._hatch_color
        # Setters are fine for the remaining attributes.
        legend_handle.set_linewidth(get_first(orig_handle.get_linewidths()))
        legend_handle.set_linestyle(get_first(orig_handle.get_linestyles()))
        legend_handle.set_transform(get_first(orig_handle.get_transforms()))
        legend_handle.set_figure(orig_handle.get_figure())
        # Alpha is already taken into account by the color attributes.

    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize, trans):
        # docstring inherited
        p = Rectangle(xy=(-xdescent, -ydescent),
                      width=width, height=height)
        self.update_prop(p, orig_handle, legend)
        p.set_transform(trans)
        return [p]
|
llava_video/lib/python3.10/site-packages/matplotlib/rcsetup.pyi
ADDED
|
@@ -0,0 +1,159 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from cycler import Cycler
|
| 2 |
+
|
| 3 |
+
from collections.abc import Callable, Iterable
|
| 4 |
+
from typing import Any, Literal, TypeVar
|
| 5 |
+
from matplotlib.typing import ColorType, LineStyleType, MarkEveryType
|
| 6 |
+
|
| 7 |
+
interactive_bk: list[str]
|
| 8 |
+
non_interactive_bk: list[str]
|
| 9 |
+
all_backends: list[str]
|
| 10 |
+
|
| 11 |
+
_T = TypeVar("_T")
|
| 12 |
+
|
| 13 |
+
def _listify_validator(s: Callable[[Any], _T]) -> Callable[[Any], list[_T]]: ...
|
| 14 |
+
|
| 15 |
+
class ValidateInStrings:
|
| 16 |
+
key: str
|
| 17 |
+
ignorecase: bool
|
| 18 |
+
valid: dict[str, str]
|
| 19 |
+
def __init__(
|
| 20 |
+
self,
|
| 21 |
+
key: str,
|
| 22 |
+
valid: Iterable[str],
|
| 23 |
+
ignorecase: bool = ...,
|
| 24 |
+
*,
|
| 25 |
+
_deprecated_since: str | None = ...
|
| 26 |
+
) -> None: ...
|
| 27 |
+
def __call__(self, s: Any) -> str: ...
|
| 28 |
+
|
| 29 |
+
def validate_any(s: Any) -> Any: ...
|
| 30 |
+
def validate_anylist(s: Any) -> list[Any]: ...
|
| 31 |
+
def validate_bool(b: Any) -> bool: ...
|
| 32 |
+
def validate_axisbelow(s: Any) -> bool | Literal["line"]: ...
|
| 33 |
+
def validate_dpi(s: Any) -> Literal["figure"] | float: ...
|
| 34 |
+
def validate_string(s: Any) -> str: ...
|
| 35 |
+
def validate_string_or_None(s: Any) -> str | None: ...
|
| 36 |
+
def validate_stringlist(s: Any) -> list[str]: ...
|
| 37 |
+
def validate_int(s: Any) -> int: ...
|
| 38 |
+
def validate_int_or_None(s: Any) -> int | None: ...
|
| 39 |
+
def validate_float(s: Any) -> float: ...
|
| 40 |
+
def validate_float_or_None(s: Any) -> float | None: ...
|
| 41 |
+
def validate_floatlist(s: Any) -> list[float]: ...
|
| 42 |
+
def _validate_marker(s: Any) -> int | str: ...
|
| 43 |
+
def _validate_markerlist(s: Any) -> list[int | str]: ...
|
| 44 |
+
def validate_fonttype(s: Any) -> int: ...
|
| 45 |
+
|
| 46 |
+
_auto_backend_sentinel: object
|
| 47 |
+
|
| 48 |
+
def validate_backend(s: Any) -> str: ...
|
| 49 |
+
def validate_color_or_inherit(s: Any) -> Literal["inherit"] | ColorType: ...
|
| 50 |
+
def validate_color_or_auto(s: Any) -> ColorType | Literal["auto"]: ...
|
| 51 |
+
def validate_color_for_prop_cycle(s: Any) -> ColorType: ...
|
| 52 |
+
def validate_color(s: Any) -> ColorType: ...
|
| 53 |
+
def validate_colorlist(s: Any) -> list[ColorType]: ...
|
| 54 |
+
def _validate_color_or_linecolor(
|
| 55 |
+
s: Any,
|
| 56 |
+
) -> ColorType | Literal["linecolor", "markerfacecolor", "markeredgecolor"] | None: ...
|
| 57 |
+
def validate_aspect(s: Any) -> Literal["auto", "equal"] | float: ...
|
| 58 |
+
def validate_fontsize_None(
|
| 59 |
+
s: Any,
|
| 60 |
+
) -> Literal[
|
| 61 |
+
"xx-small",
|
| 62 |
+
"x-small",
|
| 63 |
+
"small",
|
| 64 |
+
"medium",
|
| 65 |
+
"large",
|
| 66 |
+
"x-large",
|
| 67 |
+
"xx-large",
|
| 68 |
+
"smaller",
|
| 69 |
+
"larger",
|
| 70 |
+
] | float | None: ...
|
| 71 |
+
def validate_fontsize(
|
| 72 |
+
s: Any,
|
| 73 |
+
) -> Literal[
|
| 74 |
+
"xx-small",
|
| 75 |
+
"x-small",
|
| 76 |
+
"small",
|
| 77 |
+
"medium",
|
| 78 |
+
"large",
|
| 79 |
+
"x-large",
|
| 80 |
+
"xx-large",
|
| 81 |
+
"smaller",
|
| 82 |
+
"larger",
|
| 83 |
+
] | float: ...
|
| 84 |
+
def validate_fontsizelist(
|
| 85 |
+
s: Any,
|
| 86 |
+
) -> list[
|
| 87 |
+
Literal[
|
| 88 |
+
"xx-small",
|
| 89 |
+
"x-small",
|
| 90 |
+
"small",
|
| 91 |
+
"medium",
|
| 92 |
+
"large",
|
| 93 |
+
"x-large",
|
| 94 |
+
"xx-large",
|
| 95 |
+
"smaller",
|
| 96 |
+
"larger",
|
| 97 |
+
]
|
| 98 |
+
| float
|
| 99 |
+
]: ...
|
| 100 |
+
def validate_fontweight(
|
| 101 |
+
s: Any,
|
| 102 |
+
) -> Literal[
|
| 103 |
+
"ultralight",
|
| 104 |
+
"light",
|
| 105 |
+
"normal",
|
| 106 |
+
"regular",
|
| 107 |
+
"book",
|
| 108 |
+
"medium",
|
| 109 |
+
"roman",
|
| 110 |
+
"semibold",
|
| 111 |
+
"demibold",
|
| 112 |
+
"demi",
|
| 113 |
+
"bold",
|
| 114 |
+
"heavy",
|
| 115 |
+
"extra bold",
|
| 116 |
+
"black",
|
| 117 |
+
] | int: ...
|
| 118 |
+
def validate_fontstretch(
|
| 119 |
+
s: Any,
|
| 120 |
+
) -> Literal[
|
| 121 |
+
"ultra-condensed",
|
| 122 |
+
"extra-condensed",
|
| 123 |
+
"condensed",
|
| 124 |
+
"semi-condensed",
|
| 125 |
+
"normal",
|
| 126 |
+
"semi-expanded",
|
| 127 |
+
"expanded",
|
| 128 |
+
"extra-expanded",
|
| 129 |
+
"ultra-expanded",
|
| 130 |
+
] | int: ...
|
| 131 |
+
def validate_font_properties(s: Any) -> dict[str, Any]: ...
|
| 132 |
+
def validate_whiskers(s: Any) -> list[float] | float: ...
|
| 133 |
+
def validate_ps_distiller(s: Any) -> None | Literal["ghostscript", "xpdf"]: ...
|
| 134 |
+
|
| 135 |
+
validate_fillstyle: ValidateInStrings
|
| 136 |
+
|
| 137 |
+
def validate_fillstylelist(
|
| 138 |
+
s: Any,
|
| 139 |
+
) -> list[Literal["full", "left", "right", "bottom", "top", "none"]]: ...
|
| 140 |
+
def validate_markevery(s: Any) -> MarkEveryType: ...
|
| 141 |
+
def _validate_linestyle(s: Any) -> LineStyleType: ...
|
| 142 |
+
def validate_markeverylist(s: Any) -> list[MarkEveryType]: ...
|
| 143 |
+
def validate_bbox(s: Any) -> Literal["tight", "standard"] | None: ...
|
| 144 |
+
def validate_sketch(s: Any) -> None | tuple[float, float, float]: ...
|
| 145 |
+
def validate_hatch(s: Any) -> str: ...
|
| 146 |
+
def validate_hatchlist(s: Any) -> list[str]: ...
|
| 147 |
+
def validate_dashlist(s: Any) -> list[list[float]]: ...
|
| 148 |
+
|
| 149 |
+
# TODO: copy cycler overloads?
|
| 150 |
+
def cycler(*args, **kwargs) -> Cycler: ...
|
| 151 |
+
def validate_cycler(s: Any) -> Cycler: ...
|
| 152 |
+
def validate_hist_bins(
|
| 153 |
+
s: Any,
|
| 154 |
+
) -> Literal["auto", "sturges", "fd", "doane", "scott", "rice", "sqrt"] | int | list[
|
| 155 |
+
float
|
| 156 |
+
]: ...
|
| 157 |
+
|
| 158 |
+
# At runtime is added in __init__.py
|
| 159 |
+
defaultParams: dict[str, Any]
|
llava_video/lib/python3.10/site-packages/matplotlib/widgets.pyi
ADDED
|
@@ -0,0 +1,488 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .artist import Artist
|
| 2 |
+
from .axes import Axes
|
| 3 |
+
from .backend_bases import FigureCanvasBase, Event, MouseEvent, MouseButton
|
| 4 |
+
from .collections import LineCollection
|
| 5 |
+
from .figure import Figure
|
| 6 |
+
from .lines import Line2D
|
| 7 |
+
from .patches import Polygon, Rectangle
|
| 8 |
+
from .text import Text
|
| 9 |
+
|
| 10 |
+
import PIL.Image
|
| 11 |
+
|
| 12 |
+
from collections.abc import Callable, Collection, Iterable, Sequence
|
| 13 |
+
from typing import Any, Literal
|
| 14 |
+
from numpy.typing import ArrayLike
|
| 15 |
+
from .typing import ColorType
|
| 16 |
+
import numpy as np
|
| 17 |
+
|
| 18 |
+
class LockDraw:
|
| 19 |
+
def __init__(self) -> None: ...
|
| 20 |
+
def __call__(self, o: Any) -> None: ...
|
| 21 |
+
def release(self, o: Any) -> None: ...
|
| 22 |
+
def available(self, o: Any) -> bool: ...
|
| 23 |
+
def isowner(self, o: Any) -> bool: ...
|
| 24 |
+
def locked(self) -> bool: ...
|
| 25 |
+
|
| 26 |
+
class Widget:
|
| 27 |
+
drawon: bool
|
| 28 |
+
eventson: bool
|
| 29 |
+
active: bool
|
| 30 |
+
def set_active(self, active: bool) -> None: ...
|
| 31 |
+
def get_active(self) -> None: ...
|
| 32 |
+
def ignore(self, event) -> bool: ...
|
| 33 |
+
|
| 34 |
+
class AxesWidget(Widget):
|
| 35 |
+
ax: Axes
|
| 36 |
+
def __init__(self, ax: Axes) -> None: ...
|
| 37 |
+
@property
|
| 38 |
+
def canvas(self) -> FigureCanvasBase | None: ...
|
| 39 |
+
def connect_event(self, event: Event, callback: Callable) -> None: ...
|
| 40 |
+
def disconnect_events(self) -> None: ...
|
| 41 |
+
|
| 42 |
+
class Button(AxesWidget):
|
| 43 |
+
label: Text
|
| 44 |
+
color: ColorType
|
| 45 |
+
hovercolor: ColorType
|
| 46 |
+
def __init__(
|
| 47 |
+
self,
|
| 48 |
+
ax: Axes,
|
| 49 |
+
label: str,
|
| 50 |
+
image: ArrayLike | PIL.Image.Image | None = ...,
|
| 51 |
+
color: ColorType = ...,
|
| 52 |
+
hovercolor: ColorType = ...,
|
| 53 |
+
*,
|
| 54 |
+
useblit: bool = ...
|
| 55 |
+
) -> None: ...
|
| 56 |
+
def on_clicked(self, func: Callable[[Event], Any]) -> int: ...
|
| 57 |
+
def disconnect(self, cid: int) -> None: ...
|
| 58 |
+
|
| 59 |
+
class SliderBase(AxesWidget):
|
| 60 |
+
orientation: Literal["horizontal", "vertical"]
|
| 61 |
+
closedmin: bool
|
| 62 |
+
closedmax: bool
|
| 63 |
+
valmin: float
|
| 64 |
+
valmax: float
|
| 65 |
+
valstep: float | ArrayLike | None
|
| 66 |
+
drag_active: bool
|
| 67 |
+
valfmt: str
|
| 68 |
+
def __init__(
|
| 69 |
+
self,
|
| 70 |
+
ax: Axes,
|
| 71 |
+
orientation: Literal["horizontal", "vertical"],
|
| 72 |
+
closedmin: bool,
|
| 73 |
+
closedmax: bool,
|
| 74 |
+
valmin: float,
|
| 75 |
+
valmax: float,
|
| 76 |
+
valfmt: str,
|
| 77 |
+
dragging: Slider | None,
|
| 78 |
+
valstep: float | ArrayLike | None,
|
| 79 |
+
) -> None: ...
|
| 80 |
+
def disconnect(self, cid: int) -> None: ...
|
| 81 |
+
def reset(self) -> None: ...
|
| 82 |
+
|
| 83 |
+
class Slider(SliderBase):
|
| 84 |
+
slidermin: Slider | None
|
| 85 |
+
slidermax: Slider | None
|
| 86 |
+
val: float
|
| 87 |
+
valinit: float
|
| 88 |
+
track: Rectangle
|
| 89 |
+
poly: Polygon
|
| 90 |
+
hline: Line2D
|
| 91 |
+
vline: Line2D
|
| 92 |
+
label: Text
|
| 93 |
+
valtext: Text
|
| 94 |
+
def __init__(
|
| 95 |
+
self,
|
| 96 |
+
ax: Axes,
|
| 97 |
+
label: str,
|
| 98 |
+
valmin: float,
|
| 99 |
+
valmax: float,
|
| 100 |
+
*,
|
| 101 |
+
valinit: float = ...,
|
| 102 |
+
valfmt: str | None = ...,
|
| 103 |
+
closedmin: bool = ...,
|
| 104 |
+
closedmax: bool = ...,
|
| 105 |
+
slidermin: Slider | None = ...,
|
| 106 |
+
slidermax: Slider | None = ...,
|
| 107 |
+
dragging: bool = ...,
|
| 108 |
+
valstep: float | ArrayLike | None = ...,
|
| 109 |
+
orientation: Literal["horizontal", "vertical"] = ...,
|
| 110 |
+
initcolor: ColorType = ...,
|
| 111 |
+
track_color: ColorType = ...,
|
| 112 |
+
handle_style: dict[str, Any] | None = ...,
|
| 113 |
+
**kwargs
|
| 114 |
+
) -> None: ...
|
| 115 |
+
def set_val(self, val: float) -> None: ...
|
| 116 |
+
def on_changed(self, func: Callable[[float], Any]) -> int: ...
|
| 117 |
+
|
| 118 |
+
class RangeSlider(SliderBase):
|
| 119 |
+
val: tuple[float, float]
|
| 120 |
+
valinit: tuple[float, float]
|
| 121 |
+
track: Rectangle
|
| 122 |
+
poly: Polygon
|
| 123 |
+
label: Text
|
| 124 |
+
valtext: Text
|
| 125 |
+
def __init__(
|
| 126 |
+
self,
|
| 127 |
+
ax: Axes,
|
| 128 |
+
label: str,
|
| 129 |
+
valmin: float,
|
| 130 |
+
valmax: float,
|
| 131 |
+
*,
|
| 132 |
+
valinit: tuple[float, float] | None = ...,
|
| 133 |
+
valfmt: str | None = ...,
|
| 134 |
+
closedmin: bool = ...,
|
| 135 |
+
closedmax: bool = ...,
|
| 136 |
+
dragging: bool = ...,
|
| 137 |
+
valstep: float | ArrayLike | None = ...,
|
| 138 |
+
orientation: Literal["horizontal", "vertical"] = ...,
|
| 139 |
+
track_color: ColorType = ...,
|
| 140 |
+
handle_style: dict[str, Any] | None = ...,
|
| 141 |
+
**kwargs
|
| 142 |
+
) -> None: ...
|
| 143 |
+
def set_min(self, min: float) -> None: ...
|
| 144 |
+
def set_max(self, max: float) -> None: ...
|
| 145 |
+
def set_val(self, val: ArrayLike) -> None: ...
|
| 146 |
+
def on_changed(self, func: Callable[[tuple[float, float]], Any]) -> int: ...
|
| 147 |
+
|
| 148 |
+
class CheckButtons(AxesWidget):
|
| 149 |
+
labels: list[Text]
|
| 150 |
+
def __init__(
|
| 151 |
+
self,
|
| 152 |
+
ax: Axes,
|
| 153 |
+
labels: Sequence[str],
|
| 154 |
+
actives: Iterable[bool] | None = ...,
|
| 155 |
+
*,
|
| 156 |
+
useblit: bool = ...,
|
| 157 |
+
label_props: dict[str, Any] | None = ...,
|
| 158 |
+
frame_props: dict[str, Any] | None = ...,
|
| 159 |
+
check_props: dict[str, Any] | None = ...,
|
| 160 |
+
) -> None: ...
|
| 161 |
+
def set_label_props(self, props: dict[str, Any]) -> None: ...
|
| 162 |
+
def set_frame_props(self, props: dict[str, Any]) -> None: ...
|
| 163 |
+
def set_check_props(self, props: dict[str, Any]) -> None: ...
|
| 164 |
+
def set_active(self, index: int, state: bool | None = ...) -> None: ... # type: ignore[override]
|
| 165 |
+
def clear(self) -> None: ...
|
| 166 |
+
def get_status(self) -> list[bool]: ...
|
| 167 |
+
def get_checked_labels(self) -> list[str]: ...
|
| 168 |
+
def on_clicked(self, func: Callable[[str | None], Any]) -> int: ...
|
| 169 |
+
def disconnect(self, cid: int) -> None: ...
|
| 170 |
+
|
| 171 |
+
class TextBox(AxesWidget):
|
| 172 |
+
label: Text
|
| 173 |
+
text_disp: Text
|
| 174 |
+
cursor_index: int
|
| 175 |
+
cursor: LineCollection
|
| 176 |
+
color: ColorType
|
| 177 |
+
hovercolor: ColorType
|
| 178 |
+
capturekeystrokes: bool
|
| 179 |
+
def __init__(
|
| 180 |
+
self,
|
| 181 |
+
ax: Axes,
|
| 182 |
+
label: str,
|
| 183 |
+
initial: str = ...,
|
| 184 |
+
*,
|
| 185 |
+
color: ColorType = ...,
|
| 186 |
+
hovercolor: ColorType = ...,
|
| 187 |
+
label_pad: float = ...,
|
| 188 |
+
textalignment: Literal["left", "center", "right"] = ...,
|
| 189 |
+
) -> None: ...
|
| 190 |
+
@property
|
| 191 |
+
def text(self) -> str: ...
|
| 192 |
+
def set_val(self, val: str) -> None: ...
|
| 193 |
+
def begin_typing(self) -> None: ...
|
| 194 |
+
def stop_typing(self) -> None: ...
|
| 195 |
+
def on_text_change(self, func: Callable[[str], Any]) -> int: ...
|
| 196 |
+
def on_submit(self, func: Callable[[str], Any]) -> int: ...
|
| 197 |
+
def disconnect(self, cid: int) -> None: ...
|
| 198 |
+
|
| 199 |
+
class RadioButtons(AxesWidget):
|
| 200 |
+
activecolor: ColorType
|
| 201 |
+
value_selected: str
|
| 202 |
+
labels: list[Text]
|
| 203 |
+
def __init__(
|
| 204 |
+
self,
|
| 205 |
+
ax: Axes,
|
| 206 |
+
labels: Iterable[str],
|
| 207 |
+
active: int = ...,
|
| 208 |
+
activecolor: ColorType | None = ...,
|
| 209 |
+
*,
|
| 210 |
+
useblit: bool = ...,
|
| 211 |
+
label_props: dict[str, Any] | Sequence[dict[str, Any]] | None = ...,
|
| 212 |
+
radio_props: dict[str, Any] | None = ...,
|
| 213 |
+
) -> None: ...
|
| 214 |
+
def set_label_props(self, props: dict[str, Any]) -> None: ...
|
| 215 |
+
def set_radio_props(self, props: dict[str, Any]) -> None: ...
|
| 216 |
+
def set_active(self, index: int) -> None: ...
|
| 217 |
+
def clear(self) -> None: ...
|
| 218 |
+
def on_clicked(self, func: Callable[[str | None], Any]) -> int: ...
|
| 219 |
+
def disconnect(self, cid: int) -> None: ...
|
| 220 |
+
|
| 221 |
+
class SubplotTool(Widget):
|
| 222 |
+
figure: Figure
|
| 223 |
+
targetfig: Figure
|
| 224 |
+
buttonreset: Button
|
| 225 |
+
def __init__(self, targetfig: Figure, toolfig: Figure) -> None: ...
|
| 226 |
+
|
| 227 |
+
class Cursor(AxesWidget):
|
| 228 |
+
visible: bool
|
| 229 |
+
horizOn: bool
|
| 230 |
+
vertOn: bool
|
| 231 |
+
useblit: bool
|
| 232 |
+
lineh: Line2D
|
| 233 |
+
linev: Line2D
|
| 234 |
+
background: Any
|
| 235 |
+
needclear: bool
|
| 236 |
+
def __init__(
|
| 237 |
+
self,
|
| 238 |
+
ax: Axes,
|
| 239 |
+
*,
|
| 240 |
+
horizOn: bool = ...,
|
| 241 |
+
vertOn: bool = ...,
|
| 242 |
+
useblit: bool = ...,
|
| 243 |
+
**lineprops
|
| 244 |
+
) -> None: ...
|
| 245 |
+
def clear(self, event: Event) -> None: ...
|
| 246 |
+
def onmove(self, event: Event) -> None: ...
|
| 247 |
+
|
| 248 |
+
class MultiCursor(Widget):
|
| 249 |
+
axes: Sequence[Axes]
|
| 250 |
+
horizOn: bool
|
| 251 |
+
vertOn: bool
|
| 252 |
+
visible: bool
|
| 253 |
+
useblit: bool
|
| 254 |
+
vlines: list[Line2D]
|
| 255 |
+
hlines: list[Line2D]
|
| 256 |
+
def __init__(
|
| 257 |
+
self,
|
| 258 |
+
canvas: Any,
|
| 259 |
+
axes: Sequence[Axes],
|
| 260 |
+
*,
|
| 261 |
+
useblit: bool = ...,
|
| 262 |
+
horizOn: bool = ...,
|
| 263 |
+
vertOn: bool = ...,
|
| 264 |
+
**lineprops
|
| 265 |
+
) -> None: ...
|
| 266 |
+
def connect(self) -> None: ...
|
| 267 |
+
def disconnect(self) -> None: ...
|
| 268 |
+
def clear(self, event: Event) -> None: ...
|
| 269 |
+
def onmove(self, event: Event) -> None: ...
|
| 270 |
+
|
| 271 |
+
class _SelectorWidget(AxesWidget):
|
| 272 |
+
onselect: Callable[[float, float], Any]
|
| 273 |
+
useblit: bool
|
| 274 |
+
background: Any
|
| 275 |
+
validButtons: list[MouseButton]
|
| 276 |
+
def __init__(
|
| 277 |
+
self,
|
| 278 |
+
ax: Axes,
|
| 279 |
+
onselect: Callable[[float, float], Any] | None = ...,
|
| 280 |
+
useblit: bool = ...,
|
| 281 |
+
button: MouseButton | Collection[MouseButton] | None = ...,
|
| 282 |
+
state_modifier_keys: dict[str, str] | None = ...,
|
| 283 |
+
use_data_coordinates: bool = ...,
|
| 284 |
+
) -> None: ...
|
| 285 |
+
def update_background(self, event: Event) -> None: ...
|
| 286 |
+
def connect_default_events(self) -> None: ...
|
| 287 |
+
def ignore(self, event: Event) -> bool: ...
|
| 288 |
+
def update(self) -> None: ...
|
| 289 |
+
def press(self, event: Event) -> bool: ...
|
| 290 |
+
def release(self, event: Event) -> bool: ...
|
| 291 |
+
def onmove(self, event: Event) -> bool: ...
|
| 292 |
+
def on_scroll(self, event: Event) -> None: ...
|
| 293 |
+
def on_key_press(self, event: Event) -> None: ...
|
| 294 |
+
def on_key_release(self, event: Event) -> None: ...
|
| 295 |
+
def set_visible(self, visible: bool) -> None: ...
|
| 296 |
+
def get_visible(self) -> bool: ...
|
| 297 |
+
def clear(self) -> None: ...
|
| 298 |
+
@property
|
| 299 |
+
def artists(self) -> tuple[Artist]: ...
|
| 300 |
+
def set_props(self, **props) -> None: ...
|
| 301 |
+
def set_handle_props(self, **handle_props) -> None: ...
|
| 302 |
+
def add_state(self, state: str) -> None: ...
|
| 303 |
+
def remove_state(self, state: str) -> None: ...
|
| 304 |
+
|
| 305 |
+
class SpanSelector(_SelectorWidget):
|
| 306 |
+
snap_values: ArrayLike | None
|
| 307 |
+
onmove_callback: Callable[[float, float], Any]
|
| 308 |
+
minspan: float
|
| 309 |
+
grab_range: float
|
| 310 |
+
drag_from_anywhere: bool
|
| 311 |
+
ignore_event_outside: bool
|
| 312 |
+
def __init__(
|
| 313 |
+
self,
|
| 314 |
+
ax: Axes,
|
| 315 |
+
onselect: Callable[[float, float], Any],
|
| 316 |
+
direction: Literal["horizontal", "vertical"],
|
| 317 |
+
*,
|
| 318 |
+
minspan: float = ...,
|
| 319 |
+
useblit: bool = ...,
|
| 320 |
+
props: dict[str, Any] | None = ...,
|
| 321 |
+
onmove_callback: Callable[[float, float], Any] | None = ...,
|
| 322 |
+
interactive: bool = ...,
|
| 323 |
+
button: MouseButton | Collection[MouseButton] | None = ...,
|
| 324 |
+
handle_props: dict[str, Any] | None = ...,
|
| 325 |
+
grab_range: float = ...,
|
| 326 |
+
state_modifier_keys: dict[str, str] | None = ...,
|
| 327 |
+
drag_from_anywhere: bool = ...,
|
| 328 |
+
ignore_event_outside: bool = ...,
|
| 329 |
+
snap_values: ArrayLike | None = ...,
|
| 330 |
+
) -> None: ...
|
| 331 |
+
def new_axes(
|
| 332 |
+
self,
|
| 333 |
+
ax: Axes,
|
| 334 |
+
*,
|
| 335 |
+
_props: dict[str, Any] | None = ...,
|
| 336 |
+
_init: bool = ...,
|
| 337 |
+
) -> None: ...
|
| 338 |
+
def connect_default_events(self) -> None: ...
|
| 339 |
+
@property
|
| 340 |
+
def direction(self) -> Literal["horizontal", "vertical"]: ...
|
| 341 |
+
@direction.setter
|
| 342 |
+
def direction(self, direction: Literal["horizontal", "vertical"]) -> None: ...
|
| 343 |
+
@property
|
| 344 |
+
def extents(self) -> tuple[float, float]: ...
|
| 345 |
+
@extents.setter
|
| 346 |
+
def extents(self, extents: tuple[float, float]) -> None: ...
|
| 347 |
+
|
| 348 |
+
class ToolLineHandles:
|
| 349 |
+
ax: Axes
|
| 350 |
+
def __init__(
|
| 351 |
+
self,
|
| 352 |
+
ax: Axes,
|
| 353 |
+
positions: ArrayLike,
|
| 354 |
+
direction: Literal["horizontal", "vertical"],
|
| 355 |
+
*,
|
| 356 |
+
line_props: dict[str, Any] | None = ...,
|
| 357 |
+
useblit: bool = ...,
|
| 358 |
+
) -> None: ...
|
| 359 |
+
@property
|
| 360 |
+
def artists(self) -> tuple[Line2D]: ...
|
| 361 |
+
@property
|
| 362 |
+
def positions(self) -> list[float]: ...
|
| 363 |
+
@property
|
| 364 |
+
def direction(self) -> Literal["horizontal", "vertical"]: ...
|
| 365 |
+
def set_data(self, positions: ArrayLike) -> None: ...
|
| 366 |
+
def set_visible(self, value: bool) -> None: ...
|
| 367 |
+
def set_animated(self, value: bool) -> None: ...
|
| 368 |
+
def remove(self) -> None: ...
|
| 369 |
+
def closest(self, x: float, y: float) -> tuple[int, float]: ...
|
| 370 |
+
|
| 371 |
+
class ToolHandles:
|
| 372 |
+
ax: Axes
|
| 373 |
+
def __init__(
|
| 374 |
+
self,
|
| 375 |
+
ax: Axes,
|
| 376 |
+
x: ArrayLike,
|
| 377 |
+
y: ArrayLike,
|
| 378 |
+
*,
|
| 379 |
+
marker: str = ...,
|
| 380 |
+
marker_props: dict[str, Any] | None = ...,
|
| 381 |
+
useblit: bool = ...,
|
| 382 |
+
) -> None: ...
|
| 383 |
+
@property
|
| 384 |
+
def x(self) -> ArrayLike: ...
|
| 385 |
+
@property
|
| 386 |
+
def y(self) -> ArrayLike: ...
|
| 387 |
+
@property
|
| 388 |
+
def artists(self) -> tuple[Line2D]: ...
|
| 389 |
+
def set_data(self, pts: ArrayLike, y: ArrayLike | None = ...) -> None: ...
|
| 390 |
+
def set_visible(self, val: bool) -> None: ...
|
| 391 |
+
def set_animated(self, val: bool) -> None: ...
|
| 392 |
+
def closest(self, x: float, y: float) -> tuple[int, float]: ...
|
| 393 |
+
|
| 394 |
+
class RectangleSelector(_SelectorWidget):
|
| 395 |
+
drag_from_anywhere: bool
|
| 396 |
+
ignore_event_outside: bool
|
| 397 |
+
minspanx: float
|
| 398 |
+
minspany: float
|
| 399 |
+
spancoords: Literal["data", "pixels"]
|
| 400 |
+
grab_range: float
|
| 401 |
+
def __init__(
|
| 402 |
+
self,
|
| 403 |
+
ax: Axes,
|
| 404 |
+
onselect: Callable[[MouseEvent, MouseEvent], Any] | None = ...,
|
| 405 |
+
*,
|
| 406 |
+
minspanx: float = ...,
|
| 407 |
+
minspany: float = ...,
|
| 408 |
+
useblit: bool = ...,
|
| 409 |
+
props: dict[str, Any] | None = ...,
|
| 410 |
+
spancoords: Literal["data", "pixels"] = ...,
|
| 411 |
+
button: MouseButton | Collection[MouseButton] | None = ...,
|
| 412 |
+
grab_range: float = ...,
|
| 413 |
+
handle_props: dict[str, Any] | None = ...,
|
| 414 |
+
interactive: bool = ...,
|
| 415 |
+
state_modifier_keys: dict[str, str] | None = ...,
|
| 416 |
+
drag_from_anywhere: bool = ...,
|
| 417 |
+
ignore_event_outside: bool = ...,
|
| 418 |
+
use_data_coordinates: bool = ...,
|
| 419 |
+
) -> None: ...
|
| 420 |
+
@property
|
| 421 |
+
def corners(self) -> tuple[np.ndarray, np.ndarray]: ...
|
| 422 |
+
@property
|
| 423 |
+
def edge_centers(self) -> tuple[np.ndarray, np.ndarray]: ...
|
| 424 |
+
@property
|
| 425 |
+
def center(self) -> tuple[float, float]: ...
|
| 426 |
+
@property
|
| 427 |
+
def extents(self) -> tuple[float, float, float, float]: ...
|
| 428 |
+
@extents.setter
|
| 429 |
+
def extents(self, extents: tuple[float, float, float, float]) -> None: ...
|
| 430 |
+
@property
|
| 431 |
+
def rotation(self) -> float: ...
|
| 432 |
+
@rotation.setter
|
| 433 |
+
def rotation(self, value: float) -> None: ...
|
| 434 |
+
@property
|
| 435 |
+
def geometry(self) -> np.ndarray: ...
|
| 436 |
+
|
| 437 |
+
class EllipseSelector(RectangleSelector): ...
|
| 438 |
+
|
| 439 |
+
class LassoSelector(_SelectorWidget):
|
| 440 |
+
verts: None | list[tuple[float, float]]
|
| 441 |
+
def __init__(
|
| 442 |
+
self,
|
| 443 |
+
ax: Axes,
|
| 444 |
+
onselect: Callable[[list[tuple[float, float]]], Any] | None = ...,
|
| 445 |
+
*,
|
| 446 |
+
useblit: bool = ...,
|
| 447 |
+
props: dict[str, Any] | None = ...,
|
| 448 |
+
button: MouseButton | Collection[MouseButton] | None = ...,
|
| 449 |
+
) -> None: ...
|
| 450 |
+
|
| 451 |
+
class PolygonSelector(_SelectorWidget):
|
| 452 |
+
grab_range: float
|
| 453 |
+
def __init__(
|
| 454 |
+
self,
|
| 455 |
+
ax: Axes,
|
| 456 |
+
onselect: Callable[[ArrayLike, ArrayLike], Any] | None = ...,
|
| 457 |
+
*,
|
| 458 |
+
useblit: bool = ...,
|
| 459 |
+
props: dict[str, Any] | None = ...,
|
| 460 |
+
handle_props: dict[str, Any] | None = ...,
|
| 461 |
+
grab_range: float = ...,
|
| 462 |
+
draw_bounding_box: bool = ...,
|
| 463 |
+
box_handle_props: dict[str, Any] | None = ...,
|
| 464 |
+
box_props: dict[str, Any] | None = ...
|
| 465 |
+
) -> None: ...
|
| 466 |
+
def onmove(self, event: Event) -> bool: ...
|
| 467 |
+
@property
|
| 468 |
+
def verts(self) -> list[tuple[float, float]]: ...
|
| 469 |
+
@verts.setter
|
| 470 |
+
def verts(self, xys: Sequence[tuple[float, float]]) -> None: ...
|
| 471 |
+
|
| 472 |
+
class Lasso(AxesWidget):
|
| 473 |
+
useblit: bool
|
| 474 |
+
background: Any
|
| 475 |
+
verts: list[tuple[float, float]] | None
|
| 476 |
+
line: Line2D
|
| 477 |
+
callback: Callable[[list[tuple[float, float]]], Any]
|
| 478 |
+
def __init__(
|
| 479 |
+
self,
|
| 480 |
+
ax: Axes,
|
| 481 |
+
xy: tuple[float, float],
|
| 482 |
+
callback: Callable[[list[tuple[float, float]]], Any],
|
| 483 |
+
*,
|
| 484 |
+
useblit: bool = ...,
|
| 485 |
+
props: dict[str, Any] | None = ...,
|
| 486 |
+
) -> None: ...
|
| 487 |
+
def onrelease(self, event: Event) -> None: ...
|
| 488 |
+
def onmove(self, event: Event) -> None: ...
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_update_scale_meta_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace meta {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor & _amp_update_scale_(at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval);
|
| 21 |
+
|
| 22 |
+
} // namespace meta
|
| 23 |
+
} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_no_update_ops.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

// @generated by torchgen/gen.py from Operator.h
// Dispatcher-level metadata for aten::_batch_norm_no_update: each struct
// carries the operator's registered name/overload/schema string plus the
// static call()/redispatch() entry points used by the PyTorch dispatcher.

#include <tuple>
#include <vector>

// Forward declarations of any types needed in the operator signatures.
// We can't directly include these classes because it will cause circular include dependencies.
// This file is included by TensorBody.h, which defines the Tensor class.
#include <ATen/core/ATen_fwd.h>

namespace at {
namespace _ops {


// Functional overload: returns a tuple of four tensors.
struct TORCH_API _batch_norm_no_update {
  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, double, double);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_batch_norm_no_update")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_batch_norm_no_update(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor)")
  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, double eps);
  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, double eps);
};

// "out" overload: writes into the four caller-provided out tensors and
// returns references to them.
struct TORCH_API _batch_norm_no_update_out {
  using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, double, double, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_batch_norm_no_update")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_batch_norm_no_update.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, float momentum, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))")
  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3);
  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3);
};

}} // namespace at::_ops
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_backward.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

// @generated by torchgen/gen.py from Function.h
// Public C++ API wrappers for aten::_cdist_backward. Each inline function
// forwards to the corresponding at::_ops entry point.

#include <ATen/Context.h>
#include <ATen/DeviceGuard.h>
#include <ATen/TensorUtils.h>
#include <ATen/TracerMode.h>
#include <ATen/core/Generator.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <optional>



#include <ATen/ops/_cdist_backward_ops.h>

namespace at {


// aten::_cdist_backward(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist) -> Tensor
inline at::Tensor _cdist_backward(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist) {
    return at::_ops::_cdist_backward::call(grad, x1, x2, p, cdist);
}

// Out-variant: `_out` takes the out tensor first; `_outf` takes it last.
// aten::_cdist_backward.out(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & _cdist_backward_out(at::Tensor & out, const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist) {
    return at::_ops::_cdist_backward_out::call(grad, x1, x2, p, cdist, out);
}
// aten::_cdist_backward.out(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & _cdist_backward_outf(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist, at::Tensor & out) {
    return at::_ops::_cdist_backward_out::call(grad, x1, x2, p, cdist, out);
}

} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_copy.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

// @generated by torchgen/gen.py from Function.h
// Public C++ API wrappers for aten::_conj_copy. Each inline function
// forwards to the corresponding at::_ops entry point.

#include <ATen/Context.h>
#include <ATen/DeviceGuard.h>
#include <ATen/TensorUtils.h>
#include <ATen/TracerMode.h>
#include <ATen/core/Generator.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <optional>



#include <ATen/ops/_conj_copy_ops.h>

namespace at {


// aten::_conj_copy(Tensor self) -> Tensor
inline at::Tensor _conj_copy(const at::Tensor & self) {
    return at::_ops::_conj_copy::call(self);
}

// Out-variant: `_out` takes the out tensor first; `_outf` takes it last.
// aten::_conj_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & _conj_copy_out(at::Tensor & out, const at::Tensor & self) {
    return at::_ops::_conj_copy_out::call(self, out);
}
// aten::_conj_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & _conj_copy_outf(const at::Tensor & self, at::Tensor & out) {
    return at::_ops::_conj_copy_out::call(self, out);
}

} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_indices_from_coo_to_csr_meta.h
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

// @generated by torchgen/gen.py from NativeMetaFunction.h
// Meta (shape/dtype inference) declaration for the structured kernel
// backing aten::_convert_indices_from_coo_to_csr.

#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <optional>
#include <c10/core/QScheme.h>
#include <ATen/core/Reduction.h>
#include <ATen/TensorIterator.h>
#include <ATen/TensorMeta.h>
#include <tuple>
#include <vector>

namespace at {
namespace meta {

struct TORCH_API structured__convert_indices_from_coo_to_csr : public at::impl::MetaBase {
    // Computes output metadata (sizes/strides/dtype) without running the kernel.
    void meta(const at::Tensor & self, int64_t size, bool out_int32);
};

} // namespace meta
} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_efficient_attention_backward_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
// @generated by torchgen/gen.py from DispatchKeyFunction.h
// CUDA-backend declarations for aten::_efficient_attention_backward.

// NB: The implementing C++ file is RegisterDispatchKey.cpp

// The only #includes we need are for custom classes that have defaults in the C++ API
#include <c10/core/MemoryFormat.h>
#include <c10/core/Scalar.h>
#include <ATen/core/Reduction.h>

// Forward declarations of any types needed in the operator signatures.
// We can't directly include these classes because it will cause circular include dependencies.
// This file is included by TensorBody.h, which defines the Tensor class.
#include <ATen/core/ATen_fwd.h>

namespace at {

namespace cuda {

// Plain-int64 and SymInt variants of the same backward entry point.
TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _efficient_attention_backward(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & bias, const at::Tensor & out, const ::std::optional<at::Tensor> & cu_seqlens_q, const ::std::optional<at::Tensor> & cu_seqlens_k, int64_t max_seqlen_q, int64_t max_seqlen_k, const at::Tensor & logsumexp, double dropout_p, const at::Tensor & philox_seed, const at::Tensor & philox_offset, int64_t custom_mask_type, bool bias_requires_grad, ::std::optional<double> scale=::std::nullopt, ::std::optional<int64_t> num_splits_key=::std::nullopt, ::std::optional<int64_t> window_size=::std::nullopt, bool shared_storage_dqdkdv=false);
TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _efficient_attention_backward_symint(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & bias, const at::Tensor & out, const ::std::optional<at::Tensor> & cu_seqlens_q, const ::std::optional<at::Tensor> & cu_seqlens_k, c10::SymInt max_seqlen_q, c10::SymInt max_seqlen_k, const at::Tensor & logsumexp, double dropout_p, const at::Tensor & philox_seed, const at::Tensor & philox_offset, int64_t custom_mask_type, bool bias_requires_grad, ::std::optional<double> scale=::std::nullopt, ::std::optional<int64_t> num_splits_key=::std::nullopt, ::std::optional<int64_t> window_size=::std::nullopt, bool shared_storage_dqdkdv=false);

} // namespace cuda
} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log1p_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
// @generated by torchgen/gen.py from DispatchKeyFunction.h
// CUDA-backend declarations for aten::_foreach_log1p (functional and in-place).

// NB: The implementing C++ file is RegisterDispatchKey.cpp

// The only #includes we need are for custom classes that have defaults in the C++ API
#include <c10/core/MemoryFormat.h>
#include <c10/core/Scalar.h>
#include <ATen/core/Reduction.h>

// Forward declarations of any types needed in the operator signatures.
// We can't directly include these classes because it will cause circular include dependencies.
// This file is included by TensorBody.h, which defines the Tensor class.
#include <ATen/core/ATen_fwd.h>

namespace at {

namespace cuda {

TORCH_API ::std::vector<at::Tensor> _foreach_log1p(at::TensorList self);
TORCH_API void _foreach_log1p_(at::TensorList self);  // in-place variant

} // namespace cuda
} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_mul.h
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

// @generated by torchgen/gen.py from Function.h
// Public C++ API wrappers for the aten::_foreach_mul family (Scalar, List,
// ScalarList and Tensor overloads, each with functional, in-place and out
// variants). Every inline function forwards to the matching at::_ops entry.

#include <ATen/Context.h>
#include <ATen/DeviceGuard.h>
#include <ATen/TensorUtils.h>
#include <ATen/TracerMode.h>
#include <ATen/core/Generator.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <optional>



#include <ATen/ops/_foreach_mul_ops.h>

namespace at {


// aten::_foreach_mul.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
inline ::std::vector<at::Tensor> _foreach_mul(at::TensorList self, const at::Scalar & scalar) {
    return at::_ops::_foreach_mul_Scalar::call(self, scalar);
}

// aten::_foreach_mul_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
inline void _foreach_mul_(at::TensorList self, const at::Scalar & scalar) {
    return at::_ops::_foreach_mul__Scalar::call(self, scalar);
}

// aten::_foreach_mul.List(Tensor[] self, Tensor[] other) -> Tensor[]
inline ::std::vector<at::Tensor> _foreach_mul(at::TensorList self, at::TensorList other) {
    return at::_ops::_foreach_mul_List::call(self, other);
}

// aten::_foreach_mul_.List(Tensor(a!)[] self, Tensor[] other) -> ()
inline void _foreach_mul_(at::TensorList self, at::TensorList other) {
    return at::_ops::_foreach_mul__List::call(self, other);
}

// aten::_foreach_mul.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
inline ::std::vector<at::Tensor> _foreach_mul(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    return at::_ops::_foreach_mul_ScalarList::call(self, scalars);
}

// aten::_foreach_mul_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
inline void _foreach_mul_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    return at::_ops::_foreach_mul__ScalarList::call(self, scalars);
}

// aten::_foreach_mul.Tensor(Tensor[] self, Tensor other) -> Tensor[]
inline ::std::vector<at::Tensor> _foreach_mul(at::TensorList self, const at::Tensor & other) {
    return at::_ops::_foreach_mul_Tensor::call(self, other);
}

// aten::_foreach_mul_.Tensor(Tensor(a!)[] self, Tensor other) -> ()
inline void _foreach_mul_(at::TensorList self, const at::Tensor & other) {
    return at::_ops::_foreach_mul__Tensor::call(self, other);
}

// Out variants below: `_out` takes the out list first, `_outf` takes it last.
// aten::_foreach_mul.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
inline void _foreach_mul_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) {
    return at::_ops::_foreach_mul_Scalar_out::call(self, scalar, out);
}
// aten::_foreach_mul.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
inline void _foreach_mul_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
    return at::_ops::_foreach_mul_Scalar_out::call(self, scalar, out);
}

// aten::_foreach_mul.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
inline void _foreach_mul_out(at::TensorList out, at::TensorList self, at::TensorList other) {
    return at::_ops::_foreach_mul_List_out::call(self, other, out);
}
// aten::_foreach_mul.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
inline void _foreach_mul_outf(at::TensorList self, at::TensorList other, at::TensorList out) {
    return at::_ops::_foreach_mul_List_out::call(self, other, out);
}

// aten::_foreach_mul.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
inline void _foreach_mul_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    return at::_ops::_foreach_mul_ScalarList_out::call(self, scalars, out);
}
// aten::_foreach_mul.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
inline void _foreach_mul_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
    return at::_ops::_foreach_mul_ScalarList_out::call(self, scalars, out);
}

// aten::_foreach_mul.Tensor_out(Tensor[] self, Tensor other, *, Tensor(a!)[] out) -> ()
inline void _foreach_mul_out(at::TensorList out, at::TensorList self, const at::Tensor & other) {
    return at::_ops::_foreach_mul_Tensor_out::call(self, other, out);
}
// aten::_foreach_mul.Tensor_out(Tensor[] self, Tensor other, *, Tensor(a!)[] out) -> ()
inline void _foreach_mul_outf(at::TensorList self, const at::Tensor & other, at::TensorList out) {
    return at::_ops::_foreach_mul_Tensor_out::call(self, other, out);
}

} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_check_errors_native.h
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

// @generated by torchgen/gen.py from NativeFunction.h
// Native-function declaration for aten::_linalg_check_errors.

#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <optional>
#include <c10/core/QScheme.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <tuple>
#include <vector>


namespace at {
namespace native {
TORCH_API void _linalg_check_errors(const at::Tensor & info, c10::string_view api_name, bool is_matrix);
} // namespace native
} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dual_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
// @generated by torchgen/gen.py from DispatchKeyFunction.h
// CompositeExplicitAutograd-backend declaration for aten::_make_dual.

// NB: The implementing C++ file is RegisterDispatchKey.cpp

// The only #includes we need are for custom classes that have defaults in the C++ API
#include <c10/core/MemoryFormat.h>
#include <c10/core/Scalar.h>
#include <ATen/core/Reduction.h>

// Forward declarations of any types needed in the operator signatures.
// We can't directly include these classes because it will cause circular include dependencies.
// This file is included by TensorBody.h, which defines the Tensor class.
#include <ATen/core/ATen_fwd.h>

namespace at {

namespace compositeexplicitautograd {

TORCH_API at::Tensor _make_dual(const at::Tensor & primal, const at::Tensor & tangent, int64_t level);

} // namespace compositeexplicitautograd
} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_ops.h
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

// @generated by torchgen/gen.py from Operator.h
// Dispatcher-level metadata for the aten::_native_batch_norm_legit family:
// each struct carries the operator's registered name/overload/schema string
// plus the static call()/redispatch() entry points used by the dispatcher.

#include <tuple>
#include <vector>

// Forward declarations of any types needed in the operator signatures.
// We can't directly include these classes because it will cause circular include dependencies.
// This file is included by TensorBody.h, which defines the Tensor class.
#include <ATen/core/ATen_fwd.h>

namespace at {
namespace _ops {


// Default overload: mutates running_mean/running_var in place (Tensor(a!)/(b!)).
struct TORCH_API _native_batch_norm_legit {
  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, at::Tensor &, at::Tensor &, bool, double, double);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_native_batch_norm_legit")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_native_batch_norm_legit(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)")
  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps);
  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps);
};

// "out" overload: additionally writes out/save_mean/save_invstd into
// caller-provided tensors.
struct TORCH_API _native_batch_norm_legit_out {
  using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, at::Tensor &, at::Tensor &, bool, double, double, at::Tensor &, at::Tensor &, at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_native_batch_norm_legit")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_native_batch_norm_legit.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd) -> (Tensor(d!), Tensor(e!), Tensor(f!))")
  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd);
  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd);
};

// "no_stats" overload: no running-stats arguments at all.
struct TORCH_API _native_batch_norm_legit_no_stats {
  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, bool, double, double);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_native_batch_norm_legit")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "no_stats")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_native_batch_norm_legit.no_stats(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)")
  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, bool training, double momentum, double eps);
  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, bool training, double momentum, double eps);
};

// "no_stats_out" overload: no running stats, with explicit out tensors.
struct TORCH_API _native_batch_norm_legit_no_stats_out {
  using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, bool, double, double, at::Tensor &, at::Tensor &, at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_native_batch_norm_legit")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "no_stats_out")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_native_batch_norm_legit.no_stats_out(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))")
  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd);
  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd);
};

// Functional (non-mutating) form: takes running stats by const ref and
// returns updated running_mean_out/running_var_out instead of mutating.
struct TORCH_API _native_batch_norm_legit_functional {
  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, const at::Tensor &, const at::Tensor &, bool, double, double);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_native_batch_norm_legit_functional")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_native_batch_norm_legit_functional(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor running_mean_out, Tensor running_var_out)")
  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, bool training, double momentum, double eps);
  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, bool training, double momentum, double eps);
};

}} // namespace at::_ops
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer_copy.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

// @generated by torchgen/gen.py from Function.h
// Public C++ API wrappers for aten::_nested_view_from_buffer_copy. Each
// inline function forwards to the corresponding at::_ops entry point.

#include <ATen/Context.h>
#include <ATen/DeviceGuard.h>
#include <ATen/TensorUtils.h>
#include <ATen/TracerMode.h>
#include <ATen/core/Generator.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <optional>



#include <ATen/ops/_nested_view_from_buffer_copy_ops.h>

namespace at {


// aten::_nested_view_from_buffer_copy(Tensor self, Tensor nested_size, Tensor nested_strides, Tensor offsets) -> Tensor
inline at::Tensor _nested_view_from_buffer_copy(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets) {
    return at::_ops::_nested_view_from_buffer_copy::call(self, nested_size, nested_strides, offsets);
}

// Out-variant: `_out` takes the out tensor first; `_outf` takes it last.
// aten::_nested_view_from_buffer_copy.out(Tensor self, Tensor nested_size, Tensor nested_strides, Tensor offsets, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & _nested_view_from_buffer_copy_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets) {
    return at::_ops::_nested_view_from_buffer_copy_out::call(self, nested_size, nested_strides, offsets, out);
}
// aten::_nested_view_from_buffer_copy.out(Tensor self, Tensor nested_size, Tensor nested_strides, Tensor offsets, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & _nested_view_from_buffer_copy_outf(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets, at::Tensor & out) {
    return at::_ops::_nested_view_from_buffer_copy_out::call(self, nested_size, nested_strides, offsets, out);
}

} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_pad_packed_sequence_compositeimplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
// @generated by torchgen/gen.py from DispatchKeyFunction.h
// CompositeImplicitAutograd-backend declaration for aten::_pad_packed_sequence.

// NB: The implementing C++ file is RegisterDispatchKey.cpp

// The only #includes we need are for custom classes that have defaults in the C++ API
#include <c10/core/MemoryFormat.h>
#include <c10/core/Scalar.h>
#include <ATen/core/Reduction.h>

// Forward declarations of any types needed in the operator signatures.
// We can't directly include these classes because it will cause circular include dependencies.
// This file is included by TensorBody.h, which defines the Tensor class.
#include <ATen/core/ATen_fwd.h>

namespace at {

namespace compositeimplicitautograd {

TORCH_API ::std::tuple<at::Tensor,at::Tensor> _pad_packed_sequence(const at::Tensor & data, const at::Tensor & batch_sizes, bool batch_first, const at::Scalar & padding_value, int64_t total_length);

} // namespace compositeimplicitautograd
} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor _prelu_kernel(const at::Tensor & self, const at::Tensor & weight);
|
| 21 |
+
|
| 22 |
+
} // namespace cpu
|
| 23 |
+
} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_print_ops.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API _print {
|
| 18 |
+
using schema = void (c10::string_view);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_print")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_print(str s) -> ()")
|
| 24 |
+
static void call(c10::string_view s);
|
| 25 |
+
static void redispatch(c10::DispatchKeySet dispatchKeySet, c10::string_view s);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
}} // namespace at::_ops
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_fused_attention_overrideable_backward_ops.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API _scaled_dot_product_fused_attention_overrideable_backward {
|
| 18 |
+
using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, ::std::array<bool,4>, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::SymInt, c10::SymInt, double, bool, const at::Tensor &, const at::Tensor &, ::std::optional<double>);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_scaled_dot_product_fused_attention_overrideable_backward")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_scaled_dot_product_fused_attention_overrideable_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor attn_bias, bool[4] grad_input_mask, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value, Tensor grad_attn_bias)")
|
| 24 |
+
static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & attn_bias, ::std::array<bool,4> grad_input_mask, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, ::std::optional<double> scale);
|
| 25 |
+
static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & attn_bias, ::std::array<bool,4> grad_input_mask, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, ::std::optional<double> scale);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
}} // namespace at::_ops
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_ff_compositeimplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeimplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor & _sobol_engine_ff_(at::Tensor & self, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated);
|
| 21 |
+
|
| 22 |
+
} // namespace compositeimplicitautograd
|
| 23 |
+
} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_bsr_tensor_unsafe_ops.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API _sparse_bsr_tensor_unsafe {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_bsr_tensor_unsafe")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
|
| 24 |
+
static at::Tensor call(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
}} // namespace at::_ops
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_semi_structured_linear_native.h
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor _sparse_semi_structured_linear(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & meta, const ::std::optional<at::Tensor> & bias={}, ::std::optional<c10::string_view> activation=::std::nullopt, ::std::optional<at::ScalarType> out_dtype=::std::nullopt);
|
| 20 |
+
} // namespace native
|
| 21 |
+
} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_test_check_tensor_native.h
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor _test_check_tensor(const at::Tensor & self);
|
| 20 |
+
} // namespace native
|
| 21 |
+
} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_backward_meta_dispatch.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace meta {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor _upsample_nearest_exact1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales=::std::nullopt);
|
| 21 |
+
TORCH_API at::Tensor _upsample_nearest_exact1d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales=::std::nullopt);
|
| 22 |
+
TORCH_API at::Tensor & _upsample_nearest_exact1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales=::std::nullopt);
|
| 23 |
+
TORCH_API at::Tensor & _upsample_nearest_exact1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales, at::Tensor & grad_input);
|
| 24 |
+
TORCH_API at::Tensor & _upsample_nearest_exact1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales=::std::nullopt);
|
| 25 |
+
TORCH_API at::Tensor & _upsample_nearest_exact1d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales, at::Tensor & grad_input);
|
| 26 |
+
|
| 27 |
+
} // namespace meta
|
| 28 |
+
} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/acos_meta_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace meta {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor acos(const at::Tensor & self);
|
| 21 |
+
TORCH_API at::Tensor & acos_out(at::Tensor & out, const at::Tensor & self);
|
| 22 |
+
TORCH_API at::Tensor & acos_outf(const at::Tensor & self, at::Tensor & out);
|
| 23 |
+
TORCH_API at::Tensor & acos_(at::Tensor & self);
|
| 24 |
+
|
| 25 |
+
} // namespace meta
|
| 26 |
+
} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/align_as_native.h
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor align_as(const at::Tensor & self, const at::Tensor & other);
|
| 20 |
+
} // namespace native
|
| 21 |
+
} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/align_tensors_compositeimplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeimplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API ::std::vector<at::Tensor> align_tensors(at::TensorList tensors);
|
| 21 |
+
|
| 22 |
+
} // namespace compositeimplicitautograd
|
| 23 |
+
} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/asin.h
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/asin_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::asin(Tensor self) -> Tensor
|
| 26 |
+
inline at::Tensor asin(const at::Tensor & self) {
|
| 27 |
+
return at::_ops::asin::call(self);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
// aten::asin_(Tensor(a!) self) -> Tensor(a!)
|
| 31 |
+
inline at::Tensor & asin_(at::Tensor & self) {
|
| 32 |
+
return at::_ops::asin_::call(self);
|
| 33 |
+
}
|
| 34 |
+
|
| 35 |
+
// aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
|
| 36 |
+
inline at::Tensor & asin_out(at::Tensor & out, const at::Tensor & self) {
|
| 37 |
+
return at::_ops::asin_out::call(self, out);
|
| 38 |
+
}
|
| 39 |
+
// aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
|
| 40 |
+
inline at::Tensor & asin_outf(const at::Tensor & self, at::Tensor & out) {
|
| 41 |
+
return at::_ops::asin_out::call(self, out);
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
}
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward_elemt_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor & batch_norm_backward_elemt_out(at::Tensor & out, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & weight, const at::Tensor & sum_dy, const at::Tensor & sum_dy_xmu, const at::Tensor & count);
|
| 21 |
+
TORCH_API at::Tensor & batch_norm_backward_elemt_outf(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & weight, const at::Tensor & sum_dy, const at::Tensor & sum_dy_xmu, const at::Tensor & count, at::Tensor & out);
|
| 22 |
+
|
| 23 |
+
} // namespace compositeexplicitautograd
|
| 24 |
+
} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_or_native.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
#include <ATen/ops/bitwise_or_meta.h>
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
struct TORCH_API structured_bitwise_or_out : public at::meta::structured_bitwise_or_Tensor {
|
| 20 |
+
void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out);
|
| 21 |
+
};
|
| 22 |
+
TORCH_API at::Tensor bitwise_or(const at::Tensor & self, const at::Scalar & other);
|
| 23 |
+
TORCH_API at::Tensor & bitwise_or_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
|
| 24 |
+
TORCH_API at::Tensor & bitwise_or_(at::Tensor & self, const at::Scalar & other);
|
| 25 |
+
TORCH_API at::Tensor bitwise_or(const at::Scalar & self, const at::Tensor & other);
|
| 26 |
+
TORCH_API at::Tensor & bitwise_or_Scalar_Tensor_out(const at::Scalar & self, const at::Tensor & other, at::Tensor & out);
|
| 27 |
+
} // namespace native
|
| 28 |
+
} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/complex_native.h
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor complex(const at::Tensor & real, const at::Tensor & imag);
|
| 20 |
+
TORCH_API at::Tensor & complex_out(const at::Tensor & real, const at::Tensor & imag, at::Tensor & out);
|
| 21 |
+
} // namespace native
|
| 22 |
+
} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/concatenate_native.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor concatenate(at::TensorList tensors, int64_t dim=0);
|
| 20 |
+
TORCH_API at::Tensor & concatenate_out(at::TensorList tensors, int64_t dim, at::Tensor & out);
|
| 21 |
+
TORCH_API at::Tensor concatenate(at::TensorList tensors, at::Dimname dim);
|
| 22 |
+
TORCH_API at::Tensor & concatenate_out(at::TensorList tensors, at::Dimname dim, at::Tensor & out);
|
| 23 |
+
} // namespace native
|
| 24 |
+
} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_convolution_transpose.h
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/cudnn_convolution_transpose_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::cudnn_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor
|
| 26 |
+
inline at::Tensor cudnn_convolution_transpose(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32) {
|
| 27 |
+
return at::_ops::cudnn_convolution_transpose::call(self, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic, allow_tf32);
|
| 28 |
+
}
|
| 29 |
+
namespace symint {
|
| 30 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 31 |
+
at::Tensor cudnn_convolution_transpose(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32) {
|
| 32 |
+
return at::_ops::cudnn_convolution_transpose::call(self, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic, allow_tf32);
|
| 33 |
+
}
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
// aten::cudnn_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor
|
| 37 |
+
inline at::Tensor cudnn_convolution_transpose_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) {
|
| 38 |
+
return at::_ops::cudnn_convolution_transpose::call(self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
|
| 39 |
+
}
|
| 40 |
+
namespace symint {
|
| 41 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 42 |
+
at::Tensor cudnn_convolution_transpose(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) {
|
| 43 |
+
return at::_ops::cudnn_convolution_transpose::call(self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
|
| 44 |
+
}
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
// aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
|
| 48 |
+
inline at::Tensor & cudnn_convolution_transpose_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32) {
|
| 49 |
+
return at::_ops::cudnn_convolution_transpose_out::call(self, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic, allow_tf32, out);
|
| 50 |
+
}
|
| 51 |
+
namespace symint {
|
| 52 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 53 |
+
at::Tensor & cudnn_convolution_transpose_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32) {
|
| 54 |
+
return at::_ops::cudnn_convolution_transpose_out::call(self, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic, allow_tf32, out);
|
| 55 |
+
}
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
// aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
|
| 59 |
+
inline at::Tensor & cudnn_convolution_transpose_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out) {
|
| 60 |
+
return at::_ops::cudnn_convolution_transpose_out::call(self, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic, allow_tf32, out);
|
| 61 |
+
}
|
| 62 |
+
namespace symint {
|
| 63 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 64 |
+
at::Tensor & cudnn_convolution_transpose_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out) {
|
| 65 |
+
return at::_ops::cudnn_convolution_transpose_out::call(self, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic, allow_tf32, out);
|
| 66 |
+
}
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
// aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
|
| 70 |
+
inline at::Tensor & cudnn_convolution_transpose_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) {
|
| 71 |
+
return at::_ops::cudnn_convolution_transpose_out::call(self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out);
|
| 72 |
+
}
|
| 73 |
+
namespace symint {
|
| 74 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 75 |
+
at::Tensor & cudnn_convolution_transpose_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) {
|
| 76 |
+
return at::_ops::cudnn_convolution_transpose_out::call(self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out);
|
| 77 |
+
}
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
// aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
|
| 81 |
+
inline at::Tensor & cudnn_convolution_transpose_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out) {
|
| 82 |
+
return at::_ops::cudnn_convolution_transpose_out::call(self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out);
|
| 83 |
+
}
|
| 84 |
+
namespace symint {
|
| 85 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 86 |
+
at::Tensor & cudnn_convolution_transpose_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out) {
|
| 87 |
+
return at::_ops::cudnn_convolution_transpose_out::call(self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out);
|
| 88 |
+
}
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
}
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/eq_compositeexplicitautogradnonfunctional_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautogradnonfunctional {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor eq(const at::Tensor & self, const at::Scalar & other);
|
| 21 |
+
TORCH_API at::Tensor & eq_(at::Tensor & self, const at::Scalar & other);
|
| 22 |
+
TORCH_API at::Tensor eq(const at::Tensor & self, const at::Tensor & other);
|
| 23 |
+
TORCH_API at::Tensor & eq_(at::Tensor & self, const at::Tensor & other);
|
| 24 |
+
|
| 25 |
+
} // namespace compositeexplicitautogradnonfunctional
|
| 26 |
+
} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/exp_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor exp(const at::Tensor & self);
|
| 21 |
+
TORCH_API at::Tensor & exp_out(at::Tensor & out, const at::Tensor & self);
|
| 22 |
+
TORCH_API at::Tensor & exp_outf(const at::Tensor & self, at::Tensor & out);
|
| 23 |
+
TORCH_API at::Tensor & exp_(at::Tensor & self);
|
| 24 |
+
|
| 25 |
+
} // namespace cuda
|
| 26 |
+
} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/fill_ops.h
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API fill_Scalar {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fill")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fill.Scalar(Tensor self, Scalar value) -> Tensor")
|
| 24 |
+
static at::Tensor call(const at::Tensor & self, const at::Scalar & value);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & value);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API fill_Tensor {
|
| 29 |
+
using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fill")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fill.Tensor(Tensor self, Tensor value) -> Tensor")
|
| 35 |
+
static at::Tensor call(const at::Tensor & self, const at::Tensor & value);
|
| 36 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & value);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
struct TORCH_API fill__Scalar {
|
| 40 |
+
using schema = at::Tensor & (at::Tensor &, const at::Scalar &);
|
| 41 |
+
using ptr_schema = schema*;
|
| 42 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 43 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fill_")
|
| 44 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
|
| 45 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)")
|
| 46 |
+
static at::Tensor & call(at::Tensor & self, const at::Scalar & value);
|
| 47 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & value);
|
| 48 |
+
};
|
| 49 |
+
|
| 50 |
+
struct TORCH_API fill__Tensor {
|
| 51 |
+
using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
|
| 52 |
+
using ptr_schema = schema*;
|
| 53 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 54 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fill_")
|
| 55 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
|
| 56 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)")
|
| 57 |
+
static at::Tensor & call(at::Tensor & self, const at::Tensor & value);
|
| 58 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & value);
|
| 59 |
+
};
|
| 60 |
+
|
| 61 |
+
struct TORCH_API fill_Scalar_out {
|
| 62 |
+
using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
|
| 63 |
+
using ptr_schema = schema*;
|
| 64 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 65 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fill")
|
| 66 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out")
|
| 67 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fill.Scalar_out(Tensor self, Scalar value, *, Tensor(a!) out) -> Tensor(a!)")
|
| 68 |
+
static at::Tensor & call(const at::Tensor & self, const at::Scalar & value, at::Tensor & out);
|
| 69 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & value, at::Tensor & out);
|
| 70 |
+
};
|
| 71 |
+
|
| 72 |
+
struct TORCH_API fill_Tensor_out {
|
| 73 |
+
using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
|
| 74 |
+
using ptr_schema = schema*;
|
| 75 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 76 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fill")
|
| 77 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out")
|
| 78 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fill.Tensor_out(Tensor self, Tensor value, *, Tensor(a!) out) -> Tensor(a!)")
|
| 79 |
+
static at::Tensor & call(const at::Tensor & self, const at::Tensor & value, at::Tensor & out);
|
| 80 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & value, at::Tensor & out);
|
| 81 |
+
};
|
| 82 |
+
|
| 83 |
+
}} // namespace at::_ops
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/flatten_dense_tensors_compositeimplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeimplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor flatten_dense_tensors(at::TensorList tensors);
|
| 21 |
+
|
| 22 |
+
} // namespace compositeimplicitautograd
|
| 23 |
+
} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/from_file_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor from_file(c10::string_view filename, ::std::optional<bool> shared=::std::nullopt, ::std::optional<int64_t> size=0, at::TensorOptions options={});
|
| 21 |
+
TORCH_API at::Tensor from_file(c10::string_view filename, ::std::optional<bool> shared, ::std::optional<int64_t> size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
|
| 22 |
+
|
| 23 |
+
} // namespace cpu
|
| 24 |
+
} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/glu.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/glu_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)
|
| 26 |
+
inline at::Tensor & glu_out(at::Tensor & out, const at::Tensor & self, int64_t dim=-1) {
|
| 27 |
+
return at::_ops::glu_out::call(self, dim, out);
|
| 28 |
+
}
|
| 29 |
+
// aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)
|
| 30 |
+
inline at::Tensor & glu_outf(const at::Tensor & self, int64_t dim, at::Tensor & out) {
|
| 31 |
+
return at::_ops::glu_out::call(self, dim, out);
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
// aten::glu(Tensor self, int dim=-1) -> Tensor
|
| 35 |
+
inline at::Tensor glu(const at::Tensor & self, int64_t dim=-1) {
|
| 36 |
+
return at::_ops::glu::call(self, dim);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
}
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward_native.h
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor infinitely_differentiable_gelu_backward(const at::Tensor & grad, const at::Tensor & self);
|
| 20 |
+
} // namespace native
|
| 21 |
+
} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/is_nonzero_native.h
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API bool is_nonzero(const at::Tensor & self);
|
| 20 |
+
} // namespace native
|
| 21 |
+
} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_meta_dispatch.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace meta {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor isneginf(const at::Tensor & self);
|
| 21 |
+
TORCH_API at::Tensor & isneginf_out(at::Tensor & out, const at::Tensor & self);
|
| 22 |
+
TORCH_API at::Tensor & isneginf_outf(const at::Tensor & self, at::Tensor & out);
|
| 23 |
+
|
| 24 |
+
} // namespace meta
|
| 25 |
+
} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor linalg_cross(const at::Tensor & self, const at::Tensor & other, int64_t dim=-1);
|
| 21 |
+
TORCH_API at::Tensor & linalg_cross_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, int64_t dim=-1);
|
| 22 |
+
TORCH_API at::Tensor & linalg_cross_outf(const at::Tensor & self, const at::Tensor & other, int64_t dim, at::Tensor & out);
|
| 23 |
+
|
| 24 |
+
} // namespace cuda
|
| 25 |
+
} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor linalg_householder_product(const at::Tensor & input, const at::Tensor & tau);
|
| 21 |
+
TORCH_API at::Tensor & linalg_householder_product_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & tau);
|
| 22 |
+
TORCH_API at::Tensor & linalg_householder_product_outf(const at::Tensor & input, const at::Tensor & tau, at::Tensor & out);
|
| 23 |
+
|
| 24 |
+
} // namespace cpu
|
| 25 |
+
} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> linalg_lstsq(const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond=::std::nullopt, ::std::optional<c10::string_view> driver=::std::nullopt);
|
| 21 |
+
|
| 22 |
+
} // namespace compositeexplicitautograd
|
| 23 |
+
} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu(const at::Tensor & A, bool pivot=true);
|
| 21 |
+
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_out(at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & A, bool pivot=true);
|
| 22 |
+
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_outf(const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U);
|
| 23 |
+
|
| 24 |
+
} // namespace cpu
|
| 25 |
+
} // namespace at
|
pllava/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve_triangular_ops.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API linalg_solve_triangular_out {
|
| 18 |
+
using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, bool, bool, bool, at::Tensor &);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_solve_triangular")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!)")
|
| 24 |
+
static at::Tensor & call(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular, at::Tensor & out);
|
| 25 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular, at::Tensor & out);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API linalg_solve_triangular {
|
| 29 |
+
using schema = at::Tensor (const at::Tensor &, const at::Tensor &, bool, bool, bool);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_solve_triangular")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor")
|
| 35 |
+
static at::Tensor call(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular);
|
| 36 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
}} // namespace at::_ops
|