Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- llava_next/lib/python3.10/site-packages/importlib_resources-6.4.5.dist-info/REQUESTED +0 -0
- llava_next/lib/python3.10/site-packages/more_itertools/__init__.py +6 -0
- llava_next/lib/python3.10/site-packages/more_itertools/__init__.pyi +2 -0
- llava_next/lib/python3.10/site-packages/more_itertools/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/more_itertools/more.py +0 -0
- llava_next/lib/python3.10/site-packages/more_itertools/more.pyi +709 -0
- llava_next/lib/python3.10/site-packages/more_itertools/py.typed +0 -0
- llava_next/lib/python3.10/site-packages/more_itertools/recipes.pyi +136 -0
- llava_next/lib/python3.10/site-packages/torchgen/executorch/__init__.py +0 -0
- llava_next/lib/python3.10/site-packages/torchgen/executorch/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torchgen/executorch/__pycache__/model.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torchgen/executorch/__pycache__/parse.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torchgen/executorch/api/__init__.py +0 -0
- llava_next/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/custom_ops.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/et_cpp.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/unboxing.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torchgen/executorch/api/custom_ops.py +131 -0
- llava_next/lib/python3.10/site-packages/torchgen/executorch/api/et_cpp.py +368 -0
- llava_next/lib/python3.10/site-packages/torchgen/executorch/api/types/__init__.py +2 -0
- llava_next/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/signatures.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/types.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torchgen/executorch/api/types/signatures.py +73 -0
- llava_next/lib/python3.10/site-packages/torchgen/executorch/api/types/types.py +81 -0
- llava_next/lib/python3.10/site-packages/torchgen/executorch/api/unboxing.py +213 -0
- llava_next/lib/python3.10/site-packages/torchgen/gen_executorch.py +943 -0
- llava_next/lib/python3.10/site-packages/torchgen/gen_lazy_tensor.py +605 -0
- llava_next/lib/python3.10/site-packages/torchgen/gen_vmap_plumbing.py +265 -0
- llava_next/lib/python3.10/site-packages/torchgen/model.py +0 -0
- llava_next/lib/python3.10/site-packages/torchgen/operator_versions/gen_mobile_upgraders.py +392 -0
- llava_next/lib/python3.10/site-packages/torchgen/packaged/autograd/__init__.py +0 -0
- llava_next/lib/python3.10/site-packages/torchgen/static_runtime/__init__.py +0 -0
- llava_next/lib/python3.10/site-packages/torchgen/static_runtime/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torchgen/static_runtime/__pycache__/config.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torchgen/static_runtime/__pycache__/gen_static_runtime_ops.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torchgen/static_runtime/__pycache__/generator.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torchgen/static_runtime/config.py +388 -0
- llava_next/lib/python3.10/site-packages/torchgen/static_runtime/gen_static_runtime_ops.py +228 -0
- llava_next/lib/python3.10/site-packages/torchgen/static_runtime/generator.py +796 -0
- vlmpy310/lib/python3.10/site-packages/jupyter_server/services/__pycache__/__init__.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/jupyter_server/services/api/__init__.py +0 -0
- vlmpy310/lib/python3.10/site-packages/jupyter_server/services/api/__pycache__/__init__.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/jupyter_server/services/api/__pycache__/handlers.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/jupyter_server/services/api/api.yaml +975 -0
- vlmpy310/lib/python3.10/site-packages/jupyter_server/services/api/handlers.py +121 -0
- vlmpy310/lib/python3.10/site-packages/jupyter_server/services/events/__pycache__/__init__.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/jupyter_server/services/events/handlers.py +144 -0
- vlmpy310/lib/python3.10/site-packages/jupyter_server/services/kernels/__pycache__/__init__.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/jupyter_server/services/kernels/__pycache__/handlers.cpython-310.pyc +0 -0
llava_next/lib/python3.10/site-packages/importlib_resources-6.4.5.dist-info/REQUESTED
ADDED
|
File without changes
|
llava_next/lib/python3.10/site-packages/more_itertools/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""More routines for operating on iterables, beyond itertools"""
|
| 2 |
+
|
| 3 |
+
from .more import * # noqa
|
| 4 |
+
from .recipes import * # noqa
|
| 5 |
+
|
| 6 |
+
__version__ = '10.3.0'
|
llava_next/lib/python3.10/site-packages/more_itertools/__init__.pyi
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .more import *
|
| 2 |
+
from .recipes import *
|
llava_next/lib/python3.10/site-packages/more_itertools/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (312 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/more_itertools/more.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
llava_next/lib/python3.10/site-packages/more_itertools/more.pyi
ADDED
|
@@ -0,0 +1,709 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Stubs for more_itertools.more"""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from types import TracebackType
|
| 6 |
+
from typing import (
|
| 7 |
+
Any,
|
| 8 |
+
Callable,
|
| 9 |
+
Container,
|
| 10 |
+
ContextManager,
|
| 11 |
+
Generic,
|
| 12 |
+
Hashable,
|
| 13 |
+
Mapping,
|
| 14 |
+
Iterable,
|
| 15 |
+
Iterator,
|
| 16 |
+
Mapping,
|
| 17 |
+
overload,
|
| 18 |
+
Reversible,
|
| 19 |
+
Sequence,
|
| 20 |
+
Sized,
|
| 21 |
+
Type,
|
| 22 |
+
TypeVar,
|
| 23 |
+
type_check_only,
|
| 24 |
+
)
|
| 25 |
+
from typing_extensions import Protocol
|
| 26 |
+
|
| 27 |
+
# Type and type variable definitions
|
| 28 |
+
_T = TypeVar('_T')
|
| 29 |
+
_T1 = TypeVar('_T1')
|
| 30 |
+
_T2 = TypeVar('_T2')
|
| 31 |
+
_U = TypeVar('_U')
|
| 32 |
+
_V = TypeVar('_V')
|
| 33 |
+
_W = TypeVar('_W')
|
| 34 |
+
_T_co = TypeVar('_T_co', covariant=True)
|
| 35 |
+
_GenFn = TypeVar('_GenFn', bound=Callable[..., Iterator[Any]])
|
| 36 |
+
_Raisable = BaseException | Type[BaseException]
|
| 37 |
+
|
| 38 |
+
@type_check_only
|
| 39 |
+
class _SizedIterable(Protocol[_T_co], Sized, Iterable[_T_co]): ...
|
| 40 |
+
|
| 41 |
+
@type_check_only
|
| 42 |
+
class _SizedReversible(Protocol[_T_co], Sized, Reversible[_T_co]): ...
|
| 43 |
+
|
| 44 |
+
@type_check_only
|
| 45 |
+
class _SupportsSlicing(Protocol[_T_co]):
|
| 46 |
+
def __getitem__(self, __k: slice) -> _T_co: ...
|
| 47 |
+
|
| 48 |
+
def chunked(
|
| 49 |
+
iterable: Iterable[_T], n: int | None, strict: bool = ...
|
| 50 |
+
) -> Iterator[list[_T]]: ...
|
| 51 |
+
@overload
|
| 52 |
+
def first(iterable: Iterable[_T]) -> _T: ...
|
| 53 |
+
@overload
|
| 54 |
+
def first(iterable: Iterable[_T], default: _U) -> _T | _U: ...
|
| 55 |
+
@overload
|
| 56 |
+
def last(iterable: Iterable[_T]) -> _T: ...
|
| 57 |
+
@overload
|
| 58 |
+
def last(iterable: Iterable[_T], default: _U) -> _T | _U: ...
|
| 59 |
+
@overload
|
| 60 |
+
def nth_or_last(iterable: Iterable[_T], n: int) -> _T: ...
|
| 61 |
+
@overload
|
| 62 |
+
def nth_or_last(iterable: Iterable[_T], n: int, default: _U) -> _T | _U: ...
|
| 63 |
+
|
| 64 |
+
class peekable(Generic[_T], Iterator[_T]):
|
| 65 |
+
def __init__(self, iterable: Iterable[_T]) -> None: ...
|
| 66 |
+
def __iter__(self) -> peekable[_T]: ...
|
| 67 |
+
def __bool__(self) -> bool: ...
|
| 68 |
+
@overload
|
| 69 |
+
def peek(self) -> _T: ...
|
| 70 |
+
@overload
|
| 71 |
+
def peek(self, default: _U) -> _T | _U: ...
|
| 72 |
+
def prepend(self, *items: _T) -> None: ...
|
| 73 |
+
def __next__(self) -> _T: ...
|
| 74 |
+
@overload
|
| 75 |
+
def __getitem__(self, index: int) -> _T: ...
|
| 76 |
+
@overload
|
| 77 |
+
def __getitem__(self, index: slice) -> list[_T]: ...
|
| 78 |
+
|
| 79 |
+
def consumer(func: _GenFn) -> _GenFn: ...
|
| 80 |
+
def ilen(iterable: Iterable[_T]) -> int: ...
|
| 81 |
+
def iterate(func: Callable[[_T], _T], start: _T) -> Iterator[_T]: ...
|
| 82 |
+
def with_iter(
|
| 83 |
+
context_manager: ContextManager[Iterable[_T]],
|
| 84 |
+
) -> Iterator[_T]: ...
|
| 85 |
+
def one(
|
| 86 |
+
iterable: Iterable[_T],
|
| 87 |
+
too_short: _Raisable | None = ...,
|
| 88 |
+
too_long: _Raisable | None = ...,
|
| 89 |
+
) -> _T: ...
|
| 90 |
+
def raise_(exception: _Raisable, *args: Any) -> None: ...
|
| 91 |
+
def strictly_n(
|
| 92 |
+
iterable: Iterable[_T],
|
| 93 |
+
n: int,
|
| 94 |
+
too_short: _GenFn | None = ...,
|
| 95 |
+
too_long: _GenFn | None = ...,
|
| 96 |
+
) -> list[_T]: ...
|
| 97 |
+
def distinct_permutations(
|
| 98 |
+
iterable: Iterable[_T], r: int | None = ...
|
| 99 |
+
) -> Iterator[tuple[_T, ...]]: ...
|
| 100 |
+
def intersperse(
|
| 101 |
+
e: _U, iterable: Iterable[_T], n: int = ...
|
| 102 |
+
) -> Iterator[_T | _U]: ...
|
| 103 |
+
def unique_to_each(*iterables: Iterable[_T]) -> list[list[_T]]: ...
|
| 104 |
+
@overload
|
| 105 |
+
def windowed(
|
| 106 |
+
seq: Iterable[_T], n: int, *, step: int = ...
|
| 107 |
+
) -> Iterator[tuple[_T | None, ...]]: ...
|
| 108 |
+
@overload
|
| 109 |
+
def windowed(
|
| 110 |
+
seq: Iterable[_T], n: int, fillvalue: _U, step: int = ...
|
| 111 |
+
) -> Iterator[tuple[_T | _U, ...]]: ...
|
| 112 |
+
def substrings(iterable: Iterable[_T]) -> Iterator[tuple[_T, ...]]: ...
|
| 113 |
+
def substrings_indexes(
|
| 114 |
+
seq: Sequence[_T], reverse: bool = ...
|
| 115 |
+
) -> Iterator[tuple[Sequence[_T], int, int]]: ...
|
| 116 |
+
|
| 117 |
+
class bucket(Generic[_T, _U], Container[_U]):
|
| 118 |
+
def __init__(
|
| 119 |
+
self,
|
| 120 |
+
iterable: Iterable[_T],
|
| 121 |
+
key: Callable[[_T], _U],
|
| 122 |
+
validator: Callable[[_U], object] | None = ...,
|
| 123 |
+
) -> None: ...
|
| 124 |
+
def __contains__(self, value: object) -> bool: ...
|
| 125 |
+
def __iter__(self) -> Iterator[_U]: ...
|
| 126 |
+
def __getitem__(self, value: object) -> Iterator[_T]: ...
|
| 127 |
+
|
| 128 |
+
def spy(
|
| 129 |
+
iterable: Iterable[_T], n: int = ...
|
| 130 |
+
) -> tuple[list[_T], Iterator[_T]]: ...
|
| 131 |
+
def interleave(*iterables: Iterable[_T]) -> Iterator[_T]: ...
|
| 132 |
+
def interleave_longest(*iterables: Iterable[_T]) -> Iterator[_T]: ...
|
| 133 |
+
def interleave_evenly(
|
| 134 |
+
iterables: list[Iterable[_T]], lengths: list[int] | None = ...
|
| 135 |
+
) -> Iterator[_T]: ...
|
| 136 |
+
def collapse(
|
| 137 |
+
iterable: Iterable[Any],
|
| 138 |
+
base_type: type | None = ...,
|
| 139 |
+
levels: int | None = ...,
|
| 140 |
+
) -> Iterator[Any]: ...
|
| 141 |
+
@overload
|
| 142 |
+
def side_effect(
|
| 143 |
+
func: Callable[[_T], object],
|
| 144 |
+
iterable: Iterable[_T],
|
| 145 |
+
chunk_size: None = ...,
|
| 146 |
+
before: Callable[[], object] | None = ...,
|
| 147 |
+
after: Callable[[], object] | None = ...,
|
| 148 |
+
) -> Iterator[_T]: ...
|
| 149 |
+
@overload
|
| 150 |
+
def side_effect(
|
| 151 |
+
func: Callable[[list[_T]], object],
|
| 152 |
+
iterable: Iterable[_T],
|
| 153 |
+
chunk_size: int,
|
| 154 |
+
before: Callable[[], object] | None = ...,
|
| 155 |
+
after: Callable[[], object] | None = ...,
|
| 156 |
+
) -> Iterator[_T]: ...
|
| 157 |
+
def sliced(
|
| 158 |
+
seq: _SupportsSlicing[_T], n: int, strict: bool = ...
|
| 159 |
+
) -> Iterator[_T]: ...
|
| 160 |
+
def split_at(
|
| 161 |
+
iterable: Iterable[_T],
|
| 162 |
+
pred: Callable[[_T], object],
|
| 163 |
+
maxsplit: int = ...,
|
| 164 |
+
keep_separator: bool = ...,
|
| 165 |
+
) -> Iterator[list[_T]]: ...
|
| 166 |
+
def split_before(
|
| 167 |
+
iterable: Iterable[_T], pred: Callable[[_T], object], maxsplit: int = ...
|
| 168 |
+
) -> Iterator[list[_T]]: ...
|
| 169 |
+
def split_after(
|
| 170 |
+
iterable: Iterable[_T], pred: Callable[[_T], object], maxsplit: int = ...
|
| 171 |
+
) -> Iterator[list[_T]]: ...
|
| 172 |
+
def split_when(
|
| 173 |
+
iterable: Iterable[_T],
|
| 174 |
+
pred: Callable[[_T, _T], object],
|
| 175 |
+
maxsplit: int = ...,
|
| 176 |
+
) -> Iterator[list[_T]]: ...
|
| 177 |
+
def split_into(
|
| 178 |
+
iterable: Iterable[_T], sizes: Iterable[int | None]
|
| 179 |
+
) -> Iterator[list[_T]]: ...
|
| 180 |
+
@overload
|
| 181 |
+
def padded(
|
| 182 |
+
iterable: Iterable[_T],
|
| 183 |
+
*,
|
| 184 |
+
n: int | None = ...,
|
| 185 |
+
next_multiple: bool = ...,
|
| 186 |
+
) -> Iterator[_T | None]: ...
|
| 187 |
+
@overload
|
| 188 |
+
def padded(
|
| 189 |
+
iterable: Iterable[_T],
|
| 190 |
+
fillvalue: _U,
|
| 191 |
+
n: int | None = ...,
|
| 192 |
+
next_multiple: bool = ...,
|
| 193 |
+
) -> Iterator[_T | _U]: ...
|
| 194 |
+
@overload
|
| 195 |
+
def repeat_last(iterable: Iterable[_T]) -> Iterator[_T]: ...
|
| 196 |
+
@overload
|
| 197 |
+
def repeat_last(iterable: Iterable[_T], default: _U) -> Iterator[_T | _U]: ...
|
| 198 |
+
def distribute(n: int, iterable: Iterable[_T]) -> list[Iterator[_T]]: ...
|
| 199 |
+
@overload
|
| 200 |
+
def stagger(
|
| 201 |
+
iterable: Iterable[_T],
|
| 202 |
+
offsets: _SizedIterable[int] = ...,
|
| 203 |
+
longest: bool = ...,
|
| 204 |
+
) -> Iterator[tuple[_T | None, ...]]: ...
|
| 205 |
+
@overload
|
| 206 |
+
def stagger(
|
| 207 |
+
iterable: Iterable[_T],
|
| 208 |
+
offsets: _SizedIterable[int] = ...,
|
| 209 |
+
longest: bool = ...,
|
| 210 |
+
fillvalue: _U = ...,
|
| 211 |
+
) -> Iterator[tuple[_T | _U, ...]]: ...
|
| 212 |
+
|
| 213 |
+
class UnequalIterablesError(ValueError):
|
| 214 |
+
def __init__(self, details: tuple[int, int, int] | None = ...) -> None: ...
|
| 215 |
+
|
| 216 |
+
@overload
|
| 217 |
+
def zip_equal(__iter1: Iterable[_T1]) -> Iterator[tuple[_T1]]: ...
|
| 218 |
+
@overload
|
| 219 |
+
def zip_equal(
|
| 220 |
+
__iter1: Iterable[_T1], __iter2: Iterable[_T2]
|
| 221 |
+
) -> Iterator[tuple[_T1, _T2]]: ...
|
| 222 |
+
@overload
|
| 223 |
+
def zip_equal(
|
| 224 |
+
__iter1: Iterable[_T],
|
| 225 |
+
__iter2: Iterable[_T],
|
| 226 |
+
__iter3: Iterable[_T],
|
| 227 |
+
*iterables: Iterable[_T],
|
| 228 |
+
) -> Iterator[tuple[_T, ...]]: ...
|
| 229 |
+
@overload
|
| 230 |
+
def zip_offset(
|
| 231 |
+
__iter1: Iterable[_T1],
|
| 232 |
+
*,
|
| 233 |
+
offsets: _SizedIterable[int],
|
| 234 |
+
longest: bool = ...,
|
| 235 |
+
fillvalue: None = None,
|
| 236 |
+
) -> Iterator[tuple[_T1 | None]]: ...
|
| 237 |
+
@overload
|
| 238 |
+
def zip_offset(
|
| 239 |
+
__iter1: Iterable[_T1],
|
| 240 |
+
__iter2: Iterable[_T2],
|
| 241 |
+
*,
|
| 242 |
+
offsets: _SizedIterable[int],
|
| 243 |
+
longest: bool = ...,
|
| 244 |
+
fillvalue: None = None,
|
| 245 |
+
) -> Iterator[tuple[_T1 | None, _T2 | None]]: ...
|
| 246 |
+
@overload
|
| 247 |
+
def zip_offset(
|
| 248 |
+
__iter1: Iterable[_T],
|
| 249 |
+
__iter2: Iterable[_T],
|
| 250 |
+
__iter3: Iterable[_T],
|
| 251 |
+
*iterables: Iterable[_T],
|
| 252 |
+
offsets: _SizedIterable[int],
|
| 253 |
+
longest: bool = ...,
|
| 254 |
+
fillvalue: None = None,
|
| 255 |
+
) -> Iterator[tuple[_T | None, ...]]: ...
|
| 256 |
+
@overload
|
| 257 |
+
def zip_offset(
|
| 258 |
+
__iter1: Iterable[_T1],
|
| 259 |
+
*,
|
| 260 |
+
offsets: _SizedIterable[int],
|
| 261 |
+
longest: bool = ...,
|
| 262 |
+
fillvalue: _U,
|
| 263 |
+
) -> Iterator[tuple[_T1 | _U]]: ...
|
| 264 |
+
@overload
|
| 265 |
+
def zip_offset(
|
| 266 |
+
__iter1: Iterable[_T1],
|
| 267 |
+
__iter2: Iterable[_T2],
|
| 268 |
+
*,
|
| 269 |
+
offsets: _SizedIterable[int],
|
| 270 |
+
longest: bool = ...,
|
| 271 |
+
fillvalue: _U,
|
| 272 |
+
) -> Iterator[tuple[_T1 | _U, _T2 | _U]]: ...
|
| 273 |
+
@overload
|
| 274 |
+
def zip_offset(
|
| 275 |
+
__iter1: Iterable[_T],
|
| 276 |
+
__iter2: Iterable[_T],
|
| 277 |
+
__iter3: Iterable[_T],
|
| 278 |
+
*iterables: Iterable[_T],
|
| 279 |
+
offsets: _SizedIterable[int],
|
| 280 |
+
longest: bool = ...,
|
| 281 |
+
fillvalue: _U,
|
| 282 |
+
) -> Iterator[tuple[_T | _U, ...]]: ...
|
| 283 |
+
def sort_together(
|
| 284 |
+
iterables: Iterable[Iterable[_T]],
|
| 285 |
+
key_list: Iterable[int] = ...,
|
| 286 |
+
key: Callable[..., Any] | None = ...,
|
| 287 |
+
reverse: bool = ...,
|
| 288 |
+
) -> list[tuple[_T, ...]]: ...
|
| 289 |
+
def unzip(iterable: Iterable[Sequence[_T]]) -> tuple[Iterator[_T], ...]: ...
|
| 290 |
+
def divide(n: int, iterable: Iterable[_T]) -> list[Iterator[_T]]: ...
|
| 291 |
+
def always_iterable(
|
| 292 |
+
obj: object,
|
| 293 |
+
base_type: type | tuple[type | tuple[Any, ...], ...] | None = ...,
|
| 294 |
+
) -> Iterator[Any]: ...
|
| 295 |
+
def adjacent(
|
| 296 |
+
predicate: Callable[[_T], bool],
|
| 297 |
+
iterable: Iterable[_T],
|
| 298 |
+
distance: int = ...,
|
| 299 |
+
) -> Iterator[tuple[bool, _T]]: ...
|
| 300 |
+
@overload
|
| 301 |
+
def groupby_transform(
|
| 302 |
+
iterable: Iterable[_T],
|
| 303 |
+
keyfunc: None = None,
|
| 304 |
+
valuefunc: None = None,
|
| 305 |
+
reducefunc: None = None,
|
| 306 |
+
) -> Iterator[tuple[_T, Iterator[_T]]]: ...
|
| 307 |
+
@overload
|
| 308 |
+
def groupby_transform(
|
| 309 |
+
iterable: Iterable[_T],
|
| 310 |
+
keyfunc: Callable[[_T], _U],
|
| 311 |
+
valuefunc: None,
|
| 312 |
+
reducefunc: None,
|
| 313 |
+
) -> Iterator[tuple[_U, Iterator[_T]]]: ...
|
| 314 |
+
@overload
|
| 315 |
+
def groupby_transform(
|
| 316 |
+
iterable: Iterable[_T],
|
| 317 |
+
keyfunc: None,
|
| 318 |
+
valuefunc: Callable[[_T], _V],
|
| 319 |
+
reducefunc: None,
|
| 320 |
+
) -> Iterable[tuple[_T, Iterable[_V]]]: ...
|
| 321 |
+
@overload
|
| 322 |
+
def groupby_transform(
|
| 323 |
+
iterable: Iterable[_T],
|
| 324 |
+
keyfunc: Callable[[_T], _U],
|
| 325 |
+
valuefunc: Callable[[_T], _V],
|
| 326 |
+
reducefunc: None,
|
| 327 |
+
) -> Iterable[tuple[_U, Iterator[_V]]]: ...
|
| 328 |
+
@overload
|
| 329 |
+
def groupby_transform(
|
| 330 |
+
iterable: Iterable[_T],
|
| 331 |
+
keyfunc: None,
|
| 332 |
+
valuefunc: None,
|
| 333 |
+
reducefunc: Callable[[Iterator[_T]], _W],
|
| 334 |
+
) -> Iterable[tuple[_T, _W]]: ...
|
| 335 |
+
@overload
|
| 336 |
+
def groupby_transform(
|
| 337 |
+
iterable: Iterable[_T],
|
| 338 |
+
keyfunc: Callable[[_T], _U],
|
| 339 |
+
valuefunc: None,
|
| 340 |
+
reducefunc: Callable[[Iterator[_T]], _W],
|
| 341 |
+
) -> Iterable[tuple[_U, _W]]: ...
|
| 342 |
+
@overload
|
| 343 |
+
def groupby_transform(
|
| 344 |
+
iterable: Iterable[_T],
|
| 345 |
+
keyfunc: None,
|
| 346 |
+
valuefunc: Callable[[_T], _V],
|
| 347 |
+
reducefunc: Callable[[Iterable[_V]], _W],
|
| 348 |
+
) -> Iterable[tuple[_T, _W]]: ...
|
| 349 |
+
@overload
|
| 350 |
+
def groupby_transform(
|
| 351 |
+
iterable: Iterable[_T],
|
| 352 |
+
keyfunc: Callable[[_T], _U],
|
| 353 |
+
valuefunc: Callable[[_T], _V],
|
| 354 |
+
reducefunc: Callable[[Iterable[_V]], _W],
|
| 355 |
+
) -> Iterable[tuple[_U, _W]]: ...
|
| 356 |
+
|
| 357 |
+
class numeric_range(Generic[_T, _U], Sequence[_T], Hashable, Reversible[_T]):
|
| 358 |
+
@overload
|
| 359 |
+
def __init__(self, __stop: _T) -> None: ...
|
| 360 |
+
@overload
|
| 361 |
+
def __init__(self, __start: _T, __stop: _T) -> None: ...
|
| 362 |
+
@overload
|
| 363 |
+
def __init__(self, __start: _T, __stop: _T, __step: _U) -> None: ...
|
| 364 |
+
def __bool__(self) -> bool: ...
|
| 365 |
+
def __contains__(self, elem: object) -> bool: ...
|
| 366 |
+
def __eq__(self, other: object) -> bool: ...
|
| 367 |
+
@overload
|
| 368 |
+
def __getitem__(self, key: int) -> _T: ...
|
| 369 |
+
@overload
|
| 370 |
+
def __getitem__(self, key: slice) -> numeric_range[_T, _U]: ...
|
| 371 |
+
def __hash__(self) -> int: ...
|
| 372 |
+
def __iter__(self) -> Iterator[_T]: ...
|
| 373 |
+
def __len__(self) -> int: ...
|
| 374 |
+
def __reduce__(
|
| 375 |
+
self,
|
| 376 |
+
) -> tuple[Type[numeric_range[_T, _U]], tuple[_T, _T, _U]]: ...
|
| 377 |
+
def __repr__(self) -> str: ...
|
| 378 |
+
def __reversed__(self) -> Iterator[_T]: ...
|
| 379 |
+
def count(self, value: _T) -> int: ...
|
| 380 |
+
def index(self, value: _T) -> int: ... # type: ignore
|
| 381 |
+
|
| 382 |
+
def count_cycle(
|
| 383 |
+
iterable: Iterable[_T], n: int | None = ...
|
| 384 |
+
) -> Iterable[tuple[int, _T]]: ...
|
| 385 |
+
def mark_ends(
|
| 386 |
+
iterable: Iterable[_T],
|
| 387 |
+
) -> Iterable[tuple[bool, bool, _T]]: ...
|
| 388 |
+
def locate(
|
| 389 |
+
iterable: Iterable[_T],
|
| 390 |
+
pred: Callable[..., Any] = ...,
|
| 391 |
+
window_size: int | None = ...,
|
| 392 |
+
) -> Iterator[int]: ...
|
| 393 |
+
def lstrip(
|
| 394 |
+
iterable: Iterable[_T], pred: Callable[[_T], object]
|
| 395 |
+
) -> Iterator[_T]: ...
|
| 396 |
+
def rstrip(
|
| 397 |
+
iterable: Iterable[_T], pred: Callable[[_T], object]
|
| 398 |
+
) -> Iterator[_T]: ...
|
| 399 |
+
def strip(
|
| 400 |
+
iterable: Iterable[_T], pred: Callable[[_T], object]
|
| 401 |
+
) -> Iterator[_T]: ...
|
| 402 |
+
|
| 403 |
+
class islice_extended(Generic[_T], Iterator[_T]):
|
| 404 |
+
def __init__(self, iterable: Iterable[_T], *args: int | None) -> None: ...
|
| 405 |
+
def __iter__(self) -> islice_extended[_T]: ...
|
| 406 |
+
def __next__(self) -> _T: ...
|
| 407 |
+
def __getitem__(self, index: slice) -> islice_extended[_T]: ...
|
| 408 |
+
|
| 409 |
+
def always_reversible(iterable: Iterable[_T]) -> Iterator[_T]: ...
|
| 410 |
+
def consecutive_groups(
|
| 411 |
+
iterable: Iterable[_T], ordering: Callable[[_T], int] = ...
|
| 412 |
+
) -> Iterator[Iterator[_T]]: ...
|
| 413 |
+
@overload
|
| 414 |
+
def difference(
|
| 415 |
+
iterable: Iterable[_T],
|
| 416 |
+
func: Callable[[_T, _T], _U] = ...,
|
| 417 |
+
*,
|
| 418 |
+
initial: None = ...,
|
| 419 |
+
) -> Iterator[_T | _U]: ...
|
| 420 |
+
@overload
|
| 421 |
+
def difference(
|
| 422 |
+
iterable: Iterable[_T], func: Callable[[_T, _T], _U] = ..., *, initial: _U
|
| 423 |
+
) -> Iterator[_U]: ...
|
| 424 |
+
|
| 425 |
+
class SequenceView(Generic[_T], Sequence[_T]):
|
| 426 |
+
def __init__(self, target: Sequence[_T]) -> None: ...
|
| 427 |
+
@overload
|
| 428 |
+
def __getitem__(self, index: int) -> _T: ...
|
| 429 |
+
@overload
|
| 430 |
+
def __getitem__(self, index: slice) -> Sequence[_T]: ...
|
| 431 |
+
def __len__(self) -> int: ...
|
| 432 |
+
|
| 433 |
+
class seekable(Generic[_T], Iterator[_T]):
|
| 434 |
+
def __init__(
|
| 435 |
+
self, iterable: Iterable[_T], maxlen: int | None = ...
|
| 436 |
+
) -> None: ...
|
| 437 |
+
def __iter__(self) -> seekable[_T]: ...
|
| 438 |
+
def __next__(self) -> _T: ...
|
| 439 |
+
def __bool__(self) -> bool: ...
|
| 440 |
+
@overload
|
| 441 |
+
def peek(self) -> _T: ...
|
| 442 |
+
@overload
|
| 443 |
+
def peek(self, default: _U) -> _T | _U: ...
|
| 444 |
+
def elements(self) -> SequenceView[_T]: ...
|
| 445 |
+
def seek(self, index: int) -> None: ...
|
| 446 |
+
def relative_seek(self, count: int) -> None: ...
|
| 447 |
+
|
| 448 |
+
class run_length:
|
| 449 |
+
@staticmethod
|
| 450 |
+
def encode(iterable: Iterable[_T]) -> Iterator[tuple[_T, int]]: ...
|
| 451 |
+
@staticmethod
|
| 452 |
+
def decode(iterable: Iterable[tuple[_T, int]]) -> Iterator[_T]: ...
|
| 453 |
+
|
| 454 |
+
def exactly_n(
|
| 455 |
+
iterable: Iterable[_T], n: int, predicate: Callable[[_T], object] = ...
|
| 456 |
+
) -> bool: ...
|
| 457 |
+
def circular_shifts(iterable: Iterable[_T]) -> list[tuple[_T, ...]]: ...
|
| 458 |
+
def make_decorator(
|
| 459 |
+
wrapping_func: Callable[..., _U], result_index: int = ...
|
| 460 |
+
) -> Callable[..., Callable[[Callable[..., Any]], Callable[..., _U]]]: ...
|
| 461 |
+
@overload
|
| 462 |
+
def map_reduce(
|
| 463 |
+
iterable: Iterable[_T],
|
| 464 |
+
keyfunc: Callable[[_T], _U],
|
| 465 |
+
valuefunc: None = ...,
|
| 466 |
+
reducefunc: None = ...,
|
| 467 |
+
) -> dict[_U, list[_T]]: ...
|
| 468 |
+
@overload
|
| 469 |
+
def map_reduce(
|
| 470 |
+
iterable: Iterable[_T],
|
| 471 |
+
keyfunc: Callable[[_T], _U],
|
| 472 |
+
valuefunc: Callable[[_T], _V],
|
| 473 |
+
reducefunc: None = ...,
|
| 474 |
+
) -> dict[_U, list[_V]]: ...
|
| 475 |
+
@overload
|
| 476 |
+
def map_reduce(
|
| 477 |
+
iterable: Iterable[_T],
|
| 478 |
+
keyfunc: Callable[[_T], _U],
|
| 479 |
+
valuefunc: None = ...,
|
| 480 |
+
reducefunc: Callable[[list[_T]], _W] = ...,
|
| 481 |
+
) -> dict[_U, _W]: ...
|
| 482 |
+
@overload
|
| 483 |
+
def map_reduce(
|
| 484 |
+
iterable: Iterable[_T],
|
| 485 |
+
keyfunc: Callable[[_T], _U],
|
| 486 |
+
valuefunc: Callable[[_T], _V],
|
| 487 |
+
reducefunc: Callable[[list[_V]], _W],
|
| 488 |
+
) -> dict[_U, _W]: ...
|
| 489 |
+
def rlocate(
|
| 490 |
+
iterable: Iterable[_T],
|
| 491 |
+
pred: Callable[..., object] = ...,
|
| 492 |
+
window_size: int | None = ...,
|
| 493 |
+
) -> Iterator[int]: ...
|
| 494 |
+
def replace(
|
| 495 |
+
iterable: Iterable[_T],
|
| 496 |
+
pred: Callable[..., object],
|
| 497 |
+
substitutes: Iterable[_U],
|
| 498 |
+
count: int | None = ...,
|
| 499 |
+
window_size: int = ...,
|
| 500 |
+
) -> Iterator[_T | _U]: ...
|
| 501 |
+
def partitions(iterable: Iterable[_T]) -> Iterator[list[list[_T]]]: ...
|
| 502 |
+
def set_partitions(
|
| 503 |
+
iterable: Iterable[_T], k: int | None = ...
|
| 504 |
+
) -> Iterator[list[list[_T]]]: ...
|
| 505 |
+
|
| 506 |
+
class time_limited(Generic[_T], Iterator[_T]):
|
| 507 |
+
def __init__(
|
| 508 |
+
self, limit_seconds: float, iterable: Iterable[_T]
|
| 509 |
+
) -> None: ...
|
| 510 |
+
def __iter__(self) -> islice_extended[_T]: ...
|
| 511 |
+
def __next__(self) -> _T: ...
|
| 512 |
+
|
| 513 |
+
@overload
|
| 514 |
+
def only(
|
| 515 |
+
iterable: Iterable[_T], *, too_long: _Raisable | None = ...
|
| 516 |
+
) -> _T | None: ...
|
| 517 |
+
@overload
|
| 518 |
+
def only(
|
| 519 |
+
iterable: Iterable[_T], default: _U, too_long: _Raisable | None = ...
|
| 520 |
+
) -> _T | _U: ...
|
| 521 |
+
def ichunked(iterable: Iterable[_T], n: int) -> Iterator[Iterator[_T]]: ...
|
| 522 |
+
def distinct_combinations(
|
| 523 |
+
iterable: Iterable[_T], r: int
|
| 524 |
+
) -> Iterator[tuple[_T, ...]]: ...
|
| 525 |
+
def filter_except(
|
| 526 |
+
validator: Callable[[Any], object],
|
| 527 |
+
iterable: Iterable[_T],
|
| 528 |
+
*exceptions: Type[BaseException],
|
| 529 |
+
) -> Iterator[_T]: ...
|
| 530 |
+
def map_except(
|
| 531 |
+
function: Callable[[Any], _U],
|
| 532 |
+
iterable: Iterable[_T],
|
| 533 |
+
*exceptions: Type[BaseException],
|
| 534 |
+
) -> Iterator[_U]: ...
|
| 535 |
+
def map_if(
|
| 536 |
+
iterable: Iterable[Any],
|
| 537 |
+
pred: Callable[[Any], bool],
|
| 538 |
+
func: Callable[[Any], Any],
|
| 539 |
+
func_else: Callable[[Any], Any] | None = ...,
|
| 540 |
+
) -> Iterator[Any]: ...
|
| 541 |
+
def sample(
|
| 542 |
+
iterable: Iterable[_T],
|
| 543 |
+
k: int,
|
| 544 |
+
weights: Iterable[float] | None = ...,
|
| 545 |
+
) -> list[_T]: ...
|
| 546 |
+
def is_sorted(
|
| 547 |
+
iterable: Iterable[_T],
|
| 548 |
+
key: Callable[[_T], _U] | None = ...,
|
| 549 |
+
reverse: bool = False,
|
| 550 |
+
strict: bool = False,
|
| 551 |
+
) -> bool: ...
|
| 552 |
+
|
| 553 |
+
class AbortThread(BaseException):
|
| 554 |
+
pass
|
| 555 |
+
|
| 556 |
+
class callback_iter(Generic[_T], Iterator[_T]):
|
| 557 |
+
def __init__(
|
| 558 |
+
self,
|
| 559 |
+
func: Callable[..., Any],
|
| 560 |
+
callback_kwd: str = ...,
|
| 561 |
+
wait_seconds: float = ...,
|
| 562 |
+
) -> None: ...
|
| 563 |
+
def __enter__(self) -> callback_iter[_T]: ...
|
| 564 |
+
def __exit__(
|
| 565 |
+
self,
|
| 566 |
+
exc_type: Type[BaseException] | None,
|
| 567 |
+
exc_value: BaseException | None,
|
| 568 |
+
traceback: TracebackType | None,
|
| 569 |
+
) -> bool | None: ...
|
| 570 |
+
def __iter__(self) -> callback_iter[_T]: ...
|
| 571 |
+
def __next__(self) -> _T: ...
|
| 572 |
+
def _reader(self) -> Iterator[_T]: ...
|
| 573 |
+
@property
|
| 574 |
+
def done(self) -> bool: ...
|
| 575 |
+
@property
|
| 576 |
+
def result(self) -> Any: ...
|
| 577 |
+
|
| 578 |
+
def windowed_complete(
|
| 579 |
+
iterable: Iterable[_T], n: int
|
| 580 |
+
) -> Iterator[tuple[_T, ...]]: ...
|
| 581 |
+
def all_unique(
|
| 582 |
+
iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
|
| 583 |
+
) -> bool: ...
|
| 584 |
+
def nth_product(index: int, *args: Iterable[_T]) -> tuple[_T, ...]: ...
|
| 585 |
+
def nth_combination_with_replacement(
|
| 586 |
+
iterable: Iterable[_T], r: int, index: int
|
| 587 |
+
) -> tuple[_T, ...]: ...
|
| 588 |
+
def nth_permutation(
|
| 589 |
+
iterable: Iterable[_T], r: int, index: int
|
| 590 |
+
) -> tuple[_T, ...]: ...
|
| 591 |
+
def value_chain(*args: _T | Iterable[_T]) -> Iterable[_T]: ...
|
| 592 |
+
def product_index(element: Iterable[_T], *args: Iterable[_T]) -> int: ...
|
| 593 |
+
def combination_index(
|
| 594 |
+
element: Iterable[_T], iterable: Iterable[_T]
|
| 595 |
+
) -> int: ...
|
| 596 |
+
def combination_with_replacement_index(
|
| 597 |
+
element: Iterable[_T], iterable: Iterable[_T]
|
| 598 |
+
) -> int: ...
|
| 599 |
+
def permutation_index(
|
| 600 |
+
element: Iterable[_T], iterable: Iterable[_T]
|
| 601 |
+
) -> int: ...
|
| 602 |
+
def repeat_each(iterable: Iterable[_T], n: int = ...) -> Iterator[_T]: ...
|
| 603 |
+
|
| 604 |
+
class countable(Generic[_T], Iterator[_T]):
|
| 605 |
+
def __init__(self, iterable: Iterable[_T]) -> None: ...
|
| 606 |
+
def __iter__(self) -> countable[_T]: ...
|
| 607 |
+
def __next__(self) -> _T: ...
|
| 608 |
+
items_seen: int
|
| 609 |
+
|
| 610 |
+
def chunked_even(iterable: Iterable[_T], n: int) -> Iterator[list[_T]]: ...
|
| 611 |
+
def zip_broadcast(
|
| 612 |
+
*objects: _T | Iterable[_T],
|
| 613 |
+
scalar_types: type | tuple[type | tuple[Any, ...], ...] | None = ...,
|
| 614 |
+
strict: bool = ...,
|
| 615 |
+
) -> Iterable[tuple[_T, ...]]: ...
|
| 616 |
+
def unique_in_window(
|
| 617 |
+
iterable: Iterable[_T], n: int, key: Callable[[_T], _U] | None = ...
|
| 618 |
+
) -> Iterator[_T]: ...
|
| 619 |
+
def duplicates_everseen(
|
| 620 |
+
iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
|
| 621 |
+
) -> Iterator[_T]: ...
|
| 622 |
+
def duplicates_justseen(
|
| 623 |
+
iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
|
| 624 |
+
) -> Iterator[_T]: ...
|
| 625 |
+
def classify_unique(
|
| 626 |
+
iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
|
| 627 |
+
) -> Iterator[tuple[_T, bool, bool]]: ...
|
| 628 |
+
|
| 629 |
+
class _SupportsLessThan(Protocol):
|
| 630 |
+
def __lt__(self, __other: Any) -> bool: ...
|
| 631 |
+
|
| 632 |
+
_SupportsLessThanT = TypeVar("_SupportsLessThanT", bound=_SupportsLessThan)
|
| 633 |
+
|
| 634 |
+
@overload
|
| 635 |
+
def minmax(
|
| 636 |
+
iterable_or_value: Iterable[_SupportsLessThanT], *, key: None = None
|
| 637 |
+
) -> tuple[_SupportsLessThanT, _SupportsLessThanT]: ...
|
| 638 |
+
@overload
|
| 639 |
+
def minmax(
|
| 640 |
+
iterable_or_value: Iterable[_T], *, key: Callable[[_T], _SupportsLessThan]
|
| 641 |
+
) -> tuple[_T, _T]: ...
|
| 642 |
+
@overload
|
| 643 |
+
def minmax(
|
| 644 |
+
iterable_or_value: Iterable[_SupportsLessThanT],
|
| 645 |
+
*,
|
| 646 |
+
key: None = None,
|
| 647 |
+
default: _U,
|
| 648 |
+
) -> _U | tuple[_SupportsLessThanT, _SupportsLessThanT]: ...
|
| 649 |
+
@overload
|
| 650 |
+
def minmax(
|
| 651 |
+
iterable_or_value: Iterable[_T],
|
| 652 |
+
*,
|
| 653 |
+
key: Callable[[_T], _SupportsLessThan],
|
| 654 |
+
default: _U,
|
| 655 |
+
) -> _U | tuple[_T, _T]: ...
|
| 656 |
+
@overload
|
| 657 |
+
def minmax(
|
| 658 |
+
iterable_or_value: _SupportsLessThanT,
|
| 659 |
+
__other: _SupportsLessThanT,
|
| 660 |
+
*others: _SupportsLessThanT,
|
| 661 |
+
) -> tuple[_SupportsLessThanT, _SupportsLessThanT]: ...
|
| 662 |
+
@overload
|
| 663 |
+
def minmax(
|
| 664 |
+
iterable_or_value: _T,
|
| 665 |
+
__other: _T,
|
| 666 |
+
*others: _T,
|
| 667 |
+
key: Callable[[_T], _SupportsLessThan],
|
| 668 |
+
) -> tuple[_T, _T]: ...
|
| 669 |
+
def longest_common_prefix(
|
| 670 |
+
iterables: Iterable[Iterable[_T]],
|
| 671 |
+
) -> Iterator[_T]: ...
|
| 672 |
+
def iequals(*iterables: Iterable[Any]) -> bool: ...
|
| 673 |
+
def constrained_batches(
|
| 674 |
+
iterable: Iterable[_T],
|
| 675 |
+
max_size: int,
|
| 676 |
+
max_count: int | None = ...,
|
| 677 |
+
get_len: Callable[[_T], object] = ...,
|
| 678 |
+
strict: bool = ...,
|
| 679 |
+
) -> Iterator[tuple[_T]]: ...
|
| 680 |
+
def gray_product(*iterables: Iterable[_T]) -> Iterator[tuple[_T, ...]]: ...
|
| 681 |
+
def partial_product(*iterables: Iterable[_T]) -> Iterator[tuple[_T, ...]]: ...
|
| 682 |
+
def takewhile_inclusive(
|
| 683 |
+
predicate: Callable[[_T], bool], iterable: Iterable[_T]
|
| 684 |
+
) -> Iterator[_T]: ...
|
| 685 |
+
def outer_product(
|
| 686 |
+
func: Callable[[_T, _U], _V],
|
| 687 |
+
xs: Iterable[_T],
|
| 688 |
+
ys: Iterable[_U],
|
| 689 |
+
*args: Any,
|
| 690 |
+
**kwargs: Any,
|
| 691 |
+
) -> Iterator[tuple[_V, ...]]: ...
|
| 692 |
+
def iter_suppress(
|
| 693 |
+
iterable: Iterable[_T],
|
| 694 |
+
*exceptions: Type[BaseException],
|
| 695 |
+
) -> Iterator[_T]: ...
|
| 696 |
+
def filter_map(
|
| 697 |
+
func: Callable[[_T], _V | None],
|
| 698 |
+
iterable: Iterable[_T],
|
| 699 |
+
) -> Iterator[_V]: ...
|
| 700 |
+
def powerset_of_sets(iterable: Iterable[_T]) -> Iterator[set[_T]]: ...
|
| 701 |
+
def join_mappings(
|
| 702 |
+
**field_to_map: Mapping[_T, _V]
|
| 703 |
+
) -> dict[_T, dict[str, _V]]: ...
|
| 704 |
+
def doublestarmap(
|
| 705 |
+
func: Callable[..., _T],
|
| 706 |
+
iterable: Iterable[Mapping[str, Any]],
|
| 707 |
+
) -> Iterator[_T]: ...
|
| 708 |
+
def dft(xarr: Sequence[complex]) -> Iterator[complex]: ...
|
| 709 |
+
def idft(Xarr: Sequence[complex]) -> Iterator[complex]: ...
|
llava_next/lib/python3.10/site-packages/more_itertools/py.typed
ADDED
|
File without changes
|
llava_next/lib/python3.10/site-packages/more_itertools/recipes.pyi
ADDED
|
@@ -0,0 +1,136 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Stubs for more_itertools.recipes"""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from typing import (
|
| 6 |
+
Any,
|
| 7 |
+
Callable,
|
| 8 |
+
Iterable,
|
| 9 |
+
Iterator,
|
| 10 |
+
overload,
|
| 11 |
+
Sequence,
|
| 12 |
+
Type,
|
| 13 |
+
TypeVar,
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
# Type and type variable definitions
|
| 17 |
+
_T = TypeVar('_T')
|
| 18 |
+
_T1 = TypeVar('_T1')
|
| 19 |
+
_T2 = TypeVar('_T2')
|
| 20 |
+
_U = TypeVar('_U')
|
| 21 |
+
|
| 22 |
+
def take(n: int, iterable: Iterable[_T]) -> list[_T]: ...
|
| 23 |
+
def tabulate(
|
| 24 |
+
function: Callable[[int], _T], start: int = ...
|
| 25 |
+
) -> Iterator[_T]: ...
|
| 26 |
+
def tail(n: int, iterable: Iterable[_T]) -> Iterator[_T]: ...
|
| 27 |
+
def consume(iterator: Iterable[_T], n: int | None = ...) -> None: ...
|
| 28 |
+
@overload
|
| 29 |
+
def nth(iterable: Iterable[_T], n: int) -> _T | None: ...
|
| 30 |
+
@overload
|
| 31 |
+
def nth(iterable: Iterable[_T], n: int, default: _U) -> _T | _U: ...
|
| 32 |
+
def all_equal(
|
| 33 |
+
iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
|
| 34 |
+
) -> bool: ...
|
| 35 |
+
def quantify(
|
| 36 |
+
iterable: Iterable[_T], pred: Callable[[_T], bool] = ...
|
| 37 |
+
) -> int: ...
|
| 38 |
+
def pad_none(iterable: Iterable[_T]) -> Iterator[_T | None]: ...
|
| 39 |
+
def padnone(iterable: Iterable[_T]) -> Iterator[_T | None]: ...
|
| 40 |
+
def ncycles(iterable: Iterable[_T], n: int) -> Iterator[_T]: ...
|
| 41 |
+
def dotproduct(vec1: Iterable[_T1], vec2: Iterable[_T2]) -> Any: ...
|
| 42 |
+
def flatten(listOfLists: Iterable[Iterable[_T]]) -> Iterator[_T]: ...
|
| 43 |
+
def repeatfunc(
|
| 44 |
+
func: Callable[..., _U], times: int | None = ..., *args: Any
|
| 45 |
+
) -> Iterator[_U]: ...
|
| 46 |
+
def pairwise(iterable: Iterable[_T]) -> Iterator[tuple[_T, _T]]: ...
|
| 47 |
+
def grouper(
|
| 48 |
+
iterable: Iterable[_T],
|
| 49 |
+
n: int,
|
| 50 |
+
incomplete: str = ...,
|
| 51 |
+
fillvalue: _U = ...,
|
| 52 |
+
) -> Iterator[tuple[_T | _U, ...]]: ...
|
| 53 |
+
def roundrobin(*iterables: Iterable[_T]) -> Iterator[_T]: ...
|
| 54 |
+
def partition(
|
| 55 |
+
pred: Callable[[_T], object] | None, iterable: Iterable[_T]
|
| 56 |
+
) -> tuple[Iterator[_T], Iterator[_T]]: ...
|
| 57 |
+
def powerset(iterable: Iterable[_T]) -> Iterator[tuple[_T, ...]]: ...
|
| 58 |
+
def unique_everseen(
|
| 59 |
+
iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
|
| 60 |
+
) -> Iterator[_T]: ...
|
| 61 |
+
def unique_justseen(
|
| 62 |
+
iterable: Iterable[_T], key: Callable[[_T], object] | None = ...
|
| 63 |
+
) -> Iterator[_T]: ...
|
| 64 |
+
def unique(
|
| 65 |
+
iterable: Iterable[_T],
|
| 66 |
+
key: Callable[[_T], object] | None = ...,
|
| 67 |
+
reverse: bool = False,
|
| 68 |
+
) -> Iterator[_T]: ...
|
| 69 |
+
@overload
|
| 70 |
+
def iter_except(
|
| 71 |
+
func: Callable[[], _T],
|
| 72 |
+
exception: Type[BaseException] | tuple[Type[BaseException], ...],
|
| 73 |
+
first: None = ...,
|
| 74 |
+
) -> Iterator[_T]: ...
|
| 75 |
+
@overload
|
| 76 |
+
def iter_except(
|
| 77 |
+
func: Callable[[], _T],
|
| 78 |
+
exception: Type[BaseException] | tuple[Type[BaseException], ...],
|
| 79 |
+
first: Callable[[], _U],
|
| 80 |
+
) -> Iterator[_T | _U]: ...
|
| 81 |
+
@overload
|
| 82 |
+
def first_true(
|
| 83 |
+
iterable: Iterable[_T], *, pred: Callable[[_T], object] | None = ...
|
| 84 |
+
) -> _T | None: ...
|
| 85 |
+
@overload
|
| 86 |
+
def first_true(
|
| 87 |
+
iterable: Iterable[_T],
|
| 88 |
+
default: _U,
|
| 89 |
+
pred: Callable[[_T], object] | None = ...,
|
| 90 |
+
) -> _T | _U: ...
|
| 91 |
+
def random_product(
|
| 92 |
+
*args: Iterable[_T], repeat: int = ...
|
| 93 |
+
) -> tuple[_T, ...]: ...
|
| 94 |
+
def random_permutation(
|
| 95 |
+
iterable: Iterable[_T], r: int | None = ...
|
| 96 |
+
) -> tuple[_T, ...]: ...
|
| 97 |
+
def random_combination(iterable: Iterable[_T], r: int) -> tuple[_T, ...]: ...
|
| 98 |
+
def random_combination_with_replacement(
|
| 99 |
+
iterable: Iterable[_T], r: int
|
| 100 |
+
) -> tuple[_T, ...]: ...
|
| 101 |
+
def nth_combination(
|
| 102 |
+
iterable: Iterable[_T], r: int, index: int
|
| 103 |
+
) -> tuple[_T, ...]: ...
|
| 104 |
+
def prepend(value: _T, iterator: Iterable[_U]) -> Iterator[_T | _U]: ...
|
| 105 |
+
def convolve(signal: Iterable[_T], kernel: Iterable[_T]) -> Iterator[_T]: ...
|
| 106 |
+
def before_and_after(
|
| 107 |
+
predicate: Callable[[_T], bool], it: Iterable[_T]
|
| 108 |
+
) -> tuple[Iterator[_T], Iterator[_T]]: ...
|
| 109 |
+
def triplewise(iterable: Iterable[_T]) -> Iterator[tuple[_T, _T, _T]]: ...
|
| 110 |
+
def sliding_window(
|
| 111 |
+
iterable: Iterable[_T], n: int
|
| 112 |
+
) -> Iterator[tuple[_T, ...]]: ...
|
| 113 |
+
def subslices(iterable: Iterable[_T]) -> Iterator[list[_T]]: ...
|
| 114 |
+
def polynomial_from_roots(roots: Sequence[_T]) -> list[_T]: ...
|
| 115 |
+
def iter_index(
|
| 116 |
+
iterable: Iterable[_T],
|
| 117 |
+
value: Any,
|
| 118 |
+
start: int | None = ...,
|
| 119 |
+
stop: int | None = ...,
|
| 120 |
+
) -> Iterator[int]: ...
|
| 121 |
+
def sieve(n: int) -> Iterator[int]: ...
|
| 122 |
+
def batched(
|
| 123 |
+
iterable: Iterable[_T], n: int, *, strict: bool = False
|
| 124 |
+
) -> Iterator[tuple[_T]]: ...
|
| 125 |
+
def transpose(
|
| 126 |
+
it: Iterable[Iterable[_T]],
|
| 127 |
+
) -> Iterator[tuple[_T, ...]]: ...
|
| 128 |
+
def reshape(
|
| 129 |
+
matrix: Iterable[Iterable[_T]], cols: int
|
| 130 |
+
) -> Iterator[tuple[_T, ...]]: ...
|
| 131 |
+
def matmul(m1: Sequence[_T], m2: Sequence[_T]) -> Iterator[tuple[_T]]: ...
|
| 132 |
+
def factor(n: int) -> Iterator[int]: ...
|
| 133 |
+
def polynomial_eval(coefficients: Sequence[_T], x: _U) -> _U: ...
|
| 134 |
+
def sum_of_squares(it: Iterable[_T]) -> _T: ...
|
| 135 |
+
def polynomial_derivative(coefficients: Sequence[_T]) -> list[_T]: ...
|
| 136 |
+
def totient(n: int) -> int: ...
|
llava_next/lib/python3.10/site-packages/torchgen/executorch/__init__.py
ADDED
|
File without changes
|
llava_next/lib/python3.10/site-packages/torchgen/executorch/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (175 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/torchgen/executorch/__pycache__/model.cpython-310.pyc
ADDED
|
Binary file (7.32 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torchgen/executorch/__pycache__/parse.cpython-310.pyc
ADDED
|
Binary file (4.29 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torchgen/executorch/api/__init__.py
ADDED
|
File without changes
|
llava_next/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (179 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/custom_ops.cpython-310.pyc
ADDED
|
Binary file (3.87 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/et_cpp.cpython-310.pyc
ADDED
|
Binary file (7.44 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/unboxing.cpython-310.pyc
ADDED
|
Binary file (6.37 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torchgen/executorch/api/custom_ops.py
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections import defaultdict
|
| 2 |
+
|
| 3 |
+
from dataclasses import dataclass
|
| 4 |
+
from typing import Dict, List, Optional, Sequence, Tuple
|
| 5 |
+
|
| 6 |
+
from torchgen import dest
|
| 7 |
+
|
| 8 |
+
# disable import sorting to avoid circular dependency.
|
| 9 |
+
from torchgen.api.types import DispatcherSignature # isort:skip
|
| 10 |
+
from torchgen.context import method_with_native_function
|
| 11 |
+
from torchgen.executorch.model import ETKernelIndex
|
| 12 |
+
from torchgen.model import DispatchKey, NativeFunction, Variant
|
| 13 |
+
from torchgen.selective_build.selector import SelectiveBuilder
|
| 14 |
+
from torchgen.utils import concatMap, Target
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
# Generates RegisterKernelStub.cpp, which provides placeholder kernels for custom operators. This will be used at
|
| 18 |
+
# model authoring side.
|
| 19 |
+
@dataclass(frozen=True)
|
| 20 |
+
class ComputeNativeFunctionStub:
|
| 21 |
+
@method_with_native_function
|
| 22 |
+
def __call__(self, f: NativeFunction) -> Optional[str]:
|
| 23 |
+
if Variant.function not in f.variants:
|
| 24 |
+
return None
|
| 25 |
+
|
| 26 |
+
sig = DispatcherSignature.from_schema(
|
| 27 |
+
f.func, prefix=f"wrapper_CPU_{f.func.name.overload_name}_", symint=False
|
| 28 |
+
)
|
| 29 |
+
assert sig is not None
|
| 30 |
+
if len(f.func.returns) == 0:
|
| 31 |
+
ret_name = ""
|
| 32 |
+
elif len(f.func.returns) == 1:
|
| 33 |
+
if f.func.arguments.out:
|
| 34 |
+
ret_name = f.func.arguments.out[0].name
|
| 35 |
+
else:
|
| 36 |
+
ret_name = next(
|
| 37 |
+
(
|
| 38 |
+
a.name
|
| 39 |
+
for a in f.func.arguments.flat_non_out
|
| 40 |
+
if a.type == f.func.returns[0].type
|
| 41 |
+
),
|
| 42 |
+
"",
|
| 43 |
+
)
|
| 44 |
+
if not ret_name:
|
| 45 |
+
raise Exception(f"Can't handle this return type {f.func}")
|
| 46 |
+
else:
|
| 47 |
+
assert len(f.func.arguments.out) == len(f.func.returns), (
|
| 48 |
+
"Out variant number of returns need to match the number of out arguments."
|
| 49 |
+
f" Got outs {str(f.func.arguments.out)} but returns {str(f.func.returns)}"
|
| 50 |
+
)
|
| 51 |
+
# returns a tuple of out arguments
|
| 52 |
+
tensor_type = "at::Tensor &"
|
| 53 |
+
comma = ", "
|
| 54 |
+
ret_name = f"""::std::tuple<{comma.join([tensor_type] * len(f.func.returns))}>(
|
| 55 |
+
{comma.join([r.name for r in f.func.arguments.out])}
|
| 56 |
+
)"""
|
| 57 |
+
ret_str = f"return {ret_name};" if len(f.func.returns) > 0 else ""
|
| 58 |
+
return f"""
|
| 59 |
+
{sig.defn()} {{
|
| 60 |
+
{ret_str}
|
| 61 |
+
}}
|
| 62 |
+
"""
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def gen_custom_ops_registration(
|
| 66 |
+
*,
|
| 67 |
+
native_functions: Sequence[NativeFunction],
|
| 68 |
+
selector: SelectiveBuilder,
|
| 69 |
+
kernel_index: ETKernelIndex,
|
| 70 |
+
rocm: bool,
|
| 71 |
+
) -> Tuple[str, str]:
|
| 72 |
+
"""
|
| 73 |
+
Generate custom ops registration code for dest.RegisterDispatchKey.
|
| 74 |
+
|
| 75 |
+
:param native_functions: a sequence of `NativeFunction`
|
| 76 |
+
:param selector: for selective build.
|
| 77 |
+
:param kernel_index: kernels for all the ops.
|
| 78 |
+
:param rocm: bool for dest.RegisterDispatchKey.
|
| 79 |
+
:return: generated C++ code to register custom operators into PyTorch
|
| 80 |
+
"""
|
| 81 |
+
|
| 82 |
+
# convert kernel index to BackendIndex. This is because we can't handle ETKernelIndex yet.
|
| 83 |
+
# TODO larryliu: evaluate if this code is still needed. If yes let it handle ETKernelIndex.
|
| 84 |
+
|
| 85 |
+
dispatch_key = DispatchKey.CPU
|
| 86 |
+
backend_index = kernel_index._to_backend_index()
|
| 87 |
+
static_init_dispatch_registrations = ""
|
| 88 |
+
ns_grouped_native_functions: Dict[str, List[NativeFunction]] = defaultdict(list)
|
| 89 |
+
for native_function in native_functions:
|
| 90 |
+
ns_grouped_native_functions[native_function.namespace].append(native_function)
|
| 91 |
+
|
| 92 |
+
for namespace, functions in ns_grouped_native_functions.items():
|
| 93 |
+
if len(functions) == 0:
|
| 94 |
+
continue
|
| 95 |
+
dispatch_registrations_body = "\n".join(
|
| 96 |
+
list(
|
| 97 |
+
concatMap(
|
| 98 |
+
dest.RegisterDispatchKey(
|
| 99 |
+
backend_index,
|
| 100 |
+
Target.REGISTRATION,
|
| 101 |
+
selector,
|
| 102 |
+
rocm=rocm,
|
| 103 |
+
symint=False,
|
| 104 |
+
class_method_name=None,
|
| 105 |
+
skip_dispatcher_op_registration=False,
|
| 106 |
+
),
|
| 107 |
+
functions,
|
| 108 |
+
)
|
| 109 |
+
)
|
| 110 |
+
)
|
| 111 |
+
static_init_dispatch_registrations += f"""
|
| 112 |
+
TORCH_LIBRARY_IMPL({namespace}, {dispatch_key}, m) {{
|
| 113 |
+
{dispatch_registrations_body}
|
| 114 |
+
}};"""
|
| 115 |
+
anonymous_definition = "\n".join(
|
| 116 |
+
list(
|
| 117 |
+
concatMap(
|
| 118 |
+
dest.RegisterDispatchKey(
|
| 119 |
+
backend_index,
|
| 120 |
+
Target.ANONYMOUS_DEFINITION,
|
| 121 |
+
selector,
|
| 122 |
+
rocm=rocm,
|
| 123 |
+
symint=False,
|
| 124 |
+
class_method_name=None,
|
| 125 |
+
skip_dispatcher_op_registration=False,
|
| 126 |
+
),
|
| 127 |
+
native_functions,
|
| 128 |
+
)
|
| 129 |
+
)
|
| 130 |
+
)
|
| 131 |
+
return anonymous_definition, static_init_dispatch_registrations
|
llava_next/lib/python3.10/site-packages/torchgen/executorch/api/et_cpp.py
ADDED
|
@@ -0,0 +1,368 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List, Optional, Sequence, Set, Union
|
| 2 |
+
|
| 3 |
+
from torchgen import local
|
| 4 |
+
from torchgen.api.types import (
|
| 5 |
+
ArgName,
|
| 6 |
+
ArrayCType,
|
| 7 |
+
BaseCType,
|
| 8 |
+
Binding,
|
| 9 |
+
ConstRefCType,
|
| 10 |
+
CType,
|
| 11 |
+
MutRefCType,
|
| 12 |
+
NamedCType,
|
| 13 |
+
SpecialArgName,
|
| 14 |
+
TupleCType,
|
| 15 |
+
VectorCType,
|
| 16 |
+
voidT,
|
| 17 |
+
)
|
| 18 |
+
from torchgen.model import (
|
| 19 |
+
Argument,
|
| 20 |
+
Arguments,
|
| 21 |
+
BaseTy,
|
| 22 |
+
BaseType,
|
| 23 |
+
ListType,
|
| 24 |
+
NativeFunction,
|
| 25 |
+
OptionalType,
|
| 26 |
+
Return,
|
| 27 |
+
SelfArgument,
|
| 28 |
+
TensorOptionsArguments,
|
| 29 |
+
Type,
|
| 30 |
+
)
|
| 31 |
+
from torchgen.utils import assert_never
|
| 32 |
+
from .types import (
|
| 33 |
+
ArrayRefCType,
|
| 34 |
+
BaseTypeToCppMapping,
|
| 35 |
+
OptionalCType,
|
| 36 |
+
scalarT,
|
| 37 |
+
tensorListT,
|
| 38 |
+
tensorT,
|
| 39 |
+
)
|
| 40 |
+
|
| 41 |
+
"""
|
| 42 |
+
This file describes the translation of JIT schema to the public C++ API, which is what people use when they call
|
| 43 |
+
functions like at::add. It also serves as a native function API, which is the signature of kernels,
|
| 44 |
+
since in Executorch CppSignature is the same as NativeSignature.
|
| 45 |
+
|
| 46 |
+
Difference between this file and torchgen.api.cpp.py:
|
| 47 |
+
|
| 48 |
+
- Executorch doesn't support TensorOptions, however in this file we still keep the logic here to be compatible with
|
| 49 |
+
torchgen.api.cpp, so that we can do stuff like ATen mode (running ATen kernels in Executorch).
|
| 50 |
+
|
| 51 |
+
- Executorch doesn't support Dimname.
|
| 52 |
+
|
| 53 |
+
- Executorch runtime doesn't support SymInt, will treat it as int.
|
| 54 |
+
"""
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
# Translation of "value types" in JIT schema to C++ API type. Value
|
| 58 |
+
# types look the same no matter if they are argument types or return
|
| 59 |
+
# types. Returns None if the type in question is not a value type.
|
| 60 |
+
def valuetype_type(
|
| 61 |
+
t: Type,
|
| 62 |
+
*,
|
| 63 |
+
binds: ArgName,
|
| 64 |
+
remove_non_owning_ref_types: bool = False,
|
| 65 |
+
) -> Optional[NamedCType]:
|
| 66 |
+
if isinstance(t, BaseType):
|
| 67 |
+
if t.name == BaseTy.Tensor or t.name == BaseTy.Scalar:
|
| 68 |
+
return None
|
| 69 |
+
# For SymInt we simply treat it as int.
|
| 70 |
+
elif str(t) == "SymInt":
|
| 71 |
+
return NamedCType(binds, BaseCType(BaseTypeToCppMapping[BaseTy.int]))
|
| 72 |
+
if remove_non_owning_ref_types:
|
| 73 |
+
if t.name == BaseTy.str:
|
| 74 |
+
raise AssertionError(
|
| 75 |
+
"string ref->value conversion: not implemented yet"
|
| 76 |
+
)
|
| 77 |
+
# All other BaseType currently map directly to BaseCppTypes.
|
| 78 |
+
return NamedCType(binds, BaseCType(BaseTypeToCppMapping[t.name]))
|
| 79 |
+
elif isinstance(t, OptionalType):
|
| 80 |
+
elem = valuetype_type(t.elem, binds=binds)
|
| 81 |
+
if elem is None:
|
| 82 |
+
return None
|
| 83 |
+
return NamedCType(binds, OptionalCType(elem.type))
|
| 84 |
+
elif isinstance(t, ListType):
|
| 85 |
+
if str(t.elem) == "bool":
|
| 86 |
+
assert t.size is not None
|
| 87 |
+
return NamedCType(
|
| 88 |
+
binds, ArrayCType(BaseCType(BaseTypeToCppMapping[BaseTy.bool]), t.size)
|
| 89 |
+
)
|
| 90 |
+
else:
|
| 91 |
+
return None
|
| 92 |
+
else:
|
| 93 |
+
raise AssertionError(f"unrecognized type {repr(t)}")
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
# Translation of types occuring in JIT arguments to a C++ argument type.
|
| 97 |
+
# If remove_non_owning_ref_types is set, we'll guarantee that the outputed CType is not a non-owning reference type.
|
| 98 |
+
# For example, we'll return std::vector<int> instead of IntArrayRef.
|
| 99 |
+
# See Note [translation from C++ reference to value types]
|
| 100 |
+
def argumenttype_type(
|
| 101 |
+
t: Type,
|
| 102 |
+
*,
|
| 103 |
+
mutable: bool,
|
| 104 |
+
binds: ArgName,
|
| 105 |
+
remove_non_owning_ref_types: bool = False,
|
| 106 |
+
) -> NamedCType:
|
| 107 |
+
# If it's a value type, do the value type translation
|
| 108 |
+
r = valuetype_type(
|
| 109 |
+
t,
|
| 110 |
+
binds=binds,
|
| 111 |
+
remove_non_owning_ref_types=remove_non_owning_ref_types,
|
| 112 |
+
)
|
| 113 |
+
if r is not None:
|
| 114 |
+
return r
|
| 115 |
+
if isinstance(t, BaseType):
|
| 116 |
+
if t.name == BaseTy.Tensor:
|
| 117 |
+
if mutable and not local.use_const_ref_for_mutable_tensors():
|
| 118 |
+
return NamedCType(binds, MutRefCType(BaseCType(tensorT)))
|
| 119 |
+
else:
|
| 120 |
+
return NamedCType(binds, ConstRefCType(BaseCType(tensorT)))
|
| 121 |
+
elif t.name == BaseTy.Scalar:
|
| 122 |
+
return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
|
| 123 |
+
else:
|
| 124 |
+
raise AssertionError(f"base type should have been value type {t}")
|
| 125 |
+
elif isinstance(t, OptionalType):
|
| 126 |
+
if str(t.elem) == "Tensor":
|
| 127 |
+
if mutable and not local.use_const_ref_for_mutable_tensors():
|
| 128 |
+
return NamedCType(
|
| 129 |
+
binds, MutRefCType(BaseCType(tensorT))
|
| 130 |
+
) # TODO: fix this discrepancy
|
| 131 |
+
else:
|
| 132 |
+
return NamedCType(
|
| 133 |
+
binds, ConstRefCType(OptionalCType(BaseCType(tensorT)))
|
| 134 |
+
)
|
| 135 |
+
elif str(t.elem) == "Scalar":
|
| 136 |
+
return NamedCType(binds, ConstRefCType(OptionalCType(BaseCType(scalarT))))
|
| 137 |
+
elem = argumenttype_type(t.elem, mutable=mutable, binds=binds)
|
| 138 |
+
return NamedCType(binds, OptionalCType(elem.type))
|
| 139 |
+
elif isinstance(t, ListType):
|
| 140 |
+
# TODO: keeping these special cases for Tensor[] and Tensor?[] so that we can hookup with ATen kernels.
|
| 141 |
+
if str(t.elem) == "Tensor":
|
| 142 |
+
return NamedCType(binds, BaseCType(tensorListT))
|
| 143 |
+
elif str(t.elem) == "Dimname":
|
| 144 |
+
raise NotImplementedError("Executorch doesn't support Dimname")
|
| 145 |
+
elif str(t.elem) == "Tensor?":
|
| 146 |
+
return NamedCType(binds, ArrayRefCType(OptionalCType(BaseCType(tensorT))))
|
| 147 |
+
elem = argumenttype_type(t.elem, mutable=mutable, binds=binds)
|
| 148 |
+
return NamedCType(binds, ArrayRefCType(elem.type))
|
| 149 |
+
else:
|
| 150 |
+
raise AssertionError(f"unrecognized type {repr(t)}")
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
# Translate a JIT argument into its C++ type
|
| 154 |
+
def argument_type(a: Argument, *, binds: ArgName) -> NamedCType:
|
| 155 |
+
return argumenttype_type(a.type, mutable=a.is_write, binds=binds)
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
# Translation of a (non-multi) return type from JIT to C++
|
| 159 |
+
# N.B: returntype_type returns a CType, not a NamedCType.
|
| 160 |
+
# This is mostly because of the mismatch between return types and return names.
|
| 161 |
+
# e.g. a function with a return type of 'void' has 0 return names,
|
| 162 |
+
# and a function with a return type of 'std::tuple' has >1 return name.
|
| 163 |
+
def returntype_type(t: Type, *, mutable: bool) -> CType:
|
| 164 |
+
# placeholder is ignored
|
| 165 |
+
r = valuetype_type(t, binds="__placeholder__")
|
| 166 |
+
if r is not None:
|
| 167 |
+
return r.type
|
| 168 |
+
|
| 169 |
+
if isinstance(t, BaseType):
|
| 170 |
+
if t.name == BaseTy.Tensor:
|
| 171 |
+
if mutable:
|
| 172 |
+
if local.use_const_ref_for_mutable_tensors():
|
| 173 |
+
return ConstRefCType(BaseCType(tensorT))
|
| 174 |
+
else:
|
| 175 |
+
return MutRefCType(BaseCType(tensorT))
|
| 176 |
+
else:
|
| 177 |
+
# Note [Tensor Copy Returns]
|
| 178 |
+
# Currently, we use "Argument.is_write" to determine
|
| 179 |
+
# whether or not Tensor return types should be copies or references.
|
| 180 |
+
# If that ever changes, take a look at other locations of this note!
|
| 181 |
+
return BaseCType(tensorT)
|
| 182 |
+
elif t.name == BaseTy.Scalar:
|
| 183 |
+
return BaseCType(scalarT)
|
| 184 |
+
elif isinstance(t, ListType):
|
| 185 |
+
assert (
|
| 186 |
+
not mutable
|
| 187 |
+
), "Native functions should never return a mutable tensor list. They should return void."
|
| 188 |
+
elem = returntype_type(t.elem, mutable=False)
|
| 189 |
+
assert t.size is None, f"fixed size list returns not supported: {t}"
|
| 190 |
+
return VectorCType(elem)
|
| 191 |
+
|
| 192 |
+
raise AssertionError(f"unrecognized return type {t}")
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
# Translation of a single return to its C++ type
|
| 196 |
+
def return_type(r: Return) -> CType:
|
| 197 |
+
return returntype_type(r.type, mutable=r.is_write)
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
# Translation of a full (possibly multi) return from JIT to its C++ type
|
| 201 |
+
def returns_type(rs: Sequence[Return]) -> CType:
|
| 202 |
+
if len(rs) == 0:
|
| 203 |
+
return BaseCType(voidT)
|
| 204 |
+
elif len(rs) == 1:
|
| 205 |
+
return return_type(rs[0])
|
| 206 |
+
else:
|
| 207 |
+
return TupleCType([return_type(r) for r in rs])
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
def return_names(f: NativeFunction, *, fallback_name: str = "result") -> Sequence[str]:
    """Pick a C++ variable name for every return of *f*.

    Precedence per return: inplace functions return "self"; out-variants reuse
    the matching out argument's name; an explicit schema name is used (with a
    "_return" suffix when it clashes with an argument name); otherwise the
    fallback name, numbered (result0, result1, ...) for multi-returns.
    """
    names: List[str] = []
    for idx, ret in enumerate(f.func.returns):
        if f.func.name.name.inplace:
            # Inplace functions implicitly return self.
            # TODO: Consider incorporating this into the data model
            assert idx == 0, "illegal inplace function with multiple returns"
            chosen = "self"
        elif f.func.is_out_fn():
            # Out functions: reuse the corresponding out argument's name
            # (ret.name will get recorded in field_name later.)
            chosen = f.func.arguments.out[idx].name
        elif ret.name:
            # Explicitly named return; avoid colliding with an argument name.
            clashes = any(
                ret.name == a.name for a in f.func.schema_order_arguments()
            )
            if clashes and not f.func.is_out_fn():
                chosen = f"{ret.name}_return"
            else:
                chosen = ret.name
        else:
            # Unnamed return: fallback name, zero-indexed when multi-return.
            chosen = fallback_name if len(f.func.returns) == 1 else f"{fallback_name}{idx}"
        names.append(chosen)
    return names
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
# Map a JIT default literal to its C++ spelling.  Namespaces here must agree
# with the BaseCppType declarations in executorch/api/types/types.py, which
# all live in "torch::executor" (and with default_expr's nullopt spelling
# below) — the original spelled "torch::executorch", a namespace that does
# not exist in the Executorch runtime.
JIT_TO_CPP_DEFAULT = {
    "False": "false",
    "True": "true",
    "None": "torch::executor::nullopt",  # UGH this one is type directed
    "[]": "{}",
    "contiguous_format": "torch::executor::MemoryFormat::Contiguous",
    "long": "torch::executor::kLong",
}
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
# Convert a JIT default into C++ expression representing the default
|
| 253 |
+
def default_expr(d: str, t: Type) -> str:
    """Render the schema default string *d* as a C++ default expression for type *t*."""
    # An absent optional Tensor default is an empty brace-init, not nullopt.
    if d == "None" and str(t) == "Tensor?":
        return "{}"
    if isinstance(t, BaseType) and t.name is BaseTy.str:
        # Schema allows single quotes but C++ needs double
        if len(d) >= 2 and d[0] == "'" and d[-1] == "'":
            s = ""
            i = 1
            # Walk the characters between the quotes, translating escapes:
            # a literal double quote gains a backslash for the C++ literal,
            # an escaped single quote (\') becomes a bare ', and any other
            # backslash escape is copied through verbatim as two characters.
            while i + 1 < len(d):
                if d[i] != "\\":
                    if d[i] == '"':
                        s += '\\"'
                    else:
                        s += d[i]
                    i += 1
                else:
                    if d[i + 1] == "'":
                        s += "'"
                    else:
                        s += d[i : i + 2]
                    i += 2

            return f'"{s}"'

    if isinstance(t, OptionalType):
        if d == "None":
            return "torch::executor::nullopt"

        # Non-None optional defaults render as the element type would.
        return default_expr(d, t.elem)

    if isinstance(t, ListType):
        if d.startswith("[") and d.endswith("]"):
            # JIT list literal -> C++ brace-init list with the same contents.
            return "{" + d[1:-1] + "}"
        elif t.size is None:
            # NOTE: Sized lists can have scalar defaults
            raise ValueError(f"Expected a list default '[...]' but found: '{d}'")

    # Everything else goes through the literal translation table (or passes
    # through unchanged, e.g. numeric literals).
    return JIT_TO_CPP_DEFAULT.get(d, d)
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
# Convert an argument into its C++ API form
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
def argument(
    a: Union[Argument, TensorOptionsArguments, SelfArgument],
    *,
    cpp_no_default_args: Set[str],
    method: bool,
    faithful: bool,
    has_tensor_options: bool,
) -> List[Binding]:
    """Lower one schema argument into its C++ Binding(s) for Executorch."""
    if isinstance(a, Argument):
        # memory_format may duplicate information already carried by the
        # TensorOptions bundle, so it gets a special bind marker in that case.
        binds: ArgName = (
            SpecialArgName.possibly_redundant_memory_format
            if a.name == "memory_format" and has_tensor_options
            else a.name
        )
        default: Optional[str] = None
        if a.default is not None and a.name not in cpp_no_default_args:
            default = default_expr(a.default, a.type)
        binding = Binding(
            nctype=argument_type(a, binds=binds),
            name=a.name,
            default=default,
            argument=a,
        )
        return [binding]
    if isinstance(a, TensorOptionsArguments):
        raise NotImplementedError("Need to implement type resolution for TensorOptions")
    if isinstance(a, SelfArgument):
        if method:
            # Caller is responsible for installing implicit this in context!
            return []
        # Free-function form: lower the underlying self argument normally.
        return argument(
            a.argument,
            cpp_no_default_args=cpp_no_default_args,
            method=method,
            faithful=faithful,
            has_tensor_options=has_tensor_options,
        )
    assert_never(a)
|
| 342 |
+
|
| 343 |
+
|
| 344 |
+
def arguments(
    arguments: Arguments,
    *,
    faithful: bool,
    method: bool,
    cpp_no_default_args: Set[str],
) -> List[Binding]:
    """Lower all schema arguments to Bindings in their C++ declaration order.

    Faithful signatures keep schema order (out arguments last) and strip
    defaults; the non-faithful form hoists out arguments to the front.
    """
    ordered: List[Union[Argument, TensorOptionsArguments, SelfArgument]]
    if faithful:
        ordered = list(arguments.non_out) + list(arguments.out)
    else:
        ordered = list(arguments.out) + list(arguments.non_out)
    bindings: List[Binding] = []
    for a in ordered:
        for b in argument(
            a,
            faithful=faithful,
            method=method,
            has_tensor_options=arguments.tensor_options is not None,
            cpp_no_default_args=cpp_no_default_args,
        ):
            bindings.append(b.no_default() if faithful else b)
    return bindings
|
llava_next/lib/python3.10/site-packages/torchgen/executorch/api/types/__init__.py
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .types import *
|
| 2 |
+
from .signatures import * # isort:skip
|
llava_next/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (229 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/signatures.cpython-310.pyc
ADDED
|
Binary file (3.04 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/types.cpython-310.pyc
ADDED
|
Binary file (2.6 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torchgen/executorch/api/types/signatures.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dataclasses import dataclass
|
| 2 |
+
from typing import List, Optional, Set
|
| 3 |
+
|
| 4 |
+
import torchgen.api.cpp as aten_cpp
|
| 5 |
+
|
| 6 |
+
from torchgen.api.types import Binding, CType
|
| 7 |
+
from torchgen.model import FunctionSchema, NativeFunction
|
| 8 |
+
|
| 9 |
+
from .types import contextArg
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
@dataclass(frozen=True)
class ExecutorchCppSignature:
    """
    This signature is merely a CppSignature with Executorch types (optionally
    contains KernelRuntimeContext as well). The inline definition of
    CppSignature is generated in Functions.h and it's used by unboxing
    functions.
    """

    # The schema this signature is derived from
    func: FunctionSchema

    # The set of C++ arguments which should not have defaults applied to them
    cpp_no_default_args: Set[str]

    # Allows you to prepend an arbitrary prefix to the signature name.
    # This is useful for parts of the codegen that generate wrappers around kernels,
    # and need to avoid naming collisions.
    prefix: str = ""

    def arguments(self, *, include_context: bool = True) -> List[Binding]:
        """All C++ argument bindings, optionally led by the runtime context.

        NOTE: `et_cpp` is imported at the bottom of this module (a
        circular-import workaround), so it resolves lazily at call time.
        """
        return ([contextArg] if include_context else []) + et_cpp.arguments(
            self.func.arguments,
            faithful=True,  # always faithful, out argument at the end
            method=False,  # method not supported
            cpp_no_default_args=self.cpp_no_default_args,
        )

    def name(self) -> str:
        """Signature name: optional prefix + the faithful ATen C++ name."""
        return self.prefix + aten_cpp.name(
            self.func,
            faithful_name_for_out_overloads=True,
        )

    def decl(self, name: Optional[str] = None, *, include_context: bool = True) -> str:
        """C++ declaration string; argument decls may carry default values."""
        args_str = ", ".join(
            a.decl() for a in self.arguments(include_context=include_context)
        )
        if name is None:
            name = self.name()
        return f"{self.returns_type().cpp_type()} {name}({args_str})"

    def defn(self, name: Optional[str] = None) -> str:
        """C++ definition string; no defaults, context always included."""
        args = [a.defn() for a in self.arguments()]
        args_str = ", ".join(args)
        if name is None:
            name = self.name()
        return f"{self.returns_type().cpp_type()} {name}({args_str})"

    def returns_type(self) -> CType:
        """C++ return type derived from the schema returns."""
        return et_cpp.returns_type(self.func.returns)

    @staticmethod
    def from_native_function(
        f: NativeFunction, *, prefix: str = ""
    ) -> "ExecutorchCppSignature":
        """Build a signature directly from a NativeFunction."""
        return ExecutorchCppSignature(
            func=f.func, prefix=prefix, cpp_no_default_args=f.cpp_no_default_args
        )
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
from torchgen.executorch.api import et_cpp
|
llava_next/lib/python3.10/site-packages/torchgen/executorch/api/types/types.py
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dataclasses import dataclass
|
| 2 |
+
from typing import Dict
|
| 3 |
+
|
| 4 |
+
from torchgen.api.types import (
|
| 5 |
+
BaseCppType,
|
| 6 |
+
BaseCType,
|
| 7 |
+
Binding,
|
| 8 |
+
boolT,
|
| 9 |
+
CType,
|
| 10 |
+
doubleT,
|
| 11 |
+
Expr,
|
| 12 |
+
longT,
|
| 13 |
+
MutRefCType,
|
| 14 |
+
NamedCType,
|
| 15 |
+
)
|
| 16 |
+
from torchgen.model import BaseTy
|
| 17 |
+
|
| 18 |
+
# Executorch flavors of the ATen scalar/container types.  All of these live
# in the "torch::executor" C++ namespace.
halfT = BaseCppType("torch::executor", "Half")
bfloat16T = BaseCppType("torch::executor", "BFloat16")
stringT = BaseCppType("torch::executor", "string_view")
scalarTypeT = BaseCppType("torch::executor", "ScalarType")
tensorT = BaseCppType("torch::executor", "Tensor")
tensorListT = BaseCppType("torch::executor", "TensorList")
scalarT = BaseCppType("torch::executor", "Scalar")
memoryFormatT = BaseCppType("torch::executor", "MemoryFormat")
intArrayRefT = BaseCppType("torch::executor", "IntArrayRef")
optionalT = BaseCppType("torch::executor", "optional")
contextT = BaseCppType("torch::executor", "KernelRuntimeContext")

# Expression for the kernel runtime context argument that Executorch threads
# through every kernel call (mutable reference named "context").
contextExpr = Expr(
    expr="context",
    type=NamedCType(name="context", type=MutRefCType(BaseCType(contextT))),
)

# Binding for that context argument; it has no schema Argument behind it,
# hence the None (and the type-ignore).
contextArg = Binding(
    name="context",
    nctype=contextExpr.type,
    argument=None,  # type: ignore[arg-type]
    default=None,
)

# JIT scalar BaseTy -> Executorch C++ type.
BaseTypeToCppMapping: Dict[BaseTy, BaseCppType] = {
    BaseTy.int: longT,
    BaseTy.float: doubleT,
    BaseTy.bool: boolT,
    BaseTy.str: stringT,
    BaseTy.ScalarType: scalarTypeT,
    BaseTy.Tensor: tensorT,
    BaseTy.Scalar: scalarT,
    BaseTy.MemoryFormat: memoryFormatT,
}
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
@dataclass(frozen=True)
class OptionalCType(CType):
    """Executorch `torch::executor::optional<T>` wrapper around an element CType."""

    elem: "CType"

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        # `strip_ref` deliberately does not propagate to the element type.
        inner = self.elem.cpp_type()
        return "torch::executor::optional<" + inner + ">"

    def cpp_type_registration_declarations(self) -> str:
        inner = self.elem.cpp_type_registration_declarations()
        return "torch::executor::optional<" + inner + ">"

    def remove_const_ref(self) -> "CType":
        # Recurse so const-refs nested inside the element are dropped too.
        return OptionalCType(self.elem.remove_const_ref())
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
@dataclass(frozen=True)
class ArrayRefCType(CType):
    """Executorch `torch::executor::ArrayRef<T>` wrapper around an element CType."""

    elem: "CType"

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        # `strip_ref` deliberately does not propagate to the element type.
        inner = self.elem.cpp_type()
        return "torch::executor::ArrayRef<" + inner + ">"

    def cpp_type_registration_declarations(self) -> str:
        inner = self.elem.cpp_type_registration_declarations()
        return "torch::executor::ArrayRef<" + inner + ">"

    def remove_const_ref(self) -> "CType":
        # Recurse so const-refs nested inside the element are dropped too.
        return ArrayRefCType(self.elem.remove_const_ref())
|
llava_next/lib/python3.10/site-packages/torchgen/executorch/api/unboxing.py
ADDED
|
@@ -0,0 +1,213 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dataclasses import dataclass
|
| 2 |
+
from typing import Callable, List, Sequence, Tuple
|
| 3 |
+
|
| 4 |
+
from torchgen.api.types import Binding, CType, NamedCType
|
| 5 |
+
from torchgen.model import (
|
| 6 |
+
Argument,
|
| 7 |
+
BaseTy,
|
| 8 |
+
BaseType,
|
| 9 |
+
ListType,
|
| 10 |
+
NativeFunction,
|
| 11 |
+
OptionalType,
|
| 12 |
+
Type,
|
| 13 |
+
)
|
| 14 |
+
|
| 15 |
+
connector = "\n\t"
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
# Return unboxing function name for a NativeFunction
|
| 19 |
+
def name(f: NativeFunction) -> str:
    """Unboxing-wrapper name for *f*: the operator's unambiguous name."""
    op_name = f.func.name
    return op_name.unambiguous_name()
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
@dataclass(frozen=True)
class Unboxing:
    """
    Takes a sequence of Bindings and unbox EValues to these Bindings. Return generated code that performs correct unboxing.
    A sample generated code:
    // aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
    void mul_out(EValue** stack) {
        EValue& self = *stack[0];
        EValue& other = *stack[1];
        EValue& out = *stack[2];
        const torch::executor::Tensor & self_base = self.to<torch::executor::Tensor>();
        const torch::executor::Tensor & other_base = other.to<torch::executor::Tensor>();
        torch::executor::Tensor & out_base = out.to<torch::executor::Tensor>();

        EXECUTORCH_SCOPE_PROF("native_call_mul.out");
        torch::executor::mul_outf(self_base, other_base, out_base);


    }
    """

    # this is a callable that converts a JIT argument, into its C++ type.
    # Translates (type, mutability, binds) to NamedCType. E.g., torchgen.api.cpp.argumenttype_type.
    argument_type_gen: Callable[
        ...,
        NamedCType,
    ]

    # Convert all the arguments in a NativeFunction to C++ code
    def convert_arguments(
        self, args: Sequence[Binding]
    ) -> Tuple[List[Binding], List[str]]:
        # One `EValue& <name> = *stack[i];` extraction line per argument,
        # in stack order, followed by each argument's unboxing statements.
        code_list = [f"EValue& {args[i].name} = *stack[{i}];" for i in range(len(args))]
        binding_list = []
        for arg in args:
            # expecting only Argument
            if not isinstance(arg.argument, Argument):
                raise Exception(
                    f"Unexpected argument type, expecting `Argument` but got {arg}"
                )
            argument: Argument = arg.argument
            # Generate the unboxing code and any out-of-scope declarations
            # the unboxed variable needs (e.g. backing std::vector storage).
            unboxed_name, _, code, decl = self.argumenttype_evalue_convert(
                argument.type, argument.name, mutable=argument.is_write
            )
            # Declarations must precede the code that fills them in.
            code_list.extend(decl)
            code_list.extend(code)
            # The binding now refers to the freshly unboxed C++ variable.
            binding_list.append(arg.with_name(unboxed_name))
        return binding_list, code_list

    def argumenttype_evalue_convert(
        self, t: Type, arg_name: str, *, mutable: bool = False
    ) -> Tuple[str, CType, List[str], List[str]]:
        """
        Takes in the type, name and mutability corresponding to an argument, and generates a tuple of:
        (1) the C++ code necessary to unbox the argument
        (2) A Binding corresponding to the newly created unboxed variable, including variable name and its CType
        :param t: a `Type` of an argument
        :param arg_name: argument name
        :param mutable: boolean for whether this argument type is mutable
        :return: unboxed result
        """
        # Ask the configured generator (ATen or Executorch flavor) for the
        # C++ type of this argument; the binds name is just the arg name.
        ctype = self.argument_type_gen(t, mutable=mutable, binds=arg_name).type

        # Dispatch on the JIT type shape; each helper returns (code, decl).
        if isinstance(t, BaseType):
            out_name = f"{arg_name}_base"
            code, decl = self._gen_code_base_type(
                arg_name=arg_name, out_name=out_name, ctype=ctype
            )
        elif isinstance(t, OptionalType):
            out_name = f"{arg_name}_opt_out"
            code, decl = self._gen_code_optional_type(
                arg_name=arg_name, out_name=out_name, t=t, ctype=ctype
            )
        elif isinstance(t, ListType):
            out_name = f"{arg_name}_list_out"
            code, decl = self._gen_code_list_type(
                arg_name=arg_name, out_name=out_name, t=t, ctype=ctype
            )
        else:
            raise Exception(f"Cannot handle type {t}. arg_name: {arg_name}")
        return out_name, ctype, code, decl

    def _gen_code_base_type(
        self, arg_name: str, out_name: str, ctype: CType
    ) -> Tuple[List[str], List[str]]:
        # Base (scalar-ish) types unbox with a single EValue::to<T>() call;
        # no extra declarations are needed.
        return [
            f"{ctype.cpp_type()} {out_name} = {arg_name}.to<{ctype.cpp_type(strip_ref=True)}>();"
        ], []

    def _gen_code_optional_type(
        self, arg_name: str, out_name: str, t: OptionalType, ctype: CType
    ) -> Tuple[List[str], List[str]]:
        in_name = f"{arg_name}_opt_in"
        # Recurse on the element type to learn its C++ spelling; the element's
        # own code is not emitted here, only its declarations are propagated.
        res_name, base_type, res_code, decl = self.argumenttype_evalue_convert(
            t.elem, in_name
        )
        return (
            f"""
{ctype.cpp_type(strip_ref=True)} {out_name} = {arg_name}.toOptional<{base_type.cpp_type(strip_ref=True)}>();
""".split(
                "\n"
            ),
            decl,
        )

    def _gen_code_list_type(
        self, arg_name: str, out_name: str, t: ListType, ctype: CType
    ) -> Tuple[List[str], List[str]]:
        in_name = f"{arg_name}_list_in"
        elem_name = f"{arg_name}_elem"
        code = []
        # Recurse once on the element type; res_name/res_code drive the
        # generic ArrayRef fallback branch at the bottom.
        res_name, res_ctype, res_code, decl = self.argumenttype_evalue_convert(
            t.elem, elem_name
        )

        if isinstance(t.elem, BaseType) and t.elem.name == BaseTy.Tensor:
            code.extend(
                f"""
{ctype.cpp_type(strip_ref=True)} {out_name} = {arg_name}.toTensorList();
""".split(
                    "\n"
                )
            )
        elif isinstance(t.elem, BaseType) and (
            t.elem.name == BaseTy.int or t.elem.name == BaseTy.SymInt
        ):
            code.extend(
                f"""
{ctype.cpp_type(strip_ref=True)} {out_name} = {arg_name}.toIntList();
""".split(
                    "\n"
                )
            )
        elif isinstance(t.elem, BaseType) and t.elem.name == BaseTy.float:
            code.extend(
                f"""
{ctype.cpp_type(strip_ref=True)} {out_name} = {arg_name}.toDoubleList();
""".split(
                    "\n"
                )
            )
        elif isinstance(t.elem, BaseType) and t.elem.name == BaseTy.bool:
            # handle list type with size, e.g., bool[4]
            code.extend(
                f"""
{ctype.cpp_type(strip_ref=True)} {out_name} = {arg_name}.toBoolList();
""".split(
                    "\n"
                )
            )
        # pytorch codegen:
        # we have to use c10::List for optional element. e.g., Tensor?[] -> c10::List<c10::optional<at::Tensor>>
        elif (
            isinstance(t.elem, OptionalType)
            and isinstance(t.elem.elem, BaseType)
            and t.elem.elem.name == BaseTy.Tensor
        ):
            code.extend(
                f"""
#ifdef USE_ATEN_LIB
at::ArrayRef<c10::optional<at::Tensor>> {in_name} = {arg_name}.toListOptionalTensor();
c10::List<c10::optional<at::Tensor>> {out_name};
for (auto {elem_name}: {in_name}) {{
{out_name}.push_back({elem_name});
}}
#else
torch::executor::ArrayRef<torch::executor::optional<torch::executor::Tensor>> {out_name} = {arg_name}.toListOptionalTensor();
#endif
""".split(
                    "\n"
                )
            )
        else:
            # use ArrayRef as default.
            vec_name = arg_name + "_vec"
            # need to bring vector instantiation out of scope so that ArrayRef has valid data
            decl.append(
                f"std::vector<{res_ctype.cpp_type(strip_ref=True)}> {vec_name};"
            )
            # NOTE(review): the generated loop below iterates `{in_name}`, but
            # no statement in this branch declares that C++ variable —
            # presumably it is emitted elsewhere (or intended to be
            # `{arg_name}`); confirm against actual generated output.
            code.extend(
                f"""
for (EValue {elem_name}: {in_name}) {{
{connector.join(res_code)}
{vec_name}.push_back({res_name});
}}
{ctype.cpp_type(strip_ref=True)} {out_name}({vec_name});
""".split(
                    "\n"
                )
            )
        return code, decl
|
llava_next/lib/python3.10/site-packages/torchgen/gen_executorch.py
ADDED
|
@@ -0,0 +1,943 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import os
|
| 3 |
+
import pathlib
|
| 4 |
+
from collections import defaultdict
|
| 5 |
+
from dataclasses import dataclass
|
| 6 |
+
from typing import Any, Callable, Dict, List, Optional, Sequence, TextIO, Tuple, Union
|
| 7 |
+
|
| 8 |
+
import yaml
|
| 9 |
+
|
| 10 |
+
# Parse native_functions.yaml into a sequence of NativeFunctions and Backend Indices.
|
| 11 |
+
from torchgen import dest
|
| 12 |
+
from torchgen.api import cpp as aten_cpp
|
| 13 |
+
from torchgen.api.types import CppSignature, CppSignatureGroup, CType, NamedCType
|
| 14 |
+
from torchgen.context import (
|
| 15 |
+
method_with_native_function,
|
| 16 |
+
method_with_nested_native_function,
|
| 17 |
+
with_native_function_and_index,
|
| 18 |
+
)
|
| 19 |
+
from torchgen.executorch.api import et_cpp
|
| 20 |
+
from torchgen.executorch.api.custom_ops import (
|
| 21 |
+
ComputeNativeFunctionStub,
|
| 22 |
+
gen_custom_ops_registration,
|
| 23 |
+
)
|
| 24 |
+
from torchgen.executorch.api.types import contextArg, ExecutorchCppSignature
|
| 25 |
+
from torchgen.executorch.api.unboxing import Unboxing
|
| 26 |
+
from torchgen.executorch.model import ETKernelIndex, ETKernelKey, ETParsedYaml
|
| 27 |
+
from torchgen.executorch.parse import ET_FIELDS, parse_et_yaml, parse_et_yaml_struct
|
| 28 |
+
from torchgen.gen import (
|
| 29 |
+
get_custom_build_selector,
|
| 30 |
+
get_native_function_declarations,
|
| 31 |
+
get_native_function_declarations_from_ns_grouped_kernels,
|
| 32 |
+
get_native_function_schema_registrations,
|
| 33 |
+
LineLoader,
|
| 34 |
+
parse_native_yaml,
|
| 35 |
+
)
|
| 36 |
+
from torchgen.model import (
|
| 37 |
+
BackendIndex,
|
| 38 |
+
BackendMetadata,
|
| 39 |
+
DEFAULT_KERNEL_NAMESPACE,
|
| 40 |
+
DispatchKey,
|
| 41 |
+
FunctionSchema,
|
| 42 |
+
Location,
|
| 43 |
+
NativeFunction,
|
| 44 |
+
NativeFunctionsGroup,
|
| 45 |
+
OperatorName,
|
| 46 |
+
Variant,
|
| 47 |
+
)
|
| 48 |
+
from torchgen.selective_build.selector import SelectiveBuilder
|
| 49 |
+
from torchgen.utils import (
|
| 50 |
+
context,
|
| 51 |
+
FileManager,
|
| 52 |
+
make_file_manager,
|
| 53 |
+
mapMaybe,
|
| 54 |
+
NamespaceHelper,
|
| 55 |
+
)
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def _sig_decl_wrapper(sig: Union[CppSignature, ExecutorchCppSignature]) -> str:
    """
    A wrapper function to basically get `sig.decl(include_context=True)`.
    For ATen kernel, the codegen has no idea about ET contextArg, so we
    use this wrapper to add it.
    """
    # ET signatures already know how to include the runtime context.
    if isinstance(sig, ExecutorchCppSignature):
        return sig.decl()

    # ATen CppSignature: splice the context argument in front by hand.
    ret = aten_cpp.returns_type(sig.func.returns).cpp_type()
    arg_decls = [contextArg.decl()]
    arg_decls.extend(a.decl() for a in sig.arguments())
    return f"{ret} {sig.name()}({', '.join(arg_decls)})"
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def static_dispatch(
    sig: Union[CppSignature, ExecutorchCppSignature],
    f: NativeFunction,
    backend_indices: List[BackendIndex],
) -> str:
    """
    For a given `NativeFunction`, find out the corresponding native function and dispatch to it. If zero or more than one
    native function exists, error out. A simplified version of register_dispatch_key.py
    Arguments:
        sig: A CppSignature for this native function we want to use.
        f: NativeFunction to generate static dispatch.
        backend_indices: All available backends.
    Return:
        C++ code to call backend-specific functions, e.g., "return at::native::add(self, other, scale);"
    """
    if len(backend_indices) == 0 or f.manual_kernel_registration:
        return ""

    backends = [b for b in backend_indices if b.has_kernel(f)]
    # Default to a runtime unreachable-assertion; only a uniquely resolvable
    # kernel overrides it. Previously `static_block` was initialized to None,
    # so when exactly one backend matched but `get_kernel(f)` returned falsy
    # metadata, the literal text "None" was emitted into the generated C++.
    static_block = f"""
ET_ASSERT_UNREACHABLE_MSG("The number of native function(s) binding to {f.func.name} is {len(backends)}.");
    """
    if len(backends) == 1:
        backend_metadata = backends[0].get_kernel(f)
        if backend_metadata:
            args = ", ".join(a.name for a in sig.arguments())
            # Here we are assuming there's no difference between CppSignature and NativeSignature for Executorch.
            static_block = f"return ::{backend_metadata.cpp_namespace}::{backend_metadata.kernel}({args});"
    return f"""
// {f.namespace}::{f.func}
TORCH_API inline {_sig_decl_wrapper(sig)} {{
    {static_block}
}}
"""
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
# Generates Functions.h, which provides the functional public C++ API,
# and the scaffolding to call into the dispatcher from these functions.
@dataclass(frozen=True)
class ComputeFunction:
    static_dispatch_backend_indices: List[BackendIndex]

    selector: SelectiveBuilder

    use_aten_lib: bool

    is_custom_op: Callable[[NativeFunction], bool]

    @method_with_native_function
    def __call__(self, f: NativeFunction) -> Optional[str]:
        """Emit the inline C++ API wrapper for `f`, or None when skipped."""
        # Skip operators the selective-build config did not request, and
        # operators without a function variant (we only emit free functions).
        selected = self.selector.is_root_operator(f"{f.namespace}::{f.func.name}")
        if not selected or Variant.function not in f.variants:
            return None

        sig: Union[CppSignature, ExecutorchCppSignature]
        if self.use_aten_lib:
            sig = CppSignatureGroup.from_native_function(
                f, method=False, fallback_binding=f.manual_cpp_binding
            ).most_faithful_signature()
        else:
            sig = ExecutorchCppSignature.from_native_function(f)

        if not (self.use_aten_lib and not self.is_custom_op(f)):
            # Non-ATen mode, or a custom op: statically dispatch to the
            # registered backend kernel.
            return static_dispatch(
                sig,
                f,
                backend_indices=self.static_dispatch_backend_indices,
            )

        # ATen-library mode for a regular op: forward straight to at::.
        comma = ", "
        return f"""
// {f.namespace}::{f.func}
TORCH_API inline {_sig_decl_wrapper(sig)} {{
    return at::{sig.name()}({comma.join(e.name for e in sig.arguments())});
}}
"""
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
# Generates RegisterCodegenUnboxedKernels.cpp.
@dataclass(frozen=True)
class ComputeCodegenUnboxedKernels:
    # Selective-build configuration: filters which operators/kernels get emitted.
    selector: SelectiveBuilder

    # True when generating against the full ATen library (PyTorch types).
    use_aten_lib: bool

    @method_with_nested_native_function
    def __call__(
        self,
        unbox_kernel_entry: Tuple[NativeFunction, Tuple[ETKernelKey, BackendMetadata]],
    ) -> str:
        """Render one C++ `Kernel(...)` registration entry for a
        (native function, (kernel key, kernel metadata)) pair.

        Returns "" when the operator or all of its kernel keys are excluded
        by the selective-build config.
        """
        f: NativeFunction = unbox_kernel_entry[0]
        kernel_key: Union[ETKernelKey, List[ETKernelKey]] = unbox_kernel_entry[1][0]
        kernel_meta: BackendMetadata = unbox_kernel_entry[1][1]

        op_name = f"{f.namespace}::{f.func.name}"
        if not self.selector.is_root_operator(op_name):
            return ""

        # Normalize to a list so the selection below handles one or many keys.
        if not isinstance(kernel_key, list):
            kernel_key = [kernel_key]
        used_kernel_keys = self.selector.et_get_selected_kernels(
            op_name, [k.to_native_string() for k in kernel_key]
        )
        if not used_kernel_keys:
            return ""
        sig: Union[CppSignature, ExecutorchCppSignature]
        argument_type_gen: Callable[..., NamedCType]
        return_type_gen: Callable[..., CType]
        if self.use_aten_lib:
            # ATen mode: use the faithful ATen C++ signature and ATen type mappers;
            # the call target lives under torch::executor::<namespace>.
            sig = CppSignatureGroup.from_native_function(
                f, method=False, fallback_binding=f.manual_cpp_binding
            ).most_faithful_signature()
            argument_type_gen = aten_cpp.argumenttype_type
            return_type_gen = aten_cpp.returns_type
            arguments = sig.arguments()
            kernel_call = f"torch::executor::{f.namespace}::{sig.name()}"
        else:
            # Executorch mode: call the registered kernel directly. The runtime
            # context is passed explicitly in the template, so exclude it here.
            sig = ExecutorchCppSignature.from_native_function(f)
            argument_type_gen = et_cpp.argumenttype_type
            return_type_gen = et_cpp.returns_type
            arguments = sig.arguments(include_context=False)
            kernel_call = f"{kernel_meta.cpp_namespace}::{kernel_meta.kernel}"
        # parse arguments into C++ code
        binding_list, code_list = Unboxing(
            argument_type_gen=argument_type_gen
        ).convert_arguments(arguments)

        # for each C++ argument, generate the conversion code
        code_connector = "\n\t"
        arg_connector = ", "

        args_str = f"{arg_connector.join(e.name for e in binding_list)}"

        if len(f.func.returns) == 0:
            # Void op: it must write through an out= argument, which is placed
            # back on the EValue stack as the result.
            if len(f.func.arguments.out) == 0:
                raise Exception(
                    f"Can't handle native function {f.func} with no returns and no out yet."
                )
            out = f.func.arguments.out[0]
            return_assignment = f"""stack[{len(binding_list)}] = &{out.name};"""
            ret_prefix = ""
        else:
            if len(f.func.arguments.out) == 0:
                # Functional op: capture the C++ return value and box it.
                return_assignment = (
                    f"""*stack[{len(binding_list)}] = EValue(result_);"""
                )
                ret_prefix = return_type_gen(f.func.returns).cpp_type() + " result_ = "
            else:
                # out= op that also returns: nothing extra to write back —
                # presumably the out argument already aliases the result.
                return_assignment = ""
                ret_prefix = ""

        newline = "\n    "
        return "\n".join(
            [
                f"""
Kernel(
    "{f.namespace}::{f.func.name}",{newline + '"' + (k + '",') if k != 'default' else ''}
    []({contextArg.defn()}, EValue** stack) {{
        {code_connector.join(code_list)}

        EXECUTORCH_SCOPE_PROF("native_call_{f.func.name}");
        {ret_prefix}{kernel_call}(context, {args_str});

        {return_assignment}
    }}
),
"""
                for k in used_kernel_keys
            ]
        )
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
def gen_unboxing(
    *,
    native_functions: Sequence[NativeFunction],
    cpu_fm: FileManager,
    selector: SelectiveBuilder,
    use_aten_lib: bool,
    kernel_index: ETKernelIndex,
) -> None:
    """Write RegisterCodegenUnboxedKernels.cpp: one unboxing wrapper per
    selected (native function, kernel key) pair."""

    # write_sharded iterates tuples of (native_function, (kernel_key, metadata)).
    def key_func(
        item: Tuple[NativeFunction, Tuple[ETKernelKey, BackendMetadata]]
    ) -> str:
        return f"{item[0].root_name}:{item[1][0].to_native_string()}"

    items: List[Tuple[NativeFunction, Tuple[ETKernelKey, BackendMetadata]]] = []
    for native_function in native_functions:
        for kernel_key, metadata in kernel_index.get_kernels(native_function).items():
            items.append((native_function, (kernel_key, metadata)))

    header = ["Functions.h" if use_aten_lib else "NativeFunctions.h"]
    compute = ComputeCodegenUnboxedKernels(selector, use_aten_lib)

    def env_for(
        unbox_kernel_entry: Tuple[NativeFunction, Tuple[ETKernelKey, BackendMetadata]]
    ) -> Dict[str, Any]:
        return {
            "unboxed_kernels": [compute(unbox_kernel_entry)],
            # Only the first entry pulls in the header include; writing it
            # once is enough for the whole generated file.
            "fn_header": header if unbox_kernel_entry == items[0] else [],
        }

    cpu_fm.write_sharded(
        "RegisterCodegenUnboxedKernels.cpp",
        items,
        key_fn=key_func,
        env_callable=env_for,
        num_shards=1,
        sharded_keys={"unboxed_kernels", "fn_header"},
    )
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
@with_native_function_and_index  # type: ignore[arg-type]
def compute_native_function_declaration(
    g: Union[NativeFunctionsGroup, NativeFunction], kernel_index: ETKernelIndex
) -> List[str]:
    """Return TORCH_API kernel declarations for one NativeFunction.

    For each kernel registered for the operator we declare two overloads —
    one taking the runtime context and one without — and the unused one is
    cleaned up later.
    """
    assert isinstance(g, NativeFunction)
    sig = ExecutorchCppSignature.from_native_function(f=g)
    metadata_list = kernel_index.get_kernels(g).values()
    # `.values()` never returns None, so the old `is None` check was dead;
    # the meaningful degenerate case is an operator with no kernels at all.
    if not metadata_list:
        return []
    prefix = "TORCH_API"

    # for kernels in lean mode, we declare two versions, one with context and one without.
    # In the end we will cleanup the unused one.
    def gen_decl(metadata: BackendMetadata, include_context: bool) -> str:
        return f"{prefix} {sig.decl(name=metadata.kernel, include_context=include_context)};"

    return [
        gen_decl(metadata, include_context)
        for include_context in [False, True]
        for metadata in metadata_list
    ]
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
def gen_functions_declarations(
    *,
    native_functions: Sequence[NativeFunction],
    kernel_index: ETKernelIndex,
    selector: SelectiveBuilder,
    use_aten_lib: bool,
    custom_ops_native_functions: Optional[Sequence[NativeFunction]] = None,
) -> str:
    """
    Generates namespace separated C++ function API inline declaration/definitions.
    Native functions are grouped by namespaces and the generated code is wrapped inside
    namespace blocks.

    E.g., for `custom_1::foo.out` in yaml file we will generate a C++ API as a symbol
    in `torch::executor::custom_1::foo_out`. This way we avoid symbol conflict when
    the other `custom_2::foo.out` is available.
    """

    # convert kernel index to BackendIndex. This is because we can't handle ETKernelIndex yet.
    # TODO larryliu: evaluate if this code is still needed. If yes let it handle ETKernelIndex.
    # (The previous `dispatch_key = DispatchKey.CPU` local was unused and has
    # been removed.)
    backend_index = kernel_index._to_backend_index()

    # Group the functions by their declared namespace so each group can be
    # wrapped in its own namespace block.
    ns_grouped_functions = defaultdict(list)
    for native_function in native_functions:
        ns_grouped_functions[native_function.namespace].append(native_function)
    functions_declarations = ""
    newline = "\n"
    for namespace in ns_grouped_functions:
        # Cap namespace nesting at 3 levels (e.g. torch::executor::<custom>).
        ns_helper = NamespaceHelper(
            namespace_str=namespace,
            entity_name="",
            max_level=3,
        )
        declarations = list(
            mapMaybe(
                ComputeFunction(
                    static_dispatch_backend_indices=[backend_index],
                    selector=selector,
                    use_aten_lib=use_aten_lib,
                    is_custom_op=lambda f: custom_ops_native_functions is not None
                    and f in custom_ops_native_functions,
                ),
                ns_grouped_functions[namespace],
            )
        )
        functions_declarations += f"""
{ns_helper.prologue}
{newline.join(declarations)}
{ns_helper.epilogue}
"""
    return functions_declarations
|
| 364 |
+
|
| 365 |
+
|
| 366 |
+
def get_ns_grouped_kernels(
    *,
    native_functions: Sequence[NativeFunction],
    kernel_index: ETKernelIndex,
    native_function_decl_gen: Callable[
        [
            Union[NativeFunctionsGroup, NativeFunction],
            ETKernelIndex,
        ],
        List[str],
    ],
) -> Dict[str, List[str]]:
    """Group generated kernel declarations by their C++ namespace.

    An operator may declare at most one namespace across all of its kernels;
    more than one is a hard error.
    """
    grouped: Dict[str, List[str]] = defaultdict(list)
    for fn in native_functions:
        seen_namespaces = set()
        for meta in kernel_index.get_kernels(fn).values():
            if meta:
                ns = meta.cpp_namespace
                seen_namespaces.add(ns)
            else:
                # No metadata: fall back to the default kernel namespace.
                ns = DEFAULT_KERNEL_NAMESPACE
            assert (
                len(seen_namespaces) <= 1
            ), f"Codegen only supports one namespace per operator, got {seen_namespaces}"
            grouped[ns].extend(native_function_decl_gen(fn, kernel_index))
    return grouped
|
| 395 |
+
|
| 396 |
+
|
| 397 |
+
def gen_headers(
    *,
    native_functions: Sequence[NativeFunction],
    gen_custom_ops_header: bool,
    custom_ops_native_functions: Sequence[NativeFunction],
    selector: SelectiveBuilder,
    kernel_index: ETKernelIndex,
    cpu_fm: FileManager,
    use_aten_lib: bool,
) -> None:
    """Generate headers.

    Args:
        native_functions (Sequence[NativeFunction]): a collection of NativeFunction for ATen ops.
        gen_custom_ops_header (bool): whether we should generate CustomOpsNativeFunctions.h
        custom_ops_native_functions (Sequence[NativeFunction]): a collection of NativeFunction for custom ops.
        kernel_index (ETKernelIndex): kernel collection
        cpu_fm (FileManager): file manager manages output stream
        use_aten_lib (bool): whether we are generating for PyTorch types or Executorch types.
    """
    aten_headers = ["#include <ATen/Functions.h>"]
    backend_indices = {DispatchKey.CPU: kernel_index._to_backend_index()}
    if gen_custom_ops_header:
        # Custom ops get their own declarations header, rendered from the
        # NativeFunctions.h template.
        cpu_fm.write_with_template(
            "CustomOpsNativeFunctions.h",
            "NativeFunctions.h",
            lambda: {
                "nativeFunctions_declarations": get_native_function_declarations(
                    grouped_native_functions=custom_ops_native_functions,
                    backend_indices=backend_indices,
                    native_function_decl_gen=dest.compute_native_function_declaration,
                ),
                "headers": [
                    "#include <ATen/ATen.h>",
                    "#include <torch/torch.h>",
                ],
            },
        )
        aten_headers.append('#include "CustomOpsNativeFunctions.h"')
    # Functions.h: the public functional C++ API.
    cpu_fm.write(
        "Functions.h",
        lambda: {
            "static_dispatch_extra_headers": aten_headers
            if use_aten_lib
            else ['#include "NativeFunctions.h"'],
            "Functions_declarations": gen_functions_declarations(
                native_functions=native_functions,
                kernel_index=kernel_index,
                selector=selector,
                use_aten_lib=use_aten_lib,
                custom_ops_native_functions=custom_ops_native_functions,
            ),
        },
    )
    # Includes shared by both flavors of NativeFunctions.h below.
    headers = {
        "headers": [
            "#include <executorch/runtime/core/exec_aten/exec_aten.h> // at::Tensor etc.",
            "#include <executorch/codegen/macros.h> // TORCH_API",
            "#include <executorch/runtime/kernel/kernel_runtime_context.h>",
        ],
    }
    if use_aten_lib:
        # ATen mode: declarations come from the standard torchgen path.
        cpu_fm.write(
            "NativeFunctions.h",
            lambda: dict(
                {
                    "nativeFunctions_declarations": get_native_function_declarations(
                        grouped_native_functions=native_functions,
                        backend_indices=backend_indices,
                        native_function_decl_gen=dest.compute_native_function_declaration,
                    ),
                },
                **headers,
            ),
        )
    else:
        # Executorch mode: declarations are grouped by kernel namespace first.
        ns_grouped_kernels = get_ns_grouped_kernels(
            native_functions=native_functions,
            kernel_index=kernel_index,
            native_function_decl_gen=compute_native_function_declaration,  # type: ignore[arg-type]
        )
        cpu_fm.write(
            "NativeFunctions.h",
            lambda: dict(
                {
                    "nativeFunctions_declarations": get_native_function_declarations_from_ns_grouped_kernels(
                        ns_grouped_kernels=ns_grouped_kernels,
                    ),
                },
                **headers,
            ),
        )
|
| 489 |
+
|
| 490 |
+
|
| 491 |
+
def gen_custom_ops(
    *,
    native_functions: Sequence[NativeFunction],
    selector: SelectiveBuilder,
    kernel_index: ETKernelIndex,
    cpu_fm: FileManager,
    rocm: bool,
) -> None:
    """Generate registration sources for custom operators.

    Writes Register{CPU}CustomOps.cpp (registrations dispatching to the
    custom kernels), Register{CPU}Stub.cpp (the same registrations built from
    generated stubs) and RegisterSchema.cpp (schema registrations).
    """
    dispatch_key = DispatchKey.CPU
    (
        anonymous_definition,
        static_init_dispatch_registrations,
    ) = gen_custom_ops_registration(
        native_functions=native_functions,
        selector=selector,
        kernel_index=kernel_index,
        rocm=rocm,
    )
    # Real registrations: kernels declared in CustomOpsNativeFunctions.h.
    cpu_fm.write_with_template(
        f"Register{dispatch_key}CustomOps.cpp",
        "RegisterDispatchKeyCustomOps.cpp",
        lambda: {
            "ops_headers": '#include "CustomOpsNativeFunctions.h"',
            "DispatchKey": dispatch_key,
            "dispatch_namespace": dispatch_key.lower(),
            "dispatch_namespaced_definitions": "",
            "dispatch_anonymous_definitions": anonymous_definition,
            "static_init_dispatch_registrations": static_init_dispatch_registrations,
        },
    )
    # Stub variant: same template, but kernel bodies come from
    # ComputeNativeFunctionStub and no ops header is included.
    cpu_fm.write_with_template(
        f"Register{dispatch_key}Stub.cpp",
        "RegisterDispatchKeyCustomOps.cpp",
        lambda: {
            "ops_headers": "",
            "DispatchKey": dispatch_key,
            "dispatch_namespace": dispatch_key.lower(),
            "dispatch_namespaced_definitions": "",
            "dispatch_anonymous_definitions": list(
                mapMaybe(ComputeNativeFunctionStub(), native_functions)
            ),
            "static_init_dispatch_registrations": static_init_dispatch_registrations,
        },
    )

    (
        aten_schema_registrations,
        schema_registrations,
    ) = get_native_function_schema_registrations(
        native_functions=native_functions,
        schema_selector=selector,
    )
    cpu_fm.write(
        "RegisterSchema.cpp",
        lambda: {
            "schema_registrations": schema_registrations,
            "aten_schema_registrations": aten_schema_registrations,
        },
    )
|
| 550 |
+
|
| 551 |
+
|
| 552 |
+
def translate_native_yaml(
    tags_yaml_path: str,
    aten_yaml_path: str,
    native_yaml_path: Optional[str],
    use_aten_lib: bool,
    out_file: TextIO,
) -> None:
    """Translates Executorch DSL dialect to use the same syntax as
    native_functions.yaml. The major difference is that Executorch DSL dialect
    supports "op" key, where it refers to the operator name in native_functions.yaml.

    For example, a functions.yaml may have the following entry:

    - op: add.out
      ...

    It needs to be translated to the following:

    - func: add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
      ...

    We go in aten_yaml_path and find the operator schema for "add.out" and add it
    to the original functions.yaml. We also add required field "variants", where for
    Executorch it will always be "function".

    For ATen mode we don't have to do the translation because native_yaml_path is
    the same as native_functions.yaml.

    Args:
        tags_yaml_path: Path to a tags.yaml file to satisfy codegen parsing.
            It is not optional.
        aten_yaml_path: Path to ATen operator yaml file native_functions.yaml.
        native_yaml_path: Path to a functions.yaml file to parse.
            If the path does not exist in the filesystem, it is treated as an
            empty file. If `custom_ops_yaml_path` exists, the contents of that
            file are appended to the yaml input to be parsed.
        use_aten_lib: We use this flag to determine if we want to generate native
            functions. In ATen mode we should generate out= variants.
        out_file: The IO object that we are writing into.
    Returns:
        None
    """
    if use_aten_lib:
        # ATen mode: functions.yaml already uses native_functions.yaml syntax,
        # so just copy the ATen yaml through verbatim.
        with open(aten_yaml_path) as aten_yaml:
            out_file.writelines(aten_yaml.readlines())
        return

    # Parse the ATen yaml to build op-name -> schema and persisted-field maps.
    native_functions, persisted_fields = parse_et_yaml(
        aten_yaml_path,
        tags_yaml_path,
        None,
        skip_native_fns_gen=False,
    )

    func_to_scoped_name: Dict[FunctionSchema, str] = {
        f.func: f"{f.namespace}::{f.func.name}" for f in native_functions
    }
    op_to_scoped_name: Dict[OperatorName, str] = {
        func.name: name for func, name in func_to_scoped_name.items()
    }

    schema_dict = {name: str(func) for func, name in func_to_scoped_name.items()}
    kernel_persist_dict: Dict[str, Dict[str, Any]] = {
        op_to_scoped_name[op]: v for op, v in persisted_fields.items()
    }

    # Missing or empty functions.yaml: nothing to translate.
    if (
        not native_yaml_path
        or not os.path.exists(native_yaml_path)
        or os.stat(native_yaml_path).st_size == 0
    ):
        return
    with open(native_yaml_path) as native_yaml:
        native_es = yaml.load(native_yaml, Loader=LineLoader)
        if not native_es:
            return
        for e in native_es:
            assert isinstance(e.get("__line__"), int), e
            loc = Location(native_yaml_path, e.pop("__line__"))
            with context(lambda: f"in {loc}:\n  "):
                if "variants" not in e:
                    e["variants"] = "function"
                if "func" in e:
                    continue
                assert isinstance(e.get("op"), str), e
                opname = e.pop("op")
                if "::" not in opname:
                    opname = "aten::" + opname
                # Fail loudly for unknown ops. The old code asserted and then
                # used `schema_dict.get(opname)`, so under `python -O` (asserts
                # stripped) an unknown op silently produced `func: None`.
                # Direct indexing raises KeyError even with asserts disabled.
                assert (
                    opname in schema_dict
                ), f"Operator {opname} is not found in native_functions.yaml"
                e["func"] = schema_dict[opname]

                # Write out persisted kernel information
                if opname in kernel_persist_dict:
                    for k, v in kernel_persist_dict[opname].items():
                        e[k] = v

        yaml.dump(native_es, out_file, width=1000)
|
| 649 |
+
|
| 650 |
+
|
| 651 |
+
def parse_yaml(
    path: Optional[str],
    tags_yaml_path: str,
    function_filter: Callable[[NativeFunction], bool],
    skip_native_fns_gen: bool = False,
) -> Tuple[
    List[NativeFunction],
    Union[Dict[DispatchKey, Dict[OperatorName, BackendMetadata]], ETKernelIndex],
]:
    """Parse a native-functions style yaml file.

    Returns the native functions that pass `function_filter`, plus either an
    `ETKernelIndex` (when any entry carries the ET "kernels" field) or plain
    backend indices. A missing/empty `path` yields `([], {})`.
    """
    if path and os.path.exists(path) and os.stat(path).st_size > 0:
        with open(path) as f:
            es = yaml.load(f, Loader=LineLoader)

        # Check for kernel index structure
        kernel_index = (
            parse_et_yaml_struct(es) if any("kernels" in e for e in es) else None
        )

        # Remove ET specific fields from entries for BC compatibility
        for entry in es:
            for field in ET_FIELDS:
                entry.pop(field, None)

        # Re-parse the sanitized entries with the standard torchgen parser.
        parsed_yaml = parse_native_yaml(
            path,
            tags_yaml_path,
            None,
            skip_native_fns_gen=skip_native_fns_gen,
            loaded_yaml=es,
        )
        native_functions = list(filter(function_filter, parsed_yaml.native_functions))
        op_names = [f.func.name for f in native_functions]

        # (1) Return ETKernelIndex if kernel index is present
        if kernel_index is not None:
            # Drop kernels for ops that were filtered out above.
            filtered_index = {
                op_name: kernel_mapping
                for op_name, kernel_mapping in kernel_index.index.items()
                if op_name in op_names
            }
            return native_functions, ETKernelIndex(index=filtered_index)

        # (2) Return BackendIndices if kernel index is absent
        def map_index(
            m: Dict[OperatorName, BackendMetadata]
        ) -> Dict[OperatorName, BackendMetadata]:
            # Keep only ops that survived `function_filter`.
            return {op: m[op] for op in m if op in op_names}

        backend_indices = {
            k: map_index(b.index) for (k, b) in parsed_yaml.backend_indices.items()
        }

        return native_functions, backend_indices
    else:
        return [], {}
|
| 706 |
+
|
| 707 |
+
|
| 708 |
+
def parse_yaml_files(
    tags_yaml_path: str,
    aten_yaml_path: str,
    native_yaml_path: Optional[str],
    custom_ops_yaml_path: Optional[str],
    selector: SelectiveBuilder,
    use_aten_lib: bool,
) -> Tuple[ETParsedYaml, Optional[ETParsedYaml]]:
    """Parses functions.yaml and custom_ops.yaml files.

    Args:
        tags_yaml_path: Path to a tags.yaml file to satisfy codegen parsing.
            It is not optional.
        aten_yaml_path: Path to ATen operator yaml file native_functions.yaml.
        native_yaml_path: Path to a functions.yaml file to parse.
            If the path does not exist in the filesystem, it is treated as an
            empty file. If `custom_ops_yaml_path` exists, the contents of that
            file are appended to the yaml input to be parsed.
        custom_ops_yaml_path: Path to a custom_ops.yaml file to parse. If
            the path does not exist in the filesystem, it is ignored.
        selector: For selective build.
        use_aten_lib: We use this flag to determine if we want to generate native
            functions. In ATen mode we should generate out= variants.
    Returns:
        A tuple with two elements:
        [0]: The parsed results of concatenating the contents of
             `native_yaml_path` and `custom_ops_yaml_path`.
        [1]: The parsed results of the contents of `custom_ops_yaml_path`, if
             present. If not present, None.
    """
    import tempfile

    # only include selected ops, this is because we want to avoid
    def function_filter(f: NativeFunction) -> bool:
        return selector.is_native_function_selected(f)

    def to_kernel_index(
        index: Union[
            Dict[DispatchKey, Dict[OperatorName, BackendMetadata]], ETKernelIndex
        ]
    ) -> ETKernelIndex:
        # Normalize plain backend indices into an ETKernelIndex.
        if isinstance(index, ETKernelIndex):
            return index
        return ETKernelIndex.from_backend_indices(index)

    with tempfile.TemporaryDirectory() as tmp_dir:
        # First rewrite the ET-dialect functions.yaml into standard
        # native_functions.yaml syntax, then parse both yaml inputs.
        translated_yaml_path = os.path.join(tmp_dir, "translated.yaml")
        with open(translated_yaml_path, "w") as translated:
            translate_native_yaml(
                tags_yaml_path,
                aten_yaml_path,
                native_yaml_path,
                use_aten_lib,
                translated,
            )

        translated_functions, translated_indices = parse_yaml(
            translated_yaml_path, tags_yaml_path, function_filter, not use_aten_lib
        )
        custom_ops_functions, custom_ops_indices = parse_yaml(
            custom_ops_yaml_path, tags_yaml_path, function_filter, True
        )

    translated_kernel_index = to_kernel_index(translated_indices)
    custom_ops_kernel_index = to_kernel_index(custom_ops_indices)

    combined_yaml = ETParsedYaml(
        translated_functions + custom_ops_functions,
        ETKernelIndex.merge_indices(translated_kernel_index, custom_ops_kernel_index),
    )
    custom_ops_parsed_yaml = ETParsedYaml(custom_ops_functions, custom_ops_kernel_index)

    return combined_yaml, custom_ops_parsed_yaml
|
| 776 |
+
|
| 777 |
+
|
| 778 |
+
def main() -> None:
|
| 779 |
+
parser = argparse.ArgumentParser(description="Generate operator source files")
|
| 780 |
+
# Although we don't refer to --source-path directly, make_file_manager()
|
| 781 |
+
# expects it to point to a directory that contains a templates/ subdirectory
|
| 782 |
+
# containing the file templates.
|
| 783 |
+
parser.add_argument(
|
| 784 |
+
"-s",
|
| 785 |
+
"--source-path",
|
| 786 |
+
help="path to source directory for kernel templates",
|
| 787 |
+
)
|
| 788 |
+
parser.add_argument(
|
| 789 |
+
"--functions-yaml-path",
|
| 790 |
+
"--functions_yaml_path",
|
| 791 |
+
help="path to the functions.yaml file to use. Optional, but at least "
|
| 792 |
+
"one of --functions-yaml-path and --custom-ops-yaml-path must be "
|
| 793 |
+
"specified.",
|
| 794 |
+
)
|
| 795 |
+
parser.add_argument(
|
| 796 |
+
"--custom-ops-yaml-path",
|
| 797 |
+
"--custom_ops_yaml_path",
|
| 798 |
+
help="path to the custom_ops.yaml file to use. Optional, but at least "
|
| 799 |
+
"one of --functions-yaml-path and --custom-ops-yaml-path must be "
|
| 800 |
+
"specified.",
|
| 801 |
+
)
|
| 802 |
+
parser.add_argument(
|
| 803 |
+
"--aten-yaml-path",
|
| 804 |
+
"--aten_yaml_path",
|
| 805 |
+
help="path to native_functions.yaml file.",
|
| 806 |
+
)
|
| 807 |
+
# Note that make_file_manager() also looks at --install-dir.
|
| 808 |
+
parser.add_argument(
|
| 809 |
+
"-d",
|
| 810 |
+
"--install-dir",
|
| 811 |
+
"--install_dir",
|
| 812 |
+
help="output directory",
|
| 813 |
+
default="build/generated",
|
| 814 |
+
)
|
| 815 |
+
parser.add_argument(
|
| 816 |
+
"-o",
|
| 817 |
+
"--output-dependencies",
|
| 818 |
+
help="output a list of dependencies into the given file and exit",
|
| 819 |
+
)
|
| 820 |
+
# Although we don't refer to --dry-run directly, make_file_manager() looks
|
| 821 |
+
# for it.
|
| 822 |
+
parser.add_argument(
|
| 823 |
+
"--dry-run",
|
| 824 |
+
action="store_true",
|
| 825 |
+
help="run without writing any files (still updates outputs)",
|
| 826 |
+
)
|
| 827 |
+
parser.add_argument(
|
| 828 |
+
"--static-dispatch-backend",
|
| 829 |
+
"--static_dispatch_backend",
|
| 830 |
+
nargs="*",
|
| 831 |
+
help="generate static dispatch code for the specific backend (if set)",
|
| 832 |
+
)
|
| 833 |
+
parser.add_argument(
|
| 834 |
+
"--op-registration-whitelist",
|
| 835 |
+
"--op_registration_whitelist",
|
| 836 |
+
nargs="*",
|
| 837 |
+
help="filter op registrations by the whitelist (if set); "
|
| 838 |
+
"each item is `namespace`::`operator name` without overload name; "
|
| 839 |
+
"e.g.: aten::empty aten::conv2d ...",
|
| 840 |
+
)
|
| 841 |
+
parser.add_argument(
|
| 842 |
+
"--op-selection-yaml-path",
|
| 843 |
+
"--op_selection_yaml_path",
|
| 844 |
+
help="Provide a path to the operator selection (for custom build) YAML "
|
| 845 |
+
"that contains the information about the set of selected operators "
|
| 846 |
+
"and their categories (training, ...). Each operator is either a "
|
| 847 |
+
"full operator name with overload or just a bare operator name. "
|
| 848 |
+
"The operator names also contain the namespace prefix (e.g. aten::)",
|
| 849 |
+
)
|
| 850 |
+
parser.add_argument(
|
| 851 |
+
"--tags-path",
|
| 852 |
+
help="Path to tags.yaml. Required by yaml parsing in codegen system.",
|
| 853 |
+
)
|
| 854 |
+
parser.add_argument(
|
| 855 |
+
"--rocm",
|
| 856 |
+
action="store_true",
|
| 857 |
+
help="reinterpret CUDA as ROCm/HIP and adjust filepaths accordingly",
|
| 858 |
+
)
|
| 859 |
+
parser.add_argument(
|
| 860 |
+
"--use-aten-lib",
|
| 861 |
+
"--use_aten_lib",
|
| 862 |
+
action="store_true",
|
| 863 |
+
help="a boolean flag to indicate whether we use ATen kernels or not, in the future this flag will be per "
|
| 864 |
+
"operator",
|
| 865 |
+
)
|
| 866 |
+
parser.add_argument(
|
| 867 |
+
"--generate",
|
| 868 |
+
type=str,
|
| 869 |
+
nargs="*",
|
| 870 |
+
choices=["headers", "sources"],
|
| 871 |
+
default=["headers", "sources"],
|
| 872 |
+
help="Generate only a subset of files",
|
| 873 |
+
)
|
| 874 |
+
options = parser.parse_args()
|
| 875 |
+
assert options.tags_path, "tags.yaml is required by codegen yaml parsing."
|
| 876 |
+
|
| 877 |
+
selector = get_custom_build_selector(
|
| 878 |
+
options.op_registration_whitelist,
|
| 879 |
+
options.op_selection_yaml_path,
|
| 880 |
+
)
|
| 881 |
+
|
| 882 |
+
parsed_yaml, custom_ops_parsed_yaml = parse_yaml_files(
|
| 883 |
+
aten_yaml_path=options.aten_yaml_path,
|
| 884 |
+
tags_yaml_path=options.tags_path,
|
| 885 |
+
native_yaml_path=options.functions_yaml_path,
|
| 886 |
+
custom_ops_yaml_path=options.custom_ops_yaml_path,
|
| 887 |
+
selector=selector,
|
| 888 |
+
use_aten_lib=options.use_aten_lib,
|
| 889 |
+
)
|
| 890 |
+
native_functions, kernel_index = (
|
| 891 |
+
parsed_yaml.native_functions,
|
| 892 |
+
parsed_yaml.kernel_index,
|
| 893 |
+
)
|
| 894 |
+
custom_ops_native_functions = (
|
| 895 |
+
custom_ops_parsed_yaml.native_functions if custom_ops_parsed_yaml else []
|
| 896 |
+
)
|
| 897 |
+
|
| 898 |
+
cpu_fm = make_file_manager(options=options)
|
| 899 |
+
|
| 900 |
+
if "headers" in options.generate:
|
| 901 |
+
# generate CustomOpsNativeFunctions.h when custom_ops.yaml is present, to match the build system.
|
| 902 |
+
gen_headers(
|
| 903 |
+
native_functions=native_functions,
|
| 904 |
+
gen_custom_ops_header=options.custom_ops_yaml_path,
|
| 905 |
+
custom_ops_native_functions=custom_ops_native_functions,
|
| 906 |
+
selector=selector,
|
| 907 |
+
kernel_index=kernel_index,
|
| 908 |
+
cpu_fm=cpu_fm,
|
| 909 |
+
use_aten_lib=options.use_aten_lib,
|
| 910 |
+
)
|
| 911 |
+
|
| 912 |
+
if "sources" in options.generate:
|
| 913 |
+
gen_unboxing(
|
| 914 |
+
native_functions=native_functions,
|
| 915 |
+
cpu_fm=cpu_fm,
|
| 916 |
+
selector=selector,
|
| 917 |
+
use_aten_lib=options.use_aten_lib,
|
| 918 |
+
kernel_index=kernel_index,
|
| 919 |
+
)
|
| 920 |
+
if custom_ops_native_functions:
|
| 921 |
+
gen_custom_ops(
|
| 922 |
+
native_functions=custom_ops_native_functions,
|
| 923 |
+
selector=selector,
|
| 924 |
+
kernel_index=kernel_index,
|
| 925 |
+
cpu_fm=cpu_fm,
|
| 926 |
+
rocm=options.rocm,
|
| 927 |
+
)
|
| 928 |
+
|
| 929 |
+
if options.output_dependencies:
|
| 930 |
+
depfile_path = pathlib.Path(options.output_dependencies).resolve()
|
| 931 |
+
depfile_name = depfile_path.name
|
| 932 |
+
depfile_stem = depfile_path.stem
|
| 933 |
+
|
| 934 |
+
for fm, prefix in [
|
| 935 |
+
(cpu_fm, ""),
|
| 936 |
+
]:
|
| 937 |
+
varname = prefix + depfile_stem
|
| 938 |
+
path = depfile_path.parent / (prefix + depfile_name)
|
| 939 |
+
fm.write_outputs(varname, str(path))
|
| 940 |
+
|
| 941 |
+
|
| 942 |
+
if __name__ == "__main__":
|
| 943 |
+
main()
|
llava_next/lib/python3.10/site-packages/torchgen/gen_lazy_tensor.py
ADDED
|
@@ -0,0 +1,605 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import os
|
| 3 |
+
import pathlib
|
| 4 |
+
import re
|
| 5 |
+
from collections import Counter, namedtuple
|
| 6 |
+
from typing import (
|
| 7 |
+
Any,
|
| 8 |
+
Callable,
|
| 9 |
+
Dict,
|
| 10 |
+
Iterable,
|
| 11 |
+
Iterator,
|
| 12 |
+
List,
|
| 13 |
+
Optional,
|
| 14 |
+
Sequence,
|
| 15 |
+
Tuple,
|
| 16 |
+
Type,
|
| 17 |
+
Union,
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
import yaml
|
| 21 |
+
|
| 22 |
+
import torchgen.dest as dest
|
| 23 |
+
|
| 24 |
+
from torchgen.api.lazy import setValueT
|
| 25 |
+
from torchgen.api.types import BaseCppType
|
| 26 |
+
from torchgen.dest.lazy_ir import GenLazyIR, GenLazyNativeFuncDefinition, GenTSLazyIR
|
| 27 |
+
from torchgen.gen import get_grouped_native_functions, parse_native_yaml
|
| 28 |
+
|
| 29 |
+
from torchgen.model import NativeFunction, NativeFunctionsGroup, OperatorName
|
| 30 |
+
from torchgen.selective_build.selector import SelectiveBuilder
|
| 31 |
+
from torchgen.utils import concatMap, FileManager, NamespaceHelper
|
| 32 |
+
from torchgen.yaml_utils import YamlLoader
|
| 33 |
+
from .gen_backend_stubs import (
|
| 34 |
+
error_on_missing_kernels,
|
| 35 |
+
gen_dispatcher_registrations,
|
| 36 |
+
gen_dispatchkey_nativefunc_headers,
|
| 37 |
+
parse_backend_yaml,
|
| 38 |
+
)
|
| 39 |
+
|
| 40 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 41 |
+
#
|
| 42 |
+
# Lazy Tensor Codegen
|
| 43 |
+
#
|
| 44 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 45 |
+
# Overview
|
| 46 |
+
# ~~~~~~~~
|
| 47 |
+
#
|
| 48 |
+
# This codegen script builds on existing data models and helpers used
|
| 49 |
+
# by all ATen backends, and adds new functionality specific to lazy
|
| 50 |
+
# tensor backends.
|
| 51 |
+
#
|
| 52 |
+
# Inputs:
|
| 53 |
+
# - <backend>_native_functions.yaml: controls which operators are
|
| 54 |
+
# supported by the backend.
|
| 55 |
+
#
|
| 56 |
+
# Outputs:
|
| 57 |
+
# (for all backends)
|
| 58 |
+
# <DispatchKey>Ir.h defines Lazy IR classes to be constructed during tracing
|
| 59 |
+
# - opt-in: also generate 'lowering' methods for the TorchScript backend only
|
| 60 |
+
# <DispatchKey>NativeFunctions.cpp defines implementations of native functions which perform lazy tracing
|
| 61 |
+
# - opt-in: 'full_codegen' section of backend yaml; 'supported' section omits these implementations
|
| 62 |
+
# <DispatchKey>NativeFunctions.h declares implementations of native functions for both 'supported' and 'full_codegen'
|
| 63 |
+
# ops
|
| 64 |
+
#
|
| 65 |
+
# Register<DispatchKey>.cpp registers all op implementations with the dispatcher
|
| 66 |
+
# RegisterAutograd<DispatchKey>.cpp registers all autograd implementations with the dispatcher
|
| 67 |
+
#
|
| 68 |
+
# Validation Helpers:
|
| 69 |
+
# - Shape Inference: errs if any ops in backend yaml require shape inference not provided by meta kernels or
|
| 70 |
+
# implementations in torch/csrc/lazy/core/shape_inference.*
|
| 71 |
+
# - native function impls: errs if any 'supported' ops do not have an implementation defined in the backend
|
| 72 |
+
# (non-codegen) implementation file
|
| 73 |
+
#
|
| 74 |
+
#
|
| 75 |
+
# About the Data Model
|
| 76 |
+
# ~~~~~~~~~~~~~~~~~~~~
|
| 77 |
+
#
|
| 78 |
+
# Modeled after ATen codegen, the first step is to parse yaml and build a data model for the operators
|
| 79 |
+
# we care about. In this case, the <backend>_native_functions yaml defines a subset of the core operators
|
| 80 |
+
# (defined in more detail in the main native_functions.yaml), which will be supported by your backend.
|
| 81 |
+
# Backends can list ops in two categories:
|
| 82 |
+
# - `supported` ops require hand-implementations but still get codegenned declarations and registrations
|
| 83 |
+
# - `full_codegen` ops get implementations (and IR classes) generated too
|
| 84 |
+
#
|
| 85 |
+
# Each native function is modeled as an object with a schema, and each schema has objects representing their
|
| 86 |
+
# arguments. Much of the codegen is manipulation of the arguments and their types. For example, lazy tensor
|
| 87 |
+
# backends need to transform 'at::Tensor' arguments into 'lazy::Value' objects, as well as replacing reference
|
| 88 |
+
# types (stringref) with actual string objects, and this is done by manipulating the data model objects.
|
| 89 |
+
# - see api/lazy.py for the lazy data model
|
| 90 |
+
#
|
| 91 |
+
# Once the data model is set up, the rest of this script processes a number of templates for output CPP file
|
| 92 |
+
# and fills in the template values using helpers in `dest/lazy_ir.py` and `dest/lazy_ts_lowering.py`. These
|
| 93 |
+
# helpers mostly iterate over functions and their arguments, outputting different c++ snippets.
|
| 94 |
+
#
|
| 95 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
# Parses the external backend's yaml, and adds a new BackendIndex for the backend's dispatch key.
|
| 99 |
+
# Returns a Tuple of (backend_key, autograd_key, cpp_namespace, updated BackendIndex mapping, full_codegen)
|
| 100 |
+
ParsedExternalYaml = namedtuple(
|
| 101 |
+
"ParsedExternalYaml",
|
| 102 |
+
["backend_key", "autograd_key", "cpp_namespace", "backend_indices", "full_codegen"],
|
| 103 |
+
)
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def parse_native_functions_keys(
|
| 107 |
+
backend_yaml_path: str,
|
| 108 |
+
grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
|
| 109 |
+
) -> Tuple[List[OperatorName], List[Any], List[OperatorName]]:
|
| 110 |
+
native_functions_map: Dict[OperatorName, NativeFunction] = {
|
| 111 |
+
f.func.name: f
|
| 112 |
+
for f in concatMap(
|
| 113 |
+
lambda f: [f] if isinstance(f, NativeFunction) else list(f.functions()),
|
| 114 |
+
grouped_native_functions,
|
| 115 |
+
)
|
| 116 |
+
}
|
| 117 |
+
|
| 118 |
+
with open(backend_yaml_path) as f:
|
| 119 |
+
yaml_values = yaml.load(f, Loader=YamlLoader)
|
| 120 |
+
assert isinstance(yaml_values, dict)
|
| 121 |
+
|
| 122 |
+
full_codegen = yaml_values.pop("full_codegen", [])
|
| 123 |
+
non_native = yaml_values.pop("non_native", [])
|
| 124 |
+
ir_gen = yaml_values.pop("ir_gen", [])
|
| 125 |
+
assert isinstance(full_codegen, list)
|
| 126 |
+
assert isinstance(non_native, list)
|
| 127 |
+
assert isinstance(ir_gen, list)
|
| 128 |
+
full_codegen_opnames = [OperatorName.parse(name) for name in full_codegen]
|
| 129 |
+
ir_gen_opnames = [OperatorName.parse(name) for name in ir_gen]
|
| 130 |
+
return full_codegen_opnames, non_native, ir_gen_opnames
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
def validate_shape_inference_header(
|
| 134 |
+
shape_inference_hdr: str, expected_shape_infr_decls: List[str]
|
| 135 |
+
) -> None:
|
| 136 |
+
try:
|
| 137 |
+
with open(shape_inference_hdr) as f:
|
| 138 |
+
shape_infr_decls = f.read()
|
| 139 |
+
shape_infr_decl_lines = set(shape_infr_decls.split("\n"))
|
| 140 |
+
except OSError as e:
|
| 141 |
+
raise AssertionError(
|
| 142 |
+
f"Unable to read from the specified shape_inference_hdr file: {shape_inference_hdr}"
|
| 143 |
+
) from e
|
| 144 |
+
|
| 145 |
+
shape_infr_regex = r"compute_shape_(\w+)"
|
| 146 |
+
actual_shape_infr_name_counts = Counter(
|
| 147 |
+
re.findall(shape_infr_regex, shape_infr_decls)
|
| 148 |
+
)
|
| 149 |
+
# TODO(whc) add a check for shape inference functions that have meta kernels implement and should be retired.
|
| 150 |
+
|
| 151 |
+
missing_decls = [
|
| 152 |
+
decl for decl in expected_shape_infr_decls if decl not in shape_infr_decl_lines
|
| 153 |
+
]
|
| 154 |
+
if missing_decls:
|
| 155 |
+
raise Exception(
|
| 156 |
+
f"""Missing shape inference function.\n
|
| 157 |
+
Please add declare this function in {shape_inference_hdr}:\n
|
| 158 |
+
and implement it in the the corresponding shape_inference.cpp file.\n
|
| 159 |
+
{os.linesep.join(missing_decls)}"""
|
| 160 |
+
)
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
# Some helper functions for the codegen.
|
| 164 |
+
def get_ltc_helper_fns() -> str:
|
| 165 |
+
return """\
|
| 166 |
+
at::Tensor to_meta(const at::Tensor& tensor) {
|
| 167 |
+
// undefined tensors can't be converted to the meta device, since they don't have sizes/strides
|
| 168 |
+
if (!tensor.defined()) return tensor;
|
| 169 |
+
auto out = at::native::empty_strided_meta_symint(tensor.sym_sizes(), tensor.sym_strides(), \
|
| 170 |
+
/*dtype=*/c10::make_optional(tensor.scalar_type()), /*layout=*/c10::make_optional(tensor.layout()), \
|
| 171 |
+
/*device=*/c10::make_optional(c10::Device(c10::kMeta)), /*pin_memory=*/c10::nullopt);
|
| 172 |
+
// needs to handle wrapped numbers, so dtype promotion works properly.
|
| 173 |
+
if (tensor.unsafeGetTensorImpl()->is_wrapped_number()) {
|
| 174 |
+
out.unsafeGetTensorImpl()->set_wrapped_number(true);
|
| 175 |
+
}
|
| 176 |
+
return out;
|
| 177 |
+
}
|
| 178 |
+
c10::optional<at::Tensor> to_meta(const c10::optional<at::Tensor>& tensor) {
|
| 179 |
+
if (tensor.has_value()) {
|
| 180 |
+
return to_meta(*tensor);
|
| 181 |
+
}
|
| 182 |
+
return c10::nullopt;
|
| 183 |
+
}
|
| 184 |
+
|
| 185 |
+
std::vector<at::Tensor> to_meta(at::ITensorListRef t_list) {
|
| 186 |
+
std::vector<at::Tensor> outs;
|
| 187 |
+
outs.reserve(t_list.size());
|
| 188 |
+
for (const auto& tensor : t_list) {
|
| 189 |
+
outs.push_back(to_meta(tensor));
|
| 190 |
+
}
|
| 191 |
+
return outs;
|
| 192 |
+
}
|
| 193 |
+
"""
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
class default_args:
|
| 197 |
+
node_base: str = "Node"
|
| 198 |
+
node_base_hdr: Optional[str] = None
|
| 199 |
+
shape_inference_hdr: str = "torch/csrc/lazy/core/shape_inference.h"
|
| 200 |
+
tensor_class: str = "torch::lazy::LazyTensor"
|
| 201 |
+
tensor_class_hdr: str = "torch/csrc/lazy/core/tensor.h"
|
| 202 |
+
lazy_ir_generator: Type[GenLazyIR] = GenLazyIR
|
| 203 |
+
native_func_definition_generator: Type[
|
| 204 |
+
GenLazyNativeFuncDefinition
|
| 205 |
+
] = GenLazyNativeFuncDefinition
|
| 206 |
+
backend_name: str = "TorchScript"
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
def main() -> None:
|
| 210 |
+
parser = argparse.ArgumentParser(description="Generate Lazy Tensor backend files")
|
| 211 |
+
parser.add_argument(
|
| 212 |
+
"-s",
|
| 213 |
+
"--source-yaml",
|
| 214 |
+
"--source_yaml",
|
| 215 |
+
help="path to source yaml file containing operator external definitions",
|
| 216 |
+
)
|
| 217 |
+
parser.add_argument("-o", "--output-dir", "--output_dir", help="output directory")
|
| 218 |
+
parser.add_argument(
|
| 219 |
+
"--dry-run", "--dry_run", type=bool, default=False, help="output directory"
|
| 220 |
+
)
|
| 221 |
+
parser.add_argument(
|
| 222 |
+
"--impl-path",
|
| 223 |
+
"--impl_path",
|
| 224 |
+
type=str,
|
| 225 |
+
default=None,
|
| 226 |
+
help="path to the source C++ file containing kernel definitions",
|
| 227 |
+
)
|
| 228 |
+
parser.add_argument(
|
| 229 |
+
"--gen-ts-lowerings",
|
| 230 |
+
"--gen_ts_lowerings",
|
| 231 |
+
action="store_true",
|
| 232 |
+
help="Generate TorchScript lowerings in addition to Lazy IR and NativeFunctions",
|
| 233 |
+
)
|
| 234 |
+
parser.add_argument(
|
| 235 |
+
"--node-base",
|
| 236 |
+
"--node_base",
|
| 237 |
+
type=str,
|
| 238 |
+
default=default_args.node_base,
|
| 239 |
+
help="Name of backend specific custom Lazy IR Node base class",
|
| 240 |
+
)
|
| 241 |
+
parser.add_argument(
|
| 242 |
+
"--node-base-hdr",
|
| 243 |
+
"--node_base_hdr",
|
| 244 |
+
type=str,
|
| 245 |
+
default=default_args.node_base_hdr,
|
| 246 |
+
help="Path to header file defining custom Lazy IR Node base class",
|
| 247 |
+
)
|
| 248 |
+
parser.add_argument(
|
| 249 |
+
"--shape-inference-hdr",
|
| 250 |
+
"--shape_inference_hdr",
|
| 251 |
+
type=str,
|
| 252 |
+
default=default_args.shape_inference_hdr,
|
| 253 |
+
help="Path to header file defining custom Lazy shape inference functions",
|
| 254 |
+
)
|
| 255 |
+
parser.add_argument(
|
| 256 |
+
"--tensor-class",
|
| 257 |
+
"--tensor_class",
|
| 258 |
+
type=str,
|
| 259 |
+
default=default_args.tensor_class,
|
| 260 |
+
help="Name of backend specific custom Lazy Tensor class",
|
| 261 |
+
)
|
| 262 |
+
parser.add_argument(
|
| 263 |
+
"--tensor-class-hdr",
|
| 264 |
+
"--tensor_class_hdr",
|
| 265 |
+
type=str,
|
| 266 |
+
default=default_args.tensor_class_hdr,
|
| 267 |
+
help="Path to header file defining custom Lazy Tensor class",
|
| 268 |
+
)
|
| 269 |
+
parser.add_argument(
|
| 270 |
+
"--backend-name",
|
| 271 |
+
"--backend_name",
|
| 272 |
+
type=str,
|
| 273 |
+
default=default_args.backend_name,
|
| 274 |
+
help="Name of the backend to generate",
|
| 275 |
+
)
|
| 276 |
+
options = parser.parse_args()
|
| 277 |
+
|
| 278 |
+
# Assumes that this file lives at PYTORCH_ROOT/torchgen/gen_backend_stubs.py
|
| 279 |
+
torch_root = pathlib.Path(__file__).parent.parent.parent.absolute()
|
| 280 |
+
aten_path = str(torch_root / "aten" / "src" / "ATen")
|
| 281 |
+
lazy_ir_generator: Type[GenLazyIR] = default_args.lazy_ir_generator
|
| 282 |
+
if options.gen_ts_lowerings:
|
| 283 |
+
lazy_ir_generator = GenTSLazyIR
|
| 284 |
+
native_func_definition_generator: Type[
|
| 285 |
+
GenLazyNativeFuncDefinition
|
| 286 |
+
] = default_args.native_func_definition_generator
|
| 287 |
+
|
| 288 |
+
run_gen_lazy_tensor(
|
| 289 |
+
aten_path,
|
| 290 |
+
options.source_yaml,
|
| 291 |
+
options.output_dir,
|
| 292 |
+
options.dry_run,
|
| 293 |
+
options.impl_path,
|
| 294 |
+
options.node_base,
|
| 295 |
+
options.node_base_hdr,
|
| 296 |
+
options.tensor_class,
|
| 297 |
+
options.tensor_class_hdr,
|
| 298 |
+
options.shape_inference_hdr,
|
| 299 |
+
lazy_ir_generator,
|
| 300 |
+
native_func_definition_generator,
|
| 301 |
+
options.backend_name,
|
| 302 |
+
)
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
def run_gen_lazy_tensor(
|
| 306 |
+
aten_path: str,
|
| 307 |
+
source_yaml: str,
|
| 308 |
+
output_dir: str,
|
| 309 |
+
dry_run: bool,
|
| 310 |
+
impl_path: Optional[str],
|
| 311 |
+
node_base: str = default_args.node_base,
|
| 312 |
+
node_base_hdr: Optional[str] = default_args.node_base_hdr,
|
| 313 |
+
tensor_class: str = default_args.tensor_class,
|
| 314 |
+
tensor_class_hdr: str = default_args.tensor_class_hdr,
|
| 315 |
+
shape_inference_hdr: str = default_args.shape_inference_hdr,
|
| 316 |
+
lazy_ir_generator: Type[GenLazyIR] = default_args.lazy_ir_generator,
|
| 317 |
+
native_func_definition_generator: Type[
|
| 318 |
+
GenLazyNativeFuncDefinition
|
| 319 |
+
] = default_args.native_func_definition_generator,
|
| 320 |
+
# build_in_tree is true for TS backend and affects include paths
|
| 321 |
+
build_in_tree: bool = False,
|
| 322 |
+
# per_operator_headers changes whether ATen/Functions.h or individual operator headers are used
|
| 323 |
+
# it must match how ATen was built
|
| 324 |
+
per_operator_headers: bool = False,
|
| 325 |
+
backend_name: str = default_args.backend_name,
|
| 326 |
+
gen_forced_fallback_code: bool = False,
|
| 327 |
+
use_lazy_shape: bool = True,
|
| 328 |
+
# the following arguments are temporary customization points for xla backend migration.
|
| 329 |
+
# do not rely on them otherwise, they should be removed once migration is complete
|
| 330 |
+
backend_namespace: str = "torch::lazy",
|
| 331 |
+
get_tensorlist: str = "GetTensorList",
|
| 332 |
+
get_tensor_or_wrap_number: str = "GetLtcTensorOrCreateForWrappedNumber",
|
| 333 |
+
try_get_tensor: str = "TryGetLtcTensor",
|
| 334 |
+
metrics_counter: str = 'TORCH_LAZY_FN_COUNTER("lazy::")',
|
| 335 |
+
create_tensor: str = "LazyTensor::Create",
|
| 336 |
+
create_from_first_tensor: bool = False,
|
| 337 |
+
create_aten_from_ltc_tensor: str = "torch::lazy::CreateAtenFromLtcTensor",
|
| 338 |
+
tuple_aten_from_ltc_tensors: str = "torch::lazy::TupleAtenFromLtcTensors",
|
| 339 |
+
lazy_value_class: str = "torch::lazy::Value",
|
| 340 |
+
lazy_tensor_ptr: str = "LazyTensorPtr",
|
| 341 |
+
get_device_fn: str = "torch::lazy::GetBackendDevice",
|
| 342 |
+
) -> None:
|
| 343 |
+
lv_tokens = lazy_value_class.split("::")
|
| 344 |
+
lv_class = lv_tokens[-1]
|
| 345 |
+
lv_ns = "::".join(lv_tokens[:-1])
|
| 346 |
+
setValueT(BaseCppType(lv_ns, lv_class))
|
| 347 |
+
template_dir = os.path.join(aten_path, "templates")
|
| 348 |
+
|
| 349 |
+
def make_file_manager(install_dir: str) -> FileManager:
|
| 350 |
+
return FileManager(
|
| 351 |
+
install_dir=install_dir, template_dir=template_dir, dry_run=dry_run
|
| 352 |
+
)
|
| 353 |
+
|
| 354 |
+
fm = make_file_manager(output_dir)
|
| 355 |
+
|
| 356 |
+
native_yaml_path = os.path.join(aten_path, "native/native_functions.yaml")
|
| 357 |
+
tags_yaml_path = os.path.join(aten_path, "native/tags.yaml")
|
| 358 |
+
parsed_yaml = parse_native_yaml(native_yaml_path, tags_yaml_path)
|
| 359 |
+
native_functions, backend_indices = (
|
| 360 |
+
parsed_yaml.native_functions,
|
| 361 |
+
parsed_yaml.backend_indices,
|
| 362 |
+
)
|
| 363 |
+
grouped_native_functions = get_grouped_native_functions(native_functions)
|
| 364 |
+
|
| 365 |
+
def sort_native_function(f: Union[NativeFunctionsGroup, NativeFunction]) -> str:
|
| 366 |
+
"""
|
| 367 |
+
We sort the native function because of the note in concat_map_codegen.
|
| 368 |
+
TODO(alanwaketan): Remove this sorting hack once all ops are grouped properly.
|
| 369 |
+
"""
|
| 370 |
+
func = f.functional.func if isinstance(f, NativeFunctionsGroup) else f.func
|
| 371 |
+
return str(func.name.name)
|
| 372 |
+
|
| 373 |
+
grouped_native_functions = sorted(
|
| 374 |
+
grouped_native_functions, key=sort_native_function
|
| 375 |
+
)
|
| 376 |
+
|
| 377 |
+
parsed_backend_yaml = parse_backend_yaml(
|
| 378 |
+
source_yaml, grouped_native_functions, backend_indices
|
| 379 |
+
)
|
| 380 |
+
backend_key = parsed_backend_yaml.backend_key
|
| 381 |
+
autograd_key = parsed_backend_yaml.autograd_key
|
| 382 |
+
cpp_namespace = parsed_backend_yaml.cpp_namespace
|
| 383 |
+
backend_indices = parsed_backend_yaml.backend_indices
|
| 384 |
+
# the following 3 keys are all processed differently
|
| 385 |
+
# for full_codegen, we generate IR, kernels, etc
|
| 386 |
+
# for ir_gen, we generate only IR
|
| 387 |
+
# non_native is used to register kernels not declared in
|
| 388 |
+
# native_functions.yaml
|
| 389 |
+
full_codegen, non_native, ir_gen = parse_native_functions_keys(
|
| 390 |
+
source_yaml, grouped_native_functions
|
| 391 |
+
)
|
| 392 |
+
|
| 393 |
+
def concat_map_codegen(
|
| 394 |
+
func: Callable[[NativeFunction], Sequence[str]],
|
| 395 |
+
xs: Iterable[Union[NativeFunctionsGroup, NativeFunction]],
|
| 396 |
+
ops_list: List[OperatorName] = full_codegen,
|
| 397 |
+
) -> Iterator[str]:
|
| 398 |
+
"""
|
| 399 |
+
We code-gen for the functional variant, which is all we need for IR classes/lowerings/shape inferences, but we
|
| 400 |
+
only code-gen additional entries for the inplace variant for the native functions.
|
| 401 |
+
"""
|
| 402 |
+
|
| 403 |
+
for x in xs:
|
| 404 |
+
fs = list(x.functions()) if isinstance(x, NativeFunctionsGroup) else [x]
|
| 405 |
+
for f in fs:
|
| 406 |
+
if f.func.name in ops_list:
|
| 407 |
+
yield from func(f)
|
| 408 |
+
|
| 409 |
+
selector = SelectiveBuilder.get_nop_selector()
|
| 410 |
+
|
| 411 |
+
assert backend_key is not None
|
| 412 |
+
class_name = backend_indices[backend_key].native_function_class_name()
|
| 413 |
+
|
| 414 |
+
if impl_path is not None:
|
| 415 |
+
error_on_missing_kernels(
|
| 416 |
+
native_functions,
|
| 417 |
+
backend_indices,
|
| 418 |
+
backend_key,
|
| 419 |
+
autograd_key,
|
| 420 |
+
class_name,
|
| 421 |
+
impl_path,
|
| 422 |
+
full_codegen,
|
| 423 |
+
)
|
| 424 |
+
|
| 425 |
+
""" Validate Shape Inference Definitions
|
| 426 |
+
|
| 427 |
+
Generated lazy native functions all perform shape inference, by first using a meta:: kernel
|
| 428 |
+
if available for that op, and otherwise using a 'compute_shape_{op}' function instead. The generator
|
| 429 |
+
knows the call signature for compute_shape_{op} becuase it matches the nativefunction (and meta::) signature,
|
| 430 |
+
so it just has to check whether the op is structured and generate a call for one or the other. It's up to the dev
|
| 431 |
+
to supply the missing compute_shape_{op} function, but the codegen at least warns you about this and provides
|
| 432 |
+
the expected signature which can be copy-pasted into shape_inference.h.
|
| 433 |
+
|
| 434 |
+
compute_shape_{op} functions are handwritten and should be replaced over time as ops get ported
|
| 435 |
+
to structured kernels.
|
| 436 |
+
|
| 437 |
+
See torch/csrc/lazy/core/shape_inference.cpp #READ THIS! for more information.
|
| 438 |
+
"""
|
| 439 |
+
if shape_inference_hdr is not None:
|
| 440 |
+
expected_shape_infr_decls = list(
|
| 441 |
+
concat_map_codegen(
|
| 442 |
+
dest.GenLazyShapeInferenceDefinition(
|
| 443 |
+
backend_indices[backend_key], tensor_class
|
| 444 |
+
),
|
| 445 |
+
grouped_native_functions,
|
| 446 |
+
)
|
| 447 |
+
)
|
| 448 |
+
|
| 449 |
+
validate_shape_inference_header(shape_inference_hdr, expected_shape_infr_decls)
|
| 450 |
+
assert class_name is not None
|
| 451 |
+
|
| 452 |
+
# Generate nativefunction declarations
|
| 453 |
+
# Note, eager registrations is set to False for the lazy TS backend as another LTC backend
|
| 454 |
+
# may want to register their own lazy kernels instead of registering the TS ones.
|
| 455 |
+
# The registration will lazily happen when init_ts_backend is called.
|
| 456 |
+
gen_dispatchkey_nativefunc_headers(
|
| 457 |
+
fm,
|
| 458 |
+
class_name,
|
| 459 |
+
cpp_namespace,
|
| 460 |
+
backend_indices,
|
| 461 |
+
grouped_native_functions,
|
| 462 |
+
backend_key,
|
| 463 |
+
autograd_key,
|
| 464 |
+
backend_name,
|
| 465 |
+
)
|
| 466 |
+
|
| 467 |
+
# Generate Dispatcher registrations which hook up the nativefunctions
|
| 468 |
+
for dispatch_key in (
|
| 469 |
+
[backend_key] if autograd_key is None else [backend_key, autograd_key]
|
| 470 |
+
):
|
| 471 |
+
gen_dispatcher_registrations(
|
| 472 |
+
fm,
|
| 473 |
+
output_dir,
|
| 474 |
+
class_name,
|
| 475 |
+
backend_indices,
|
| 476 |
+
grouped_native_functions,
|
| 477 |
+
backend_key,
|
| 478 |
+
dispatch_key,
|
| 479 |
+
selector,
|
| 480 |
+
build_in_tree=build_in_tree,
|
| 481 |
+
per_operator_headers=per_operator_headers,
|
| 482 |
+
backend_name=backend_name,
|
| 483 |
+
eager_registration=False,
|
| 484 |
+
)
|
| 485 |
+
|
| 486 |
+
# Generate native function impls that build IR nodes
|
| 487 |
+
ns_helper = NamespaceHelper(cpp_namespace)
|
| 488 |
+
fm.write_with_template(
|
| 489 |
+
f"{backend_key}NativeFunctions.cpp",
|
| 490 |
+
"DispatchKeyNativeFunctions.cpp",
|
| 491 |
+
lambda: {
|
| 492 |
+
"includes": [
|
| 493 |
+
f"#include <{path}>"
|
| 494 |
+
for path in [
|
| 495 |
+
tensor_class_hdr,
|
| 496 |
+
shape_inference_hdr,
|
| 497 |
+
"ATen/Functions.h",
|
| 498 |
+
"ATen/native/TensorConversions.h",
|
| 499 |
+
"ATen/NativeFunctions.h",
|
| 500 |
+
"ATen/CompositeExplicitAutogradNonFunctionalFunctions.h",
|
| 501 |
+
"ATen/MetaFunctions.h",
|
| 502 |
+
"ATen/Operators.h",
|
| 503 |
+
"ATen/native/CPUFallback.h",
|
| 504 |
+
"torch/csrc/lazy/core/ir_builder.h",
|
| 505 |
+
"torch/csrc/lazy/core/lazy_graph_executor.h",
|
| 506 |
+
"torch/csrc/lazy/core/metrics.h",
|
| 507 |
+
"torch/csrc/lazy/core/shape.h",
|
| 508 |
+
f"{output_dir}/{backend_key}NativeFunctions.h",
|
| 509 |
+
f"{output_dir}/LazyIr.h",
|
| 510 |
+
]
|
| 511 |
+
+ (
|
| 512 |
+
["torch/csrc/lazy/ts_backend/ts_eager_fallback.h"]
|
| 513 |
+
if gen_forced_fallback_code
|
| 514 |
+
else []
|
| 515 |
+
)
|
| 516 |
+
],
|
| 517 |
+
"helper_fns": get_ltc_helper_fns(),
|
| 518 |
+
"native_functions_include": "",
|
| 519 |
+
"namespace_prologue": ns_helper.prologue,
|
| 520 |
+
"namespace_epilogue": ns_helper.epilogue,
|
| 521 |
+
"native_function_definitions": list(
|
| 522 |
+
concat_map_codegen(
|
| 523 |
+
native_func_definition_generator(
|
| 524 |
+
f"{backend_key}NativeFunctions",
|
| 525 |
+
backend_indices[backend_key],
|
| 526 |
+
tensor_class,
|
| 527 |
+
gen_forced_fallback_code,
|
| 528 |
+
backend_namespace,
|
| 529 |
+
get_tensorlist,
|
| 530 |
+
get_tensor_or_wrap_number,
|
| 531 |
+
try_get_tensor,
|
| 532 |
+
metrics_counter,
|
| 533 |
+
create_tensor,
|
| 534 |
+
create_from_first_tensor,
|
| 535 |
+
create_aten_from_ltc_tensor,
|
| 536 |
+
tuple_aten_from_ltc_tensors,
|
| 537 |
+
lazy_tensor_ptr,
|
| 538 |
+
get_device_fn,
|
| 539 |
+
),
|
| 540 |
+
grouped_native_functions,
|
| 541 |
+
)
|
| 542 |
+
),
|
| 543 |
+
},
|
| 544 |
+
)
|
| 545 |
+
# Generate IR node classes
|
| 546 |
+
lazy_ir_obj = lazy_ir_generator(
|
| 547 |
+
backend_indices[backend_key], backend_name, node_base, use_lazy_shape
|
| 548 |
+
)
|
| 549 |
+
|
| 550 |
+
fm.write_with_template(
|
| 551 |
+
"LazyIr.h",
|
| 552 |
+
"LazyIr.h",
|
| 553 |
+
lambda: {
|
| 554 |
+
"lazy_ir_sysinc": [
|
| 555 |
+
f"#include <{path}>"
|
| 556 |
+
for path in [
|
| 557 |
+
"ATen/core/Formatting.h",
|
| 558 |
+
"c10/core/ScalarType.h",
|
| 559 |
+
"c10/util/Optional.h",
|
| 560 |
+
"torch/csrc/lazy/core/hash.h",
|
| 561 |
+
"torch/csrc/lazy/core/ir.h",
|
| 562 |
+
"torch/csrc/lazy/core/shape.h",
|
| 563 |
+
"vector",
|
| 564 |
+
]
|
| 565 |
+
],
|
| 566 |
+
"lazy_ir_inc": [f'#include "{node_base_hdr}"']
|
| 567 |
+
if node_base_hdr is not None
|
| 568 |
+
else [],
|
| 569 |
+
"ir_declarations": list(
|
| 570 |
+
concat_map_codegen(
|
| 571 |
+
lazy_ir_obj, grouped_native_functions, full_codegen + ir_gen
|
| 572 |
+
)
|
| 573 |
+
),
|
| 574 |
+
"namespace_prologue": ns_helper.prologue,
|
| 575 |
+
"namespace_epilogue": ns_helper.epilogue,
|
| 576 |
+
},
|
| 577 |
+
)
|
| 578 |
+
|
| 579 |
+
# Generate Non Native IR Node classes
|
| 580 |
+
fm.write_with_template(
|
| 581 |
+
"LazyNonNativeIr.h",
|
| 582 |
+
"LazyNonNativeIr.h",
|
| 583 |
+
lambda: {
|
| 584 |
+
"lazy_non_native_ir_inc": [
|
| 585 |
+
f"#include <{path}>"
|
| 586 |
+
for path in [
|
| 587 |
+
"torch/csrc/lazy/core/ir.h",
|
| 588 |
+
"torch/csrc/lazy/core/ir_builder.h",
|
| 589 |
+
"torch/csrc/lazy/core/internal_ops/ltc_ops.h",
|
| 590 |
+
"torch/csrc/lazy/core/shape_inference.h",
|
| 591 |
+
]
|
| 592 |
+
+ ([node_base_hdr] if node_base_hdr else [])
|
| 593 |
+
if path
|
| 594 |
+
],
|
| 595 |
+
"non_native_ir_nodes": dest.generate_non_native_lazy_ir_nodes(
|
| 596 |
+
non_native, lazy_ir_obj
|
| 597 |
+
),
|
| 598 |
+
"namespace_prologue": ns_helper.prologue,
|
| 599 |
+
"namespace_epilogue": ns_helper.epilogue,
|
| 600 |
+
},
|
| 601 |
+
)
|
| 602 |
+
|
| 603 |
+
|
| 604 |
+
# Script entry point: dispatch to main() only when this file is executed
# directly (not when it is imported as a module).
if __name__ == "__main__":
    main()
|
llava_next/lib/python3.10/site-packages/torchgen/gen_vmap_plumbing.py
ADDED
|
@@ -0,0 +1,265 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import textwrap
|
| 2 |
+
from dataclasses import dataclass
|
| 3 |
+
from typing import List, Optional, Sequence, Tuple
|
| 4 |
+
|
| 5 |
+
from torchgen.api.translate import translate
|
| 6 |
+
from torchgen.api.types import DispatcherSignature
|
| 7 |
+
from torchgen.context import method_with_native_function
|
| 8 |
+
from torchgen.model import (
|
| 9 |
+
Argument,
|
| 10 |
+
BaseTy,
|
| 11 |
+
BaseType,
|
| 12 |
+
FunctionSchema,
|
| 13 |
+
ListType,
|
| 14 |
+
NativeFunction,
|
| 15 |
+
OptionalType,
|
| 16 |
+
Return,
|
| 17 |
+
SchemaKind,
|
| 18 |
+
Type,
|
| 19 |
+
)
|
| 20 |
+
from torchgen.utils import mapMaybe
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def is_tensor(typ: Type) -> bool:
    """Return True iff ``typ`` is the plain ``Tensor`` base type."""
    if not isinstance(typ, BaseType):
        return False
    return typ.name == BaseTy.Tensor
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def is_optional_tensor(typ: Type) -> bool:
    """Return True iff ``typ`` is ``Tensor?`` (an optional wrapping a Tensor)."""
    if isinstance(typ, OptionalType):
        return is_tensor(typ.elem)
    return False
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def is_tensor_list(typ: Type) -> bool:
    """Return True iff ``typ`` is ``Tensor[]`` (a list whose element is Tensor)."""
    if isinstance(typ, ListType):
        return is_tensor(typ.elem)
    return False
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def unwrap_tensor(name: str, cur_level_var: str) -> List[str]:
    """Emit C++ lines that split tensor ``name`` into a ``<name>_value`` /
    ``<name>_bdim`` pair at dynamic layer level ``cur_level_var``."""
    return [
        f"Tensor {name}_value;",
        f"optional<int64_t> {name}_bdim;",
        f"std::tie({name}_value, {name}_bdim) = unwrapTensorAtLevel({name}, {cur_level_var});",
    ]
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def unwrap_optional_tensor(name: str, cur_level_var: str) -> List[str]:
    """Emit C++ lines that split optional tensor ``name`` into a value/bdim
    pair, guarded on the optional actually holding a value."""
    return [
        f"optional<Tensor> {name}_value;",
        f"optional<int64_t> {name}_bdim;",
        f"if ({name}) {{",
        f"    std::tie({name}_value, {name}_bdim) = unwrapTensorAtLevel({name}.value(), {cur_level_var});",
        "}",
    ]
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def gen_unwraps(
    flat_arguments: Sequence[Argument], cur_level_var: str
) -> Tuple[str, List[str]]:
    """Generate the C++ unwrap preamble plus the argument list for a
    batch-rule call.

    Every ``Tensor`` / ``Tensor?`` argument is split into a
    ``<name>_value`` / ``<name>_bdim`` pair at ``cur_level_var``; all other
    arguments pass through unchanged.

    Args:
        flat_arguments: flattened arguments of the op's schema.
        cur_level_var: name of the C++ variable holding the current level.

    Returns:
        ``(unwrap_code, unwrapped_arg_list)`` — the joined C++ snippet and
        the per-argument expressions to splat into the batch rule call.
    """
    tensors = [a.name for a in flat_arguments if is_tensor(a.type)]
    optional_tensors = [a.name for a in flat_arguments if is_optional_tensor(a.type)]

    unwraps: List[str] = []
    for name in tensors:
        unwraps += unwrap_tensor(name, cur_level_var)
    for name in optional_tensors:
        unwraps += unwrap_optional_tensor(name, cur_level_var)
    unwrap_code = "\n".join(unwraps)

    # Membership test used once per argument: a set keeps this linear
    # (the original did O(n) list scans per argument).
    unwrapped_names = set(tensors) | set(optional_tensors)
    unwrapped_arg_list: List[str] = []
    for a in flat_arguments:
        if a.name in unwrapped_names:
            unwrapped_arg_list += [f"{a.name}_value", f"{a.name}_bdim"]
        else:
            unwrapped_arg_list.append(a.name)
    return unwrap_code, unwrapped_arg_list
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def gen_case_where_all_bdims_are_none(
    outer_sig: DispatcherSignature, schema: FunctionSchema, cur_level_var: str
) -> str:
    """Emit the C++ fast path: when no tensor-like argument is batched at
    the current level, bypass the batch rule and call the plain ATen op.

    Args:
        outer_sig: signature of the generated plumbing function (the caller).
        schema: the op's function schema.
        cur_level_var: name of the C++ variable holding the current level.
    """
    conditions = []
    flat_args = schema.arguments.flat_all
    for arg in flat_args:
        # Only tensor-like arguments can carry a batch dimension.
        if not arg.type.is_tensor_like():
            continue
        conditions.append(f"!isBatchedAtLevel({arg.name}, {cur_level_var})")

    sig = DispatcherSignature.from_schema(schema)
    # Map the outer (plumbing) argument expressions onto the inner op call.
    translated_args = ", ".join(
        e.expr for e in translate(outer_sig.arguments(), sig.arguments())
    )
    return f"""\
if ({' && '.join(conditions)}) {{
  return at::_ops::{sig.func.name.unambiguous_name()}::call({translated_args});
}}"""
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def gen_returns(
    returns: Tuple[Return, ...], cur_level_var: str, results_var: str
) -> str:
    """Emit the C++ return statement that re-wraps batch-rule results.

    The batch rule yields a (value, bdim) pair for every Tensor or
    Tensor-list output, so tensor-like returns consume two tuple slots of
    ``results_var`` while every other return consumes one.
    """
    wrapped = []
    slot = 0
    for ret in returns:
        if is_tensor(ret.type):
            wrapped.append(
                f"makeBatched(std::get<{slot}>({results_var}), std::get<{slot + 1}>({results_var}), {cur_level_var})"
            )
            slot += 2
        elif is_tensor_list(ret.type):
            wrapped.append(
                f"makeBatchedVector(std::get<{slot}>({results_var}), std::get<{slot + 1}>({results_var}), {cur_level_var})"
            )
            slot += 2
        else:
            # Non-tensor return: forward it untouched.
            wrapped.append(f"std::get<{slot}>({results_var})")
            slot += 1
    if len(wrapped) == 1:
        return f"return {wrapped[0]};"
    return f'return std::make_tuple({", ".join(wrapped)});'
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
def accepts_at_least_one_tensor_input(schema: FunctionSchema) -> bool:
    """Return True if any argument of ``schema`` is tensor-like."""
    for argument in schema.arguments.flat_all:
        if argument.type.is_tensor_like():
            return True
    return False
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
def is_mutated_arg(argument: Argument) -> bool:
    """Return True if ``argument`` carries a write annotation (mutated in place)."""
    annotation = argument.annotation
    if annotation is None:
        return False
    return annotation.is_write
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def gen_vmap_inplace_plumbing(native_function: NativeFunction) -> Optional[str]:
    """Generate the C++ vmap plumbing wrapper for an in-place op.

    Returns the templated C++ function text, or None when the op violates
    the assumptions below and must be handled manually.
    """
    # Assumptions:
    # - only one argument is being modified in-place
    # - the argument that is being modified in-place is the first argument
    # - all returns are either Tensor, tuple of Tensor, or TensorList
    schema = native_function.func
    sig = DispatcherSignature.from_schema(schema)
    returns = schema.returns

    # Check assumptions. If these are invalid we return None
    # and punt the work to handle them to the future.
    assert schema.kind() == SchemaKind.inplace
    if not is_mutated_arg(schema.arguments.flat_all[0]):
        return None
    if not len([arg for arg in schema.arguments.flat_all if is_mutated_arg(arg)]) == 1:
        return None

    # Only support cases where all returns are Tensors or vector<Tensor>
    if len(returns) == 0:
        return None
    if not all(is_tensor(ret.type) or is_tensor_list(ret.type) for ret in returns):
        return None
    if not accepts_at_least_one_tensor_input(schema):
        return None

    cur_level_var = "cur_level"

    unwraps, unwrapped_arg_list = gen_unwraps(schema.arguments.flat_all, cur_level_var)
    bdims_all_none_case = gen_case_where_all_bdims_are_none(sig, schema, cur_level_var)

    # In-place: the batch rule mutates the first argument, so we return the
    # original (now mutated) first argument instead of the rule's result.
    return f"""\
template <typename batch_rule_t, batch_rule_t batch_rule>
{sig.decl(name=schema.name.unambiguous_name() + '_generated_plumbing')} {{
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t {cur_level_var} = maybe_layer->layerId();
{textwrap.indent(bdims_all_none_case, "  ")}
{textwrap.indent(unwraps, "  ")}
  batch_rule({', '.join(unwrapped_arg_list)});
  return {schema.arguments.flat_all[0].name};
}}"""
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
def gen_vmap_plumbing_no_returns(native_function: NativeFunction) -> str:
    """Generate vmap plumbing for an op with no returns: unwrap the inputs,
    invoke the batch rule purely for its side effects, return nothing."""
    schema = native_function.func
    sig = DispatcherSignature.from_schema(schema)
    cur_level_var = "cur_level"

    unwraps, unwrapped_arg_list = gen_unwraps(schema.arguments.flat_all, cur_level_var)
    bdims_all_none_case = gen_case_where_all_bdims_are_none(sig, schema, cur_level_var)

    return f"""\
template <typename batch_rule_t, batch_rule_t batch_rule>
{sig.decl(name=schema.name.unambiguous_name() + '_generated_plumbing')} {{
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t {cur_level_var} = maybe_layer->layerId();
{textwrap.indent(bdims_all_none_case, "  ")}
{textwrap.indent(unwraps, "  ")}
  batch_rule({', '.join(unwrapped_arg_list)});
}}"""
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
def gen_vmap_plumbing(native_function: NativeFunction) -> Optional[str]:
    """Generate the C++ 'plumbing' wrapper adapting ``native_function`` to
    its functorch batch rule, or None when the op is unsupported.

    Dispatches to the no-return / in-place specializations where relevant;
    only functional-kind schemas are handled here directly.
    """
    schema = native_function.func
    sig = DispatcherSignature.from_schema(schema)
    returns = schema.returns

    # Only support cases where all returns are Tensors or vector<Tensor>
    if not accepts_at_least_one_tensor_input(schema):
        return None
    if len(returns) == 0:
        return gen_vmap_plumbing_no_returns(native_function)
    if not all(ret.type.is_tensor_like() for ret in returns):
        return None
    # in-place views need special handling
    if "inplace_view" in native_function.tags:
        return None

    if schema.kind() == SchemaKind.inplace:
        return gen_vmap_inplace_plumbing(native_function)

    # Don't support these (mutable, out, scratch)
    if schema.kind() != SchemaKind.functional:
        return None

    results_var = "results"
    cur_level_var = "cur_level"

    unwraps, unwrapped_arg_list = gen_unwraps(schema.arguments.flat_all, cur_level_var)
    bdims_all_none_case = gen_case_where_all_bdims_are_none(sig, schema, cur_level_var)

    wrapped_returns = gen_returns(returns, cur_level_var, results_var)
    return f"""\
template <typename batch_rule_t, batch_rule_t batch_rule>
{sig.decl(name=schema.name.unambiguous_name() + '_generated_plumbing')} {{
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t {cur_level_var} = maybe_layer->layerId();
{textwrap.indent(bdims_all_none_case, "  ")}
{textwrap.indent(unwraps, "  ")}
  auto {results_var} = batch_rule({', '.join(unwrapped_arg_list)});
  {wrapped_returns}
}}"""
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
@dataclass(frozen=True)
class ComputeBatchRulePlumbing:
    """Callable functor mapping a NativeFunction to its generated vmap
    plumbing C++ text, or None when the op is unsupported."""

    @method_with_native_function
    def __call__(self, f: NativeFunction) -> Optional[str]:
        # Fix: drop the unused `opname` local and the needless `result`
        # temporary — delegate straight to the generator.
        return gen_vmap_plumbing(f)
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
def gen_all_vmap_plumbing(native_functions: Sequence[NativeFunction]) -> str:
    """Render the complete generated vmap plumbing header for all of
    ``native_functions`` (unsupported ops are silently skipped)."""
    # str.join accepts any iterable — no need to materialize a list first.
    body = "\n".join(mapMaybe(ComputeBatchRulePlumbing(), native_functions))
    return f"""
#pragma once
#include <ATen/Operators.h>
#include <ATen/functorch/PlumbingHelper.h>

namespace at {{ namespace functorch {{

{body}

}}}} // namespace at::functorch
"""
|
llava_next/lib/python3.10/site-packages/torchgen/model.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
llava_next/lib/python3.10/site-packages/torchgen/operator_versions/gen_mobile_upgraders.py
ADDED
|
@@ -0,0 +1,392 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
import os
|
| 3 |
+
from enum import Enum
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
from typing import Any, Dict, List
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
from torch.jit.generate_bytecode import generate_upgraders_bytecode
|
| 9 |
+
|
| 10 |
+
from torchgen.code_template import CodeTemplate
|
| 11 |
+
from torchgen.operator_versions.gen_mobile_upgraders_constant import (
|
| 12 |
+
MOBILE_UPGRADERS_HEADER_DESCRIPTION,
|
| 13 |
+
)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class ByteCode(Enum):
    """Positions of the sections inside a mobile bytecode function tuple."""
    instructions = 1
    constants = 2
    types = 3
    operators = 4
    register_size = 5
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
# Operators whose schemas are currently broken; they are skipped when
# building the operator -> upgrader version map (see construct_version_maps).
EXCLUDED_OP_SET = [
    "aten::full.names",
    "aten::full.out",
    "aten::full",
]

# Upgrader functions to skip.  NOTE(review): the name has a typo
# ("EXCLUE"), but renaming would break any external importer — left as-is.
EXCLUE_UPGRADER_SET = ["full_0_4", "full_out_0_4"]
|
| 31 |
+
|
| 32 |
+
ONE_INSTRUCTION = CodeTemplate(
|
| 33 |
+
"""
|
| 34 |
+
Instruction{OpCode::${operator_name}, ${X}, ${N}},"""
|
| 35 |
+
)
|
| 36 |
+
|
| 37 |
+
INSTRUCTION_LIST = CodeTemplate(
|
| 38 |
+
"""std::vector<Instruction>({
|
| 39 |
+
${instruction_list}
|
| 40 |
+
}), // instructions list"""
|
| 41 |
+
)
|
| 42 |
+
|
| 43 |
+
ONE_CONSTANT = CodeTemplate(
|
| 44 |
+
"""
|
| 45 |
+
c10::IValue(${constant}),"""
|
| 46 |
+
)
|
| 47 |
+
|
| 48 |
+
CONSTANT_LIST = CodeTemplate(
|
| 49 |
+
"""std::vector<c10::IValue>({
|
| 50 |
+
${constant_list}
|
| 51 |
+
}), // constants list"""
|
| 52 |
+
)
|
| 53 |
+
|
| 54 |
+
CONSTANTS_LIST_EMPTY = """std::vector<c10::IValue>(), // constants list"""
|
| 55 |
+
|
| 56 |
+
ONE_TYPE = CodeTemplate("""c10::parseType("${type_str}"),""")
|
| 57 |
+
|
| 58 |
+
TYPE_LIST = CodeTemplate(
|
| 59 |
+
"""std::vector<c10::TypePtr>({
|
| 60 |
+
${type_list}
|
| 61 |
+
}), // types list"""
|
| 62 |
+
)
|
| 63 |
+
|
| 64 |
+
TYPE_LIST_EMPTY = """std::vector<c10::TypePtr>(), // types list"""
|
| 65 |
+
|
| 66 |
+
ONE_OPERATOTR_STRING = CodeTemplate(
|
| 67 |
+
"""
|
| 68 |
+
OperatorString({"${operator_name}", "${overload_name}", ${num_of_args}}),"""
|
| 69 |
+
)
|
| 70 |
+
|
| 71 |
+
OPERATOR_STRING_LIST = CodeTemplate(
|
| 72 |
+
"""
|
| 73 |
+
std::vector<OperatorString>({
|
| 74 |
+
${operator_string_list}
|
| 75 |
+
}), // operators list"""
|
| 76 |
+
)
|
| 77 |
+
|
| 78 |
+
ONE_UPGRADER_FUNCTION = CodeTemplate(
|
| 79 |
+
"""
|
| 80 |
+
mobile::Function::registerFunc(
|
| 81 |
+
"${upgrader_name}",
|
| 82 |
+
${instruction_list},
|
| 83 |
+
${constant_list},
|
| 84 |
+
${type_list},
|
| 85 |
+
${register_size}
|
| 86 |
+
)"""
|
| 87 |
+
)
|
| 88 |
+
|
| 89 |
+
ONE_UPGRADER_SRC = CodeTemplate(
|
| 90 |
+
"""
|
| 91 |
+
ByteCodeFunctionWithOperator({
|
| 92 |
+
${bytecode_function},
|
| 93 |
+
${operator_string_list}
|
| 94 |
+
}),"""
|
| 95 |
+
)
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
ONE_UPGRADER_IN_VERSION_MAP = CodeTemplate(
|
| 99 |
+
"""Upgrader({${upgrader_min_version}, ${upgrader_max_version}, "${upgrader_name}", ${bytecode_func_index}})"""
|
| 100 |
+
) # noqa: E501
|
| 101 |
+
|
| 102 |
+
ONE_OPERATOR_IN_VERSION_MAP = CodeTemplate(
|
| 103 |
+
"""
|
| 104 |
+
{std::string("${operator_name}"),
|
| 105 |
+
std::vector<Upgrader>({
|
| 106 |
+
${upgrader_list_in_version_map}
|
| 107 |
+
})},"""
|
| 108 |
+
)
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
OPERATOR_VERSION_MAP = CodeTemplate(
|
| 112 |
+
"""
|
| 113 |
+
const std::unordered_map<std::string, std::vector<Upgrader>>
|
| 114 |
+
getOperatorVersionMapForMobile() {
|
| 115 |
+
static std::unordered_map<std::string, std::vector<Upgrader>>
|
| 116 |
+
operatorVersionMapForMobile({
|
| 117 |
+
${operator_list_in_version_map}
|
| 118 |
+
});
|
| 119 |
+
return operatorVersionMapForMobile;
|
| 120 |
+
}
|
| 121 |
+
"""
|
| 122 |
+
)
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
UPGRADER_CPP_SRC = CodeTemplate(
|
| 126 |
+
MOBILE_UPGRADERS_HEADER_DESCRIPTION
|
| 127 |
+
+ """
|
| 128 |
+
#include <caffe2/serialize/versions.h>
|
| 129 |
+
#include <torch/csrc/jit/mobile/upgrader_mobile.h>
|
| 130 |
+
|
| 131 |
+
namespace c10 {
|
| 132 |
+
TypePtr parseType(const std::string& pythonStr);
|
| 133 |
+
} // namespace c10
|
| 134 |
+
|
| 135 |
+
namespace torch {
|
| 136 |
+
namespace jit {
|
| 137 |
+
|
| 138 |
+
// clang-format off
|
| 139 |
+
|
| 140 |
+
// From operator_versions_map
|
| 141 |
+
${operator_version_map}
|
| 142 |
+
|
| 143 |
+
const std::vector<ByteCodeFunctionWithOperator>& getUpgraderBytecodeList() {
|
| 144 |
+
auto generate_upgrader_bytecode_list = []() {
|
| 145 |
+
std::vector<ByteCodeFunctionWithOperator> upgrader_function_list({
|
| 146 |
+
${upgrader_bytecode}
|
| 147 |
+
});
|
| 148 |
+
for (const auto& upgrader_function : upgrader_function_list) {
|
| 149 |
+
for (const auto& op : upgrader_function.operators) {
|
| 150 |
+
upgrader_function.function.append_operator(
|
| 151 |
+
op.name,
|
| 152 |
+
op.overload_name,
|
| 153 |
+
op.num_specified_args);
|
| 154 |
+
}
|
| 155 |
+
}
|
| 156 |
+
return upgrader_function_list;
|
| 157 |
+
};
|
| 158 |
+
static std::vector<ByteCodeFunctionWithOperator> upgraderBytecodeList =
|
| 159 |
+
generate_upgrader_bytecode_list();
|
| 160 |
+
return upgraderBytecodeList;
|
| 161 |
+
}
|
| 162 |
+
|
| 163 |
+
// clang-format on
|
| 164 |
+
|
| 165 |
+
} // namespace jit
|
| 166 |
+
} // namespace torch
|
| 167 |
+
"""
|
| 168 |
+
)
|
| 169 |
+
|
| 170 |
+
UPGRADER_MOBILE_FILE_NAME = "upgrader_mobile.cpp"
|
| 171 |
+
|
| 172 |
+
UPGRADER_ELEMENT = CodeTemplate(
|
| 173 |
+
"""\
|
| 174 |
+
Upgrader({${min_version}, ${max_version}, ${operator_name}, ${index}}),
|
| 175 |
+
"""
|
| 176 |
+
)
|
| 177 |
+
|
| 178 |
+
PER_OPERATOR_UPGRADER_LIST = CodeTemplate(
|
| 179 |
+
"""\
|
| 180 |
+
{
|
| 181 |
+
std::string(${operator_name}),
|
| 182 |
+
std::vector<Upgrader>({${upgrader_list}});
|
| 183 |
+
}
|
| 184 |
+
"""
|
| 185 |
+
)
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
def construct_instruction(instruction_list_from_yaml: List[Any]) -> str:
    """Render a C++ ``std::vector<Instruction>`` initializer from YAML
    triples of (opcode name, X operand, N operand)."""
    rendered = [
        ONE_INSTRUCTION.substitute(
            operator_name=inst[0],
            X=inst[1],
            N=inst[2],
        )
        for inst in instruction_list_from_yaml
    ]
    return INSTRUCTION_LIST.substitute(
        instruction_list="".join(rendered).lstrip("\n")
    )
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
def construct_constants(constants_list_from_yaml: List[Any]) -> str:
    """Render a C++ ``std::vector<c10::IValue>`` initializer from YAML
    constants.

    Supported YAML kinds: str (quoted), bool (true/false), None (empty
    IValue) and int.  Anything else raises so that new kinds must get an
    explicit conversion rule.
    """
    rendered = []
    for raw in constants_list_from_yaml:
        # bool is checked before int on purpose: bool is an int subclass.
        if isinstance(raw, str):
            # Add quotes if it's string
            cpp_value = f'"{raw}"'
        elif isinstance(raw, bool):
            cpp_value = "true" if raw else "false"
        elif raw is None:
            cpp_value = ""
        elif isinstance(raw, int):
            cpp_value = str(raw)
        else:
            raise ValueError(
                f"The type of {raw} is {type(raw)}. "
                "Please add change in construct_constants function in gen_mobile_upgraders.py."
            )
        rendered.append(ONE_CONSTANT.substitute(constant=cpp_value))
    if not rendered:
        return CONSTANTS_LIST_EMPTY
    return CONSTANT_LIST.substitute(
        constant_list="".join(rendered).lstrip("\n")
    )
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
def construct_operators(operator_list_from_yaml: List[Any]) -> str:
    """Render a C++ ``std::vector<OperatorString>`` initializer from YAML
    triples of (operator name, overload name, num specified args)."""
    rendered = [
        ONE_OPERATOTR_STRING.substitute(
            operator_name=op[0],
            overload_name=op[1],
            num_of_args=op[2],
        )
        for op in operator_list_from_yaml
    ]
    return OPERATOR_STRING_LIST.substitute(
        operator_string_list="".join(rendered).lstrip("\n")
    )
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
def construct_types(types_tr_list_from_yaml: List[Any]) -> str:
    """Render the type table of one upgrader; an empty table gets the
    TYPE_LIST_EMPTY placeholder."""
    rendered_types = [
        ONE_TYPE.substitute(type_str=type_entry)
        for type_entry in types_tr_list_from_yaml
    ]
    if not rendered_types:
        return TYPE_LIST_EMPTY
    return TYPE_LIST.substitute(type_list="".join(rendered_types).lstrip("\n"))
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
def construct_register_size(register_size_from_yaml: int) -> str:
    """Return the register size as a decimal string.

    Raises:
        ValueError: if the YAML value is not an int (e.g. a string slipped
            through the parser).
    """
    if not isinstance(register_size_from_yaml, int):
        # Bug fix: the second fragment was missing its f-prefix, so
        # "{type(...)}" was emitted literally, and the two fragments were
        # concatenated without a separating space ("andit's").
        raise ValueError(
            f"Input register size is {register_size_from_yaml} and "
            f"its type is {type(register_size_from_yaml)}. An int type is expected."
        )
    return str(register_size_from_yaml)
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
def construct_version_maps(
    upgrader_bytecode_function_to_index_map: Dict[str, Any]
) -> str:
    """Render the operator-version map as C++ source.

    Queries the operator version map and the per-operator upgrader version
    ranges from torch._C, pairing each upgrader entry with its version range
    and with the index of its generated bytecode function.
    """
    version_map = torch._C._get_operator_version_map()
    # Sort by operator name so the generated file is deterministic.
    sorted_version_map = dict(sorted(version_map.items(), key=lambda item: item[0]))  # type: ignore[no-any-return]

    operator_entries = []
    for op_name, upgrader_entries in sorted_version_map.items():
        # TODO: remove the skip after these two operators schemas are fixed
        if op_name in EXCLUDED_OP_SET:
            continue
        upgrader_ranges = torch._C._get_upgrader_ranges(op_name)
        assert len(upgrader_ranges) == len(upgrader_entries)
        upgrader_lines = []
        for version_range, upgrader_entry in zip(upgrader_ranges, upgrader_entries):
            upgrader_name = upgrader_entry.upgrader_name
            upgrader_lines.append(
                ONE_UPGRADER_IN_VERSION_MAP.substitute(
                    upgrader_min_version=version_range.min_version,
                    upgrader_max_version=version_range.max_version,
                    upgrader_name=upgrader_name,
                    bytecode_func_index=upgrader_bytecode_function_to_index_map[
                        upgrader_name
                    ],
                )
            )
        operator_entries.append(
            ONE_OPERATOR_IN_VERSION_MAP.substitute(
                operator_name=op_name,
                upgrader_list_in_version_map="".join(upgrader_lines),
            )
        )
    return OPERATOR_VERSION_MAP.substitute(
        operator_list_in_version_map="".join(operator_entries).lstrip("\n")
    )
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
def get_upgrader_bytecode_function_to_index_map(
    upgrader_dict: List[Dict[str, Any]]
) -> Dict[str, Any]:
    """Map each emitted upgrader function name to its position in the
    generated upgrader list, skipping excluded upgraders.

    Indices are assigned in iteration order and stay contiguous because the
    counter only advances for upgraders that are actually emitted.
    """
    mapping: Dict[str, Any] = {}
    next_index = 0
    for bytecode_entry in upgrader_dict:
        for name in bytecode_entry.keys():
            if name in EXCLUE_UPGRADER_SET:
                continue
            mapping[name] = next_index
            next_index += 1
    return mapping
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
def write_cpp(cpp_path: str, upgrader_dict: List[Dict[str, Any]]) -> None:
    """Generate the mobile upgrader C++ source file under `cpp_path`.

    Builds the operator-version map and one C++ function per upgrader from
    the parsed YAML bytecode tables, then writes the assembled file as
    UTF-8 bytes.
    """
    body_parts = []
    upgrader_bytecode_function_to_index_map = (
        get_upgrader_bytecode_function_to_index_map(upgrader_dict)
    )
    version_map_src = construct_version_maps(upgrader_bytecode_function_to_index_map)
    all_upgrader_src_string = []
    for upgrader_bytecode in upgrader_dict:
        for upgrader_name, bytecode in upgrader_bytecode.items():
            # TODO: remove the skip after these two operators schemas are fixed
            if upgrader_name in EXCLUE_UPGRADER_SET:
                continue
            instruction_list_str = ""
            constant_list_str = ""
            type_list_str = ""
            register_size_str = ""
            operator_list_str = ""
            for table_name, contents in bytecode.items():
                # ByteCode[table_name] raises KeyError for unknown tables,
                # which is the desired failure mode for a malformed YAML.
                element = ByteCode[table_name]
                if element is ByteCode.instructions:
                    instruction_list_str = construct_instruction(contents)
                elif element is ByteCode.constants:
                    constant_list_str = construct_constants(contents)
                elif element is ByteCode.operators:
                    operator_list_str = construct_operators(contents)
                elif element is ByteCode.types:
                    type_list_str = construct_types(contents)
                elif element is ByteCode.register_size:
                    register_size_str = construct_register_size(contents)

            one_upgrader_function_string = ONE_UPGRADER_FUNCTION.substitute(
                upgrader_name=upgrader_name,
                instruction_list=instruction_list_str,
                constant_list=constant_list_str,
                type_list=type_list_str,
                register_size=register_size_str,
            )
            one_upgrader_src_string = ONE_UPGRADER_SRC.substitute(
                bytecode_function=one_upgrader_function_string.lstrip("\n"),
                operator_string_list=operator_list_str.lstrip("\n"),
            )
            all_upgrader_src_string.append(one_upgrader_src_string)

    upgrader_file_content = UPGRADER_CPP_SRC.substitute(
        operator_version_map=version_map_src,
        upgrader_bytecode="".join(all_upgrader_src_string).lstrip("\n"),
    )
    body_parts.append(upgrader_file_content)
    print("writing file to : ", cpp_path + "/" + UPGRADER_MOBILE_FILE_NAME)
    with open(os.path.join(cpp_path, UPGRADER_MOBILE_FILE_NAME), "wb") as out_file:
        # Bug fix: the joined body_parts were computed but the code then wrote
        # only upgrader_file_content; write the assembled output instead.
        # (Also removed the dead `body_string` local from the inner loop.)
        final_output = "".join(body_parts)
        out_file.write(final_output.encode("utf-8"))
|
| 371 |
+
|
| 372 |
+
|
| 373 |
+
def sort_upgrader(upgrader_list: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Return the upgraders sorted by each dict's first key (the upgrader
    name), so codegen output is deterministic."""
    def first_key(one_upgrader: Dict[str, Any]) -> str:
        return next(iter(one_upgrader))

    return sorted(upgrader_list, key=first_key)
|
| 378 |
+
|
| 379 |
+
|
| 380 |
+
def main() -> None:
    """Entry point: generate upgrader bytecode, sort it for determinism, and
    emit the mobile upgrader C++ file into torch/csrc/jit/mobile."""
    sorted_upgrader_list = sort_upgrader(generate_upgraders_bytecode())
    for one_upgrader in sorted_upgrader_list:
        print("after sort upgrader : ", next(iter(one_upgrader)))

    # Repo root is two directories above this script.
    pytorch_dir = Path(__file__).resolve().parents[2]
    upgrader_path = pytorch_dir / "torch" / "csrc" / "jit" / "mobile"
    write_cpp(str(upgrader_path), sorted_upgrader_list)
|
| 389 |
+
|
| 390 |
+
|
| 391 |
+
if __name__ == "__main__":
|
| 392 |
+
main()
|
llava_next/lib/python3.10/site-packages/torchgen/packaged/autograd/__init__.py
ADDED
|
File without changes
|
llava_next/lib/python3.10/site-packages/torchgen/static_runtime/__init__.py
ADDED
|
File without changes
|
llava_next/lib/python3.10/site-packages/torchgen/static_runtime/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (179 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/torchgen/static_runtime/__pycache__/config.cpython-310.pyc
ADDED
|
Binary file (7.72 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torchgen/static_runtime/__pycache__/gen_static_runtime_ops.cpython-310.pyc
ADDED
|
Binary file (7.33 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torchgen/static_runtime/__pycache__/generator.cpython-310.pyc
ADDED
|
Binary file (19.6 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torchgen/static_runtime/config.py
ADDED
|
@@ -0,0 +1,388 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, Union
|
| 2 |
+
|
| 3 |
+
from torchgen.model import NativeFunctionsGroup, NativeFunctionsViewGroup
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def func_name_base_str(g: Union[NativeFunctionsGroup, NativeFunctionsViewGroup]) -> str:
    """Return the base operator name shared by all functions in the group
    (e.g. "mean" for both `mean` and `mean.dim`)."""
    if isinstance(g, NativeFunctionsGroup):
        return str(g.functional.func.name.name.base)
    # NativeFunctionsViewGroup exposes the base name directly.
    return str(g.view.root_name)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# Base names of ops whose static-runtime kernels are hand written; codegen
# must not emit implementations for these.
is_hand_written_ops_ = frozenset(
    {
        # math / linear algebra
        "abs",
        "add",
        "addmm",
        "all",
        "any",
        "argmin",
        "bmm",
        "clamp",
        "clamp_min",
        "cumsum",
        "div",
        "fmod",
        "index_select",
        "leaky_relu",
        "linear",
        "log",
        "matmul",
        "mul",
        "narrow_copy",
        "nonzero",
        "pow",
        "remainder",
        "sigmoid",
        "sign",
        "sub",
        "tanh",
        # view / shape ops
        "detach",
        "expand_as",
        "flatten",
        "narrow",
        "reshape_as",
        "select",
        "slice",
        "softmax",
        "split",
        "squeeze",
        "transpose",
        "view",
        "where",
    }
)
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def is_hand_written(g: Union[NativeFunctionsGroup, NativeFunctionsViewGroup]) -> bool:
    """True when the group's base op name has a hand-written static-runtime
    kernel and must be skipped by codegen."""
    return func_name_base_str(g) in is_hand_written_ops_
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def override_test_values(arg_map: Dict[str, str], op_name: str, index: int) -> None:
    """Overwrite the default C++ argument expressions for ops whose generated
    test inputs would otherwise be invalid (shape/index/dtype constraints).

    arg_map maps argument names to C++ expression strings and is mutated in
    place. index selects one of two generated test cases — judging by the
    values, 0 is the small case and 1 the larger one (TODO confirm against
    the test generator). Ops not listed here keep their defaults.
    """
    assert index == 0 or index == 1
    if op_name == "addr":
        if index == 0:
            arg_map["self"] = "at::rand({6, 6})"
            arg_map["vec1"] = "at::rand({6})"
            arg_map["vec2"] = "at::rand({6})"
        else:
            arg_map["self"] = "at::rand({22, 22})"
            arg_map["vec1"] = "at::rand({22})"
            arg_map["vec2"] = "at::rand({22})"
        return
    if op_name == "mv":
        if index == 0:
            arg_map["self"] = "at::rand({6, 6})"
            arg_map["vec"] = "at::rand({6})"
        else:
            arg_map["self"] = "at::rand({22, 22})"
            arg_map["vec"] = "at::rand({22})"
        return
    if op_name == "addbmm":
        if index == 0:
            arg_map["self"] = "at::rand({6, 6})"
        else:
            arg_map["self"] = "at::rand({22, 22})"
        return
    if op_name == "cross":
        if index == 0:
            arg_map["self"] = "at::rand({3, 3, 3})"
            arg_map["other"] = "at::rand({3, 3, 3})"
        else:
            arg_map["self"] = "at::rand({22, 3, 22})"
            arg_map["other"] = "at::rand({22, 3, 22})"
        return
    if op_name == "take":
        # Indices must stay within the flattened input's element count.
        if index == 0:
            arg_map["index"] = "at::randint(0, 216, {20}, torch::kInt64)"
        else:
            arg_map["index"] = "at::randint(0, 1000, {100}, torch::kInt64)"
        return
    if op_name == "take_along_dim":
        # References the generated self tensor of the matching test case.
        if index == 0:
            arg_map["indices"] = "at::argsort(self0, 1, true)"
        else:
            arg_map["indices"] = "at::argsort(self1, 1, true)"
        return
    if op_name == "masked_select":
        if index == 0:
            arg_map["mask"] = "at::randn({6, 6, 6}) > 0.5"
        else:
            arg_map["mask"] = "at::rand({22, 22, 22}) > 0.5"
        return
    if op_name == "orgqr":
        if index == 0:
            arg_map["input2"] = "at::rand({6, 6})"
        else:
            arg_map["input2"] = "at::rand({22, 22})"
        return
    if op_name == "ormqr":
        if index == 0:
            arg_map["input2"] = "at::rand({6, 6})"
        else:
            arg_map["input2"] = "at::rand({22, 22})"
        return
    if op_name == "quantile":
        if index == 0:
            arg_map["q"] = "at::rand({6})"
            arg_map["interpolation"] = '"linear"'
        else:
            arg_map["q"] = "at::rand({22})"
            arg_map["interpolation"] = '"linear"'
        return
    if op_name == "nanquantile":
        if index == 0:
            arg_map["q"] = "at::rand({6})"
            arg_map["interpolation"] = '"linear"'
        else:
            arg_map["q"] = "at::rand({22})"
            arg_map["interpolation"] = '"linear"'
        return
    if op_name == "multi_margin_loss":
        if index == 0:
            arg_map["self"] = "at::rand({6, 6})"
            arg_map["target"] = "at::randint(6, {6}, torch::kInt64)"
            arg_map["weight"] = "at::rand({6})"
        else:
            arg_map["self"] = "at::rand({22, 22})"
            arg_map["target"] = "at::randint(22, {22}, torch::kInt64)"
            arg_map["weight"] = "at::rand({22})"
        return
    if op_name == "multilabel_margin_loss":
        if index == 0:
            arg_map["self"] = "at::rand({6, 6})"
            arg_map["target"] = "at::randint(6, {6, 6}, torch::kInt64)"
        else:
            arg_map["self"] = "at::rand({22, 22})"
            arg_map["target"] = "at::randint(22, {22, 22}, torch::kInt64)"
        return
    if op_name == "nll_loss":
        if index == 0:
            arg_map["self"] = "at::rand({6, 6})"
            arg_map["target"] = "at::randint(6, {6}, torch::kInt64)"
            arg_map["weight"] = "at::rand({6})"
        else:
            arg_map["self"] = "at::rand({22, 22})"
            arg_map["target"] = "at::randint(22, {22}, torch::kInt64)"
            arg_map["weight"] = "at::rand({22})"
        return
    if op_name == "nll_loss2d":
        if index == 0:
            arg_map["self"] = "at::rand({6, 6, 6, 6})"
            arg_map["target"] = "at::randint(6, {6, 6, 6}, torch::kInt64)"
            arg_map["weight"] = "at::rand({6})"
        else:
            arg_map["self"] = "at::rand({22, 22, 22, 22})"
            arg_map["target"] = "at::randint(22, {22, 22, 22}, torch::kInt64)"
            arg_map["weight"] = "at::rand({22})"
        return
    if op_name in (
        "fft_fft",
        "fft_ifft",
        "fft_rfft",
        "fft_irfft",
        "fft_hfft",
        "fft_ihfft",
    ):
        arg_map["norm"] = '"forward"'
        return
    if op_name == "linalg_tensorinv":
        if index == 0:
            arg_map["self"] = "at::rand({6, 6, 6, 6})"
            arg_map["ind"] = "2"
        else:
            arg_map["self"] = "at::rand({22, 22, 22, 22})"
            arg_map["ind"] = "2"
        return
    if op_name == "addmv":
        if index == 0:
            arg_map["self"] = "at::rand({2})"
            arg_map["mat"] = "at::rand({2, 2})"
            arg_map["vec"] = "at::rand({2})"
        else:
            arg_map["self"] = "at::rand({35})"
            arg_map["mat"] = "at::rand({35, 35})"
            arg_map["vec"] = "at::rand({35})"
        return
    if op_name == "acosh":
        # acosh's domain is [1, inf); shift inputs above 1.
        if index == 0:
            arg_map["self"] = "at::rand({2, 2, 2}) + at::ones({2, 2, 2})"
        else:
            arg_map["self"] = "at::rand({5, 5, 5}) + at::ones({5, 5, 5})"
        return
    if op_name == "adaptive_max_pool2d_backward":
        if index == 0:
            arg_map["grad_output"] = "at::rand({2, 2, 2}, at::kFloat)"
            arg_map["self"] = "at::rand({2, 2, 2}, at::kFloat)"
            arg_map["indices"] = "at::randint(0, 1, {2, 2, 2}, at::kLong)"
        else:
            arg_map["grad_output"] = "at::rand({3, 3, 3}, at::kFloat)"
            arg_map["self"] = "at::rand({3, 3, 3}, at::kFloat)"
            arg_map["indices"] = "at::randint(0, 1, {3, 3, 3}, at::kLong)"
        return
    if op_name == "adaptive_max_pool3d_backward":
        if index == 0:
            arg_map["grad_output"] = "at::rand({2, 2, 2, 2}, at::kFloat)"
            arg_map["self"] = "at::rand({2, 2, 2, 2}, at::kFloat)"
            arg_map["indices"] = "at::randint(0, 1, {2, 2, 2, 2}, at::kLong)"
        else:
            arg_map["grad_output"] = "at::rand({3, 3, 3, 3}, at::kFloat)"
            arg_map["self"] = "at::rand({3, 3, 3, 3}, at::kFloat)"
            arg_map["indices"] = "at::randint(0, 1, {3, 3, 3, 3}, at::kLong)"
        return
    if op_name == "bitwise_left_shift":
        # Keep shift amounts small enough to avoid int overflow.
        if index == 0:
            arg_map["self"] = "at::randint(1, 1 << 4, {6, 6, 6}, at::kInt)"
            arg_map["other"] = "at::randint(1, 26, {6, 6, 6}, at::kInt)"
        else:
            arg_map["self"] = "at::randint(1, 1 << 4, {22, 22, 22}, at::kInt)"
            arg_map["other"] = "at::randint(1, 26, {22, 22, 22}, at::kInt)"
        return
    if op_name == "bitwise_right_shift":
        if index == 0:
            arg_map["self"] = "at::randint(1 << 21, 1 << 30, {6, 6, 6}, at::kInt)"
            arg_map["other"] = "at::randint(1, 22, {6, 6, 6}, at::kInt)"
        else:
            arg_map["self"] = "at::randint(1 << 21, 1 << 30, {22, 22, 22}, at::kInt)"
            arg_map["other"] = "at::randint(1, 22, {22, 22, 22}, at::kInt)"
        return
    if op_name == "gather":
        if index == 0:
            arg_map["self"] = "at::randint(1, 100, {2,2,2}, at::kInt)"
            arg_map["dim"] = "1"
            arg_map["index"] = "at::randint(0, 1, {2,2,2}, torch::kInt64)"
            arg_map["sparse_grad"] = "false"
        else:
            arg_map["self"] = "at::randint(1, 100, {5,5,5}, at::kInt)"
            arg_map["dim"] = "1"
            arg_map["index"] = "at::randint(0, 4, {5,5,5}, torch::kInt64)"
            arg_map["sparse_grad"] = "false"
        return
    if op_name == "gelu":
        if index == 0:
            arg_map["self"] = "at::rand({6, 6, 6})"
            arg_map["approximate"] = '"tanh"'
        else:
            arg_map["self"] = "at::rand({22, 22, 22})"
            arg_map["approximate"] = '"tanh"'
        return
    if op_name == "gelu_backward":
        if index == 0:
            arg_map["grad_output"] = "at::rand({6, 6, 6})"
            arg_map["self"] = "at::rand({6, 6, 6})"
            arg_map["approximate"] = '"tanh"'
        else:
            arg_map["grad_output"] = "at::rand({22, 22, 22})"
            arg_map["self"] = "at::rand({22, 22, 22})"
            arg_map["approximate"] = '"tanh"'
        return
    if op_name == "index_add":
        if index == 0:
            arg_map["self"] = "at::rand({2})"
            arg_map["dim"] = "0"
            arg_map["index"] = "at::randint(0, 1, {2}, at::kInt)"
            arg_map["source"] = "at::rand({2})"
            arg_map["alpha"] = "2"
        else:
            arg_map["self"] = "at::rand({16})"
            arg_map["dim"] = "0"
            arg_map["index"] = "at::randint(0, 10, {16}, at::kInt)"
            arg_map["source"] = "at::rand({16})"
            arg_map["alpha"] = "2"
        return
    if op_name == "index_copy":
        if index == 0:
            arg_map["self"] = "at::rand({2})"
            arg_map["dim"] = "0"
            arg_map["index"] = "at::randint(0, 1, {2}, at::kLong)"
            arg_map["source"] = "at::rand({2})"
        else:
            arg_map["self"] = "at::rand({32})"
            arg_map["dim"] = "0"
            arg_map["index"] = "at::randint(0, 10, {32}, at::kLong)"
            arg_map["source"] = "at::rand({32})"
        return
    if op_name == "linalg_cross":
        # The cross-product dimension must have size 3.
        if index == 0:
            arg_map["self"] = "at::rand({6, 3, 6})"
            arg_map["other"] = "at::rand({6, 3, 6})"
            arg_map["dim"] = "1"
        else:
            arg_map["self"] = "at::rand({22, 3, 22})"
            arg_map["other"] = "at::rand({22, 3, 22})"
            arg_map["dim"] = "1"
        return
    if op_name == "nll_loss_backward":
        if index == 0:
            arg_map["grad_output"] = "at::rand({})"
            arg_map["self"] = "at::rand({6})"
            arg_map["target"] = "at::randint(0, 5, {6}, torch::kInt64)"
            arg_map["weight"] = "at::rand({6})"
            arg_map["reduction"] = "1"
            arg_map["ignore_index"] = "1"
            arg_map["total_weight"] = "at::rand({})"
        else:
            arg_map["grad_output"] = "at::rand({})"
            arg_map["self"] = "at::rand({36})"
            arg_map["target"] = "at::randint(0, 11, {36}, torch::kInt64)"
            arg_map["weight"] = "at::rand({36})"
            arg_map["reduction"] = "1"
            arg_map["ignore_index"] = "1"
            arg_map["total_weight"] = "at::rand({})"
        return
    if op_name in ["scatter", "scatter_add", "_scatter_reduce"]:
        if index == 0:
            arg_map["self"] = "at::randint(1, 100, {2,2,2}, torch::kInt64)"
            arg_map["index"] = "at::randint(0, 1, {2,2,2}, torch::kInt64)"
            arg_map["src"] = "at::randint(1, 100, {2,2,2}, torch::kInt64)"
        else:
            arg_map["self"] = "at::randint(1, 100, {5,5,5}, torch::kInt64)"
            arg_map["index"] = "at::randint(0, 1, {5,5,5}, torch::kInt64)"
            arg_map["src"] = "at::randint(1, 100, {5,5,5}, torch::kInt64)"
        # Only some overloads take a `reduce` argument; override it when present.
        if "reduce" in arg_map:
            arg_map["reduce"] = '"sum"' if op_name == "_scatter_reduce" else '"add"'
        return
    if op_name == "scatter_reduce":
        arg_map["reduce"] = '"mean"'
        if index == 0:
            arg_map["index"] = "at::randint(6, {6, 6, 6}, torch::kInt64)"
        else:
            arg_map["index"] = "at::randint(22, {22, 22, 22}, torch::kInt64)"
        return
    if op_name == "special_zeta":
        if index == 0:
            arg_map["self"] = "at::rand({2,2,2}, at::kDouble) + at::ones({2,2,2})"
            arg_map["other"] = "at::rand({2,2,2}, at::kDouble) + at::ones({2,2,2})"
        else:
            arg_map["self"] = "at::rand({5,5,5}, at::kDouble) + at::ones({5,5,5})"
            arg_map["other"] = "at::rand({5,5,5}, at::kDouble) + at::ones({5,5,5})"
        return
    if op_name == "_convert_indices_from_csr_to_coo":
        if index == 0:
            arg_map["crow_indices"] = "torch::tensor({1}, torch::kInt32)"
            arg_map["col_indices"] = "torch::tensor({0, 1, 0}, torch::kInt32)"
            arg_map["out_int32"] = "false"
        else:
            arg_map["crow_indices"] = "torch::tensor({0}, torch::kInt32)"
            arg_map[
                "col_indices"
            ] = "torch::tensor({0, 1, 0, 2, 1, 2, 0, 1, 0, 2, 1, 2}, torch::kInt32)"
            arg_map["out_int32"] = "false"
        return
    if op_name == "_convert_indices_from_coo_to_csr":
        if index == 0:
            arg_map["self"] = "at::randint(0, 3, {2}, at::kInt)"
            arg_map["size"] = "10"
            arg_map["out_int32"] = "false"
        else:
            arg_map["self"] = "at::randint(0, 3, {12}, at::kInt)"
            arg_map["size"] = "24"
            arg_map["out_int32"] = "false"
        return
    if op_name in ("diagonal", "linalg_diagonal"):
        arg_map["offset"] = "0"
        arg_map["dim0"] = "1"
        arg_map["dim1"] = "2"
        return
|
llava_next/lib/python3.10/site-packages/torchgen/static_runtime/gen_static_runtime_ops.py
ADDED
|
@@ -0,0 +1,228 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import itertools
|
| 3 |
+
import os
|
| 4 |
+
from typing import Sequence, TypeVar, Union
|
| 5 |
+
|
| 6 |
+
from libfb.py.log import set_simple_logging # type: ignore[import]
|
| 7 |
+
|
| 8 |
+
from torchgen import gen
|
| 9 |
+
from torchgen.context import native_function_manager
|
| 10 |
+
from torchgen.model import DispatchKey, NativeFunctionsGroup, NativeFunctionsViewGroup
|
| 11 |
+
from torchgen.static_runtime import config, generator
|
| 12 |
+
|
| 13 |
+
# Given a list of `grouped_native_functions` sorted by their op names, return a list of
|
| 14 |
+
# lists each of which groups ops that share the base name. For example, `mean` and
|
| 15 |
+
# `mean.dim` are grouped together by this function.
|
| 16 |
+
|
| 17 |
+
NativeGroupT = TypeVar(
|
| 18 |
+
"NativeGroupT",
|
| 19 |
+
bound=Union[NativeFunctionsGroup, NativeFunctionsViewGroup],
|
| 20 |
+
)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def group_functions_by_op_name(
    grouped_native_functions: Sequence[NativeGroupT],
) -> Sequence[Sequence[NativeGroupT]]:
    """Group supported ops that share a base name.

    `grouped_native_functions` must already be sorted by op name:
    itertools.groupby only merges *adjacent* items with equal keys, so an
    unsorted input would produce fragmented groups.

    Returns a list of lists, e.g. `mean` and `mean.dim` end up together.
    """
    if not grouped_native_functions:
        return []

    def is_supported(g: Union[NativeFunctionsGroup, NativeFunctionsViewGroup]) -> bool:
        # The manager sets up per-function context (error reporting etc.)
        # required by generator.is_supported.
        with native_function_manager(g):
            return generator.is_supported(g)

    eligible_ops = (g for g in grouped_native_functions if is_supported(g))
    # Fix: removed the dead `groups = []` assignment that was immediately
    # shadowed by the comprehension below.
    return [
        list(group)
        for k, group in itertools.groupby(
            eligible_ops,
            key=lambda g: config.func_name_base_str(g),
        )
    ]
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def clang_format(cpp_file_path: str) -> None:
    """Run clang-format in place on the generated C++ file.

    Formatting failures are deliberately not checked — the file is still
    usable unformatted.
    """
    import subprocess

    command = ["clang-format", "-i", cpp_file_path]
    subprocess.run(command)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def write_cpp(cpp_ops: Sequence[str], file_path: str) -> None:
    """Write the generated op-dispatcher C++ translation unit to file_path.

    The per-op C++ snippets in `cpp_ops` are joined and spliced into a fixed
    header/namespace template, then the file is run through clang-format.
    Doubled braces in the template are f-string escapes for literal braces.
    """
    code = "\n".join(cpp_ops)
    generated = f"""// @lint-ignore-every CLANGTIDY HOWTOEVEN
// AUTO-GENERATED FROM: torchgen/static_runtime/gen_static_runtime_ops.py
#include <torch/csrc/jit/runtime/static/ops.h>

#include <ATen/CPUFunctions.h>
#include <ATen/InferSize.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Parallel.h>
#include <ATen/ScalarOps.h>
#include <ATen/TensorUtils.h>
#include <ATen/cpu/vec/functional.h>
#include <ATen/cpu/vec/vec.h>
#include <ATen/native/EmbeddingBag.h>
#include <ATen/native/Fill.h>
#include <ATen/native/IndexingUtils.h>
#include <ATen/native/NonSymbolicBC.h>
#include <ATen/native/Resize.h>
#include <ATen/native/SharedReduceOps.h>
#include <ATen/native/TensorAdvancedIndexing.h>
#include <ATen/native/cpu/SerialStackImpl.h>
#include <ATen/native/layer_norm.h>
#include <ATen/native/quantized/cpu/fbgemm_utils.h>
#include <ATen/native/quantized/cpu/qembeddingbag.h>
#include <ATen/native/quantized/cpu/qembeddingbag_prepack.h>
#include <ATen/quantized/QTensorImpl.h>
#include <ATen/quantized/Quantizer.h>
#include <c10/core/ScalarType.h>
#include <c10/core/WrapDimMinimal.h>
#include <c10/util/irange.h>
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/runtime/static/impl.h>
#include <torch/csrc/jit/runtime/static/te_wrapper.h>
#include <torch/csrc/jit/runtime/vararg_functions.h>
#include <torch/csrc/jit/tensorexpr/ir.h>
#include <torch/csrc/jit/tensorexpr/ir_simplifier.h>
#include <torch/csrc/jit/tensorexpr/llvm_codegen.h>
#include <torch/csrc/jit/tensorexpr/loopnest.h>

namespace torch {{
namespace jit {{

{code}

}} // namespace jit
}} // namespace torch
"""
    with open(file_path, "w") as f:
        f.write(generated)
    clang_format(file_path)
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def write_test_cpp(cpp_ops: Sequence[str], file_path: str) -> None:
    """Write the generated gtest file for the op dispatchers to file_path.

    Mirrors write_cpp but with the test-harness preamble; the result is also
    clang-formatted.
    """
    code = "\n".join(cpp_ops)
    generated = f"""// @lint-ignore-every CLANGTIDY HOWTOEVEN
// AUTO-GENERATED FROM: torchgen/static_runtime/gen_static_runtime_ops.py
#include <gtest/gtest.h>
#include <torch/csrc/jit/runtime/static/impl.h>
#include <torch/torch.h>

#include "test_utils.h"

using namespace caffe2;
using namespace torch;
using namespace torch::jit;
using namespace torch::jit::test;
using c10::IValue;

{code}

"""
    with open(file_path, "w") as f:
        f.write(generated)
    clang_format(file_path)
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
def main() -> None:
    """Entry point: parse native_functions.yaml and emit the static-runtime
    generated op dispatcher (.cpp) and its gtest file (.cc)."""
    parser = argparse.ArgumentParser(description="Generate ATen source files")
    parser.add_argument(
        "-s",
        "--source-path",
        help="path to source directory for ATen",
        default="caffe2/aten/src/ATen",
    )
    parser.add_argument(
        "-p",
        "--generated-ops-cpp-path",
        help="path to directory to generate op dispatcher .cpp file",
        default="caffe2/torch/csrc/jit/runtime/static/generated_ops.cpp",
    )
    parser.add_argument(
        "-t",
        "--generated-ops-test-cpp-path",
        help="path to directory to generate op dispatcher .cpp file",
        default="caffe2/benchmarks/static_runtime/test_generated_ops.cc",
    )
    options = parser.parse_args()
    # Parse the ATen operator declarations (schemas + tags) once up front.
    native_yaml_path = os.path.join(options.source_path, "native/native_functions.yaml")
    tags_yaml_path = os.path.join(options.source_path, "native/tags.yaml")
    parsed_yaml = gen.parse_native_yaml(native_yaml_path, tags_yaml_path)
    native_functions, backend_indices = (
        parsed_yaml.native_functions,
        parsed_yaml.backend_indices,
    )

    op_generator = generator.GenOpDispatcher()
    test_case_generator = generator.GenOpTestCase()

    # Out-variant path: only fully grouped (functional/out) native functions.
    native_functions_groups = [
        g
        for g in gen.get_grouped_native_functions(native_functions)
        if isinstance(g, NativeFunctionsGroup)
    ]

    supported_functions_groups = group_functions_by_op_name(native_functions_groups)

    out_variant_op_result = [
        op_generator.out_variant(groups, backend_indices[DispatchKey.CPU])
        for groups in supported_functions_groups
    ]
    out_variant_test_result = [
        test_case_generator.out_variant(groups) for groups in supported_functions_groups
    ]

    # View path: groups formed around view ops rather than out variants.
    native_functions_view_groups = [
        g
        for g in gen.get_grouped_by_view_native_functions(native_functions)
        if isinstance(g, NativeFunctionsViewGroup)
    ]

    supported_functions_view_groups = group_functions_by_op_name(
        native_functions_view_groups
    )

    view_op_result = [
        op_generator.view(groups, backend_indices[DispatchKey.CPU])
        for groups in supported_functions_view_groups
    ]
    view_test_result = [
        test_case_generator.view(groups) for groups in supported_functions_view_groups
    ]

    # Blank separator between the out-variant section and the view section.
    op_result = out_variant_op_result + ["\n\n"] + view_op_result
    test_result = out_variant_test_result + ["\n\n"] + view_test_result

    write_cpp(op_result, options.generated_ops_cpp_path)
    write_test_cpp(test_result, options.generated_ops_test_cpp_path)

    # Summary statistics for the generation run.
    print(
        "\ntotal grouped native ops: %d"
        % len(gen.get_grouped_native_functions(native_functions))
    )

    print("grouped native ops with out variant: %d" % len(native_functions_groups))
    supported_functions_num = sum(
        [len(groups) for groups in supported_functions_groups]
    )
    print("generated functions groups with out variant: %d" % supported_functions_num)

    print("\nview grouped native ops: %d" % len(native_functions_view_groups))
    supported_view_functions_num = sum(
        [len(groups) for groups in supported_functions_view_groups]
    )
    print("generated functions view groups: %d" % supported_view_functions_num)

    print(
        "\noverall generated : %d"
        % (supported_functions_num + supported_view_functions_num)
    )
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
# Script entry point: plain (non-escaped) log lines, then run the generator.
if __name__ == "__main__":
    set_simple_logging(escape_newlines=False)
    main()
|
llava_next/lib/python3.10/site-packages/torchgen/static_runtime/generator.py
ADDED
|
@@ -0,0 +1,796 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import logging
|
| 3 |
+
|
| 4 |
+
import math
|
| 5 |
+
from typing import Dict, List, Optional, Sequence, Tuple, Union
|
| 6 |
+
|
| 7 |
+
import torchgen.api.cpp as cpp
|
| 8 |
+
from torchgen.context import native_function_manager
|
| 9 |
+
from torchgen.model import (
|
| 10 |
+
Argument,
|
| 11 |
+
BackendIndex,
|
| 12 |
+
BaseTy,
|
| 13 |
+
BaseType,
|
| 14 |
+
FunctionSchema,
|
| 15 |
+
NativeFunctionsGroup,
|
| 16 |
+
NativeFunctionsViewGroup,
|
| 17 |
+
OptionalType,
|
| 18 |
+
SelfArgument,
|
| 19 |
+
TensorOptionsArguments,
|
| 20 |
+
Type,
|
| 21 |
+
)
|
| 22 |
+
from torchgen.static_runtime import config
|
| 23 |
+
|
| 24 |
+
logger: logging.Logger = logging.getLogger()
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def has_alias(
    arguments: Sequence[Union[Argument, SelfArgument, TensorOptionsArguments]]
) -> bool:
    """Return True if any argument carries a non-empty alias annotation."""

    def _alias_set(candidate: object) -> object:
        # Arguments without an `annotation` attribute (or with a falsy one)
        # contribute an empty alias set.
        note = getattr(candidate, "annotation", None)
        return getattr(note, "alias_set", ()) if note else ()

    return any(_alias_set(arg) for arg in arguments)
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
# Base operator names that the static-runtime generator must skip entirely.
# `is_supported` consults this set; reasons are grouped by the comments below.
BLOCKED_OPS = frozenset(
    (
        # non cpu ops
        "sparse_sampled_addmm",
        "hspmm",
        "linalg_svdvals",
        # sparse ops
        "sspaddmm",
        "coalesce",
        "_indices",
        "indices",
        "_values",
        "values",
        "crow_indices",
        "col_indices",
        # deprecated ops
        "floor_divide",
        "ger",
        # buggy ops
        "conj_physical",  # P495807361
        "binary_cross_entropy",  # P496394764
        "arccosh",
        # uncommon ops
        "cholesky",
        "lu_solve",
        "linalg_cholesky",
        "linalg_householder_product",
        "linalg_ldl_solve",
        "_compute_linear_combination",
        # training related ops
        "_make_dual",
        # cannot call directly
        "_fw_primal",
        # no documentation
        "_index_reduce",
        # TODO: these ones got added recently and need manual inspection
        "_new_zeros_with_same_feature_meta",
        "_conj_physical",
        "binary_cross_entropy_with_logits",
        "bincount",
        "conv_tbc",
        "copy",
        "_copy_from",
        "_copy_from_and_resize",
        "count_nonzero",
        "cudnn_affine_grid_generator",
        "cudnn_affine_grid_generator_backward",
        "cudnn_grid_sampler",
        "diag_embed",
        "embedding",
        "embedding_dense_backward",
        "_embedding_bag_dense_backward",
        "_embedding_bag_per_sample_weights_backward",
        "grid_sampler_2d",
        "_grid_sampler_2d_cpu_fallback",
        "grid_sampler_3d",
        "isnan",
        "mkldnn_linear",
        "median",
        "nanmedian",
        "_sparse_sparse_matmul",
        "batch_norm_backward_elemt",
        "_euclidean_dist",
        "pixel_shuffle",
        "pixel_unshuffle",
        "channel_shuffle",
        "_reshape_nested_backward",
        "relu",
        "prelu",
        "celu",
        "slice_scatter",
        "select_scatter",
        "diagonal_scatter",
        "sum",
        "_mkldnn_transpose",
        "_nested_tensor_from_mask",
        "_nested_from_padded",
        "_nested_tensor_size",
        "_nested_from_padded_and_nested_example",
        "_standard_gamma_grad",
        "_dirichlet_grad",
        "native_norm",
        "_sparse_softmax",
        "_sparse_softmax_backward_data",
        "_sparse_log_softmax",
        "_sparse_log_softmax_backward_data",
        "zero",
        "_sparse_addmm",
        "sparse_mask",
        "_sparse_mask_projection",
        "_to_dense",
        "_coalesce",
        "_coalesced",
        "copy_sparse_to_sparse",
        "to_sparse",
        "to_sparse_csr",
        "to_sparse_csc",
        "to_mkldnn",
        "quantize_per_tensor_dynamic",
        "quantize_per_channel",
        "q_per_channel_scales",
        "q_per_channel_zero_points",
        "int_repr",
        "_make_per_channel_quantized_tensor",
        "set",
        "lift",
        "lift_fresh",
        "lift_fresh_copy",
        "masked_scatter",
        "_masked_softmax",
        "_masked_softmax_backward",
        "put",
        "index_reduce",
        "trace",
        "_cholesky_solve_helper",
        "dist",
        "max",
        "_torch_cuda_cu_linker_symbol_op",
        "glu_jvp",
        "glu_backward_jvp",
        "hardswish_backward",
        "rrelu_with_noise_backward",
        "mkldnn_adaptive_avg_pool2d_backward",
        "_adaptive_avg_pool2d_backward",
        "_adaptive_avg_pool3d_backward",
        "isinf",
        "linalg_lu_solve",
        "linalg_vecdot",
        "linalg_matrix_exp",
        "linalg_eigvalsh",
        "_test_warn_in_autograd",
        "_test_autograd_multiple_dispatch_view",
        "_test_autograd_multiple_dispatch_view_copy",
        "_segment_reduce",
        "_segment_reduce_backward",
        "_fw_primal_copy",
        "_make_dual_copy",
        "view_as_real_copy",
        "view_as_complex_copy",
        "_conj_copy",
        "_neg_view_copy",
        "diagonal_copy",
        "detach_copy",
        "squeeze_copy",
        "t_copy",
        "unsqueeze_copy",
        "_indices_copy",
        "_values_copy",
        "indices_copy",
        "values_copy",
        "crow_indices_copy",
        "col_indices_copy",
        "ccol_indices",
        "ccol_indices_copy",
        "row_indices",
        "row_indices_copy",
        "unfold_copy",
        "alias_copy",
        "_triton_multi_head_attention",
        "special_airy_ai",
        "special_bessel_j0",
        "special_bessel_j1",
        "special_bessel_y0",
        "special_bessel_y1",
        "special_chebyshev_polynomial_t",
        "special_chebyshev_polynomial_u",
        "special_chebyshev_polynomial_v",
        "special_chebyshev_polynomial_w",
        "special_hermite_polynomial_h",
        "special_hermite_polynomial_he",
        "special_laguerre_polynomial_l",
        "special_legendre_polynomial_p",
        "special_modified_bessel_i0",
        "special_modified_bessel_i1",
        "special_modified_bessel_k0",
        "special_modified_bessel_k1",
        "special_scaled_modified_bessel_k0",
        "special_scaled_modified_bessel_k1",
        "special_shifted_chebyshev_polynomial_t",
        "special_shifted_chebyshev_polynomial_u",
        "special_shifted_chebyshev_polynomial_v",
        "special_shifted_chebyshev_polynomial_w",
        "special_spherical_bessel_j0",
        "_foobar",
        "_nested_tensor_strides",
    )
)
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
def is_supported(g: Union[NativeFunctionsGroup, NativeFunctionsViewGroup]) -> bool:
    """Decide whether code can be generated for this op group.

    Rejects hand-written ops, explicitly blocked ops, ops with argument types
    the IValue conversion table cannot handle, and ops whose return type or
    aliasing behavior the static runtime cannot support.  Logs the reason for
    every rejection.
    """
    base_op_name = ""
    func = None
    if isinstance(g, NativeFunctionsViewGroup):
        base_op_name = g.view.root_name
        func = g.view.func
    else:
        # Out-variant group: derive the base name from the out schema.
        base_op_name = g.out.func.name.name.base
        func = g.out.func
    if config.is_hand_written(g):
        logger.info("HAND WRITTEN: %s", base_op_name)
        return False
    if base_op_name in BLOCKED_OPS:
        logger.info("BLOCKED: %s", base_op_name)
        return False
    for arg in func.schema_order_arguments():
        maybe_method = ivalue_type_conversion_method(arg.type)
        if not maybe_method:
            # Type converting is unsupported yet.
            logger.info("NOT SUPPORTED TYPE CONVERTING: %s", func)
            return False

    if isinstance(g, NativeFunctionsViewGroup):
        # TODO: stop doing type tests by converting to C++ and then testing
        # the string, just test the dang thing directly
        if "at::Tensor" != cpp.returns_type(func.returns, symint=False).cpp_type():
            # Returns a non-Tensor value.
            logger.info("NON-TENSOR RET TYPE: %s", str(func))
            return False
        return True

    # For out variant ops, we need to check the arguments of its functional func.
    for arg in g.functional.func.schema_order_arguments():
        maybe_method = ivalue_type_conversion_method(arg.type)
        if not maybe_method:
            # Type converting is unsupported yet.
            logger.info("NOT SUPPORTED TYPE CONVERTING: %s", g.functional.func)
            return False

    if not g.structured:
        # In case of unstructured op, we check if it has out variant implementation.
        # The out variant implementation satisfies the minimum requirement that it has the output tensor as the last
        # parameter.
        if (
            not hasattr(g, "out")
            or not str(func).endswith("Tensor(a!) out) -> Tensor(a!)")
            or not str(func.name).endswith(".out")
        ):
            return False
    # TODO: stop type testing by converting to C++
    if "at::Tensor &" != cpp.returns_type(func.returns, symint=False).cpp_type():
        logger.info("NON_TENSOR RET TYPE: %s", func)
        return False
    if has_alias(func.arguments.non_out):
        # This op may create an alias of inputs.
        logger.info("INPUTS ALIAS: %s", base_op_name)
        return False
    return True
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
def ivalue_type_conversion_method(
    arg_type: Union[BaseType, OptionalType, Type]
) -> Optional[Tuple[bool, str]]:
    """
    Return the method call expression of `c10::ivalue' to convert its contained value to
    the expected value of `arg_type` type. For example, for `arg_type` == BaseTy.Tensor,
    this function returns ".toTensor()", so that it can be appended to the ivalue's
    variable name to get the value of the expected type.

    Returns a (is_reference, method_call) pair, or None when the type is not
    yet supported (e.g. list types or unknown base types).
    """
    # Per base type: entry [0] serves required (BaseType) arguments,
    # entry [1] serves optional ones.  The bool says whether the extracted
    # value should be bound by const reference.
    type_conversion_methods = {
        BaseTy.Tensor: ((True, "toTensor()"), (False, "toOptional<at::Tensor>()")),
        BaseTy.int: ((False, "toInt()"), (False, "toOptional<int64_t>()")),
        BaseTy.bool: ((False, "toBool()"), (False, "toOptional<bool>()")),
        BaseTy.Scalar: ((False, "toScalar()"), (False, "toOptional<at::Scalar>()")),
        BaseTy.ScalarType: (
            (False, "toScalarType()"),
            (False, "toOptional<at::ScalarType>()"),
        ),
        BaseTy.str: (
            (False, "toStringView()"),
            (False, "toOptional<c10::string_view>()"),
        ),
    }

    if isinstance(arg_type, BaseType):
        base_ty_object = arg_type.name
    elif isinstance(arg_type, OptionalType):
        if not isinstance(arg_type.elem, BaseType):
            # ListType is currently unsupported.
            return None
        base_ty_object = arg_type.elem.name
    else:
        return None

    # Single lookup instead of `in` check followed by subscription.
    methods = type_conversion_methods.get(base_ty_object)
    if methods is None:
        return None
    return methods[0] if isinstance(arg_type, BaseType) else methods[1]
|
| 330 |
+
|
| 331 |
+
|
| 332 |
+
# Ops whose generated test tensors must hold integral values (e.g. bitwise
# and index-conversion ops reject floating-point inputs).
should_use_int_tensor_ops_ = frozenset(
    {
        "bitwise_not",
        "bitwise_and",
        "bitwise_or",
        "bitwise_xor",
        "bitwise_left_shift",
        "bitwise_right_shift",
        "gcd",
        "lcm",
        "scatter",
        "gather",
        "_convert_indices_from_coo_to_csr",
        "_convert_indices_from_csr_to_coo",
    }
)
# Ops whose generated test tensors must be complex-valued.
should_use_complex_tensor_ops_ = frozenset({"view_as_real", "imag", "_conj"})


def should_use_int_tensor(op_name: str) -> bool:
    """True when *op_name* requires integer-typed test tensors."""
    return op_name in should_use_int_tensor_ops_


def should_use_complex_tensor(op_name: str) -> bool:
    """True when *op_name* requires complex-typed test tensors."""
    return op_name in should_use_complex_tensor_ops_
|
| 357 |
+
|
| 358 |
+
|
| 359 |
+
test_tensor_dim_ops_1_ = frozenset(
|
| 360 |
+
(
|
| 361 |
+
"addmv",
|
| 362 |
+
"index_add",
|
| 363 |
+
"_convert_indices_from_coo_to_csr",
|
| 364 |
+
"_convert_indices_from_csr_to_coo",
|
| 365 |
+
"nll_loss_backward",
|
| 366 |
+
"dot",
|
| 367 |
+
"vdot",
|
| 368 |
+
"outer",
|
| 369 |
+
"ger",
|
| 370 |
+
)
|
| 371 |
+
)
|
| 372 |
+
test_tensor_dim_ops_2_ = frozenset(
|
| 373 |
+
("addmm", "mm", "nuclear_norm", "diag", "_addmm_activation", "matrix_H", "t")
|
| 374 |
+
)
|
| 375 |
+
|
| 376 |
+
|
| 377 |
+
def test_tensor_dim(op_name: str) -> int:
|
| 378 |
+
if op_name in test_tensor_dim_ops_1_:
|
| 379 |
+
return 1
|
| 380 |
+
if op_name in test_tensor_dim_ops_2_:
|
| 381 |
+
return 2
|
| 382 |
+
return 3
|
| 383 |
+
|
| 384 |
+
|
| 385 |
+
test_tensor_shapes_string = '{"view_as_complex": "{2, 2}"}'
|
| 386 |
+
test_tensor_shape_json: Dict[str, str] = json.loads(test_tensor_shapes_string)
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
def test_tensor_shape(op_name: str) -> str:
|
| 390 |
+
if op_name in test_tensor_shape_json:
|
| 391 |
+
return test_tensor_shape_json[op_name]
|
| 392 |
+
else:
|
| 393 |
+
return ""
|
| 394 |
+
|
| 395 |
+
|
| 396 |
+
def test_value_expression(
    arg_type: Union[BaseType, OptionalType, Type], index: int, op_name: str
) -> str:
    """C++ expression producing a test value for one argument of *op_name*.

    Tensor arguments get a randomly generated tensor whose dtype and shape are
    chosen per-op; scalar-like arguments get fixed literal values.
    """

    def _tensor_expr() -> str:
        shape_literal = test_tensor_shape(op_name)
        if not shape_literal:
            # No explicit shape configured: spread ~16 (first arg) or ~64
            # elements across the op's expected rank, rounded up to even.
            element_budget = 16 if index == 0 else 64
            rank = test_tensor_dim(op_name)
            extent = math.ceil(element_budget / float(rank))
            extent += extent % 2
            shape_literal = "{%s}" % (",".join([str(extent)] * rank))
        if should_use_int_tensor(op_name):
            return f"at::randint(1, 100, {shape_literal}, at::kInt)"
        if should_use_complex_tensor(op_name):
            return f"at::randn({shape_literal}, at::kComplexFloat)"
        return f"at::rand({shape_literal})"

    value_expressions = {
        BaseTy.Tensor: _tensor_expr(),
        BaseTy.int: "1",
        BaseTy.bool: "false",
        BaseTy.Scalar: "2",
        BaseTy.ScalarType: "at::ScalarType::Float",
        BaseTy.str: '"floor"',
    }

    if isinstance(arg_type, BaseType):
        base_ty_object = arg_type.name
    else:
        assert isinstance(arg_type, OptionalType) and isinstance(
            arg_type.elem, BaseType
        )
        base_ty_object = arg_type.elem.name
    assert base_ty_object in value_expressions, "not expected type"
    return value_expressions[base_ty_object]
|
| 433 |
+
|
| 434 |
+
|
| 435 |
+
def generate_test_value_definitions(schema: FunctionSchema, index: int) -> str:
    """C++ `auto <name><index> = <expr>;` definitions for every schema argument."""
    assert not schema.is_out_fn()
    op_base_name = schema.name.name.base
    value_by_arg = {
        arg.name: test_value_expression(arg.type, index, op_base_name)
        for arg in schema.schema_order_arguments()
    }
    # Let the per-op config replace any auto-generated values.
    config.override_test_values(value_by_arg, op_base_name, index)
    definitions = [
        f"auto {arg_name}{index} = {arg_value}"
        for arg_name, arg_value in value_by_arg.items()
    ]
    return ";\n  ".join(definitions) + ";"
|
| 447 |
+
|
| 448 |
+
|
| 449 |
+
def generate_test_value_names(schema: FunctionSchema, index: int) -> str:
    """Comma-separated list of the generated argument variable names."""
    assert not schema.is_out_fn()
    names = [f"{arg.name}{index}" for arg in schema.schema_order_arguments()]
    return ",".join(names)
|
| 452 |
+
|
| 453 |
+
|
| 454 |
+
# Mapping from schema base types to TorchScript IR type strings.  Scalar and
# ScalarType are both represented as `int` in the generated IR.
generate_test_ir_arguments_base_ty_to_type_str_ = {
    BaseTy.Tensor: "Tensor",
    BaseTy.int: "int",
    BaseTy.float: "float",
    BaseTy.str: "str",
    BaseTy.Scalar: "int",
    BaseTy.ScalarType: "int",
    BaseTy.bool: "bool",
}


def generate_test_ir_arguments(
    schema: FunctionSchema,
) -> List[Tuple[str, Optional[str]]]:
    """Return (ir_name, ir_type) pairs for every argument of *schema*.

    The IR name is the argument name prefixed with `%`; the type string is
    None for base types without an IR mapping, and gets a trailing `?` when
    the schema type is optional.
    """

    def ir_argument(arg: Argument) -> Tuple[str, Optional[str]]:
        t = arg.type
        add_optional = False
        if isinstance(t, OptionalType):
            t = t.elem
            add_optional = True
        assert isinstance(t, BaseType)
        # Single lookup via .get instead of `in` plus subscription.
        type_str = generate_test_ir_arguments_base_ty_to_type_str_.get(t.name)
        if type_str and add_optional:
            type_str = f"{type_str}?"
        return ("%" + arg.name, type_str)

    return [ir_argument(arg) for arg in schema.schema_order_arguments()]
|
| 483 |
+
|
| 484 |
+
|
| 485 |
+
def generate_arg_extraction(schema: FunctionSchema) -> str:
    """C++ statements pulling each argument out of the ProcessedNode inputs."""
    extractions = []
    for position, arg in enumerate(schema.schema_order_arguments()):
        conversion = ivalue_type_conversion_method(arg.type)
        assert conversion
        by_reference, method_call = conversion
        ref_qualifier = "&" if by_reference else ""
        extractions.append(
            f"const auto{ref_qualifier} {arg.name} = p_node->Input({position}).{method_call}"
        )
    return ";\n    ".join(extractions) + ";"
|
| 496 |
+
|
| 497 |
+
|
| 498 |
+
def get_kernel_name(g: NativeFunctionsGroup, backend_index: BackendIndex) -> str:
    """Kernel symbol for the functional variant.

    Falls back to the schema-derived C++ name for structured ops or when the
    backend has no registered kernel.
    """
    registered = backend_index.get_kernel(g.functional)
    if not g.structured and registered is not None:
        return registered.kernel
    return cpp.name(g.functional.func)
|
| 503 |
+
|
| 504 |
+
|
| 505 |
+
def get_out_kernel_name(g: NativeFunctionsGroup, backend_index: BackendIndex) -> str:
    """Kernel symbol for the out variant.

    Falls back to the schema-derived C++ name for structured ops or when the
    backend has no registered kernel.
    """
    registered = backend_index.get_kernel(g.out)
    if not g.structured and registered is not None:
        return registered.kernel
    return cpp.name(g.out.func)
|
| 510 |
+
|
| 511 |
+
|
| 512 |
+
def generate_non_out_variant_call(
    g: NativeFunctionsGroup, backend_index: BackendIndex
) -> str:
    """C++ call expression for the functional (non-out) variant of the group."""
    schema = g.functional.func
    assert not schema.is_out_fn()
    ns = "cpu" if g.structured else "native"
    kernel = get_kernel_name(g, backend_index)
    joined_args = ",".join(arg.name for arg in schema.schema_order_arguments())
    return f"at::{ns}::{kernel}({joined_args})"
|
| 521 |
+
|
| 522 |
+
|
| 523 |
+
def generate_call_to_view_ops(
    g: NativeFunctionsViewGroup, backend_index: BackendIndex
) -> str:
    """C++ call expression invoking the native view kernel for this group."""
    schema = g.view.func
    kernel = cpp.name(schema)
    registered = backend_index.get_kernel(g.view)
    if registered:
        kernel = registered.kernel
    joined_args = ",".join(arg.name for arg in schema.schema_order_arguments())
    return f"at::native::{kernel}({joined_args})"
|
| 534 |
+
|
| 535 |
+
|
| 536 |
+
def generate_out_variant_call(
    g: NativeFunctionsGroup, backend_index: BackendIndex
) -> str:
    """Build the C++ call expression for the out variant of *g*.

    Structured kernels take the output tensor(s) first; unstructured out
    kernels take their single output tensor last, after all inputs.
    """
    schema = g.out.func
    assert schema.is_out_fn()
    kernel_name = get_out_kernel_name(g, backend_index)
    if g.structured:
        # structured op starts with the output tensor argument.
        arg_names = [out_arg.name for out_arg in schema.arguments.out]
    else:
        arg_names = []
        for arg in schema.arguments.non_out:
            if isinstance(arg, SelfArgument):
                arg_names.append(arg.argument.name)
            else:
                assert isinstance(arg, Argument)
                arg_names.append(arg.name)
        # Unstructured out variants append their single output tensor last.
        assert len(schema.arguments.out) == 1
        arg_names.append(schema.arguments.out[0].name)
    cpp_arg_names = ",".join(arg_names)
    namespace_name = "cpu" if g.structured else "native"
    return f"at::{namespace_name}::{kernel_name}({cpp_arg_names})"
|
| 560 |
+
|
| 561 |
+
|
| 562 |
+
# Ops whose generated tests must not assert on output-buffer resizing.
no_memory_resize_ops = frozenset(
    (
        "isin.Scalar_Tensor",
        "index_add",
        "dot",
        "vdot",
        "nuclear_norm",
        "histc",
        "l1_loss",
        "multi_margin_loss",
        "multilabel_margin_loss",
        "nll_loss",
        "nll_loss2d",
        "prod",
    )
)


def should_check_resize(schema: FunctionSchema) -> bool:
    """True unless the op (name with overload, no signature) is exempted above."""
    schema_text = str(schema)
    op_with_overload = schema_text[: schema_text.find("(")]
    return op_with_overload not in no_memory_resize_ops
|
| 584 |
+
|
| 585 |
+
|
| 586 |
+
def op_name_from_group(g: NativeFunctionsGroup) -> str:
    """Base operator name (without overload suffix) of the functional schema."""
    operator_name = g.functional.func.name.name
    return operator_name.base
|
| 588 |
+
|
| 589 |
+
|
| 590 |
+
class GenOpDispatcher:
    """Emits the C++ registration blocks that plug generated kernels into
    the static runtime's operator dispatcher.

    NOTE(review): the embedded f-string templates are whitespace-sensitive —
    their layout is emitted verbatim into generated C++ source.
    """

    def out_variant(
        self, groups: Sequence[NativeFunctionsGroup], backend_index: BackendIndex
    ) -> str:
        """Generate one REGISTER_OPERATOR_FUNCTOR block covering all overload
        groups of a single base op.

        Each group contributes one schema-matched branch to the functor body;
        `groups` is presumably non-mixed (all sharing one base name) — the
        op name is taken from `groups[0]`.  Returns "" for an empty input.
        """
        if not groups:
            return ""
        generated_type_variants = []
        for g in groups:
            with native_function_manager(g):
                assert is_supported(g)
                assert isinstance(g, NativeFunctionsGroup)
                generated_type_variant = self.out_variant_op_generator(g, backend_index)
                generated_type_variants.append(generated_type_variant)
        op_name = op_name_from_group(groups[0])
        body = "\n".join(generated_type_variants)
        # Unmatched schemas fall through to LogAndDumpSchema + nullptr.
        generated = f"""
REGISTER_OPERATOR_FUNCTOR(
    aten::{op_name},
    aten_{op_name},
    [](Node* n) -> SROperator {{
      {body}
      LogAndDumpSchema(n);
      return nullptr;
    }});
"""
        return generated

    def view(
        self, groups: Sequence[NativeFunctionsViewGroup], backend_index: BackendIndex
    ) -> str:
        """Generate one REGISTER_NATIVE_OPERATOR_FUNCTOR block covering all
        view-op overload groups of a single base op.

        Mirrors out_variant() but uses the native-operator registration macro
        and view-op call generation.  Returns "" for an empty input.
        """
        if not groups:
            return ""
        generated_type_variants = []
        for g in groups:
            with native_function_manager(g):
                assert is_supported(g)
                assert isinstance(g, NativeFunctionsViewGroup)
                generated_type_variant = self.view_op_generator(g, backend_index)
                generated_type_variants.append(generated_type_variant)
        op_name = config.func_name_base_str(groups[0])
        body = "\n".join(generated_type_variants)
        generated = f"""
REGISTER_NATIVE_OPERATOR_FUNCTOR(
    aten::{op_name},
    aten_{op_name},
    [](Node* n) -> SROperator {{
      {body}
      LogAndDumpSchema(n);
      return nullptr;
    }});
"""
        return generated

    def out_variant_op_generator(
        self, g: NativeFunctionsGroup, backend_index: BackendIndex
    ) -> str:
        """Generate the schema-matched C++ branch for one out-variant group.

        The emitted lambda calls the functional variant when the output slot
        is unset, otherwise resizes the existing output to zero and calls the
        out-variant kernel in place.
        """
        functional = g.functional
        schema = str(functional.func)
        populated_argument = generate_arg_extraction(g.functional.func)
        functional_variant_call = generate_non_out_variant_call(g, backend_index)
        # Out variants handled here have exactly one output tensor.
        assert len(g.out.func.arguments.out) == 1
        out_variable_name = str(g.out.func.arguments.out[0].name)
        out_variant_call = generate_out_variant_call(g, backend_index)
        generated = f"""
  if (n->matches(torch::schema("aten::{schema}"))) {{
    return [](ProcessedNode* p_node) {{
      {populated_argument}
      if (p_node->Output(0).isNone()) {{
        p_node->Output(0) = {functional_variant_call};
        return;
      }}
      auto& {out_variable_name} = p_node->Output(0).toTensor();
      fastResizeToZero({out_variable_name});
      {out_variant_call};
    }};
  }}"""
        return generated

    def view_op_generator(
        self, g: NativeFunctionsViewGroup, backend_index: BackendIndex
    ) -> str:
        """Generate the schema-matched C++ branch for one view-op group.

        View ops have no out variant: the lambda always writes the call
        result straight into Output(0).
        """
        schema = str(g.view.func)
        populated_argument = generate_arg_extraction(g.view.func)
        functional_variant_call = generate_call_to_view_ops(g, backend_index)
        generated = f"""
  if (n->matches(torch::schema("aten::{schema}"))) {{
    return [](ProcessedNode* p_node) {{
      {populated_argument}
      p_node->Output(0) = {functional_variant_call};
    }};
  }}"""
        return generated
|
| 682 |
+
|
| 683 |
+
|
| 684 |
+
class GenOpTestCase:
    """Emits C++ gtest TEST(...) cases exercising the generated static
    runtime operators.

    NOTE(review): the embedded f-string templates are whitespace-sensitive —
    their layout is emitted verbatim into generated C++ test source.
    """

    def out_variant(self, groups: Sequence[NativeFunctionsGroup]) -> str:
        """Generate one TEST case per out-variant overload group, joined by
        newlines.  Returns "" for an empty input."""
        if not groups:
            return ""
        generated_type_variants = []
        for g in groups:
            with native_function_manager(g):
                assert is_supported(g)
                assert isinstance(g, NativeFunctionsGroup)
                generated_type_variant = self.out_variant_op_test_case_generator(g)
                generated_type_variants.append(generated_type_variant)
        return "\n".join(generated_type_variants)

    def view(self, groups: Sequence[NativeFunctionsViewGroup]) -> str:
        """Generate one TEST case per view-op overload group, joined by
        newlines.  Returns "" for an empty input."""
        if not groups:
            return ""
        generated_type_variants = []
        for g in groups:
            with native_function_manager(g):
                assert is_supported(g)
                assert isinstance(g, NativeFunctionsViewGroup)
                generated_type_variant = self.view_op_test_case_generator(g)
                generated_type_variants.append(generated_type_variant)
        return "\n".join(generated_type_variants)

    def out_variant_op_test_case_generator(self, g: NativeFunctionsGroup) -> str:
        """Generate one C++ TEST for an out-variant group.

        The test runs the op twice with differently-sized inputs (args /
        args2) so the static runtime's output-resizing path is exercised;
        whether resize is checked is controlled by should_check_resize().
        Only single-Tensor-return schemas are supported.
        """
        schema = g.functional.func
        schema_str = str(schema)
        assert schema_str.find("(") > 0
        # e.g. "isin.Scalar_Tensor" -> "isin_Scalar_Tensor": a valid C++
        # identifier suffix for the TEST name.
        type_variant_op_name = schema_str[: schema_str.find("(")].replace(".", "_")
        op_name = op_name_from_group(g)
        assert type_variant_op_name.startswith(op_name)

        arg_types = generate_test_ir_arguments(schema)
        # arg_type is None for untyped IR arguments; otherwise "name: Type".
        arg_declarations = ", ".join(
            (
                arg_name if arg_type is None else f"{arg_name}: {arg_type}"
                for arg_name, arg_type in arg_types
            )
        )
        arg_names = ", ".join((arg_name for arg_name, _ in arg_types))
        # This generator only handles ops returning exactly one Tensor.
        assert (
            len(schema.returns) == 1
            and isinstance(schema.returns[0].type, BaseType)
            and schema.returns[0].type.name is BaseTy.Tensor
        )
        # Index 0 / 1 select two differently-sized sets of sample inputs.
        test_value_definitions = generate_test_value_definitions(schema, 0)
        test_value_names = generate_test_value_names(schema, 0)
        test_value_definitions2 = generate_test_value_definitions(schema, 1)
        test_value_names2 = generate_test_value_names(schema, 1)
        check_resize = "true" if should_check_resize(schema) else "false"
        generated = f"""
TEST(StaticRuntime, autogen_{type_variant_op_name}) {{
  const std::string script = R"IR(
    graph({arg_declarations}):
        %bias: None = prim::Constant()
        %ret = aten::{op_name}({arg_names})
        %cloned = aten::clone(%ret, %bias)
        return (%cloned)
  )IR";

  {test_value_definitions}
  std::vector<IValue> args{{{test_value_names}}};
  testStaticRuntime(script, args, {{}}, /*use_allclose=*/false, /*use_equalnan=*/false, /*check_resize=*/{check_resize});

  {test_value_definitions2}
  std::vector<IValue> args2{{{test_value_names2}}};
  testStaticRuntime(script, args, args2, /*use_allclose=*/false, /*use_equalnan=*/false, /*check_resize=*/{check_resize});

}}
"""
        return generated

    def view_op_test_case_generator(self, g: NativeFunctionsViewGroup) -> str:
        """Generate one C++ TEST for a view-op group.

        Unlike out variants, view ops are run with a single input set and no
        resize check.  Only single-Tensor-return schemas are supported.
        """
        schema = g.view.func
        schema_str = str(schema)
        assert schema_str.find("(") > 0
        type_variant_op_name = schema_str[: schema_str.find("(")].replace(".", "_")
        op_name = g.view.root_name
        assert type_variant_op_name.startswith(op_name)

        arg_types = generate_test_ir_arguments(schema)
        arg_declarations = ", ".join(
            (
                arg_name if arg_type is None else f"{arg_name}: {arg_type}"
                for arg_name, arg_type in arg_types
            )
        )
        arg_names = ", ".join((arg_name for arg_name, _ in arg_types))
        assert (
            len(schema.returns) == 1
            and isinstance(schema.returns[0].type, BaseType)
            and schema.returns[0].type.name is BaseTy.Tensor
        )
        test_value_definitions = generate_test_value_definitions(schema, 0)
        test_value_names = generate_test_value_names(schema, 0)
        generated = f"""
TEST(StaticRuntime, autogen_{type_variant_op_name}) {{
  const std::string script = R"IR(
    graph({arg_declarations}):
        %bias: None = prim::Constant()
        %ret = aten::{op_name}({arg_names})
        %cloned = aten::clone(%ret, %bias)
        return (%cloned)
  )IR";

  {test_value_definitions}
  std::vector<IValue> args{{{test_value_names}}};
  testStaticRuntime(script, args);
}}
"""

        return generated
|
vlmpy310/lib/python3.10/site-packages/jupyter_server/services/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (177 Bytes). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/jupyter_server/services/api/__init__.py
ADDED
|
File without changes
|
vlmpy310/lib/python3.10/site-packages/jupyter_server/services/api/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (181 Bytes). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/jupyter_server/services/api/__pycache__/handlers.cpython-310.pyc
ADDED
|
Binary file (4.03 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/jupyter_server/services/api/api.yaml
ADDED
|
@@ -0,0 +1,975 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
swagger: "2.0"
|
| 2 |
+
info:
|
| 3 |
+
title: Jupyter Server API
|
| 4 |
+
description: Server API
|
| 5 |
+
version: "5"
|
| 6 |
+
contact:
|
| 7 |
+
name: Jupyter Project
|
| 8 |
+
url: https://jupyter.org
|
| 9 |
+
# will be prefixed to all paths
|
| 10 |
+
basePath: /
|
| 11 |
+
produces:
|
| 12 |
+
- application/json
|
| 13 |
+
consumes:
|
| 14 |
+
- application/json
|
| 15 |
+
parameters:
|
| 16 |
+
kernel:
|
| 17 |
+
name: kernel_id
|
| 18 |
+
required: true
|
| 19 |
+
in: path
|
| 20 |
+
description: kernel uuid
|
| 21 |
+
type: string
|
| 22 |
+
format: uuid
|
| 23 |
+
session:
|
| 24 |
+
name: session
|
| 25 |
+
required: true
|
| 26 |
+
in: path
|
| 27 |
+
description: session uuid
|
| 28 |
+
type: string
|
| 29 |
+
format: uuid
|
| 30 |
+
path:
|
| 31 |
+
name: path
|
| 32 |
+
required: true
|
| 33 |
+
in: path
|
| 34 |
+
description: file path
|
| 35 |
+
type: string
|
| 36 |
+
permissions:
|
| 37 |
+
name: permissions
|
| 38 |
+
type: string
|
| 39 |
+
required: false
|
| 40 |
+
in: query
|
| 41 |
+
description: |
|
| 42 |
+
JSON-serialized dictionary of `{"resource": ["action",]}`
|
| 43 |
+
(dict of lists of strings) to check.
|
| 44 |
+
The same dictionary structure will be returned,
|
| 45 |
+
containing only the actions for which the user is authorized.
|
| 46 |
+
checkpoint_id:
|
| 47 |
+
name: checkpoint_id
|
| 48 |
+
required: true
|
| 49 |
+
in: path
|
| 50 |
+
description: Checkpoint id for a file
|
| 51 |
+
type: string
|
| 52 |
+
section_name:
|
| 53 |
+
name: section_name
|
| 54 |
+
required: true
|
| 55 |
+
in: path
|
| 56 |
+
description: Name of config section
|
| 57 |
+
type: string
|
| 58 |
+
terminal_id:
|
| 59 |
+
name: terminal_id
|
| 60 |
+
required: true
|
| 61 |
+
in: path
|
| 62 |
+
description: ID of terminal session
|
| 63 |
+
type: string
|
| 64 |
+
|
| 65 |
+
paths:
|
| 66 |
+
/api/:
|
| 67 |
+
get:
|
| 68 |
+
summary: Get the Jupyter Server version
|
| 69 |
+
description: |
|
| 70 |
+
This endpoint returns only the Jupyter Server version.
|
| 71 |
+
It does not require any authentication.
|
| 72 |
+
responses:
|
| 73 |
+
200:
|
| 74 |
+
description: Jupyter Server version information
|
| 75 |
+
schema:
|
| 76 |
+
type: object
|
| 77 |
+
properties:
|
| 78 |
+
version:
|
| 79 |
+
type: string
|
| 80 |
+
description: The Jupyter Server version number as a string.
|
| 81 |
+
|
| 82 |
+
/api/contents/{path}:
|
| 83 |
+
parameters:
|
| 84 |
+
- $ref: "#/parameters/path"
|
| 85 |
+
get:
|
| 86 |
+
summary: Get contents of file or directory
|
| 87 |
+
description: "A client can optionally specify a type and/or format argument via URL parameter. When given, the Contents service shall return a model in the requested type and/or format. If the request cannot be satisfied, e.g. type=text is requested, but the file is binary, then the request shall fail with 400 and have a JSON response containing a 'reason' field, with the value 'bad format' or 'bad type', depending on what was requested."
|
| 88 |
+
tags:
|
| 89 |
+
- contents
|
| 90 |
+
parameters:
|
| 91 |
+
- name: type
|
| 92 |
+
in: query
|
| 93 |
+
description: File type ('file', 'directory')
|
| 94 |
+
type: string
|
| 95 |
+
enum:
|
| 96 |
+
- file
|
| 97 |
+
- directory
|
| 98 |
+
- name: format
|
| 99 |
+
in: query
|
| 100 |
+
description: "How file content should be returned ('text', 'base64')"
|
| 101 |
+
type: string
|
| 102 |
+
enum:
|
| 103 |
+
- text
|
| 104 |
+
- base64
|
| 105 |
+
- name: content
|
| 106 |
+
in: query
|
| 107 |
+
description: "Return content (0 for no content, 1 for return content)"
|
| 108 |
+
type: integer
|
| 109 |
+
- name: hash
|
| 110 |
+
in: query
|
| 111 |
+
description: "May return hash hexdigest string of content and the hash algorithm (0 for no hash - default, 1 for return hash). It may be ignored by the content manager."
|
| 112 |
+
type: integer
|
| 113 |
+
responses:
|
| 114 |
+
404:
|
| 115 |
+
description: No item found
|
| 116 |
+
400:
|
| 117 |
+
description: Bad request
|
| 118 |
+
schema:
|
| 119 |
+
type: object
|
| 120 |
+
properties:
|
| 121 |
+
error:
|
| 122 |
+
type: string
|
| 123 |
+
description: Error condition
|
| 124 |
+
reason:
|
| 125 |
+
type: string
|
| 126 |
+
description: Explanation of error reason
|
| 127 |
+
200:
|
| 128 |
+
description: Contents of file or directory
|
| 129 |
+
headers:
|
| 130 |
+
Last-Modified:
|
| 131 |
+
description: Last modified date for file
|
| 132 |
+
type: string
|
| 133 |
+
format: dateTime
|
| 134 |
+
schema:
|
| 135 |
+
$ref: "#/definitions/Contents"
|
| 136 |
+
500:
|
| 137 |
+
description: Model key error
|
| 138 |
+
post:
|
| 139 |
+
summary: Create a new file in the specified path
|
| 140 |
+
description: "A POST to /api/contents/path creates a New untitled, empty file or directory. A POST to /api/contents/path with body {'copy_from': '/path/to/OtherNotebook.ipynb'} creates a new copy of OtherNotebook in path."
|
| 141 |
+
tags:
|
| 142 |
+
- contents
|
| 143 |
+
parameters:
|
| 144 |
+
- name: model
|
| 145 |
+
in: body
|
| 146 |
+
description: Path of file to copy
|
| 147 |
+
schema:
|
| 148 |
+
type: object
|
| 149 |
+
properties:
|
| 150 |
+
copy_from:
|
| 151 |
+
type: string
|
| 152 |
+
ext:
|
| 153 |
+
type: string
|
| 154 |
+
type:
|
| 155 |
+
type: string
|
| 156 |
+
responses:
|
| 157 |
+
201:
|
| 158 |
+
description: File created
|
| 159 |
+
headers:
|
| 160 |
+
Location:
|
| 161 |
+
description: URL for the new file
|
| 162 |
+
type: string
|
| 163 |
+
format: url
|
| 164 |
+
schema:
|
| 165 |
+
$ref: "#/definitions/Contents"
|
| 166 |
+
404:
|
| 167 |
+
description: No item found
|
| 168 |
+
400:
|
| 169 |
+
description: Bad request
|
| 170 |
+
schema:
|
| 171 |
+
type: object
|
| 172 |
+
properties:
|
| 173 |
+
error:
|
| 174 |
+
type: string
|
| 175 |
+
description: Error condition
|
| 176 |
+
reason:
|
| 177 |
+
type: string
|
| 178 |
+
description: Explanation of error reason
|
| 179 |
+
patch:
|
| 180 |
+
summary: Rename a file or directory without re-uploading content
|
| 181 |
+
tags:
|
| 182 |
+
- contents
|
| 183 |
+
parameters:
|
| 184 |
+
- name: path
|
| 185 |
+
in: body
|
| 186 |
+
required: true
|
| 187 |
+
description: New path for file or directory.
|
| 188 |
+
schema:
|
| 189 |
+
type: object
|
| 190 |
+
properties:
|
| 191 |
+
path:
|
| 192 |
+
type: string
|
| 193 |
+
format: path
|
| 194 |
+
description: New path for file or directory
|
| 195 |
+
responses:
|
| 196 |
+
200:
|
| 197 |
+
description: Path updated
|
| 198 |
+
headers:
|
| 199 |
+
Location:
|
| 200 |
+
description: Updated URL for the file or directory
|
| 201 |
+
type: string
|
| 202 |
+
format: url
|
| 203 |
+
schema:
|
| 204 |
+
$ref: "#/definitions/Contents"
|
| 205 |
+
400:
|
| 206 |
+
description: No data provided
|
| 207 |
+
schema:
|
| 208 |
+
type: object
|
| 209 |
+
properties:
|
| 210 |
+
error:
|
| 211 |
+
type: string
|
| 212 |
+
description: Error condition
|
| 213 |
+
reason:
|
| 214 |
+
type: string
|
| 215 |
+
description: Explanation of error reason
|
| 216 |
+
put:
|
| 217 |
+
summary: Save or upload file.
|
| 218 |
+
description: "Saves the file in the location specified by name and path. PUT is very similar to POST, but the requester specifies the name, whereas with POST, the server picks the name."
|
| 219 |
+
tags:
|
| 220 |
+
- contents
|
| 221 |
+
parameters:
|
| 222 |
+
- name: model
|
| 223 |
+
in: body
|
| 224 |
+
description: New path for file or directory
|
| 225 |
+
schema:
|
| 226 |
+
type: object
|
| 227 |
+
properties:
|
| 228 |
+
name:
|
| 229 |
+
type: string
|
| 230 |
+
description: The new filename if changed
|
| 231 |
+
path:
|
| 232 |
+
type: string
|
| 233 |
+
description: New path for file or directory
|
| 234 |
+
type:
|
| 235 |
+
type: string
|
| 236 |
+
description: Path type ('notebook', 'file', 'directory')
|
| 237 |
+
format:
|
| 238 |
+
type: string
|
| 239 |
+
description: File format ('json', 'text', 'base64')
|
| 240 |
+
content:
|
| 241 |
+
type: string
|
| 242 |
+
description: The actual body of the document excluding directory type
|
| 243 |
+
responses:
|
| 244 |
+
200:
|
| 245 |
+
description: File saved
|
| 246 |
+
headers:
|
| 247 |
+
Location:
|
| 248 |
+
description: Updated URL for the file or directory
|
| 249 |
+
type: string
|
| 250 |
+
format: url
|
| 251 |
+
schema:
|
| 252 |
+
$ref: "#/definitions/Contents"
|
| 253 |
+
201:
|
| 254 |
+
description: Path created
|
| 255 |
+
headers:
|
| 256 |
+
Location:
|
| 257 |
+
description: URL for the file or directory
|
| 258 |
+
type: string
|
| 259 |
+
format: url
|
| 260 |
+
schema:
|
| 261 |
+
$ref: "#/definitions/Contents"
|
| 262 |
+
400:
|
| 263 |
+
description: No data provided
|
| 264 |
+
schema:
|
| 265 |
+
type: object
|
| 266 |
+
properties:
|
| 267 |
+
error:
|
| 268 |
+
type: string
|
| 269 |
+
description: Error condition
|
| 270 |
+
reason:
|
| 271 |
+
type: string
|
| 272 |
+
description: Explanation of error reason
|
| 273 |
+
delete:
|
| 274 |
+
summary: Delete a file in the given path
|
| 275 |
+
tags:
|
| 276 |
+
- contents
|
| 277 |
+
responses:
|
| 278 |
+
204:
|
| 279 |
+
description: File deleted
|
| 280 |
+
headers:
|
| 281 |
+
Location:
|
| 282 |
+
description: URL for the removed file
|
| 283 |
+
type: string
|
| 284 |
+
format: url
|
| 285 |
+
/api/contents/{path}/checkpoints:
|
| 286 |
+
parameters:
|
| 287 |
+
- $ref: "#/parameters/path"
|
| 288 |
+
get:
|
| 289 |
+
summary: Get a list of checkpoints for a file
|
| 290 |
+
description: List checkpoints for a given file. There will typically be zero or one results.
|
| 291 |
+
tags:
|
| 292 |
+
- contents
|
| 293 |
+
responses:
|
| 294 |
+
404:
|
| 295 |
+
description: No item found
|
| 296 |
+
400:
|
| 297 |
+
description: Bad request
|
| 298 |
+
schema:
|
| 299 |
+
type: object
|
| 300 |
+
properties:
|
| 301 |
+
error:
|
| 302 |
+
type: string
|
| 303 |
+
description: Error condition
|
| 304 |
+
reason:
|
| 305 |
+
type: string
|
| 306 |
+
description: Explanation of error reason
|
| 307 |
+
200:
|
| 308 |
+
description: List of checkpoints for a file
|
| 309 |
+
schema:
|
| 310 |
+
type: array
|
| 311 |
+
items:
|
| 312 |
+
$ref: "#/definitions/Checkpoints"
|
| 313 |
+
500:
|
| 314 |
+
description: Model key error
|
| 315 |
+
post:
|
| 316 |
+
summary: Create a new checkpoint for a file
|
| 317 |
+
description: "Create a new checkpoint with the current state of a file. With the default FileContentsManager, only one checkpoint is supported, so creating new checkpoints clobbers existing ones."
|
| 318 |
+
tags:
|
| 319 |
+
- contents
|
| 320 |
+
responses:
|
| 321 |
+
201:
|
| 322 |
+
description: Checkpoint created
|
| 323 |
+
headers:
|
| 324 |
+
Location:
|
| 325 |
+
description: URL for the checkpoint
|
| 326 |
+
type: string
|
| 327 |
+
format: url
|
| 328 |
+
schema:
|
| 329 |
+
$ref: "#/definitions/Checkpoints"
|
| 330 |
+
404:
|
| 331 |
+
description: No item found
|
| 332 |
+
400:
|
| 333 |
+
description: Bad request
|
| 334 |
+
schema:
|
| 335 |
+
type: object
|
| 336 |
+
properties:
|
| 337 |
+
error:
|
| 338 |
+
type: string
|
| 339 |
+
description: Error condition
|
| 340 |
+
reason:
|
| 341 |
+
type: string
|
| 342 |
+
description: Explanation of error reason
|
| 343 |
+
/api/contents/{path}/checkpoints/{checkpoint_id}:
|
| 344 |
+
post:
|
| 345 |
+
summary: Restore a file to a particular checkpointed state
|
| 346 |
+
parameters:
|
| 347 |
+
- $ref: "#/parameters/path"
|
| 348 |
+
- $ref: "#/parameters/checkpoint_id"
|
| 349 |
+
tags:
|
| 350 |
+
- contents
|
| 351 |
+
responses:
|
| 352 |
+
204:
|
| 353 |
+
description: Checkpoint restored
|
| 354 |
+
400:
|
| 355 |
+
description: Bad request
|
| 356 |
+
schema:
|
| 357 |
+
type: object
|
| 358 |
+
properties:
|
| 359 |
+
error:
|
| 360 |
+
type: string
|
| 361 |
+
description: Error condition
|
| 362 |
+
reason:
|
| 363 |
+
type: string
|
| 364 |
+
description: Explanation of error reason
|
| 365 |
+
delete:
|
| 366 |
+
summary: Delete a checkpoint
|
| 367 |
+
parameters:
|
| 368 |
+
- $ref: "#/parameters/path"
|
| 369 |
+
- $ref: "#/parameters/checkpoint_id"
|
| 370 |
+
tags:
|
| 371 |
+
- contents
|
| 372 |
+
responses:
|
| 373 |
+
204:
|
| 374 |
+
description: Checkpoint deleted
|
| 375 |
+
/api/sessions/{session}:
|
| 376 |
+
parameters:
|
| 377 |
+
- $ref: "#/parameters/session"
|
| 378 |
+
get:
|
| 379 |
+
summary: Get session
|
| 380 |
+
tags:
|
| 381 |
+
- sessions
|
| 382 |
+
responses:
|
| 383 |
+
200:
|
| 384 |
+
description: Session
|
| 385 |
+
schema:
|
| 386 |
+
$ref: "#/definitions/Session"
|
| 387 |
+
patch:
|
| 388 |
+
summary: "This can be used to rename the session."
|
| 389 |
+
tags:
|
| 390 |
+
- sessions
|
| 391 |
+
parameters:
|
| 392 |
+
- name: model
|
| 393 |
+
in: body
|
| 394 |
+
required: true
|
| 395 |
+
schema:
|
| 396 |
+
$ref: "#/definitions/Session"
|
| 397 |
+
responses:
|
| 398 |
+
200:
|
| 399 |
+
description: Session
|
| 400 |
+
schema:
|
| 401 |
+
$ref: "#/definitions/Session"
|
| 402 |
+
400:
|
| 403 |
+
description: No data provided
|
| 404 |
+
delete:
|
| 405 |
+
summary: Delete a session
|
| 406 |
+
tags:
|
| 407 |
+
- sessions
|
| 408 |
+
responses:
|
| 409 |
+
204:
|
| 410 |
+
description: Session (and kernel) were deleted
|
| 411 |
+
410:
|
| 412 |
+
description: "Kernel was deleted before the session, and the session was *not* deleted (TODO - check to make sure session wasn't deleted)"
|
| 413 |
+
/api/sessions:
|
| 414 |
+
get:
|
| 415 |
+
summary: List available sessions
|
| 416 |
+
tags:
|
| 417 |
+
- sessions
|
| 418 |
+
responses:
|
| 419 |
+
200:
|
| 420 |
+
description: List of current sessions
|
| 421 |
+
schema:
|
| 422 |
+
type: array
|
| 423 |
+
items:
|
| 424 |
+
$ref: "#/definitions/Session"
|
| 425 |
+
post:
|
| 426 |
+
summary: "Create a new session, or return an existing session if a session of the same name already exists"
|
| 427 |
+
tags:
|
| 428 |
+
- sessions
|
| 429 |
+
parameters:
|
| 430 |
+
- name: session
|
| 431 |
+
in: body
|
| 432 |
+
schema:
|
| 433 |
+
$ref: "#/definitions/Session"
|
| 434 |
+
responses:
|
| 435 |
+
201:
|
| 436 |
+
description: Session created or returned
|
| 437 |
+
schema:
|
| 438 |
+
$ref: "#/definitions/Session"
|
| 439 |
+
headers:
|
| 440 |
+
Location:
|
| 441 |
+
description: URL for session commands
|
| 442 |
+
type: string
|
| 443 |
+
format: url
|
| 444 |
+
501:
|
| 445 |
+
description: Session not available
|
| 446 |
+
schema:
|
| 447 |
+
type: object
|
| 448 |
+
description: error message
|
| 449 |
+
properties:
|
| 450 |
+
message:
|
| 451 |
+
type: string
|
| 452 |
+
short_message:
|
| 453 |
+
type: string
|
| 454 |
+
|
| 455 |
+
/api/kernels:
|
| 456 |
+
get:
|
| 457 |
+
summary: List the JSON data for all kernels that are currently running
|
| 458 |
+
tags:
|
| 459 |
+
- kernels
|
| 460 |
+
responses:
|
| 461 |
+
200:
|
| 462 |
+
description: List of currently-running kernel uuids
|
| 463 |
+
schema:
|
| 464 |
+
type: array
|
| 465 |
+
items:
|
| 466 |
+
$ref: "#/definitions/Kernel"
|
| 467 |
+
post:
|
| 468 |
+
summary: Start a kernel and return the uuid
|
| 469 |
+
tags:
|
| 470 |
+
- kernels
|
| 471 |
+
parameters:
|
| 472 |
+
- name: options
|
| 473 |
+
in: body
|
| 474 |
+
schema:
|
| 475 |
+
type: object
|
| 476 |
+
required:
|
| 477 |
+
- name
|
| 478 |
+
properties:
|
| 479 |
+
name:
|
| 480 |
+
type: string
|
| 481 |
+
description: Kernel spec name (defaults to default kernel spec for server)
|
| 482 |
+
path:
|
| 483 |
+
type: string
|
| 484 |
+
description: API path from root to the cwd of the kernel
|
| 485 |
+
responses:
|
| 486 |
+
201:
|
| 487 |
+
description: Kernel started
|
| 488 |
+
schema:
|
| 489 |
+
$ref: "#/definitions/Kernel"
|
| 490 |
+
headers:
|
| 491 |
+
Location:
|
| 492 |
+
description: Model for started kernel
|
| 493 |
+
type: string
|
| 494 |
+
format: url
|
| 495 |
+
/api/kernels/{kernel_id}:
|
| 496 |
+
parameters:
|
| 497 |
+
- $ref: "#/parameters/kernel"
|
| 498 |
+
get:
|
| 499 |
+
summary: Get kernel information
|
| 500 |
+
tags:
|
| 501 |
+
- kernels
|
| 502 |
+
responses:
|
| 503 |
+
200:
|
| 504 |
+
description: Kernel information
|
| 505 |
+
schema:
|
| 506 |
+
$ref: "#/definitions/Kernel"
|
| 507 |
+
delete:
|
| 508 |
+
summary: Kill a kernel and delete the kernel id
|
| 509 |
+
tags:
|
| 510 |
+
- kernels
|
| 511 |
+
responses:
|
| 512 |
+
204:
|
| 513 |
+
description: Kernel deleted
|
| 514 |
+
/api/kernels/{kernel_id}/interrupt:
|
| 515 |
+
parameters:
|
| 516 |
+
- $ref: "#/parameters/kernel"
|
| 517 |
+
post:
|
| 518 |
+
summary: Interrupt a kernel
|
| 519 |
+
tags:
|
| 520 |
+
- kernels
|
| 521 |
+
responses:
|
| 522 |
+
204:
|
| 523 |
+
description: Kernel interrupted
|
| 524 |
+
/api/kernels/{kernel_id}/restart:
|
| 525 |
+
parameters:
|
| 526 |
+
- $ref: "#/parameters/kernel"
|
| 527 |
+
post:
|
| 528 |
+
summary: Restart a kernel
|
| 529 |
+
tags:
|
| 530 |
+
- kernels
|
| 531 |
+
responses:
|
| 532 |
+
200:
|
| 533 |
+
description: Kernel restarted
|
| 534 |
+
headers:
|
| 535 |
+
Location:
|
| 536 |
+
description: URL for kernel commands
|
| 537 |
+
type: string
|
| 538 |
+
format: url
|
| 539 |
+
schema:
|
| 540 |
+
$ref: "#/definitions/Kernel"
|
| 541 |
+
|
| 542 |
+
/api/kernelspecs:
|
| 543 |
+
get:
|
| 544 |
+
summary: Get kernel specs
|
| 545 |
+
tags:
|
| 546 |
+
- kernelspecs
|
| 547 |
+
responses:
|
| 548 |
+
200:
|
| 549 |
+
description: Kernel specs
|
| 550 |
+
schema:
|
| 551 |
+
type: object
|
| 552 |
+
properties:
|
| 553 |
+
default:
|
| 554 |
+
type: string
|
| 555 |
+
description: Default kernel name
|
| 556 |
+
kernelspecs:
|
| 557 |
+
type: object
|
| 558 |
+
additionalProperties:
|
| 559 |
+
$ref: "#/definitions/KernelSpec"
|
| 560 |
+
/api/config/{section_name}:
|
| 561 |
+
get:
|
| 562 |
+
summary: Get a configuration section by name
|
| 563 |
+
parameters:
|
| 564 |
+
- $ref: "#/parameters/section_name"
|
| 565 |
+
tags:
|
| 566 |
+
- config
|
| 567 |
+
responses:
|
| 568 |
+
200:
|
| 569 |
+
description: Configuration object
|
| 570 |
+
schema:
|
| 571 |
+
type: object
|
| 572 |
+
patch:
|
| 573 |
+
summary: Update a configuration section by name
|
| 574 |
+
tags:
|
| 575 |
+
- config
|
| 576 |
+
parameters:
|
| 577 |
+
- $ref: "#/parameters/section_name"
|
| 578 |
+
- name: configuration
|
| 579 |
+
in: body
|
| 580 |
+
schema:
|
| 581 |
+
type: object
|
| 582 |
+
responses:
|
| 583 |
+
200:
|
| 584 |
+
description: Configuration object
|
| 585 |
+
schema:
|
| 586 |
+
type: object
|
| 587 |
+
|
| 588 |
+
/api/terminals:
|
| 589 |
+
get:
|
| 590 |
+
summary: Get available terminals
|
| 591 |
+
tags:
|
| 592 |
+
- terminals
|
| 593 |
+
responses:
|
| 594 |
+
200:
|
| 595 |
+
description: A list of all available terminal ids.
|
| 596 |
+
schema:
|
| 597 |
+
type: array
|
| 598 |
+
items:
|
| 599 |
+
$ref: "#/definitions/Terminal"
|
| 600 |
+
403:
|
| 601 |
+
description: Forbidden to access
|
| 602 |
+
404:
|
| 603 |
+
description: Not found
|
| 604 |
+
|
| 605 |
+
post:
|
| 606 |
+
summary: Create a new terminal
|
| 607 |
+
tags:
|
| 608 |
+
- terminals
|
| 609 |
+
responses:
|
| 610 |
+
200:
|
| 611 |
+
description: Successfully created a new terminal
|
| 612 |
+
schema:
|
| 613 |
+
$ref: "#/definitions/Terminal"
|
| 614 |
+
403:
|
| 615 |
+
description: Forbidden to access
|
| 616 |
+
404:
|
| 617 |
+
description: Not found
|
| 618 |
+
|
| 619 |
+
/api/terminals/{terminal_id}:
|
| 620 |
+
get:
|
| 621 |
+
summary: Get a terminal session corresponding to an id.
|
| 622 |
+
tags:
|
| 623 |
+
- terminals
|
| 624 |
+
parameters:
|
| 625 |
+
- $ref: "#/parameters/terminal_id"
|
| 626 |
+
responses:
|
| 627 |
+
200:
|
| 628 |
+
description: Terminal session with given id
|
| 629 |
+
schema:
|
| 630 |
+
$ref: "#/definitions/Terminal"
|
| 631 |
+
403:
|
| 632 |
+
description: Forbidden to access
|
| 633 |
+
404:
|
| 634 |
+
description: Not found
|
| 635 |
+
|
| 636 |
+
delete:
|
| 637 |
+
summary: Delete a terminal session corresponding to an id.
|
| 638 |
+
tags:
|
| 639 |
+
- terminals
|
| 640 |
+
parameters:
|
| 641 |
+
- $ref: "#/parameters/terminal_id"
|
| 642 |
+
responses:
|
| 643 |
+
204:
|
| 644 |
+
description: Successfully deleted terminal session
|
| 645 |
+
403:
|
| 646 |
+
description: Forbidden to access
|
| 647 |
+
404:
|
| 648 |
+
description: Not found
|
| 649 |
+
/api/me:
|
| 650 |
+
get:
|
| 651 |
+
summary: |
|
| 652 |
+
Get the identity of the currently authenticated user.
|
| 653 |
+
If present, a `permissions` argument may be specified
|
| 654 |
+
to check what actions the user currently is authorized to take.
|
| 655 |
+
tags:
|
| 656 |
+
- identity
|
| 657 |
+
parameters:
|
| 658 |
+
- $ref: "#/parameters/permissions"
|
| 659 |
+
responses:
|
| 660 |
+
200:
|
| 661 |
+
description: The user's identity and permissions
|
| 662 |
+
schema:
|
| 663 |
+
type: object
|
| 664 |
+
properties:
|
| 665 |
+
identity:
|
| 666 |
+
$ref: "#/definitions/Identity"
|
| 667 |
+
permissions:
|
| 668 |
+
$ref: "#/definitions/Permissions"
|
| 669 |
+
example:
|
| 670 |
+
identity:
|
| 671 |
+
username: minrk
|
| 672 |
+
name: Min Ragan-Kelley
|
| 673 |
+
display_name: Min RK
|
| 674 |
+
initials: MRK
|
| 675 |
+
avatar_url: null
|
| 676 |
+
color: null
|
| 677 |
+
permissions:
|
| 678 |
+
contents:
|
| 679 |
+
- read
|
| 680 |
+
- write
|
| 681 |
+
kernels:
|
| 682 |
+
- read
|
| 683 |
+
- write
|
| 684 |
+
- execute
|
| 685 |
+
/api/status:
|
| 686 |
+
get:
|
| 687 |
+
summary: Get the current status/activity of the server.
|
| 688 |
+
tags:
|
| 689 |
+
- status
|
| 690 |
+
responses:
|
| 691 |
+
200:
|
| 692 |
+
description: The current status of the server
|
| 693 |
+
schema:
|
| 694 |
+
$ref: "#/definitions/APIStatus"
|
| 695 |
+
|
| 696 |
+
/api/spec.yaml:
|
| 697 |
+
get:
|
| 698 |
+
summary: Get the current spec for the notebook server's APIs.
|
| 699 |
+
tags:
|
| 700 |
+
- api-spec
|
| 701 |
+
produces:
|
| 702 |
+
- text/x-yaml
|
| 703 |
+
responses:
|
| 704 |
+
200:
|
| 705 |
+
description: The current spec for the notebook server's APIs.
|
| 706 |
+
schema:
|
| 707 |
+
type: file
|
| 708 |
+
definitions:
|
| 709 |
+
APIStatus:
|
| 710 |
+
description: |
|
| 711 |
+
Notebook server API status.
|
| 712 |
+
Added in notebook 5.0.
|
| 713 |
+
properties:
|
| 714 |
+
started:
|
| 715 |
+
type: string
|
| 716 |
+
description: |
|
| 717 |
+
ISO8601 timestamp indicating when the notebook server started.
|
| 718 |
+
last_activity:
|
| 719 |
+
type: string
|
| 720 |
+
description: |
|
| 721 |
+
ISO8601 timestamp indicating the last activity on the server,
|
| 722 |
+
either on the REST API or kernel activity.
|
| 723 |
+
connections:
|
| 724 |
+
type: number
|
| 725 |
+
description: |
|
| 726 |
+
The total number of currently open connections to kernels.
|
| 727 |
+
kernels:
|
| 728 |
+
type: number
|
| 729 |
+
description: |
|
| 730 |
+
The total number of running kernels.
|
| 731 |
+
Identity:
|
| 732 |
+
description: The identity of the currently authenticated user
|
| 733 |
+
properties:
|
| 734 |
+
username:
|
| 735 |
+
type: string
|
| 736 |
+
description: |
|
| 737 |
+
Unique string identifying the user
|
| 738 |
+
name:
|
| 739 |
+
type: string
|
| 740 |
+
description: |
|
| 741 |
+
For-humans name of the user.
|
| 742 |
+
May be the same as `username` in systems where
|
| 743 |
+
only usernames are available.
|
| 744 |
+
display_name:
|
| 745 |
+
type: string
|
| 746 |
+
description: |
|
| 747 |
+
Alternate rendering of name for display.
|
| 748 |
+
Often the same as `name`.
|
| 749 |
+
initials:
|
| 750 |
+
type: string
|
| 751 |
+
description: |
|
| 752 |
+
Short string of initials.
|
| 753 |
+
Initials should not be derived automatically due to localization issues.
|
| 754 |
+
May be `null` if unavailable.
|
| 755 |
+
avatar_url:
|
| 756 |
+
type: string
|
| 757 |
+
description: |
|
| 758 |
+
URL of an avatar to be used for the user.
|
| 759 |
+
May be `null` if unavailable.
|
| 760 |
+
color:
|
| 761 |
+
type: string
|
| 762 |
+
description: |
|
| 763 |
+
A CSS color string to use as a preferred color,
|
| 764 |
+
such as for collaboration cursors.
|
| 765 |
+
May be `null` if unavailable.
|
| 766 |
+
Permissions:
|
| 767 |
+
type: object
|
| 768 |
+
description: |
|
| 769 |
+
A dict of the form: `{"resource": ["action",]}`
|
| 770 |
+
containing only the AUTHORIZED subset of resource+actions
|
| 771 |
+
from the permissions specified in the request.
|
| 772 |
+
If no permission checks were made in the request,
|
| 773 |
+
this will be empty.
|
| 774 |
+
additionalProperties:
|
| 775 |
+
type: array
|
| 776 |
+
items:
|
| 777 |
+
type: string
|
| 778 |
+
KernelSpec:
|
| 779 |
+
description: Kernel spec (contents of kernel.json)
|
| 780 |
+
properties:
|
| 781 |
+
name:
|
| 782 |
+
type: string
|
| 783 |
+
description: Unique name for kernel
|
| 784 |
+
KernelSpecFile:
|
| 785 |
+
$ref: "#/definitions/KernelSpecFile"
|
| 786 |
+
resources:
|
| 787 |
+
type: object
|
| 788 |
+
properties:
|
| 789 |
+
kernel.js:
|
| 790 |
+
type: string
|
| 791 |
+
format: filename
|
| 792 |
+
description: path for kernel.js file
|
| 793 |
+
kernel.css:
|
| 794 |
+
type: string
|
| 795 |
+
format: filename
|
| 796 |
+
description: path for kernel.css file
|
| 797 |
+
logo-*:
|
| 798 |
+
type: string
|
| 799 |
+
format: filename
|
| 800 |
+
description: path for logo file. Logo filenames are of the form `logo-widthxheight`
|
| 801 |
+
KernelSpecFile:
|
| 802 |
+
description: Kernel spec json file
|
| 803 |
+
required:
|
| 804 |
+
- argv
|
| 805 |
+
- display_name
|
| 806 |
+
- language
|
| 807 |
+
properties:
|
| 808 |
+
language:
|
| 809 |
+
type: string
|
| 810 |
+
description: The programming language which this kernel runs. This will be stored in notebook metadata.
|
| 811 |
+
argv:
|
| 812 |
+
type: array
|
| 813 |
+
description: "A list of command line arguments used to start the kernel. The text `{connection_file}` in any argument will be replaced with the path to the connection file."
|
| 814 |
+
items:
|
| 815 |
+
type: string
|
| 816 |
+
display_name:
|
| 817 |
+
type: string
|
| 818 |
+
description: "The kernel's name as it should be displayed in the UI. Unlike the kernel name used in the API, this can contain arbitrary unicode characters."
|
| 819 |
+
codemirror_mode:
|
| 820 |
+
type: string
|
| 821 |
+
description: Codemirror mode. Can be a string *or* a valid Codemirror mode object. This defaults to the string from the `language` property.
|
| 822 |
+
env:
|
| 823 |
+
type: object
|
| 824 |
+
description: A dictionary of environment variables to set for the kernel. These will be added to the current environment variables.
|
| 825 |
+
additionalProperties:
|
| 826 |
+
type: string
|
| 827 |
+
help_links:
|
| 828 |
+
type: array
|
| 829 |
+
description: Help items to be displayed in the help menu in the notebook UI.
|
| 830 |
+
items:
|
| 831 |
+
type: object
|
| 832 |
+
required:
|
| 833 |
+
- text
|
| 834 |
+
- url
|
| 835 |
+
properties:
|
| 836 |
+
text:
|
| 837 |
+
type: string
|
| 838 |
+
description: menu item link text
|
| 839 |
+
url:
|
| 840 |
+
type: string
|
| 841 |
+
format: URL
|
| 842 |
+
description: menu item link url
|
| 843 |
+
Kernel:
|
| 844 |
+
description: Kernel information
|
| 845 |
+
required:
|
| 846 |
+
- id
|
| 847 |
+
- name
|
| 848 |
+
properties:
|
| 849 |
+
id:
|
| 850 |
+
type: string
|
| 851 |
+
format: uuid
|
| 852 |
+
description: uuid of kernel
|
| 853 |
+
name:
|
| 854 |
+
type: string
|
| 855 |
+
description: kernel spec name
|
| 856 |
+
last_activity:
|
| 857 |
+
type: string
|
| 858 |
+
description: |
|
| 859 |
+
ISO 8601 timestamp for the last-seen activity on this kernel.
|
| 860 |
+
Use this in combination with execution_state == 'idle' to identify
|
| 861 |
+
which kernels have been idle since a given time.
|
| 862 |
+
Timestamps will be UTC, indicated by the 'Z' suffix.
|
| 863 |
+
Added in notebook server 5.0.
|
| 864 |
+
connections:
|
| 865 |
+
type: number
|
| 866 |
+
description: |
|
| 867 |
+
The number of active connections to this kernel.
|
| 868 |
+
execution_state:
|
| 869 |
+
type: string
|
| 870 |
+
description: |
|
| 871 |
+
Current execution state of the kernel (typically 'idle' or 'busy', but may be other values, such as 'starting').
|
| 872 |
+
Added in notebook server 5.0.
|
| 873 |
+
Session:
|
| 874 |
+
description: A session
|
| 875 |
+
type: object
|
| 876 |
+
properties:
|
| 877 |
+
id:
|
| 878 |
+
type: string
|
| 879 |
+
format: uuid
|
| 880 |
+
path:
|
| 881 |
+
type: string
|
| 882 |
+
description: path to the session
|
| 883 |
+
name:
|
| 884 |
+
type: string
|
| 885 |
+
description: name of the session
|
| 886 |
+
type:
|
| 887 |
+
type: string
|
| 888 |
+
description: session type
|
| 889 |
+
kernel:
|
| 890 |
+
$ref: "#/definitions/Kernel"
|
| 891 |
+
Contents:
|
| 892 |
+
description: "A contents object. The content and format keys may be null if content is not contained. The hash may be null if hash is not required. If type is 'file', then the mimetype will be null."
|
| 893 |
+
type: object
|
| 894 |
+
required:
|
| 895 |
+
- type
|
| 896 |
+
- name
|
| 897 |
+
- path
|
| 898 |
+
- writable
|
| 899 |
+
- created
|
| 900 |
+
- last_modified
|
| 901 |
+
- mimetype
|
| 902 |
+
- format
|
| 903 |
+
- content
|
| 904 |
+
properties:
|
| 905 |
+
name:
|
| 906 |
+
type: string
|
| 907 |
+
description: "Name of file or directory, equivalent to the last part of the path"
|
| 908 |
+
path:
|
| 909 |
+
type: string
|
| 910 |
+
description: Full path for file or directory
|
| 911 |
+
type:
|
| 912 |
+
type: string
|
| 913 |
+
description: Type of content
|
| 914 |
+
enum:
|
| 915 |
+
- directory
|
| 916 |
+
- file
|
| 917 |
+
- notebook
|
| 918 |
+
writable:
|
| 919 |
+
type: boolean
|
| 920 |
+
description: indicates whether the requester has permission to edit the file
|
| 921 |
+
created:
|
| 922 |
+
type: string
|
| 923 |
+
description: Creation timestamp
|
| 924 |
+
format: dateTime
|
| 925 |
+
last_modified:
|
| 926 |
+
type: string
|
| 927 |
+
description: Last modified timestamp
|
| 928 |
+
format: dateTime
|
| 929 |
+
size:
|
| 930 |
+
type: integer
|
| 931 |
+
description: "The size of the file or notebook in bytes. If no size is provided, defaults to null."
|
| 932 |
+
mimetype:
|
| 933 |
+
type: string
|
| 934 |
+
description: "The mimetype of a file. If content is not null, and type is 'file', this will contain the mimetype of the file, otherwise this will be null."
|
| 935 |
+
content:
|
| 936 |
+
type: string
|
| 937 |
+
description: "The content, if requested (otherwise null). Will be an array if type is 'directory'"
|
| 938 |
+
format:
|
| 939 |
+
type: string
|
| 940 |
+
description: Format of content (one of null, 'text', 'base64', 'json')
|
| 941 |
+
hash:
|
| 942 |
+
type: string
|
| 943 |
+
description: "[optional] The hexdigest hash string of content, if requested (otherwise null). It cannot be null if hash_algorithm is defined."
|
| 944 |
+
hash_algorithm:
|
| 945 |
+
type: string
|
| 946 |
+
description: "[optional] The algorithm used to produce the hash, if requested (otherwise null). It cannot be null if hash is defined."
|
| 947 |
+
Checkpoints:
|
| 948 |
+
description: A checkpoint object.
|
| 949 |
+
type: object
|
| 950 |
+
required:
|
| 951 |
+
- id
|
| 952 |
+
- last_modified
|
| 953 |
+
properties:
|
| 954 |
+
id:
|
| 955 |
+
type: string
|
| 956 |
+
description: Unique id for the checkpoint.
|
| 957 |
+
last_modified:
|
| 958 |
+
type: string
|
| 959 |
+
description: Last modified timestamp
|
| 960 |
+
format: dateTime
|
| 961 |
+
Terminal:
|
| 962 |
+
description: A Terminal object
|
| 963 |
+
type: object
|
| 964 |
+
required:
|
| 965 |
+
- name
|
| 966 |
+
properties:
|
| 967 |
+
name:
|
| 968 |
+
type: string
|
| 969 |
+
description: name of terminal
|
| 970 |
+
last_activity:
|
| 971 |
+
type: string
|
| 972 |
+
description: |
|
| 973 |
+
ISO 8601 timestamp for the last-seen activity on this terminal. Use
|
| 974 |
+
this to identify which terminals have been inactive since a given time.
|
| 975 |
+
Timestamps will be UTC, indicated by the 'Z' suffix.
|
vlmpy310/lib/python3.10/site-packages/jupyter_server/services/api/handlers.py
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tornado handlers for api specifications."""
|
| 2 |
+
|
| 3 |
+
# Copyright (c) Jupyter Development Team.
|
| 4 |
+
# Distributed under the terms of the Modified BSD License.
|
| 5 |
+
import json
|
| 6 |
+
import os
|
| 7 |
+
from typing import Any
|
| 8 |
+
|
| 9 |
+
from jupyter_core.utils import ensure_async
|
| 10 |
+
from tornado import web
|
| 11 |
+
|
| 12 |
+
from jupyter_server._tz import isoformat, utcfromtimestamp
|
| 13 |
+
from jupyter_server.auth.decorator import authorized
|
| 14 |
+
|
| 15 |
+
from ...base.handlers import APIHandler, JupyterHandler
|
| 16 |
+
|
| 17 |
+
AUTH_RESOURCE = "api"
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class APISpecHandler(web.StaticFileHandler, JupyterHandler):
    """Serve this server's static REST API spec file (``api.yaml``)."""

    auth_resource = AUTH_RESOURCE

    def initialize(self):
        """Point the static-file machinery at this module's directory."""
        spec_dir = os.path.dirname(__file__)
        web.StaticFileHandler.initialize(self, path=spec_dir)

    @web.authenticated
    @authorized
    def head(self):
        """Serve the spec's headers without its body."""
        return self.get("api.yaml", include_body=False)

    @web.authenticated
    @authorized
    def get(self):
        """Serve the API spec file itself."""
        self.log.warning("Serving api spec (experimental, incomplete)")
        return web.StaticFileHandler.get(self, "api.yaml")

    def get_content_type(self):
        """Report the spec as YAML instead of the guessed mimetype."""
        return "text/x-yaml"
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class APIStatusHandler(APIHandler):
    """Report server start time, last activity, and kernel/connection counts."""

    auth_resource = AUTH_RESOURCE
    # Reading the status should not itself register as server activity.
    _track_activity = False

    @web.authenticated
    @authorized
    async def get(self):
        """Return the server status model as sorted JSON."""
        # Fall back to the unix epoch when no start time was recorded.
        started_at = self.settings.get("started", utcfromtimestamp(0))

        kernels = await ensure_async(self.kernel_manager.list_kernels())
        connection_count = sum(k["connections"] for k in kernels)

        model = {
            "started": isoformat(started_at),
            "last_activity": isoformat(self.application.last_activity()),  # type:ignore[attr-defined]
            "kernels": len(kernels),
            "connections": connection_count,
        }
        self.finish(json.dumps(model, sort_keys=True))
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
class IdentityHandler(APIHandler):
    """Report the authenticated user's identity model and checked permissions."""

    @web.authenticated
    async def get(self):
        """Return ``{"identity": ..., "permissions": ...}`` for the current user."""
        raw_permissions: str = self.get_argument("permissions", "")
        error_msg = f'permissions should be a JSON dict of {{"resource": ["action",]}}, got {raw_permissions!r}'

        # Parse the optional ?permissions= argument into a dict of
        # {"resource": [actions, ...]} to check against the authorizer.
        requested: dict = {}
        if raw_permissions:
            try:
                requested = json.loads(raw_permissions)
            except ValueError as e:
                raise web.HTTPError(400, error_msg) from e
            if not isinstance(requested, dict):
                raise web.HTTPError(400, error_msg)

        user = self.current_user
        granted: dict[str, list[str]] = {}

        for resource, actions in requested.items():
            well_formed = (
                isinstance(resource, str)
                and isinstance(actions, list)
                and all(isinstance(action, str) for action in actions)
            )
            if not well_formed:
                raise web.HTTPError(400, error_msg)

            # Keep only the actions the authorizer actually allows.
            granted[resource] = []
            for action in actions:
                ok = await ensure_async(
                    self.authorizer.is_authorized(self, user, action, resource)
                )
                if ok:
                    granted[resource].append(action)

        identity: dict[str, Any] = self.identity_provider.identity_model(user)
        self.write(json.dumps({"identity": identity, "permissions": granted}))
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
# URL-pattern-to-handler routing table consumed by the server when this
# module's handlers are registered.
default_handlers = [
    (r"/api/spec.yaml", APISpecHandler),
    (r"/api/status", APIStatusHandler),
    (r"/api/me", IdentityHandler),
]
|
vlmpy310/lib/python3.10/site-packages/jupyter_server/services/events/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (184 Bytes). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/jupyter_server/services/events/handlers.py
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""A Websocket Handler for emitting Jupyter server events.
|
| 2 |
+
|
| 3 |
+
.. versionadded:: 2.0
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
from __future__ import annotations
|
| 7 |
+
|
| 8 |
+
import json
|
| 9 |
+
from datetime import datetime
|
| 10 |
+
from typing import TYPE_CHECKING, Any, Optional, cast
|
| 11 |
+
|
| 12 |
+
from jupyter_core.utils import ensure_async
|
| 13 |
+
from tornado import web, websocket
|
| 14 |
+
|
| 15 |
+
from jupyter_server.auth.decorator import authorized, ws_authenticated
|
| 16 |
+
from jupyter_server.base.handlers import JupyterHandler
|
| 17 |
+
|
| 18 |
+
from ...base.handlers import APIHandler
|
| 19 |
+
|
| 20 |
+
AUTH_RESOURCE = "events"
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
if TYPE_CHECKING:
|
| 24 |
+
import jupyter_events.logger
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class SubscribeWebsocket(
    JupyterHandler,
    websocket.WebSocketHandler,
):
    """Websocket handler streaming Jupyter Server events to clients."""

    auth_resource = AUTH_RESOURCE

    async def pre_get(self):
        """Authorize the current user before the event subscription opens."""
        is_allowed = await ensure_async(
            self.authorizer.is_authorized(self, self.current_user, "execute", "events")
        )
        if not is_allowed:
            raise web.HTTPError(403)

    @ws_authenticated
    async def get(self, *args, **kwargs):
        """Open the event websocket once the authorization check passes."""
        await ensure_async(self.pre_get())
        upgrade = super().get(*args, **kwargs)
        if upgrade is not None:
            await upgrade

    async def event_listener(
        self, logger: jupyter_events.logger.EventLogger, schema_id: str, data: dict[str, Any]
    ) -> None:
        """Forward a single emitted event to the connected client."""
        message = dict(schema_id=schema_id, **data)
        self.write_message(json.dumps(message))

    def open(self):
        """Start forwarding events emitted by the server's event logger."""
        self.event_logger.add_listener(listener=self.event_listener)

    def on_close(self):
        """Stop forwarding events when the socket closes."""
        self.event_logger.remove_listener(listener=self.event_listener)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def validate_model(
    data: dict[str, Any], registry: jupyter_events.schema_registry.SchemaRegistry
) -> None:
    """Check the request body for required fields and a matching schema version.

    Raises a plain ``Exception`` (translated to a 400 by the caller) when a
    required key is absent or the supplied version does not match the
    registered schema's version.
    """
    for key in ("schema_id", "version", "data"):
        if key not in data:
            message = f"Missing `{key}` in the JSON request body."
            raise Exception(message)
    schema_id = cast(str, data.get("schema_id"))
    # An unknown schema_id makes jupyter_events raise its own descriptive
    # error, so that case needs no handling here.
    schema = registry.get(schema_id)
    version = str(cast(str, data.get("version")))
    if schema.version != version:
        message = f"Unregistered version: {version!r}≠{schema.version!r} for `{schema_id}`"
        raise Exception(message)
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def get_timestamp(data: dict[str, Any]) -> Optional[datetime]:
    """Extract the optional ``timestamp`` field from the request body.

    Returns None when no timestamp key is present; raises a 400 HTTPError
    when the value does not parse against the expected format.
    """
    if "timestamp" not in data:
        return None
    try:
        # NOTE: the format deliberately expects a UTC-offset followed by a
        # literal 'Z', e.g. 2022-05-26T13:50:00+05:00Z.
        return datetime.strptime(data["timestamp"], "%Y-%m-%dT%H:%M:%S%zZ")
    except Exception as e:
        raise web.HTTPError(
            400,
            """Failed to parse timestamp from JSON request body,
            an ISO format datetime string with UTC offset is expected,
            for example, 2022-05-26T13:50:00+05:00Z""",
        ) from e
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
class EventHandler(APIHandler):
    """REST endpoint for emitting events onto the server's event bus."""

    auth_resource = AUTH_RESOURCE

    @web.authenticated
    @authorized
    async def post(self):
        """Validate the request body and emit it as an event (204 on success)."""
        payload = self.get_json_body()
        if payload is None:
            raise web.HTTPError(400, "No JSON data provided")

        try:
            validate_model(payload, self.event_logger.schemas)
            schema_id = cast(str, payload.get("schema_id"))
            event_data = cast("dict[str, Any]", payload.get("data"))
            self.event_logger.emit(
                schema_id=schema_id,
                data=event_data,
                timestamp_override=get_timestamp(payload),
            )
            self.set_status(204)
            self.finish()
        except Exception as e:
            # Every known failure mode here stems from a bad request (wrong
            # version, unregistered schema, invalid emission payload, ...),
            # so report them all as 400s.
            raise web.HTTPError(400, str(e)) from e
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
# URL-pattern-to-handler routing table consumed by the server when this
# module's handlers are registered.
default_handlers = [
    (r"/api/events", EventHandler),
    (r"/api/events/subscribe", SubscribeWebsocket),
]
|
vlmpy310/lib/python3.10/site-packages/jupyter_server/services/kernels/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (185 Bytes). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/jupyter_server/services/kernels/__pycache__/handlers.cpython-310.pyc
ADDED
|
Binary file (3.96 kB). View file
|
|
|