Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +4 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/emoji/unicode_codes.pyi +6 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/extensions/admonition.pyi +15 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/extensions/codehilite.pyi +42 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/extensions/meta.pyi +18 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/pyVmomi/__init__.pyi +0 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/pyVmomi/vim/__init__.pyi +69 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/pyVmomi/vim/fault.pyi +7 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/pyVmomi/vim/option.pyi +9 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/pyVmomi/vim/view.pyi +15 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/pyVmomi/vmodl/__init__.pyi +5 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/requests/api.pyi +28 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/requests/exceptions.pyi +31 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/requests/models.pyi +129 -0
- moondream/lib/python3.10/site-packages/gradio_client/__pycache__/media_data.cpython-310.pyc +3 -0
- moondream/lib/python3.10/site-packages/sympy/logic/__pycache__/boolalg.cpython-310.pyc +3 -0
- moondream/lib/python3.10/site-packages/torch/__pycache__/_tensor_docs.cpython-310.pyc +3 -0
- moondream/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp.cpython-310.pyc +3 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/ATenGeneral.h +3 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/Array.h +39 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/DistributionsHelper.h +337 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/NamedTensor.h +139 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/TensorBody.h +0 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/UndefinedTensorImpl.h +1 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/boxing/BoxedKernel.h +176 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/boxing/BoxedKernel_impl.h +99 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction.h +260 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/boxing/OperatorKernel.h +27 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoFunctor.h +32 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h +39 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/boxing.h +387 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/test_helpers.h +124 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/CppSignature.h +65 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/DispatchKeyExtractor.h +242 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/Dispatcher.h +795 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/ObservedOperators.h +17 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorEntry.h +313 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorOptions.h +30 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/RegistrationHandleRAII.h +36 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/ivalue.h +1555 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/adaption.h +83 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/infer_schema.h +160 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_allowlist.h +199 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_registration.h +596 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/cpu/FlushDenormal.h +14 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/cpu/Utils.h +10 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional.h +4 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_base.h +329 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_bfloat16.h +549 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec.h +47 -0
.gitattributes
CHANGED
|
@@ -493,3 +493,7 @@ parrot/lib/libasan.so.6 filter=lfs diff=lfs merge=lfs -text
|
|
| 493 |
parrot/lib/python3.10/site-packages/tiktoken/_tiktoken.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 494 |
mantis_evalkit/lib/python3.10/site-packages/pyarrow/lib.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 495 |
moondream/lib/python3.10/site-packages/sympy/solvers/diophantine/__pycache__/diophantine.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 493 |
parrot/lib/python3.10/site-packages/tiktoken/_tiktoken.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 494 |
mantis_evalkit/lib/python3.10/site-packages/pyarrow/lib.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 495 |
moondream/lib/python3.10/site-packages/sympy/solvers/diophantine/__pycache__/diophantine.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 496 |
+
moondream/lib/python3.10/site-packages/sympy/logic/__pycache__/boolalg.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 497 |
+
moondream/lib/python3.10/site-packages/gradio_client/__pycache__/media_data.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 498 |
+
moondream/lib/python3.10/site-packages/torch/__pycache__/_tensor_docs.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 499 |
+
moondream/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/emoji/unicode_codes.pyi
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, Text
|
| 2 |
+
|
| 3 |
+
EMOJI_ALIAS_UNICODE: Dict[Text, Text]
|
| 4 |
+
EMOJI_UNICODE: Dict[Text, Text]
|
| 5 |
+
UNICODE_EMOJI: Dict[Text, Text]
|
| 6 |
+
UNICODE_EMOJI_ALIAS: Dict[Text, Text]
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/extensions/admonition.pyi
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Pattern
|
| 2 |
+
|
| 3 |
+
from markdown.blockprocessors import BlockProcessor
|
| 4 |
+
from markdown.extensions import Extension
|
| 5 |
+
|
| 6 |
+
class AdmonitionExtension(Extension): ...
|
| 7 |
+
|
| 8 |
+
class AdmonitionProcessor(BlockProcessor):
|
| 9 |
+
CLASSNAME: str = ...
|
| 10 |
+
CLASSNAME_TITLE: str = ...
|
| 11 |
+
RE: Pattern
|
| 12 |
+
RE_SPACES: Any
|
| 13 |
+
def get_class_and_title(self, match): ...
|
| 14 |
+
|
| 15 |
+
def makeExtension(**kwargs): ...
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/extensions/codehilite.pyi
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Optional
|
| 2 |
+
|
| 3 |
+
from markdown.extensions import Extension
|
| 4 |
+
from markdown.treeprocessors import Treeprocessor
|
| 5 |
+
|
| 6 |
+
pygments: bool
|
| 7 |
+
|
| 8 |
+
def parse_hl_lines(expr): ...
|
| 9 |
+
|
| 10 |
+
class CodeHilite:
|
| 11 |
+
src: Any
|
| 12 |
+
lang: Any
|
| 13 |
+
linenums: Any
|
| 14 |
+
guess_lang: Any
|
| 15 |
+
css_class: Any
|
| 16 |
+
style: Any
|
| 17 |
+
noclasses: Any
|
| 18 |
+
tab_length: Any
|
| 19 |
+
hl_lines: Any
|
| 20 |
+
use_pygments: Any
|
| 21 |
+
def __init__(
|
| 22 |
+
self,
|
| 23 |
+
src: Optional[Any] = ...,
|
| 24 |
+
linenums: Optional[Any] = ...,
|
| 25 |
+
guess_lang: bool = ...,
|
| 26 |
+
css_class: str = ...,
|
| 27 |
+
lang: Optional[Any] = ...,
|
| 28 |
+
style: str = ...,
|
| 29 |
+
noclasses: bool = ...,
|
| 30 |
+
tab_length: int = ...,
|
| 31 |
+
hl_lines: Optional[Any] = ...,
|
| 32 |
+
use_pygments: bool = ...,
|
| 33 |
+
) -> None: ...
|
| 34 |
+
def hilite(self): ...
|
| 35 |
+
|
| 36 |
+
class HiliteTreeprocessor(Treeprocessor):
|
| 37 |
+
def code_unescape(self, text): ...
|
| 38 |
+
|
| 39 |
+
class CodeHiliteExtension(Extension):
|
| 40 |
+
def __init__(self, **kwargs) -> None: ...
|
| 41 |
+
|
| 42 |
+
def makeExtension(**kwargs): ...
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/extensions/meta.pyi
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Pattern
|
| 2 |
+
|
| 3 |
+
from markdown.extensions import Extension
|
| 4 |
+
from markdown.preprocessors import Preprocessor
|
| 5 |
+
|
| 6 |
+
log: Any
|
| 7 |
+
META_RE: Pattern
|
| 8 |
+
META_MORE_RE: Pattern
|
| 9 |
+
BEGIN_RE: Pattern
|
| 10 |
+
END_RE: Pattern
|
| 11 |
+
|
| 12 |
+
class MetaExtension(Extension):
|
| 13 |
+
md: Any
|
| 14 |
+
def reset(self) -> None: ...
|
| 15 |
+
|
| 16 |
+
class MetaPreprocessor(Preprocessor): ...
|
| 17 |
+
|
| 18 |
+
def makeExtension(**kwargs): ...
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/pyVmomi/__init__.pyi
ADDED
|
File without changes
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/pyVmomi/vim/__init__.pyi
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datetime import datetime
|
| 2 |
+
from enum import Enum
|
| 3 |
+
from typing import Any, List
|
| 4 |
+
|
| 5 |
+
from ..vmodl.query import PropertyCollector
|
| 6 |
+
from .event import EventManager
|
| 7 |
+
from .option import OptionManager
|
| 8 |
+
from .view import ViewManager
|
| 9 |
+
|
| 10 |
+
def __getattr__(name: str) -> Any: ... # incomplete
|
| 11 |
+
|
| 12 |
+
class ManagedObject: ...
|
| 13 |
+
|
| 14 |
+
class ManagedEntity(ManagedObject):
|
| 15 |
+
_moId: str
|
| 16 |
+
obj: None
|
| 17 |
+
name: str
|
| 18 |
+
def __getattr__(self, name: str) -> Any: ... # incomplete
|
| 19 |
+
|
| 20 |
+
class ServiceInstanceContent:
|
| 21 |
+
setting: OptionManager
|
| 22 |
+
propertyCollector: PropertyCollector
|
| 23 |
+
rootFolder: Folder
|
| 24 |
+
viewManager: ViewManager
|
| 25 |
+
perfManager: PerformanceManager
|
| 26 |
+
eventManager: EventManager
|
| 27 |
+
def __getattr__(self, name: str) -> Any: ... # incomplete
|
| 28 |
+
|
| 29 |
+
class ServiceInstance:
|
| 30 |
+
content: ServiceInstanceContent
|
| 31 |
+
def CurrentTime(self) -> datetime: ...
|
| 32 |
+
def __getattr__(self, name: str) -> Any: ... # incomplete
|
| 33 |
+
|
| 34 |
+
class PerformanceManager:
|
| 35 |
+
class MetricId:
|
| 36 |
+
counterId: int
|
| 37 |
+
instance: str
|
| 38 |
+
def __init__(self, counterId: int, instance: str): ...
|
| 39 |
+
class PerfCounterInfo:
|
| 40 |
+
key: int
|
| 41 |
+
groupInfo: Any
|
| 42 |
+
nameInfo: Any
|
| 43 |
+
rollupType: Any
|
| 44 |
+
def __getattr__(self, name: str) -> Any: ... # incomplete
|
| 45 |
+
class QuerySpec:
|
| 46 |
+
entity: ManagedEntity
|
| 47 |
+
metricId: List[PerformanceManager.MetricId]
|
| 48 |
+
intervalId: int
|
| 49 |
+
maxSample: int
|
| 50 |
+
startTime: datetime
|
| 51 |
+
def __getattr__(self, name: str) -> Any: ... # incomplete
|
| 52 |
+
class EntityMetricBase:
|
| 53 |
+
entity: ManagedEntity
|
| 54 |
+
def QueryPerfCounterByLevel(self, collection_level: int) -> List[PerformanceManager.PerfCounterInfo]: ...
|
| 55 |
+
def QueryPerf(self, querySpec: List[PerformanceManager.QuerySpec]) -> List[PerformanceManager.EntityMetricBase]: ...
|
| 56 |
+
def __getattr__(self, name: str) -> Any: ... # incomplete
|
| 57 |
+
|
| 58 |
+
class ClusterComputeResource(ManagedEntity): ...
|
| 59 |
+
class ComputeResource(ManagedEntity): ...
|
| 60 |
+
class Datacenter(ManagedEntity): ...
|
| 61 |
+
class Datastore(ManagedEntity): ...
|
| 62 |
+
class Folder(ManagedEntity): ...
|
| 63 |
+
class HostSystem(ManagedEntity): ...
|
| 64 |
+
class VirtualMachine(ManagedEntity): ...
|
| 65 |
+
|
| 66 |
+
class VirtualMachinePowerState(Enum):
|
| 67 |
+
poweredOff: int
|
| 68 |
+
poweredOn: int
|
| 69 |
+
suspended: int
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/pyVmomi/vim/fault.pyi
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any
|
| 2 |
+
|
| 3 |
+
def __getattr__(name: str) -> Any: ... # incomplete
|
| 4 |
+
|
| 5 |
+
class InvalidName(Exception): ...
|
| 6 |
+
class RestrictedByAdministrator(Exception): ...
|
| 7 |
+
class NoPermission(Exception): ...
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/pyVmomi/vim/option.pyi
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, List
|
| 2 |
+
|
| 3 |
+
def __getattr__(name: str) -> Any: ... # incomplete
|
| 4 |
+
|
| 5 |
+
class OptionManager:
|
| 6 |
+
def QueryOptions(self, name: str) -> List[OptionValue]: ...
|
| 7 |
+
|
| 8 |
+
class OptionValue:
|
| 9 |
+
value: Any
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/pyVmomi/vim/view.pyi
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, List, Type
|
| 2 |
+
|
| 3 |
+
from pyVmomi.vim import ManagedEntity
|
| 4 |
+
|
| 5 |
+
def __getattr__(name: str) -> Any: ... # incomplete
|
| 6 |
+
|
| 7 |
+
class ContainerView:
|
| 8 |
+
def Destroy(self) -> None: ...
|
| 9 |
+
|
| 10 |
+
class ViewManager:
|
| 11 |
+
# Doc says the `type` parameter of CreateContainerView is a `List[str]`,
|
| 12 |
+
# but in practice it seems to be `List[Type[ManagedEntity]]`
|
| 13 |
+
# Source: https://pubs.vmware.com/vi-sdk/visdk250/ReferenceGuide/vim.view.ViewManager.html
|
| 14 |
+
@staticmethod
|
| 15 |
+
def CreateContainerView(container: ManagedEntity, type: List[Type[ManagedEntity]], recursive: bool) -> ContainerView: ...
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/pyVmomi/vmodl/__init__.pyi
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any
|
| 2 |
+
|
| 3 |
+
class DynamicProperty:
|
| 4 |
+
name: str
|
| 5 |
+
val: Any
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/requests/api.pyi
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from _typeshed import SupportsItems
|
| 2 |
+
from typing import Iterable, Optional, Text, Tuple, Union
|
| 3 |
+
|
| 4 |
+
from .models import Response
|
| 5 |
+
from .sessions import _Data
|
| 6 |
+
|
| 7 |
+
_ParamsMappingKeyType = Union[Text, bytes, int, float]
|
| 8 |
+
_ParamsMappingValueType = Union[Text, bytes, int, float, Iterable[Union[Text, bytes, int, float]], None]
|
| 9 |
+
|
| 10 |
+
def request(method: str, url: str, **kwargs) -> Response: ...
|
| 11 |
+
def get(
|
| 12 |
+
url: Union[Text, bytes],
|
| 13 |
+
params: Optional[
|
| 14 |
+
Union[
|
| 15 |
+
SupportsItems[_ParamsMappingKeyType, _ParamsMappingValueType],
|
| 16 |
+
Tuple[_ParamsMappingKeyType, _ParamsMappingValueType],
|
| 17 |
+
Iterable[Tuple[_ParamsMappingKeyType, _ParamsMappingValueType]],
|
| 18 |
+
Union[Text, bytes],
|
| 19 |
+
]
|
| 20 |
+
] = ...,
|
| 21 |
+
**kwargs,
|
| 22 |
+
) -> Response: ...
|
| 23 |
+
def options(url: Union[Text, bytes], **kwargs) -> Response: ...
|
| 24 |
+
def head(url: Union[Text, bytes], **kwargs) -> Response: ...
|
| 25 |
+
def post(url: Union[Text, bytes], data: _Data = ..., json=..., **kwargs) -> Response: ...
|
| 26 |
+
def put(url: Union[Text, bytes], data: _Data = ..., json=..., **kwargs) -> Response: ...
|
| 27 |
+
def patch(url: Union[Text, bytes], data: _Data = ..., json=..., **kwargs) -> Response: ...
|
| 28 |
+
def delete(url: Union[Text, bytes], **kwargs) -> Response: ...
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/requests/exceptions.pyi
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any
|
| 2 |
+
|
| 3 |
+
from .packages.urllib3.exceptions import HTTPError as BaseHTTPError
|
| 4 |
+
|
| 5 |
+
class RequestException(IOError):
|
| 6 |
+
response: Any
|
| 7 |
+
request: Any
|
| 8 |
+
def __init__(self, *args, **kwargs) -> None: ...
|
| 9 |
+
|
| 10 |
+
class HTTPError(RequestException): ...
|
| 11 |
+
class ConnectionError(RequestException): ...
|
| 12 |
+
class ProxyError(ConnectionError): ...
|
| 13 |
+
class SSLError(ConnectionError): ...
|
| 14 |
+
class Timeout(RequestException): ...
|
| 15 |
+
class ConnectTimeout(ConnectionError, Timeout): ...
|
| 16 |
+
class ReadTimeout(Timeout): ...
|
| 17 |
+
class URLRequired(RequestException): ...
|
| 18 |
+
class TooManyRedirects(RequestException): ...
|
| 19 |
+
class MissingSchema(RequestException, ValueError): ...
|
| 20 |
+
class InvalidSchema(RequestException, ValueError): ...
|
| 21 |
+
class InvalidURL(RequestException, ValueError): ...
|
| 22 |
+
class InvalidHeader(RequestException, ValueError): ...
|
| 23 |
+
class InvalidProxyURL(InvalidURL): ...
|
| 24 |
+
class ChunkedEncodingError(RequestException): ...
|
| 25 |
+
class ContentDecodingError(RequestException, BaseHTTPError): ...
|
| 26 |
+
class StreamConsumedError(RequestException, TypeError): ...
|
| 27 |
+
class RetryError(RequestException): ...
|
| 28 |
+
class UnrewindableBodyError(RequestException): ...
|
| 29 |
+
class RequestsWarning(Warning): ...
|
| 30 |
+
class FileModeWarning(RequestsWarning, DeprecationWarning): ...
|
| 31 |
+
class RequestsDependencyWarning(RequestsWarning): ...
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/requests/models.pyi
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import datetime
|
| 2 |
+
from typing import Any, Dict, Iterator, List, Optional, Text, Union
|
| 3 |
+
|
| 4 |
+
from . import auth, cookies, exceptions, hooks, status_codes, structures, utils
|
| 5 |
+
from .cookies import RequestsCookieJar
|
| 6 |
+
from .packages.urllib3 import exceptions as urllib3_exceptions, fields, filepost, util
|
| 7 |
+
|
| 8 |
+
default_hooks = hooks.default_hooks
|
| 9 |
+
CaseInsensitiveDict = structures.CaseInsensitiveDict
|
| 10 |
+
HTTPBasicAuth = auth.HTTPBasicAuth
|
| 11 |
+
cookiejar_from_dict = cookies.cookiejar_from_dict
|
| 12 |
+
get_cookie_header = cookies.get_cookie_header
|
| 13 |
+
RequestField = fields.RequestField
|
| 14 |
+
encode_multipart_formdata = filepost.encode_multipart_formdata
|
| 15 |
+
parse_url = util.parse_url
|
| 16 |
+
DecodeError = urllib3_exceptions.DecodeError
|
| 17 |
+
ReadTimeoutError = urllib3_exceptions.ReadTimeoutError
|
| 18 |
+
ProtocolError = urllib3_exceptions.ProtocolError
|
| 19 |
+
LocationParseError = urllib3_exceptions.LocationParseError
|
| 20 |
+
HTTPError = exceptions.HTTPError
|
| 21 |
+
MissingSchema = exceptions.MissingSchema
|
| 22 |
+
InvalidURL = exceptions.InvalidURL
|
| 23 |
+
ChunkedEncodingError = exceptions.ChunkedEncodingError
|
| 24 |
+
ContentDecodingError = exceptions.ContentDecodingError
|
| 25 |
+
ConnectionError = exceptions.ConnectionError
|
| 26 |
+
StreamConsumedError = exceptions.StreamConsumedError
|
| 27 |
+
guess_filename = utils.guess_filename
|
| 28 |
+
get_auth_from_url = utils.get_auth_from_url
|
| 29 |
+
requote_uri = utils.requote_uri
|
| 30 |
+
stream_decode_response_unicode = utils.stream_decode_response_unicode
|
| 31 |
+
to_key_val_list = utils.to_key_val_list
|
| 32 |
+
parse_header_links = utils.parse_header_links
|
| 33 |
+
iter_slices = utils.iter_slices
|
| 34 |
+
guess_json_utf = utils.guess_json_utf
|
| 35 |
+
super_len = utils.super_len
|
| 36 |
+
to_native_string = utils.to_native_string
|
| 37 |
+
codes = status_codes.codes
|
| 38 |
+
|
| 39 |
+
REDIRECT_STATI: Any
|
| 40 |
+
DEFAULT_REDIRECT_LIMIT: Any
|
| 41 |
+
CONTENT_CHUNK_SIZE: Any
|
| 42 |
+
ITER_CHUNK_SIZE: Any
|
| 43 |
+
|
| 44 |
+
class RequestEncodingMixin:
|
| 45 |
+
@property
|
| 46 |
+
def path_url(self): ...
|
| 47 |
+
|
| 48 |
+
class RequestHooksMixin:
|
| 49 |
+
def register_hook(self, event, hook): ...
|
| 50 |
+
def deregister_hook(self, event, hook): ...
|
| 51 |
+
|
| 52 |
+
class Request(RequestHooksMixin):
|
| 53 |
+
hooks: Any
|
| 54 |
+
method: Any
|
| 55 |
+
url: Any
|
| 56 |
+
headers: Any
|
| 57 |
+
files: Any
|
| 58 |
+
data: Any
|
| 59 |
+
json: Any
|
| 60 |
+
params: Any
|
| 61 |
+
auth: Any
|
| 62 |
+
cookies: Any
|
| 63 |
+
def __init__(
|
| 64 |
+
self, method=..., url=..., headers=..., files=..., data=..., params=..., auth=..., cookies=..., hooks=..., json=...
|
| 65 |
+
) -> None: ...
|
| 66 |
+
def prepare(self) -> PreparedRequest: ...
|
| 67 |
+
|
| 68 |
+
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
|
| 69 |
+
method: Optional[Union[str, Text]]
|
| 70 |
+
url: Optional[Union[str, Text]]
|
| 71 |
+
headers: CaseInsensitiveDict[str]
|
| 72 |
+
body: Optional[Union[bytes, Text]]
|
| 73 |
+
hooks: Any
|
| 74 |
+
def __init__(self) -> None: ...
|
| 75 |
+
def prepare(
|
| 76 |
+
self, method=..., url=..., headers=..., files=..., data=..., params=..., auth=..., cookies=..., hooks=..., json=...
|
| 77 |
+
) -> None: ...
|
| 78 |
+
def copy(self) -> PreparedRequest: ...
|
| 79 |
+
def prepare_method(self, method) -> None: ...
|
| 80 |
+
def prepare_url(self, url, params) -> None: ...
|
| 81 |
+
def prepare_headers(self, headers) -> None: ...
|
| 82 |
+
def prepare_body(self, data, files, json=...) -> None: ...
|
| 83 |
+
def prepare_content_length(self, body) -> None: ...
|
| 84 |
+
def prepare_auth(self, auth, url=...) -> None: ...
|
| 85 |
+
def prepare_cookies(self, cookies) -> None: ...
|
| 86 |
+
def prepare_hooks(self, hooks) -> None: ...
|
| 87 |
+
|
| 88 |
+
class Response:
|
| 89 |
+
__attrs__: Any
|
| 90 |
+
_content: Optional[bytes] # undocumented
|
| 91 |
+
status_code: int
|
| 92 |
+
headers: CaseInsensitiveDict[str]
|
| 93 |
+
raw: Any
|
| 94 |
+
url: str
|
| 95 |
+
encoding: str
|
| 96 |
+
history: List[Response]
|
| 97 |
+
reason: str
|
| 98 |
+
cookies: RequestsCookieJar
|
| 99 |
+
elapsed: datetime.timedelta
|
| 100 |
+
request: PreparedRequest
|
| 101 |
+
def __init__(self) -> None: ...
|
| 102 |
+
def __bool__(self) -> bool: ...
|
| 103 |
+
def __nonzero__(self) -> bool: ...
|
| 104 |
+
def __iter__(self) -> Iterator[bytes]: ...
|
| 105 |
+
def __enter__(self) -> Response: ...
|
| 106 |
+
def __exit__(self, *args: Any) -> None: ...
|
| 107 |
+
@property
|
| 108 |
+
def next(self) -> Optional[PreparedRequest]: ...
|
| 109 |
+
@property
|
| 110 |
+
def ok(self) -> bool: ...
|
| 111 |
+
@property
|
| 112 |
+
def is_redirect(self) -> bool: ...
|
| 113 |
+
@property
|
| 114 |
+
def is_permanent_redirect(self) -> bool: ...
|
| 115 |
+
@property
|
| 116 |
+
def apparent_encoding(self) -> str: ...
|
| 117 |
+
def iter_content(self, chunk_size: Optional[int] = ..., decode_unicode: bool = ...) -> Iterator[Any]: ...
|
| 118 |
+
def iter_lines(
|
| 119 |
+
self, chunk_size: Optional[int] = ..., decode_unicode: bool = ..., delimiter: Optional[Union[Text, bytes]] = ...
|
| 120 |
+
) -> Iterator[Any]: ...
|
| 121 |
+
@property
|
| 122 |
+
def content(self) -> bytes: ...
|
| 123 |
+
@property
|
| 124 |
+
def text(self) -> str: ...
|
| 125 |
+
def json(self, **kwargs) -> Any: ...
|
| 126 |
+
@property
|
| 127 |
+
def links(self) -> Dict[Any, Any]: ...
|
| 128 |
+
def raise_for_status(self) -> None: ...
|
| 129 |
+
def close(self) -> None: ...
|
moondream/lib/python3.10/site-packages/gradio_client/__pycache__/media_data.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:780e6178d4cd01a86e4a8ed7aad0662f32d28cb73de5d948686d0616bc9127a9
|
| 3 |
+
size 452039
|
moondream/lib/python3.10/site-packages/sympy/logic/__pycache__/boolalg.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2662d796c17fb36fe296aa8025d6cd6876c806e2ef3f1045a396c91ef10cd2ef
|
| 3 |
+
size 100319
|
moondream/lib/python3.10/site-packages/torch/__pycache__/_tensor_docs.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:56aadbce83b98a59784aae5c81795b63c1bd60b85d0e208b0df67165a12c847c
|
| 3 |
+
size 134199
|
moondream/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:84f91e7611238b2061b0dade073ed52593044df49303bafd80d20961f622cc52
|
| 3 |
+
size 119182
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/ATenGeneral.h
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/macros/Macros.h>
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/Array.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// A fixed-size array type usable from both host and
|
| 4 |
+
// device code.
|
| 5 |
+
|
| 6 |
+
#include <c10/macros/Macros.h>
|
| 7 |
+
#include <c10/util/irange.h>
|
| 8 |
+
|
| 9 |
+
namespace at { namespace detail {
|
| 10 |
+
|
| 11 |
+
template <typename T, int size_>
|
| 12 |
+
struct Array {
|
| 13 |
+
T data[size_];
|
| 14 |
+
|
| 15 |
+
C10_HOST_DEVICE T operator[](int i) const {
|
| 16 |
+
return data[i];
|
| 17 |
+
}
|
| 18 |
+
C10_HOST_DEVICE T& operator[](int i) {
|
| 19 |
+
return data[i];
|
| 20 |
+
}
|
| 21 |
+
#if defined(USE_ROCM)
|
| 22 |
+
C10_HOST_DEVICE Array() = default;
|
| 23 |
+
C10_HOST_DEVICE Array(const Array&) = default;
|
| 24 |
+
C10_HOST_DEVICE Array& operator=(const Array&) = default;
|
| 25 |
+
#else
|
| 26 |
+
Array() = default;
|
| 27 |
+
Array(const Array&) = default;
|
| 28 |
+
Array& operator=(const Array&) = default;
|
| 29 |
+
#endif
|
| 30 |
+
static constexpr int size(){return size_;}
|
| 31 |
+
// Fill the array with x.
|
| 32 |
+
C10_HOST_DEVICE Array(T x) {
|
| 33 |
+
for (int i = 0; i < size_; i++) {
|
| 34 |
+
data[i] = x;
|
| 35 |
+
}
|
| 36 |
+
}
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
}}
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/DistributionsHelper.h
ADDED
|
@@ -0,0 +1,337 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/Array.h>
|
| 4 |
+
#include <ATen/core/TransformationHelper.h>
|
| 5 |
+
#include <c10/util/Half.h>
|
| 6 |
+
#include <c10/util/BFloat16.h>
|
| 7 |
+
#include <c10/util/MathConstants.h>
|
| 8 |
+
#include <c10/util/Optional.h>
|
| 9 |
+
#include <c10/macros/Macros.h>
|
| 10 |
+
|
| 11 |
+
#include <type_traits>
|
| 12 |
+
#include <limits>
|
| 13 |
+
#include <cmath>
|
| 14 |
+
|
| 15 |
+
/**
|
| 16 |
+
* Distributions kernel adapted from THRandom.cpp
|
| 17 |
+
* The kernels try to follow std::random distributions signature
|
| 18 |
+
* For instance: in ATen
|
| 19 |
+
* auto gen = at::detail::createCPUGenerator();
|
| 20 |
+
* at::uniform_real_distribution<double> uniform(0, 1);
|
| 21 |
+
* auto sample = uniform(gen.get());
|
| 22 |
+
*
|
| 23 |
+
* vs std::random
|
| 24 |
+
*
|
| 25 |
+
* std::mt19937 gen;
|
| 26 |
+
* std::uniform_real_distribution uniform(0, 1);
|
| 27 |
+
* auto sample = uniform(gen);
|
| 28 |
+
*/
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
namespace at {
|
| 32 |
+
namespace {
|
| 33 |
+
|
| 34 |
+
/**
|
| 35 |
+
* Samples a discrete uniform distribution in the range [base, base+range) of type T
|
| 36 |
+
*/
|
| 37 |
+
template <typename T>
|
| 38 |
+
struct uniform_int_from_to_distribution {
|
| 39 |
+
|
| 40 |
+
C10_HOST_DEVICE inline uniform_int_from_to_distribution(uint64_t range, int64_t base) : range_(range), base_(base) {}
|
| 41 |
+
|
| 42 |
+
template <typename RNG>
|
| 43 |
+
C10_HOST_DEVICE inline T operator()(RNG generator) {
|
| 44 |
+
if ((
|
| 45 |
+
std::is_same<T, int64_t>::value ||
|
| 46 |
+
std::is_same<T, double>::value ||
|
| 47 |
+
std::is_same<T, float>::value ||
|
| 48 |
+
std::is_same<T, at::BFloat16>::value) && range_ >= 1ULL << 32)
|
| 49 |
+
{
|
| 50 |
+
return transformation::uniform_int_from_to<T>(generator->random64(), range_, base_);
|
| 51 |
+
} else {
|
| 52 |
+
return transformation::uniform_int_from_to<T>(generator->random(), range_, base_);
|
| 53 |
+
}
|
| 54 |
+
}
|
| 55 |
+
|
| 56 |
+
private:
|
| 57 |
+
uint64_t range_;
|
| 58 |
+
int64_t base_;
|
| 59 |
+
};
|
| 60 |
+
|
| 61 |
+
/**
|
| 62 |
+
* Samples a discrete uniform distribution in the range [min_value(int64_t), max_value(int64_t)]
|
| 63 |
+
*/
|
| 64 |
+
template <typename T>
|
| 65 |
+
struct uniform_int_full_range_distribution {
|
| 66 |
+
|
| 67 |
+
template <typename RNG>
|
| 68 |
+
C10_HOST_DEVICE inline T operator()(RNG generator) {
|
| 69 |
+
return transformation::uniform_int_full_range<T>(generator->random64());
|
| 70 |
+
}
|
| 71 |
+
|
| 72 |
+
};
|
| 73 |
+
|
| 74 |
+
/**
|
| 75 |
+
* Samples a discrete uniform distribution in the range [0, max_value(T)] for integral types
|
| 76 |
+
* and [0, 2^mantissa] for floating-point types.
|
| 77 |
+
*/
|
| 78 |
+
template <typename T>
|
| 79 |
+
struct uniform_int_distribution {
|
| 80 |
+
|
| 81 |
+
template <typename RNG>
|
| 82 |
+
C10_HOST_DEVICE inline T operator()(RNG generator) {
|
| 83 |
+
if constexpr (std::is_same_v<T, double> || std::is_same_v<T, int64_t>) {
|
| 84 |
+
return transformation::uniform_int<T>(generator->random64());
|
| 85 |
+
} else {
|
| 86 |
+
return transformation::uniform_int<T>(generator->random());
|
| 87 |
+
}
|
| 88 |
+
}
|
| 89 |
+
|
| 90 |
+
};
|
| 91 |
+
|
| 92 |
+
/**
|
| 93 |
+
* Samples a uniform distribution in the range [from, to) of type T
|
| 94 |
+
*/
|
| 95 |
+
template <typename T>
|
| 96 |
+
struct uniform_real_distribution {
|
| 97 |
+
|
| 98 |
+
C10_HOST_DEVICE inline uniform_real_distribution(T from, T to) {
|
| 99 |
+
TORCH_CHECK_IF_NOT_ON_CUDA(from <= to);
|
| 100 |
+
TORCH_CHECK_IF_NOT_ON_CUDA(to - from <= std::numeric_limits<T>::max());
|
| 101 |
+
from_ = from;
|
| 102 |
+
to_ = to;
|
| 103 |
+
}
|
| 104 |
+
|
| 105 |
+
template <typename RNG>
|
| 106 |
+
C10_HOST_DEVICE inline dist_acctype<T> operator()(RNG generator){
|
| 107 |
+
if constexpr (std::is_same_v<T, double>) {
|
| 108 |
+
return transformation::uniform_real<T>(generator->random64(), from_, to_);
|
| 109 |
+
} else {
|
| 110 |
+
return transformation::uniform_real<T>(generator->random(), from_, to_);
|
| 111 |
+
}
|
| 112 |
+
}
|
| 113 |
+
|
| 114 |
+
private:
|
| 115 |
+
T from_;
|
| 116 |
+
T to_;
|
| 117 |
+
};
|
| 118 |
+
|
| 119 |
+
// The SFINAE checks introduced in #39816 looks overcomplicated and must revisited
|
| 120 |
+
// https://github.com/pytorch/pytorch/issues/40052
|
| 121 |
+
#define DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(member) \
|
| 122 |
+
template <typename T> \
|
| 123 |
+
struct has_member_##member \
|
| 124 |
+
{ \
|
| 125 |
+
typedef char yes; \
|
| 126 |
+
typedef long no; \
|
| 127 |
+
template <typename U> static yes test(decltype(&U::member)); \
|
| 128 |
+
template <typename U> static no test(...); \
|
| 129 |
+
static constexpr bool value = sizeof(test<T>(0)) == sizeof(yes); \
|
| 130 |
+
}
|
| 131 |
+
|
| 132 |
+
DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(next_double_normal_sample);
|
| 133 |
+
DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(set_next_double_normal_sample);
|
| 134 |
+
DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(next_float_normal_sample);
|
| 135 |
+
DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(set_next_float_normal_sample);
|
| 136 |
+
|
| 137 |
+
#define DISTRIBUTION_HELPER_GENERATE_NEXT_NORMAL_METHODS(TYPE) \
|
| 138 |
+
\
|
| 139 |
+
template <typename RNG, typename ret_type, \
|
| 140 |
+
typename std::enable_if_t<( \
|
| 141 |
+
has_member_next_##TYPE##_normal_sample<RNG>::value && \
|
| 142 |
+
has_member_set_next_##TYPE##_normal_sample<RNG>::value \
|
| 143 |
+
), int> = 0> \
|
| 144 |
+
C10_HOST_DEVICE inline bool maybe_get_next_##TYPE##_normal_sample(RNG* generator, ret_type* ret) { \
|
| 145 |
+
if (generator->next_##TYPE##_normal_sample()) { \
|
| 146 |
+
*ret = *(generator->next_##TYPE##_normal_sample()); \
|
| 147 |
+
generator->set_next_##TYPE##_normal_sample(c10::optional<TYPE>()); \
|
| 148 |
+
return true; \
|
| 149 |
+
} \
|
| 150 |
+
return false; \
|
| 151 |
+
} \
|
| 152 |
+
\
|
| 153 |
+
template <typename RNG, typename ret_type, \
|
| 154 |
+
typename std::enable_if_t<( \
|
| 155 |
+
!has_member_next_##TYPE##_normal_sample<RNG>::value || \
|
| 156 |
+
!has_member_set_next_##TYPE##_normal_sample<RNG>::value \
|
| 157 |
+
), int> = 0> \
|
| 158 |
+
C10_HOST_DEVICE inline bool maybe_get_next_##TYPE##_normal_sample(RNG* /*generator*/, ret_type* /*ret*/) { \
|
| 159 |
+
return false; \
|
| 160 |
+
} \
|
| 161 |
+
\
|
| 162 |
+
template <typename RNG, typename ret_type, \
|
| 163 |
+
typename std::enable_if_t<( \
|
| 164 |
+
has_member_set_next_##TYPE##_normal_sample<RNG>::value \
|
| 165 |
+
), int> = 0> \
|
| 166 |
+
C10_HOST_DEVICE inline void maybe_set_next_##TYPE##_normal_sample(RNG* generator, ret_type cache) { \
|
| 167 |
+
generator->set_next_##TYPE##_normal_sample(cache); \
|
| 168 |
+
} \
|
| 169 |
+
\
|
| 170 |
+
template <typename RNG, typename ret_type, \
|
| 171 |
+
typename std::enable_if_t<( \
|
| 172 |
+
!has_member_set_next_##TYPE##_normal_sample<RNG>::value \
|
| 173 |
+
), int> = 0> \
|
| 174 |
+
C10_HOST_DEVICE inline void maybe_set_next_##TYPE##_normal_sample(RNG* /*generator*/, ret_type /*cache*/) { \
|
| 175 |
+
}
|
| 176 |
+
|
| 177 |
+
DISTRIBUTION_HELPER_GENERATE_NEXT_NORMAL_METHODS(double);
|
| 178 |
+
DISTRIBUTION_HELPER_GENERATE_NEXT_NORMAL_METHODS(float);
|
| 179 |
+
|
| 180 |
+
/**
|
| 181 |
+
* Samples a normal distribution using the Box-Muller method
|
| 182 |
+
* Takes mean and standard deviation as inputs
|
| 183 |
+
* Note that Box-muller method returns two samples at a time.
|
| 184 |
+
* Hence, we cache the "next" sample in the CPUGeneratorImpl class.
|
| 185 |
+
*/
|
| 186 |
+
template <typename T>
|
| 187 |
+
struct normal_distribution {
|
| 188 |
+
|
| 189 |
+
C10_HOST_DEVICE inline normal_distribution(T mean_in, T stdv_in) {
|
| 190 |
+
TORCH_CHECK_IF_NOT_ON_CUDA(stdv_in >= 0, "stdv_in must be positive: ", stdv_in);
|
| 191 |
+
mean = mean_in;
|
| 192 |
+
stdv = stdv_in;
|
| 193 |
+
}
|
| 194 |
+
|
| 195 |
+
template <typename RNG>
|
| 196 |
+
C10_HOST_DEVICE inline dist_acctype<T> operator()(RNG generator){
|
| 197 |
+
dist_acctype<T> ret;
|
| 198 |
+
// return cached values if available
|
| 199 |
+
if constexpr (std::is_same_v<T, double>) {
|
| 200 |
+
if (maybe_get_next_double_normal_sample(generator, &ret)) {
|
| 201 |
+
return transformation::normal(ret, mean, stdv);
|
| 202 |
+
}
|
| 203 |
+
} else {
|
| 204 |
+
if (maybe_get_next_float_normal_sample(generator, &ret)) {
|
| 205 |
+
return transformation::normal(ret, mean, stdv);
|
| 206 |
+
}
|
| 207 |
+
}
|
| 208 |
+
// otherwise generate new normal values
|
| 209 |
+
uniform_real_distribution<T> uniform(0.0, 1.0);
|
| 210 |
+
const dist_acctype<T> u1 = uniform(generator);
|
| 211 |
+
const dist_acctype<T> u2 = uniform(generator);
|
| 212 |
+
const dist_acctype<T> r = ::sqrt(static_cast<T>(-2.0) * ::log1p(-u2));
|
| 213 |
+
const dist_acctype<T> theta = static_cast<T>(2.0) * c10::pi<T> * u1;
|
| 214 |
+
if constexpr (std::is_same_v<T, double>) {
|
| 215 |
+
maybe_set_next_double_normal_sample(generator, r * ::sin(theta));
|
| 216 |
+
} else {
|
| 217 |
+
maybe_set_next_float_normal_sample(generator, r * ::sin(theta));
|
| 218 |
+
}
|
| 219 |
+
ret = r * ::cos(theta);
|
| 220 |
+
return transformation::normal(ret, mean, stdv);
|
| 221 |
+
}
|
| 222 |
+
|
| 223 |
+
private:
|
| 224 |
+
T mean;
|
| 225 |
+
T stdv;
|
| 226 |
+
};
|
| 227 |
+
|
| 228 |
+
template <typename T>
|
| 229 |
+
struct DiscreteDistributionType { using type = float; };
|
| 230 |
+
|
| 231 |
+
template <> struct DiscreteDistributionType<double> { using type = double; };
|
| 232 |
+
|
| 233 |
+
/**
|
| 234 |
+
* Samples a bernoulli distribution given a probability input
|
| 235 |
+
*/
|
| 236 |
+
template <typename T>
|
| 237 |
+
struct bernoulli_distribution {
|
| 238 |
+
|
| 239 |
+
C10_HOST_DEVICE inline bernoulli_distribution(T p_in) {
|
| 240 |
+
TORCH_CHECK_IF_NOT_ON_CUDA(p_in >= 0 && p_in <= 1);
|
| 241 |
+
p = p_in;
|
| 242 |
+
}
|
| 243 |
+
|
| 244 |
+
template <typename RNG>
|
| 245 |
+
C10_HOST_DEVICE inline T operator()(RNG generator) {
|
| 246 |
+
uniform_real_distribution<T> uniform(0.0, 1.0);
|
| 247 |
+
return transformation::bernoulli<T>(uniform(generator), p);
|
| 248 |
+
}
|
| 249 |
+
|
| 250 |
+
private:
|
| 251 |
+
T p;
|
| 252 |
+
};
|
| 253 |
+
|
| 254 |
+
/**
|
| 255 |
+
* Samples a geometric distribution given a probability input
|
| 256 |
+
*/
|
| 257 |
+
template <typename T>
|
| 258 |
+
struct geometric_distribution {
|
| 259 |
+
|
| 260 |
+
C10_HOST_DEVICE inline geometric_distribution(T p_in) {
|
| 261 |
+
TORCH_CHECK_IF_NOT_ON_CUDA(p_in > 0 && p_in < 1);
|
| 262 |
+
p = p_in;
|
| 263 |
+
}
|
| 264 |
+
|
| 265 |
+
template <typename RNG>
|
| 266 |
+
C10_HOST_DEVICE inline T operator()(RNG generator) {
|
| 267 |
+
uniform_real_distribution<T> uniform(0.0, 1.0);
|
| 268 |
+
return transformation::geometric<T>(uniform(generator), p);
|
| 269 |
+
}
|
| 270 |
+
|
| 271 |
+
private:
|
| 272 |
+
T p;
|
| 273 |
+
};
|
| 274 |
+
|
| 275 |
+
/**
|
| 276 |
+
* Samples an exponential distribution given a lambda input
|
| 277 |
+
*/
|
| 278 |
+
template <typename T>
|
| 279 |
+
struct exponential_distribution {
|
| 280 |
+
|
| 281 |
+
C10_HOST_DEVICE inline exponential_distribution(T lambda_in) : lambda(lambda_in) {}
|
| 282 |
+
|
| 283 |
+
template <typename RNG>
|
| 284 |
+
C10_HOST_DEVICE inline T operator()(RNG generator) {
|
| 285 |
+
uniform_real_distribution<T> uniform(0.0, 1.0);
|
| 286 |
+
return transformation::exponential<T>(uniform(generator), lambda);
|
| 287 |
+
}
|
| 288 |
+
|
| 289 |
+
private:
|
| 290 |
+
T lambda;
|
| 291 |
+
};
|
| 292 |
+
|
| 293 |
+
/**
|
| 294 |
+
* Samples a cauchy distribution given median and sigma as inputs
|
| 295 |
+
*/
|
| 296 |
+
template <typename T>
|
| 297 |
+
struct cauchy_distribution {
|
| 298 |
+
|
| 299 |
+
C10_HOST_DEVICE inline cauchy_distribution(T median_in, T sigma_in) : median(median_in), sigma(sigma_in) {}
|
| 300 |
+
|
| 301 |
+
template <typename RNG>
|
| 302 |
+
C10_HOST_DEVICE inline T operator()(RNG generator) {
|
| 303 |
+
uniform_real_distribution<T> uniform(0.0, 1.0);
|
| 304 |
+
return transformation::cauchy<T>(uniform(generator), median, sigma);
|
| 305 |
+
}
|
| 306 |
+
|
| 307 |
+
private:
|
| 308 |
+
T median;
|
| 309 |
+
T sigma;
|
| 310 |
+
};
|
| 311 |
+
|
| 312 |
+
/**
|
| 313 |
+
* Samples a lognormal distribution
|
| 314 |
+
* Takes mean and standard deviation as inputs
|
| 315 |
+
* Outputs two samples at a time
|
| 316 |
+
*/
|
| 317 |
+
template <typename T>
|
| 318 |
+
struct lognormal_distribution {
|
| 319 |
+
|
| 320 |
+
C10_HOST_DEVICE inline lognormal_distribution(T mean_in, T stdv_in) {
|
| 321 |
+
TORCH_CHECK_IF_NOT_ON_CUDA(stdv_in > 0);
|
| 322 |
+
mean = mean_in;
|
| 323 |
+
stdv = stdv_in;
|
| 324 |
+
}
|
| 325 |
+
|
| 326 |
+
template<typename RNG>
|
| 327 |
+
C10_HOST_DEVICE inline T operator()(RNG generator){
|
| 328 |
+
normal_distribution<T> normal(mean, stdv);
|
| 329 |
+
return transformation::log_normal<T>(normal(generator));
|
| 330 |
+
}
|
| 331 |
+
|
| 332 |
+
private:
|
| 333 |
+
T mean;
|
| 334 |
+
T stdv;
|
| 335 |
+
};
|
| 336 |
+
}
|
| 337 |
+
} // namespace at
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/NamedTensor.h
ADDED
|
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/Dimname.h>
|
| 4 |
+
#include <c10/core/TensorImpl.h>
|
| 5 |
+
|
| 6 |
+
namespace at {
|
| 7 |
+
|
| 8 |
+
class TensorBase;
|
| 9 |
+
|
| 10 |
+
// XXX: This file exists because TensorImpl is in c10, but Dimname is in ATen.
|
| 11 |
+
// Due to the c10/ATen library split, TensorImpl cannot depend on Dimname,
|
| 12 |
+
// so we have a couple of workarounds.
|
| 13 |
+
//
|
| 14 |
+
// In the long term, we'll move Dimname to c10 and everything in this file
|
| 15 |
+
// can be refactored out. The main blocker for that is that "c10::Symbol"
|
| 16 |
+
// actually exists outside of c10 and needs to be moved in.
|
| 17 |
+
|
| 18 |
+
// TensorImpl has a unique_ptr<NamedTensorMetaInterface> field.
|
| 19 |
+
// XXX: Ideally we would just put optional<vector<Dimname>> into TensorImpl.
|
| 20 |
+
//
|
| 21 |
+
// This class has an important invariant: there must be at least ONE
|
| 22 |
+
// non-wildcard
|
| 23 |
+
struct TORCH_API NamedTensorMeta final : public c10::NamedTensorMetaInterface {
|
| 24 |
+
// This enum is to remind people that the invariant on constructors is that
|
| 25 |
+
// the list of dimnames must have at least one non-wildcard
|
| 26 |
+
enum HAS_NON_WILDCARD {
|
| 27 |
+
HasNonWildcard
|
| 28 |
+
};
|
| 29 |
+
|
| 30 |
+
explicit NamedTensorMeta(HAS_NON_WILDCARD, DimnameList names)
|
| 31 |
+
: names_(names.vec()) {
|
| 32 |
+
check_invariants();
|
| 33 |
+
}
|
| 34 |
+
explicit NamedTensorMeta(HAS_NON_WILDCARD, std::vector<Dimname>&& names)
|
| 35 |
+
: names_(std::move(names)) {
|
| 36 |
+
check_invariants();
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
std::unique_ptr<c10::NamedTensorMetaInterface> clone() const override {
|
| 40 |
+
return std::make_unique<NamedTensorMeta>(HasNonWildcard, names_);
|
| 41 |
+
}
|
| 42 |
+
|
| 43 |
+
DimnameList names() const { return names_; }
|
| 44 |
+
|
| 45 |
+
// Used for an assertion in TensorImpl.h
|
| 46 |
+
int64_t slow_dim() const override {
|
| 47 |
+
return names_.size();
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
void check_invariants() const {
|
| 51 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
| 52 |
+
std::any_of(names_.begin(), names_.end(), [](const Dimname& n) { return !n.isWildcard(); }));
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
void set_names(HAS_NON_WILDCARD, DimnameList new_names) {
|
| 56 |
+
TORCH_INTERNAL_ASSERT(new_names.size() == names_.size());
|
| 57 |
+
std::copy(new_names.begin(), new_names.end(), names_.begin());
|
| 58 |
+
check_invariants();
|
| 59 |
+
}
|
| 60 |
+
|
| 61 |
+
void set_names(HAS_NON_WILDCARD, std::vector<Dimname>&& new_names) {
|
| 62 |
+
TORCH_INTERNAL_ASSERT(new_names.size() == names_.size());
|
| 63 |
+
names_ = std::move(new_names);
|
| 64 |
+
check_invariants();
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
// INVARIANT: at least one Dimname is non-WILDCARD
|
| 68 |
+
std::vector<Dimname> names_;
|
| 69 |
+
};
|
| 70 |
+
|
| 71 |
+
// When NamesMode is disabled, then all operations ignore tensors' names fields.
|
| 72 |
+
// Concretely speaking, all tensors are treated as having nullopt names.
|
| 73 |
+
struct TORCH_API NamesMode {
|
| 74 |
+
static bool is_enabled();
|
| 75 |
+
static void set_enabled(bool enabled);
|
| 76 |
+
};
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
// A RAII, thread local (!) guard that enables or disables names upon
|
| 80 |
+
// construction, and sets it back to the original value upon destruction.
|
| 81 |
+
struct TORCH_API NoNamesGuard {
|
| 82 |
+
NoNamesGuard() : prev_mode(NamesMode::is_enabled()), initialized(true) {
|
| 83 |
+
NamesMode::set_enabled(false);
|
| 84 |
+
}
|
| 85 |
+
~NoNamesGuard() {
|
| 86 |
+
if (initialized) {
|
| 87 |
+
reset();
|
| 88 |
+
}
|
| 89 |
+
}
|
| 90 |
+
void reset() {
|
| 91 |
+
TORCH_INTERNAL_ASSERT(initialized);
|
| 92 |
+
NamesMode::set_enabled(prev_mode);
|
| 93 |
+
}
|
| 94 |
+
private:
|
| 95 |
+
bool prev_mode;
|
| 96 |
+
bool initialized;
|
| 97 |
+
};
|
| 98 |
+
|
| 99 |
+
void check_names_valid_for(const TensorBase& tensor, DimnameList names);
|
| 100 |
+
void check_names_valid_for(size_t tensor_dim, DimnameList names);
|
| 101 |
+
|
| 102 |
+
// Sets the names of `tensor` to be `names`.
|
| 103 |
+
TORCH_API const TensorBase& internal_set_names_inplace(const TensorBase& tensor, c10::optional<DimnameList> names);
|
| 104 |
+
TORCH_API const TensorBase& internal_set_names_inplace(const TensorBase& tensor, std::vector<Dimname>&& names, bool validate_names);
|
| 105 |
+
|
| 106 |
+
constexpr size_t kMaxNamedTensorDim = 64;
|
| 107 |
+
|
| 108 |
+
DimnameList default_names(size_t len);
|
| 109 |
+
|
| 110 |
+
namespace impl {
|
| 111 |
+
|
| 112 |
+
// Some helper functions on TensorImpl. Useful for working with names in TH.
|
| 113 |
+
// XXX: Ideally these would exist as methods on TensorImpl
|
| 114 |
+
TORCH_API void internal_set_names_inplace(TensorImpl* impl, c10::optional<DimnameList> names, bool validate_names);
|
| 115 |
+
TORCH_API void internal_set_names_inplace(TensorImpl* impl, std::vector<Dimname>&& names, bool validate_names);
|
| 116 |
+
|
| 117 |
+
void check_names_valid_for(TensorImpl* impl, DimnameList names);
|
| 118 |
+
|
| 119 |
+
// Returns true if the tensor's names exist and are not all 'None'.
|
| 120 |
+
// Returns false if the tensor's names don't exist (were not allocated),
|
| 121 |
+
// or if all names are 'None'.
|
| 122 |
+
// We treat not-allocated-names the same as allocated names that are all 'None'.
|
| 123 |
+
TORCH_API bool has_names(const TensorImpl* impl);
|
| 124 |
+
|
| 125 |
+
// Returns the names of the tensor's dimensions.
|
| 126 |
+
// Unnamed tensors are treated as having 'None' in all dimension; this method
|
| 127 |
+
// would return a DimnameList of all 'None's for an unnamed tensor.
|
| 128 |
+
TORCH_API DimnameList get_names(const TensorImpl* impl);
|
| 129 |
+
|
| 130 |
+
// This is more of an implementation detail; one should use impl::get_names /
|
| 131 |
+
// Tensor::names() whenever possible because it provides a cleaner API.
|
| 132 |
+
// Returns the names of the tensor if they have been allocated; returns nullopt
|
| 133 |
+
// instead if the haven't been. The names of a tensor are not allocated if a
|
| 134 |
+
// tensor is constructed with names=None.
|
| 135 |
+
TORCH_API c10::optional<DimnameList> get_opt_names(const TensorImpl* impl);
|
| 136 |
+
|
| 137 |
+
} // namespace impl
|
| 138 |
+
|
| 139 |
+
} // namespace at
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/TensorBody.h
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/UndefinedTensorImpl.h
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
#include <c10/core/UndefinedTensorImpl.h>
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/boxing/BoxedKernel.h
ADDED
|
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/boxing/OperatorKernel.h>
|
| 4 |
+
#include <c10/core/DispatchKeySet.h>
|
| 5 |
+
#include <c10/util/intrusive_ptr.h>
|
| 6 |
+
|
| 7 |
+
namespace c10 {
|
| 8 |
+
|
| 9 |
+
struct IValue;
|
| 10 |
+
using Stack = std::vector<IValue>;
|
| 11 |
+
|
| 12 |
+
class OperatorHandle;
|
| 13 |
+
class KernelFunction;
|
| 14 |
+
|
| 15 |
+
// This kernel implements the behavior of falling through to the next available
|
| 16 |
+
// registered dispatch key. The implementation of this function is FAST; it is
|
| 17 |
+
// no overhead to fallthrough to the next key. See cpp file for some more
|
| 18 |
+
// implementation notes; notably, this does NOT actually go through the
|
| 19 |
+
// boxing/unboxing codepath.
|
| 20 |
+
TORCH_API void fallthrough_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*);
|
| 21 |
+
|
| 22 |
+
// Note [Ambiguity in AutogradOther kernel]
|
| 23 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 24 |
+
// This error-reporting kernel is registered to the AutogradOther entry in the
|
| 25 |
+
// dispatch table when there is both a CompositeImplicitAutograd kernel and a
|
| 26 |
+
// backend kernel for ANY backend that maps to AutogradOther. To see why
|
| 27 |
+
// this is necessary in the AutogradOther case, it's helpful to first see
|
| 28 |
+
// why everything works out fine for a backend that has a reserved Autograd
|
| 29 |
+
// entry (see rule 2.2 in [Note] DispatchTable computation):
|
| 30 |
+
//
|
| 31 |
+
// CPU AutogradCPU
|
| 32 |
+
// reg? registers with...
|
| 33 |
+
// -------------------------------------------------
|
| 34 |
+
// y Autograd registration takes precedence
|
| 35 |
+
// over CompositeImplicitAutograd.
|
| 36 |
+
// This is good, because the CPU specific backend
|
| 37 |
+
// implementation is more specialized and typically better;
|
| 38 |
+
// if we used the composite, we would bypass it.
|
| 39 |
+
// (NB: the Autograd key is guaranteed to exist because
|
| 40 |
+
// the autograd codegen requires it!)
|
| 41 |
+
//
|
| 42 |
+
// n CompositeImplicitAutograd takes precedence.
|
| 43 |
+
// This is also good, because the Autograd
|
| 44 |
+
// registration (if it exists) would try to redispatch
|
| 45 |
+
// to the (non-existent) CPU implementation; by
|
| 46 |
+
// using the composite, we ensure the operator
|
| 47 |
+
// actually works.
|
| 48 |
+
//
|
| 49 |
+
// As you can see, when we have a specific Autograd key (AutogradCPU), we can
|
| 50 |
+
// decide whether or not to use the CompositeImplicitAutograd kernel or the
|
| 51 |
+
// Autograd kernel based on whether or not the backend kernel exists.
|
| 52 |
+
//
|
| 53 |
+
// However, for AutogradOther (which is the catchall autograd kernel for
|
| 54 |
+
// everything that doesn't have a specific Autograd key), we can't do this
|
| 55 |
+
// trick because there isn't any unique backend to peek at to disambiguate;
|
| 56 |
+
// if there are some backends that have implementations they prefer Autograd,
|
| 57 |
+
// but unimplemented backends would prefer CompositeImplicitAutograd. Rather
|
| 58 |
+
// than arbitrarily pick one or the other, we just register a kernel that raises
|
| 59 |
+
// an error and let the user decide how to proceed.
|
| 60 |
+
TORCH_API void ambiguous_autogradother_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*);
|
| 61 |
+
|
| 62 |
+
// Note [named_not_supported_kernel]
|
| 63 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 64 |
+
// This kernel implements reporting an error message saying that named tensor is
|
| 65 |
+
// not supported. This kernel doesn't rely on the Stack, and so it is special
|
| 66 |
+
// cased in the dispatcher to be triggered before we attempt boxing (so we can
|
| 67 |
+
// give a good error message in cases when boxing is not supported). When
|
| 68 |
+
// boxing is universally supported this can be removed.
|
| 69 |
+
[[noreturn]] TORCH_API void named_not_supported_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*);
|
| 70 |
+
|
| 71 |
+
/**
|
| 72 |
+
* BoxedKernel is similar to a std::function storing a boxed kernel.
|
| 73 |
+
*/
|
| 74 |
+
class TORCH_API BoxedKernel final {
|
| 75 |
+
public:
|
| 76 |
+
// This is how boxed kernels are actually stored
|
| 77 |
+
//
|
| 78 |
+
// Note [Plumbing Keys Through The Dispatcher]
|
| 79 |
+
// Benchmarks have shown that it is expensive for the dispatcher to read from thread-local storage (TLS)
|
| 80 |
+
// upon every dispatch call into order to compute which kernel to dispatch to.
|
| 81 |
+
//
|
| 82 |
+
// To mitigate this, we've updated the calling convention inside the dispatcher to expect every kernel that it stores
|
| 83 |
+
// to have a first argument of type DispatchKeySet.
|
| 84 |
+
//
|
| 85 |
+
// What are the invariants of the DispatchKeySet when it gets passed to a kernel?
|
| 86 |
+
// - All keys to the left of the current dispatch key have been masked out.
|
| 87 |
+
// (e.g. a Tracing kernel that takes in the DispatchKeySet will expect the highest bit to be DispatchKey::Tracer)
|
| 88 |
+
// - All other keys that dispatcher normally would have computed through TLS + global state + op arguments
|
| 89 |
+
// are still in the set.
|
| 90 |
+
//
|
| 91 |
+
// Kernels can then opt into using this keyset to save the dispatcher from doing repeated work during redispatches:
|
| 92 |
+
// recalculating the highest-priority dispatch key, which involves reading from TLS. Instead, the kernels that opt in will
|
| 93 |
+
// calculate an updated DispatchKeySet directly from the old one, and pass the updated set directly into the dispatcher
|
| 94 |
+
// upon redispatching.
|
| 95 |
+
//
|
| 96 |
+
// This is an opt-in mechanism: Kernels can automatically opt in by setting the first argument in their signature
|
| 97 |
+
// to be of type DispatchKeySet. See the kernels in VariableTypeEverything.cpp and TraceTypeEverything.cpp for examples.
|
| 98 |
+
//
|
| 99 |
+
// The mechanism for optionally passing that DispatchKeySet into the kernel lives in make_boxed_from_unboxed_functor.h.
|
| 100 |
+
// See Note [Plumbing Keys Through The Dispatcher 2] for details.
|
| 101 |
+
using InternalBoxedKernelFunction = void(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*);
|
| 102 |
+
// This is the public API for how boxed kernels are defined
|
| 103 |
+
using BoxedKernelFunction = void(const OperatorHandle&, Stack*);
|
| 104 |
+
using BoxedKernelFunction_withDispatchKeys = void(const OperatorHandle&, DispatchKeySet, Stack*);
|
| 105 |
+
|
| 106 |
+
BoxedKernel();
|
| 107 |
+
|
| 108 |
+
// Fast path for dispatch to allow not touching the boxed kernel in
|
| 109 |
+
// the common case where unboxed is available.
|
| 110 |
+
bool isValid() const;
|
| 111 |
+
bool isFallthrough() const;
|
| 112 |
+
|
| 113 |
+
/**
|
| 114 |
+
* Call the function with boxed arguments.
|
| 115 |
+
*/
|
| 116 |
+
void callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const;
|
| 117 |
+
|
| 118 |
+
/**
|
| 119 |
+
* Create a KernelFunction from a boxed function.
|
| 120 |
+
*
|
| 121 |
+
* Example:
|
| 122 |
+
*
|
| 123 |
+
* > void boxed_func(OperatorKernel*, Stack* stack) {...}
|
| 124 |
+
* > BoxedFunction func = BoxedKernel::makeFromFunction<&boxed_func>();
|
| 125 |
+
*/
|
| 126 |
+
template<BoxedKernelFunction* func>
|
| 127 |
+
static BoxedKernel makeFromFunction();
|
| 128 |
+
|
| 129 |
+
/**
|
| 130 |
+
* TODO: This will only be useful if we write a backend fallback that plumbs dispatch keys (currently there are none)
|
| 131 |
+
* See Note [Plumbing Keys Through The Dispatcher] for details.
|
| 132 |
+
*/
|
| 133 |
+
template<BoxedKernelFunction_withDispatchKeys* func>
|
| 134 |
+
static BoxedKernel makeFromFunction();
|
| 135 |
+
|
| 136 |
+
/**
|
| 137 |
+
* Create a KernelFunction from a boxed functor.
|
| 138 |
+
*
|
| 139 |
+
* Example:
|
| 140 |
+
*
|
| 141 |
+
* > class MyFunctor final : public c10::OperatorKernel {
|
| 142 |
+
* > public:
|
| 143 |
+
* > void operator()(const OperatorHandle&, DispatchKeySet, Stack*) {...}
|
| 144 |
+
* > };
|
| 145 |
+
* > BoxedKernel func = BoxedKernel::makeFromFunctor(std::make_unique<MyFunctor>());
|
| 146 |
+
*/
|
| 147 |
+
template<class KernelFunctor>
|
| 148 |
+
static BoxedKernel makeFromFunctor(std::unique_ptr<KernelFunctor> kernelFunctor);
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
static BoxedKernel makeFallthrough();
|
| 152 |
+
static BoxedKernel makeAmbiguousAutogradOther();
|
| 153 |
+
static BoxedKernel makeNamedNotSupported();
|
| 154 |
+
|
| 155 |
+
private:
|
| 156 |
+
|
| 157 |
+
friend class KernelFunction;
|
| 158 |
+
|
| 159 |
+
template<BoxedKernelFunction* func>
|
| 160 |
+
static void make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet, Stack* stack);
|
| 161 |
+
|
| 162 |
+
template<BoxedKernelFunction_withDispatchKeys* func>
|
| 163 |
+
static void make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet, Stack* stack);
|
| 164 |
+
|
| 165 |
+
explicit BoxedKernel(std::unique_ptr<OperatorKernel> functor, InternalBoxedKernelFunction* boxed_kernel_func);
|
| 166 |
+
|
| 167 |
+
OperatorKernel* getFunctor() const;
|
| 168 |
+
InternalBoxedKernelFunction* getFnPtr() const;
|
| 169 |
+
|
| 170 |
+
c10::intrusive_ptr<OperatorKernel> functor_;
|
| 171 |
+
InternalBoxedKernelFunction* boxed_kernel_func_;
|
| 172 |
+
};
|
| 173 |
+
|
| 174 |
+
} // namespace c10
|
| 175 |
+
|
| 176 |
+
#include <ATen/core/boxing/BoxedKernel_impl.h>
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/boxing/BoxedKernel_impl.h
ADDED
|
@@ -0,0 +1,99 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
namespace c10 {
|
| 4 |
+
|
| 5 |
+
inline BoxedKernel::BoxedKernel()
|
| 6 |
+
: functor_()
|
| 7 |
+
, boxed_kernel_func_(nullptr)
|
| 8 |
+
{}
|
| 9 |
+
|
| 10 |
+
inline BoxedKernel::BoxedKernel(std::unique_ptr<OperatorKernel> functor, InternalBoxedKernelFunction* boxed_kernel_func)
|
| 11 |
+
: functor_(std::move(functor))
|
| 12 |
+
, boxed_kernel_func_(boxed_kernel_func)
|
| 13 |
+
{}
|
| 14 |
+
|
| 15 |
+
template<BoxedKernel::BoxedKernelFunction* func>
|
| 16 |
+
inline void BoxedKernel::make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet, Stack* stack) {
|
| 17 |
+
// Note that we're dropping the DispatchKeySet argument.
|
| 18 |
+
// See Note [Plumbing Keys Through The Dispatcher 2] for details.
|
| 19 |
+
func(opHandle, stack);
|
| 20 |
+
}
|
| 21 |
+
|
| 22 |
+
template<BoxedKernel::BoxedKernelFunction_withDispatchKeys* func>
|
| 23 |
+
inline void BoxedKernel::make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet ks, Stack* stack) {
|
| 24 |
+
// See Note [Plumbing Keys Through The Dispatcher 2] for details.
|
| 25 |
+
func(opHandle, ks, stack);
|
| 26 |
+
}
|
| 27 |
+
|
| 28 |
+
inline bool BoxedKernel::isValid() const {
|
| 29 |
+
return boxed_kernel_func_ != nullptr;
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
inline bool BoxedKernel::isFallthrough() const {
|
| 33 |
+
return boxed_kernel_func_ == &fallthrough_kernel;
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
inline void BoxedKernel::callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const {
|
| 37 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
| 38 |
+
boxed_kernel_func_ != nullptr,
|
| 39 |
+
"Tried to call BoxedKernel::callBoxed() on an uninitialized BoxedKernel."
|
| 40 |
+
);
|
| 41 |
+
(*boxed_kernel_func_)(functor_.get(), opHandle, dispatchKeySet, stack);
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
template<BoxedKernel::BoxedKernelFunction* func>
|
| 45 |
+
inline BoxedKernel BoxedKernel::makeFromFunction() {
|
| 46 |
+
return BoxedKernel(
|
| 47 |
+
nullptr, // no functor_ object
|
| 48 |
+
&make_boxed_function<func>
|
| 49 |
+
);
|
| 50 |
+
}
|
| 51 |
+
|
| 52 |
+
template<BoxedKernel::BoxedKernelFunction_withDispatchKeys* func>
|
| 53 |
+
inline BoxedKernel BoxedKernel::makeFromFunction() {
|
| 54 |
+
return BoxedKernel(
|
| 55 |
+
nullptr, // no functor_ object
|
| 56 |
+
&make_boxed_function<func>
|
| 57 |
+
);
|
| 58 |
+
}
|
| 59 |
+
|
| 60 |
+
inline BoxedKernel BoxedKernel::makeFallthrough() {
|
| 61 |
+
return BoxedKernel(
|
| 62 |
+
nullptr, // no functor_ object
|
| 63 |
+
&fallthrough_kernel
|
| 64 |
+
);
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
inline BoxedKernel BoxedKernel::makeAmbiguousAutogradOther() {
|
| 68 |
+
return BoxedKernel(
|
| 69 |
+
nullptr, // no functor_ object
|
| 70 |
+
&ambiguous_autogradother_kernel
|
| 71 |
+
);
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
inline BoxedKernel BoxedKernel::makeNamedNotSupported() {
|
| 75 |
+
return BoxedKernel(
|
| 76 |
+
nullptr, // no functor_ object
|
| 77 |
+
&named_not_supported_kernel
|
| 78 |
+
);
|
| 79 |
+
}
|
| 80 |
+
|
| 81 |
+
template<class KernelFunctor>
|
| 82 |
+
inline BoxedKernel BoxedKernel::makeFromFunctor(std::unique_ptr<KernelFunctor> kernelFunctor) {
|
| 83 |
+
static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value, "Tried to call BoxedKernel::makeFromFunctor<KernelFunctor>, but the functor doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
|
| 84 |
+
return BoxedKernel(
|
| 85 |
+
std::move(kernelFunctor),
|
| 86 |
+
[](OperatorKernel* kernel, const OperatorHandle& op, DispatchKeySet ks, Stack* stack) {
|
| 87 |
+
(*static_cast<KernelFunctor*>(kernel))(op, ks, stack);
|
| 88 |
+
}
|
| 89 |
+
);
|
| 90 |
+
}
|
| 91 |
+
|
| 92 |
+
inline OperatorKernel* BoxedKernel::getFunctor() const {
|
| 93 |
+
return functor_.get();
|
| 94 |
+
}
|
| 95 |
+
inline BoxedKernel::InternalBoxedKernelFunction* BoxedKernel::getFnPtr() const {
|
| 96 |
+
return boxed_kernel_func_;
|
| 97 |
+
}
|
| 98 |
+
|
| 99 |
+
} // namespace c10
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction.h
ADDED
|
@@ -0,0 +1,260 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/ATen_fwd.h>
|
| 4 |
+
#include <ATen/core/boxing/BoxedKernel.h>
|
| 5 |
+
#include <ATen/core/stack.h>
|
| 6 |
+
#include <c10/core/DispatchKeySet.h>
|
| 7 |
+
#include <c10/util/intrusive_ptr.h>
|
| 8 |
+
#include <c10/util/TypeList.h>
|
| 9 |
+
#include <type_traits>
|
| 10 |
+
|
| 11 |
+
namespace c10 {
|
| 12 |
+
|
| 13 |
+
using Stack = torch::jit::Stack; // TODO Instead of this, move torch::jit::Stack to the c10 namespace.
|
| 14 |
+
|
| 15 |
+
class OperatorHandle;
|
| 16 |
+
struct OperatorKernel;
|
| 17 |
+
class KernelFunction;
|
| 18 |
+
|
| 19 |
+
template <typename T>
|
| 20 |
+
using has_symint =
|
| 21 |
+
std::disjunction<
|
| 22 |
+
std::is_same<c10::SymInt, T>,
|
| 23 |
+
std::is_same<c10::SymIntArrayRef, T>,
|
| 24 |
+
std::is_same<at::OptionalSymIntArrayRef, T>,
|
| 25 |
+
std::is_same<c10::optional<c10::SymInt>, T>
|
| 26 |
+
>;
|
| 27 |
+
|
| 28 |
+
template <typename T>
|
| 29 |
+
struct remove_symint {
|
| 30 |
+
using type = T;
|
| 31 |
+
};
|
| 32 |
+
|
| 33 |
+
template <>
|
| 34 |
+
struct remove_symint<c10::SymInt> {
|
| 35 |
+
using type = int64_t;
|
| 36 |
+
};
|
| 37 |
+
|
| 38 |
+
template <>
|
| 39 |
+
struct remove_symint<at::OptionalSymIntArrayRef> {
|
| 40 |
+
using type = OptionalIntArrayRef;
|
| 41 |
+
};
|
| 42 |
+
|
| 43 |
+
template <>
|
| 44 |
+
struct remove_symint<c10::SymIntArrayRef> {
|
| 45 |
+
using type = c10::IntArrayRef;
|
| 46 |
+
};
|
| 47 |
+
|
| 48 |
+
template <>
|
| 49 |
+
struct remove_symint<c10::optional<c10::SymInt>> {
|
| 50 |
+
using type = c10::optional<int64_t>;
|
| 51 |
+
};
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
template <bool symint, typename T>
|
| 55 |
+
struct maybe_keep_symint final {};
|
| 56 |
+
|
| 57 |
+
template <typename T>
|
| 58 |
+
struct maybe_keep_symint<true, T> { using type = T; };
|
| 59 |
+
|
| 60 |
+
template <typename T>
|
| 61 |
+
struct maybe_keep_symint<false, T> { using type = typename remove_symint<T>::type; };
|
| 62 |
+
|
| 63 |
+
template <typename T>
|
| 64 |
+
using fn_has_symint = typename guts::typelist::true_for_any_type<
|
| 65 |
+
has_symint,
|
| 66 |
+
typename guts::infer_function_traits<T>::type::parameter_types
|
| 67 |
+
>;
|
| 68 |
+
|
| 69 |
+
template <typename T>
|
| 70 |
+
struct fn_remove_symint;
|
| 71 |
+
|
| 72 |
+
template <typename Ret, typename... Args>
|
| 73 |
+
struct fn_remove_symint<Ret(Args...)> {
|
| 74 |
+
using type = Ret(typename remove_symint<Args>::type...);
|
| 75 |
+
};
|
| 76 |
+
|
| 77 |
+
/**
|
| 78 |
+
* KernelFunction is similar to std::function but stores a kernel function.
|
| 79 |
+
* You can create a KernelFunction from a boxed or unboxed function/functor/lambda
|
| 80 |
+
* and call it in a boxed or unboxed way. If the way it was created doesn't
|
| 81 |
+
* match the way it was called, it will do boxing or unboxing as necessary.
|
| 82 |
+
*/
|
| 83 |
+
class TORCH_API KernelFunction final {
|
| 84 |
+
public:
|
| 85 |
+
using InternalBoxedKernelFunction = BoxedKernel::InternalBoxedKernelFunction;
|
| 86 |
+
using BoxedKernelFunction = BoxedKernel::BoxedKernelFunction;
|
| 87 |
+
using BoxedKernelFunction_withDispatchKeys = BoxedKernel::BoxedKernelFunction_withDispatchKeys;
|
| 88 |
+
|
| 89 |
+
KernelFunction();
|
| 90 |
+
|
| 91 |
+
// Fast path for dispatch to allow not touching the boxed kernel in
|
| 92 |
+
// the common case where unboxed is available.
|
| 93 |
+
bool isValidUnboxed() const;
|
| 94 |
+
bool isValidSymUnboxed() const;
|
| 95 |
+
bool isValid() const;
|
| 96 |
+
bool isFallthrough() const;
|
| 97 |
+
|
| 98 |
+
/**
|
| 99 |
+
* Call the function in a boxed way.
|
| 100 |
+
* If the kernel function was created with an unboxed function,
|
| 101 |
+
* this will call an unboxing wrapper which then calls into that
|
| 102 |
+
* unboxed function.
|
| 103 |
+
*
|
| 104 |
+
* Example:
|
| 105 |
+
*
|
| 106 |
+
* > void boxed_func(OperatorKernel*, Stack* stack) {...}
|
| 107 |
+
* > KernelFunction func = KernelFunction::makeFromBoxedFunction(&boxed_func);
|
| 108 |
+
* > Tensor result = func.callBoxed(stack);
|
| 109 |
+
*
|
| 110 |
+
* Or, with an unboxed implementation:
|
| 111 |
+
*
|
| 112 |
+
* > KernelFunction func = KernelFunction::makeFromUnboxedLambda(
|
| 113 |
+
* > [] (Tensor a, bool b) -> Tensor {...});
|
| 114 |
+
* > Tensor result = func.callBoxed(stack);
|
| 115 |
+
*/
|
| 116 |
+
void callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const;
|
| 117 |
+
|
| 118 |
+
/**
|
| 119 |
+
* Call the function in an unboxed way.
|
| 120 |
+
* If the kernel function was created with a boxed function,
|
| 121 |
+
* this will box all inputs and then call into that boxed function.
|
| 122 |
+
*
|
| 123 |
+
* Note that this doesn't work for all types yet.
|
| 124 |
+
*
|
| 125 |
+
* Example:
|
| 126 |
+
*
|
| 127 |
+
* > KernelFunction func = KernelFunction::makeFromUnboxedLambda(
|
| 128 |
+
* > [] (Tensor a, bool b) -> Tensor {...});
|
| 129 |
+
* > Tensor result = func.call<Tensor, Tensor, bool>(tensor1, true);
|
| 130 |
+
*
|
| 131 |
+
* Or, with a boxed implementation:
|
| 132 |
+
*
|
| 133 |
+
* > void boxed_func(OperatorKernel*, Stack* stack) {...}
|
| 134 |
+
* > KernelFunction func = KernelFunction::makeFromBoxedFunction(&boxed_func);
|
| 135 |
+
* > Tensor result = func.call<Tensor, Tensor, bool>(tensor1, true);
|
| 136 |
+
*/
|
| 137 |
+
template<class Return, class... Args>
|
| 138 |
+
Return call(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Args... args) const;
|
| 139 |
+
|
| 140 |
+
/**
|
| 141 |
+
* Create a KernelFunction from a BoxedKernel.
|
| 142 |
+
*/
|
| 143 |
+
static KernelFunction makeFromBoxedKernel(BoxedKernel boxed_fn);
|
| 144 |
+
|
| 145 |
+
/**
|
| 146 |
+
* Create a KernelFunction from a boxed function.
|
| 147 |
+
*
|
| 148 |
+
* Example:
|
| 149 |
+
*
|
| 150 |
+
* > void boxed_func(OperatorKernel*, Stack* stack) {...}
|
| 151 |
+
* > KernelFunction func = KernelFunction::makeFromBoxedFunction<&boxed_func>();
|
| 152 |
+
*/
|
| 153 |
+
template<BoxedKernelFunction* func>
|
| 154 |
+
static KernelFunction makeFromBoxedFunction();
|
| 155 |
+
|
| 156 |
+
/**
|
| 157 |
+
* TODO: This will only be useful if we write a backend fallback that plumbs dispatch keys (currently there are none)
|
| 158 |
+
* See Note [Plumbing Keys Through The Dispatcher] for details.
|
| 159 |
+
*/
|
| 160 |
+
template<BoxedKernelFunction_withDispatchKeys* func>
|
| 161 |
+
static KernelFunction makeFromBoxedFunction();
|
| 162 |
+
|
| 163 |
+
/**
|
| 164 |
+
* Create a KernelFunction from an unboxed functor.
|
| 165 |
+
*
|
| 166 |
+
* Example:
|
| 167 |
+
*
|
| 168 |
+
* > class MyFunctor final : public c10::OperatorKernel {
|
| 169 |
+
* > public:
|
| 170 |
+
* > Tensor operator()(Tensor a, Tensor b) {...}
|
| 171 |
+
* > };
|
| 172 |
+
* > KernelFunction func = KernelFunction::makeFromUnboxedFunctor<MyFunctor>(std::make_unique<MyFunctor>());
|
| 173 |
+
*/
|
| 174 |
+
template<bool AllowLegacyTypes = false, class KernelFunctor>
|
| 175 |
+
static KernelFunction makeFromUnboxedFunctor(std::unique_ptr<OperatorKernel> kernelFunctor);
|
| 176 |
+
|
| 177 |
+
/**
|
| 178 |
+
* Create a KernelFunction from a boxed functor.
|
| 179 |
+
*
|
| 180 |
+
* Example:
|
| 181 |
+
*
|
| 182 |
+
* > class MyFunctor final : public c10::OperatorKernel {
|
| 183 |
+
* > public:
|
| 184 |
+
* > void operator()(const OperatorHandle&, DispatchKeySet, Stack*) {...}
|
| 185 |
+
* > };
|
| 186 |
+
* > KernelFunction func = KernelFunction::makeFromBoxedFunctor(std::make_unique<MyFunctor>());
|
| 187 |
+
*/
|
| 188 |
+
template<class KernelFunctor>
|
| 189 |
+
static KernelFunction makeFromBoxedFunctor(std::unique_ptr<KernelFunctor> kernelFunctor);
|
| 190 |
+
|
| 191 |
+
/**
|
| 192 |
+
* Create a KernelFunction from an unboxed function.
|
| 193 |
+
* This is usually better than KernelFunction::makeFromUnboxedRuntimeFunction
|
| 194 |
+
* because knowing the function pointer as a template argument (i.e. at
|
| 195 |
+
* compile time) allows the compiler to inline the function into its
|
| 196 |
+
* unboxing wrapper and yields better performance when calling the function.
|
| 197 |
+
*
|
| 198 |
+
* Example:
|
| 199 |
+
*
|
| 200 |
+
* > Tensor unboxed_func(Tensor a, Tensor b) {...}
|
| 201 |
+
* > KernelFunction func = KernelFunction::makeFromUnboxedFunction<decltype(unboxed_func), &unboxed_func>();
|
| 202 |
+
*/
|
| 203 |
+
template<class FuncPtr, bool AllowLegacyTypes = false>
|
| 204 |
+
static KernelFunction makeFromUnboxedFunction(FuncPtr);
|
| 205 |
+
|
| 206 |
+
/**
|
| 207 |
+
* Create a KernelFunction from an unboxed function.
|
| 208 |
+
* KernelFunction::makeFromUnboxedFunction is usually a better choice than
|
| 209 |
+
* this if you know the function pointer at compile time, see doc comment
|
| 210 |
+
* there for an explanation.
|
| 211 |
+
*
|
| 212 |
+
* Example:
|
| 213 |
+
*
|
| 214 |
+
* > Tensor unboxed_func(Tensor a, Tensor b) {...}
|
| 215 |
+
* > KernelFunction func = KernelFunction::makeFromUnboxedRuntimeFunction(&unboxed_func);
|
| 216 |
+
*/
|
| 217 |
+
template<bool AllowLegacyTypes = false, class FuncType>
|
| 218 |
+
static KernelFunction makeFromUnboxedRuntimeFunction(FuncType* func);
|
| 219 |
+
|
| 220 |
+
static KernelFunction makeFallthrough();
|
| 221 |
+
static KernelFunction makeAmbiguousAutogradOther();
|
| 222 |
+
static KernelFunction makeNamedNotSupported();
|
| 223 |
+
|
| 224 |
+
/**
|
| 225 |
+
* Create a KernelFunction from an unboxed lambda.
|
| 226 |
+
*
|
| 227 |
+
* Example:
|
| 228 |
+
*
|
| 229 |
+
* > KernelFunction func = KernelFunction::makeFromUnboxedLambda(
|
| 230 |
+
* > [] (Tensor a, bool b) -> Tensor {...});
|
| 231 |
+
*/
|
| 232 |
+
template<bool AllowLegacyTypes = false, class Lambda>
|
| 233 |
+
static std::enable_if_t<guts::is_stateless_lambda<std::decay_t<Lambda>>::value, KernelFunction> makeFromUnboxedLambda(Lambda&& lambda);
|
| 234 |
+
template<bool AllowLegacyTypes = false, class Lambda>
|
| 235 |
+
static std::enable_if_t<!guts::is_stateless_lambda<std::decay_t<Lambda>>::value, KernelFunction> makeFromUnboxedLambda(Lambda&& lambda);
|
| 236 |
+
|
| 237 |
+
std::string dumpState() const;
|
| 238 |
+
// For testing internal invariants only
|
| 239 |
+
bool _equalsBoxedAndUnboxed(const KernelFunction&) const;
|
| 240 |
+
|
| 241 |
+
private:
|
| 242 |
+
|
| 243 |
+
explicit KernelFunction(
|
| 244 |
+
std::unique_ptr<OperatorKernel> functor,
|
| 245 |
+
InternalBoxedKernelFunction* boxed_kernel_func,
|
| 246 |
+
void* unboxed_kernel_func,
|
| 247 |
+
void* sym_unboxed_kernel_func);
|
| 248 |
+
explicit KernelFunction(
|
| 249 |
+
BoxedKernel boxed_fn,
|
| 250 |
+
void* unboxed_kernel_func,
|
| 251 |
+
void* sym_unboxed_kernel_func);
|
| 252 |
+
|
| 253 |
+
BoxedKernel boxed_kernel_func_;
|
| 254 |
+
void* unboxed_kernel_func_;
|
| 255 |
+
void* sym_unboxed_kernel_func_;
|
| 256 |
+
};
|
| 257 |
+
|
| 258 |
+
}
|
| 259 |
+
|
| 260 |
+
#include <ATen/core/boxing/KernelFunction_impl.h>
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/boxing/OperatorKernel.h
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <c10/util/intrusive_ptr.h>
|
| 3 |
+
|
| 4 |
+
namespace c10 {
|
| 5 |
+
|
| 6 |
+
/**
|
| 7 |
+
* Inherit from OperatorKernel to implement a c10 kernel.
|
| 8 |
+
*
|
| 9 |
+
* Example:
|
| 10 |
+
* > namespace {
|
| 11 |
+
* > class my_kernel_cpu final : public c10::OperatorKernel {
|
| 12 |
+
* > public:
|
| 13 |
+
* > Tensor operator()(Tensor a, Tensor b) {...}
|
| 14 |
+
* > };
|
| 15 |
+
* > }
|
| 16 |
+
*
|
| 17 |
+
* The kernel class is allowed to have members but these are equivalent
|
| 18 |
+
* to global variables. The kernel implementation is responsible for
|
| 19 |
+
* preventing race conditions on them.
|
| 20 |
+
*
|
| 21 |
+
* See below for how to register this kernel with PyTorch.
|
| 22 |
+
*/
|
| 23 |
+
struct TORCH_API OperatorKernel : public c10::intrusive_ptr_target {
|
| 24 |
+
~OperatorKernel() override = default;
|
| 25 |
+
};
|
| 26 |
+
|
| 27 |
+
} // namespace c10
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoFunctor.h
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/CompileTimeFunctionPointer.h>
|
| 4 |
+
|
| 5 |
+
namespace c10 {
|
| 6 |
+
namespace impl {
|
| 7 |
+
namespace detail {
|
| 8 |
+
template<class FuncPtr, class ReturnType, class ParameterList> class WrapFunctionIntoFunctor_ {};
|
| 9 |
+
template<class FuncPtr, class ReturnType, class... Parameters>
|
| 10 |
+
class WrapFunctionIntoFunctor_<FuncPtr, ReturnType, guts::typelist::typelist<Parameters...>> final : public c10::OperatorKernel {
|
| 11 |
+
public:
|
| 12 |
+
C10_ALWAYS_INLINE decltype(auto) operator()(Parameters... args) {
|
| 13 |
+
return (*FuncPtr::func_ptr())(std::forward<Parameters>(args)...);
|
| 14 |
+
}
|
| 15 |
+
};
|
| 16 |
+
}
|
| 17 |
+
|
| 18 |
+
// WrapFunctionIntoFunctor: Wraps a compile time function pointer into a kernel functor.
|
| 19 |
+
// Since it is a compile time function pointer, many compilers can inline it
|
| 20 |
+
// into the wrapper and you don't get any performance overhead for wrapping.
|
| 21 |
+
template<class FuncPtr>
|
| 22 |
+
struct WrapFunctionIntoFunctor final {
|
| 23 |
+
static_assert(c10::is_compile_time_function_pointer<FuncPtr>::value, "WrapFunctionIntoFunctor can only wrap functions created with TORCH_FN.");
|
| 24 |
+
using type = detail::WrapFunctionIntoFunctor_<
|
| 25 |
+
FuncPtr,
|
| 26 |
+
typename guts::function_traits<typename FuncPtr::FuncType>::return_type,
|
| 27 |
+
typename guts::function_traits<typename FuncPtr::FuncType>::parameter_types
|
| 28 |
+
>;
|
| 29 |
+
};
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
}
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/util/TypeTraits.h>
|
| 4 |
+
|
| 5 |
+
namespace c10 {
|
| 6 |
+
|
| 7 |
+
namespace impl {
|
| 8 |
+
namespace detail {
|
| 9 |
+
template<class FuncType, class ReturnType, class ParameterList> class WrapFunctionIntoRuntimeFunctor_ {};
|
| 10 |
+
template<class FuncType, class ReturnType, class... Parameters>
|
| 11 |
+
class WrapFunctionIntoRuntimeFunctor_<FuncType, ReturnType, guts::typelist::typelist<Parameters...>> final : public c10::OperatorKernel {
|
| 12 |
+
public:
|
| 13 |
+
template<class FuncType_>
|
| 14 |
+
explicit WrapFunctionIntoRuntimeFunctor_(FuncType_&& kernel_func)
|
| 15 |
+
: kernel_func_(std::forward<FuncType_>(kernel_func)) {}
|
| 16 |
+
|
| 17 |
+
decltype(auto) operator()(Parameters... args) {
|
| 18 |
+
return kernel_func_(std::forward<Parameters>(args)...);
|
| 19 |
+
}
|
| 20 |
+
|
| 21 |
+
private:
|
| 22 |
+
FuncType kernel_func_;
|
| 23 |
+
};
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
// WrapFunctionIntoRuntimeFunctor: Wraps any runtime functor into a functor that
|
| 27 |
+
// inherits from c10::OperatorKernel, so it can be used as a c10 kernel.
|
| 28 |
+
// This can, for example, be used for lambdas, functors or even function pointers.
|
| 29 |
+
// In the case of function pointers, since it is a runtime function pointer,
|
| 30 |
+
// there is an overhead for calling it whenever the kernel is invoked.
|
| 31 |
+
template<class FuncType>
|
| 32 |
+
using WrapFunctionIntoRuntimeFunctor = detail::WrapFunctionIntoRuntimeFunctor_<
|
| 33 |
+
FuncType,
|
| 34 |
+
typename guts::infer_function_traits_t<FuncType>::return_type,
|
| 35 |
+
typename guts::infer_function_traits_t<FuncType>::parameter_types
|
| 36 |
+
>;
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
}
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/boxing.h
ADDED
|
@@ -0,0 +1,387 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// This file contains boxing (not unboxing) logic,
|
| 4 |
+
// i.e. how to make a vector<IValue> from a set of concrete arguments.
|
| 5 |
+
|
| 6 |
+
#include <ATen/core/ivalue.h>
|
| 7 |
+
#include <ATen/core/stack.h>
|
| 8 |
+
#include <c10/core/TensorOptions.h>
|
| 9 |
+
|
| 10 |
+
#include <ATen/core/boxing/BoxedKernel.h>
|
| 11 |
+
|
| 12 |
+
#include <c10/util/Metaprogramming.h>
|
| 13 |
+
#include <type_traits>
|
| 14 |
+
|
| 15 |
+
namespace c10 {
|
| 16 |
+
namespace impl {
|
| 17 |
+
|
| 18 |
+
//
|
| 19 |
+
// utils
|
| 20 |
+
//
|
| 21 |
+
|
| 22 |
+
// is_mutable_tensor_ref
|
| 23 |
+
template <class T> struct is_mutable_tensor_ref : std::false_type {};
|
| 24 |
+
template <> struct is_mutable_tensor_ref<at::Tensor&> : std::true_type {};
|
| 25 |
+
|
| 26 |
+
// is_tuple_of_mutable_tensor_refs
|
| 27 |
+
//
|
| 28 |
+
template <class T, class Enable = void>
|
| 29 |
+
struct is_tuple_of_mutable_tensor_refs : std::false_type {};
|
| 30 |
+
|
| 31 |
+
template <class T>
|
| 32 |
+
struct is_tuple_of_mutable_tensor_refs<T, std::enable_if_t<guts::is_instantiation_of<std::tuple, T>::value, void>>
|
| 33 |
+
: guts::typelist::all<is_mutable_tensor_ref, guts::typelist::from_tuple_t<T>>
|
| 34 |
+
{};
|
| 35 |
+
|
| 36 |
+
// has_ivalue_to<T> tests the presence/absence of instance method IValue::to<T>()
|
| 37 |
+
//
|
| 38 |
+
template <class T, class Enable = void>
|
| 39 |
+
struct has_ivalue_to : std::false_type {};
|
| 40 |
+
|
| 41 |
+
template <class T>
|
| 42 |
+
struct has_ivalue_to<T, std::void_t<decltype(std::declval<IValue>().to<T>())>>
|
| 43 |
+
: std::true_type
|
| 44 |
+
{};
|
| 45 |
+
|
| 46 |
+
//
|
| 47 |
+
// boxing predicates
|
| 48 |
+
//
|
| 49 |
+
|
| 50 |
+
// A boxable arg type is one that IValue has a constructor for.
|
| 51 |
+
template <typename T>
|
| 52 |
+
using can_box =
|
| 53 |
+
std::disjunction<
|
| 54 |
+
std::is_constructible<IValue, std::decay_t<T>>,
|
| 55 |
+
// TensorOptions are not directly constructible into IValue,
|
| 56 |
+
// but torch::jit::push knows how to handle them
|
| 57 |
+
std::is_same<TensorOptions, std::decay_t<T>>
|
| 58 |
+
>;
|
| 59 |
+
|
| 60 |
+
template <typename... Ts>
|
| 61 |
+
using can_box_all = std::conjunction<can_box<Ts>...>;
|
| 62 |
+
|
| 63 |
+
// an unboxable result is one that can be extracted from an IValue
|
| 64 |
+
template <typename T>
|
| 65 |
+
using can_unbox =
|
| 66 |
+
std::conjunction<
|
| 67 |
+
std::disjunction<
|
| 68 |
+
has_ivalue_to<T>,
|
| 69 |
+
// void returns are ok
|
| 70 |
+
std::is_same<void, T>
|
| 71 |
+
>,
|
| 72 |
+
std::negation<std::is_lvalue_reference<T>>
|
| 73 |
+
>;
|
| 74 |
+
|
| 75 |
+
//
|
| 76 |
+
// boxArgs - utility for pushing unboxed args onto IValue stack
|
| 77 |
+
//
|
| 78 |
+
template <class... Args>
|
| 79 |
+
torch::jit::Stack boxArgs(Args... args) {
|
| 80 |
+
// TODO Reuse stack vector instead of allocating?
|
| 81 |
+
torch::jit::Stack stack;
|
| 82 |
+
stack.reserve(sizeof...(Args));
|
| 83 |
+
torch::jit::push(stack, std::forward<Args>(args)...);
|
| 84 |
+
return stack;
|
| 85 |
+
}
|
| 86 |
+
|
| 87 |
+
template <class T>
|
| 88 |
+
static inline constexpr size_t boxed_size_one() {
|
| 89 |
+
static_assert(!std::is_same<std::decay_t<T>, c10::TensorOptions>::value, "need to patch this path to support TensorOptions passed by reference");
|
| 90 |
+
return 1;
|
| 91 |
+
}
|
| 92 |
+
|
| 93 |
+
// torch::jit::push pushes 4 values for a TensorOptions; this needs to
|
| 94 |
+
// be kept in sync.
|
| 95 |
+
template <>
|
| 96 |
+
inline constexpr size_t boxed_size_one<c10::TensorOptions>() {
|
| 97 |
+
return 4;
|
| 98 |
+
}
|
| 99 |
+
|
| 100 |
+
// NOTE: this could probably be simplified with C++17 fold expressions.
|
| 101 |
+
template <typename...>
|
| 102 |
+
struct BoxedSize : std::integral_constant<size_t, 0> {};
|
| 103 |
+
template <class T, class... Args>
|
| 104 |
+
struct BoxedSize<T, Args...> : std::integral_constant<size_t, boxed_size_one<T>() + BoxedSize<Args...>::value> {};
|
| 105 |
+
|
| 106 |
+
template <class... Args>
|
| 107 |
+
static inline constexpr size_t boxed_size() {
|
| 108 |
+
return BoxedSize<Args...>::value;
|
| 109 |
+
}
|
| 110 |
+
|
| 111 |
+
using IValueAlignedStorage = std::aligned_storage_t<sizeof(IValue), alignof(IValue)>;
|
| 112 |
+
|
| 113 |
+
template <typename T>
|
| 114 |
+
C10_ALWAYS_INLINE_UNLESS_MOBILE void boxToStack(IValueAlignedStorage* dest, T& arg, int& lastIdx) {
|
| 115 |
+
new (&dest[lastIdx]) IValue(arg);
|
| 116 |
+
lastIdx++;
|
| 117 |
+
}
|
| 118 |
+
|
| 119 |
+
C10_ALWAYS_INLINE_UNLESS_MOBILE void boxToStack(IValueAlignedStorage* dest, c10::TensorOptions options, int& lastIdx) {
|
| 120 |
+
new (&dest[lastIdx++]) IValue(c10::typeMetaToScalarType(options.dtype()));
|
| 121 |
+
new (&dest[lastIdx++]) IValue(options.layout());
|
| 122 |
+
new (&dest[lastIdx++]) IValue(options.device());
|
| 123 |
+
new (&dest[lastIdx++]) IValue(options.pinned_memory());
|
| 124 |
+
}
|
| 125 |
+
|
| 126 |
+
inline void boxArgsToStack(IValueAlignedStorage*, int&) {}
|
| 127 |
+
|
| 128 |
+
template<typename T, typename... Args>
|
| 129 |
+
C10_ALWAYS_INLINE_UNLESS_MOBILE void boxArgsToStack(IValueAlignedStorage* dest, int& lastIdx, T& arg, Args &... args) {
|
| 130 |
+
boxToStack(dest, arg, lastIdx);
|
| 131 |
+
boxArgsToStack(dest, lastIdx, args...);
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
//
|
| 135 |
+
// PopResult is a helper class whose specializations handle popping single and
|
| 136 |
+
// multiple return values, respectively.
|
| 137 |
+
//
|
| 138 |
+
template <class Result>
|
| 139 |
+
struct PopResult final {
|
| 140 |
+
static Result call(Stack& stack) {
|
| 141 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
| 142 |
+
stack.size() == 1,
|
| 143 |
+
"Boxed kernel was expected to return one value on the stack, ",
|
| 144 |
+
"but instead pushed ", stack.size(), " values."
|
| 145 |
+
);
|
| 146 |
+
return std::move(stack[0]).to<Result>();
|
| 147 |
+
}
|
| 148 |
+
};
|
| 149 |
+
|
| 150 |
+
template <class... Types>
|
| 151 |
+
struct PopResult<std::tuple<Types...>> final {
|
| 152 |
+
using Result = std::tuple<Types...>;
|
| 153 |
+
|
| 154 |
+
static Result call(Stack& stack) {
|
| 155 |
+
// for tuple return types, boxed kernel has pushed multiple values onto the stack
|
| 156 |
+
constexpr int RetCount = sizeof...(Types);
|
| 157 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
| 158 |
+
stack.size() == RetCount,
|
| 159 |
+
"Boxed kernel was expected to return ", RetCount, " values on the stack, ",
|
| 160 |
+
"but instead pushed ", stack.size(), " values."
|
| 161 |
+
);
|
| 162 |
+
return pop_to_tuple_impl(stack, std::make_index_sequence<RetCount>());
|
| 163 |
+
}
|
| 164 |
+
private:
|
| 165 |
+
// note: this has been moved into its own helper only to avoid a parse error on `indices` otherwise.
|
| 166 |
+
// I'm sure there's an incantation that slips it past the parser but eh
|
| 167 |
+
template <size_t... indices>
|
| 168 |
+
static Result pop_to_tuple_impl(Stack& stack, std::index_sequence<indices...>) {
|
| 169 |
+
return std::make_tuple((std::move(stack[indices]).to<Types>())...);
|
| 170 |
+
}
|
| 171 |
+
};
|
| 172 |
+
|
| 173 |
+
//
|
| 174 |
+
// BoxedKernelWrapper
|
| 175 |
+
//
|
| 176 |
+
// For a given function type FT, BoxedKernelWrapper<FT> implements
|
| 177 |
+
// a `call` method that
|
| 178 |
+
// - takes a boxed kernel and unboxed arguments as specified by FT,
|
| 179 |
+
// - calls `boxArgs` to box the arguments
|
| 180 |
+
// - calls the boxed kernel
|
| 181 |
+
// - unboxes and returns the result
|
| 182 |
+
//
|
| 183 |
+
// The partial specializations below handle various cases: in
|
| 184 |
+
// particular, not all types appearing in op signatures are supported,
|
| 185 |
+
// and ops returning references have nonstandard wrapper implementations.
|
| 186 |
+
//
|
| 187 |
+
|
| 188 |
+
// 1. The base specialization of BoxedKernelWrapper should never be instantiated.
|
| 189 |
+
// A "no call method defined on BoxedKernelWrapper" compile error means that
|
| 190 |
+
// an op signature has failed to trigger any of the partial specializations
|
| 191 |
+
// that follow this one.
|
| 192 |
+
//
|
| 193 |
+
// Base specialization: should never be instantiated. A "no call method defined
// on BoxedKernelWrapper" compile error means an op signature failed to trigger
// any of the partial specializations that follow.
template <class FuncType, class Enable = void>
struct BoxedKernelWrapper {
  // The reason we're not just doing straight up static_assert(false, ...) here:
  // Basically, the way to make sure a static_assert only fires if a template
  // is actually instantiated (rather than every time the file is parsed) is to use
  // template parameters in the expression, e.g. FuncType here. However, since
  // `sizeof(FuncType) != sizeof(FuncType)` is always false, this has the same
  // effect.
  static_assert(sizeof(FuncType) != sizeof(FuncType),
    "Function signature contains one or more unsupported parameter and/or return types. "
    "Look for a nearby error like "
    "\"'call' is not a member of 'c10::impl::BoxedKernelWrapper<(your function type), void>'\" "
    "- (your function type) is the unsupported signature.");
};
|
| 207 |
+
|
| 208 |
+
//
|
| 209 |
+
// 2. Supported signatures, other than those involving non-const Tensor refs -
|
| 210 |
+
// i.e., "functional" ops.
|
| 211 |
+
//
|
| 212 |
+
|
| 213 |
+
// Specialization 2: "functional" ops — supported signatures that do not
// involve non-const Tensor refs.
template <class Result, class... Args>
struct BoxedKernelWrapper<
  Result(Args...),
  std::enable_if_t<
    can_box_all<Args...>::value && can_unbox<Result>::value && !is_tuple_of_mutable_tensor_refs<Result>::value,
    void
  >
> {
  // Boxes the unboxed arguments, invokes the boxed kernel, then unboxes the
  // result (if any) from the stack.
  static Result call(
    const BoxedKernel& boxed_kernel_func,
    const OperatorHandle& opHandle,
    DispatchKeySet dispatchKeySet,
    Args... args
  ) {
    torch::jit::Stack stack = boxArgs<Args...>(std::forward<Args>(args)...);
    boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack);

    if constexpr (!std::is_same_v<void, Result>) {
      // op has pushed one or more values onto the stack.
      return PopResult<Result>::call(stack);
    } else {
      // op returns void, boxed kernel has pushed nothing onto stack.
      TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
        stack.empty(),
        "Boxed kernel was expected to return no values on the stack, ",
        "but instead returned ", stack.size(), " values."
      );
    }
  }
};
|
| 243 |
+
|
| 244 |
+
//
|
| 245 |
+
// 3. in-place ops take a single non-const Tensor reference
|
| 246 |
+
// as their first argument, and return it.
|
| 247 |
+
//
|
| 248 |
+
// Note: all signatures matching this pattern are assumed to be for such ops.
|
| 249 |
+
// Because of this, the generated BoxedKernelWrapper specializations simply
|
| 250 |
+
// return the in-place argument.
|
| 251 |
+
//
|
| 252 |
+
|
| 253 |
+
// Specialization 3: in-place ops take a single non-const Tensor reference as
// their first argument, and return it. All matching signatures are assumed to
// be such ops, so we simply return the in-place argument.
template <class... OtherArgs>
struct BoxedKernelWrapper<
  at::Tensor&(at::Tensor&, OtherArgs...),
  std::enable_if_t<can_box_all<OtherArgs...>::value, void>
> {
  static at::Tensor& call(
    const BoxedKernel& boxed_kernel_func,
    const OperatorHandle& opHandle,
    DispatchKeySet dispatchKeySet,
    at::Tensor& outArg, OtherArgs... otherArgs
  ) {
    torch::jit::Stack stack = boxArgs<at::Tensor&, OtherArgs...>(outArg, std::forward<OtherArgs>(otherArgs)...);
    boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack);
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
      stack.size() == 1,
      "Boxed kernel was expected to return a single value on the stack, ",
      "but instead returned ", stack.size(), " values."
    );

    // By the in-place convention the result aliases outArg, so we can return
    // the caller's reference directly instead of unboxing from the stack.
    return outArg;
  }
};
|
| 275 |
+
|
| 276 |
+
//
|
| 277 |
+
// 3.5. In-process migration to make in-place ops take and return
|
| 278 |
+
// const references instead.
|
| 279 |
+
// Specialization 3.5: in-process migration to make in-place ops take and
// return const references instead.
template <class... OtherArgs>
struct BoxedKernelWrapper<
  const at::Tensor&(const at::Tensor&, OtherArgs...),
  std::enable_if_t<can_box_all<OtherArgs...>::value, void>
> {
  static const at::Tensor& call(
    const BoxedKernel& boxed_kernel_func,
    const OperatorHandle& opHandle,
    DispatchKeySet dispatchKeySet,
    const at::Tensor& outArg, OtherArgs... otherArgs
  ) {
    // NOTE(review): unlike the other specializations, otherArgs are not
    // std::forward-ed here — presumably intentional since they are reused;
    // confirm against upstream before changing.
    torch::jit::Stack stack = boxArgs(outArg, otherArgs...);
    boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack);
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
      stack.size() == 1,
      "Boxed kernel was expected to return a single value on the stack, ",
      "but instead returned ", stack.size(), " values."
    );

    // As in the non-const in-place case, the result aliases outArg.
    return outArg;
  }
};
|
| 301 |
+
|
| 302 |
+
//
|
| 303 |
+
// 4. out of place ops that take a single non-const Tensor reference as their
|
| 304 |
+
// final argument, and also return it.
|
| 305 |
+
//
|
| 306 |
+
// Note: all signatures matching this pattern are assumed to be for such ops.
|
| 307 |
+
// This assumption permits the generated BoxedKernelWrapper specializations to simply
|
| 308 |
+
// return out arguments.
|
| 309 |
+
//
|
| 310 |
+
// Specialization 4: out-of-place ops that take a single non-const Tensor
// reference as their final argument, and also return it. All matching
// signatures are assumed to be such ops, so we simply return the out argument.
template <class FirstArg, class... RestArgs>
struct BoxedKernelWrapper<
  at::Tensor&(FirstArg, RestArgs...),
  std::enable_if_t<
    can_box_all<FirstArg, RestArgs...>::value
    // this skips over in-place kernels with a non-const Tensor
    // arg at the front, so those can unambiguously trigger the preceding specialization.
    && !is_mutable_tensor_ref<FirstArg>::value,
    void
  >
> {
  static at::Tensor& call(
    const BoxedKernel& boxed_kernel_func,
    const OperatorHandle& opHandle,
    DispatchKeySet dispatchKeySet,
    FirstArg firstArg, RestArgs... restArgs
  ) {
    torch::jit::Stack stack = boxArgs<FirstArg, RestArgs...>(std::forward<FirstArg>(firstArg), std::forward<RestArgs>(restArgs)...);
    boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack);
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
      stack.size() == 1,
      "Boxed kernel was expected to return a single value on the stack, ",
      "but instead returned ", stack.size(), " values."
    );

    // reusing restArgs after it has been forwarded here is ok because we know
    // that the last element is of type `Tensor&`.
    return std::get<sizeof...(RestArgs) - 1>(std::tuple<RestArgs...>{restArgs...});
  }
};
|
| 340 |
+
|
| 341 |
+
//
|
| 342 |
+
// 5. out of place ops that take multiple non-const Tensor references as their
|
| 343 |
+
// final arguments, and return them in a std::tuple.
|
| 344 |
+
//
|
| 345 |
+
// Note: all signatures matching this pattern are assumed to be for such ops.
|
| 346 |
+
// This assumption permits the generated BoxedKernelWrapper specializations to simply
|
| 347 |
+
// return the out arguments.
|
| 348 |
+
//
|
| 349 |
+
// Specialization 5: out-of-place ops that take multiple non-const Tensor
// references as their final arguments, and return them in a std::tuple.
// All matching signatures are assumed to be such ops, so we simply return
// the out arguments.
template <class Result, class... Args>
struct BoxedKernelWrapper<
  Result(Args...),
  std::enable_if_t<
    can_box_all<Args...>::value && is_tuple_of_mutable_tensor_refs<Result>::value,
    void
  >
> {
  static Result call(
    const BoxedKernel& boxed_kernel_func,
    const OperatorHandle& opHandle,
    DispatchKeySet dispatchKeySet,
    Args... args
  ) {
    using ArgTuple = std::tuple<Args...>;
    // Number of out tensors = number of tuple elements in the return type.
    constexpr int RetCount = std::tuple_size<Result>();

    torch::jit::Stack stack = boxArgs<Args...>(std::forward<Args>(args)...);
    boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack);
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
      stack.size() == RetCount,
      "Boxed kernel was expected to return ", RetCount, " values on the stack, ",
      "but instead returned ", stack.size(), " values."
    );

    // reusing args after it has been forwarded here is ok because we know
    // that the last RetCount elements are of type `Tensor&`.
    // tuple_take with a negative count takes the trailing RetCount elements.
    auto result = guts::tuple_take<ArgTuple, -RetCount>(ArgTuple{std::forward<Args>(args)...});
    static_assert(
      std::is_same<Result, decltype(result)>::value,
      "The parameter list of an op returning a tuple of Tensor references "
      "must end with an equal number of Tensor reference parameters."
    );
    return result;
  }
};
|
| 385 |
+
|
| 386 |
+
} // impl
|
| 387 |
+
} // c10
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/test_helpers.h
ADDED
|
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <gtest/gtest.h>
|
| 4 |
+
#include <gmock/gmock.h>
|
| 5 |
+
|
| 6 |
+
#include <ATen/core/Tensor.h>
|
| 7 |
+
#include <ATen/core/dispatch/Dispatcher.h>
|
| 8 |
+
#include <ATen/core/ivalue.h>
|
| 9 |
+
#include <c10/core/CPUAllocator.h>
|
| 10 |
+
#include <c10/util/irange.h>
|
| 11 |
+
|
| 12 |
+
template<class... Inputs>
|
| 13 |
+
inline std::vector<c10::IValue> makeStack(Inputs&&... inputs) {
|
| 14 |
+
return {std::forward<Inputs>(inputs)...};
|
| 15 |
+
}
|
| 16 |
+
|
| 17 |
+
// Creates a minimal 1-element float CPU tensor carrying the given dispatch
// key set, for use in dispatcher tests.
inline at::Tensor dummyTensor(c10::DispatchKeySet ks, bool requires_grad=false) {
  auto* allocator = c10::GetCPUAllocator();
  int64_t nelements = 1;
  auto dtype = caffe2::TypeMeta::Make<float>();
  int64_t size_bytes = nelements * dtype.itemsize();
  // Build the storage by hand so the tensor gets exactly the key set `ks`
  // rather than whatever a factory function would infer.
  auto storage_impl = c10::make_intrusive<c10::StorageImpl>(
      c10::StorageImpl::use_byte_size_t(),
      size_bytes,
      allocator->allocate(size_bytes),
      allocator,
      /*resizable=*/true);
  at::Tensor t = at::detail::make_tensor<c10::TensorImpl>(storage_impl, ks, dtype);
  // TODO: We add this to simulate the ideal case where we only have Autograd backend keys
  // on Tensor when it requires grad. But currently Autograd keys are added in TensorImpl
  // constructor by default.
  if (!requires_grad) {
    t.unsafeGetTensorImpl()->remove_autograd_key();
  }
  return t;
}
|
| 37 |
+
|
| 38 |
+
// Convenience overload: wraps a single DispatchKey into a singleton key set.
inline at::Tensor dummyTensor(c10::DispatchKey dispatch_key, bool requires_grad=false) {
  return dummyTensor(c10::DispatchKeySet(dispatch_key), requires_grad);
}
|
| 41 |
+
|
| 42 |
+
// Invokes an operator through the boxed calling convention and returns the
// resulting stack (which holds the op's return values, if any).
template<class... Args>
inline std::vector<c10::IValue> callOp(const c10::OperatorHandle& op, Args... args) {
  auto stack = makeStack(std::forward<Args>(args)...);
  op.callBoxed(&stack);
  return stack;
}
|
| 48 |
+
|
| 49 |
+
// Invokes an operator through the unboxed (typed) calling convention.
// Result/Args must match the operator's registered C++ signature.
template<class Result, class... Args>
inline Result callOpUnboxed(const c10::OperatorHandle& op, Args... args) {
  return op.typed<Result(Args...)>().call(std::forward<Args>(args)...);
}
|
| 53 |
+
|
| 54 |
+
// Like callOpUnboxed, but dispatches directly to the kernel registered for
// the given dispatch key instead of computing the key from the arguments.
template<class Result, class... Args>
inline Result callOpUnboxedWithDispatchKey(const c10::OperatorHandle& op, c10::DispatchKey dispatchKey, Args... args) {
  return op.typed<Result(Args...)>().callWithDispatchKey(dispatchKey, std::forward<Args>(args)...);
}
|
| 58 |
+
|
| 59 |
+
// Like callOpUnboxed, but redispatches with a caller-supplied, precomputed
// dispatch key set.
template<class Result, class... Args>
inline Result callOpUnboxedWithPrecomputedDispatchKeySet(const c10::OperatorHandle& op, c10::DispatchKeySet ks, Args... args) {
  return op.typed<Result(Args...)>().redispatch(ks, std::forward<Args>(args)...);
}
|
| 63 |
+
|
| 64 |
+
// Asserts that calling the named operator for the given dispatch key throws
// (i.e. no kernel is registered for that key).
// Note: assumes the op's schema takes (Tensor, int)-shaped arguments.
inline void expectDoesntFindKernel(const char* op_name, c10::DispatchKey dispatch_key) {
  auto op = c10::Dispatcher::singleton().findSchema({op_name, ""});
  EXPECT_ANY_THROW(
    callOp(*op, dummyTensor(dispatch_key), 5);
  );
}
|
| 70 |
+
|
| 71 |
+
inline void expectDoesntFindOperator(const char* op_name) {
|
| 72 |
+
auto op = c10::Dispatcher::singleton().findSchema({op_name, ""});
|
| 73 |
+
EXPECT_FALSE(op.has_value());
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
// Runs `functor` and asserts that it throws an Exception whose what() message
// contains `expectMessageContains`; fails the test if nothing is thrown.
// Note: other exception types are not caught here and will propagate.
template<class Exception, class Functor>
inline void expectThrows(Functor&& functor, const char* expectMessageContains) {
  try {
    std::forward<Functor>(functor)();
  } catch (const Exception& e) {
    EXPECT_THAT(e.what(), testing::HasSubstr(expectMessageContains));
    return;
  }
  ADD_FAILURE() << "Expected to throw exception containing \""
    << expectMessageContains << "\" but didn't throw";
}
|
| 87 |
+
|
| 88 |
+
// expectListEquals overloads: element-wise gtest comparison of `expected`
// against various list-like containers. Each element gets its own EXPECT_EQ
// so a mismatch reports the failing index's line rather than aborting.

// Compare against a fixed-size std::array.
template<class T, size_t N>
void expectListEquals(c10::ArrayRef<T> expected, std::array<T, N> actual) {
  EXPECT_EQ(expected.size(), actual.size());
  for (const auto i : c10::irange(expected.size())) {
    EXPECT_EQ(expected[i], actual[i]);
  }
}

// Compare against another ArrayRef.
template<class T>
void expectListEquals(c10::ArrayRef<T> expected, c10::ArrayRef<T> actual) {
  EXPECT_EQ(expected.size(), actual.size());
  for (const auto i : c10::irange(expected.size())) {
    EXPECT_EQ(expected[i], actual[i]);
  }
}

// Compare against a c10::List (uses get() to read by value).
template<class T>
void expectListEquals(c10::ArrayRef<T> expected, c10::List<T> actual) {
  EXPECT_EQ(expected.size(), actual.size());
  for (const auto i : c10::irange(expected.size())) {
    EXPECT_EQ(expected[i], actual.get(i));
  }
}

// Compare against a std::vector.
template<class T>
void expectListEquals(c10::ArrayRef<T> expected, std::vector<T> actual) {
  EXPECT_EQ(expected.size(), actual.size());
  for (const auto i : c10::irange(expected.size())) {
    EXPECT_EQ(expected[i], actual[i]);
  }
}
|
| 119 |
+
|
| 120 |
+
// NB: This is not really sound, but all of the type sets constructed here
|
| 121 |
+
// are singletons so it's fine
|
| 122 |
+
// Extracts the single "legacy" dispatch key from a tensor's key set.
// NB: This is not really sound in general, but all of the type sets
// constructed in these tests are singletons so it's fine.
static inline c10::DispatchKey extractDispatchKey(const at::Tensor& t) {
  return legacyExtractDispatchKey(t.key_set());
}
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/CppSignature.h
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <typeindex>
|
| 4 |
+
#include <c10/core/DispatchKeySet.h>
|
| 5 |
+
#include <c10/macros/Macros.h>
|
| 6 |
+
#include <c10/util/Metaprogramming.h>
|
| 7 |
+
#include <c10/util/Type.h>
|
| 8 |
+
|
| 9 |
+
namespace c10 {
|
| 10 |
+
namespace impl {
|
| 11 |
+
|
| 12 |
+
// A CppSignature object holds RTTI information about a C++ function signature at runtime
|
| 13 |
+
// and can compare them or get a debug-printable name.
|
| 14 |
+
// A CppSignature object holds RTTI information about a C++ function signature
// at runtime and can compare signatures or produce a debug-printable name.
class TORCH_API CppSignature final {
public:
  CppSignature(const CppSignature&) = default;
  CppSignature(CppSignature&&) noexcept = default;
  CppSignature& operator=(const CppSignature&) = default;
  CppSignature& operator=(CppSignature&&) noexcept = default;

  // Builds the canonical signature for FuncType.
  template<class FuncType>
  static CppSignature make() {
    // Normalize functors, lambdas, function pointers, etc. into the plain function type
    // The first argument of the schema might be of type DispatchKeySet, in which case we remove it.
    // We do this to guarantee that all CppSignature's for an operator will match, even if they're registered
    // with different calling conventions.
    // See Note [Plumbing Keys Through The Dispatcher]
    using decayed_function_type = typename c10::remove_DispatchKeySet_arg_from_func<std::decay_t<FuncType>>::func_type;

    return CppSignature(std::type_index(typeid(decayed_function_type)));
  }

  // Human-readable (demangled) name of the signature, for error messages.
  std::string name() const {
    return c10::demangle(signature_.name());
  }

  friend bool operator==(const CppSignature& lhs, const CppSignature& rhs) {
    if (lhs.signature_ == rhs.signature_) {
      return true;
    }
    // Without RTLD_GLOBAL, the type_index comparison could yield false because
    // they point to different instances of the RTTI data, but the types would
    // still be the same. Let's check for that case too.
    // Note that there still is a case where this might not work, i.e. when
    // linking libraries of different compilers together, they might have
    // different ways to serialize a type name. That, together with a missing
    // RTLD_GLOBAL, would still fail this.
    // NOTE(review): strcmp comes from <cstring>, which is assumed to be
    // transitively included here — confirm before pruning includes.
    if (0 == strcmp(lhs.signature_.name(), rhs.signature_.name())) {
      return true;
    }

    return false;
  }

private:
  explicit CppSignature(std::type_index signature): signature_(std::move(signature)) {}
  std::type_index signature_;
};
|
| 59 |
+
|
| 60 |
+
// Inequality in terms of the (RTTI-aware) equality operator above.
inline bool operator!=(const CppSignature& lhs, const CppSignature& rhs) {
  return !(lhs == rhs );
}
|
| 63 |
+
|
| 64 |
+
}
|
| 65 |
+
}
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/DispatchKeyExtractor.h
ADDED
|
@@ -0,0 +1,242 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <cstdint>
|
| 4 |
+
#include <ATen/core/function_schema.h>
|
| 5 |
+
#include <ATen/core/jit_type.h>
|
| 6 |
+
#include <c10/util/Bitset.h>
|
| 7 |
+
#include <c10/core/DispatchKeySet.h>
|
| 8 |
+
#include <c10/util/irange.h>
|
| 9 |
+
#include <ATen/core/Variadic.h>
|
| 10 |
+
#include <ATen/core/stack.h>
|
| 11 |
+
|
| 12 |
+
namespace c10 {
|
| 13 |
+
|
| 14 |
+
namespace impl {
|
| 15 |
+
|
| 16 |
+
// Take a DispatchKeySet for a Tensor and determine what the actual dispatch
|
| 17 |
+
// DispatchKey should be, taking into account TLS, and skipping backends which
|
| 18 |
+
// fall through.
|
| 19 |
+
//
|
| 20 |
+
// Unlike Tensor::key_set(), the value of this on a tensor can change depending
|
| 21 |
+
// on TLS.
|
| 22 |
+
//
|
| 23 |
+
// NB: If there is no valid dispatch key, this will return Undefined
|
| 24 |
+
// Combines a tensor's key set with thread-local included/excluded keys, then
// masks out keys that should not be considered for dispatch.
// NB: If there is no valid dispatch key, this will return Undefined.
static inline DispatchKeySet computeDispatchKeySet(
    DispatchKeySet ks,
    // The key mask lets us eliminate (by zero entries) keys which should not
    // be considered for dispatch. There are two cases when we use this:
    //
    // - If an operator's dispatch table contains a fallthrough entry, we
    //   should bypass it entirely when finding the key
    // - If a user invokes with redispatch, the mask lets us
    //   zero out the key the user asked us to stop.
    //
    // These excluded backends are NOT tracked in the TLS, but must be applied
    // AFTER TLS (since the backend may have been introduced for consideration
    // by the included TLS), which is why you have to pass them in to this
    // function (as opposed to just applying it to the input 'ks').
    DispatchKeySet key_mask
) {
  c10::impl::LocalDispatchKeySet local = c10::impl::tls_local_dispatch_key_set();
  // TODO: It's a bit irritating that we have to do logical ORs here, it would
  // be nice to only do one. Can always_included be folded into the TLS? Well,
  // it's a bit troublesome, because fastpath TLS access requires the type of
  // the TLS in question to be zero-initialized, so you don't actually win
  // anything in that case.
  return (((ks | local.included_) - local.excluded_) & key_mask);
}
|
| 48 |
+
|
| 49 |
+
}
|
| 50 |
+
|
| 51 |
+
namespace detail {
|
| 52 |
+
// A small gadget to extract the DispatchKeySet from types which are known
|
| 53 |
+
// to have it. Used to extract dispatch keys from unboxed calls.
|
| 54 |
+
// A small gadget to extract the DispatchKeySet from types which are known
// to have it. Used to extract dispatch keys from unboxed calls: each
// operator() overload ORs the keys of one argument kind into `ts`.
struct MultiDispatchKeySet : at::IterArgs<MultiDispatchKeySet> {
  DispatchKeySet ts;
  void operator()(const at::Tensor& x) {
    ts = ts | x.key_set();
  }
  void operator()(const c10::optional<at::Tensor>& x) {
    if (x.has_value()) {
      ts = ts | x->key_set();
    }
  }
  void operator()(at::ArrayRef<at::Tensor> xs) {
    for (const auto& x : xs) {
      ts = ts | x.key_set();
    }
  }
  // Tensor?[] translates to this case.
  void operator()(const c10::List<c10::optional<at::Tensor>>& xs) {
    for (c10::optional<at::Tensor> x : xs) {
      if (x.has_value()) {
        ts = ts | x.value().key_set();
      }
    }
  }
  // Structured Tensor[] translates to this case
  void operator()(const at::ITensorListRef& xs) {
    for (const auto& x : xs) {
      ts = ts | x.key_set();
    }
  }
  [[noreturn]] void operator()(at::ArrayRef<c10::optional<at::Tensor>>) {
    // Just checking that the handling of Tensor?[] didn't change.
    TORCH_INTERNAL_ASSERT(false);
  }
  void operator()(const at::Generator& gen) {
    if (gen.defined()) {
      ts = ts | gen.key_set();
    }
  }
  void operator()(const c10::optional<at::Generator>& gen) {
    if (gen.has_value() && gen->defined()) {
      ts = ts | gen->key_set();
    }
  }
  // Fallback: non-tensor argument types contribute no dispatch keys.
  template <typename T>
  void operator()(const T&) {
    // do nothing
  }
};
|
| 102 |
+
|
| 103 |
+
// NB: take by const reference (Don't do universal forwarding here! You
|
| 104 |
+
// don't want to move into this function!)
|
| 105 |
+
// Unions the dispatch key sets of all given arguments.
// NB: take by const reference (Don't do universal forwarding here! You
// don't want to move into this function!)
template <typename... Args>
DispatchKeySet multi_dispatch_key_set(const Args&... args) {
  return MultiDispatchKeySet().apply(args...).ts;
}
|
| 109 |
+
}
|
| 110 |
+
|
| 111 |
+
/**
|
| 112 |
+
* An instance of DispatchKeyExtractor knows how to get a dispatch key given
|
| 113 |
+
* a list of arguments for an operator call.
|
| 114 |
+
*
|
| 115 |
+
* The instance is specific for a certain operator as:
|
| 116 |
+
* - In boxed dispatch, different operators have different ways to extract
|
| 117 |
+
* the dispatch key (e.g. different numbers of arguments), and we precompute
|
| 118 |
+
* the stack locations we should look at; and
|
| 119 |
+
* - In all dispatch, some backends should be excluded from dispatch because
|
| 120 |
+
* they have been registered as fallthrough. The set of excluded backends
|
| 121 |
+
* varies from operator, as some operators may have overridden the
|
| 122 |
+
* fallthrough with custom behavior.
|
| 123 |
+
*
|
| 124 |
+
* Note - this should maintain identical impl to the py dispatcher key extraction logic
|
| 125 |
+
* at pytorch/torch/dispatcher.py
|
| 126 |
+
*/
|
| 127 |
+
// Knows how to compute a dispatch key set for a given operator call, in both
// the boxed (stack-based) and unboxed (typed-argument) calling conventions.
// See the doc comment preceding this struct for the full contract.
struct TORCH_API DispatchKeyExtractor final {
public:
  static DispatchKeyExtractor make(const FunctionSchema& schema) {
    return DispatchKeyExtractor(makeBitsetForDispatchArgs(schema));
  }

  // Placeholder extractor with no dispatch-relevant arguments; filled in
  // later via registerSchema().
  static DispatchKeyExtractor makeUninitialized() {
    return DispatchKeyExtractor(c10::utils::bitset());
  }

  void registerSchema(const FunctionSchema& schema) {
    TORCH_INTERNAL_ASSERT(dispatch_arg_indices_reverse_.is_entirely_unset());
    dispatch_arg_indices_reverse_ = makeBitsetForDispatchArgs(schema);
  }
  void deregisterSchema() {
    dispatch_arg_indices_reverse_ = c10::utils::bitset();
  }

  // Boxed path: peeks at only the dispatch-relevant stack slots (precomputed
  // in dispatch_arg_indices_reverse_) instead of scanning the whole stack.
  DispatchKeySet getDispatchKeySetBoxed(const torch::jit::Stack* stack) const {
    DispatchKeySet ks;
    dispatch_arg_indices_reverse_.for_each_set_bit([&] (size_t reverse_arg_index) {
      const auto& ivalue = torch::jit::peek(*stack, 0, reverse_arg_index + 1);
      if (C10_LIKELY(ivalue.isTensor())) {
        // NB: Take care not to introduce a refcount bump (there's
        // no safe toTensorRef method, alas)
        ks = ks | ivalue.unsafeToTensorImpl()->key_set();
      } else if (C10_UNLIKELY(ivalue.isTensorList())) {
        for (const at::Tensor& tensor : ivalue.toTensorList()) {
          ks = ks | tensor.key_set();
        }
      }
      // Tensor?[] translates to a c10::List<IValue> so we need to peek inside
      else if (C10_UNLIKELY(ivalue.isList())) {
        for (const auto& elt : ivalue.toListRef()) {
          if (elt.isTensor()) {
            ks = ks | elt.toTensor().key_set();
          }
        }
      }
    });
    // Keys that are fallthrough should be skipped
    if (requiresBitsetPerBackend_) {
      auto backend_idx = ks.getBackendIndex();
      return impl::computeDispatchKeySet(ks, nonFallthroughKeysPerBackend_[backend_idx]);
    } else {
      return impl::computeDispatchKeySet(ks, nonFallthroughKeys_);
    }
  }

  // Unboxed path: gathers keys from the typed arguments directly.
  template<class... Args>
  DispatchKeySet getDispatchKeySetUnboxed(const Args&... args) const {
    auto ks = detail::multi_dispatch_key_set(args...);
    // Keys that are fallthrough should be skipped
    if (requiresBitsetPerBackend_) {
      auto backend_idx = ks.getBackendIndex();
      return impl::computeDispatchKeySet(ks, nonFallthroughKeysPerBackend_[backend_idx]);
    } else {
      return impl::computeDispatchKeySet(ks, nonFallthroughKeys_);
    }
  }

  void setOperatorHasFallthroughForKey(DispatchKey k, bool has_fallthrough);

  std::string dumpState() const;
  void checkInvariants(const FunctionSchema& schema) const;

private:
  // Marks which argument positions can carry dispatch keys (Tensor, Tensor[],
  // Tensor?[], Tensor?), stored in reverse order (from the top of the stack).
  static c10::utils::bitset makeBitsetForDispatchArgs(const FunctionSchema& schema) {
    TORCH_CHECK(schema.arguments().size() <= c10::utils::bitset::NUM_BITS(),
        "The function schema has ", schema.arguments().size(),
        " arguments but this PyTorch build only supports ", c10::utils::bitset::NUM_BITS());
    c10::utils::bitset dispatch_arg_indices_reverse;
    for (const auto index : c10::irange(schema.arguments().size())) {
      if (schema.arguments()[index].type()->isSubtypeOf(*TensorType::get()) ||
          schema.arguments()[index].type()->isSubtypeOf(
              *ListType::ofTensors()) ||
          schema.arguments()[index].type()->isSubtypeOf(
              *ListType::ofOptionalTensors()) ||
          schema.arguments()[index].type()->isSubtypeOf(
              *OptionalType::ofTensor())) {
        dispatch_arg_indices_reverse.set(schema.arguments().size() - 1 - index);
      }
    }
    return dispatch_arg_indices_reverse;
  }

  explicit DispatchKeyExtractor(c10::utils::bitset dispatch_arg_indices_reverse)
  : dispatch_arg_indices_reverse_(dispatch_arg_indices_reverse)
  , nonFallthroughKeys_(DispatchKeySet::FULL)
  , requiresBitsetPerBackend_(false) {
    for (const auto i : c10::irange(nonFallthroughKeysPerBackend_.size())) {
      nonFallthroughKeysPerBackend_[i] = DispatchKeySet::FULL;
    }
  }

  // this is a bitset that has ones for each argument index which has to be
  // considered for dispatch. This avoids having to iterate over the stack
  // to find all the tensors. The bits are stored in reverse order, i.e.
  // dispatch_arg_indices_reverse_[i] == true, then the i-th argument from
  // the top of the stack (i.e. the i-th last argument of the function)
  // is relevant for dispatch.
  // dispatch_arg_indices_reverse_ is allowed to have zero bits set; that just means you must do the
  // fallthrough
  c10::utils::bitset dispatch_arg_indices_reverse_;

  // Set of functionality keys for which the operator does NOT have fallthrough kernel.
  DispatchKeySet nonFallthroughKeys_;
  // Set of functionality keys for which the operator does NOT have fallthrough kernel, defined PER BACKEND.
  // This is only needed if we know that the operator has a different set of fallthroughs defined for some backends.
  std::array<DispatchKeySet, num_backends> nonFallthroughKeysPerBackend_;
  // Flag to tell us if we can use the single set of nonFallthroughKeys_ (fast path),
  // or if we need to fall back to the slower path and check nonFallthroughKeysPerBackend_
  bool requiresBitsetPerBackend_;
};
|
| 241 |
+
|
| 242 |
+
}
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/Dispatcher.h
ADDED
|
@@ -0,0 +1,795 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/SequenceNumber.h>
|
| 4 |
+
#include <ATen/core/boxing/KernelFunction.h>
|
| 5 |
+
#include <ATen/core/boxing/impl/boxing.h>
|
| 6 |
+
#include <ATen/core/dispatch/OperatorEntry.h>
|
| 7 |
+
#include <ATen/core/dispatch/CppSignature.h>
|
| 8 |
+
#include <ATen/core/dispatch/RegistrationHandleRAII.h>
|
| 9 |
+
#include <ATen/record_function.h>
|
| 10 |
+
#include <c10/util/Exception.h>
|
| 11 |
+
#include <c10/util/LeftRight.h>
|
| 12 |
+
#include <list>
|
| 13 |
+
#include <mutex>
|
| 14 |
+
#include <condition_variable>
|
| 15 |
+
#include <type_traits>
|
| 16 |
+
#include <c10/core/SafePyObject.h>
|
| 17 |
+
|
| 18 |
+
#include <ATen/core/grad_mode.h>
|
| 19 |
+
#include <ATen/core/enum_tag.h>
|
| 20 |
+
|
| 21 |
+
#ifndef NDEBUG
|
| 22 |
+
#include <iostream>
|
| 23 |
+
#endif
|
| 24 |
+
|
| 25 |
+
namespace c10 {
|
| 26 |
+
|
| 27 |
+
TORCH_API bool show_dispatch_trace();
|
| 28 |
+
TORCH_API void dispatch_trace_nesting_incr();
|
| 29 |
+
TORCH_API void dispatch_trace_nesting_decr();
|
| 30 |
+
TORCH_API int64_t dispatch_trace_nesting_value();
|
| 31 |
+
|
| 32 |
+
struct DispatchTraceNestingGuard {
|
| 33 |
+
DispatchTraceNestingGuard() { dispatch_trace_nesting_incr(); }
|
| 34 |
+
~DispatchTraceNestingGuard() { dispatch_trace_nesting_decr(); }
|
| 35 |
+
};
|
| 36 |
+
|
| 37 |
+
class TORCH_API OperatorHandle;
|
| 38 |
+
template<class FuncType> class TypedOperatorHandle;
|
| 39 |
+
|
| 40 |
+
/**
|
| 41 |
+
* Implement this interface and register your instance with the dispatcher
|
| 42 |
+
* to get notified when operators are registered or deregistered with
|
| 43 |
+
* the dispatcher.
|
| 44 |
+
*
|
| 45 |
+
* NB: registration events only occur when a 'def' occurs; we don't trigger
|
| 46 |
+
* on 'impl' or 'fallback' calls.
|
| 47 |
+
*/
|
| 48 |
+
class TORCH_API OpRegistrationListener {
|
| 49 |
+
public:
|
| 50 |
+
virtual ~OpRegistrationListener();
|
| 51 |
+
|
| 52 |
+
virtual void onOperatorRegistered(const OperatorHandle& op) = 0;
|
| 53 |
+
virtual void onOperatorDeregistered(const OperatorHandle& op) = 0;
|
| 54 |
+
};
|
| 55 |
+
|
| 56 |
+
namespace detail {
|
| 57 |
+
class RegistrationListenerList;
|
| 58 |
+
}
|
| 59 |
+
class SchemaRegistrationHandleRAII;
|
| 60 |
+
|
| 61 |
+
/**
|
| 62 |
+
* Top-level dispatch interface for dispatching via the dynamic dispatcher.
|
| 63 |
+
* Most end users shouldn't use this directly; if you're trying to register
|
| 64 |
+
* ops look in op_registration
|
| 65 |
+
*/
|
| 66 |
+
class TORCH_API Dispatcher final {
|
| 67 |
+
private:
|
| 68 |
+
// For direct access to backend fallback information
|
| 69 |
+
friend class impl::OperatorEntry;
|
| 70 |
+
|
| 71 |
+
struct OperatorDef final {
|
| 72 |
+
explicit OperatorDef(OperatorName&& op_name)
|
| 73 |
+
: op(std::move(op_name)) {}
|
| 74 |
+
|
| 75 |
+
impl::OperatorEntry op;
|
| 76 |
+
|
| 77 |
+
// These refer to the number of outstanding RegistrationHandleRAII
|
| 78 |
+
// for this operator. def_count reflects only def() registrations
|
| 79 |
+
// (in the new world, this should only ever be 1, but old style
|
| 80 |
+
// registrations may register the schema multiple times, which
|
| 81 |
+
// will increase this count). def_and_impl_count reflects the number
|
| 82 |
+
// of combined def() and impl() registrations. When the last def() gets
|
| 83 |
+
// unregistered, we must immediately call the Deregistered listeners, but we
|
| 84 |
+
// must not actually delete the handle as there are other outstanding RAII
|
| 85 |
+
// destructors which will try to destruct and they had better still have a
|
| 86 |
+
// working operator handle in this case
|
| 87 |
+
size_t def_count = 0;
|
| 88 |
+
size_t def_and_impl_count = 0;
|
| 89 |
+
};
|
| 90 |
+
friend class OperatorHandle;
|
| 91 |
+
template<class> friend class TypedOperatorHandle;
|
| 92 |
+
|
| 93 |
+
struct Guard final {
|
| 94 |
+
Guard() : alive(true), mutex() {}
|
| 95 |
+
std::atomic<bool> alive;
|
| 96 |
+
std::mutex mutex;
|
| 97 |
+
};
|
| 98 |
+
|
| 99 |
+
public:
|
| 100 |
+
~Dispatcher();
|
| 101 |
+
|
| 102 |
+
// Implementation note: this class abstracts over the fact that we have per-operator
|
| 103 |
+
// dispatch tables. This could be easily adjusted to have a single global hash
|
| 104 |
+
// table.
|
| 105 |
+
static Dispatcher& realSingleton();
|
| 106 |
+
|
| 107 |
+
C10_ALWAYS_INLINE static Dispatcher& singleton() {
|
| 108 |
+
#if !defined C10_MOBILE
|
| 109 |
+
// Implemented inline so that steady-state code needn't incur
|
| 110 |
+
// function-call overhead. We can't just inline `realSingleton`
|
| 111 |
+
// because the function-local static would get duplicated across
|
| 112 |
+
// all DSOs that include & use this header, leading to multiple
|
| 113 |
+
// singleton instances.
|
| 114 |
+
static Dispatcher& s = realSingleton();
|
| 115 |
+
return s;
|
| 116 |
+
#else
|
| 117 |
+
// For C10_MOBILE, we should never inline a static function that
|
| 118 |
+
// has a static member, since the generated code calls
|
| 119 |
+
// __cxa_guard_acquire and __cxa_guard_release which help
|
| 120 |
+
// implement exactly once semantics for the initialization of the
|
| 121 |
+
// static Dispatcher& s above (for the non-mobile case). That
|
| 122 |
+
// additional code when duplicated across all operator stubs
|
| 123 |
+
// for every backend results in a lot of additional code
|
| 124 |
+
// being generated by the compiler.
|
| 125 |
+
return realSingleton();
|
| 126 |
+
#endif
|
| 127 |
+
}
|
| 128 |
+
|
| 129 |
+
// ------------------------------------------------------------------------
|
| 130 |
+
//
|
| 131 |
+
// Accessing operators by schema
|
| 132 |
+
//
|
| 133 |
+
// ------------------------------------------------------------------------
|
| 134 |
+
|
| 135 |
+
/**
|
| 136 |
+
* Looks for an operator schema with the given name and overload name
|
| 137 |
+
* and returns it if it is registered WITH A SCHEMA.
|
| 138 |
+
* Returns nullopt otherwise.
|
| 139 |
+
*/
|
| 140 |
+
c10::optional<OperatorHandle> findSchema(const OperatorName& operator_name);
|
| 141 |
+
|
| 142 |
+
/**
|
| 143 |
+
* Variant of findSchema that results in less code generated at the call site.
|
| 144 |
+
* It (1) takes const char* pointer rather than OperatorName (so we skip
|
| 145 |
+
* generating std::string constructor calls at the call site), and (2)
|
| 146 |
+
* it raises an exception if the operator is not found (so we skip
|
| 147 |
+
* generating exception raising code at the call site)
|
| 148 |
+
*
|
| 149 |
+
* Irritatingly, we still have to generate the handful of instructions
|
| 150 |
+
* for dealing with an exception being thrown during static initialization
|
| 151 |
+
* (e.g. __cxa_guard_abort). If we could annotate this method noexcept we
|
| 152 |
+
* could avoid this code too, but as the name of the function suggests,
|
| 153 |
+
* it does throw exceptions.
|
| 154 |
+
*/
|
| 155 |
+
OperatorHandle findSchemaOrThrow(const char* name, const char* overload_name);
|
| 156 |
+
|
| 157 |
+
// Like findSchema, but also returns OperatorHandle even if there is no schema
|
| 158 |
+
c10::optional<OperatorHandle> findOp(const OperatorName& operator_name);
|
| 159 |
+
|
| 160 |
+
// Returns a list of all operator names present in the operatorLookupTable_
|
| 161 |
+
const std::vector<OperatorName> getAllOpNames();
|
| 162 |
+
|
| 163 |
+
// ------------------------------------------------------------------------
|
| 164 |
+
//
|
| 165 |
+
// Invoking operators
|
| 166 |
+
//
|
| 167 |
+
// ------------------------------------------------------------------------
|
| 168 |
+
|
| 169 |
+
template<class Return, class... Args>
|
| 170 |
+
Return call(const TypedOperatorHandle<Return (Args...)>& op, Args... args) const;
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
template<class Return, class... Args>
|
| 174 |
+
static Return callWithDispatchKeySlowPath(const TypedOperatorHandle<Return (Args...)>& op, at::StepCallbacks& stepCallbacks, DispatchKeySet dispatchKeySet, const KernelFunction& kernel, Args... args);
|
| 175 |
+
|
| 176 |
+
// Like call, but intended for use in a redispatch in kernels that have explicitly performed the DispatchKey update calculatulation.
|
| 177 |
+
// This will take the DispatchKeySet completely as is and dispatch to the kernel of the corresponding highest priority key in the set.
|
| 178 |
+
// Note that this version of redispatch treats the inputted DispatchKeySet *as is*, and does NOT mask out the highest priority key.
|
| 179 |
+
// See Note [Plumbing Keys Through The Dispatcher]
|
| 180 |
+
template<class Return, class... Args>
|
| 181 |
+
Return redispatch(const TypedOperatorHandle<Return (Args...)>& op, DispatchKeySet currentDispatchKeySet, Args... args) const;
|
| 182 |
+
|
| 183 |
+
// Invoke an operator via the boxed calling convention using an IValue stack
|
| 184 |
+
void callBoxed(const OperatorHandle& op, Stack* stack) const;
|
| 185 |
+
void callBoxedForDispatchKey(const OperatorHandle& op, DispatchKey dk, Stack* stack) const;
|
| 186 |
+
|
| 187 |
+
// TODO: This will only be useful if we write a backend fallback that plumbs dispatch keys (currently there are none)
|
| 188 |
+
// See Note [Plumbing Keys Through The Dispatcher]
|
| 189 |
+
void redispatchBoxed(const OperatorHandle& op, DispatchKeySet dispatchKeySet, Stack* stack) const;
|
| 190 |
+
|
| 191 |
+
bool hasBackendFallbackForDispatchKey(DispatchKey dk) {
|
| 192 |
+
auto dispatch_ix = getDispatchTableIndexForDispatchKey(dk);
|
| 193 |
+
if (dispatch_ix < 0) return false;
|
| 194 |
+
return backendFallbackKernels_[dispatch_ix].kernel.isValid();
|
| 195 |
+
}
|
| 196 |
+
|
| 197 |
+
// Used by torchdeploy/multipy for multiple interpreters racing.
|
| 198 |
+
void waitForDef(const FunctionSchema& schema);
|
| 199 |
+
void waitForImpl(const OperatorName& op_name, c10::optional<DispatchKey> dispatch_key);
|
| 200 |
+
|
| 201 |
+
// ------------------------------------------------------------------------
|
| 202 |
+
//
|
| 203 |
+
// Performing registrations (NON user public; use op_registration)
|
| 204 |
+
//
|
| 205 |
+
// ------------------------------------------------------------------------
|
| 206 |
+
|
| 207 |
+
/**
|
| 208 |
+
* Register a new operator schema.
|
| 209 |
+
*
|
| 210 |
+
* If a schema with the same operator name and overload name already exists,
|
| 211 |
+
* this function will check that both schemas are exactly identical.
|
| 212 |
+
*/
|
| 213 |
+
RegistrationHandleRAII registerDef(FunctionSchema schema, std::string debug, std::vector<at::Tag> tags = {});
|
| 214 |
+
|
| 215 |
+
/**
|
| 216 |
+
* Register a kernel to the dispatch table for an operator.
|
| 217 |
+
* If dispatch_key is nullopt, then this registers a fallback kernel.
|
| 218 |
+
*
|
| 219 |
+
* @return A RAII object that manages the lifetime of the registration.
|
| 220 |
+
* Once that object is destructed, the kernel will be deregistered.
|
| 221 |
+
*/
|
| 222 |
+
// NB: steals the inferred function schema, as we may need to hold on to
|
| 223 |
+
// it for a bit until the real schema turns up
|
| 224 |
+
RegistrationHandleRAII registerImpl(OperatorName op_name, c10::optional<DispatchKey> dispatch_key, KernelFunction kernel, c10::optional<impl::CppSignature> cpp_signature, std::unique_ptr<FunctionSchema> inferred_function_schema, std::string debug);
|
| 225 |
+
|
| 226 |
+
/**
|
| 227 |
+
* Given an operator, tells the Dispatcher that we have implemented an abstract impl
|
| 228 |
+
* for this op in the given Python module. Call this a "pystub".
|
| 229 |
+
*/
|
| 230 |
+
RegistrationHandleRAII registerAbstractImplPyStub(const OperatorName& op_name, const char* pymodule, const char* context);
|
| 231 |
+
|
| 232 |
+
/**
|
| 233 |
+
* Given an operator, throws if we have an abstract impl pystub.
|
| 234 |
+
*/
|
| 235 |
+
void throwIfHasAbstractImplPyStub(OperatorName op_name);
|
| 236 |
+
|
| 237 |
+
c10::optional<std::pair<const char*, const char*>> getAbstractImplPyStub(OperatorName op_name);
|
| 238 |
+
|
| 239 |
+
/**
|
| 240 |
+
* Register a new operator by name.
|
| 241 |
+
*/
|
| 242 |
+
RegistrationHandleRAII registerName(OperatorName op_name);
|
| 243 |
+
|
| 244 |
+
/**
|
| 245 |
+
* Register a fallback kernel for a backend.
|
| 246 |
+
* If an operator is called but there is no concrete kernel for the dispatch
|
| 247 |
+
* key of the given operator arguments, it will check if there is such a
|
| 248 |
+
* fallback kernel for the given dispatch key and, if yes, call that one.
|
| 249 |
+
*/
|
| 250 |
+
RegistrationHandleRAII registerFallback(DispatchKey dispatch_key, KernelFunction kernel, std::string debug);
|
| 251 |
+
|
| 252 |
+
/**
|
| 253 |
+
* Use to register whenever we had a TORCH_LIBRARY declaration in the frontend
|
| 254 |
+
* API. These invocations are only permitted once per program, so we raise
|
| 255 |
+
* an error if this is called again for the same namespace.
|
| 256 |
+
*/
|
| 257 |
+
RegistrationHandleRAII registerLibrary(std::string ns, std::string debug);
|
| 258 |
+
|
| 259 |
+
// ------------------------------------------------------------------------
|
| 260 |
+
//
|
| 261 |
+
// Listeners on registrations
|
| 262 |
+
//
|
| 263 |
+
// ------------------------------------------------------------------------
|
| 264 |
+
|
| 265 |
+
/**
|
| 266 |
+
* Add a listener that gets called whenever a new op is registered or an existing
|
| 267 |
+
* op is deregistered. Immediately after registering, this listener gets called
|
| 268 |
+
* for all previously registered ops, so it can be used to keep track of ops
|
| 269 |
+
* registered with this dispatcher.
|
| 270 |
+
*/
|
| 271 |
+
RegistrationHandleRAII addRegistrationListener(std::unique_ptr<OpRegistrationListener> listener);
|
| 272 |
+
|
| 273 |
+
void checkInvariants() const;
|
| 274 |
+
|
| 275 |
+
//
|
| 276 |
+
// ------------------------------------------------------------------------
|
| 277 |
+
//
|
| 278 |
+
// Assertions
|
| 279 |
+
//
|
| 280 |
+
// ------------------------------------------------------------------------
|
| 281 |
+
|
| 282 |
+
/**
|
| 283 |
+
* For testing purposes.
|
| 284 |
+
* Returns a list of all operators that were created through calls to registerImpl(),
|
| 285 |
+
* without any corresponding calls to registerDef(). After static initialization
|
| 286 |
+
* is done this is almost certainly a bug, as the created OperatorHandle won't have
|
| 287 |
+
* any schema associated with it and users calling the op through the dispatcher
|
| 288 |
+
* won't be able to access it
|
| 289 |
+
*
|
| 290 |
+
* Note that we cannot enforce this invariant "as we go" during static initialization,
|
| 291 |
+
* due to undefined static initialization order- we have no guarantees over the order
|
| 292 |
+
* in which .def() and .impl() calls are registered in the dispatcher at static
|
| 293 |
+
* initialization time. So this function should only be called after static initialization.
|
| 294 |
+
*/
|
| 295 |
+
std::vector<OperatorHandle> findDanglingImpls() const;
|
| 296 |
+
|
| 297 |
+
/**
|
| 298 |
+
* Useful for inspecting global Dispatcher registration state.
|
| 299 |
+
* Returns the names of all operators with a kernel registered for the specified DispatchKey.
|
| 300 |
+
* If no DispatchKey is specified, it returns all registered operators.
|
| 301 |
+
*/
|
| 302 |
+
std::vector<OperatorName> getRegistrationsForDispatchKey(c10::optional<DispatchKey> k) const;
|
| 303 |
+
|
| 304 |
+
private:
|
| 305 |
+
Dispatcher();
|
| 306 |
+
|
| 307 |
+
static int64_t sequenceNumberForRunningRecordFunction(DispatchKey dispatchKey);
|
| 308 |
+
static void runRecordFunction(at::RecordFunction& guard, at::RecordFunction::schema_ref_t schema_ref, DispatchKey dispatchKey);
|
| 309 |
+
static void runRecordFunction(at::RecordFunction& guard, at::RecordFunction::schema_ref_t schema_ref, DispatchKey dispatchKey, c10::ArrayRef<const c10::IValue> args);
|
| 310 |
+
|
| 311 |
+
#ifdef FBCODE_CAFFE2
|
| 312 |
+
static bool profilingOperatorEvents();
|
| 313 |
+
static void fireOpStartUSDT(at::RecordFunction::schema_ref_t schema_ref);
|
| 314 |
+
static void fireOpEndUSDT(at::RecordFunction::schema_ref_t schema_ref);
|
| 315 |
+
#endif // FBCODE_CAFFE2
|
| 316 |
+
|
| 317 |
+
OperatorHandle findOrRegisterSchema_(FunctionSchema&& schema);
|
| 318 |
+
OperatorHandle findOrRegisterName_(const OperatorName& op_name);
|
| 319 |
+
|
| 320 |
+
void deregisterDef_(const OperatorHandle& op, const OperatorName& op_name);
|
| 321 |
+
void deregisterImpl_(
|
| 322 |
+
const OperatorHandle& op,
|
| 323 |
+
const OperatorName& op_name,
|
| 324 |
+
c10::optional<DispatchKey> dispatch_key,
|
| 325 |
+
impl::OperatorEntry::AnnotatedKernelContainerIterator kernel_handle);
|
| 326 |
+
void deregisterName_(const OperatorHandle& op, const OperatorName& op_name);
|
| 327 |
+
void deregisterFallback_(DispatchKey dispatchKey);
|
| 328 |
+
void deregisterLibrary_(const std::string& ns);
|
| 329 |
+
void cleanup(const OperatorHandle& op, const OperatorName& op_name);
|
| 330 |
+
void checkSchemaCompatibility(const OperatorHandle& op, const FunctionSchema& schema, const std::string& debug);
|
| 331 |
+
|
| 332 |
+
std::list<OperatorDef> operators_;
|
| 333 |
+
#if !defined(C10_MOBILE)
|
| 334 |
+
LeftRight<ska::flat_hash_map<OperatorName, OperatorHandle>> operatorLookupTable_;
|
| 335 |
+
#else
|
| 336 |
+
RWSafeLeftRightWrapper<ska::flat_hash_map<OperatorName, OperatorHandle>> operatorLookupTable_;
|
| 337 |
+
#endif
|
| 338 |
+
// Map from namespace to debug string (saying, e.g., where the library was defined)
|
| 339 |
+
ska::flat_hash_map<std::string, std::string> libraries_;
|
| 340 |
+
|
| 341 |
+
std::array<impl::AnnotatedKernel, num_runtime_entries> backendFallbackKernels_;
|
| 342 |
+
|
| 343 |
+
std::unique_ptr<detail::RegistrationListenerList> listeners_;
|
| 344 |
+
|
| 345 |
+
// This condition variable gets notified whenever we add a new def/impl to the
|
| 346 |
+
// dispatch table. This is primarily used by multipy/torchdeploy, when
|
| 347 |
+
// we have multiple interpreters trying to register to the dispatch table.
|
| 348 |
+
// In this situation, whenever the non-primary interpreter would have tried
|
| 349 |
+
// to register to the dispatch table, instead it will check to see if the
|
| 350 |
+
// expected registration has already been made, and if it hasn't, wait on
|
| 351 |
+
// this condition variable to see if it was just racing with the primary
|
| 352 |
+
// interpreter.
|
| 353 |
+
//
|
| 354 |
+
// We expect it to be rare for there to be any waiters on this condition
|
| 355 |
+
// variable. This is mostly just to help give better diagnostics if
|
| 356 |
+
// something goes horribly wrong
|
| 357 |
+
std::condition_variable cond_var_;
|
| 358 |
+
|
| 359 |
+
// Protect concurrent access to the dispatcher. We store this in a
|
| 360 |
+
// `shared_ptr` as we return callbacks that call back into dispatcher methods,
|
| 361 |
+
// and we need to be able to handle and guard against the event when the
|
| 362 |
+
// `Dispatcher` has been destroyed before the callbacks fire.
|
| 363 |
+
std::shared_ptr<Guard> guard_;
|
| 364 |
+
};
|
| 365 |
+
|
| 366 |
+
/**
|
| 367 |
+
* This is a handle to an operator schema registered with the dispatcher.
|
| 368 |
+
* This handle can be used to register kernels with the dispatcher or
|
| 369 |
+
* to lookup a kernel for a certain set of arguments.
|
| 370 |
+
*/
|
| 371 |
+
class TORCH_API OperatorHandle {
|
| 372 |
+
template <typename T> friend struct std::hash;
|
| 373 |
+
|
| 374 |
+
public:
|
| 375 |
+
OperatorHandle(OperatorHandle&&) noexcept = default;
|
| 376 |
+
OperatorHandle& operator=(OperatorHandle&&) noexcept = default;
|
| 377 |
+
OperatorHandle(const OperatorHandle&) = default;
|
| 378 |
+
OperatorHandle& operator=(const OperatorHandle&) = default;
|
| 379 |
+
// NOLINTNEXTLINE(performance-trivially-destructible)
|
| 380 |
+
~OperatorHandle();
|
| 381 |
+
|
| 382 |
+
const OperatorName& operator_name() const {
|
| 383 |
+
return operatorDef_->op.operator_name();
|
| 384 |
+
}
|
| 385 |
+
|
| 386 |
+
bool hasSchema() const {
|
| 387 |
+
return operatorDef_->op.hasSchema();
|
| 388 |
+
}
|
| 389 |
+
|
| 390 |
+
const FunctionSchema& schema() const {
|
| 391 |
+
return operatorDef_->op.schema();
|
| 392 |
+
}
|
| 393 |
+
|
| 394 |
+
const std::string& debug() const {
|
| 395 |
+
return operatorDef_->op.debug();
|
| 396 |
+
}
|
| 397 |
+
|
| 398 |
+
std::string dumpState() const {
|
| 399 |
+
return operatorDef_->op.dumpState();
|
| 400 |
+
}
|
| 401 |
+
|
| 402 |
+
bool hasKernelForDispatchKey(DispatchKey k) const {
|
| 403 |
+
return operatorDef_->op.hasKernelForDispatchKey(k);
|
| 404 |
+
}
|
| 405 |
+
|
| 406 |
+
bool hasKernelForAnyDispatchKey(DispatchKeySet k) const {
|
| 407 |
+
return operatorDef_->op.hasKernelForAnyDispatchKey(k);
|
| 408 |
+
}
|
| 409 |
+
|
| 410 |
+
bool hasComputedKernelForDispatchKey(DispatchKey k) const {
|
| 411 |
+
return operatorDef_->op.hasComputedKernelForDispatchKey(k);
|
| 412 |
+
}
|
| 413 |
+
|
| 414 |
+
std::string dumpComputedTable() const {
|
| 415 |
+
return operatorDef_->op.dumpComputedTable();
|
| 416 |
+
}
|
| 417 |
+
|
| 418 |
+
void checkInvariants() const {
|
| 419 |
+
return operatorDef_->op.checkInvariants();
|
| 420 |
+
}
|
| 421 |
+
|
| 422 |
+
c10::ArrayRef<at::Tag> getTags() const {
|
| 423 |
+
return operatorDef_->op.getTags();
|
| 424 |
+
}
|
| 425 |
+
|
| 426 |
+
void setReportErrorCallback_(std::unique_ptr<c10::SafePyObject> callback) {
|
| 427 |
+
operatorDef_->op.setReportErrorCallback_(std::move(callback));
|
| 428 |
+
}
|
| 429 |
+
|
| 430 |
+
bool hasTag(const at::Tag& tag) const {
|
| 431 |
+
for(const auto& tag_: getTags()) {
|
| 432 |
+
if (tag == tag_) {
|
| 433 |
+
return true;
|
| 434 |
+
}
|
| 435 |
+
}
|
| 436 |
+
return false;
|
| 437 |
+
}
|
| 438 |
+
|
| 439 |
+
template<class FuncType>
|
| 440 |
+
TypedOperatorHandle<FuncType> typed() const {
|
| 441 |
+
// NB: This assert is not 100% sound: you can retrieve a typed() operator
|
| 442 |
+
// handle prior to ANY C++ signature being registered on the operator
|
| 443 |
+
// and the check will say everything is OK (at which point you can then
|
| 444 |
+
// smuggle in a kernel that is typed incorrectly). For everything
|
| 445 |
+
// in core library this won't happen, because all the static registrations
|
| 446 |
+
// will be done by the time a typed() handle is acquired.
|
| 447 |
+
#if !defined C10_MOBILE
|
| 448 |
+
operatorDef_->op.assertSignatureIsCorrect<FuncType>();
|
| 449 |
+
if (fn_has_symint<FuncType>::value) {
|
| 450 |
+
operatorDef_->op.assertSignatureIsCorrect<typename fn_remove_symint<FuncType>::type>();
|
| 451 |
+
}
|
| 452 |
+
#endif
|
| 453 |
+
return TypedOperatorHandle<FuncType>(operatorIterator_);
|
| 454 |
+
}
|
| 455 |
+
|
| 456 |
+
void callBoxed(Stack* stack) const {
|
| 457 |
+
c10::Dispatcher::singleton().callBoxed(*this, stack);
|
| 458 |
+
}
|
| 459 |
+
|
| 460 |
+
void callBoxed(Stack& stack) const {
|
| 461 |
+
callBoxed(&stack);
|
| 462 |
+
}
|
| 463 |
+
|
| 464 |
+
void callBoxedForDispatchKey(DispatchKey dk, Stack& stack) const {
|
| 465 |
+
c10::Dispatcher::singleton().callBoxedForDispatchKey(*this, dk, &stack);
|
| 466 |
+
}
|
| 467 |
+
|
| 468 |
+
void redispatchBoxed(DispatchKeySet ks, Stack* stack) const {
|
| 469 |
+
c10::Dispatcher::singleton().redispatchBoxed(*this, ks, stack);
|
| 470 |
+
}
|
| 471 |
+
|
| 472 |
+
template <typename F>
|
| 473 |
+
PyObject* getPythonOp(c10::impl::PyInterpreter* self_interpreter, F slow_accessor) const {
|
| 474 |
+
return operatorDef_->op.getPythonOp(self_interpreter, slow_accessor);
|
| 475 |
+
}
|
| 476 |
+
|
| 477 |
+
bool operator==(const OperatorHandle& other) const {
|
| 478 |
+
return operatorDef_ == other.operatorDef_;
|
| 479 |
+
}
|
| 480 |
+
|
| 481 |
+
bool operator!=(const OperatorHandle& other) const {
|
| 482 |
+
return operatorDef_ != other.operatorDef_;
|
| 483 |
+
}
|
| 484 |
+
|
| 485 |
+
private:
|
| 486 |
+
explicit OperatorHandle(std::list<Dispatcher::OperatorDef>::iterator operatorIterator)
|
| 487 |
+
: operatorDef_(&*operatorIterator), operatorIterator_(operatorIterator) {}
|
| 488 |
+
friend class Dispatcher;
|
| 489 |
+
template<class> friend class TypedOperatorHandle;
|
| 490 |
+
|
| 491 |
+
// Storing a direct pointer to the OperatorDef even though we
|
| 492 |
+
// already have the iterator saves an instruction in the critical
|
| 493 |
+
// dispatch path. The iterator is effectively a
|
| 494 |
+
// pointer-to-std::list-node, and (at least in libstdc++'s
|
| 495 |
+
// implementation) the element is at an offset 16 bytes from that,
|
| 496 |
+
// because the prev/next pointers come first in the list node
|
| 497 |
+
// struct. So, an add instruction would be necessary to convert from the
|
| 498 |
+
// iterator to an OperatorDef*.
|
| 499 |
+
Dispatcher::OperatorDef* operatorDef_;
|
| 500 |
+
|
| 501 |
+
// We need to store this iterator in order to make
|
| 502 |
+
// Dispatcher::cleanup() fast -- it runs a lot on program
|
| 503 |
+
// termination (and presuambly library unloading).
|
| 504 |
+
std::list<Dispatcher::OperatorDef>::iterator operatorIterator_;
|
| 505 |
+
};
|
| 506 |
+
|
| 507 |
+
/**
|
| 508 |
+
* This is a handle to an operator schema registered with the dispatcher.
|
| 509 |
+
* It holds the same information as an OperatorHandle, but it is templated
|
| 510 |
+
* on the operator arguments and allows calling the operator in an
|
| 511 |
+
* unboxed way.
|
| 512 |
+
*/
|
| 513 |
+
template<class FuncType>
|
| 514 |
+
class TypedOperatorHandle final {
|
| 515 |
+
static_assert(guts::false_t<FuncType>(), "FuncType in OperatorHandle::typed<FuncType> was not a valid function type");
|
| 516 |
+
};
|
| 517 |
+
template<class Return, class... Args>
|
| 518 |
+
class TypedOperatorHandle<Return (Args...)> final : public OperatorHandle {
|
| 519 |
+
public:
|
| 520 |
+
TypedOperatorHandle(TypedOperatorHandle&&) noexcept = default;
|
| 521 |
+
TypedOperatorHandle& operator=(TypedOperatorHandle&&) noexcept = default;
|
| 522 |
+
TypedOperatorHandle(const TypedOperatorHandle&) = default;
|
| 523 |
+
TypedOperatorHandle& operator=(const TypedOperatorHandle&) = default;
|
| 524 |
+
|
| 525 |
+
// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
|
| 526 |
+
C10_ALWAYS_INLINE Return call(Args... args) const {
|
| 527 |
+
return c10::Dispatcher::singleton().call<Return, Args...>(*this, std::forward<Args>(args)...);
|
| 528 |
+
}
|
| 529 |
+
|
| 530 |
+
// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
|
| 531 |
+
C10_ALWAYS_INLINE Return redispatch(DispatchKeySet currentDispatchKeySet, Args... args) const {
|
| 532 |
+
return c10::Dispatcher::singleton().redispatch<Return, Args...>(*this, currentDispatchKeySet, std::forward<Args>(args)...);
|
| 533 |
+
}
|
| 534 |
+
|
| 535 |
+
private:
|
| 536 |
+
explicit TypedOperatorHandle(std::list<Dispatcher::OperatorDef>::iterator operatorIterator)
|
| 537 |
+
: OperatorHandle(operatorIterator) {}
|
| 538 |
+
friend class OperatorHandle;
|
| 539 |
+
};
|
| 540 |
+
|
| 541 |
+
namespace detail {
|
| 542 |
+
template <class... Args> inline void unused_arg_(const Args&...) {}
|
| 543 |
+
|
| 544 |
+
// CaptureKernelCall is intended to capture return values from Dispatcher
|
| 545 |
+
// unboxed kernel calls. A record function may request to get outputs from the
|
| 546 |
+
// kernel calls. For boxed kernels, it's straightforward, the returned values
|
| 547 |
+
// are in the stack object. The stack can be passed to record functions. For
|
| 548 |
+
// unboxed kernels, we need to handle different kinds of return values, cache
|
| 549 |
+
// them temporarily, then release the values for the actual function call
|
| 550 |
+
// return.
|
| 551 |
+
template <typename ReturnType>
|
| 552 |
+
struct CaptureKernelCall {
|
| 553 |
+
template <typename F, typename... Args>
|
| 554 |
+
CaptureKernelCall(
|
| 555 |
+
const F& kernel,
|
| 556 |
+
const TypedOperatorHandle<ReturnType(Args...)>& op,
|
| 557 |
+
const DispatchKeySet& dispatchKeySet,
|
| 558 |
+
Args&&... args)
|
| 559 |
+
// Calls the kernel and capture the result in output_.
|
| 560 |
+
: output_{kernel.template call<ReturnType, Args...>(
|
| 561 |
+
op,
|
| 562 |
+
dispatchKeySet,
|
| 563 |
+
std::forward<Args>(args)...)} {}
|
| 564 |
+
// Wraps the return values in a Stack.
|
| 565 |
+
Stack getOutputs() {
|
| 566 |
+
Stack stack;
|
| 567 |
+
impl::push_outputs<ReturnType, false>::copy(output_, &stack);
|
| 568 |
+
return stack;
|
| 569 |
+
}
|
| 570 |
+
// Since we are returning the output_, we don't expect the output_ to be used
|
| 571 |
+
// afterward. Copy elision and RVO do not apply to class data members. Using
|
| 572 |
+
// move semantic to avoid copies when possible.
|
| 573 |
+
ReturnType release() && {
|
| 574 |
+
return std::move(output_);
|
| 575 |
+
}
|
| 576 |
+
|
| 577 |
+
private:
|
| 578 |
+
ReturnType output_;
|
| 579 |
+
};
|
| 580 |
+
|
| 581 |
+
// Handle the lvalue reference differently since it should not be moved.
|
| 582 |
+
template <>
|
| 583 |
+
inline at::Tensor& CaptureKernelCall<at::Tensor&>::release() && {
|
| 584 |
+
return output_;
|
| 585 |
+
}
|
| 586 |
+
|
| 587 |
+
// Handle case where the kernel returns void.
|
| 588 |
+
template <>
|
| 589 |
+
struct CaptureKernelCall<void> {
|
| 590 |
+
template <typename F, typename... Args>
|
| 591 |
+
CaptureKernelCall(
|
| 592 |
+
const F& kernel,
|
| 593 |
+
const TypedOperatorHandle<void(Args...)>& op,
|
| 594 |
+
const DispatchKeySet& dispatchKeySet,
|
| 595 |
+
Args&&... args) {
|
| 596 |
+
// Calling the kernel and no need to capture void.
|
| 597 |
+
kernel.template call<void, Args...>(
|
| 598 |
+
op, dispatchKeySet, std::forward<Args>(args)...);
|
| 599 |
+
}
|
| 600 |
+
Stack getOutputs() {
|
| 601 |
+
return Stack();
|
| 602 |
+
}
|
| 603 |
+
void release() && {}
|
| 604 |
+
};
|
| 605 |
+
|
| 606 |
+
} // namespace detail
|
| 607 |
+
|
| 608 |
+
// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
|
| 609 |
+
template<class Return, class... Args>
|
| 610 |
+
inline Return Dispatcher::callWithDispatchKeySlowPath(const TypedOperatorHandle<Return(Args...)>& op, at::StepCallbacks& stepCallbacks, DispatchKeySet dispatchKeySet, const KernelFunction& kernel, Args... args) {
|
| 611 |
+
// If callbacks need inputs, we box the arguments and pass them to the guard.
|
| 612 |
+
// Note: For perf reasons we wouldn't want to prematurely box the arguments.
|
| 613 |
+
at::RecordFunction guard(std::move(stepCallbacks));
|
| 614 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(op.operatorDef_->op.isObserved());
|
| 615 |
+
auto dispatchKey = dispatchKeySet.highestPriorityTypeId();
|
| 616 |
+
auto& schema = op.schema();
|
| 617 |
+
auto schema_ref = std::reference_wrapper<const FunctionSchema>(schema);
|
| 618 |
+
constexpr auto num_boxed_args = impl::boxed_size<Args...>();
|
| 619 |
+
if constexpr (num_boxed_args != 0) {
|
| 620 |
+
if (guard.needsInputs()) {
|
| 621 |
+
// If we used std::array<IValue, num_boxed_args> here, we would
|
| 622 |
+
// have to spend time default constructing the IValues in
|
| 623 |
+
// boxedArgs. aligned_storage has no such requirement.
|
| 624 |
+
impl::IValueAlignedStorage boxedArgs[num_boxed_args];
|
| 625 |
+
// For debugging only; could be removed (but the compiler will do
|
| 626 |
+
// that for us and it's nice to have the extra assurance of
|
| 627 |
+
// correctness from our debug builds).
|
| 628 |
+
int lastArgIdx = 0;
|
| 629 |
+
impl::boxArgsToStack(boxedArgs, lastArgIdx, args...);
|
| 630 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(lastArgIdx == num_boxed_args);
|
| 631 |
+
// I don't *think* we need std::launder here, because IValue has
|
| 632 |
+
// no subclasses and no const or reference fields.
|
| 633 |
+
runRecordFunction(guard, schema_ref, dispatchKey, c10::ArrayRef<const c10::IValue>(reinterpret_cast<IValue *>(boxedArgs), num_boxed_args));
|
| 634 |
+
for (size_t ii = 0; ii < num_boxed_args; ++ii) {
|
| 635 |
+
reinterpret_cast<IValue *>(&boxedArgs[ii])->~IValue();
|
| 636 |
+
}
|
| 637 |
+
} else {
|
| 638 |
+
runRecordFunction(guard, schema_ref, dispatchKey);
|
| 639 |
+
}
|
| 640 |
+
} else {
|
| 641 |
+
runRecordFunction(guard, schema_ref, dispatchKey);
|
| 642 |
+
}
|
| 643 |
+
|
| 644 |
+
if (C10_UNLIKELY(guard.needsOutputs())) {
|
| 645 |
+
// Calls the kernel and capture the output temporarily to pass to
|
| 646 |
+
// RecordFunction.
|
| 647 |
+
detail::CaptureKernelCall<Return> captureKernelCall(
|
| 648 |
+
kernel, op, dispatchKeySet, std::forward<Args>(args)...);
|
| 649 |
+
guard.setOutputs(captureKernelCall.getOutputs());
|
| 650 |
+
// Releases the captured output to return to caller.
|
| 651 |
+
return std::move(captureKernelCall).release();
|
| 652 |
+
}
|
| 653 |
+
|
| 654 |
+
// keeping the guard alive while executing the kernel
|
| 655 |
+
return kernel.template call<Return, Args...>(op, dispatchKeySet, std::forward<Args>(args)...);
|
| 656 |
+
}
|
| 657 |
+
|
| 658 |
+
// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
|
| 659 |
+
template<class Return, class... Args>
|
| 660 |
+
C10_ALWAYS_INLINE_UNLESS_MOBILE Return Dispatcher::call(const TypedOperatorHandle<Return(Args...)>& op, Args... args) const {
|
| 661 |
+
detail::unused_arg_(args...); // workaround for a false-positive warning about unused parameters in gcc 5
|
| 662 |
+
auto dispatchKeySet = op.operatorDef_->op.dispatchKeyExtractor()
|
| 663 |
+
.template getDispatchKeySetUnboxed<Args...>(args...);
|
| 664 |
+
#ifndef NDEBUG
|
| 665 |
+
DispatchTraceNestingGuard debug_guard;
|
| 666 |
+
if (show_dispatch_trace()) {
|
| 667 |
+
auto nesting_value = dispatch_trace_nesting_value();
|
| 668 |
+
for (int64_t i = 0; i < nesting_value; ++i) std::cerr << " ";
|
| 669 |
+
std::cerr << "[call] op=[" << op.operator_name() << "], key=[" << toString(dispatchKeySet.highestPriorityTypeId()) << "]" << std::endl;
|
| 670 |
+
}
|
| 671 |
+
#endif
|
| 672 |
+
const KernelFunction& kernel = op.operatorDef_->op.lookup(dispatchKeySet);
|
| 673 |
+
#ifndef PYTORCH_DISABLE_PER_OP_PROFILING
|
| 674 |
+
auto step_callbacks = at::getStepCallbacksUnlessEmpty(at::RecordScope::FUNCTION);
|
| 675 |
+
if (C10_UNLIKELY(step_callbacks.has_value() && op.operatorDef_->op.isObserved())) {
|
| 676 |
+
return callWithDispatchKeySlowPath<Return, Args...>(op, *step_callbacks, dispatchKeySet, kernel, std::forward<Args>(args)...);
|
| 677 |
+
}
|
| 678 |
+
#endif // PYTORCH_DISABLE_PER_OP_PROFILING
|
| 679 |
+
|
| 680 |
+
#ifdef FBCODE_CAFFE2
|
| 681 |
+
if(profilingOperatorEvents()) {
|
| 682 |
+
struct FireOpRAII {
|
| 683 |
+
FireOpRAII(at::RecordFunction::schema_ref_t schema_ref) : schema_ref_(schema_ref) {
|
| 684 |
+
fireOpStartUSDT(schema_ref);
|
| 685 |
+
}
|
| 686 |
+
~FireOpRAII() { fireOpEndUSDT(schema_ref_); }
|
| 687 |
+
at::RecordFunction::schema_ref_t schema_ref_;
|
| 688 |
+
} event(op.schema());
|
| 689 |
+
return kernel.template call<Return, Args...>(op, dispatchKeySet, std::forward<Args>(args)...);
|
| 690 |
+
} else {
|
| 691 |
+
return kernel.template call<Return, Args...>(op, dispatchKeySet, std::forward<Args>(args)...);
|
| 692 |
+
}
|
| 693 |
+
#else
|
| 694 |
+
return kernel.template call<Return, Args...>(op, dispatchKeySet, std::forward<Args>(args)...);
|
| 695 |
+
#endif // FBCODE_CAFFE2
|
| 696 |
+
}
|
| 697 |
+
|
| 698 |
+
// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
|
| 699 |
+
template<class Return, class... Args>
|
| 700 |
+
inline Return Dispatcher::redispatch(const TypedOperatorHandle<Return (Args...)>& op, DispatchKeySet currentDispatchKeySet, Args... args) const {
|
| 701 |
+
detail::unused_arg_(args...); // workaround for a false-positive warning about unused parameters in gcc 5
|
| 702 |
+
// do not use RecordFunction on redispatch
|
| 703 |
+
#ifndef NDEBUG
|
| 704 |
+
DispatchTraceNestingGuard debug_guard;
|
| 705 |
+
if (show_dispatch_trace()) {
|
| 706 |
+
auto nesting_value = dispatch_trace_nesting_value();
|
| 707 |
+
for (int64_t i = 0; i < nesting_value; ++i) std::cerr << " ";
|
| 708 |
+
std::cerr << "[redispatch] op=[" << op.operator_name() << "], key=[" << toString(currentDispatchKeySet.highestPriorityTypeId()) << "]" << std::endl;
|
| 709 |
+
}
|
| 710 |
+
#endif
|
| 711 |
+
const KernelFunction& kernel = op.operatorDef_->op.lookup(currentDispatchKeySet);
|
| 712 |
+
return kernel.template call<Return, Args...>(op, currentDispatchKeySet, std::forward<Args>(args)...);
|
| 713 |
+
}
|
| 714 |
+
|
| 715 |
+
inline void Dispatcher::callBoxed(const OperatorHandle& op, Stack* stack) const {
|
| 716 |
+
// note: this doesn't need the mutex because write operations on the list keep iterators intact.
|
| 717 |
+
const auto& entry = op.operatorDef_->op;
|
| 718 |
+
auto dispatchKeySet = entry.dispatchKeyExtractor().getDispatchKeySetBoxed(stack);
|
| 719 |
+
#ifndef NDEBUG
|
| 720 |
+
DispatchTraceNestingGuard debug_guard;
|
| 721 |
+
if (show_dispatch_trace()) {
|
| 722 |
+
auto nesting_value = dispatch_trace_nesting_value();
|
| 723 |
+
for (int64_t i = 0; i < nesting_value; ++i) std::cerr << " ";
|
| 724 |
+
std::cerr << "[callBoxed] op=[" << op.operator_name() << "], key=[" << toString(dispatchKeySet.highestPriorityTypeId()) << "]" << std::endl;
|
| 725 |
+
}
|
| 726 |
+
#endif
|
| 727 |
+
const auto& kernel = entry.lookup(dispatchKeySet);
|
| 728 |
+
#ifndef PYTORCH_DISABLE_PER_OP_PROFILING
|
| 729 |
+
auto step_callbacks = at::getStepCallbacksUnlessEmpty(at::RecordScope::FUNCTION);
|
| 730 |
+
if (C10_UNLIKELY(step_callbacks.has_value() && entry.isObserved())) {
|
| 731 |
+
at::RecordFunction guard(std::move(*step_callbacks));
|
| 732 |
+
auto dispatchKey = dispatchKeySet.highestPriorityTypeId();
|
| 733 |
+
auto& schema = op.schema();
|
| 734 |
+
auto schema_ref = std::reference_wrapper<const FunctionSchema>(schema);
|
| 735 |
+
guard.needsInputs() ? runRecordFunction(guard, schema_ref, dispatchKey, c10::ArrayRef<const c10::IValue>(stack->data(), stack->size()))
|
| 736 |
+
: runRecordFunction(guard, schema_ref, dispatchKey);
|
| 737 |
+
|
| 738 |
+
// keeping the guard alive while executing the kernel
|
| 739 |
+
kernel.callBoxed(op, dispatchKeySet, stack);
|
| 740 |
+
|
| 741 |
+
if (C10_UNLIKELY(guard.needsOutputs())) {
|
| 742 |
+
guard.setOutputs(*stack);
|
| 743 |
+
}
|
| 744 |
+
return;
|
| 745 |
+
}
|
| 746 |
+
#endif // PYTORCH_DISABLE_PER_OP_PROFILING
|
| 747 |
+
kernel.callBoxed(op, dispatchKeySet, stack);
|
| 748 |
+
}
|
| 749 |
+
|
| 750 |
+
// NB: this doesn't count as a "true" dispatcher jump, so no instrumentation
|
| 751 |
+
inline void Dispatcher::callBoxedForDispatchKey(const OperatorHandle& op, DispatchKey dk, Stack* stack) const {
|
| 752 |
+
// note: this doesn't need the mutex because write operations on the list keep iterators intact.
|
| 753 |
+
const auto& entry = op.operatorDef_->op;
|
| 754 |
+
// We still compute this as we're obligated to pass it on to the internal
|
| 755 |
+
// kernel, if it is a boxed fallback
|
| 756 |
+
auto dispatchKeySet = entry.dispatchKeyExtractor().getDispatchKeySetBoxed(stack);
|
| 757 |
+
const auto& kernel = ([&]() {
|
| 758 |
+
if (op.hasKernelForDispatchKey(dk)) {
|
| 759 |
+
return entry.kernelForDispatchKey(dk);
|
| 760 |
+
} else {
|
| 761 |
+
auto idx = getDispatchTableIndexForDispatchKey(dk);
|
| 762 |
+
TORCH_INTERNAL_ASSERT(idx >= 0);
|
| 763 |
+
return backendFallbackKernels_[idx].kernel;
|
| 764 |
+
}
|
| 765 |
+
})();
|
| 766 |
+
kernel.callBoxed(op, dispatchKeySet, stack);
|
| 767 |
+
}
|
| 768 |
+
|
| 769 |
+
inline void Dispatcher::redispatchBoxed(const OperatorHandle& op, DispatchKeySet dispatchKeySet, Stack* stack) const {
|
| 770 |
+
// note: this doesn't need the mutex because write operations on the list keep iterators intact.
|
| 771 |
+
const auto& entry = op.operatorDef_->op;
|
| 772 |
+
#ifndef NDEBUG
|
| 773 |
+
DispatchTraceNestingGuard debug_guard;
|
| 774 |
+
if (show_dispatch_trace()) {
|
| 775 |
+
auto nesting_value = dispatch_trace_nesting_value();
|
| 776 |
+
for (int64_t i = 0; i < nesting_value; ++i) std::cerr << " ";
|
| 777 |
+
std::cerr << "[redispatchBoxed] op=[" << op.operator_name() << "], key=[" << toString(dispatchKeySet.highestPriorityTypeId()) << "]" << std::endl;
|
| 778 |
+
}
|
| 779 |
+
#endif
|
| 780 |
+
const auto& kernel = entry.lookup(dispatchKeySet);
|
| 781 |
+
return kernel.callBoxed(op, dispatchKeySet, stack);
|
| 782 |
+
}
|
| 783 |
+
|
| 784 |
+
} // namespace c10
|
| 785 |
+
|
| 786 |
+
namespace std {
|
| 787 |
+
|
| 788 |
+
template <>
|
| 789 |
+
struct hash<c10::OperatorHandle> {
|
| 790 |
+
size_t operator()(const c10::OperatorHandle& op) const noexcept {
|
| 791 |
+
return std::hash<void*>{}(static_cast<void*>(op.operatorDef_));
|
| 792 |
+
}
|
| 793 |
+
};
|
| 794 |
+
|
| 795 |
+
} // namespace std
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/ObservedOperators.h
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/operator_name.h>
|
| 4 |
+
#include <string>
|
| 5 |
+
#include <unordered_set>
|
| 6 |
+
|
| 7 |
+
namespace c10 {
|
| 8 |
+
|
| 9 |
+
struct TORCH_API ObservedOperators {
|
| 10 |
+
ObservedOperators() = delete;
|
| 11 |
+
|
| 12 |
+
static bool isObserved(const OperatorName& name);
|
| 13 |
+
|
| 14 |
+
static std::unordered_set<std::string>& getUnobservedOperatorList();
|
| 15 |
+
};
|
| 16 |
+
|
| 17 |
+
} // namespace c10
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorEntry.h
ADDED
|
@@ -0,0 +1,313 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/function_schema.h>
|
| 4 |
+
#include <c10/util/Metaprogramming.h>
|
| 5 |
+
#include <c10/util/flat_hash_map.h>
|
| 6 |
+
#include <c10/util/Optional.h>
|
| 7 |
+
#include <c10/core/DispatchKey.h>
|
| 8 |
+
#include <c10/core/PyHandleCache.h>
|
| 9 |
+
#include <c10/core/SafePyObject.h>
|
| 10 |
+
#include <ATen/core/ivalue.h>
|
| 11 |
+
#include <ATen/core/boxing/KernelFunction.h>
|
| 12 |
+
#include <ATen/core/dispatch/DispatchKeyExtractor.h>
|
| 13 |
+
|
| 14 |
+
#include <ATen/core/dispatch/OperatorOptions.h>
|
| 15 |
+
#include <ATen/core/dispatch/CppSignature.h>
|
| 16 |
+
#include <ATen/core/dispatch/RegistrationHandleRAII.h>
|
| 17 |
+
#include <ATen/core/enum_tag.h>
|
| 18 |
+
|
| 19 |
+
#include <list>
|
| 20 |
+
#include <array>
|
| 21 |
+
|
| 22 |
+
#ifdef C10_MOBILE
|
| 23 |
+
#define C10_DISPATCHER_ONE_KERNEL_PER_DISPATCH_KEY
|
| 24 |
+
#endif
|
| 25 |
+
|
| 26 |
+
namespace c10 {
|
| 27 |
+
|
| 28 |
+
class Dispatcher;
|
| 29 |
+
|
| 30 |
+
namespace impl {
|
| 31 |
+
|
| 32 |
+
// This data structure represents a kernel that was registered to us from a
|
| 33 |
+
// user. Unlike KernelFunction, AnnotatedKernel contains some extra metadata
|
| 34 |
+
// about the kernel that isn't necessary for actual dispatching (this is why
|
| 35 |
+
// we don't put AnnotatedKernel in the actual DispatchTable), but is useful for
|
| 36 |
+
// giving good error messages.
|
| 37 |
+
struct AnnotatedKernel final {
|
| 38 |
+
AnnotatedKernel(KernelFunction k, std::unique_ptr<FunctionSchema> s, std::string d)
|
| 39 |
+
: kernel(std::move(k))
|
| 40 |
+
, inferred_function_schema(std::move(s))
|
| 41 |
+
, debug(std::move(d))
|
| 42 |
+
{}
|
| 43 |
+
AnnotatedKernel() = default;
|
| 44 |
+
KernelFunction kernel;
|
| 45 |
+
std::unique_ptr<FunctionSchema> inferred_function_schema;
|
| 46 |
+
// A little debug string to help us identify the kernel in question.
|
| 47 |
+
// Most importantly it records the TORCH_LIBRARY block that did the
|
| 48 |
+
// registration.
|
| 49 |
+
std::string debug;
|
| 50 |
+
};
|
| 51 |
+
|
| 52 |
+
// This data structure represents operator schema, with metadata specifying
|
| 53 |
+
// where the registration of this schema occurred
|
| 54 |
+
struct AnnotatedSchema final {
|
| 55 |
+
AnnotatedSchema(FunctionSchema s, std::string d)
|
| 56 |
+
: schema(std::move(s))
|
| 57 |
+
, debug(std::move(d))
|
| 58 |
+
{}
|
| 59 |
+
FunctionSchema schema;
|
| 60 |
+
std::string debug;
|
| 61 |
+
};
|
| 62 |
+
|
| 63 |
+
// Internal data structure that records information about a specific operator.
|
| 64 |
+
// It's not part of the public API; typically, users will interact with
|
| 65 |
+
// OperatorHandle instead.
|
| 66 |
+
//
|
| 67 |
+
// Concurrent writes to OperatorEntry are protected by the GLOBAL Dispatcher
|
| 68 |
+
// lock (this is important because some methods in OperatorEntry access
|
| 69 |
+
// dispatcher state)
|
| 70 |
+
class TORCH_API OperatorEntry final {
|
| 71 |
+
public:
|
| 72 |
+
explicit OperatorEntry(OperatorName&& operator_name);
|
| 73 |
+
|
| 74 |
+
OperatorEntry(const OperatorEntry&) = delete;
|
| 75 |
+
OperatorEntry(OperatorEntry&&) noexcept = delete;
|
| 76 |
+
OperatorEntry& operator=(const OperatorEntry&) = delete;
|
| 77 |
+
OperatorEntry& operator=(OperatorEntry&&) noexcept = delete;
|
| 78 |
+
|
| 79 |
+
const FunctionSchema& schema() const {
|
| 80 |
+
TORCH_INTERNAL_ASSERT(schema_.has_value(), "Tried to access the schema for ", name_, " which doesn't have a schema registered yet");
|
| 81 |
+
return schema_->schema;
|
| 82 |
+
}
|
| 83 |
+
const std::string& debug() const {
|
| 84 |
+
TORCH_INTERNAL_ASSERT(schema_.has_value());
|
| 85 |
+
return schema_->debug;
|
| 86 |
+
}
|
| 87 |
+
bool hasSchema() const {
|
| 88 |
+
return schema_.has_value();
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
bool isObserved() const {
|
| 92 |
+
return is_observed_;
|
| 93 |
+
}
|
| 94 |
+
|
| 95 |
+
// We may allocate an OperatorEntry for an operator even when we don't
|
| 96 |
+
// have a schema. When we receive the schema registration, we post
|
| 97 |
+
// facto register a schema.
|
| 98 |
+
//
|
| 99 |
+
// NB: registerSchema/deregisterSchema are not idempotent; if you
|
| 100 |
+
// attempt to register a schema when one is already present or vice
|
| 101 |
+
// versa that is an error. (Refcounting for the registrations is
|
| 102 |
+
// handled in the OperatorHandle in Dispatcher)
|
| 103 |
+
void registerSchema(FunctionSchema&&, std::string&& debug, std::vector<at::Tag> tags = {});
|
| 104 |
+
void deregisterSchema();
|
| 105 |
+
|
| 106 |
+
const OperatorName& operator_name() const {
|
| 107 |
+
return name_;
|
| 108 |
+
}
|
| 109 |
+
|
| 110 |
+
#ifdef C10_DISPATCHER_ONE_KERNEL_PER_DISPATCH_KEY
|
| 111 |
+
using AnnotatedKernelContainer = std::array<AnnotatedKernel, 1>;
|
| 112 |
+
#else
|
| 113 |
+
using AnnotatedKernelContainer = std::list<AnnotatedKernel>;
|
| 114 |
+
#endif
|
| 115 |
+
using AnnotatedKernelContainerIterator = AnnotatedKernelContainer::iterator;
|
| 116 |
+
|
| 117 |
+
// Why are kernels and fallback asymmetric? It has to do with ownership.
|
| 118 |
+
// Kernels and the computed dispatch tables for them are canonically
|
| 119 |
+
// owned by OperatorEntry, but backend fallbacks are specified once
|
| 120 |
+
// and apply for all operators, so they should be owned by Dispatcher.
|
| 121 |
+
// However, the registration of a backend fallback affects the
|
| 122 |
+
// state of the computed dispatch table, so when a backend fallback
|
| 123 |
+
// is updated, we need to update the operator tables too. Thus,
|
| 124 |
+
// registerKernel is the mechanism by which we give kernels to
|
| 125 |
+
// operator entry to own (and update dispatch table), but we only
|
| 126 |
+
// need a non-owning mechanism to update fallback.
|
| 127 |
+
|
| 128 |
+
// Precondition: Dispatcher::mutex_ is held
|
| 129 |
+
// Postcondition: caller is responsible for disposing of the kernel
|
| 130 |
+
AnnotatedKernelContainerIterator registerKernel(
|
| 131 |
+
const Dispatcher& dispatcher,
|
| 132 |
+
c10::optional<DispatchKey> dispatch_key,
|
| 133 |
+
KernelFunction kernel,
|
| 134 |
+
c10::optional<CppSignature> cpp_signature,
|
| 135 |
+
std::unique_ptr<FunctionSchema> inferred_function_schema,
|
| 136 |
+
std::string debug
|
| 137 |
+
);
|
| 138 |
+
|
| 139 |
+
// Precondition: Dispatcher::mutex_ is held
|
| 140 |
+
void deregisterKernel_(
|
| 141 |
+
const Dispatcher& dispatcher,
|
| 142 |
+
c10::optional<DispatchKey> dispatch_key,
|
| 143 |
+
AnnotatedKernelContainerIterator kernel
|
| 144 |
+
);
|
| 145 |
+
|
| 146 |
+
// Precondition: Dispatcher::mutex_ is held
|
| 147 |
+
void updateFallback(
|
| 148 |
+
const Dispatcher& dispatcher,
|
| 149 |
+
DispatchKey dispatch_key
|
| 150 |
+
);
|
| 151 |
+
|
| 152 |
+
// Precondition: Dispatcher::mutex_ is held
|
| 153 |
+
void updateSchemaAliasAnalysis(AliasAnalysisKind a) {
|
| 154 |
+
TORCH_INTERNAL_ASSERT(schema_.has_value());
|
| 155 |
+
schema_->schema.setAliasAnalysis(a);
|
| 156 |
+
}
|
| 157 |
+
|
| 158 |
+
std::string dumpComputedTable() const;
|
| 159 |
+
std::string dumpState() const;
|
| 160 |
+
void checkInvariants() const;
|
| 161 |
+
|
| 162 |
+
const DispatchKeyExtractor& dispatchKeyExtractor() const { return dispatchKeyExtractor_; }
|
| 163 |
+
|
| 164 |
+
// Asserts that the given FuncType is correct for calling this operator in an unboxed way.
|
| 165 |
+
template<class FuncType>
|
| 166 |
+
inline void assertSignatureIsCorrect() {
|
| 167 |
+
assertSignatureIsCorrect(CppSignature::make<FuncType>(), fn_has_symint<FuncType>::value);
|
| 168 |
+
}
|
| 169 |
+
|
| 170 |
+
void assertSignatureIsCorrect(const CppSignature& call_signature, bool has_symint) const;
|
| 171 |
+
|
| 172 |
+
[[noreturn]] void reportError(DispatchKey dispatchKey) const;
|
| 173 |
+
|
| 174 |
+
const KernelFunction& lookup(DispatchKeySet ks) const {
|
| 175 |
+
const auto idx = ks.getDispatchTableIndexForDispatchKeySet();
|
| 176 |
+
if (C10_UNLIKELY(idx == -1)) {
|
| 177 |
+
reportError(ks.highestPriorityTypeId());
|
| 178 |
+
}
|
| 179 |
+
const auto& kernel = dispatchTable_[idx];
|
| 180 |
+
// A valid kernel *always* has a boxed kernel and *may* have an
|
| 181 |
+
// unboxed kernel. However, we typically do unboxed calls in at::
|
| 182 |
+
// APIs, where the kernel 1) will very likely be valid and 2)
|
| 183 |
+
// should have an unboxed kernel. Checking the unboxed kernel
|
| 184 |
+
// first will allow us to avoid touching the boxed kernel at all
|
| 185 |
+
// in the common case.
|
| 186 |
+
if (C10_UNLIKELY(!kernel.isValidUnboxed())) {
|
| 187 |
+
if (!kernel.isValid()) {
|
| 188 |
+
reportError(ks.highestPriorityTypeId());
|
| 189 |
+
}
|
| 190 |
+
}
|
| 191 |
+
return kernel;
|
| 192 |
+
}
|
| 193 |
+
|
| 194 |
+
std::string listAllDispatchKeys() const;
|
| 195 |
+
|
| 196 |
+
// Returns true if kernel_ has entry for any key in ks.
|
| 197 |
+
//
|
| 198 |
+
// Invariant: There are no alias keys in the passed-in dispatch key set.
|
| 199 |
+
// Note [No Alias Keys in DispatchKeySet]
|
| 200 |
+
// Alias keys should be checked using `hasKernelForDispatchKey`
|
| 201 |
+
// Alias keys shouldn't go inside of a DispatchKeySet, since they can technically
|
| 202 |
+
// have a value > 63 (causing overflow).
|
| 203 |
+
bool hasKernelForAnyDispatchKey(DispatchKeySet ks) const;
|
| 204 |
+
// Returns true if kernel_ has entry for a particular key.
|
| 205 |
+
bool hasKernelForDispatchKey(DispatchKey k) const;
|
| 206 |
+
// Retrieves the kernel entry at a particular key. Symmetric with
|
| 207 |
+
// hasKernelForDispatchKey. To get the AnnotatedKernel, see
|
| 208 |
+
// getKernelForDispatchKey (private)
|
| 209 |
+
const KernelFunction& kernelForDispatchKey(DispatchKey k) const;
|
| 210 |
+
// Returns true if the "computed table" has an entry for a particular key.
|
| 211 |
+
bool hasComputedKernelForDispatchKey(DispatchKey k) const;
|
| 212 |
+
// Returns all the operator tags added at the time of registration
|
| 213 |
+
const std::vector<at::Tag>& getTags() const;
|
| 214 |
+
void setReportErrorCallback_(std::unique_ptr<c10::SafePyObject> callback);
|
| 215 |
+
|
| 216 |
+
template <typename F>
|
| 217 |
+
PyObject* getPythonOp(PyInterpreter* self_interpreter, F slow_accessor) const {
|
| 218 |
+
return py_cache_.ptr_or(self_interpreter, slow_accessor);
|
| 219 |
+
}
|
| 220 |
+
|
| 221 |
+
private:
|
| 222 |
+
|
| 223 |
+
OperatorName name_;
|
| 224 |
+
c10::optional<AnnotatedSchema> schema_;
|
| 225 |
+
#ifndef C10_MOBILE
|
| 226 |
+
std::vector<at::Tag> tags_;
|
| 227 |
+
#endif
|
| 228 |
+
std::array<KernelFunction, c10::num_runtime_entries> dispatchTable_;
|
| 229 |
+
DispatchKeyExtractor dispatchKeyExtractor_;
|
| 230 |
+
// Pointer to the torch.ops.ns.op.overload object for speed
|
| 231 |
+
c10::PyHandleCache py_cache_;
|
| 232 |
+
|
| 233 |
+
// kernels_ stores all registered kernels for the corresponding dispatch key
|
| 234 |
+
// and catchAllKernels_ stores the catch-all kernels.
|
| 235 |
+
// If an operator library gets loaded that overwrites an already existing kernel,
|
| 236 |
+
// both kernels will be in that list but only the newer one will be in
|
| 237 |
+
// dispatchTable. If any of the kernels go away (say the library gets
|
| 238 |
+
// unloaded), we remove the kernel from this list and update the
|
| 239 |
+
// dispatchTable if necessary.
|
| 240 |
+
// Kernels in the list are ordered by registration time descendingly,
|
| 241 |
+
// newer registrations are before older registrations.
|
| 242 |
+
// We do not combine dispatchTable and kernels into one hash map because
|
| 243 |
+
// kernels is a larger data structure and accessed quite infrequently
|
| 244 |
+
// while dispatchTable is accessed often and should be kept small to fit
|
| 245 |
+
// into CPU caches.
|
| 246 |
+
// Invariants:
|
| 247 |
+
// - dispatchTable[dispatch_key] == kernels_[dispatch_key].front()
|
| 248 |
+
// - dispatchTable[dispatch_key] does not exist if and only if
|
| 249 |
+
// kernels_[dispatch_key] does not exist
|
| 250 |
+
// - If kernels_[dispatch_key] exists, then it has elements.
|
| 251 |
+
// It is never an empty list.
|
| 252 |
+
//
|
| 253 |
+
// Why do we do that?
|
| 254 |
+
// -----
|
| 255 |
+
// We mostly do this to enable Jupyter notebooks where a cell registering
|
| 256 |
+
// a kernel could be executed multiple times and the later execution
|
| 257 |
+
// should overwrite the earlier one. Note that this still fails when the
|
| 258 |
+
// function schema changed between the executions, but it works as long
|
| 259 |
+
// as the function schema didn't change. A better solution would be to
|
| 260 |
+
// unload the old extension library from the Jupyter cell when the cell is
|
| 261 |
+
// re-executed and then only allow one kernel here, i.e. error if a kernel
|
| 262 |
+
// is already registered, but that's a lot of effort to implement and
|
| 263 |
+
// currently not high-pri.
|
| 264 |
+
ska::flat_hash_map<DispatchKey,
|
| 265 |
+
#ifdef C10_DISPATCHER_ONE_KERNEL_PER_DISPATCH_KEY
|
| 266 |
+
// On mobile, we needn't worry about Jupyter notebooks.
|
| 267 |
+
std::array<AnnotatedKernel, 1>
|
| 268 |
+
#else
|
| 269 |
+
std::list<AnnotatedKernel>
|
| 270 |
+
#endif
|
| 271 |
+
> kernels_;
|
| 272 |
+
|
| 273 |
+
const AnnotatedKernel& missingKernel() const;
|
| 274 |
+
const AnnotatedKernel& ambiguousAutogradOtherKernel() const;
|
| 275 |
+
|
| 276 |
+
// cpp_signature_ stores function signature if any of
|
| 277 |
+
// the kernels was created in a way that allowed us to know the function
|
| 278 |
+
// signature (i.e. by supplying an unboxed C++ kernel function).
|
| 279 |
+
// If this is set, it will be used to check that future kernel
|
| 280 |
+
// registrations match and it will be used in unboxed function calls
|
| 281 |
+
// to verify their arguments against the known function signature.
|
| 282 |
+
struct CppSignatureWithDebug {
|
| 283 |
+
CppSignature signature;
|
| 284 |
+
std::string debug;
|
| 285 |
+
c10::optional<DispatchKey> dispatch_key;
|
| 286 |
+
};
|
| 287 |
+
c10::optional<CppSignatureWithDebug> cpp_signature_;
|
| 288 |
+
c10::optional<CppSignatureWithDebug> sym_cpp_signature_;
|
| 289 |
+
|
| 290 |
+
// A Python custom error handler for OperatorEntry::reportError
|
| 291 |
+
std::unique_ptr<c10::SafePyObject> report_error_callback_;
|
| 292 |
+
|
| 293 |
+
// Whether this operator needs to be observed with RecordFunction
|
| 294 |
+
const bool is_observed_;
|
| 295 |
+
|
| 296 |
+
[[noreturn]] void reportSignatureError(const CppSignature& call_signature, const CppSignatureWithDebug& saved_signature) const;
|
| 297 |
+
const KernelFunction& computeDispatchTableEntry(const c10::Dispatcher& dispatcher, DispatchKey dispatch_key) const;
|
| 298 |
+
std::pair<const AnnotatedKernel&, const char*> computeDispatchTableEntryWithDebug(
|
| 299 |
+
const c10::Dispatcher& dispatcher, DispatchKey dispatch_key
|
| 300 |
+
) const;
|
| 301 |
+
// This function re-establishes the invariant that dispatchTable
|
| 302 |
+
// contains the front element from the kernels list for a given runtime dispatch key.
|
| 303 |
+
void updateDispatchTableEntry_(const c10::Dispatcher& dispatcher, DispatchKey dispatch_key);
|
| 304 |
+
// Like above, but also handles alias dispatch keys.
|
| 305 |
+
void updateDispatchTable_(const c10::Dispatcher& dispatcher, DispatchKey dispatch_key);
|
| 306 |
+
// Like above, but for ALL entries in the dispatch table.
|
| 307 |
+
void updateDispatchTableFull_(const c10::Dispatcher& dispatcher);
|
| 308 |
+
// Retrieves a pointer to AnnotatedKernel at kernels_.at(dispatch_key).front().
|
| 309 |
+
const AnnotatedKernel* getKernelForDispatchKey(DispatchKey dispatch_key) const;
|
| 310 |
+
};
|
| 311 |
+
|
| 312 |
+
} // namespace impl
|
| 313 |
+
} // namespace c10
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorOptions.h
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <cstdint>
|
| 4 |
+
|
| 5 |
+
namespace c10 {
|
| 6 |
+
|
| 7 |
+
enum class AliasAnalysisKind : uint8_t {
|
| 8 |
+
INTERNAL_SPECIAL_CASE,
|
| 9 |
+
CONSERVATIVE, // The most conservative alias analysis type, assumes
|
| 10 |
+
// side-effects. This is the default analysis.
|
| 11 |
+
FROM_SCHEMA,
|
| 12 |
+
PURE_FUNCTION
|
| 13 |
+
};
|
| 14 |
+
|
| 15 |
+
#if !defined(_MSC_VER)
|
| 16 |
+
constexpr // Our current MSVC version has a bug that doesn't allow this to be constexpr.
|
| 17 |
+
#endif
|
| 18 |
+
inline const char* toString(AliasAnalysisKind aliasAnalysisKind) {
|
| 19 |
+
return (aliasAnalysisKind == AliasAnalysisKind::CONSERVATIVE)
|
| 20 |
+
? "CONSERVATIVE"
|
| 21 |
+
: (aliasAnalysisKind == AliasAnalysisKind::FROM_SCHEMA)
|
| 22 |
+
? "FROM_SCHEMA"
|
| 23 |
+
: (aliasAnalysisKind == AliasAnalysisKind::PURE_FUNCTION)
|
| 24 |
+
? "PURE_FUNCTION"
|
| 25 |
+
: (aliasAnalysisKind == AliasAnalysisKind::INTERNAL_SPECIAL_CASE)
|
| 26 |
+
? "INTERNAL_SPECIAL_CASE"
|
| 27 |
+
: "UNKNOWN";
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
} // namespace c10
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/RegistrationHandleRAII.h
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <functional>
|
| 4 |
+
|
| 5 |
+
namespace c10 {
|
| 6 |
+
|
| 7 |
+
class RegistrationHandleRAII final {
|
| 8 |
+
public:
|
| 9 |
+
explicit RegistrationHandleRAII(std::function<void()> onDestruction)
|
| 10 |
+
: onDestruction_(std::move(onDestruction)) {}
|
| 11 |
+
|
| 12 |
+
~RegistrationHandleRAII() {
|
| 13 |
+
if (onDestruction_) {
|
| 14 |
+
onDestruction_();
|
| 15 |
+
}
|
| 16 |
+
}
|
| 17 |
+
|
| 18 |
+
RegistrationHandleRAII(const RegistrationHandleRAII&) = delete;
|
| 19 |
+
RegistrationHandleRAII& operator=(const RegistrationHandleRAII&) = delete;
|
| 20 |
+
|
| 21 |
+
RegistrationHandleRAII(RegistrationHandleRAII&& rhs) noexcept
|
| 22 |
+
: onDestruction_(std::move(rhs.onDestruction_)) {
|
| 23 |
+
rhs.onDestruction_ = nullptr;
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
RegistrationHandleRAII& operator=(RegistrationHandleRAII&& rhs) noexcept {
|
| 27 |
+
onDestruction_ = std::move(rhs.onDestruction_);
|
| 28 |
+
rhs.onDestruction_ = nullptr;
|
| 29 |
+
return *this;
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
private:
|
| 33 |
+
std::function<void()> onDestruction_;
|
| 34 |
+
};
|
| 35 |
+
|
| 36 |
+
}
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/ivalue.h
ADDED
|
@@ -0,0 +1,1555 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/DimVector.h>
|
| 4 |
+
#include <ATen/core/TensorBody.h>
|
| 5 |
+
#include <ATen/core/blob.h>
|
| 6 |
+
#include <ATen/core/custom_class.h>
|
| 7 |
+
#include <ATen/core/ivalue_to.h>
|
| 8 |
+
#include <ATen/core/jit_type_base.h>
|
| 9 |
+
#include <ATen/core/type_factory.h>
|
| 10 |
+
#include <c10/core/SymBool.h>
|
| 11 |
+
#include <c10/core/SymFloat.h>
|
| 12 |
+
#include <c10/macros/Export.h>
|
| 13 |
+
#include <c10/util/MaybeOwned.h>
|
| 14 |
+
#include <c10/util/intrusive_ptr.h>
|
| 15 |
+
#include <type_traits>
|
| 16 |
+
#include <typeindex>
|
| 17 |
+
#include <unordered_map>
|
| 18 |
+
#include <unordered_set>
|
| 19 |
+
#include <utility>
|
| 20 |
+
|
| 21 |
+
namespace torch {
|
| 22 |
+
class TORCH_API CustomClassHolder : public c10::intrusive_ptr_target {};
|
| 23 |
+
namespace jit {
|
| 24 |
+
using ::torch::CustomClassHolder;
|
| 25 |
+
struct Function;
|
| 26 |
+
struct CompilationUnit;
|
| 27 |
+
struct Module;
|
| 28 |
+
} // namespace jit
|
| 29 |
+
} // namespace torch
|
| 30 |
+
namespace c10 {
|
| 31 |
+
template <class Key, class Value>
|
| 32 |
+
class Dict;
|
| 33 |
+
template <class T>
|
| 34 |
+
class List;
|
| 35 |
+
template <class T>
|
| 36 |
+
class IListRef;
|
| 37 |
+
struct IValue;
|
| 38 |
+
struct ClassType;
|
| 39 |
+
struct Type;
|
| 40 |
+
class RRefInterface;
|
| 41 |
+
|
| 42 |
+
struct ClassType;
|
| 43 |
+
using ClassTypePtr = std::shared_ptr<ClassType>;
|
| 44 |
+
|
| 45 |
+
TORCH_API bool _fastEqualsForContainer(const IValue& lhs, const IValue& rhs);
|
| 46 |
+
|
| 47 |
+
TORCH_API torch::jit::Function* checkObjectSortSchema(
|
| 48 |
+
const c10::ClassTypePtr& t,
|
| 49 |
+
std::stringstream& why_not);
|
| 50 |
+
|
| 51 |
+
// A comparator that checks ordering of two IValues of same type.
|
| 52 |
+
typedef std::function<bool(const IValue& a, const IValue& b)> IValueComparator;
|
| 53 |
+
|
| 54 |
+
TORCH_API IValueComparator getLessThanComparator(const IValue& v);
|
| 55 |
+
TORCH_API IValueComparator getGreaterThanComparator(const IValue& v);
|
| 56 |
+
|
| 57 |
+
namespace ivalue {
|
| 58 |
+
struct Tuple;
|
| 59 |
+
struct Future;
|
| 60 |
+
struct Await;
|
| 61 |
+
struct ConstantString;
|
| 62 |
+
struct GenericDict;
|
| 63 |
+
struct Object;
|
| 64 |
+
struct PyObjectHolder;
|
| 65 |
+
struct EnumHolder;
|
| 66 |
+
// We need a ComplexHolder because currently the payloads in the Union
|
| 67 |
+
// only take 64 bits. Since ComplexDouble takes up 128 bits, and is too big
|
| 68 |
+
// to fit in the IValue directly, we indirect complex numbers through an
|
| 69 |
+
// intrusive pointer to ComplexHolder (which contains a c10::complex).
|
| 70 |
+
struct ComplexHolder : c10::intrusive_ptr_target {
|
| 71 |
+
public:
|
| 72 |
+
template <typename T>
|
| 73 |
+
ComplexHolder(c10::complex<T> c) {
|
| 74 |
+
val = convert<decltype(val), c10::complex<T>>(c);
|
| 75 |
+
}
|
| 76 |
+
ComplexHolder() = default;
|
| 77 |
+
c10::complex<double> val;
|
| 78 |
+
};
|
| 79 |
+
|
| 80 |
+
// Similar to ComplexHolder, for StreamData3
|
| 81 |
+
struct StreamData3Holder : c10::intrusive_ptr_target {
|
| 82 |
+
public:
|
| 83 |
+
StreamData3Holder(struct c10::StreamData3 d) : val(d) {}
|
| 84 |
+
StreamData3Holder() = delete;
|
| 85 |
+
struct c10::StreamData3 val;
|
| 86 |
+
};
|
| 87 |
+
|
| 88 |
+
} // namespace ivalue
|
| 89 |
+
|
| 90 |
+
// This is an owning wrapper for a c10::optional<std::vector<T>>
|
| 91 |
+
// that can be implicitly converted to a (non-owning) optional<ArrayRef<T>>.
|
| 92 |
+
// Its purpose is to be used in generated code to keep the vector alive
|
| 93 |
+
// either until the end of a statement (as a temporary), or as a saved arg
|
| 94 |
+
// in autograd.
|
| 95 |
+
template <typename T>
|
| 96 |
+
struct OptionalArray {
|
| 97 |
+
c10::optional<std::vector<T>> list;
|
| 98 |
+
|
| 99 |
+
OptionalArray() = default;
|
| 100 |
+
OptionalArray(std::vector<T> val) : list(std::move(val)) {}
|
| 101 |
+
|
| 102 |
+
// Used when saving an argument for the backwards pass.
|
| 103 |
+
OptionalArray& operator=(c10::optional<ArrayRef<T>> ref) {
|
| 104 |
+
if (ref) {
|
| 105 |
+
list = std::vector<T>(ref->begin(), ref->end());
|
| 106 |
+
} else {
|
| 107 |
+
list = nullopt;
|
| 108 |
+
}
|
| 109 |
+
return *this;
|
| 110 |
+
}
|
| 111 |
+
|
| 112 |
+
// Used when saving an argument for the backwards pass.
|
| 113 |
+
OptionalArray& operator=(c10::OptionalArrayRef<T> ref) {
|
| 114 |
+
if (ref) {
|
| 115 |
+
list = std::vector<T>(ref->begin(), ref->end());
|
| 116 |
+
} else {
|
| 117 |
+
list = nullopt;
|
| 118 |
+
}
|
| 119 |
+
return *this;
|
| 120 |
+
}
|
| 121 |
+
|
| 122 |
+
operator c10::optional<c10::ArrayRef<T>>() {
|
| 123 |
+
if (!list) {
|
| 124 |
+
return nullopt;
|
| 125 |
+
}
|
| 126 |
+
return *list;
|
| 127 |
+
}
|
| 128 |
+
|
| 129 |
+
operator c10::OptionalArrayRef<T>() {
|
| 130 |
+
if (!list) {
|
| 131 |
+
return nullopt;
|
| 132 |
+
}
|
| 133 |
+
return *list;
|
| 134 |
+
}
|
| 135 |
+
};
|
| 136 |
+
|
| 137 |
+
// Capsule is an internal implementation detail of custom C++ classes. We
|
| 138 |
+
// define it as an owning wrapper for
|
| 139 |
+
// c10::intrusive_ptr<torch::CustomClassHolder> This wrapper is here to serve as
|
| 140 |
+
// an abstraction of the type erased custom class object pointer. It also allow
|
| 141 |
+
// pybind11 to treat this as a standalone class to register as a separate type
|
| 142 |
+
// caster, instead of a custom pointer holder which the pointer holder type
|
| 143 |
+
// caster try to "unwrap" it automatically.
|
| 144 |
+
struct Capsule {
|
| 145 |
+
c10::intrusive_ptr<torch::CustomClassHolder> obj_ptr;
|
| 146 |
+
explicit Capsule(c10::intrusive_ptr<torch::CustomClassHolder> ptr)
|
| 147 |
+
: obj_ptr(std::move(ptr)) {}
|
| 148 |
+
};
|
| 149 |
+
|
| 150 |
+
// IValue is the generic tagged union used by the interpreter to hold
|
| 151 |
+
// all value types.
|
| 152 |
+
// It is a 16-byte object with an 8-byte payload and an 8-byte tag.
|
| 153 |
+
// The tag is currently 4 bytes to determine the type, and 1 byte
|
| 154 |
+
// to mark whether that type is a subtype of c10::intrusive_ptr_target and needs
|
| 155 |
+
// retain/release calls.
|
| 156 |
+
|
| 157 |
+
#define TORCH_FORALL_TAGS(_) \
|
| 158 |
+
_(None) \
|
| 159 |
+
_(Tensor) \
|
| 160 |
+
_(Storage) \
|
| 161 |
+
_(Double) \
|
| 162 |
+
_(ComplexDouble) \
|
| 163 |
+
_(Int) \
|
| 164 |
+
_(SymInt) \
|
| 165 |
+
_(SymFloat) \
|
| 166 |
+
_(SymBool) \
|
| 167 |
+
_(Bool) \
|
| 168 |
+
_(Tuple) \
|
| 169 |
+
_(String) \
|
| 170 |
+
_(Blob) \
|
| 171 |
+
_(GenericList) \
|
| 172 |
+
_(GenericDict) \
|
| 173 |
+
_(Future) \
|
| 174 |
+
_(Await) \
|
| 175 |
+
_(Device) \
|
| 176 |
+
_(Stream) \
|
| 177 |
+
_(Object) \
|
| 178 |
+
_(PyObject) \
|
| 179 |
+
_(Uninitialized) \
|
| 180 |
+
_(Capsule) \
|
| 181 |
+
_(RRef) \
|
| 182 |
+
_(Quantizer) \
|
| 183 |
+
_(Generator) \
|
| 184 |
+
_(Enum)
|
| 185 |
+
|
| 186 |
+
// [doxygen private]
|
| 187 |
+
// These methods are not actually private but we don't want to document them, so
|
| 188 |
+
// they are marked `@private`, which hides them on the doxygen documentation for
|
| 189 |
+
// this page.
|
| 190 |
+
|
| 191 |
+
/// IValue (Interpreter Value) is a tagged union over the types
|
| 192 |
+
/// supported by the TorchScript interpreter. IValues contain their
|
| 193 |
+
/// values as an `IValue::Payload`, which holds primitive types
|
| 194 |
+
/// (`int64_t`, `bool`, `double`, `Device`) and `Tensor` as values,
|
| 195 |
+
/// and all other types as a `c10::intrusive_ptr`. In order to
|
| 196 |
+
/// optimize performance of the destructor and related operations by
|
| 197 |
+
/// making the `Tensor` and `c10::intrusive_ptr` paths generate the
|
| 198 |
+
/// same code, we represent a null `c10::intrusive_ptr` as
|
| 199 |
+
/// `UndefinedTensorImpl::singleton()`, *not* `nullptr`.
|
| 200 |
+
///
|
| 201 |
+
/// IValues are used as inputs to and outputs from the TorchScript interpreter.
|
| 202 |
+
/// To retrieve the value contained within an IValue, use the `.toX()` methods,
|
| 203 |
+
/// where `X` is the type you are trying to get. Note that neither the `.toX()`
|
| 204 |
+
/// methods nor the templated `.to<T>` functions do any kind of casting, they
|
| 205 |
+
/// only unwrap the contained value. For example:
|
| 206 |
+
///
|
| 207 |
+
/// \rst
|
| 208 |
+
/// .. code-block:: cpp
|
| 209 |
+
///
|
| 210 |
+
/// // Make the IValue
|
| 211 |
+
/// torch::IValue my_ivalue(26);
|
| 212 |
+
/// std::cout << my_ivalue << "\n";
|
| 213 |
+
///
|
| 214 |
+
/// // Unwrap the IValue
|
| 215 |
+
/// int64_t my_int = my_ivalue.toInt();
|
| 216 |
+
/// std::cout << my_int << "\n";
|
| 217 |
+
///
|
| 218 |
+
/// // This will throw an error!
|
| 219 |
+
/// // `my_ivalue` is tagged as an int and cannot be used as another type
|
| 220 |
+
/// torch::Tensor my_tensor = my_ivalue.toTensor();
|
| 221 |
+
/// \endrst
|
| 222 |
+
struct TORCH_API IValue final {
|
| 223 |
+
IValue(const IValue& rhs) : IValue(rhs.payload, rhs.tag) {
|
| 224 |
+
if (isIntrusivePtr() &&
|
| 225 |
+
payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()) {
|
| 226 |
+
c10::raw::intrusive_ptr::incref(payload.u.as_intrusive_ptr);
|
| 227 |
+
}
|
| 228 |
+
}
|
| 229 |
+
|
| 230 |
+
IValue(IValue&& rhs) noexcept : tag(rhs.tag) {
|
| 231 |
+
moveFrom(std::move(rhs));
|
| 232 |
+
}
|
| 233 |
+
|
| 234 |
+
/// @private [doxygen private]
|
| 235 |
+
~IValue() {
|
| 236 |
+
destroy();
|
| 237 |
+
}
|
| 238 |
+
|
| 239 |
+
C10_ALWAYS_INLINE IValue& operator=(IValue&& rhs) & noexcept {
|
| 240 |
+
if (&rhs == this) {
|
| 241 |
+
return *this;
|
| 242 |
+
}
|
| 243 |
+
|
| 244 |
+
destroy();
|
| 245 |
+
moveFrom(std::move(rhs));
|
| 246 |
+
return *this;
|
| 247 |
+
}
|
| 248 |
+
|
| 249 |
+
IValue& operator=(IValue const& rhs) & {
|
| 250 |
+
*this = IValue(rhs);
|
| 251 |
+
return *this;
|
| 252 |
+
}
|
| 253 |
+
|
| 254 |
+
void dump() const;
|
| 255 |
+
|
| 256 |
+
/**
|
| 257 |
+
* Equality comparison. The semantics are the same as Python's `==`:
|
| 258 |
+
* 1. Numerical types are compared by value.
|
| 259 |
+
* 2. Tensors compute element-wise equality, returning a BoolTensor (see:
|
| 260 |
+
* `torch.eq()`)
|
| 261 |
+
* 3. Strings are compared by value.
|
| 262 |
+
* 4. Sequence types (list, tuple) are compared lexicographically by
|
| 263 |
+
* comparing their elements. Different sequence types never compare equal.
|
| 264 |
+
* 5. Mappings (dict) must have equal (key, value) pairs.
|
| 265 |
+
* 6. If not listed above, the default behavior for is to test identity
|
| 266 |
+
* equality (e.g. pointer equality).
|
| 267 |
+
*
|
| 268 |
+
* Why does this return an IValue instead of a bool? Because in PyTorch,
|
| 269 |
+
* `tensor1 == tensor2` returns a `BoolTensor`, not a bool.
|
| 270 |
+
*
|
| 271 |
+
* NOTE: we (like Python) assume that identity equality implies value equality
|
| 272 |
+
* for efficiency.
|
| 273 |
+
* TODO: need to support customizing equality
|
| 274 |
+
*/
|
| 275 |
+
IValue equals(const IValue& rhs) const;
|
| 276 |
+
/**
|
| 277 |
+
* This implements the same semantics as `bool(lhs == rhs)` in Python. which
|
| 278 |
+
* is the same as `equals()` except for Tensor types.
|
| 279 |
+
*/
|
| 280 |
+
TORCH_API friend bool operator==(const IValue& lhs, const IValue& rhs);
|
| 281 |
+
TORCH_API friend bool operator!=(const IValue& lhs, const IValue& rhs);
|
| 282 |
+
|
| 283 |
+
/**
|
| 284 |
+
* Identity comparison. Checks if `this` is the same object as `rhs`. The
|
| 285 |
+
* semantics are the same as Python's `is` operator.
|
| 286 |
+
*
|
| 287 |
+
* NOTE: Like in Python, this operation is poorly defined for primitive types
|
| 288 |
+
* like numbers and strings. Prefer to use `==` unless you really want to
|
| 289 |
+
* check identity equality.
|
| 290 |
+
*/
|
| 291 |
+
bool is(const IValue& rhs) const;
|
| 292 |
+
|
| 293 |
+
/**
|
| 294 |
+
* Hashing for IValues. Returns an IValue-boxed int.
|
| 295 |
+
*
|
| 296 |
+
* Some notes:
|
| 297 |
+
* - Like eager, Tensors are hashed by looking at the pointer. This is not
|
| 298 |
+
* strictly correct because two value-equal tensors with different tensor
|
| 299 |
+
* pointers will hash differently, but we choose to reproduce the eager
|
| 300 |
+
* semantics.
|
| 301 |
+
* - Hashing is not defined on all built-in IValue types (e.g. list and
|
| 302 |
+
* dict), following Python. Calling `hash()` on these types will throw.
|
| 303 |
+
*/
|
| 304 |
+
IValue hash() const {
|
| 305 |
+
return (int64_t)IValue::hash(*this);
|
| 306 |
+
}
|
| 307 |
+
// This is defined because `c10::hash` dispatches to a function of this
|
| 308 |
+
// signature. See the member function `hash()`.
|
| 309 |
+
static size_t hash(const IValue& iv);
|
| 310 |
+
|
| 311 |
+
/**
|
| 312 |
+
* @private [doxygen private]
|
| 313 |
+
* [container equality]
|
| 314 |
+
* This is an equality implementation that assumes objects with the same
|
| 315 |
+
* identity equal themselves, for efficiency reasons. We primarily have this
|
| 316 |
+
* for consistency, because Python does the same thing. This actually
|
| 317 |
+
* provokes user-visible changes in behavior due to quirks in torch:
|
| 318 |
+
* [tensor1] == [tensor1] -> True (because container equality will first
|
| 319 |
+
* compare identity) [tensor1] == [tensor1_copy] -> RuntimeError:
|
| 320 |
+
* Boolean value of Tensor with more than one value is ambiguous
|
| 321 |
+
*/
|
| 322 |
+
TORCH_API friend bool _fastEqualsForContainer(
|
| 323 |
+
const IValue& lhs,
|
| 324 |
+
const IValue& rhs);
|
| 325 |
+
|
| 326 |
+
private:
|
| 327 |
+
static bool isAliasOf(const at::Tensor& a, const at::Tensor& b) {
|
| 328 |
+
if (a.is_sparse()) {
|
| 329 |
+
return isAliasOf(a._values(), b) || isAliasOf(a._indices(), b);
|
| 330 |
+
}
|
| 331 |
+
if (b.is_sparse()) {
|
| 332 |
+
return isAliasOf(a, b._values()) || isAliasOf(a, b._indices());
|
| 333 |
+
}
|
| 334 |
+
if (a.is_sparse_csr()) {
|
| 335 |
+
return isAliasOf(a.values(), b) || isAliasOf(a.crow_indices(), b) ||
|
| 336 |
+
isAliasOf(a.col_indices(), b);
|
| 337 |
+
}
|
| 338 |
+
if (b.is_sparse_csr()) {
|
| 339 |
+
return isAliasOf(a, b.values()) || isAliasOf(a, b.crow_indices()) ||
|
| 340 |
+
isAliasOf(a, b.col_indices());
|
| 341 |
+
}
|
| 342 |
+
|
| 343 |
+
// Opaque tensors such as the ones constructed by the MKL-DNN backend
|
| 344 |
+
// don't have storage so we just compare their TensorImpls.
|
| 345 |
+
// TODO: Find way to expose alias info for opaque tensors.
|
| 346 |
+
if (!a.has_storage() || !b.has_storage()) {
|
| 347 |
+
return a.unsafeGetTensorImpl() == b.unsafeGetTensorImpl();
|
| 348 |
+
}
|
| 349 |
+
|
| 350 |
+
return a.is_alias_of(b);
|
| 351 |
+
}
|
| 352 |
+
|
| 353 |
+
template <typename T>
|
| 354 |
+
bool isListOf() const;
|
| 355 |
+
|
| 356 |
+
public:
|
| 357 |
+
/// @private [doxygen private]
|
| 358 |
+
bool isAliasOf(const IValue& rhs) const {
|
| 359 |
+
if (this->tag != rhs.tag) {
|
| 360 |
+
// Trivially don't alias if the type is different
|
| 361 |
+
return false;
|
| 362 |
+
}
|
| 363 |
+
|
| 364 |
+
// Tensors should be compared based on internal storage
|
| 365 |
+
if (this->isTensor()) {
|
| 366 |
+
return isAliasOf(this->toTensor(), rhs.toTensor());
|
| 367 |
+
}
|
| 368 |
+
|
| 369 |
+
if (!isIntrusivePtr()) {
|
| 370 |
+
// Primitive types don't alias anything
|
| 371 |
+
return false;
|
| 372 |
+
}
|
| 373 |
+
|
| 374 |
+
AT_ASSERT(rhs.isIntrusivePtr());
|
| 375 |
+
|
| 376 |
+
// Other types can be compared by their ptr value
|
| 377 |
+
return this->payload.u.as_intrusive_ptr == rhs.payload.u.as_intrusive_ptr;
|
| 378 |
+
}
|
| 379 |
+
|
| 380 |
+
/// @private [doxygen private]
|
| 381 |
+
size_t use_count() const noexcept {
|
| 382 |
+
if (isTensor()) {
|
| 383 |
+
return payload.as_tensor.use_count();
|
| 384 |
+
}
|
| 385 |
+
|
| 386 |
+
if (!isIntrusivePtrLegacyBehavior()) {
|
| 387 |
+
return 1;
|
| 388 |
+
}
|
| 389 |
+
|
| 390 |
+
if (payload.u.as_intrusive_ptr == c10::UndefinedTensorImpl::singleton()) {
|
| 391 |
+
return 0;
|
| 392 |
+
}
|
| 393 |
+
return c10::raw::intrusive_ptr::use_count(payload.u.as_intrusive_ptr);
|
| 394 |
+
}
|
| 395 |
+
|
| 396 |
+
/// @private [doxygen private]
|
| 397 |
+
void swap(IValue& rhs) noexcept {
|
| 398 |
+
if (isTensor() && rhs.isTensor()) {
|
| 399 |
+
std::swap(payload.as_tensor, rhs.payload.as_tensor);
|
| 400 |
+
} else if (isTensor()) {
|
| 401 |
+
at::Tensor t = std::move(payload.as_tensor);
|
| 402 |
+
// As far as I can tell, omitting the usual explicit destructor call
|
| 403 |
+
// is not UB in and of itself, and it's a slight perf win. The
|
| 404 |
+
// destructor is a no-op, because the moved-from Tensor is
|
| 405 |
+
// effectively an intrusive_ptr in the null state, so we don't need
|
| 406 |
+
// the behavior for correctness reasons either. Leaving this
|
| 407 |
+
// explanatory comment, including commented-out destructor call, to
|
| 408 |
+
// make this abundantly clear.
|
| 409 |
+
//
|
| 410 |
+
// payload.as_tensor.~Tensor();
|
| 411 |
+
payload.u = rhs.payload.u;
|
| 412 |
+
new (&rhs.payload.as_tensor) at::Tensor(std::move(t));
|
| 413 |
+
} else if (rhs.isTensor()) {
|
| 414 |
+
rhs.swap(*this);
|
| 415 |
+
return;
|
| 416 |
+
} else {
|
| 417 |
+
std::swap(payload.u, rhs.payload.u);
|
| 418 |
+
}
|
| 419 |
+
std::swap(tag, rhs.tag);
|
| 420 |
+
}
|
| 421 |
+
|
| 422 |
+
// Accessors for subtypes are arranged together below
|
| 423 |
+
// While some of these accessors could be generated through templates,
|
| 424 |
+
// we prefer to write them manually for clarity
|
| 425 |
+
|
| 426 |
+
IValue(at::TensorBase t) : tag(Tag::Tensor) {
|
| 427 |
+
new (&payload.as_tensor) at::Tensor(std::move(t));
|
| 428 |
+
}
|
| 429 |
+
bool isTensor() const {
|
| 430 |
+
return Tag::Tensor == tag;
|
| 431 |
+
}
|
| 432 |
+
|
| 433 |
+
private:
|
| 434 |
+
// Outlined error path so that toTensor() can be inlined.
|
| 435 |
+
[[noreturn]] void reportToTensorTypeError() const;
|
| 436 |
+
|
| 437 |
+
public:
|
| 438 |
+
at::Tensor toTensor() &&;
|
| 439 |
+
at::Tensor& toTensor() &;
|
| 440 |
+
const at::Tensor& toTensor() const&;
|
| 441 |
+
at::TensorImpl* unsafeToTensorImpl() const {
|
| 442 |
+
TORCH_INTERNAL_ASSERT(isTensor());
|
| 443 |
+
return payload.as_tensor.unsafeGetTensorImpl();
|
| 444 |
+
}
|
| 445 |
+
|
| 446 |
+
IValue(at::Storage s) : tag(Tag::Storage) {
|
| 447 |
+
payload.u.as_intrusive_ptr =
|
| 448 |
+
null_to_undefined_tensor(s.unsafeReleaseStorageImpl());
|
| 449 |
+
}
|
| 450 |
+
bool isStorage() const {
|
| 451 |
+
return Tag::Storage == tag;
|
| 452 |
+
}
|
| 453 |
+
c10::Storage toStorage() &&;
|
| 454 |
+
c10::Storage toStorage() const&;
|
| 455 |
+
|
| 456 |
+
const IValue& toIValue() const {
|
| 457 |
+
return *this;
|
| 458 |
+
}
|
| 459 |
+
IValue& toIValue() {
|
| 460 |
+
return *this;
|
| 461 |
+
}
|
| 462 |
+
|
| 463 |
+
/// @private [doxygen private]
|
| 464 |
+
IValue(intrusive_ptr<caffe2::Blob> blob) : tag(Tag::Blob) {
|
| 465 |
+
// TODO (after Tensor merge) If we pass in a Blob holding a Tensor, extract
|
| 466 |
+
// and store it as a Tensor instead.
|
| 467 |
+
payload.u.as_intrusive_ptr = null_to_undefined_tensor(blob.release());
|
| 468 |
+
}
|
| 469 |
+
|
| 470 |
+
/// @private [doxygen private]
|
| 471 |
+
bool isBlob() const {
|
| 472 |
+
return Tag::Blob == tag;
|
| 473 |
+
}
|
| 474 |
+
|
| 475 |
+
/// @private [doxygen private]
|
| 476 |
+
c10::intrusive_ptr<caffe2::Blob> toBlob() &&;
|
| 477 |
+
|
| 478 |
+
/// @private [doxygen private]
|
| 479 |
+
c10::intrusive_ptr<caffe2::Blob> toBlob() const&;
|
| 480 |
+
|
| 481 |
+
// Capsule. No new callsites of these APIs should
|
| 482 |
+
// be introduced.
|
| 483 |
+
static inline IValue make_capsule(
|
| 484 |
+
intrusive_ptr<torch::CustomClassHolder> blob);
|
| 485 |
+
bool isCapsule() const {
|
| 486 |
+
return Tag::Capsule == tag;
|
| 487 |
+
}
|
| 488 |
+
c10::intrusive_ptr<torch::CustomClassHolder> toCapsule() &&;
|
| 489 |
+
c10::intrusive_ptr<torch::CustomClassHolder> toCapsule() const&;
|
| 490 |
+
|
| 491 |
+
// Custom C++ classes
|
| 492 |
+
template <
|
| 493 |
+
typename T,
|
| 494 |
+
std::enable_if_t<
|
| 495 |
+
std::is_base_of<torch::CustomClassHolder, T>::value,
|
| 496 |
+
int> = 0>
|
| 497 |
+
IValue(intrusive_ptr<T> custom_class);
|
| 498 |
+
bool isCustomClass() const;
|
| 499 |
+
template <typename T>
|
| 500 |
+
c10::intrusive_ptr<T> toCustomClass() &&;
|
| 501 |
+
template <typename T>
|
| 502 |
+
c10::intrusive_ptr<T> toCustomClass() const&;
|
| 503 |
+
|
| 504 |
+
// Tuple
|
| 505 |
+
IValue(c10::intrusive_ptr<ivalue::Tuple> v);
|
| 506 |
+
|
| 507 |
+
template <
|
| 508 |
+
typename... Args,
|
| 509 |
+
std::enable_if_t<
|
| 510 |
+
!std::disjunction<
|
| 511 |
+
std::is_lvalue_reference<Args>...,
|
| 512 |
+
std::negation<std::is_constructible<IValue, Args>>...>::value,
|
| 513 |
+
std::nullptr_t> = nullptr>
|
| 514 |
+
IValue(const std::tuple<Args...>& t);
|
| 515 |
+
template <
|
| 516 |
+
typename... Args,
|
| 517 |
+
std::enable_if_t<
|
| 518 |
+
!std::disjunction<
|
| 519 |
+
std::is_lvalue_reference<Args>...,
|
| 520 |
+
std::negation<std::is_constructible<IValue, Args>>...>::value,
|
| 521 |
+
std::nullptr_t> = nullptr>
|
| 522 |
+
IValue(std::tuple<Args...>&& t);
|
| 523 |
+
bool isTuple() const {
|
| 524 |
+
return Tag::Tuple == tag;
|
| 525 |
+
}
|
| 526 |
+
c10::intrusive_ptr<ivalue::Tuple> toTuple() &&;
|
| 527 |
+
c10::intrusive_ptr<ivalue::Tuple> toTuple() const&;
|
| 528 |
+
C10_NODISCARD ivalue::Tuple& toTupleRef() const;
|
| 529 |
+
|
| 530 |
+
// Double
|
| 531 |
+
IValue(double d) : tag(Tag::Double) {
|
| 532 |
+
payload.u.as_double = d;
|
| 533 |
+
}
|
| 534 |
+
bool isDouble() const {
|
| 535 |
+
return Tag::Double == tag;
|
| 536 |
+
}
|
| 537 |
+
double toDouble() const {
|
| 538 |
+
AT_ASSERT(isDouble());
|
| 539 |
+
return payload.u.as_double;
|
| 540 |
+
}
|
| 541 |
+
|
| 542 |
+
// ComplexDouble
|
| 543 |
+
template <typename T>
|
| 544 |
+
IValue(c10::complex<T> c);
|
| 545 |
+
bool isComplexDouble() const {
|
| 546 |
+
return Tag::ComplexDouble == tag;
|
| 547 |
+
}
|
| 548 |
+
c10::complex<double> toComplexDouble() const;
|
| 549 |
+
|
| 550 |
+
// Future
|
| 551 |
+
IValue(c10::intrusive_ptr<ivalue::Future> v);
|
| 552 |
+
bool isFuture() const {
|
| 553 |
+
return Tag::Future == tag;
|
| 554 |
+
}
|
| 555 |
+
c10::intrusive_ptr<ivalue::Future> toFuture() &&;
|
| 556 |
+
c10::intrusive_ptr<ivalue::Future> toFuture() const&;
|
| 557 |
+
|
| 558 |
+
IValue(c10::intrusive_ptr<ivalue::Await> v);
|
| 559 |
+
bool isAwait() const {
|
| 560 |
+
return Tag::Await == tag;
|
| 561 |
+
}
|
| 562 |
+
c10::intrusive_ptr<ivalue::Await> toAwait() &&;
|
| 563 |
+
c10::intrusive_ptr<ivalue::Await> toAwait() const&;
|
| 564 |
+
|
| 565 |
+
// RRef
|
| 566 |
+
IValue(c10::intrusive_ptr<c10::RRefInterface> v);
|
| 567 |
+
bool isRRef() const {
|
| 568 |
+
return Tag::RRef == tag;
|
| 569 |
+
}
|
| 570 |
+
c10::intrusive_ptr<c10::RRefInterface> toRRef() &&;
|
| 571 |
+
c10::intrusive_ptr<c10::RRefInterface> toRRef() const&;
|
| 572 |
+
|
| 573 |
+
// Quantizer
|
| 574 |
+
IValue(c10::intrusive_ptr<at::Quantizer> v);
|
| 575 |
+
bool isQuantizer() const {
|
| 576 |
+
return Tag::Quantizer == tag;
|
| 577 |
+
}
|
| 578 |
+
c10::intrusive_ptr<at::Quantizer> toQuantizer() &&;
|
| 579 |
+
c10::intrusive_ptr<at::Quantizer> toQuantizer() const&;
|
| 580 |
+
|
| 581 |
+
// Int
|
| 582 |
+
IValue(int64_t i) : tag(Tag::Int) {
|
| 583 |
+
payload.u.as_int = i;
|
| 584 |
+
}
|
| 585 |
+
|
| 586 |
+
IValue(const c10::SymInt& i) {
|
| 587 |
+
if (auto mi = i.maybe_as_int()) {
|
| 588 |
+
tag = Tag::Int;
|
| 589 |
+
payload.u.as_int = *mi;
|
| 590 |
+
} else {
|
| 591 |
+
tag = Tag::SymInt;
|
| 592 |
+
payload.u.as_intrusive_ptr = i.toSymNode().release();
|
| 593 |
+
}
|
| 594 |
+
}
|
| 595 |
+
|
| 596 |
+
bool isSymInt() const {
|
| 597 |
+
return Tag::SymInt == tag;
|
| 598 |
+
}
|
| 599 |
+
|
| 600 |
+
c10::SymInt toSymInt() &&;
|
| 601 |
+
c10::SymInt toSymInt() const&;
|
| 602 |
+
|
| 603 |
+
IValue(const c10::SymFloat& i) {
|
| 604 |
+
if (i.is_symbolic()) {
|
| 605 |
+
tag = Tag::SymFloat;
|
| 606 |
+
payload.u.as_intrusive_ptr = i.toSymNodeImpl().release();
|
| 607 |
+
} else {
|
| 608 |
+
tag = Tag::Double;
|
| 609 |
+
payload.u.as_double = i.as_float_unchecked();
|
| 610 |
+
}
|
| 611 |
+
}
|
| 612 |
+
|
| 613 |
+
bool isSymFloat() const {
|
| 614 |
+
return Tag::SymFloat == tag;
|
| 615 |
+
}
|
| 616 |
+
|
| 617 |
+
c10::SymFloat toSymFloat() &&;
|
| 618 |
+
c10::SymFloat toSymFloat() const&;
|
| 619 |
+
|
| 620 |
+
IValue(const c10::SymBool& i) {
|
| 621 |
+
if (auto mi = i.maybe_as_bool()) {
|
| 622 |
+
tag = Tag::Bool;
|
| 623 |
+
payload.u.as_int = *mi;
|
| 624 |
+
} else {
|
| 625 |
+
tag = Tag::SymBool;
|
| 626 |
+
payload.u.as_intrusive_ptr = i.toSymNodeImpl().release();
|
| 627 |
+
}
|
| 628 |
+
}
|
| 629 |
+
|
| 630 |
+
bool isSymBool() const {
|
| 631 |
+
return Tag::SymBool == tag;
|
| 632 |
+
}
|
| 633 |
+
|
| 634 |
+
c10::SymBool toSymBool() &&;
|
| 635 |
+
c10::SymBool toSymBool() const&;
|
| 636 |
+
|
| 637 |
+
// allow you to pass literals (3, 4) without ambiguity
|
| 638 |
+
IValue(int32_t i) : IValue(static_cast<int64_t>(i)) {}
|
| 639 |
+
|
| 640 |
+
bool isInt() const {
|
| 641 |
+
return Tag::Int == tag;
|
| 642 |
+
}
|
| 643 |
+
|
| 644 |
+
int64_t toInt() const {
|
| 645 |
+
AT_ASSERT(isInt());
|
| 646 |
+
return payload.u.as_int;
|
| 647 |
+
}
|
| 648 |
+
|
| 649 |
+
// Bool
|
| 650 |
+
IValue(bool b) : tag(Tag::Bool) {
|
| 651 |
+
#if defined(__clang__) && defined(__x86_64__)
|
| 652 |
+
// Initializing entire payload stops valgrind's from reporting
|
| 653 |
+
// "jump or move depends on uninitialised value" in IValue copy constructor
|
| 654 |
+
// See https://github.com/pytorch/pytorch/issues/37117
|
| 655 |
+
payload.u.as_int = b;
|
| 656 |
+
#else
|
| 657 |
+
payload.u.as_bool = b;
|
| 658 |
+
#endif
|
| 659 |
+
}
|
| 660 |
+
bool isBool() const {
|
| 661 |
+
return Tag::Bool == tag;
|
| 662 |
+
}
|
| 663 |
+
bool toBool() const {
|
| 664 |
+
AT_ASSERT(isBool());
|
| 665 |
+
return payload.u.as_bool;
|
| 666 |
+
}
|
| 667 |
+
|
| 668 |
+
// IntList
|
| 669 |
+
bool isIntList() const;
|
| 670 |
+
bool isSymIntList() const;
|
| 671 |
+
c10::List<int64_t> toIntList() &&;
|
| 672 |
+
c10::List<int64_t> toIntList() const&;
|
| 673 |
+
std::vector<int64_t> toIntVector() const;
|
| 674 |
+
std::vector<c10::SymInt> toSymIntVector() const;
|
| 675 |
+
at::DimVector toDimVector() const;
|
| 676 |
+
|
| 677 |
+
// ConstantString
|
| 678 |
+
IValue(c10::intrusive_ptr<ivalue::ConstantString> v);
|
| 679 |
+
IValue(std::string v);
|
| 680 |
+
IValue(const char* v) : IValue(std::string(v)) {}
|
| 681 |
+
IValue(c10::string_view v) : IValue(std::string(v)){};
|
| 682 |
+
bool isString() const {
|
| 683 |
+
return Tag::String == tag;
|
| 684 |
+
}
|
| 685 |
+
c10::intrusive_ptr<ivalue::ConstantString> toString() &&;
|
| 686 |
+
c10::intrusive_ptr<ivalue::ConstantString> toString() const&;
|
| 687 |
+
const std::string& toStringRef() const;
|
| 688 |
+
c10::optional<std::reference_wrapper<const std::string>> toOptionalStringRef()
|
| 689 |
+
const;
|
| 690 |
+
c10::string_view toStringView() const;
|
| 691 |
+
|
| 692 |
+
// DoubleList
|
| 693 |
+
bool isDoubleList() const;
|
| 694 |
+
c10::List<double> toDoubleList() &&;
|
| 695 |
+
c10::List<double> toDoubleList() const&;
|
| 696 |
+
std::vector<double> toDoubleVector() const;
|
| 697 |
+
|
| 698 |
+
// ComplexDoubleList
|
| 699 |
+
bool isComplexDoubleList() const;
|
| 700 |
+
c10::List<c10::complex<double>> toComplexDoubleList() &&;
|
| 701 |
+
c10::List<c10::complex<double>> toComplexDoubleList() const&;
|
| 702 |
+
std::vector<c10::complex<double>> toComplexDoubleVector() const;
|
| 703 |
+
|
| 704 |
+
// BoolList
|
| 705 |
+
bool isBoolList() const;
|
| 706 |
+
c10::List<bool> toBoolList() &&;
|
| 707 |
+
c10::List<bool> toBoolList() const&;
|
| 708 |
+
|
| 709 |
+
// TensorList
|
| 710 |
+
bool isTensorList() const;
|
| 711 |
+
c10::List<at::Tensor> toTensorList() &&;
|
| 712 |
+
c10::List<at::Tensor> toTensorList() const&;
|
| 713 |
+
std::vector<at::Tensor> toTensorVector() const;
|
| 714 |
+
|
| 715 |
+
// OptionalTensorList
|
| 716 |
+
bool isOptionalTensorList() const;
|
| 717 |
+
c10::List<c10::optional<at::Tensor>> toOptionalTensorList() &&;
|
| 718 |
+
c10::List<c10::optional<at::Tensor>> toOptionalTensorList() const&;
|
| 719 |
+
std::vector<c10::optional<at::Tensor>> toOptionalTensorVector() const;
|
| 720 |
+
|
| 721 |
+
// GenericList
|
| 722 |
+
IValue(c10::List<IValue> v);
|
| 723 |
+
bool isList() const {
|
| 724 |
+
return Tag::GenericList == tag;
|
| 725 |
+
}
|
| 726 |
+
c10::List<IValue> toList() &&;
|
| 727 |
+
c10::List<IValue> toList() const&;
|
| 728 |
+
c10::ArrayRef<IValue> toListRef() const;
|
| 729 |
+
|
| 730 |
+
// Some template constructors of IValue calls another constructor recursively.
|
| 731 |
+
// This SFINAEs the called constructor exists.
|
| 732 |
+
template <class T>
|
| 733 |
+
using enable_if_ivalue_constructible =
|
| 734 |
+
std::enable_if_t<std::is_constructible<IValue, T>::value, std::nullptr_t>;
|
| 735 |
+
|
| 736 |
+
// The rule for lists is more complicated; the generic constructor is only
|
| 737 |
+
// acceptable if your element isn't SymInt. If you do have a SymInt element,
|
| 738 |
+
// then you must also, at construction time, check if you can decay the list
|
| 739 |
+
// into an int list (this is MANDATORY, as at a use site we may expect
|
| 740 |
+
// toIntList to work even if at the call site you had a SymIntArrayRef
|
| 741 |
+
// argument). In practice, only SymIntArrayRef is used this way, so we
|
| 742 |
+
// didn't bother making it work for the other constructors, we just make sure
|
| 743 |
+
// they're not selectable.
|
| 744 |
+
template <class T>
|
| 745 |
+
using enable_if_list_is_ivalue_constructible = std::enable_if_t<
|
| 746 |
+
std::is_constructible<IValue, T>::value &&
|
| 747 |
+
!std::is_same<T, c10::SymInt>::value,
|
| 748 |
+
std::nullptr_t>;
|
| 749 |
+
|
| 750 |
+
template <class T, enable_if_list_is_ivalue_constructible<T> = nullptr>
|
| 751 |
+
IValue(c10::List<T>&& v);
|
| 752 |
+
template <class T, enable_if_list_is_ivalue_constructible<T> = nullptr>
|
| 753 |
+
IValue(const c10::List<T>& v);
|
| 754 |
+
template <class T, enable_if_list_is_ivalue_constructible<T> = nullptr>
|
| 755 |
+
IValue(at::ArrayRef<T> v);
|
| 756 |
+
template <class T, enable_if_list_is_ivalue_constructible<T> = nullptr>
|
| 757 |
+
IValue(const std::vector<T>& v);
|
| 758 |
+
template <class T, enable_if_list_is_ivalue_constructible<T> = nullptr>
|
| 759 |
+
IValue(std::vector<T>&& v);
|
| 760 |
+
template <class T, size_t N>
|
| 761 |
+
IValue(std::array<T, N> v);
|
| 762 |
+
|
| 763 |
+
// Manual constructors for lists of symints, which decay to int list if
|
| 764 |
+
// possible. To avoid ambiguous overload situations, we template them
|
| 765 |
+
// to prevent implicit conversions
|
| 766 |
+
template <class T>
|
| 767 |
+
using enable_if_symint =
|
| 768 |
+
std::enable_if_t<std::is_same<T, c10::SymInt>::value, std::nullptr_t>;
|
| 769 |
+
|
| 770 |
+
template <class T, enable_if_symint<T> = nullptr>
|
| 771 |
+
IValue(at::ArrayRef<T> v);
|
| 772 |
+
template <class T, enable_if_symint<T> = nullptr>
|
| 773 |
+
IValue(at::OptionalArrayRef<T> v);
|
| 774 |
+
template <class T, enable_if_symint<T> = nullptr>
|
| 775 |
+
IValue(const std::vector<T>& v);
|
| 776 |
+
template <class T, enable_if_symint<T> = nullptr>
|
| 777 |
+
IValue(std::vector<T>&& v);
|
| 778 |
+
|
| 779 |
+
|
| 780 |
+
template <class T>
|
| 781 |
+
using enable_if_ilist_is_ivalue_constructible = std::enable_if_t<
|
| 782 |
+
std::is_constructible<IValue, T>::value &&
|
| 783 |
+
std::is_constructible<IValue, typename IListRef<T>::boxed_type>::
|
| 784 |
+
value &&
|
| 785 |
+
!std::is_same<T, c10::SymInt>::value,
|
| 786 |
+
std::nullptr_t>;
|
| 787 |
+
|
| 788 |
+
template <class T, enable_if_ilist_is_ivalue_constructible<T> = nullptr>
|
| 789 |
+
IValue(c10::IListRef<T> v);
|
| 790 |
+
|
| 791 |
+
// GenericDict
|
| 792 |
+
IValue(c10::Dict<IValue, IValue> v);
|
| 793 |
+
bool isGenericDict() const {
|
| 794 |
+
return Tag::GenericDict == tag;
|
| 795 |
+
}
|
| 796 |
+
c10::Dict<IValue, IValue> toGenericDict() &&;
|
| 797 |
+
c10::Dict<IValue, IValue> toGenericDict() const&;
|
| 798 |
+
|
| 799 |
+
template <class Key, class Value>
|
| 800 |
+
IValue(c10::Dict<Key, Value> v);
|
| 801 |
+
|
| 802 |
+
template <class Key, class Value>
|
| 803 |
+
/// \cond
|
| 804 |
+
/// DOXYGEN_CANNOT_HANDLE_CONSTRUCTORS_WITH_MACROS_SO_EXCLUDE_THIS_LINE_FROM_DOXYGEN
|
| 805 |
+
C10_DEPRECATED_MESSAGE(
|
| 806 |
+
"IValues based on std::unordered_map<K, V> are slow and deprecated. Please use c10::Dict<K, V> instead.")
|
| 807 |
+
/// \endcond
|
| 808 |
+
IValue(std::unordered_map<Key, Value> v);
|
| 809 |
+
|
| 810 |
+
template <class T, enable_if_ivalue_constructible<T> = nullptr>
|
| 811 |
+
IValue(c10::optional<T> v);
|
| 812 |
+
template <class T, enable_if_list_is_ivalue_constructible<T> = nullptr>
|
| 813 |
+
IValue(c10::OptionalArrayRef<T> v);
|
| 814 |
+
IValue(c10::nullopt_t);
|
| 815 |
+
|
| 816 |
+
// ClassType
|
| 817 |
+
IValue(c10::intrusive_ptr<ivalue::Object> v);
|
| 818 |
+
bool isObject() const {
|
| 819 |
+
return tag == Tag::Object;
|
| 820 |
+
}
|
| 821 |
+
c10::intrusive_ptr<ivalue::Object> toObject() &&;
|
| 822 |
+
c10::intrusive_ptr<ivalue::Object> toObject() const&;
|
| 823 |
+
ivalue::Object& toObjectRef() const;
|
| 824 |
+
|
| 825 |
+
torch::jit::Module toModule() const;
|
| 826 |
+
bool isModule() const;
|
| 827 |
+
|
| 828 |
+
// PyObject
|
| 829 |
+
IValue(c10::intrusive_ptr<ivalue::PyObjectHolder> v);
|
| 830 |
+
bool isPyObject() const {
|
| 831 |
+
return tag == Tag::PyObject;
|
| 832 |
+
}
|
| 833 |
+
c10::intrusive_ptr<ivalue::PyObjectHolder> toPyObjectHolder() &&;
|
| 834 |
+
c10::intrusive_ptr<ivalue::PyObjectHolder> toPyObjectHolder() const&;
|
| 835 |
+
PyObject* toPyObject() const;
|
| 836 |
+
|
| 837 |
+
// Enum
|
| 838 |
+
explicit IValue(c10::intrusive_ptr<ivalue::EnumHolder> v);
|
| 839 |
+
bool isEnum() const {
|
| 840 |
+
return tag == Tag::Enum;
|
| 841 |
+
}
|
| 842 |
+
c10::intrusive_ptr<ivalue::EnumHolder> toEnumHolder() &&;
|
| 843 |
+
c10::intrusive_ptr<ivalue::EnumHolder> toEnumHolder() const&;
|
| 844 |
+
|
| 845 |
+
// None
|
| 846 |
+
IValue() : tag(Tag::None) {}
|
| 847 |
+
bool isNone() const {
|
| 848 |
+
return Tag::None == tag;
|
| 849 |
+
}
|
| 850 |
+
std::string toNone() const {
|
| 851 |
+
AT_ASSERT(isNone());
|
| 852 |
+
return "None";
|
| 853 |
+
}
|
| 854 |
+
|
| 855 |
+
static IValue uninitialized() {
|
| 856 |
+
auto i = IValue();
|
| 857 |
+
i.tag = Tag::Uninitialized;
|
| 858 |
+
return i;
|
| 859 |
+
}
|
| 860 |
+
|
| 861 |
+
// Scalar, which gets encoded as either an Int, a Double or a ComplexDouble
|
| 862 |
+
IValue(const at::Scalar& s) : IValue() {
|
| 863 |
+
// NB: do the symbolic versions first, as isFloatingPoint is true
|
| 864 |
+
// for both SymFloat and double
|
| 865 |
+
if (s.isSymInt()) {
|
| 866 |
+
tag = Tag::SymInt;
|
| 867 |
+
payload.u.as_intrusive_ptr = s.toSymInt().toSymNode().release();
|
| 868 |
+
} else if (s.isSymFloat()) {
|
| 869 |
+
tag = Tag::SymFloat;
|
| 870 |
+
payload.u.as_intrusive_ptr = s.toSymFloat().toSymNodeImpl().release();
|
| 871 |
+
} else if (s.isSymBool()) {
|
| 872 |
+
tag = Tag::SymBool;
|
| 873 |
+
payload.u.as_intrusive_ptr = s.toSymBool().toSymNodeImpl().release();
|
| 874 |
+
} else if (s.isFloatingPoint()) {
|
| 875 |
+
tag = Tag::Double;
|
| 876 |
+
payload.u.as_double = s.toDouble();
|
| 877 |
+
} else if (s.isComplex()) {
|
| 878 |
+
*this = s.toComplexDouble();
|
| 879 |
+
} else if (s.isBoolean()) {
|
| 880 |
+
tag = Tag::Bool;
|
| 881 |
+
payload.u.as_bool = s.toBool();
|
| 882 |
+
} else {
|
| 883 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
| 884 |
+
s.isIntegral(false), "Unknown type in Scalar");
|
| 885 |
+
tag = Tag::Int;
|
| 886 |
+
payload.u.as_int = s.toLong();
|
| 887 |
+
}
|
| 888 |
+
}
|
| 889 |
+
|
| 890 |
+
bool isScalar() const {
|
| 891 |
+
return isDouble() || isInt() || isComplexDouble() || isBool() ||
|
| 892 |
+
isSymInt() || isSymFloat() || isSymBool();
|
| 893 |
+
}
|
| 894 |
+
|
| 895 |
+
at::Scalar toScalar() const {
|
| 896 |
+
if (isDouble())
|
| 897 |
+
return toDouble();
|
| 898 |
+
else if (isInt())
|
| 899 |
+
return toInt();
|
| 900 |
+
else if (isComplexDouble())
|
| 901 |
+
return toComplexDouble();
|
| 902 |
+
else if (isBool())
|
| 903 |
+
return toBool();
|
| 904 |
+
else if (isSymInt())
|
| 905 |
+
return toSymInt();
|
| 906 |
+
else if (isSymFloat())
|
| 907 |
+
return toSymFloat();
|
| 908 |
+
else if (isSymBool())
|
| 909 |
+
return toSymBool();
|
| 910 |
+
throw std::runtime_error("IValue is not a Scalar");
|
| 911 |
+
}
|
| 912 |
+
|
| 913 |
+
// Device
|
| 914 |
+
IValue(c10::Device d) : tag(Tag::Device) {
|
| 915 |
+
payload.u.as_device.type = d.type();
|
| 916 |
+
payload.u.as_device.index = d.index();
|
| 917 |
+
}
|
| 918 |
+
bool isDevice() const {
|
| 919 |
+
return Tag::Device == tag;
|
| 920 |
+
}
|
| 921 |
+
c10::Device toDevice() const {
|
| 922 |
+
AT_ASSERT(isDevice());
|
| 923 |
+
return c10::Device(payload.u.as_device.type, payload.u.as_device.index);
|
| 924 |
+
}
|
| 925 |
+
|
| 926 |
+
// Stream
|
| 927 |
+
IValue(c10::Stream s) : tag(Tag::Stream) {
|
| 928 |
+
auto v = c10::make_intrusive<ivalue::StreamData3Holder>(s.pack3());
|
| 929 |
+
payload.u.as_intrusive_ptr = v.release();
|
| 930 |
+
}
|
| 931 |
+
c10::Stream toStream() &&;
|
| 932 |
+
c10::Stream toStream() const&;
|
| 933 |
+
bool isStream() const {
|
| 934 |
+
return Tag::Stream == tag;
|
| 935 |
+
}
|
| 936 |
+
|
| 937 |
+
// ScalarType
|
| 938 |
+
IValue(ScalarType t)
|
| 939 |
+
: IValue(static_cast<std::underlying_type<ScalarType>::type>(t)) {}
|
| 940 |
+
at::ScalarType toScalarType() const {
|
| 941 |
+
return static_cast<at::ScalarType>(toInt());
|
| 942 |
+
}
|
| 943 |
+
|
| 944 |
+
// Layout
|
| 945 |
+
IValue(Layout l)
|
| 946 |
+
: IValue(static_cast<std::underlying_type<Layout>::type>(l)) {}
|
| 947 |
+
at::Layout toLayout() const {
|
| 948 |
+
return static_cast<at::Layout>(toInt());
|
| 949 |
+
}
|
| 950 |
+
|
| 951 |
+
// MemoryFormat
|
| 952 |
+
IValue(MemoryFormat m)
|
| 953 |
+
: IValue(static_cast<std::underlying_type<MemoryFormat>::type>(m)) {}
|
| 954 |
+
at::MemoryFormat toMemoryFormat() const {
|
| 955 |
+
return static_cast<at::MemoryFormat>(toInt());
|
| 956 |
+
}
|
| 957 |
+
|
| 958 |
+
// QScheme
|
| 959 |
+
IValue(at::QScheme qscheme) : tag(Tag::Int) {
|
| 960 |
+
payload.u.as_int = static_cast<int64_t>(qscheme);
|
| 961 |
+
}
|
| 962 |
+
|
| 963 |
+
at::QScheme toQScheme() const {
|
| 964 |
+
return static_cast<at::QScheme>(toInt());
|
| 965 |
+
}
|
| 966 |
+
|
| 967 |
+
// Dimname
|
| 968 |
+
IValue(at::Dimname dimname) : IValue(dimname.symbol().toQualString()) {}
|
| 969 |
+
|
| 970 |
+
at::Dimname toDimname() const {
|
| 971 |
+
return at::Dimname::fromSymbol(Symbol::fromQualString(toStringRef()));
|
| 972 |
+
}
|
| 973 |
+
|
| 974 |
+
// Generator
|
| 975 |
+
IValue(at::Generator g) : tag(Tag::Generator) {
|
| 976 |
+
payload.u.as_intrusive_ptr =
|
| 977 |
+
null_to_undefined_tensor(g.unsafeReleaseGeneratorImpl());
|
| 978 |
+
}
|
| 979 |
+
bool isGenerator() const {
|
| 980 |
+
return Tag::Generator == tag;
|
| 981 |
+
}
|
| 982 |
+
at::Generator toGenerator() &&;
|
| 983 |
+
at::Generator toGenerator() const&;
|
| 984 |
+
|
| 985 |
+
// for debugging
|
| 986 |
+
std::string tagKind() const {
|
| 987 |
+
switch (tag) {
|
| 988 |
+
#define DEFINE_CASE(x) \
|
| 989 |
+
case Tag::x: \
|
| 990 |
+
return #x;
|
| 991 |
+
TORCH_FORALL_TAGS(DEFINE_CASE)
|
| 992 |
+
#undef DEFINE_CASE
|
| 993 |
+
}
|
| 994 |
+
return "InvalidTag(" + std::to_string(static_cast<int>(tag)) + ")";
|
| 995 |
+
}
|
| 996 |
+
|
| 997 |
+
// generic v.to<at::Tensor>() implementations
|
| 998 |
+
// that can be used in special functions like pop/push
|
| 999 |
+
// that use template meta-programming.
|
| 1000 |
+
// prefer the directly named methods when you can,
|
| 1001 |
+
// since they are simpler to understand
|
| 1002 |
+
|
| 1003 |
+
// Note: if you get linker errors saying one of these is missing,
|
| 1004 |
+
// change it to ... && = delete; and you will see better error messages for
|
| 1005 |
+
// why However, we cannot commit this because some compiler versions barf on
|
| 1006 |
+
// it.
|
| 1007 |
+
template <typename T>
|
| 1008 |
+
T to() &&;
|
| 1009 |
+
template <typename T>
|
| 1010 |
+
typename c10::detail::ivalue_to_const_ref_overload_return<T>::type to()
|
| 1011 |
+
const&;
|
| 1012 |
+
|
| 1013 |
+
// ToOptional: convert a IValue to the Optional obj that accepts both T and
|
| 1014 |
+
// None
|
| 1015 |
+
template <typename T>
|
| 1016 |
+
optional<T> toOptional();
|
| 1017 |
+
template <typename T>
|
| 1018 |
+
optional<T> toOptional() const;
|
| 1019 |
+
|
| 1020 |
+
/// @private [doxygen private]
|
| 1021 |
+
/// this is a shallow comparison of two IValues to test the object identity
|
| 1022 |
+
bool isSameIdentity(const IValue& rhs) const;
|
| 1023 |
+
|
| 1024 |
+
// Computes the "official" string representation of an IValue. This produces a
|
| 1025 |
+
// TorchScript expression that can be used to recreate an IValue with the same
|
| 1026 |
+
// value (e.g. when we are printing constants in the serializer).
|
| 1027 |
+
//
|
| 1028 |
+
// Callers can use `customFormatter` to override how `repr()` prints out an
|
| 1029 |
+
// IValue. This is useful if you have some other environment where you can
|
| 1030 |
+
// look up values, and you want to print a reference to that environment (like
|
| 1031 |
+
// the serializer's constant table).
|
| 1032 |
+
//
|
| 1033 |
+
// repr() is not necessarily defined on all objects!
|
| 1034 |
+
std::ostream& repr(
|
| 1035 |
+
std::ostream& stream,
|
| 1036 |
+
std::function<bool(std::ostream&, const IValue& v)> customFormatter)
|
| 1037 |
+
const;
|
| 1038 |
+
|
| 1039 |
+
// Computes an "informal" string representation of an IValue. This should be
|
| 1040 |
+
// used for debugging, or servicing `print()`-like functions.
|
| 1041 |
+
// This is different from `repr()` in that there is no expectation that we can
|
| 1042 |
+
// exactly reconstruct an IValue from the output; feel free to use a
|
| 1043 |
+
// concise/pretty form
|
| 1044 |
+
TORCH_API friend std::ostream& operator<<(std::ostream& out, const IValue& v);
|
| 1045 |
+
|
| 1046 |
+
bool isPtrType() const {
|
| 1047 |
+
if (isTensor()) {
|
| 1048 |
+
return payload.as_tensor.defined();
|
| 1049 |
+
}
|
| 1050 |
+
return isIntrusivePtrLegacyBehavior();
|
| 1051 |
+
}
|
| 1052 |
+
|
| 1053 |
+
/// @private [doxygen private]
|
| 1054 |
+
const void* internalToPointer() const {
|
| 1055 |
+
TORCH_INTERNAL_ASSERT(
|
| 1056 |
+
isPtrType(), "Can only call internalToPointer() for pointer types");
|
| 1057 |
+
if (isTensor()) {
|
| 1058 |
+
return payload.as_tensor.unsafeGetTensorImpl();
|
| 1059 |
+
} else {
|
| 1060 |
+
return payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()
|
| 1061 |
+
? payload.u.as_intrusive_ptr
|
| 1062 |
+
: nullptr;
|
| 1063 |
+
}
|
| 1064 |
+
}
|
| 1065 |
+
|
| 1066 |
+
template <typename T = c10::PlatformType>
|
| 1067 |
+
TypePtr type() const;
|
| 1068 |
+
|
| 1069 |
+
// Detect aliased tensors.
|
| 1070 |
+
struct HashAliasedIValue {
|
| 1071 |
+
size_t hashTensor(const at::Tensor& ten) const {
|
| 1072 |
+
if (ten.is_sparse()) {
|
| 1073 |
+
// COO sparse tensors have a "values" tensor and an "indices" tensor
|
| 1074 |
+
// so this will detect overlap of sparse tensors that share a values
|
| 1075 |
+
// tensor, but not sparse tensors that share an indices tensor.
|
| 1076 |
+
return hashTensor(ten._values());
|
| 1077 |
+
} else if (ten.is_sparse_csr()) {
|
| 1078 |
+
// COO sparse tensors have a "values" tensor and an "indices" tensor
|
| 1079 |
+
// so this will detect overlap of sparse tensors that share a values
|
| 1080 |
+
// tensor, but not sparse tensors that share an indices tensor.
|
| 1081 |
+
return hashTensor(ten.values());
|
| 1082 |
+
} else if (!ten.has_storage()) {
|
| 1083 |
+
// Opaque tensors such as the ones constructed by the MKL-DNN backend
|
| 1084 |
+
// don't have storage so we just use their TensorImpls.
|
| 1085 |
+
// TODO: Find way to expose alias info for opaque tensors.
|
| 1086 |
+
return reinterpret_cast<size_t>(ten.unsafeGetTensorImpl());
|
| 1087 |
+
} else {
|
| 1088 |
+
return reinterpret_cast<size_t>(ten.storage().unsafeGetStorageImpl());
|
| 1089 |
+
}
|
| 1090 |
+
}
|
| 1091 |
+
size_t operator()(const IValue& val) const {
|
| 1092 |
+
if (val.isTensor()) {
|
| 1093 |
+
return hashTensor(val.toTensor());
|
| 1094 |
+
}
|
| 1095 |
+
// If it is not a Tensor, then two mutable IValues alias each other only
|
| 1096 |
+
// if they are the same pointer.
|
| 1097 |
+
return val.payload.u.as_int;
|
| 1098 |
+
}
|
| 1099 |
+
};
|
| 1100 |
+
|
| 1101 |
+
struct CompAliasedIValues {
|
| 1102 |
+
bool operator()(const IValue& lhs, const IValue& rhs) const {
|
| 1103 |
+
return lhs.isAliasOf(rhs);
|
| 1104 |
+
}
|
| 1105 |
+
};
|
| 1106 |
+
|
| 1107 |
+
using HashAliasedIValues =
|
| 1108 |
+
std::unordered_set<IValue, HashAliasedIValue, CompAliasedIValues>;
|
| 1109 |
+
using HashAliasedIValueMap =
|
| 1110 |
+
std::unordered_map<IValue, IValue, HashAliasedIValue, CompAliasedIValues>;
|
| 1111 |
+
|
| 1112 |
+
// Chechs if this and rhs has a subvalues in common.
|
| 1113 |
+
// [t1,t2] and [t2, t3] returns true.
|
| 1114 |
+
bool overlaps(const IValue& rhs) const;
|
| 1115 |
+
|
| 1116 |
+
// Inserts all subvalues of this in subValues.
|
| 1117 |
+
void getSubValues(HashAliasedIValues& subValues) const;
|
| 1118 |
+
|
| 1119 |
+
// Apply visitor to every subvalue.
|
| 1120 |
+
// TODO: There are several places that recurse over IValue. This is fragile.
|
| 1121 |
+
// This visitor should be used to recurse over ivalues.
|
| 1122 |
+
void visit(const std::function<bool(const IValue&)>& visitor) const;
|
| 1123 |
+
IValue deepcopy(c10::optional<at::Device> device = c10::nullopt) const;
|
| 1124 |
+
IValue deepcopy(
|
| 1125 |
+
HashAliasedIValueMap& memo,
|
| 1126 |
+
c10::optional<at::Device> device = c10::nullopt) const;
|
| 1127 |
+
|
| 1128 |
+
private:
|
| 1129 |
+
static c10::intrusive_ptr_target* null_to_undefined_tensor(
|
| 1130 |
+
c10::intrusive_ptr_target* p) {
|
| 1131 |
+
return p ? p
|
| 1132 |
+
: static_cast<c10::intrusive_ptr_target*>(
|
| 1133 |
+
c10::UndefinedTensorImpl::singleton());
|
| 1134 |
+
}
|
| 1135 |
+
|
| 1136 |
+
static bool ptrEqual(const IValue& lhs, const IValue& rhs);
|
| 1137 |
+
// NOTE: IValue tags are intentionally private. In the future we may encode
|
| 1138 |
+
// this value different (e.g. using NaN boxing), and this would make it more
|
| 1139 |
+
// costly to determine the tag for all types vs just determining if something
|
| 1140 |
+
// is a particular type. Instead we want clients to use the `isX` methods when
|
| 1141 |
+
// possible. If for perf. reasons you really, absolutely, must have a jump
|
| 1142 |
+
// table, then we can revisit this.
|
| 1143 |
+
enum class Tag : uint32_t {
|
| 1144 |
+
#define DEFINE_TAG(x) x,
|
| 1145 |
+
TORCH_FORALL_TAGS(DEFINE_TAG)
|
| 1146 |
+
#undef DEFINE_TAG
|
| 1147 |
+
};
|
| 1148 |
+
|
| 1149 |
+
#define COUNT_TAG(x) 1 +
|
| 1150 |
+
static constexpr auto kNumTags = TORCH_FORALL_TAGS(COUNT_TAG) 0;
|
| 1151 |
+
#undef COUNT_TAG
|
| 1152 |
+
|
| 1153 |
+
template <
|
| 1154 |
+
class T,
|
| 1155 |
+
class NullType = c10::detail::intrusive_target_default_null_type<T>>
|
| 1156 |
+
c10::intrusive_ptr<T, NullType> moveToIntrusivePtr();
|
| 1157 |
+
template <
|
| 1158 |
+
typename T,
|
| 1159 |
+
class NullType = c10::detail::intrusive_target_default_null_type<T>>
|
| 1160 |
+
c10::intrusive_ptr<T, NullType> toIntrusivePtr() const;
|
| 1161 |
+
|
| 1162 |
+
void destroy() {
|
| 1163 |
+
// We carefully construct this call to both 1) avoid UB by using
|
| 1164 |
+
// the "wrong" one of as_tensor and as_intrusive_ptr and 2) enable
|
| 1165 |
+
// the compiler to generate the same code for each case. It is
|
| 1166 |
+
// surprisingly difficult to get this right.
|
| 1167 |
+
if (isTensor() || isIntrusivePtr()) {
|
| 1168 |
+
c10::intrusive_ptr_target* p = isTensor()
|
| 1169 |
+
? payload.as_tensor.unsafeGetTensorImpl()
|
| 1170 |
+
: payload.u.as_intrusive_ptr;
|
| 1171 |
+
c10::intrusive_ptr<intrusive_ptr_target, c10::UndefinedTensorImpl>::
|
| 1172 |
+
reclaim(p);
|
| 1173 |
+
// No need to make this destructor call!
|
| 1174 |
+
// payload.as_tensor.~Tensor();
|
| 1175 |
+
}
|
| 1176 |
+
}
|
| 1177 |
+
|
| 1178 |
+
C10_ALWAYS_INLINE void moveFrom(IValue&& rhs) noexcept {
|
| 1179 |
+
if (rhs.isTensor()) {
|
| 1180 |
+
new (&payload.as_tensor) at::Tensor(std::move(rhs.payload.as_tensor));
|
| 1181 |
+
// As far as I can tell, omitting the usual explicit destructor call
|
| 1182 |
+
// is not UB in and of itself, and it's a slight perf win. The
|
| 1183 |
+
// destructor is a no-op, because the moved-from Tensor is
|
| 1184 |
+
// effectively an intrusive_ptr in the null state, so we don't need
|
| 1185 |
+
// the behavior for correctness reasons either. Leaving this
|
| 1186 |
+
// explanatory comment, including commented-out destructor call, to
|
| 1187 |
+
// make this abundantly clear.
|
| 1188 |
+
//
|
| 1189 |
+
// rhs.payload.as_tensor.~Tensor();
|
| 1190 |
+
} else {
|
| 1191 |
+
payload.u = rhs.payload.u;
|
| 1192 |
+
}
|
| 1193 |
+
tag = rhs.tag;
|
| 1194 |
+
rhs.clearToNone();
|
| 1195 |
+
}
|
| 1196 |
+
|
| 1197 |
+
void clearToNone() noexcept {
|
| 1198 |
+
payload.u.as_int = 0;
|
| 1199 |
+
tag = Tag::None;
|
| 1200 |
+
}
|
| 1201 |
+
|
| 1202 |
+
private:
|
| 1203 |
+
// This is the source of truth for isIntrusivePtr; edit results here
|
| 1204 |
+
// as needed and isIntrusivePtr will pick them up.
|
| 1205 |
+
// NOLINTBEGIN(bugprone-branch-clone)
|
| 1206 |
+
static constexpr bool isIntrusivePtrConstexpr(Tag tag) {
|
| 1207 |
+
switch (tag) {
|
| 1208 |
+
case Tag::None:
|
| 1209 |
+
return false;
|
| 1210 |
+
case Tag::Tensor:
|
| 1211 |
+
return false;
|
| 1212 |
+
case Tag::Storage:
|
| 1213 |
+
return true;
|
| 1214 |
+
case Tag::Generator:
|
| 1215 |
+
return true;
|
| 1216 |
+
case Tag::Double:
|
| 1217 |
+
return false;
|
| 1218 |
+
case Tag::ComplexDouble:
|
| 1219 |
+
return true;
|
| 1220 |
+
case Tag::Int:
|
| 1221 |
+
return false;
|
| 1222 |
+
case Tag::SymInt:
|
| 1223 |
+
return true;
|
| 1224 |
+
case Tag::SymFloat:
|
| 1225 |
+
return true;
|
| 1226 |
+
case Tag::SymBool:
|
| 1227 |
+
return true;
|
| 1228 |
+
case Tag::Bool:
|
| 1229 |
+
return false;
|
| 1230 |
+
case Tag::Tuple:
|
| 1231 |
+
return true;
|
| 1232 |
+
case Tag::String:
|
| 1233 |
+
return true;
|
| 1234 |
+
case Tag::Blob:
|
| 1235 |
+
return true;
|
| 1236 |
+
case Tag::GenericList:
|
| 1237 |
+
return true;
|
| 1238 |
+
case Tag::GenericDict:
|
| 1239 |
+
return true;
|
| 1240 |
+
case Tag::Future:
|
| 1241 |
+
return true;
|
| 1242 |
+
case Tag::Await:
|
| 1243 |
+
return true;
|
| 1244 |
+
case Tag::Device:
|
| 1245 |
+
return false;
|
| 1246 |
+
case Tag::Stream:
|
| 1247 |
+
return true;
|
| 1248 |
+
case Tag::Object:
|
| 1249 |
+
return true;
|
| 1250 |
+
case Tag::PyObject:
|
| 1251 |
+
return true;
|
| 1252 |
+
case Tag::Uninitialized:
|
| 1253 |
+
return false;
|
| 1254 |
+
case Tag::Capsule:
|
| 1255 |
+
return true;
|
| 1256 |
+
case Tag::RRef:
|
| 1257 |
+
return true;
|
| 1258 |
+
case Tag::Quantizer:
|
| 1259 |
+
return true;
|
| 1260 |
+
case Tag::Enum:
|
| 1261 |
+
return true;
|
| 1262 |
+
}
|
| 1263 |
+
return false;
|
| 1264 |
+
}
|
| 1265 |
+
// NOLINTEND(bugprone-branch-clone)
|
| 1266 |
+
|
| 1267 |
+
public:
|
| 1268 |
+
// Don't edit this just to add results for new tags; edit
|
| 1269 |
+
// isIntrusivePtrConstexpr above.
|
| 1270 |
+
bool isIntrusivePtr() const {
|
| 1271 |
+
// Implementation NOTE: the switch in isIntrusivePtrConstexpr
|
| 1272 |
+
// above is the previous production implementation of this
|
| 1273 |
+
// function. We observed that, at least on x86_64, the generated
|
| 1274 |
+
// instruction sequence was a similar bit vector test to what we
|
| 1275 |
+
// have manually implemented below, except that there was an extra
|
| 1276 |
+
// "bounds check" branch confirming, essentially, that `tag <
|
| 1277 |
+
// kNumTags` and providing a consistent result in that case. We
|
| 1278 |
+
// don't care about the result if tag is out of bounds, so we'd
|
| 1279 |
+
// like to eliminate that comparison and branch; manually
|
| 1280 |
+
// implementing this function as a bit test is the simplest way I
|
| 1281 |
+
// could find to accomplish that elimination.
|
| 1282 |
+
static constexpr uint32_t kTruthTableBitVector =
|
| 1283 |
+
#define TRUTH_TABLE_ENTRY(tag) \
|
| 1284 |
+
(uint32_t(isIntrusivePtrConstexpr(Tag::tag)) << uint32_t(Tag::tag)) |
|
| 1285 |
+
TORCH_FORALL_TAGS(TRUTH_TABLE_ENTRY)
|
| 1286 |
+
#undef TRUTH_TABLE_ENTRY
|
| 1287 |
+
0;
|
| 1288 |
+
|
| 1289 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
| 1290 |
+
static_cast<uint32_t>(tag) < kNumTags,
|
| 1291 |
+
"unexpected tag ",
|
| 1292 |
+
static_cast<int>(tag));
|
| 1293 |
+
return kTruthTableBitVector & (1 << (uint32_t(tag) % 32));
|
| 1294 |
+
}
|
| 1295 |
+
|
| 1296 |
+
// Storage and Generator were treated specially when
|
| 1297 |
+
// is_intrusive_ptr was stored as explicit state. This getter
|
| 1298 |
+
// preserves the old behavior for use with WeakIValue for now.
|
| 1299 |
+
bool isIntrusivePtrLegacyBehavior() const {
|
| 1300 |
+
if (tag == Tag::Storage || tag == Tag::Generator) {
|
| 1301 |
+
return payload.u.as_intrusive_ptr !=
|
| 1302 |
+
c10::UndefinedTensorImpl::singleton();
|
| 1303 |
+
} else {
|
| 1304 |
+
return isIntrusivePtr();
|
| 1305 |
+
}
|
| 1306 |
+
}
|
| 1307 |
+
|
| 1308 |
+
union Payload {
|
| 1309 |
+
// [TriviallyCopyablePayload]
|
| 1310 |
+
// We use a nested union here so that we can make the copy easy
|
| 1311 |
+
// and efficient in the non-tensor (i.e., trivially copyable)
|
| 1312 |
+
// case. Specifically, we do not have to do a switch-on-tag to
|
| 1313 |
+
// figure out which union member to assign; we can just use
|
| 1314 |
+
// TriviallyCopyablePayload::operator=.
|
| 1315 |
+
union TriviallyCopyablePayload {
|
| 1316 |
+
TriviallyCopyablePayload() : as_int(0) {}
|
| 1317 |
+
int64_t as_int;
|
| 1318 |
+
double as_double;
|
| 1319 |
+
bool as_bool;
|
| 1320 |
+
// Invariant: never nullptr; null state is represented as
|
| 1321 |
+
// c10::UndefinedTensorImpl::singleton() for consistency of
|
| 1322 |
+
// representation with Tensor.
|
| 1323 |
+
c10::intrusive_ptr_target* as_intrusive_ptr;
|
| 1324 |
+
struct {
|
| 1325 |
+
c10::DeviceType type;
|
| 1326 |
+
DeviceIndex index;
|
| 1327 |
+
} as_device;
|
| 1328 |
+
} u;
|
| 1329 |
+
at::Tensor as_tensor;
|
| 1330 |
+
Payload() : u() {}
|
| 1331 |
+
~Payload() {}
|
| 1332 |
+
};
|
| 1333 |
+
|
| 1334 |
+
IValue(const Payload& p, Tag t) : tag(t) {
|
| 1335 |
+
if (isTensor()) {
|
| 1336 |
+
new (&payload.as_tensor) at::Tensor(p.as_tensor);
|
| 1337 |
+
} else {
|
| 1338 |
+
payload.u = p.u;
|
| 1339 |
+
}
|
| 1340 |
+
}
|
| 1341 |
+
|
| 1342 |
+
template <typename T>
|
| 1343 |
+
struct TagType {};
|
| 1344 |
+
|
| 1345 |
+
friend MaybeOwnedTraits<IValue>;
|
| 1346 |
+
|
| 1347 |
+
Payload payload;
|
| 1348 |
+
Tag tag{IValue::Tag::None};
|
| 1349 |
+
friend struct WeakIValue;
|
| 1350 |
+
};
|
| 1351 |
+
|
| 1352 |
+
struct TORCH_API WeakIValue final {
|
| 1353 |
+
WeakIValue() = default;
|
| 1354 |
+
|
| 1355 |
+
WeakIValue(const WeakIValue& rhs)
|
| 1356 |
+
: payload(rhs.payload),
|
| 1357 |
+
tag(rhs.tag),
|
| 1358 |
+
is_intrusive_ptr(rhs.is_intrusive_ptr) {
|
| 1359 |
+
if (is_intrusive_ptr &&
|
| 1360 |
+
payload.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()) {
|
| 1361 |
+
c10::raw::weak_intrusive_ptr::incref(payload.as_intrusive_ptr);
|
| 1362 |
+
}
|
| 1363 |
+
}
|
| 1364 |
+
WeakIValue(const IValue& rhs)
|
| 1365 |
+
: tag(rhs.tag), is_intrusive_ptr(rhs.isIntrusivePtrLegacyBehavior()) {
|
| 1366 |
+
if (rhs.isTensor()) {
|
| 1367 |
+
payload.as_intrusive_ptr = rhs.unsafeToTensorImpl();
|
| 1368 |
+
is_intrusive_ptr = true;
|
| 1369 |
+
} else {
|
| 1370 |
+
payload = rhs.payload.u;
|
| 1371 |
+
}
|
| 1372 |
+
if (is_intrusive_ptr) {
|
| 1373 |
+
if (payload.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()) {
|
| 1374 |
+
c10::raw::weak_intrusive_ptr::incref(payload.as_intrusive_ptr);
|
| 1375 |
+
}
|
| 1376 |
+
}
|
| 1377 |
+
}
|
| 1378 |
+
WeakIValue(WeakIValue&& rhs) noexcept : WeakIValue() {
|
| 1379 |
+
swap(rhs);
|
| 1380 |
+
}
|
| 1381 |
+
~WeakIValue() {
|
| 1382 |
+
if (is_intrusive_ptr &&
|
| 1383 |
+
payload.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()) {
|
| 1384 |
+
c10::raw::weak_intrusive_ptr::decref(payload.as_intrusive_ptr);
|
| 1385 |
+
}
|
| 1386 |
+
}
|
| 1387 |
+
WeakIValue& operator=(WeakIValue&& rhs) & noexcept {
|
| 1388 |
+
WeakIValue(std::move(rhs)).swap(*this); // this also sets rhs to None
|
| 1389 |
+
return *this;
|
| 1390 |
+
}
|
| 1391 |
+
WeakIValue& operator=(WeakIValue const& rhs) & {
|
| 1392 |
+
WeakIValue(rhs).swap(*this);
|
| 1393 |
+
return *this;
|
| 1394 |
+
}
|
| 1395 |
+
void swap(WeakIValue& rhs) noexcept {
|
| 1396 |
+
std::swap(payload, rhs.payload);
|
| 1397 |
+
std::swap(is_intrusive_ptr, rhs.is_intrusive_ptr);
|
| 1398 |
+
std::swap(tag, rhs.tag);
|
| 1399 |
+
}
|
| 1400 |
+
|
| 1401 |
+
bool isSameIdentity(const WeakIValue& rhs) const {
|
| 1402 |
+
return payload.as_int == rhs.payload.as_int && tag == rhs.tag &&
|
| 1403 |
+
is_intrusive_ptr == rhs.is_intrusive_ptr;
|
| 1404 |
+
}
|
| 1405 |
+
|
| 1406 |
+
IValue lock() const {
|
| 1407 |
+
if (!is_intrusive_ptr) {
|
| 1408 |
+
IValue::Payload newPayload;
|
| 1409 |
+
newPayload.u = payload;
|
| 1410 |
+
return IValue(newPayload, tag);
|
| 1411 |
+
}
|
| 1412 |
+
if (IValue::Tag::Tensor == tag) {
|
| 1413 |
+
auto temp =
|
| 1414 |
+
c10::weak_intrusive_ptr<at::TensorImpl, c10::UndefinedTensorImpl>::
|
| 1415 |
+
reclaim(static_cast<at::TensorImpl*>(payload.as_intrusive_ptr));
|
| 1416 |
+
c10::intrusive_ptr<at::TensorImpl, c10::UndefinedTensorImpl> ip(
|
| 1417 |
+
temp.lock());
|
| 1418 |
+
temp.release();
|
| 1419 |
+
if (!ip) {
|
| 1420 |
+
return IValue();
|
| 1421 |
+
} else {
|
| 1422 |
+
return IValue(at::Tensor(std::move(ip)));
|
| 1423 |
+
}
|
| 1424 |
+
} else {
|
| 1425 |
+
auto temp = c10::weak_intrusive_ptr<c10::intrusive_ptr_target>::reclaim(
|
| 1426 |
+
payload.as_intrusive_ptr == c10::UndefinedTensorImpl::singleton()
|
| 1427 |
+
? nullptr
|
| 1428 |
+
: payload.as_intrusive_ptr);
|
| 1429 |
+
IValue::Payload pl;
|
| 1430 |
+
pl.u.as_intrusive_ptr = temp.lock().release();
|
| 1431 |
+
temp.release();
|
| 1432 |
+
if (!pl.u.as_intrusive_ptr) {
|
| 1433 |
+
return IValue();
|
| 1434 |
+
} else {
|
| 1435 |
+
return IValue(pl, tag);
|
| 1436 |
+
}
|
| 1437 |
+
}
|
| 1438 |
+
}
|
| 1439 |
+
|
| 1440 |
+
size_t use_count() const noexcept {
|
| 1441 |
+
if (!is_intrusive_ptr) {
|
| 1442 |
+
return 1;
|
| 1443 |
+
}
|
| 1444 |
+
auto temp = c10::weak_intrusive_ptr<
|
| 1445 |
+
c10::intrusive_ptr_target,
|
| 1446 |
+
c10::UndefinedTensorImpl>::reclaim(payload.as_intrusive_ptr);
|
| 1447 |
+
size_t result = temp.use_count();
|
| 1448 |
+
temp.release();
|
| 1449 |
+
return result;
|
| 1450 |
+
}
|
| 1451 |
+
|
| 1452 |
+
size_t weak_use_count() const noexcept {
|
| 1453 |
+
if (!is_intrusive_ptr) {
|
| 1454 |
+
return 1;
|
| 1455 |
+
}
|
| 1456 |
+
auto temp = c10::weak_intrusive_ptr<
|
| 1457 |
+
c10::intrusive_ptr_target,
|
| 1458 |
+
c10::UndefinedTensorImpl>::reclaim(payload.as_intrusive_ptr);
|
| 1459 |
+
size_t result = temp.weak_use_count();
|
| 1460 |
+
temp.release();
|
| 1461 |
+
return result;
|
| 1462 |
+
}
|
| 1463 |
+
size_t hash() const {
|
| 1464 |
+
return payload.as_int;
|
| 1465 |
+
}
|
| 1466 |
+
|
| 1467 |
+
private:
|
| 1468 |
+
using Payload = IValue::Payload::TriviallyCopyablePayload;
|
| 1469 |
+
Payload payload;
|
| 1470 |
+
IValue::Tag tag{IValue::Tag::None};
|
| 1471 |
+
bool is_intrusive_ptr{false};
|
| 1472 |
+
};
|
| 1473 |
+
|
| 1474 |
+
// An owning pointer to a type. When the type is class type, it requires a pair
|
| 1475 |
+
// of shared_ptrs to the class type and its owning CU, so that the class type is
|
| 1476 |
+
// guaranteed to stay alive as long as we hold this object.
|
| 1477 |
+
struct TORCH_API StrongTypePtr {
|
| 1478 |
+
StrongTypePtr(std::shared_ptr<torch::jit::CompilationUnit> cu, TypePtr type);
|
| 1479 |
+
|
| 1480 |
+
std::shared_ptr<torch::jit::CompilationUnit> cu_;
|
| 1481 |
+
TypePtr type_;
|
| 1482 |
+
};
|
| 1483 |
+
|
| 1484 |
+
// [Constant Object Weak CompilationUnit Reference]
|
| 1485 |
+
// A non owning pointer to a type. When a class get inserted as a constant
|
| 1486 |
+
// into a graph, if we used a strong pointer we would have a circular reference
|
| 1487 |
+
// from Object -> CompilationUnit and CompilationUnit -> Graph (which owns the
|
| 1488 |
+
// Constant Object)
|
| 1489 |
+
struct TORCH_API WeakTypePtr {
|
| 1490 |
+
WeakTypePtr(std::weak_ptr<torch::jit::CompilationUnit> cu, TypePtr type);
|
| 1491 |
+
|
| 1492 |
+
std::weak_ptr<torch::jit::CompilationUnit> cu_;
|
| 1493 |
+
TypePtr type_;
|
| 1494 |
+
};
|
| 1495 |
+
|
| 1496 |
+
// internal build errors with std::variant :/
|
| 1497 |
+
struct WeakOrStrongCompilationUnit {
|
| 1498 |
+
explicit WeakOrStrongCompilationUnit(
|
| 1499 |
+
std::shared_ptr<torch::jit::CompilationUnit> shared_cu)
|
| 1500 |
+
: strong_ptr_(std::move(shared_cu)), weak_ptr_(c10::nullopt) {}
|
| 1501 |
+
|
| 1502 |
+
explicit WeakOrStrongCompilationUnit(
|
| 1503 |
+
std::weak_ptr<torch::jit::CompilationUnit> weak_cu)
|
| 1504 |
+
: strong_ptr_(c10::nullopt), weak_ptr_(std::move(weak_cu)) {}
|
| 1505 |
+
|
| 1506 |
+
std::shared_ptr<torch::jit::CompilationUnit> getStrongRefOrThrow() const {
|
| 1507 |
+
TORCH_INTERNAL_ASSERT(strong_ptr_ != c10::nullopt);
|
| 1508 |
+
return *strong_ptr_;
|
| 1509 |
+
}
|
| 1510 |
+
|
| 1511 |
+
std::weak_ptr<torch::jit::CompilationUnit> getWeakRefOrThrow() const {
|
| 1512 |
+
TORCH_INTERNAL_ASSERT(weak_ptr_ != c10::nullopt);
|
| 1513 |
+
return *weak_ptr_;
|
| 1514 |
+
}
|
| 1515 |
+
|
| 1516 |
+
bool holdingStrongRef() const {
|
| 1517 |
+
return strong_ptr_ != c10::nullopt;
|
| 1518 |
+
}
|
| 1519 |
+
|
| 1520 |
+
bool holdingEmptyStrongRef() const {
|
| 1521 |
+
return holdingStrongRef() && *strong_ptr_ == nullptr;
|
| 1522 |
+
}
|
| 1523 |
+
|
| 1524 |
+
c10::optional<std::shared_ptr<torch::jit::CompilationUnit>> strong_ptr_;
|
| 1525 |
+
c10::optional<std::weak_ptr<torch::jit::CompilationUnit>> weak_ptr_;
|
| 1526 |
+
};
|
| 1527 |
+
|
| 1528 |
+
// An Object will hold a non-owning Compilation Unit reference if it is a
|
| 1529 |
+
// Constant in the graph and a Owning reference otherwise
|
| 1530 |
+
struct TORCH_API WeakOrStrongTypePtr {
|
| 1531 |
+
explicit WeakOrStrongTypePtr(WeakTypePtr weak)
|
| 1532 |
+
: cu_(WeakOrStrongCompilationUnit(std::move(weak.cu_))),
|
| 1533 |
+
type_(std::move(weak.type_)) {}
|
| 1534 |
+
explicit WeakOrStrongTypePtr(StrongTypePtr strong)
|
| 1535 |
+
: cu_(WeakOrStrongCompilationUnit(std::move(strong.cu_))),
|
| 1536 |
+
type_(std::move(strong.type_)) {}
|
| 1537 |
+
explicit WeakOrStrongTypePtr(WeakOrStrongCompilationUnit cu, TypePtr type)
|
| 1538 |
+
: cu_(std::move(cu)), type_(std::move(type)) {}
|
| 1539 |
+
WeakTypePtr asWeakTypePtr() const;
|
| 1540 |
+
|
| 1541 |
+
WeakOrStrongCompilationUnit cu_;
|
| 1542 |
+
TypePtr type_;
|
| 1543 |
+
|
| 1544 |
+
bool holds_strong_ref() const {
|
| 1545 |
+
return cu_.holdingStrongRef();
|
| 1546 |
+
}
|
| 1547 |
+
|
| 1548 |
+
bool holds_empty_strong_ref() const {
|
| 1549 |
+
return cu_.holdingEmptyStrongRef();
|
| 1550 |
+
}
|
| 1551 |
+
};
|
| 1552 |
+
|
| 1553 |
+
} // namespace c10
|
| 1554 |
+
|
| 1555 |
+
#include <ATen/core/ivalue_inl.h> // IWYU pragma: keep
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/adaption.h
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/Tensor.h>
|
| 4 |
+
#include <ATen/TensorUtils.h>
|
| 5 |
+
#include <ATen/core/List.h>
|
| 6 |
+
#include <c10/core/TensorOptions.h>
|
| 7 |
+
|
| 8 |
+
/*
|
| 9 |
+
* [Note: hacky wrapper removal for optional tensor]
|
| 10 |
+
*
|
| 11 |
+
* The kernel implementation takes an optional tensor marked in the schema as
|
| 12 |
+
* Tensor? but the C++ function takes Tensor instead of the optional<Tensor>
|
| 13 |
+
* expected by the dispatcher.
|
| 14 |
+
*
|
| 15 |
+
* To remove the hacky wrapper, the C++ function is changed to take
|
| 16 |
+
* optional<Tensor> and unwrap the Tensor value at the beginning of
|
| 17 |
+
* the function, e.g.:
|
| 18 |
+
* > c10::MaybeOwned<Tensor> weight_maybe_owned =
|
| 19 |
+
* > at::borrow_from_optional_tensor(weight_opt);
|
| 20 |
+
* > const Tensor& weight = *weight_maybe_owned;
|
| 21 |
+
*
|
| 22 |
+
* We may want to make the kernel handle optional directly without
|
| 23 |
+
* going through the creation of a default-constructed Tensor in
|
| 24 |
+
* at::borrow_from_optional_tensor.
|
| 25 |
+
*/
|
| 26 |
+
|
| 27 |
+
/*
|
| 28 |
+
* [Note: hacky wrapper removal for TensorOptions]
|
| 29 |
+
*
|
| 30 |
+
* The kernel implementation takes a TensorOptions argument but the dispatcher
|
| 31 |
+
* expects separate arguments for dtype, layout, device, pin_memory.
|
| 32 |
+
*
|
| 33 |
+
* To remove the hacky wrapper, the kernel implementation is changed to take
|
| 34 |
+
* the 4 arguments (dtype, layout, device, pin_memory), and assemble the
|
| 35 |
+
* TensorOptions value at the beginning of the function, e.g.:
|
| 36 |
+
* > TensorOptions options = TensorOptions().dtype(dtype).layout(layout)
|
| 37 |
+
* > .device(device).pinned_memory(pin_memory);
|
| 38 |
+
*
|
| 39 |
+
* We may want make the kernel handle these parameters directly without going
|
| 40 |
+
* through the creation of a TensorOptions value.
|
| 41 |
+
*/
|
| 42 |
+
|
| 43 |
+
namespace c10 {
|
| 44 |
+
namespace impl {
|
| 45 |
+
|
| 46 |
+
TORCH_API void common_device_check_failure(Device common_device, const at::Tensor& tensor, at::CheckedFrom methodName, at::CheckedFrom argName);
|
| 47 |
+
|
| 48 |
+
inline void check_and_update_common_device(optional<Device>& common_device, const at::Tensor& tensor, at::CheckedFrom methodName, at::CheckedFrom argName) {
|
| 49 |
+
// TODO: Remove this once the following issue is addressed:
|
| 50 |
+
// https://github.com/pytorch/pytorch/issues/57380
|
| 51 |
+
if (!tensor.defined()) {
|
| 52 |
+
return;
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
if (!common_device.has_value()) {
|
| 56 |
+
common_device = tensor.device();
|
| 57 |
+
return;
|
| 58 |
+
}
|
| 59 |
+
|
| 60 |
+
if (C10_UNLIKELY(common_device != tensor.device())) {
|
| 61 |
+
common_device_check_failure(*common_device, tensor, methodName, argName);
|
| 62 |
+
}
|
| 63 |
+
}
|
| 64 |
+
|
| 65 |
+
inline void check_and_update_common_device(optional<Device>& common_device, const optional<at::Tensor>& tensor, at::CheckedFrom methodName, at::CheckedFrom argName) {
|
| 66 |
+
if (tensor.has_value()) {
|
| 67 |
+
check_and_update_common_device(common_device, tensor.value(), methodName, argName);
|
| 68 |
+
}
|
| 69 |
+
}
|
| 70 |
+
|
| 71 |
+
inline void check_and_update_common_device(optional<Device>& common_device, at::ITensorListRef tensors, at::CheckedFrom methodName, at::CheckedFrom argName) {
|
| 72 |
+
for (const auto& tensor : tensors) {
|
| 73 |
+
check_and_update_common_device(common_device, tensor, methodName, argName);
|
| 74 |
+
}
|
| 75 |
+
}
|
| 76 |
+
|
| 77 |
+
inline void check_and_update_common_device(optional<Device>& common_device, const List<optional<at::Tensor>>& tensors, at::CheckedFrom methodName, at::CheckedFrom argName) {
|
| 78 |
+
for (const auto& tensor : tensors) {
|
| 79 |
+
check_and_update_common_device(common_device, tensor, methodName, argName);
|
| 80 |
+
}
|
| 81 |
+
}
|
| 82 |
+
} // namespace impl
|
| 83 |
+
} // namespace c10
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/infer_schema.h
ADDED
|
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
/**
|
| 4 |
+
* This file contains functionality to take a C++ function and infer its
|
| 5 |
+
* c10::FunctionSchema.
|
| 6 |
+
*/
|
| 7 |
+
|
| 8 |
+
#include <ATen/core/function_schema.h>
|
| 9 |
+
#include <c10/util/Metaprogramming.h>
|
| 10 |
+
|
| 11 |
+
namespace c10 {
|
| 12 |
+
namespace detail {
|
| 13 |
+
|
| 14 |
+
namespace infer_schema {
|
| 15 |
+
|
| 16 |
+
/// The templated inference code creates `ArgumentDef` instead of `Argument`,
|
| 17 |
+
/// because that can be constructed at compile time and has a much smaller
|
| 18 |
+
/// binary size than having calls to `Argument` constructors in the template.
|
| 19 |
+
/// Creating `Argument` objects from `ArgumentDef` can then be done at
|
| 20 |
+
/// runtime in a non-templated way.
|
| 21 |
+
struct ArgumentDef final {
|
| 22 |
+
using GetTypeFn = TypePtr();
|
| 23 |
+
GetTypeFn* getTypeFn;
|
| 24 |
+
GetTypeFn* getFakeTypeFn;
|
| 25 |
+
constexpr ArgumentDef(): getTypeFn(nullptr), getFakeTypeFn(nullptr) {}
|
| 26 |
+
explicit constexpr ArgumentDef(GetTypeFn *getTypeFn, GetTypeFn *getFakeTypeFn): getTypeFn(getTypeFn), getFakeTypeFn(getFakeTypeFn) {}
|
| 27 |
+
};
|
| 28 |
+
|
| 29 |
+
template<bool V>
|
| 30 |
+
struct bool_t {};
|
| 31 |
+
template<> struct bool_t<true> : std::true_type {};
|
| 32 |
+
template<> struct bool_t<false> : std::false_type {};
|
| 33 |
+
|
| 34 |
+
/// Checks the static C++ types `Types` for correctness to catch common error cases.
|
| 35 |
+
template <class... Types>
|
| 36 |
+
constexpr int checkStaticTypes() {
|
| 37 |
+
// Give nice error messages for some of the common error cases.
|
| 38 |
+
// Use a LOUD ERROR MESSAGE SO USERS SEE THE STATIC_ASSERT
|
| 39 |
+
static_assert(std::conjunction<
|
| 40 |
+
bool_t<!std::is_integral<Types>::value || std::is_same<Types, int8_t>::value || std::is_same<Types, int64_t>::value || std::is_same<Types, bool>::value>...
|
| 41 |
+
>::value, "INVALID TYPE: Only int8_t, int64_t and bool are supported as an integral argument type");
|
| 42 |
+
static_assert(std::conjunction<
|
| 43 |
+
bool_t<!std::is_same<Types, float>::value>...
|
| 44 |
+
>::value, "INVALID TYPE: float is not supported as an argument type, use double instead");
|
| 45 |
+
return 0;
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
template <typename... Ts, size_t... Is>
|
| 49 |
+
constexpr std::array<ArgumentDef, sizeof...(Ts)> createArgumentVectorFromTypes(std::index_sequence<Is...>) {
|
| 50 |
+
return (
|
| 51 |
+
// Check types for common errors
|
| 52 |
+
checkStaticTypes<Ts...>(),
|
| 53 |
+
|
| 54 |
+
// Create the return value
|
| 55 |
+
std::array<ArgumentDef, sizeof...(Ts)>{
|
| 56 |
+
ArgumentDef(&getTypePtrCopy<std::decay_t<Ts>>, &getFakeTypePtrCopy<std::decay_t<Ts>>)...}
|
| 57 |
+
);
|
| 58 |
+
}
|
| 59 |
+
|
| 60 |
+
/// Creates a vector of `ArgumentDef` from a list of C++ types that are specified
|
| 61 |
+
/// as template arguments.
|
| 62 |
+
template<class ParameterTypes> struct createArguments final {};
|
| 63 |
+
template<class... ParameterTypes>
|
| 64 |
+
struct createArguments<guts::typelist::typelist<ParameterTypes...>> final {
|
| 65 |
+
static constexpr std::array<ArgumentDef, sizeof...(ParameterTypes)> call() {
|
| 66 |
+
return createArgumentVectorFromTypes<ParameterTypes...>(
|
| 67 |
+
std::make_index_sequence<sizeof...(ParameterTypes)>()
|
| 68 |
+
);
|
| 69 |
+
}
|
| 70 |
+
};
|
| 71 |
+
|
| 72 |
+
/// Creates a vector of `ArgumentDef` from a list of C++ types that are specified
|
| 73 |
+
/// as a tuple (i.e. in the way c10 kernels return values).
|
| 74 |
+
/// It can be a tuple<A, B, C> if there's three output arguments with types A, B, C.
|
| 75 |
+
/// It can be an empty tuple<>, or void for kernels that don't return anything.
|
| 76 |
+
/// It can be a single type A (i.e. no tuple) for the case where a kernel just
|
| 77 |
+
/// returns one value.
|
| 78 |
+
template<class ReturnTypeTuple, class Enable = void> struct createReturns final {};
|
| 79 |
+
|
| 80 |
+
template<class... ReturnTypes>
|
| 81 |
+
struct createReturns<std::tuple<ReturnTypes...>, void> final {
|
| 82 |
+
static constexpr std::array<ArgumentDef, sizeof...(ReturnTypes)> call() {
|
| 83 |
+
return createArgumentVectorFromTypes<ReturnTypes...>(
|
| 84 |
+
std::make_index_sequence<sizeof...(ReturnTypes)>()
|
| 85 |
+
);
|
| 86 |
+
}
|
| 87 |
+
};
|
| 88 |
+
|
| 89 |
+
template<class ReturnType>
|
| 90 |
+
struct createReturns<ReturnType, std::enable_if_t<!std::is_same<void, ReturnType>::value && !guts::is_instantiation_of<std::tuple, ReturnType>::value>> final {
|
| 91 |
+
static constexpr std::array<ArgumentDef, 1> call() {
|
| 92 |
+
return createReturns<std::tuple<ReturnType>>::call();
|
| 93 |
+
}
|
| 94 |
+
};
|
| 95 |
+
|
| 96 |
+
template<>
|
| 97 |
+
struct createReturns<void, void> final {
|
| 98 |
+
static constexpr std::array<ArgumentDef, 0> call() {
|
| 99 |
+
return createReturns<std::tuple<>>::call();
|
| 100 |
+
}
|
| 101 |
+
};
|
| 102 |
+
|
| 103 |
+
template <typename ReturnType>
|
| 104 |
+
struct createSingleReturn {
|
| 105 |
+
static constexpr std::array<ArgumentDef, 1> call() {
|
| 106 |
+
return createArgumentVectorFromTypes<ReturnType>(std::make_index_sequence<1>());
|
| 107 |
+
}
|
| 108 |
+
};
|
| 109 |
+
|
| 110 |
+
TORCH_API FunctionSchema make_function_schema(std::string&& name, std::string&& overload_name, c10::ArrayRef<ArgumentDef> arguments, c10::ArrayRef<ArgumentDef> returns);
|
| 111 |
+
TORCH_API FunctionSchema make_function_schema(c10::ArrayRef<ArgumentDef> arguments, c10::ArrayRef<ArgumentDef> returns);
|
| 112 |
+
|
| 113 |
+
/// Creates a `FunctionSchema` object from a `FunctionTraits` type for a
|
| 114 |
+
/// function. Flattens std::tuple returns into multiple return types
|
| 115 |
+
template <typename FunctionTraits>
|
| 116 |
+
FunctionSchema createFunctionSchemaFromTraitsFlattenedReturns() {
|
| 117 |
+
using ReturnType = typename FunctionTraits::return_type;
|
| 118 |
+
using ParameterTypes = typename FunctionTraits::parameter_types;
|
| 119 |
+
|
| 120 |
+
// arguments and returns are computed into a std::array at compile time and embedded into the binary.
|
| 121 |
+
// The only code executed at runtime here is the one that creates a std::vector
|
| 122 |
+
// of the arguments/returns from the std::array.
|
| 123 |
+
constexpr auto arguments = createArguments<ParameterTypes>::call();
|
| 124 |
+
constexpr auto returns = createReturns<ReturnType>::call();
|
| 125 |
+
|
| 126 |
+
return make_function_schema(arguments, returns);
|
| 127 |
+
}
|
| 128 |
+
|
| 129 |
+
/// Creates a `FunctionSchema` object from a `FunctionTraits` type for a
|
| 130 |
+
/// function. Preserves std::tuple returns as a Tuple return type
|
| 131 |
+
template <typename FunctionTraits>
|
| 132 |
+
FunctionSchema createFunctionSchemaFromTraitsSingleReturn(std::string&& name, std::string&& overload_name) {
|
| 133 |
+
using ReturnType = typename FunctionTraits::return_type;
|
| 134 |
+
using ParameterTypes = typename FunctionTraits::parameter_types;
|
| 135 |
+
|
| 136 |
+
// arguments and returns are computed into a std::array at compile time and embedded into the binary.
|
| 137 |
+
// The only code executed at runtime here is the one that creates a std::vector
|
| 138 |
+
// of the arguments/returns from the std::array.
|
| 139 |
+
constexpr auto arguments = createArguments<ParameterTypes>::call();
|
| 140 |
+
constexpr auto returns = createSingleReturn<ReturnType>::call();
|
| 141 |
+
|
| 142 |
+
return make_function_schema(std::move(name), std::move(overload_name), arguments, returns);
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
}
|
| 146 |
+
}
|
| 147 |
+
|
| 148 |
+
template<class FuncType>
|
| 149 |
+
FunctionSchema inferFunctionSchemaFlattenedReturns() {
|
| 150 |
+
return detail::infer_schema::createFunctionSchemaFromTraitsFlattenedReturns<guts::infer_function_traits_t<FuncType>>();
|
| 151 |
+
}
|
| 152 |
+
|
| 153 |
+
template<class FuncType>
|
| 154 |
+
FunctionSchema inferFunctionSchemaSingleReturn(std::string&& name, std::string&& overload_name) {
|
| 155 |
+
return detail::infer_schema::createFunctionSchemaFromTraitsSingleReturn<guts::infer_function_traits_t<FuncType>>(std::move(name), std::move(overload_name));
|
| 156 |
+
}
|
| 157 |
+
|
| 158 |
+
TORCH_API c10::optional<std::string> findSchemaDifferences(const FunctionSchema& inferred, const FunctionSchema& specified);
|
| 159 |
+
|
| 160 |
+
}
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_allowlist.h
ADDED
|
@@ -0,0 +1,199 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// TODO: unify to C10_MOBILE. In theory this header could be used in OSS.
|
| 4 |
+
#ifdef TEMPLATE_SELECTIVE_BUILD
|
| 5 |
+
#include <ATen/selected_mobile_ops.h>
|
| 6 |
+
#endif
|
| 7 |
+
|
| 8 |
+
/**
|
| 9 |
+
* This header implements functionality to build PyTorch with only a certain
|
| 10 |
+
* set of operators (+ dependencies) included.
|
| 11 |
+
*
|
| 12 |
+
* - Build with -DTORCH_OPERATOR_WHITELIST="aten::add;aten::sub" and only these
|
| 13 |
+
* two ops will be included in your build. The allowlist records operators
|
| 14 |
+
* only, no overloads; if you include aten::add, all overloads of aten::add
|
| 15 |
+
* will be included.
|
| 16 |
+
*
|
| 17 |
+
* Internally, this is done by removing the operator registration calls
|
| 18 |
+
* using compile time programming, and the linker will then prune all
|
| 19 |
+
* operator functions that weren't registered.
|
| 20 |
+
* See Note [Selective build] for more details
|
| 21 |
+
*
|
| 22 |
+
* WARNING: The allowlist mechanism doesn't work for all ways you could go about
|
| 23 |
+
* registering an operator. If the dispatch key / operator name is not
|
| 24 |
+
* sufficiently obvious at compile time, then the allowlisting mechanism
|
| 25 |
+
* will fail (and the operator will be included in the binary anyway).
|
| 26 |
+
*/
|
| 27 |
+
|
| 28 |
+
#include <c10/util/string_view.h>
|
| 29 |
+
#include <c10/core/DispatchKey.h>
|
| 30 |
+
#include <c10/macros/Macros.h>
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
#if defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE)
|
| 34 |
+
#include <ATen/record_function.h>
|
| 35 |
+
#endif
|
| 36 |
+
|
| 37 |
+
namespace c10 {
|
| 38 |
+
|
| 39 |
+
namespace impl {
|
| 40 |
+
|
| 41 |
+
constexpr bool allowlist_contains(string_view allowlist, string_view item); // Forward Declare
|
| 42 |
+
|
| 43 |
+
/**
|
| 44 |
+
* In selective build mode returns true/false depending on whether a build
|
| 45 |
+
* feature is available or not.
|
| 46 |
+
*
|
| 47 |
+
* In instrumenting mode (tracing mode), always returns true, and doesn't
|
| 48 |
+
* trigger any side effects.
|
| 49 |
+
*/
|
| 50 |
+
constexpr bool is_build_feature_available(const char* name) {
#if !defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE)
  // Selective Build mode.
#if !defined(TORCH_BUILD_FEATURE_ALLOWLIST)
  // No allowlist macro configured: every build feature is considered
  // available.
  (void)name;
  return true;
#else
  // Stringize the macro value and look `name` up in the ';'-separated list.
  return allowlist_contains(
      C10_STRINGIZE(TORCH_BUILD_FEATURE_ALLOWLIST),
      name);
#endif

#else
  // Instrumenting mode.
  // Always reports available and triggers no side effects (see the doc
  // comment above this function).
  (void)name;
  return true;
#endif
}
|
| 68 |
+
|
| 69 |
+
[[noreturn]] void build_feature_required_feature_not_available(const char* feature);
|
| 70 |
+
|
| 71 |
+
/**
|
| 72 |
+
* Use BUILD_FEATURE_REQUIRED macro in user-code.
|
| 73 |
+
*
|
| 74 |
+
* In selective build mode becomes a no-op if the build feature passed
|
| 75 |
+
* in is available. If not available, throws an exception (c10::Error).
|
| 76 |
+
* The compiler is able to perform dead code elimination for code
|
| 77 |
+
* following this method if the build feature is not available.
|
| 78 |
+
*
|
| 79 |
+
* In instrumenting mode (tracing mode), registers (as a side effect)
|
| 80 |
+
* the presence of this specific build feature being triggered.
|
| 81 |
+
*/
|
| 82 |
+
#if !defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE) // selective build mode

#if defined(TORCH_BUILD_FEATURE_ALLOWLIST)
// Raises c10::Error (via build_feature_required_feature_not_available)
// when NAME is not in the build-feature allowlist.
#define BUILD_FEATURE_REQUIRED(NAME) \
  if (!c10::impl::is_build_feature_available(NAME)) { \
    ::c10::impl::build_feature_required_feature_not_available(NAME); \
  }
#else // Everything trivially selected
// No allowlist configured: the check expands to nothing.
#define BUILD_FEATURE_REQUIRED(NAME)

#endif

#else // trace mode
// Records (as a side effect) that this build feature was triggered.
#define BUILD_FEATURE_REQUIRED(NAME) \
  RECORD_FUNCTION_WITH_SCOPE( \
      at::RecordScope::BUILD_FEATURE, \
      std::string(NAME), \
      {});
#endif

// Use this macro, and not is_build_feature_available
#define BUILD_FEATURE_AVAILABLE(NAME) ::c10::impl::is_build_feature_available(NAME)
|
| 104 |
+
|
| 105 |
+
// returns true iff allowlist contains item
// allowlist_contains("a;bc;d", "bc") == true
constexpr bool allowlist_contains(string_view allowlist, string_view item) {
  // Scan the ';'-separated list one segment at a time, comparing each
  // segment against `item`. An empty allowlist consists of one empty
  // segment, so allowlist_contains("", "") is true.
  size_t start = 0;
  while (true) {
    const size_t sep = allowlist.find(';', start);
    if (sep == string_view::npos) {
      // Final (or only) segment: extends to the end of the list.
      return allowlist.substr(start).compare(item) == 0;
    }
    if (allowlist.substr(start, sep - start).compare(item) == 0) {
      return true;
    }
    start = sep + 1;
  }
}
|
| 127 |
+
|
| 128 |
+
// Returns true iff the given op name is on the allowlist
// and should be registered
constexpr bool op_allowlist_check(string_view op_name) {
  // The argument must be a fully qualified operator name ("ns::op"),
  // not a full schema string (no '(' allowed).
  assert(op_name.find("::") != string_view::npos);
  // Use assert() instead of throw() due to a gcc bug. See:
  // https://stackoverflow.com/questions/34280729/throw-in-constexpr-function
  // https://github.com/fmtlib/fmt/issues/682
  assert(op_name.find("(") == string_view::npos);
#if !defined(TORCH_OPERATOR_WHITELIST)
  // If the TORCH_OPERATOR_WHITELIST parameter is not defined,
  // all ops are to be registered
  return true;
#else
  return allowlist_contains(
      C10_STRINGIZE(TORCH_OPERATOR_WHITELIST),
      // This function is majorly used for mobile selective build with
      // root operators, where the overload is included in the allowlist.
      op_name);
  // // Strip overload name (as allowlist doesn't contain overloads)
  // // Another function based on this may be added when there's usage
  // // on op names without overload.
  // OperatorNameView::parse(op_name).name);
#endif
}
|
| 152 |
+
|
| 153 |
+
// Returns true iff the given schema string is on the allowlist
// and should be registered
constexpr bool schema_allowlist_check(string_view schema) {
#if defined(TORCH_FORCE_SCHEMA_REGISTRATION)
  // Schema registration is forced on: accept every schema.
  return true;
#else
  // Strip the argument list ("(...) -> ...") so only the operator name
  // is checked against the allowlist.
  return op_allowlist_check(schema.substr(0, schema.find("(")));
#endif
}
|
| 162 |
+
|
| 163 |
+
// Returns true iff the given custom class name is on the allowlist
// and should be registered
constexpr bool custom_class_allowlist_check(string_view custom_class_name) {
#if !defined(TORCH_CUSTOM_CLASS_ALLOWLIST)
  // If the TORCH_CUSTOM_CLASS_ALLOWLIST parameter is not defined,
  // all custom classes are to be registered
  (void)custom_class_name;
  return true;
#else
  // Stringize the macro value and look the class name up in the
  // ';'-separated list.
  return allowlist_contains(
      C10_STRINGIZE(TORCH_CUSTOM_CLASS_ALLOWLIST),
      custom_class_name);
#endif
}
|
| 177 |
+
|
| 178 |
+
// schema_allowlist_check() implicitly depends on a macro, TORCH_OPERATOR_WHITELIST.
|
| 179 |
+
// Add this API to pass arbitrary allowlist.
|
| 180 |
+
constexpr bool op_allowlist_contains_name_in_schema(string_view allowlist, string_view schema) {
|
| 181 |
+
return allowlist_contains(allowlist, schema.substr(0, schema.find("(")));
|
| 182 |
+
}
|
| 183 |
+
|
| 184 |
+
// Returns true iff the given dispatch key is on the allowlist
// and should be registered. When we turn this on, the list of valid
// mobile dispatch keys is hard coded (but you need to make sure
// that you have the correct set of dispatch keys for this).
constexpr bool dispatch_key_allowlist_check(DispatchKey /*k*/) {
#ifdef C10_MOBILE
  // Currently accepts every key on mobile, too.
  return true;
  // Disabled for now: to be enabled later!
  // return k == DispatchKey::CPU || k == DispatchKey::Vulkan || k == DispatchKey::QuantizedCPU || k == DispatchKey::BackendSelect || k == DispatchKey::CatchAll;
#else
  return true;
#endif
}
|
| 197 |
+
|
| 198 |
+
} // namespace impl
|
| 199 |
+
} // namespace c10
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_registration.h
ADDED
|
@@ -0,0 +1,596 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
/**
|
| 4 |
+
* Include this file if you want to register operators. It includes all
|
| 5 |
+
* functionality needed to do so for you.
|
| 6 |
+
*/
|
| 7 |
+
|
| 8 |
+
#include <c10/core/DispatchKey.h>
|
| 9 |
+
#include <c10/core/DispatchKeySet.h>
|
| 10 |
+
#include <c10/core/CompileTimeFunctionPointer.h>
|
| 11 |
+
#include <ATen/core/boxing/KernelFunction.h>
|
| 12 |
+
#include <ATen/core/dispatch/CppSignature.h>
|
| 13 |
+
#include <ATen/core/dispatch/RegistrationHandleRAII.h>
|
| 14 |
+
#include <ATen/core/op_registration/infer_schema.h>
|
| 15 |
+
#if defined(EXPOSE_C2_OPS) || !defined(CAFFE2_IS_XPLAT_BUILD)
|
| 16 |
+
#include <torch/csrc/jit/frontend/function_schema_parser.h>
|
| 17 |
+
#endif
|
| 18 |
+
#include <ATen/core/ATenOpList.h>
|
| 19 |
+
|
| 20 |
+
namespace c10 {
|
| 21 |
+
|
| 22 |
+
namespace detail {
|
| 23 |
+
// The first argument of the schema might be of type DispatchKeySet, in which case we remove it.
|
| 24 |
+
// We do this because every argument in a function schema is expected to be convertable
|
| 25 |
+
// to an ivalue, but DispatchKeySet is not a type we want the jit to be aware of.
|
| 26 |
+
// See Note [Plumbing Keys Through The Dispatcher]
|
| 27 |
+
template<class KernelFunctor>
std::unique_ptr<FunctionSchema> inferFunctionSchemaFromFunctor() {
  // Strip a leading DispatchKeySet argument (if present) before inferring
  // the schema; see Note [Plumbing Keys Through The Dispatcher] above.
  using func_type = typename c10::remove_DispatchKeySet_arg_from_func<KernelFunctor>::func_type;
  return std::make_unique<FunctionSchema>(inferFunctionSchemaFlattenedReturns<func_type>());
}
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
/**
|
| 35 |
+
* An instance of this class handles the registration for one or more operators.
|
| 36 |
+
* Make sure you keep the RegisterOperators instance around since it will
|
| 37 |
+
* deregister the operator it's responsible for in its destructor.
|
| 38 |
+
*
|
| 39 |
+
* Example:
|
| 40 |
+
*
|
| 41 |
+
* > namespace {
|
| 42 |
+
* > class my_kernel_cpu final : public c10::OperatorKernel {
|
| 43 |
+
* > public:
|
| 44 |
+
* > Tensor operator()(Tensor a, Tensor b) {...}
|
| 45 |
+
* > };
|
| 46 |
+
* > }
|
| 47 |
+
* >
|
| 48 |
+
* > static auto registry = c10::RegisterOperators()
|
| 49 |
+
* > .op(c10::RegisterOperators::options()
|
| 50 |
+
* > .schema("my_op")
|
| 51 |
+
* > .kernel<my_kernel_cpu>(DispatchKey::CPU));
|
| 52 |
+
*/
|
| 53 |
+
class TORCH_API RegisterOperators final {
|
| 54 |
+
public:
|
| 55 |
+
RegisterOperators() = default;
|
| 56 |
+
~RegisterOperators() = default;
|
| 57 |
+
|
| 58 |
+
RegisterOperators(const RegisterOperators&) = delete;
|
| 59 |
+
RegisterOperators& operator=(const RegisterOperators&) = delete;
|
| 60 |
+
RegisterOperators(RegisterOperators&&) noexcept = default;
|
| 61 |
+
RegisterOperators& operator=(RegisterOperators&&) noexcept = default;
|
| 62 |
+
|
| 63 |
+
class TORCH_API Options final {
|
| 64 |
+
public:
|
| 65 |
+
Options(const Options&) = delete;
|
| 66 |
+
Options(Options&&) noexcept = delete;
|
| 67 |
+
Options& operator=(const Options&) = delete;
|
| 68 |
+
Options& operator=(Options&&) noexcept = delete;
|
| 69 |
+
|
| 70 |
+
// internal-only for registering stack based kernels
template<KernelFunction::BoxedKernelFunction* kernel_func>
Options&& kernel(DispatchKey dispatch_key) && {
  // Boxed kernels carry no C++ signature and no inferred schema,
  // hence the nullopt / nullptr arguments.
  return std::move(*this).kernel(dispatch_key, KernelFunction::makeFromBoxedFunction<kernel_func>(), nullopt, nullptr);
}
|
| 75 |
+
|
| 76 |
+
// internal-only for registering stack based catch-all kernels
template<KernelFunction::BoxedKernelFunction* kernel_func>
Options&& catchAllKernel() && {
  // nullopt dispatch key marks this as a catch-all kernel; boxed kernels
  // carry no C++ signature and no inferred schema.
  return std::move(*this).kernel(c10::nullopt, KernelFunction::makeFromBoxedFunction<kernel_func>(), nullopt, nullptr);
}
|
| 81 |
+
|
| 82 |
+
// internal only for registering caffe2 ops
Options&& schema(FunctionSchema&& schema) {
  // At most one schema may be attached to a registration.
  TORCH_CHECK(!schemaOrName_.has_value(), "You can only specify the schema once per operator registration.");
  schemaOrName_ = FunctionSchema(std::move(schema));
  return std::move(*this);
}
|
| 88 |
+
|
| 89 |
+
/**
|
| 90 |
+
* Use this to specify the schema for an operator. You can also specify
|
| 91 |
+
* the operator name only to have the function signature part of the
|
| 92 |
+
* schema be inferred from the kernel function.
|
| 93 |
+
*
|
| 94 |
+
* Example:
|
| 95 |
+
*
|
| 96 |
+
* > // Infer function signature from my_kernel_cpu
|
| 97 |
+
* > static auto registry = c10::RegisterOperators()
|
| 98 |
+
* > .op(c10::RegisterOperators::options()
|
| 99 |
+
* > .schema("my_op")
|
| 100 |
+
* > .kernel<my_kernel_cpu>(DispatchKey::CPU));
|
| 101 |
+
* >
|
| 102 |
+
* >
|
| 103 |
+
* > // Explicitly specify full schema
|
| 104 |
+
* > static auto registry = c10::RegisterOperators()
|
| 105 |
+
* > .op(c10::RegisterOperators::options()
|
| 106 |
+
* > .schema("my_op(Tensor a) -> Tensor")
|
| 107 |
+
* > .kernel<my_kernel_cpu>(DispatchKey::CPU));
|
| 108 |
+
*/
|
| 109 |
+
Options&& schema(const std::string& schemaOrName) {
  // At most one schema may be attached to a registration.
  TORCH_CHECK(!schemaOrName_.has_value(), "Tried to register operator ", schemaOrName," but specified schema multiple times. You can only specify the schema once per operator registration.");

#if !defined(EXPOSE_C2_OPS) && defined(CAFFE2_IS_XPLAT_BUILD)
  // Mobile/xplat builds don't ship the schema parser, so string-based
  // schemas cannot be registered there.
  throw std::logic_error("Tried to register operator " + schemaOrName + ". We don't support registering c10 ops on mobile yet because the function schema parser isn't present in the mobile build.");
#else
  // Accepts either a full schema ("my_op(Tensor a) -> Tensor") or a bare
  // operator name ("my_op"); the parser distinguishes the two.
  schemaOrName_ = torch::jit::parseSchemaOrName(schemaOrName);
#endif

  return std::move(*this);
}
|
| 120 |
+
|
| 121 |
+
/**
|
| 122 |
+
* Use this to register an operator whose kernel is implemented as a functor.
|
| 123 |
+
* The kernel is only called for inputs matching the given dispatch key.
|
| 124 |
+
* You can register multiple kernels for different dispatch keys.
|
| 125 |
+
*
|
| 126 |
+
* Example:
|
| 127 |
+
*
|
| 128 |
+
* > namespace {
|
| 129 |
+
* > class my_kernel_cpu final : public c10::OperatorKernel {
|
| 130 |
+
* > public:
|
| 131 |
+
* > Tensor operator()(Tensor a, Tensor b) {...}
|
| 132 |
+
* > };
|
| 133 |
+
* > }
|
| 134 |
+
* >
|
| 135 |
+
* > static auto registry = c10::RegisterOperators()
|
| 136 |
+
* > .op(c10::RegisterOperators::options()
|
| 137 |
+
* > .schema("my_op")
|
| 138 |
+
* > .kernel<my_kernel_cpu>(DispatchKey::CPU));
|
| 139 |
+
*
|
| 140 |
+
* The functor constructor can take arguments to configure the kernel.
|
| 141 |
+
* The arguments are defined in the kernel registration.
|
| 142 |
+
* Example:
|
| 143 |
+
*
|
| 144 |
+
* > namespace {
|
| 145 |
+
* > class my_kernel_cpu final : public c10::OperatorKernel {
|
| 146 |
+
* > public:
|
| 147 |
+
* > explicit my_kernel_cpu(std::string some_configuration, int a, bool b)
|
| 148 |
+
* > : ... {...}
|
| 149 |
+
* >
|
| 150 |
+
* > Tensor operator()(Tensor a, Tensor b) {...}
|
| 151 |
+
* > };
|
| 152 |
+
* > }
|
| 153 |
+
* >
|
| 154 |
+
* > static auto registry = c10::RegisterOperators()
|
| 155 |
+
* > .op(c10::RegisterOperators::options()
|
| 156 |
+
* > .schema("my_op")
|
| 157 |
+
* > .kernel<my_kernel_cpu>(DispatchKey::CPU, "some_configuration", 3, true));
|
| 158 |
+
*/
|
| 159 |
+
template<class KernelFunctor, class... ConstructorParameters>
// enable_if: only enable it if KernelFunctor is actually a functor
std::enable_if_t<guts::is_functor<KernelFunctor>::value, Options&&> kernel(DispatchKey dispatch_key, ConstructorParameters&&... constructorParameters) && {
  static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value, "Tried to register a kernel functor using the kernel<Functor>() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
  static_assert(std::is_constructible<KernelFunctor, ConstructorParameters...>::value, "Wrong argument list for constructor of kernel functor. The arguments to kernel<Functor>(arguments...) must match one of the constructors of Functor.");

  // Construct the functor now (forwarding the configuration arguments)
  // and infer both the C++ signature and the function schema from it.
  return std::move(*this).kernel(
      dispatch_key,
      KernelFunction::makeFromUnboxedFunctor<false, KernelFunctor>(std::make_unique<KernelFunctor>(std::forward<ConstructorParameters>(constructorParameters)...)),
      impl::CppSignature::make<KernelFunctor>(),
      detail::inferFunctionSchemaFromFunctor<KernelFunctor>()
  );
}
|
| 172 |
+
|
| 173 |
+
/**
|
| 174 |
+
* Use this to register an operator whose kernel is implemented as a functor.
|
| 175 |
+
* The kernel is a catch-all kernel, meaning it's called independent from
|
| 176 |
+
* the input. Dispatch is disabled for this operator.
|
| 177 |
+
*
|
| 178 |
+
* Example:
|
| 179 |
+
*
|
| 180 |
+
* > namespace {
|
| 181 |
+
* > class my_kernel_cpu final : public c10::OperatorKernel {
|
| 182 |
+
* > public:
|
| 183 |
+
* > Tensor operator()(Tensor a, Tensor b) {...}
|
| 184 |
+
* > };
|
| 185 |
+
* > }
|
| 186 |
+
* >
|
| 187 |
+
* > static auto registry = c10::RegisterOperators()
|
| 188 |
+
* > .op(c10::RegisterOperators::options()
|
| 189 |
+
* > .schema("my_op")
|
| 190 |
+
* > .catchAllKernel<my_kernel_cpu>());
|
| 191 |
+
*
|
| 192 |
+
* The functor constructor can take arguments to configure the kernel.
|
| 193 |
+
* The arguments are defined in the kernel registration.
|
| 194 |
+
* Example:
|
| 195 |
+
*
|
| 196 |
+
* > namespace {
|
| 197 |
+
* > class my_kernel_cpu final : public c10::OperatorKernel {
|
| 198 |
+
* > public:
|
| 199 |
+
* > explicit my_kernel_cpu(std::string some_configuration, int a, bool b)
|
| 200 |
+
* > : ... {...}
|
| 201 |
+
* >
|
| 202 |
+
* > Tensor operator()(Tensor a, Tensor b) {...}
|
| 203 |
+
* > };
|
| 204 |
+
* > }
|
| 205 |
+
* >
|
| 206 |
+
* > static auto registry = c10::RegisterOperators()
|
| 207 |
+
* > .op(c10::RegisterOperators::options()
|
| 208 |
+
* > .schema("my_op")
|
| 209 |
+
* > .catchAllKernel<my_kernel_cpu>("some_configuration", 3, true));
|
| 210 |
+
*/
|
| 211 |
+
template<class KernelFunctor, class... ConstructorParameters>
// enable_if: only enable it if KernelFunctor is actually a functor
std::enable_if_t<guts::is_functor<KernelFunctor>::value, Options&&> catchAllKernel(ConstructorParameters&&... constructorParameters) && {
  static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value, "Tried to register a kernel functor using the kernel<Functor>() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
  static_assert(std::is_constructible<KernelFunctor, ConstructorParameters...>::value, "Wrong argument list for constructor of kernel functor. The arguments to kernel<Functor>(arguments...) must match one of the constructors of Functor.");

  // Same as the dispatch-keyed functor overload above, but registered as
  // a catch-all (nullopt dispatch key → dispatch disabled).
  return std::move(*this).kernel(
      c10::nullopt,
      KernelFunction::makeFromUnboxedFunctor<false, KernelFunctor>(std::make_unique<KernelFunctor>(std::forward<ConstructorParameters>(constructorParameters)...)),
      impl::CppSignature::make<KernelFunctor>(),
      detail::inferFunctionSchemaFromFunctor<KernelFunctor>()
  );
}
|
| 224 |
+
|
| 225 |
+
/**
|
| 226 |
+
* Use this to register an operator whose kernel is implemented by a function.
|
| 227 |
+
* The kernel is only called for inputs matching the given dispatch key.
|
| 228 |
+
* You can register multiple kernels for different dispatch keys.
|
| 229 |
+
*
|
| 230 |
+
* Example:
|
| 231 |
+
*
|
| 232 |
+
* > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} }
|
| 233 |
+
* >
|
| 234 |
+
* > static auto registry = c10::RegisterOperators()
|
| 235 |
+
* > .op(c10::RegisterOperators::options()
|
| 236 |
+
* > .schema("my_op")
|
| 237 |
+
* > .kernel<decltype(my_kernel_cpu), &my_kernel_cpu>(DispatchKey::CPU));
|
| 238 |
+
*/
|
| 239 |
+
template<class FuncType, FuncType* kernel_func>
// enable_if: only enable it if FuncType is actually a function
std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> kernel(DispatchKey dispatch_key) && {
  static_assert(!std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
  // Compile-time function pointer: nullness can be rejected statically.
  static_assert(kernel_func != nullptr, "Kernel function cannot be nullptr");

  return std::move(*this).kernel(
      dispatch_key,
      KernelFunction::makeFromUnboxedFunction(TORCH_FN(kernel_func)),
      impl::CppSignature::make<FuncType>(),
      // TODO Do schema inference without relying on WrapFunctionIntoFunctor
      detail::inferFunctionSchemaFromFunctor<typename impl::WrapFunctionIntoFunctor<CompileTimeFunctionPointer<FuncType, kernel_func>>::type>()
  );
}
|
| 253 |
+
|
| 254 |
+
/**
|
| 255 |
+
* Use this to register an operator whose kernel is implemented by a function.
|
| 256 |
+
* The kernel is a catch-all kernel, meaning it's called independent from
|
| 257 |
+
* the input. Dispatch is disabled for this operator.
|
| 258 |
+
*
|
| 259 |
+
* Example:
|
| 260 |
+
*
|
| 261 |
+
* > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} }
|
| 262 |
+
* >
|
| 263 |
+
* > static auto registry = c10::RegisterOperators()
|
| 264 |
+
* > .op(c10::RegisterOperators::options()
|
| 265 |
+
* > .schema("my_op")
|
| 266 |
+
* > .catchAllKernel<decltype(my_kernel_cpu), &my_kernel_cpu>());
|
| 267 |
+
*/
|
| 268 |
+
template<class FuncType, FuncType* kernel_func>
// enable_if: only enable it if FuncType is actually a function
std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> catchAllKernel() && {
  static_assert(!std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
  // Compile-time function pointer: nullness can be rejected statically.
  static_assert(kernel_func != nullptr, "Kernel function cannot be nullptr");

  // Registered as a catch-all (nullopt dispatch key → dispatch disabled).
  return std::move(*this).kernel(
      c10::nullopt,
      KernelFunction::makeFromUnboxedFunction(TORCH_FN(kernel_func)),
      impl::CppSignature::make<FuncType>(),
      // TODO Do schema inference without relying on WrapFunctionIntoFunctor
      detail::inferFunctionSchemaFromFunctor<typename impl::WrapFunctionIntoFunctor<CompileTimeFunctionPointer<FuncType, kernel_func>>::type>()
  );
}
|
| 282 |
+
|
| 283 |
+
template<class FuncType>
// enable_if: only enable it if FuncType is actually a function
std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> kernel(DispatchKey dispatch_key, FuncType* kernel_func) && {
  static_assert(!std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
  // Runtime function pointer: nullness can only be checked at runtime.
  TORCH_INTERNAL_ASSERT(kernel_func != nullptr, "Kernel function cannot be nullptr");

  return std::move(*this).kernel(
      dispatch_key,
      KernelFunction::makeFromUnboxedRuntimeFunction(kernel_func),
      impl::CppSignature::make<FuncType>(),
      // TODO Do schema inference without relying on WrapFunctionIntoFunctor
      detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<FuncType>>>()
  );
}
|
| 297 |
+
|
| 298 |
+
template<class FuncType>
// enable_if: only enable it if FuncType is actually a function
std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> catchAllKernel(FuncType* kernel_func) && {
  static_assert(!std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
  // Runtime function pointer: nullness can only be checked at runtime.
  TORCH_INTERNAL_ASSERT(kernel_func != nullptr, "Kernel function cannot be nullptr");

  // Registered as a catch-all (nullopt dispatch key → dispatch disabled).
  return std::move(*this).kernel(
      c10::nullopt,
      KernelFunction::makeFromUnboxedRuntimeFunction(kernel_func),
      impl::CppSignature::make<FuncType>(),
      // TODO Do schema inference without relying on WrapFunctionIntoFunctor
      detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<FuncType>>>()
  );
}
|
| 312 |
+
|
| 313 |
+
/**
|
| 314 |
+
* Use this to register an operator whose kernel is implemented as a lambda.
|
| 315 |
+
* The kernel is only called for inputs matching the given dispatch key.
|
| 316 |
+
* You can register multiple kernels for different dispatch keys.
|
| 317 |
+
*
|
| 318 |
+
* The lambda must be stateless, i.e. not have a capture. If your kernel
|
| 319 |
+
* needs to store some configuration parameters, write the kernel as a
|
| 320 |
+
* functor instead.
|
| 321 |
+
*
|
| 322 |
+
* Example:
|
| 323 |
+
*
|
| 324 |
+
* > static auto registry = c10::RegisterOperators()
|
| 325 |
+
* > .op(c10::RegisterOperators::options()
|
| 326 |
+
* > .schema("my_op")
|
| 327 |
+
* > .kernel(DispatchKey::CPU, [] (Tensor a) -> Tensor {...}));
|
| 328 |
+
*/
|
| 329 |
+
template<class Lambda>
// enable_if: only enable it if Lambda is a functor (note: lambdas are functors)
// and is not the boxed-kernel signature (that is an internal API).
std::enable_if_t<
    guts::is_functor<std::decay_t<Lambda>>::value
    && !std::is_same<typename guts::infer_function_traits_t<std::decay_t<Lambda>>::func_type, KernelFunction::BoxedKernelFunction>::value,
    Options&&> kernel(DispatchKey dispatch_key, Lambda&& functor) && {
  static_assert(!std::is_base_of<OperatorKernel, std::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel is only meant to be used with lambdas. Your kernel is a functor. Please use the kernel<Functor>() API instead.");

  // We don't support stateful lambdas (i.e. lambdas with a capture), because their
  // behavior would be nonobvious. A functor kernel with cache gets a new instance of
  // its cache each time the kernel is looked up from the dispatch table.
  // A lambda with a capture would be global and share its capture between all kernel lookups.
  // So, instead of making users having to think about it (including the thread-safety
  // issues this causes), let's just forbid stateful lambdas altogether.
  static_assert(guts::is_stateless_lambda<std::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel only works for stateless lambdas (i.e. lambdas without captures). If you need a cache, please use the functor based API kernel<Functor>() instead.");

  return std::move(*this).kernel(
      dispatch_key,
      KernelFunction::makeFromUnboxedLambda(std::forward<Lambda>(functor)),
      impl::CppSignature::make<Lambda>(),
      // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
      detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>()
  );
}
|
| 353 |
+
|
| 354 |
+
/**
|
| 355 |
+
* Use this to register an operator whose kernel is implemented as a lambda.
|
| 356 |
+
* The kernel is a catch-all kernel, meaning it's called independent from
|
| 357 |
+
* the input. Dispatch is disabled for this operator.
|
| 358 |
+
*
|
| 359 |
+
* The lambda must be stateless, i.e. not have a capture. If your kernel
|
| 360 |
+
* needs to store some configuration parameters, write the kernel as a
|
| 361 |
+
* functor instead.
|
| 362 |
+
*
|
| 363 |
+
* Example:
|
| 364 |
+
*
|
| 365 |
+
* > static auto registry = c10::RegisterOperators()
|
| 366 |
+
* > .op(c10::RegisterOperators::options()
|
| 367 |
+
* > .schema("my_op")
|
| 368 |
+
* > .catchAllKernel([] (Tensor a) -> Tensor {...}));
|
| 369 |
+
*/
|
| 370 |
+
template<class Lambda>
// enable_if: only enable it if Lambda is a functor (note: lambdas are functors)
// and is not the boxed-kernel signature (that is an internal API).
std::enable_if_t<
    guts::is_functor<std::decay_t<Lambda>>::value
    && !std::is_same<typename guts::infer_function_traits_t<std::decay_t<Lambda>>::func_type, KernelFunction::BoxedKernelFunction>::value,
    Options&&> catchAllKernel(Lambda&& lambda) && {
  static_assert(!std::is_base_of<OperatorKernel, std::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel is only meant to be used with lambdas. Your kernel is a functor. Please use the kernel<Functor>() API instead.");

  // We don't support stateful lambdas (i.e. lambdas with a capture), because their
  // behavior would be nonobvious.
  // A lambda with a capture would be global and share its capture between all kernel lookups.
  // This would be a likely source for unexpected race conditions, so we forbid it.
  // If a kernel really needs global state, they can just have regular global state
  // in their .cpp file next to the kernel lambda.
  static_assert(guts::is_stateless_lambda<std::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel only works for stateless lambdas (i.e. lambdas without captures). If you need a cache, please use the functor based API kernel<Functor>() instead.");

  // Registered as a catch-all (nullopt dispatch key → dispatch disabled).
  return std::move(*this).kernel(
      c10::nullopt,
      KernelFunction::makeFromUnboxedLambda(std::forward<Lambda>(lambda)),
      impl::CppSignature::make<Lambda>(),
      // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
      detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>()
  );
}
|
| 394 |
+
|
| 395 |
+
// Sets the alias-analysis strategy for this operator registration.
// May only be called once per registration (enforced by TORCH_CHECK).
// Rvalue-qualified so it can be chained fluently on a temporary Options.
Options&& aliasAnalysis(AliasAnalysisKind aliasAnalysisKind) && {
  TORCH_CHECK(!aliasAnalysisKind_.has_value(), "You can only call aliasAnalysis() once per operator registration.");
  aliasAnalysisKind_ = aliasAnalysisKind;
  return std::move(*this);
}
|
| 400 |
+
|
| 401 |
+
private:
|
| 402 |
+
// Private worker shared by all public kernel()/catchAllKernel() overloads:
// bundles the (optional) dispatch key, the type-erased kernel, its C++
// signature, and the inferred schema into one KernelRegistrationConfig and
// appends it to this Options' kernel list.
Options&& kernel(c10::optional<DispatchKey> dispatch_key, KernelFunction&& func, c10::optional<impl::CppSignature> cpp_signature, std::unique_ptr<FunctionSchema>&& inferred_function_schema) && {
  KernelRegistrationConfig config;
  config.dispatch_key = dispatch_key;
  config.func = std::move(func);
  config.cpp_signature = cpp_signature;
  config.inferred_function_schema = std::move(inferred_function_schema);
  kernels.push_back(std::move(config));
  return std::move(*this);
}
|
| 411 |
+
|
| 412 |
+
// Private default constructor: Options instances are only created through
// RegisterOperators::options(). Starts with no schema, no kernels, and no
// alias-analysis setting.
Options()
: schemaOrName_(c10::nullopt)
, kernels()
, aliasAnalysisKind_(c10::nullopt)
{}
|
| 417 |
+
|
| 418 |
+
// KernelRegistrationConfig accumulates all information from the config
// parameters passed to a RegisterOperators::op() call into one object.
struct KernelRegistrationConfig final {
  KernelRegistrationConfig()
  : dispatch_key(c10::nullopt)
  , func()
  , cpp_signature(c10::nullopt)
  , inferred_function_schema(nullptr)
  {}

  // Dispatch key this kernel is registered for; nullopt means catch-all.
  c10::optional<DispatchKey> dispatch_key;
  // The type-erased kernel implementation.
  KernelFunction func;
  // C++ signature of the unboxed kernel, used for signature checking
  // when present.
  c10::optional<impl::CppSignature> cpp_signature;
  // Schema inferred from the kernel's C++ signature, if any.
  std::unique_ptr<FunctionSchema> inferred_function_schema;
};

// Either just an operator name (schema to be inferred) or a full schema.
c10::optional<std::variant<OperatorName, FunctionSchema>> schemaOrName_;

// All kernels accumulated for this registration.
std::vector<KernelRegistrationConfig> kernels;
optional<AliasAnalysisKind> aliasAnalysisKind_;
friend class RegisterOperators;
friend class Library;
};
|
| 441 |
+
|
| 442 |
+
/**
 * Call this to get an instance of registration options, which
 * can be passed to a call to RegisterOperators::op() to specify
 * these options for the operator registration.
 * See class doc comment for examples.
 */
static Options options() {
  return {};
}
|
| 451 |
+
|
| 452 |
+
/**
 * Call this to register an operator. See class doc comment for examples.
 */
RegisterOperators&& op(Options&& options) && {
  checkSchemaAndRegisterOp_(std::move(options));
  return std::move(*this);
}

// Regular mutator version of the && version above
RegisterOperators& op(Options&& options) & {
  checkSchemaAndRegisterOp_(std::move(options));
  return *this;
}
|
| 465 |
+
|
| 466 |
+
/**
 * This is a shorthand for RegisterOperators::op(Options) where you can
 * specify the operator schema outside of the options parameter.
 * See class doc comment for examples.
 */
RegisterOperators&& op(const std::string& schemaOrName, Options&& options = RegisterOperators::options()) && {
  return std::move(*this).op(std::move(options).schema(schemaOrName));
}

// internal only for registering caffe2 ops
RegisterOperators&& op(FunctionSchema schema, Options&& options) && {
  return std::move(*this).op(std::move(options).schema(std::move(schema)));
}

// Convenience constructor: constructs an empty registry, then immediately
// registers one operator via the (schema, kernel, options) op() overload.
template<class FuncType>
explicit RegisterOperators(const std::string& schemaOrName, FuncType&& func, Options&& options = RegisterOperators::options())
: RegisterOperators() {
  std::move(*this).op(schemaOrName, std::forward<FuncType>(func), std::move(options));
}
|
| 485 |
+
|
| 486 |
+
/**
 * This API registers an operator based on a kernel function pointer.
 *
 * Given a kernel
 *
 * > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} }
 *
 * This API looks like:
 *
 * > static auto registry = c10::RegisterOperators()
 * >     .op("my_op", &my_kernel_cpu);
 *
 * If your kernel is small and the overhead of calling it matters,
 * then this API might be the wrong choice since the following API
 * has a slightly lower overhead for calling into the kernel:
 *
 * > static auto registry = c10::RegisterOperators()
 * >     .op("my_op", c10::RegisterOperators::options()
 * >         .kernel<decltype(my_kernel_cpu), &my_kernel_cpu>());
 *
 * Or, alternatively, write your kernel as a functor:
 *
 * > namespace {
 * >   class my_kernel_cpu final : public c10::OperatorKernel {
 * >   public:
 * >     Tensor operator()(Tensor a, Tensor b) {...}
 * >   };
 * > }
 * >
 * > static auto registry = c10::RegisterOperators()
 * >     .op("my_op", c10::RegisterOperators::options()
 * >         .kernel<my_kernel_cpu>());
 */
template<class FuncType>
// enable_if: only enable it if FuncType is actually a function, but not a stack based BoxedKernelFunction.
std::enable_if_t<guts::is_function_type<FuncType>::value && !std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, RegisterOperators&&>
op(const std::string& schemaOrName, FuncType* func, Options&& options = RegisterOperators::options()) && {
  // This deprecated-style entry point allows legacy argument/return types.
  constexpr bool AllowLegacyTypes = true;
  // Registered as a catch-all kernel (dispatch key is c10::nullopt).
  return std::move(*this).op(std::move(options).schema(schemaOrName).kernel(
      c10::nullopt,
      KernelFunction::makeFromUnboxedRuntimeFunction<AllowLegacyTypes>(func),
      impl::CppSignature::make<FuncType>(),
      // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
      detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<FuncType>>>()
  ));
}
|
| 532 |
+
|
| 533 |
+
/**
 * This API registers an operator based on a kernel lambda.
 *
 * This API looks like:
 *
 * > static auto registry = c10::RegisterOperators()
 * >     .op("my_op", [] (Tensor a, Tensor b) {...});
 *
 * This is equivalent to:
 *
 * > static auto registry = c10::RegisterOperators()
 * >     .op("my_op", c10::RegisterOperators::options()
 * >         .catchAllKernel([] (Tensor a, Tensor b) {...}));
 *
 */
template<class Lambda>
// enable_if: only enable it if Lambda is actually a stateless lambda
std::enable_if_t<guts::is_functor<Lambda>::value && guts::is_stateless_lambda<std::decay_t<Lambda>>::value, RegisterOperators&&>
op(const std::string& schemaOrName, Lambda&& lambda, Options&& options = RegisterOperators::options()) && {
  // Functor classes must go through the options().kernel<Functor>() API.
  static_assert(!std::is_base_of<OperatorKernel, Lambda>::value, "c10::OperatorKernel is part of the new kernel registration API and shouldn't be used together with the deprecated registration API. Please use the new RegisterOperators::options().kernel() based API instead.");

  // Deprecated-style entry point: legacy types allowed, catch-all dispatch.
  constexpr bool AllowLegacyTypes = true;
  return std::move(*this).op(std::move(options).schema(schemaOrName).kernel(
      c10::nullopt,
      KernelFunction::makeFromUnboxedLambda<AllowLegacyTypes>(std::forward<Lambda>(lambda)),
      impl::CppSignature::make<Lambda>(),
      // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
      detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>()
  ));
}
|
| 563 |
+
|
| 564 |
+
// Deprecated overload for lambdas WITH a capture. Kept only for backwards
// compatibility; the capture is shared across all kernel invocations, which
// is a likely source of race conditions (hence the deprecation message).
template<class Lambda>
C10_DEPRECATED_MESSAGE("Registering operator kernels with stateful lambdas (i.e. lambdas with a capture) has non-obvious behavior. This is deprecated. Please use a lambda without a capture or a functor class instead.")
// enable_if: only enable it if Lambda is actually a functor but not a stateless lambda
std::enable_if_t<guts::is_functor<Lambda>::value && !guts::is_stateless_lambda<std::decay_t<Lambda>>::value, RegisterOperators&&>
op(const std::string& schemaOrName, Lambda&& lambda, Options&& options = RegisterOperators::options()) && {
  static_assert(!std::is_base_of<OperatorKernel, Lambda>::value, "c10::OperatorKernel is part of the new kernel registration API and shouldn't be used together with the deprecated registration API. Please use the new RegisterOperators::options().kernel() based API instead.");

  constexpr bool AllowLegacyTypes = true;
  return std::move(*this).op(std::move(options).schema(schemaOrName).kernel(
      c10::nullopt,
      KernelFunction::makeFromUnboxedLambda<AllowLegacyTypes>(std::forward<Lambda>(lambda)),
      impl::CppSignature::make<Lambda>(),
      // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
      detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>()
  ));
}
|
| 580 |
+
|
| 581 |
+
private:
// Validates the accumulated Options (schema present or inferable, no
// duplicate kernels) and performs the actual registration.
void checkSchemaAndRegisterOp_(Options&& config);

// Builds a FunctionSchema from the kernels' inferred schemas when the user
// only supplied an operator name.
static c10::FunctionSchema inferSchemaFromKernels_(const OperatorName& opNameStr, const Options& options);
void checkNoDuplicateKernels_(const Options& options);
void registerOp_(Options&& options);

// RAII handles; destroying this RegisterOperators deregisters everything.
std::vector<RegistrationHandleRAII> registrars_;
};

} // namespace c10

namespace torch {
// Old-style API
using RegisterOperators = c10::RegisterOperators;
}
|
moondream/lib/python3.10/site-packages/torch/include/ATen/cpu/FlushDenormal.h
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

/// Flush-To-Zero and Denormals-Are-Zero mode
///
/// Flush-To-Zero (FTZ) and Denormals-Are-Zero (DAZ) are modes that bypass
/// IEEE 754 methods of dealing with denormal floating-point numbers on x86-64
/// and some x86 CPUs. They result in reduced precision for values near zero,
/// but increased performance.
///
/// See https://software.intel.com/en-us/articles/x87-and-sse-floating-point-assists-in-ia-32-flush-to-zero-ftz-and-denormals-are-zero-daz

namespace at::cpu {

/// Turn FTZ/DAZ on or off.
/// NOTE(review): return semantics inferred from the name (presumably true if
/// the mode change is supported/applied) — confirm against the definition.
bool set_flush_denormal(bool on);

} // namespace at::cpu
|
moondream/lib/python3.10/site-packages/torch/include/ATen/cpu/Utils.h
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

#include <c10/macros/Export.h>

namespace at::cpu {

// Detect if CPU support Vector Neural Network Instruction.
TORCH_API bool is_cpu_support_vnni();

} // namespace at::cpu
|
moondream/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional.h
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/cpu/vec/functional_base.h>
|
| 4 |
+
#include <ATen/cpu/vec/functional_bfloat16.h>
|
moondream/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_base.h
ADDED
|
@@ -0,0 +1,329 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
|
| 4 |
+
// See Note [Do not compile initializers with AVX]
|
| 5 |
+
|
| 6 |
+
#include <ATen/cpu/vec/vec.h>
|
| 7 |
+
#include <c10/util/irange.h>
|
| 8 |
+
|
| 9 |
+
namespace at::vec {
|
| 10 |
+
|
| 11 |
+
// slow path
// Horizontally reduces the first `size` lanes of acc_vec with vec_fun by
// spilling to a scalar array and folding one lane at a time (each lane is
// re-broadcast into lane 0 of a fresh vector before applying vec_fun).
// Only lane 0 of the final vector is meaningful and is returned.
template <typename scalar_t, typename Op>
inline scalar_t vec_reduce_all(
    const Op& vec_fun,
    vec::Vectorized<scalar_t> acc_vec,
    int64_t size) {
  using Vec = vec::Vectorized<scalar_t>;
  scalar_t acc_arr[Vec::size()];
  acc_vec.store(acc_arr);
  for (const auto i : c10::irange(1, size)) {
    // Build a vector whose lane 0 holds element i, zeros elsewhere,
    // then combine it into the accumulator.
    std::array<scalar_t, Vec::size()> acc_arr_next = {0};
    acc_arr_next[0] = acc_arr[i];
    Vec acc_vec_next = Vec::loadu(acc_arr_next.data());
    acc_vec = vec_fun(acc_vec, acc_vec_next);
  }
  acc_vec.store(acc_arr);
  return acc_arr[0];
}
|
| 29 |
+
|
| 30 |
+
// Dispatch point for full-width horizontal reduction. The generic version
// falls back to the slow lane-by-lane vec_reduce_all above; faster float
// specializations (shuffle-based) are provided below for AVX2/AVX512.
template <typename scalar_t, typename Op>
struct VecReduceAllSIMD {
  static inline scalar_t apply(const Op& vec_fun, const Vectorized<scalar_t>& acc_vec) {
    return vec_reduce_all(vec_fun, acc_vec, Vectorized<scalar_t>::size());
  }
};
|
| 36 |
+
|
| 37 |
+
// Fast float reductions using log2(width) shuffle+combine steps instead of
// the O(width) slow path. Guarded to GCC>5 non-MSVC non-mobile builds.
#if defined(__GNUC__) && (__GNUC__ > 5) && !defined(_MSC_VER) && !defined(C10_MOBILE)
#if defined(CPU_CAPABILITY_AVX2)
template <typename Op>
struct VecReduceAllSIMD<float, Op> {
  static inline float apply(const Op& vec_fun, const Vectorized<float>& acc_vec) {
    using Vec = Vectorized<float>;
    Vec v = acc_vec;
    // 128-bit shuffle: fold the upper lane half onto the lower half.
    Vec v1 = _mm256_permute2f128_ps(v, v, 0x1);
    v = vec_fun(v, v1);
    // 64-bit shuffle
    v1 = _mm256_shuffle_ps(v, v, 0x4E);
    v = vec_fun(v, v1);
    // 32-bit shuffle
    v1 = _mm256_shuffle_ps(v, v, 0xB1);
    v = vec_fun(v, v1);
    // All lanes now hold the reduction; extract lane 0.
    return _mm256_cvtss_f32(v);
  }
};
#endif // defined(CPU_CAPABILITY_AVX2)
#if defined(CPU_CAPABILITY_AVX512)
template <typename Op>
struct VecReduceAllSIMD<float, Op> {
  static inline float apply(const Op& vec_fun, const Vectorized<float>& acc_vec) {
    using Vec = Vectorized<float>;
    Vec v = acc_vec;
    // 256-bit shuffle
    Vec v1 = _mm512_shuffle_f32x4(v, v, 0x4E);
    v = vec_fun(v, v1);
    // 128-bit shuffle
    v1 = _mm512_shuffle_f32x4(v, v, 0xB1);
    v = vec_fun(v, v1);
    // 64-bit shuffle
    v1 = _mm512_shuffle_ps(v, v, 0x4E);
    v = vec_fun(v, v1);
    // 32-bit shuffle
    v1 = _mm512_shuffle_ps(v, v, 0xB1);
    v = vec_fun(v, v1);
    return _mm512_cvtss_f32(v);
  }
};
#endif // defined(CPU_CAPABILITY_AVX512)
#endif // defined(__GNUC__) && (__GNUC__ > 5) && !defined(_MSC_VER) && !defined(C10_MOBILE)
|
| 80 |
+
|
| 81 |
+
// Full-width horizontal reduction: delegates to VecReduceAllSIMD so float on
// AVX2/AVX512 takes the fast shuffle path and everything else the slow path.
template <typename scalar_t, typename Op>
inline scalar_t vec_reduce_all(const Op& vec_fun, const Vectorized<scalar_t>& acc_vec) {
  return VecReduceAllSIMD<scalar_t, Op>::apply(vec_fun, acc_vec);
}
|
| 85 |
+
|
| 86 |
+
// Reduces data[0..size) to a scalar with vec_fun. Disabled for reduced-
// precision float types (BFloat16/Half) — those use the specializations in
// functional_bfloat16.h. Assumes size >= 1.
template <typename scalar_t, typename Op,
          typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
inline scalar_t reduce_all(const Op& vec_fun, const scalar_t* data, int64_t size) {
  using Vec = vec::Vectorized<scalar_t>;
  // Sub-vector-width input: one masked load, then a partial reduction.
  if (size < Vec::size())
    return vec_reduce_all(vec_fun, Vec::loadu(data, size), size);
  int64_t d = Vec::size();
  Vec acc_vec = Vec::loadu(data);
  // Main loop over full vector chunks.
  for (; d < size - (size % Vec::size()); d += Vec::size()) {
    Vec data_vec = Vec::loadu(data + d);
    acc_vec = vec_fun(acc_vec, data_vec);
  }
  // Tail: masked load, and Vec::set keeps the untouched lanes of acc_vec
  // so garbage lanes of the partial load don't pollute the result.
  if (size - d > 0) {
    Vec data_vec = Vec::loadu(data + d, size - d);
    acc_vec = Vec::set(acc_vec, vec_fun(acc_vec, data_vec), size - d);
  }
  return vec_reduce_all(vec_fun, acc_vec);
}
|
| 104 |
+
|
| 105 |
+
// similar to reduce_all, but reduces into two outputs
// (one pass over the data, two independent accumulators/ops).
template <typename scalar_t, typename Op1, typename Op2,
          typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
inline std::pair<scalar_t, scalar_t> reduce2_all(const Op1& vec_fun1, const Op2& vec_fun2,
                                                 const scalar_t* data, int64_t size) {
  using Vec = vec::Vectorized<scalar_t>;
  // Sub-vector-width input: load once, reduce twice.
  if (size < Vec::size()) {
    auto loaded_data = Vec::loadu(data, size);
    return std::pair<scalar_t, scalar_t>(
        vec_reduce_all(vec_fun1, loaded_data, size),
        vec_reduce_all(vec_fun2, loaded_data, size));
  }
  int64_t d = Vec::size();
  Vec acc_vec1 = Vec::loadu(data);
  Vec acc_vec2 = Vec::loadu(data);
  for (; d < size - (size % Vec::size()); d += Vec::size()) {
    Vec data_vec = Vec::loadu(data + d);
    acc_vec1 = vec_fun1(acc_vec1, data_vec);
    acc_vec2 = vec_fun2(acc_vec2, data_vec);
  }
  // Tail handling mirrors reduce_all: Vec::set preserves unused lanes.
  if (size - d > 0) {
    Vec data_vec = Vec::loadu(data + d, size - d);
    acc_vec1 = Vec::set(acc_vec1, vec_fun1(acc_vec1, data_vec), size - d);
    acc_vec2 = Vec::set(acc_vec2, vec_fun2(acc_vec2, data_vec), size - d);
  }
  return std::pair<scalar_t, scalar_t>(
      vec_reduce_all(vec_fun1, acc_vec1),
      vec_reduce_all(vec_fun2, acc_vec2));
}
|
| 134 |
+
|
| 135 |
+
// Applies map_fun elementwise to data[0..size), then reduces the mapped
// values with red_fun. Fused so the mapped array is never materialized.
template <typename scalar_t, typename MapOp, typename ReduceOp,
          typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
inline scalar_t map_reduce_all(
    const MapOp& map_fun,
    const ReduceOp& red_fun,
    const scalar_t* data,
    int64_t size) {
  using Vec = vec::Vectorized<scalar_t>;
  if (size < Vec::size())
    return vec_reduce_all(red_fun, map_fun(Vec::loadu(data, size)), size);
  int64_t d = Vec::size();
  Vec acc_vec = map_fun(Vec::loadu(data));
  for (; d < size - (size % Vec::size()); d += Vec::size()) {
    Vec data_vec = Vec::loadu(data + d);
    data_vec = map_fun(data_vec);
    acc_vec = red_fun(acc_vec, data_vec);
  }
  // Tail: Vec::set keeps the accumulator's untouched lanes valid.
  if (size - d > 0) {
    Vec data_vec = Vec::loadu(data + d, size - d);
    data_vec = map_fun(data_vec);
    acc_vec = Vec::set(acc_vec, red_fun(acc_vec, data_vec), size - d);
  }
  return vec_reduce_all(red_fun, acc_vec);
}
|
| 159 |
+
|
| 160 |
+
// Like map_reduce_all, but the map is binary: combines data[i] and data2[i]
// with map_fun before reducing with red_fun. Both inputs must have `size`
// valid elements.
template <typename scalar_t, typename MapOp, typename ReduceOp,
          typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
inline scalar_t map2_reduce_all(
    const MapOp& map_fun,
    const ReduceOp& red_fun,
    const scalar_t* data,
    const scalar_t* data2,
    int64_t size) {
  using Vec = vec::Vectorized<scalar_t>;
  if (size < Vec::size()) {
    Vec data_vec = Vec::loadu(data, size);
    Vec data2_vec = Vec::loadu(data2, size);
    data_vec = map_fun(data_vec, data2_vec);
    return vec_reduce_all(red_fun, data_vec, size);
  }
  int64_t d = Vec::size();
  Vec acc_vec = map_fun(Vec::loadu(data), Vec::loadu(data2));
  for (; d < size - (size % Vec::size()); d += Vec::size()) {
    Vec data_vec = Vec::loadu(data + d);
    Vec data2_vec = Vec::loadu(data2 + d);
    data_vec = map_fun(data_vec, data2_vec);
    acc_vec = red_fun(acc_vec, data_vec);
  }
  // Tail: Vec::set keeps the accumulator's untouched lanes valid.
  if (size - d > 0) {
    Vec data_vec = Vec::loadu(data + d, size - d);
    Vec data2_vec = Vec::loadu(data2 + d, size - d);
    data_vec = map_fun(data_vec, data2_vec);
    acc_vec = Vec::set(acc_vec, red_fun(acc_vec, data_vec), size - d);
  }
  return vec_reduce_all(red_fun, acc_vec);
}
|
| 191 |
+
|
| 192 |
+
// Ternary-map variant of map_reduce_all: combines data[i], data2[i] and
// data3[i] with map_fun, then reduces with red_fun. All three inputs must
// have `size` valid elements.
template <typename scalar_t, typename MapOp, typename ReduceOp,
          typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
inline scalar_t map3_reduce_all(
    const MapOp& map_fun,
    const ReduceOp& red_fun,
    const scalar_t* data,
    const scalar_t* data2,
    const scalar_t* data3,
    int64_t size) {
  using Vec = vec::Vectorized<scalar_t>;
  if (size < Vec::size()) {
    Vec data_vec = Vec::loadu(data, size);
    Vec data2_vec = Vec::loadu(data2, size);
    Vec data3_vec = Vec::loadu(data3, size);
    data_vec = map_fun(data_vec, data2_vec, data3_vec);
    return vec_reduce_all(red_fun, data_vec, size);
  }

  int64_t d = Vec::size();
  Vec acc_vec = map_fun(Vec::loadu(data), Vec::loadu(data2), Vec::loadu(data3));
  for (; d < size - (size % Vec::size()); d += Vec::size()) {
    Vec data_vec = Vec::loadu(data + d);
    Vec data2_vec = Vec::loadu(data2 + d);
    Vec data3_vec = Vec::loadu(data3 + d);
    data_vec = map_fun(data_vec, data2_vec, data3_vec);
    acc_vec = red_fun(acc_vec, data_vec);
  }
  // Tail: Vec::set keeps the accumulator's untouched lanes valid.
  if (size - d > 0) {
    Vec data_vec = Vec::loadu(data + d, size - d);
    Vec data2_vec = Vec::loadu(data2 + d, size - d);
    Vec data3_vec = Vec::loadu(data3 + d, size - d);
    data_vec = map_fun(data_vec, data2_vec, data3_vec);
    acc_vec = Vec::set(acc_vec, red_fun(acc_vec, data_vec), size - d);
  }
  return vec_reduce_all(red_fun, acc_vec);
}
|
| 228 |
+
|
| 229 |
+
template <typename scalar_t, typename Op,
|
| 230 |
+
typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
|
| 231 |
+
inline void map(
|
| 232 |
+
const Op& vec_fun,
|
| 233 |
+
scalar_t* output_data,
|
| 234 |
+
const scalar_t* input_data,
|
| 235 |
+
int64_t size) {
|
| 236 |
+
using Vec = vec::Vectorized<scalar_t>;
|
| 237 |
+
int64_t d = 0;
|
| 238 |
+
for (; d < size - (size % Vec::size()); d += Vec::size()) {
|
| 239 |
+
Vec output_vec = vec_fun(Vec::loadu(input_data + d));
|
| 240 |
+
output_vec.store(output_data + d);
|
| 241 |
+
}
|
| 242 |
+
if (size - d > 0) {
|
| 243 |
+
Vec output_vec = vec_fun(Vec::loadu(input_data + d, size - d));
|
| 244 |
+
output_vec.store(output_data + d, size - d);
|
| 245 |
+
}
|
| 246 |
+
}
|
| 247 |
+
|
| 248 |
+
template <typename scalar_t, typename Op,
|
| 249 |
+
typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
|
| 250 |
+
inline void map2(
|
| 251 |
+
const Op& vec_fun,
|
| 252 |
+
scalar_t* output_data,
|
| 253 |
+
const scalar_t* input_data,
|
| 254 |
+
const scalar_t* input_data2,
|
| 255 |
+
int64_t size) {
|
| 256 |
+
using Vec = vec::Vectorized<scalar_t>;
|
| 257 |
+
int64_t d = 0;
|
| 258 |
+
for (; d < size - (size % Vec::size()); d += Vec::size()) {
|
| 259 |
+
Vec data_vec = Vec::loadu(input_data + d);
|
| 260 |
+
Vec data_vec2 = Vec::loadu(input_data2 + d);
|
| 261 |
+
Vec output_vec = vec_fun(data_vec, data_vec2);
|
| 262 |
+
output_vec.store(output_data + d);
|
| 263 |
+
}
|
| 264 |
+
if (size - d > 0) {
|
| 265 |
+
Vec data_vec = Vec::loadu(input_data + d, size - d);
|
| 266 |
+
Vec data_vec2 = Vec::loadu(input_data2 + d, size - d);
|
| 267 |
+
Vec output_vec = vec_fun(data_vec, data_vec2);
|
| 268 |
+
output_vec.store(output_data + d, size - d);
|
| 269 |
+
}
|
| 270 |
+
}
|
| 271 |
+
|
| 272 |
+
template <typename scalar_t, typename Op,
|
| 273 |
+
typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
|
| 274 |
+
inline void map3(
|
| 275 |
+
const Op& vec_fun,
|
| 276 |
+
scalar_t* output_data,
|
| 277 |
+
const scalar_t* input_data1,
|
| 278 |
+
const scalar_t* input_data2,
|
| 279 |
+
const scalar_t* input_data3,
|
| 280 |
+
int64_t size) {
|
| 281 |
+
using Vec = vec::Vectorized<scalar_t>;
|
| 282 |
+
int64_t d = 0;
|
| 283 |
+
for (; d < size - (size % Vec::size()); d += Vec::size()) {
|
| 284 |
+
Vec data_vec1 = Vec::loadu(input_data1 + d);
|
| 285 |
+
Vec data_vec2 = Vec::loadu(input_data2 + d);
|
| 286 |
+
Vec data_vec3 = Vec::loadu(input_data3 + d);
|
| 287 |
+
Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3);
|
| 288 |
+
output_vec.store(output_data + d);
|
| 289 |
+
}
|
| 290 |
+
if (size - d > 0) {
|
| 291 |
+
Vec data_vec1 = Vec::loadu(input_data1 + d, size - d);
|
| 292 |
+
Vec data_vec2 = Vec::loadu(input_data2 + d, size - d);
|
| 293 |
+
Vec data_vec3 = Vec::loadu(input_data3 + d, size - d);
|
| 294 |
+
Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3);
|
| 295 |
+
output_vec.store(output_data + d, size - d);
|
| 296 |
+
}
|
| 297 |
+
}
|
| 298 |
+
|
| 299 |
+
template <typename scalar_t, typename Op,
|
| 300 |
+
typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
|
| 301 |
+
inline void map4(
|
| 302 |
+
const Op& vec_fun,
|
| 303 |
+
scalar_t* output_data,
|
| 304 |
+
const scalar_t* input_data1,
|
| 305 |
+
const scalar_t* input_data2,
|
| 306 |
+
const scalar_t* input_data3,
|
| 307 |
+
const scalar_t* input_data4,
|
| 308 |
+
int64_t size) {
|
| 309 |
+
using Vec = vec::Vectorized<scalar_t>;
|
| 310 |
+
int64_t d = 0;
|
| 311 |
+
for (; d < size - (size % Vec::size()); d += Vec::size()) {
|
| 312 |
+
Vec data_vec1 = Vec::loadu(input_data1 + d);
|
| 313 |
+
Vec data_vec2 = Vec::loadu(input_data2 + d);
|
| 314 |
+
Vec data_vec3 = Vec::loadu(input_data3 + d);
|
| 315 |
+
Vec data_vec4 = Vec::loadu(input_data4 + d);
|
| 316 |
+
Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3, data_vec4);
|
| 317 |
+
output_vec.store(output_data + d);
|
| 318 |
+
}
|
| 319 |
+
if (size - d > 0) {
|
| 320 |
+
Vec data_vec1 = Vec::loadu(input_data1 + d, size - d);
|
| 321 |
+
Vec data_vec2 = Vec::loadu(input_data2 + d, size - d);
|
| 322 |
+
Vec data_vec3 = Vec::loadu(input_data3 + d, size - d);
|
| 323 |
+
Vec data_vec4 = Vec::loadu(input_data4 + d, size - d);
|
| 324 |
+
Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3, data_vec4);
|
| 325 |
+
output_vec.store(output_data + d, size - d);
|
| 326 |
+
}
|
| 327 |
+
}
|
| 328 |
+
|
| 329 |
+
} // namespace at::vec
|
moondream/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_bfloat16.h
ADDED
|
@@ -0,0 +1,549 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
|
| 4 |
+
// See Note [Do not compile initializers with AVX]
|
| 5 |
+
|
| 6 |
+
#include <ATen/cpu/vec/vec.h>
|
| 7 |
+
|
| 8 |
+
namespace at::vec {
|
| 9 |
+
|
| 10 |
+
// BFloat16 specification
// Maps a scalar type to the element type used for vectorized accumulation:
// full-precision types accumulate as themselves, while reduced-precision
// floating-point types (BFloat16, Half) accumulate as float.
template <typename scalar_t> struct VecScalarType { using type = scalar_t; };
template <> struct VecScalarType<BFloat16> { using type = float; };
template <> struct VecScalarType<Half> { using type = float; };

// This is different from at::acc_type since we only need to specialize BFloat16
template <typename scalar_t>
using vec_scalar_t = typename VecScalarType<scalar_t>::type;
|
| 18 |
+
|
| 19 |
+
// Vector conversion between float and bfloat16/half
// Widens one reduced-precision vector into two float vectors (a float lane
// is twice as wide as a bf16/fp16 lane, so one bVec fills two fVecs).
template <typename scalar_t,
  typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline std::tuple<Vectorized<float>, Vectorized<float>> convert_to_float(const Vectorized<scalar_t>&);

template <>
inline std::tuple<Vectorized<float>, Vectorized<float>> convert_to_float<BFloat16> (const Vectorized<BFloat16>& a) {
  return convert_bfloat16_float(a);
}

template <>
inline std::tuple<Vectorized<float>, Vectorized<float>> convert_to_float<Half> (const Vectorized<Half>& a) {
  return convert_half_float(a);
}
|
| 33 |
+
|
| 34 |
+
// Narrows two float vectors back into one reduced-precision vector;
// inverse of convert_to_float above.
template <typename scalar_t,
  typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline Vectorized<scalar_t> convert_from_float(const Vectorized<float>&, const Vectorized<float>&);

template <>
inline Vectorized<BFloat16> convert_from_float<BFloat16>(const Vectorized<float>& a, const Vectorized<float>& b) {
  return convert_float_bfloat16(a, b);
}

template <>
inline Vectorized<Half> convert_from_float<Half>(const Vectorized<float>& a, const Vectorized<float>& b) {
  return convert_float_half(a, b);
}
|
| 47 |
+
|
| 48 |
+
// Loads one bVec worth of reduced-precision elements from memory and widens
// them directly into two float vectors (out1 = low half, out2 = high half).
template <typename scalar_t,
  typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline void load_to_float(const scalar_t *data, Vectorized<float> &out1, Vectorized<float> &out2);

template <>
inline void load_to_float<BFloat16> (const BFloat16 *data, Vectorized<float> &out1, Vectorized<float> &out2) {
  load_fp32_from_bf16(data, out1, out2);
}

template <>
inline void load_to_float<Half> (const Half *data, Vectorized<float> &out1, Vectorized<float> &out2) {
  load_fp32_from_fp16(data, out1, out2);
}
|
| 61 |
+
|
| 62 |
+
// Single-output overload: loads only enough reduced-precision elements to
// fill one float vector and widens them into `out`.
template <typename scalar_t,
  typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline void load_to_float(const scalar_t *data, Vectorized<float> &out);

template <>
inline void load_to_float<BFloat16> (const BFloat16 *data, Vectorized<float> &out) {
  load_fp32_from_bf16(data, out);
}

template <>
inline void load_to_float<Half> (const Half *data, Vectorized<float> &out) {
  load_fp32_from_fp16(data, out);
}
|
| 75 |
+
|
| 76 |
+
// Note that we already have specialized member of Vectorized<scalar_t> for BFloat16
|
| 77 |
+
// so the following functions would run smoothly:
|
| 78 |
+
// using Vec = Vectorized<BFloat16>;
|
| 79 |
+
// Vec one = Vec(BFloat16(1));
|
| 80 |
+
// vec::map([](Vec x) { return one / (one + x.exp()); }, y_ptr, x_ptr, N);
|
| 81 |
+
//
|
| 82 |
+
// Then why we still need to specialize "functional"?
|
| 83 |
+
// If we do specialization at Vectorized<> level, the above example would need 3 pairs of
|
| 84 |
+
// conversion of bf16->fp32/fp32->bf16, each for ".exp()", "+" and "/".
|
| 85 |
+
// If we do specialization at vec::map<>() level, we have only 1 pair of conversion
|
| 86 |
+
// of bf16->fp32/fp32->bf16, for the input and output BFloat16 vector only.
|
| 87 |
+
//
|
| 88 |
+
// The following BFloat16 functionality will only do data type conversion for input
|
| 89 |
+
// and output vector (reduce functionality will only convert the final scalar back to bf16).
|
| 90 |
+
// Compared to Vectorized<> specialization,
|
| 91 |
+
// 1. better performance since we have less data type conversion;
|
| 92 |
+
// 2. less rounding error since intermediate results are kept in fp32;
|
| 93 |
+
// 3. accumulation done on data type of fp32.
|
| 94 |
+
//
|
| 95 |
+
// If you plan to extend this file, please ensure adding unit tests at
|
| 96 |
+
// aten/src/ATen/test/vec_test_all_types.cpp
|
| 97 |
+
//
|
| 98 |
+
// Reduces all `size` elements of `data` to a single float with `vec_fun`.
// Each load widens a reduced-precision vector (bVec) into two float vectors
// (fVec); all accumulation happens in fp32.
template <typename scalar_t, typename Op,
  typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline float reduce_all(const Op& vec_fun, const scalar_t* data, int64_t size) {
  using bVec = vec::Vectorized<scalar_t>;
  using fVec = vec::Vectorized<float>;
  if (size < bVec::size()) {
    // Input fits in (part of) a single reduced-precision vector.
    bVec data_bvec = bVec::loadu(data, size);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    if (size > fVec::size()) {
      // Valid lanes span both float halves: fold the partially-valid high
      // half into the low one; fVec::set() keeps lanes past the valid count
      // unchanged so unloaded lanes never contaminate the result.
      data_fvec0 = fVec::set(data_fvec0, vec_fun(data_fvec0, data_fvec1), size - fVec::size());
      return vec_reduce_all<float>(vec_fun, data_fvec0, fVec::size());
    } else {
      // All valid lanes are in the low float half.
      return vec_reduce_all<float>(vec_fun, data_fvec0, size);
    }
  }
  // Seed the fp32 accumulators with the first full vector ...
  int64_t d = bVec::size();
  bVec acc_bvec = bVec::loadu(data);
  auto [acc_fvec0, acc_fvec1] = convert_to_float<scalar_t>(acc_bvec);
  // ... then fold in the remaining full vectors.
  for (; d < size - (size % bVec::size()); d += bVec::size()) {
    bVec data_bvec = bVec::loadu(data + d);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    acc_fvec0 = vec_fun(acc_fvec0, data_fvec0);
    acc_fvec1 = vec_fun(acc_fvec1, data_fvec1);
  }
  if (size - d > 0) {
    // Tail: masked load of the leftover elements; fVec::set() masks the
    // accumulator update to the valid lane count.
    bVec data_bvec = bVec::loadu(data + d, size - d);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    if (size - d > fVec::size()) {
      acc_fvec0 = vec_fun(acc_fvec0, data_fvec0);
      acc_fvec1 = fVec::set(acc_fvec1, vec_fun(acc_fvec1, data_fvec1), size - d - fVec::size());
    } else {
      acc_fvec0 = fVec::set(acc_fvec0, vec_fun(acc_fvec0, data_fvec0), size - d);
    }
  }
  // Combine the two accumulator halves and reduce horizontally.
  acc_fvec0 = vec_fun(acc_fvec0, acc_fvec1);
  return vec_reduce_all<float>(vec_fun, acc_fvec0);
}
|
| 135 |
+
|
| 136 |
+
// Reduces `data` with two independent reduction ops in one pass over memory,
// returning {reduce(vec_fun1), reduce(vec_fun2)}. Accumulation is in fp32;
// the structure mirrors reduce_all.
// NOTE(review): each return below materializes std::pair<scalar_t, scalar_t>
// before converting to the std::pair<float, float> return type, rounding the
// results through the reduced-precision type — presumably intentional to
// match scalar_t precision; confirm before relying on full fp32 results.
template <typename scalar_t, typename Op1, typename Op2,
  typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline std::pair<float, float> reduce2_all(const Op1& vec_fun1, const Op2& vec_fun2,
    const scalar_t* data, int64_t size) {
  using bVec = vec::Vectorized<scalar_t>;
  using fVec = vec::Vectorized<float>;
  if (size < bVec::size()) {
    // Input fits in (part of) a single reduced-precision vector.
    bVec data_bvec = bVec::loadu(data, size);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    if (size > fVec::size()) {
      // Fold the partially-valid high half into the low one per op;
      // fVec::set() masks lanes beyond the valid count.
      fVec acc1_fvec = fVec::set(data_fvec0, vec_fun1(data_fvec0, data_fvec1), size - fVec::size());
      fVec acc2_fvec = fVec::set(data_fvec0, vec_fun2(data_fvec0, data_fvec1), size - fVec::size());
      return std::pair<scalar_t, scalar_t>(
          vec_reduce_all<float>(vec_fun1, acc1_fvec, fVec::size()),
          vec_reduce_all<float>(vec_fun2, acc2_fvec, fVec::size()));
    } else {
      return std::pair<scalar_t, scalar_t>(
          vec_reduce_all<float>(vec_fun1, data_fvec0, size),
          vec_reduce_all<float>(vec_fun2, data_fvec0, size));
    }
  }
  // Seed both accumulator pairs from the first full vector.
  int64_t d = bVec::size();
  bVec acc_bvec = bVec::loadu(data);
  auto [acc1_fvec0, acc1_fvec1] = convert_to_float<scalar_t>(acc_bvec);
  auto [acc2_fvec0, acc2_fvec1] = convert_to_float<scalar_t>(acc_bvec);
  // Main loop: one load feeds both reductions.
  for (; d < size - (size % bVec::size()); d += bVec::size()) {
    bVec data_bvec = bVec::loadu(data + d);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    acc1_fvec0 = vec_fun1(acc1_fvec0, data_fvec0);
    acc1_fvec1 = vec_fun1(acc1_fvec1, data_fvec1);
    acc2_fvec0 = vec_fun2(acc2_fvec0, data_fvec0);
    acc2_fvec1 = vec_fun2(acc2_fvec1, data_fvec1);
  }
  if (size - d > 0) {
    // Tail: masked accumulator updates keep invalid lanes untouched.
    bVec data_bvec = bVec::loadu(data + d, size - d);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    if (size - d > fVec::size()) {
      acc1_fvec0 = vec_fun1(acc1_fvec0, data_fvec0);
      acc1_fvec1 = fVec::set(acc1_fvec1, vec_fun1(acc1_fvec1, data_fvec1), size - d - fVec::size());
      acc2_fvec0 = vec_fun2(acc2_fvec0, data_fvec0);
      acc2_fvec1 = fVec::set(acc2_fvec1, vec_fun2(acc2_fvec1, data_fvec1), size - d - fVec::size());
    } else {
      acc1_fvec0 = fVec::set(acc1_fvec0, vec_fun1(acc1_fvec0, data_fvec0), size - d);
      acc2_fvec0 = fVec::set(acc2_fvec0, vec_fun2(acc2_fvec0, data_fvec0), size - d);
    }
  }
  // Combine halves and reduce horizontally, once per op.
  acc1_fvec0 = vec_fun1(acc1_fvec0, acc1_fvec1);
  acc2_fvec0 = vec_fun2(acc2_fvec0, acc2_fvec1);
  return std::pair<scalar_t, scalar_t>(
      vec_reduce_all<float>(vec_fun1, acc1_fvec0),
      vec_reduce_all<float>(vec_fun2, acc2_fvec0));
}
|
| 188 |
+
|
| 189 |
+
// Fused map + reduce: computes reduce(red_fun, map_fun(data[i])) over all
// `size` elements in a single pass, entirely in fp32.
template <typename scalar_t, typename MapOp, typename ReduceOp,
  typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline float map_reduce_all(
    const MapOp& map_fun,
    const ReduceOp& red_fun,
    const scalar_t* data,
    int64_t size) {
  using bVec = vec::Vectorized<scalar_t>;
  using fVec = vec::Vectorized<float>;
  if (size < bVec::size()) {
    // Input fits in (part of) a single reduced-precision vector.
    bVec data_bvec = bVec::loadu(data, size);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    if (size > fVec::size()) {
      // Map both halves, then fold the partially-valid high half into the
      // low one; fVec::set() masks lanes beyond the valid count.
      data_fvec0 = map_fun(data_fvec0);
      data_fvec1 = map_fun(data_fvec1);
      data_fvec0 = fVec::set(data_fvec0, red_fun(data_fvec0, data_fvec1), size - fVec::size());
      return vec_reduce_all<float>(red_fun, data_fvec0, fVec::size());
    } else {
      data_fvec0 = map_fun(data_fvec0);
      return vec_reduce_all<float>(red_fun, data_fvec0, size);
    }
  }
  // Seed the accumulators with the mapped first full vector.
  int64_t d = bVec::size();
  bVec acc_bvec = bVec::loadu(data);
  auto [acc_fvec0, acc_fvec1] = convert_to_float<scalar_t>(acc_bvec);
  acc_fvec0 = map_fun(acc_fvec0);
  acc_fvec1 = map_fun(acc_fvec1);
  // Main loop over full vectors: map, then fold into the accumulators.
  for (; d < size - (size % bVec::size()); d += bVec::size()) {
    bVec data_bvec = bVec::loadu(data + d);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    data_fvec0 = map_fun(data_fvec0);
    data_fvec1 = map_fun(data_fvec1);
    acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
    acc_fvec1 = red_fun(acc_fvec1, data_fvec1);
  }
  if (size - d > 0) {
    // Tail: masked accumulator update keeps invalid lanes untouched.
    bVec data_bvec = bVec::loadu(data + d, size - d);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    if (size - d > fVec::size()) {
      data_fvec0 = map_fun(data_fvec0);
      data_fvec1 = map_fun(data_fvec1);
      acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
      acc_fvec1 = fVec::set(acc_fvec1, red_fun(acc_fvec1, data_fvec1), size - d - fVec::size());
    } else {
      data_fvec0 = map_fun(data_fvec0);
      acc_fvec0 = fVec::set(acc_fvec0, red_fun(acc_fvec0, data_fvec0), size - d);
    }
  }
  // Combine halves and reduce horizontally.
  acc_fvec0 = red_fun(acc_fvec0, acc_fvec1);
  return vec_reduce_all<float>(red_fun, acc_fvec0);
}
|
| 240 |
+
|
| 241 |
+
// Fused binary map + reduce: computes
// reduce(red_fun, map_fun(data[i], data2[i])) over all `size` elements in a
// single pass, entirely in fp32. Both inputs must have at least `size`
// elements.
template <typename scalar_t, typename MapOp, typename ReduceOp,
  typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline float map2_reduce_all(
    const MapOp& map_fun,
    const ReduceOp& red_fun,
    const scalar_t* data,
    const scalar_t* data2,
    int64_t size) {
  using bVec = vec::Vectorized<scalar_t>;
  using fVec = vec::Vectorized<float>;
  if (size < bVec::size()) {
    // Inputs fit in (part of) a single reduced-precision vector each.
    bVec data_bvec = bVec::loadu(data, size);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    bVec data2_bvec = bVec::loadu(data2, size);
    auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
    if (size > fVec::size()) {
      // Map both halves, fold the partially-valid high half into the low
      // one; fVec::set() masks lanes beyond the valid count.
      data_fvec0 = map_fun(data_fvec0, data2_fvec0);
      data_fvec1 = map_fun(data_fvec1, data2_fvec1);
      data_fvec0 = fVec::set(data_fvec0, red_fun(data_fvec0, data_fvec1), size - fVec::size());
      return vec_reduce_all<float>(red_fun, data_fvec0, fVec::size());
    } else {
      data_fvec0 = map_fun(data_fvec0, data2_fvec0);
      return vec_reduce_all<float>(red_fun, data_fvec0, size);
    }
  }
  // Seed the accumulators with the mapped first full vectors.
  int64_t d = bVec::size();
  bVec acc_bvec = bVec::loadu(data);
  auto [acc_fvec0, acc_fvec1] = convert_to_float<scalar_t>(acc_bvec);
  bVec acc2_bvec = bVec::loadu(data2);
  auto [acc2_fvec0, acc2_fvec1] = convert_to_float<scalar_t>(acc2_bvec);
  acc_fvec0 = map_fun(acc_fvec0, acc2_fvec0);
  acc_fvec1 = map_fun(acc_fvec1, acc2_fvec1);
  // Main loop over full vectors.
  for (; d < size - (size % bVec::size()); d += bVec::size()) {
    bVec data_bvec = bVec::loadu(data + d);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    bVec data2_bvec = bVec::loadu(data2 + d);
    auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
    data_fvec0 = map_fun(data_fvec0, data2_fvec0);
    data_fvec1 = map_fun(data_fvec1, data2_fvec1);
    acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
    acc_fvec1 = red_fun(acc_fvec1, data_fvec1);
  }
  if (size - d > 0) {
    // Tail: masked accumulator updates keep invalid lanes untouched.
    bVec data_bvec = bVec::loadu(data + d, size - d);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    bVec data2_bvec = bVec::loadu(data2 + d, size - d);
    auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
    if (size - d > fVec::size()) {
      data_fvec0 = map_fun(data_fvec0, data2_fvec0);
      data_fvec1 = map_fun(data_fvec1, data2_fvec1);
      acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
      acc_fvec1 = fVec::set(acc_fvec1, red_fun(acc_fvec1, data_fvec1), size - d - fVec::size());
    } else {
      data_fvec0 = map_fun(data_fvec0, data2_fvec0);
      acc_fvec0 = fVec::set(acc_fvec0, red_fun(acc_fvec0, data_fvec0), size - d);
    }
  }
  // Combine halves and reduce horizontally.
  acc_fvec0 = red_fun(acc_fvec0, acc_fvec1);
  return vec_reduce_all<float>(red_fun, acc_fvec0);
}
|
| 301 |
+
|
| 302 |
+
// Fused ternary map + reduce: computes
// reduce(red_fun, map_fun(data[i], data2[i], data3[i])) over all `size`
// elements in a single pass, entirely in fp32. All three inputs must have
// at least `size` elements.
template <typename scalar_t, typename MapOp, typename ReduceOp,
  typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline float map3_reduce_all(
    const MapOp& map_fun,
    const ReduceOp& red_fun,
    const scalar_t* data,
    const scalar_t* data2,
    const scalar_t* data3,
    int64_t size) {
  using bVec = vec::Vectorized<scalar_t>;
  using fVec = vec::Vectorized<float>;
  if (size < bVec::size()) {
    // Inputs fit in (part of) a single reduced-precision vector each.
    bVec data_bvec = bVec::loadu(data, size);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    bVec data2_bvec = bVec::loadu(data2, size);
    auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
    bVec data3_bvec = bVec::loadu(data3, size);
    auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
    if (size > fVec::size()) {
      // Map both halves, fold the partially-valid high half into the low
      // one; fVec::set() masks lanes beyond the valid count.
      data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0);
      data_fvec1 = map_fun(data_fvec1, data2_fvec1, data3_fvec1);
      data_fvec0 = fVec::set(data_fvec0, red_fun(data_fvec0, data_fvec1), size - fVec::size());
      return vec_reduce_all<float>(red_fun, data_fvec0, fVec::size());
    } else {
      data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0);
      return vec_reduce_all<float>(red_fun, data_fvec0, size);
    }
  }
  // Seed the accumulators with the mapped first full vectors.
  int64_t d = bVec::size();
  bVec acc_bvec = bVec::loadu(data);
  auto [acc_fvec0, acc_fvec1] = convert_to_float<scalar_t>(acc_bvec);
  bVec acc2_bvec = bVec::loadu(data2);
  auto [acc2_fvec0, acc2_fvec1] = convert_to_float<scalar_t>(acc2_bvec);
  bVec acc3_bvec = bVec::loadu(data3);
  auto [acc3_fvec0, acc3_fvec1] = convert_to_float<scalar_t>(acc3_bvec);
  acc_fvec0 = map_fun(acc_fvec0, acc2_fvec0, acc3_fvec0);
  acc_fvec1 = map_fun(acc_fvec1, acc2_fvec1, acc3_fvec1);
  // Main loop over full vectors.
  for (; d < size - (size % bVec::size()); d += bVec::size()) {
    bVec data_bvec = bVec::loadu(data + d);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    bVec data2_bvec = bVec::loadu(data2 + d);
    auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
    bVec data3_bvec = bVec::loadu(data3 + d);
    auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
    data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0);
    data_fvec1 = map_fun(data_fvec1, data2_fvec1, data3_fvec1);
    acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
    acc_fvec1 = red_fun(acc_fvec1, data_fvec1);
  }
  if (size - d > 0) {
    // Tail: masked accumulator updates keep invalid lanes untouched.
    bVec data_bvec = bVec::loadu(data + d, size - d);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    bVec data2_bvec = bVec::loadu(data2 + d, size - d);
    auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
    bVec data3_bvec = bVec::loadu(data3 + d, size - d);
    auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
    if (size - d > fVec::size()) {
      data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0);
      data_fvec1 = map_fun(data_fvec1, data2_fvec1, data3_fvec1);
      acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
      acc_fvec1 = fVec::set(acc_fvec1, red_fun(acc_fvec1, data_fvec1), size - d - fVec::size());
    } else {
      data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0);
      acc_fvec0 = fVec::set(acc_fvec0, red_fun(acc_fvec0, data_fvec0), size - d);
    }
  }
  // Combine halves and reduce horizontally.
  acc_fvec0 = red_fun(acc_fvec0, acc_fvec1);
  return vec_reduce_all<float>(red_fun, acc_fvec0);
}
|
| 371 |
+
|
| 372 |
+
template <typename scalar_t, typename Op,
|
| 373 |
+
typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
|
| 374 |
+
inline void map(
|
| 375 |
+
const Op& vec_fun,
|
| 376 |
+
scalar_t* output_data,
|
| 377 |
+
const scalar_t* input_data,
|
| 378 |
+
int64_t size) {
|
| 379 |
+
using bVec = vec::Vectorized<scalar_t>;
|
| 380 |
+
using fVec = vec::Vectorized<float>;
|
| 381 |
+
int64_t d = 0;
|
| 382 |
+
for (; d < size - (size % bVec::size()); d += bVec::size()) {
|
| 383 |
+
bVec data_bvec = bVec::loadu(input_data + d);
|
| 384 |
+
auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
|
| 385 |
+
fVec output_fvec0 = vec_fun(data_fvec0);
|
| 386 |
+
fVec output_fvec1 = vec_fun(data_fvec1);
|
| 387 |
+
bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
|
| 388 |
+
output_bvec.store(output_data + d);
|
| 389 |
+
}
|
| 390 |
+
if (size - d > 0) {
|
| 391 |
+
bVec data_bvec = bVec::loadu(input_data + d, size - d);
|
| 392 |
+
auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
|
| 393 |
+
fVec output_fvec0 = vec_fun(data_fvec0);
|
| 394 |
+
fVec output_fvec1 = vec_fun(data_fvec1);
|
| 395 |
+
bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
|
| 396 |
+
output_bvec.store(output_data + d, size - d);
|
| 397 |
+
}
|
| 398 |
+
}
|
| 399 |
+
|
| 400 |
+
// Elementwise map with a float input and reduced-precision output:
// output_data[i] = vec_fun(input_data[i]) narrowed to scalar_t.
// Consumes two float vectors (one bVec worth of output) per iteration.
template <typename scalar_t, typename Op,
  typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline void map(
    const Op& vec_fun,
    scalar_t* output_data,
    const float* input_data,
    int64_t size) {
  using bVec = vec::Vectorized<scalar_t>;
  using fVec = vec::Vectorized<float>;
  int64_t d = 0;
  // Main loop: two full float loads per reduced-precision store.
  for (; d < size - (size % bVec::size()); d += bVec::size()) {
    fVec data_fvec0 = fVec::loadu(input_data + d);
    fVec data_fvec1 = fVec::loadu(input_data + d + fVec::size());
    fVec output_fvec0 = vec_fun(data_fvec0);
    fVec output_fvec1 = vec_fun(data_fvec1);
    bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
    output_bvec.store(output_data + d);
  }
  if (size - d > 0) {
    fVec data_fvec0, data_fvec1;
    if (size - d > fVec::size()) {
      // Remaining elements span both float vectors: full low load, masked
      // high load.
      data_fvec0 = fVec::loadu(input_data + d);
      data_fvec1 = fVec::loadu(input_data + d + fVec::size(), size - d - fVec::size());
    } else {
      // choose to align with behaviour of bVec::loadu(ptr, size),
      // which leaves data_fvec1 uninitialized
      data_fvec0 = fVec::loadu(input_data + d, size - d);
    }
    // The masked store below discards lanes beyond (size - d), so the
    // possibly-uninitialized high lanes never reach memory.
    fVec output_fvec0 = vec_fun(data_fvec0);
    fVec output_fvec1 = vec_fun(data_fvec1);
    bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
    output_bvec.store(output_data + d, size - d);
  }
}
|
| 434 |
+
|
| 435 |
+
template <typename scalar_t, typename Op,
|
| 436 |
+
typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
|
| 437 |
+
inline void map2(
|
| 438 |
+
const Op& vec_fun,
|
| 439 |
+
scalar_t* output_data,
|
| 440 |
+
const scalar_t* input_data,
|
| 441 |
+
const scalar_t* input_data2,
|
| 442 |
+
int64_t size) {
|
| 443 |
+
using bVec = vec::Vectorized<scalar_t>;
|
| 444 |
+
using fVec = vec::Vectorized<float>;
|
| 445 |
+
int64_t d = 0;
|
| 446 |
+
for (; d < size - (size % bVec::size()); d += bVec::size()) {
|
| 447 |
+
bVec data_bvec = bVec::loadu(input_data + d);
|
| 448 |
+
auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
|
| 449 |
+
bVec data2_bvec = bVec::loadu(input_data2 + d);
|
| 450 |
+
auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
|
| 451 |
+
fVec output_fvec0 = vec_fun(data_fvec0, data2_fvec0);
|
| 452 |
+
fVec output_fvec1 = vec_fun(data_fvec1, data2_fvec1);
|
| 453 |
+
bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
|
| 454 |
+
output_bvec.store(output_data + d);
|
| 455 |
+
}
|
| 456 |
+
if (size - d > 0) {
|
| 457 |
+
bVec data_bvec = bVec::loadu(input_data + d, size - d);
|
| 458 |
+
auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
|
| 459 |
+
bVec data2_bvec = bVec::loadu(input_data2 + d, size - d);
|
| 460 |
+
auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
|
| 461 |
+
fVec output_fvec0 = vec_fun(data_fvec0, data2_fvec0);
|
| 462 |
+
fVec output_fvec1 = vec_fun(data_fvec1, data2_fvec1);
|
| 463 |
+
bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
|
| 464 |
+
output_bvec.store(output_data + d, size - d);
|
| 465 |
+
}
|
| 466 |
+
}
|
| 467 |
+
|
| 468 |
+
// Elementwise ternary map: output_data[i] =
// vec_fun(input_data1[i], input_data2[i], input_data3[i]) for i in [0, size).
// All inputs are widened to fp32; the result is narrowed back to scalar_t.
template <typename scalar_t, typename Op,
  typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline void map3(
    const Op& vec_fun,
    scalar_t* output_data,
    const scalar_t* input_data1,
    const scalar_t* input_data2,
    const scalar_t* input_data3,
    int64_t size) {
  using bVec = vec::Vectorized<scalar_t>;
  using fVec = vec::Vectorized<float>;
  int64_t d = 0;
  // Main loop over full vectors.
  for (; d < size - (size % bVec::size()); d += bVec::size()) {
    bVec data1_bvec = bVec::loadu(input_data1 + d);
    auto [data1_fvec0, data1_fvec1] = convert_to_float<scalar_t>(data1_bvec);
    bVec data2_bvec = bVec::loadu(input_data2 + d);
    auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
    bVec data3_bvec = bVec::loadu(input_data3 + d);
    auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
    fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0);
    fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1);
    bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
    output_bvec.store(output_data + d);
  }
  if (size - d > 0) {
    // Tail: masked loads and a masked store for the leftover elements.
    bVec data1_bvec = bVec::loadu(input_data1 + d, size - d);
    auto [data1_fvec0, data1_fvec1] = convert_to_float<scalar_t>(data1_bvec);
    bVec data2_bvec = bVec::loadu(input_data2 + d, size - d);
    auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
    bVec data3_bvec = bVec::loadu(input_data3 + d, size - d);
    auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
    fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0);
    fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1);
    bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
    output_bvec.store(output_data + d, size - d);
  }
}
|
| 505 |
+
|
| 506 |
+
// Elementwise quaternary map: output_data[i] =
// vec_fun(input_data1[i], input_data2[i], input_data3[i], input_data4[i])
// for i in [0, size). All inputs are widened to fp32; the result is
// narrowed back to scalar_t.
template <typename scalar_t, typename Op,
  typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline void map4(
    const Op& vec_fun,
    scalar_t* output_data,
    const scalar_t* input_data1,
    const scalar_t* input_data2,
    const scalar_t* input_data3,
    const scalar_t* input_data4,
    int64_t size) {
  using bVec = vec::Vectorized<scalar_t>;
  using fVec = vec::Vectorized<float>;
  int64_t d = 0;
  // Main loop over full vectors.
  for (; d < size - (size % bVec::size()); d += bVec::size()) {
    bVec data1_bvec = bVec::loadu(input_data1 + d);
    auto [data1_fvec0, data1_fvec1] = convert_to_float<scalar_t>(data1_bvec);
    bVec data2_bvec = bVec::loadu(input_data2 + d);
    auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
    bVec data3_bvec = bVec::loadu(input_data3 + d);
    auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
    bVec data4_bvec = bVec::loadu(input_data4 + d);
    auto [data4_fvec0, data4_fvec1] = convert_to_float<scalar_t>(data4_bvec);
    fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0, data4_fvec0);
    fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1, data4_fvec1);
    bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
    output_bvec.store(output_data + d);
  }
  if (size - d > 0) {
    // Tail: masked loads and a masked store for the leftover elements.
    bVec data1_bvec = bVec::loadu(input_data1 + d, size - d);
    auto [data1_fvec0, data1_fvec1] = convert_to_float<scalar_t>(data1_bvec);
    bVec data2_bvec = bVec::loadu(input_data2 + d, size - d);
    auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
    bVec data3_bvec = bVec::loadu(input_data3 + d, size - d);
    auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
    bVec data4_bvec = bVec::loadu(input_data4 + d, size - d);
    auto [data4_fvec0, data4_fvec1] = convert_to_float<scalar_t>(data4_bvec);
    fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0, data4_fvec0);
    fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1, data4_fvec1);
    bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
    output_bvec.store(output_data + d, size - d);
  }
}
|
| 548 |
+
|
| 549 |
+
} // namespace at::vec
|
moondream/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec.h
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#if defined(CPU_CAPABILITY_AVX512)
|
| 4 |
+
#include <ATen/cpu/vec/vec512/vec512.h>
|
| 5 |
+
#else
|
| 6 |
+
#include <ATen/cpu/vec/vec256/vec256.h>
|
| 7 |
+
#endif
|
| 8 |
+
|
| 9 |
+
namespace at::vec {
|
| 10 |
+
// See Note [CPU_CAPABILITY namespace]
|
| 11 |
+
inline namespace CPU_CAPABILITY {
|
| 12 |
+
|
| 13 |
+
// Converts an int8 vector into a bool vector: each output lane is
// (input lane != 0).
inline Vectorized<bool> convert_to_bool(Vectorized<int8_t> x) {
  // Normalize to 0/1 via the vector compare, then spill to a stack buffer.
  __at_align__ bool buffer[x.size()];
  x.ne(Vectorized<int8_t>(0)).store(buffer);

  Vectorized<bool> ret;
  static_assert(x.size() == ret.size(), "");
  // Lane counts match (asserted above), so a raw byte copy of the
  // normalized values fills every bool lane.
  std::memcpy(ret, buffer, ret.size() * sizeof(bool));
  return ret;
}
|
| 22 |
+
|
| 23 |
+
// Full-width bool load: bytes are loaded as int8 and normalized to 0/1,
// since in-memory bools may hold arbitrary nonzero values.
template <>
inline Vectorized<bool> Vectorized<bool>::loadu(const void* ptr) {
  // See NOTE [Loading boolean values]
  return convert_to_bool(Vectorized<int8_t>::loadu(ptr));
}
|
| 28 |
+
|
| 29 |
+
// Partial bool load: same normalization as the full-width overload, but
// only `count` elements are read from memory.
template <>
inline Vectorized<bool> Vectorized<bool>::loadu(const void* ptr, int64_t count) {
  // See NOTE [Loading boolean values]
  return convert_to_bool(Vectorized<int8_t>::loadu(ptr, count));
}
|
| 34 |
+
|
| 35 |
+
// Extracts the element type held by a Vectorized specialization.
// The primary template defers to the vector's own value_type; BFloat16 and
// Half are specialized explicitly here.
template <typename VT>
struct VecHoldType { using hold_type = typename VT::value_type; };

template <>
struct VecHoldType<Vectorized<BFloat16>> { using hold_type = BFloat16; };

template <>
struct VecHoldType<Vectorized<Half>> { using hold_type = Half; };

// Convenience alias: vechold_type<Vectorized<T>> is the held element type.
template <typename VT>
using vechold_type = typename VecHoldType<VT>::hold_type;
|
| 46 |
+
|
| 47 |
+
}} // namespace at::vec::CPU_CAPABILITY
|