Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +2 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/core/exceptions.pyi +43 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/core/mail/__init__.pyi +48 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/core/mail/backends/dummy.pyi +3 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/core/mail/utils.pyi +6 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/core/paginator.pyi +62 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/core/serializers/__init__.pyi +31 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/core/serializers/base.pyi +87 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/core/serializers/json.pyi +17 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/core/validators.pyi +121 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/core/wsgi.pyi +3 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/db/migrations/autodetector.pyi +67 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/db/migrations/operations/base.pyi +17 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/db/migrations/operations/models.pyi +87 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/db/migrations/utils.pyi +10 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/db/migrations/writer.pyi +40 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/db/models/fields/related_lookups.pyi +48 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/db/models/fields/reverse_related.pyi +110 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/db/models/sql/__init__.pyi +8 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/db/models/sql/constants.pyi +14 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/db/models/sql/datastructures.pyi +49 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/db/models/sql/subqueries.pyi +45 -0
- mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/db/models/sql/where.pyi +46 -0
- moondream/lib/python3.10/site-packages/sympy/core/__pycache__/numbers.cpython-310.pyc +3 -0
- moondream/lib/python3.10/site-packages/sympy/polys/__pycache__/polyquinticconst.cpython-310.pyc +3 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/Dimname.h +1 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/RedispatchFunctions.h +0 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/Tensor.h +3 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/TensorUtils.h +190 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/Dict_inl.h +209 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/NestedIntSymNodeImpl.h +186 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/Reduction.h +16 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/Scalar.h +1 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/TorchDispatchUtils.h +17 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/blob.h +208 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/ivalue_to.h +36 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/jit_type_base.h +719 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/core/rref_interface.h +40 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/cudnn/Descriptors.h +391 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/cudnn/Exceptions.h +0 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/cudnn/Handles.h +2 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/cudnn/Types.h +14 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/cudnn/Utils.h +21 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/cudnn/cudnn-wrapper.h +15 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/detail/CUDAHooksInterface.h +201 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/detail/MPSHooksInterface.h +106 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/detail/PrivateUse1HooksInterface.h +61 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/detail/XPUHooksInterface.h +80 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/functorch/ADInterpreters.h +38 -0
- moondream/lib/python3.10/site-packages/torch/include/ATen/functorch/BatchRulesHelper.h +475 -0
.gitattributes
CHANGED
|
@@ -499,3 +499,5 @@ moondream/lib/python3.10/site-packages/torch/__pycache__/_tensor_docs.cpython-31
|
|
| 499 |
moondream/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 500 |
moondream/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/symbolic_shapes.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 501 |
moondream/lib/python3.10/site-packages/sympy/solvers/ode/__pycache__/single.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
| 499 |
moondream/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 500 |
moondream/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/symbolic_shapes.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 501 |
moondream/lib/python3.10/site-packages/sympy/solvers/ode/__pycache__/single.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 502 |
+
moondream/lib/python3.10/site-packages/sympy/core/__pycache__/numbers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 503 |
+
moondream/lib/python3.10/site-packages/sympy/polys/__pycache__/polyquinticconst.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/core/exceptions.pyi
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Dict, Iterator, List, Mapping, Optional, Tuple, Union
|
| 2 |
+
|
| 3 |
+
from django.forms.utils import ErrorDict
|
| 4 |
+
|
| 5 |
+
class FieldDoesNotExist(Exception): ...
|
| 6 |
+
class AppRegistryNotReady(Exception): ...
|
| 7 |
+
|
| 8 |
+
class ObjectDoesNotExist(Exception):
|
| 9 |
+
silent_variable_failure: bool = ...
|
| 10 |
+
|
| 11 |
+
class MultipleObjectsReturned(Exception): ...
|
| 12 |
+
class SuspiciousOperation(Exception): ...
|
| 13 |
+
class SuspiciousMultipartForm(SuspiciousOperation): ...
|
| 14 |
+
class SuspiciousFileOperation(SuspiciousOperation): ...
|
| 15 |
+
class DisallowedHost(SuspiciousOperation): ...
|
| 16 |
+
class DisallowedRedirect(SuspiciousOperation): ...
|
| 17 |
+
class TooManyFieldsSent(SuspiciousOperation): ...
|
| 18 |
+
class RequestDataTooBig(SuspiciousOperation): ...
|
| 19 |
+
class PermissionDenied(Exception): ...
|
| 20 |
+
class ViewDoesNotExist(Exception): ...
|
| 21 |
+
class MiddlewareNotUsed(Exception): ...
|
| 22 |
+
class ImproperlyConfigured(Exception): ...
|
| 23 |
+
class FieldError(Exception): ...
|
| 24 |
+
|
| 25 |
+
NON_FIELD_ERRORS: str
|
| 26 |
+
|
| 27 |
+
class ValidationError(Exception):
|
| 28 |
+
error_dict: Any = ...
|
| 29 |
+
error_list: Any = ...
|
| 30 |
+
message: Any = ...
|
| 31 |
+
code: Any = ...
|
| 32 |
+
params: Any = ...
|
| 33 |
+
def __init__(self, message: Any, code: Optional[str] = ..., params: Optional[Mapping[str, Any]] = ...) -> None: ...
|
| 34 |
+
@property
|
| 35 |
+
def message_dict(self) -> Dict[str, List[str]]: ...
|
| 36 |
+
@property
|
| 37 |
+
def messages(self) -> List[str]: ...
|
| 38 |
+
def update_error_dict(
|
| 39 |
+
self, error_dict: Mapping[str, Any]
|
| 40 |
+
) -> Union[Dict[str, List[ValidationError]], ErrorDict]: ...
|
| 41 |
+
def __iter__(self) -> Iterator[Union[Tuple[str, List[str]], str]]: ...
|
| 42 |
+
|
| 43 |
+
class EmptyResultSet(Exception): ...
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/core/mail/__init__.pyi
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, List, Optional, Tuple
|
| 2 |
+
|
| 3 |
+
from .message import (
|
| 4 |
+
BadHeaderError as BadHeaderError,
|
| 5 |
+
DEFAULT_ATTACHMENT_MIME_TYPE as DEFAULT_ATTACHMENT_MIME_TYPE,
|
| 6 |
+
EmailMessage as EmailMessage,
|
| 7 |
+
EmailMultiAlternatives as EmailMultiAlternatives,
|
| 8 |
+
SafeMIMEMultipart as SafeMIMEMultipart,
|
| 9 |
+
SafeMIMEText as SafeMIMEText,
|
| 10 |
+
forbid_multi_line_headers as forbid_multi_line_headers,
|
| 11 |
+
)
|
| 12 |
+
from .utils import CachedDnsName as CachedDnsName, DNS_NAME as DNS_NAME
|
| 13 |
+
|
| 14 |
+
def get_connection(backend: Optional[str] = ..., fail_silently: bool = ..., **kwds: Any) -> Any: ...
|
| 15 |
+
def send_mail(
|
| 16 |
+
subject: str,
|
| 17 |
+
message: str,
|
| 18 |
+
from_email: Optional[str],
|
| 19 |
+
recipient_list: List[str],
|
| 20 |
+
fail_silently: bool = ...,
|
| 21 |
+
auth_user: Optional[str] = ...,
|
| 22 |
+
auth_password: Optional[str] = ...,
|
| 23 |
+
connection: Optional[Any] = ...,
|
| 24 |
+
html_message: Optional[str] = ...,
|
| 25 |
+
) -> int: ...
|
| 26 |
+
def send_mass_mail(
|
| 27 |
+
datatuple: List[Tuple[str, str, str, List[str]]],
|
| 28 |
+
fail_silently: bool = ...,
|
| 29 |
+
auth_user: Optional[str] = ...,
|
| 30 |
+
auth_password: Optional[str] = ...,
|
| 31 |
+
connection: Optional[Any] = ...,
|
| 32 |
+
) -> int: ...
|
| 33 |
+
def mail_admins(
|
| 34 |
+
subject: str,
|
| 35 |
+
message: str,
|
| 36 |
+
fail_silently: bool = ...,
|
| 37 |
+
connection: Optional[Any] = ...,
|
| 38 |
+
html_message: Optional[str] = ...,
|
| 39 |
+
) -> None: ...
|
| 40 |
+
def mail_managers(
|
| 41 |
+
subject: str,
|
| 42 |
+
message: str,
|
| 43 |
+
fail_silently: bool = ...,
|
| 44 |
+
connection: Optional[Any] = ...,
|
| 45 |
+
html_message: Optional[str] = ...,
|
| 46 |
+
) -> None: ...
|
| 47 |
+
|
| 48 |
+
outbox: List[EmailMessage] = ...
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/core/mail/backends/dummy.pyi
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from django.core.mail.backends.base import BaseEmailBackend
|
| 2 |
+
|
| 3 |
+
class EmailBackend(BaseEmailBackend): ...
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/core/mail/utils.pyi
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any
|
| 2 |
+
|
| 3 |
+
class CachedDnsName:
|
| 4 |
+
def get_fqdn(self) -> str: ...
|
| 5 |
+
|
| 6 |
+
DNS_NAME: Any
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/core/paginator.pyi
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, List, Optional, Protocol, Sequence, Union
|
| 2 |
+
|
| 3 |
+
from django.db.models.base import Model
|
| 4 |
+
from django.db.models.query import QuerySet
|
| 5 |
+
|
| 6 |
+
class UnorderedObjectListWarning(RuntimeWarning): ...
|
| 7 |
+
class InvalidPage(Exception): ...
|
| 8 |
+
class PageNotAnInteger(InvalidPage): ...
|
| 9 |
+
class EmptyPage(InvalidPage): ...
|
| 10 |
+
|
| 11 |
+
class _SupportsLen(Protocol):
|
| 12 |
+
def __len__(self) -> int: ...
|
| 13 |
+
|
| 14 |
+
class _SupportsCount(Protocol):
|
| 15 |
+
def count(self) -> int: ...
|
| 16 |
+
|
| 17 |
+
class _SupportsOrdered(Protocol):
|
| 18 |
+
ordered: bool = ...
|
| 19 |
+
|
| 20 |
+
class Paginator:
|
| 21 |
+
object_list: QuerySet = ...
|
| 22 |
+
per_page: int = ...
|
| 23 |
+
orphans: int = ...
|
| 24 |
+
allow_empty_first_page: bool = ...
|
| 25 |
+
def __init__(
|
| 26 |
+
self,
|
| 27 |
+
object_list: Union[_SupportsLen, _SupportsCount, _SupportsOrdered],
|
| 28 |
+
per_page: Union[int, str],
|
| 29 |
+
orphans: int = ...,
|
| 30 |
+
allow_empty_first_page: bool = ...,
|
| 31 |
+
) -> None: ...
|
| 32 |
+
def validate_number(self, number: Optional[Union[float, str]]) -> int: ...
|
| 33 |
+
def get_page(self, number: Optional[int]) -> Page: ...
|
| 34 |
+
def page(self, number: Union[int, str]) -> Page: ...
|
| 35 |
+
@property
|
| 36 |
+
def count(self) -> int: ...
|
| 37 |
+
@property
|
| 38 |
+
def num_pages(self) -> int: ...
|
| 39 |
+
@property
|
| 40 |
+
def page_range(self) -> range: ...
|
| 41 |
+
|
| 42 |
+
QuerySetPaginator = Paginator
|
| 43 |
+
|
| 44 |
+
class Page(Sequence):
|
| 45 |
+
object_list: QuerySet = ...
|
| 46 |
+
number: int = ...
|
| 47 |
+
paginator: Paginator = ...
|
| 48 |
+
def __init__(
|
| 49 |
+
self,
|
| 50 |
+
object_list: Union[List[Dict[str, str]], List[Model], List[int], QuerySet, str],
|
| 51 |
+
number: int,
|
| 52 |
+
paginator: Paginator,
|
| 53 |
+
) -> None: ...
|
| 54 |
+
def __getitem__(self, item): ...
|
| 55 |
+
def __len__(self): ...
|
| 56 |
+
def has_next(self) -> bool: ...
|
| 57 |
+
def has_previous(self) -> bool: ...
|
| 58 |
+
def has_other_pages(self) -> bool: ...
|
| 59 |
+
def next_page_number(self) -> int: ...
|
| 60 |
+
def previous_page_number(self) -> int: ...
|
| 61 |
+
def start_index(self) -> int: ...
|
| 62 |
+
def end_index(self) -> int: ...
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/core/serializers/__init__.pyi
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Type, Union
|
| 2 |
+
|
| 3 |
+
from django.db.models.base import Model
|
| 4 |
+
|
| 5 |
+
from .base import (
|
| 6 |
+
DeserializationError as DeserializationError,
|
| 7 |
+
DeserializedObject,
|
| 8 |
+
Deserializer as Deserializer,
|
| 9 |
+
M2MDeserializationError as M2MDeserializationError,
|
| 10 |
+
SerializationError as SerializationError,
|
| 11 |
+
Serializer as Serializer,
|
| 12 |
+
SerializerDoesNotExist as SerializerDoesNotExist,
|
| 13 |
+
)
|
| 14 |
+
|
| 15 |
+
BUILTIN_SERIALIZERS: Any
|
| 16 |
+
|
| 17 |
+
class BadSerializer:
|
| 18 |
+
internal_use_only: bool = ...
|
| 19 |
+
exception: BaseException = ...
|
| 20 |
+
def __init__(self, exception: BaseException) -> None: ...
|
| 21 |
+
def __call__(self, *args: Any, **kwargs: Any) -> Any: ...
|
| 22 |
+
|
| 23 |
+
def register_serializer(format: str, serializer_module: str, serializers: Optional[Dict[str, Any]] = ...) -> None: ...
|
| 24 |
+
def unregister_serializer(format: str) -> None: ...
|
| 25 |
+
def get_serializer(format: str) -> Union[Type[Serializer], BadSerializer]: ...
|
| 26 |
+
def get_serializer_formats() -> List[str]: ...
|
| 27 |
+
def get_public_serializer_formats() -> List[str]: ...
|
| 28 |
+
def get_deserializer(format: str) -> Union[Callable, Type[Deserializer]]: ...
|
| 29 |
+
def serialize(format: str, queryset: Iterable[Model], **options: Any) -> Any: ...
|
| 30 |
+
def deserialize(format: str, stream_or_string: Any, **options: Any) -> Iterator[DeserializedObject]: ...
|
| 31 |
+
def sort_dependencies(app_list: Iterable[Any]) -> List[Type[Model]]: ...
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/core/serializers/base.pyi
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datetime import date
|
| 2 |
+
from io import BufferedReader, StringIO, TextIOWrapper
|
| 3 |
+
from typing import Any, Dict, Iterable, List, Mapping, Optional, Type, Union, Collection
|
| 4 |
+
from uuid import UUID
|
| 5 |
+
|
| 6 |
+
from django.core.management.base import OutputWrapper
|
| 7 |
+
from django.db.models.base import Model
|
| 8 |
+
from django.db.models.fields.related import ForeignKey, ManyToManyField
|
| 9 |
+
|
| 10 |
+
from django.db.models.fields import Field
|
| 11 |
+
|
| 12 |
+
class SerializerDoesNotExist(KeyError): ...
|
| 13 |
+
class SerializationError(Exception): ...
|
| 14 |
+
|
| 15 |
+
class DeserializationError(Exception):
|
| 16 |
+
@classmethod
|
| 17 |
+
def WithData(
|
| 18 |
+
cls, original_exc: Exception, model: str, fk: Union[int, str], field_value: Optional[Union[List[str], str]]
|
| 19 |
+
) -> DeserializationError: ...
|
| 20 |
+
|
| 21 |
+
class M2MDeserializationError(Exception):
|
| 22 |
+
original_exc: Exception = ...
|
| 23 |
+
pk: List[str] = ...
|
| 24 |
+
def __init__(self, original_exc: Exception, pk: Union[List[str], str]) -> None: ...
|
| 25 |
+
|
| 26 |
+
class ProgressBar:
|
| 27 |
+
progress_width: int = ...
|
| 28 |
+
output: None = ...
|
| 29 |
+
total_count: int = ...
|
| 30 |
+
prev_done: int = ...
|
| 31 |
+
def __init__(self, output: Optional[Union[StringIO, OutputWrapper]], total_count: int) -> None: ...
|
| 32 |
+
def update(self, count: int) -> None: ...
|
| 33 |
+
|
| 34 |
+
class Serializer:
|
| 35 |
+
internal_use_only: bool = ...
|
| 36 |
+
progress_class: Any = ...
|
| 37 |
+
stream_class: Any = ...
|
| 38 |
+
options: Dict[str, Any] = ...
|
| 39 |
+
stream: Any = ...
|
| 40 |
+
selected_fields: Optional[Collection[str]] = ...
|
| 41 |
+
use_natural_foreign_keys: bool = ...
|
| 42 |
+
use_natural_primary_keys: bool = ...
|
| 43 |
+
first: bool = ...
|
| 44 |
+
def serialize(
|
| 45 |
+
self,
|
| 46 |
+
queryset: Iterable[Model],
|
| 47 |
+
*,
|
| 48 |
+
stream: Optional[Any] = ...,
|
| 49 |
+
fields: Optional[Collection[str]] = ...,
|
| 50 |
+
use_natural_foreign_keys: bool = ...,
|
| 51 |
+
use_natural_primary_keys: bool = ...,
|
| 52 |
+
progress_output: Optional[Any] = ...,
|
| 53 |
+
object_count: int = ...,
|
| 54 |
+
**options: Any
|
| 55 |
+
) -> Any: ...
|
| 56 |
+
def start_serialization(self) -> None: ...
|
| 57 |
+
def end_serialization(self) -> None: ...
|
| 58 |
+
def start_object(self, obj: Any) -> None: ...
|
| 59 |
+
def end_object(self, obj: Any) -> None: ...
|
| 60 |
+
def handle_field(self, obj: Any, field: Any) -> None: ...
|
| 61 |
+
def handle_fk_field(self, obj: Any, field: Any) -> None: ...
|
| 62 |
+
def handle_m2m_field(self, obj: Any, field: Any) -> None: ...
|
| 63 |
+
def getvalue(self) -> Optional[Union[bytes, str]]: ...
|
| 64 |
+
|
| 65 |
+
class Deserializer:
|
| 66 |
+
options: Dict[str, Any] = ...
|
| 67 |
+
stream: Any = ...
|
| 68 |
+
def __init__(self, stream_or_string: Union[BufferedReader, TextIOWrapper, str], **options: Any) -> None: ...
|
| 69 |
+
def __iter__(self) -> Deserializer: ...
|
| 70 |
+
def __next__(self) -> None: ...
|
| 71 |
+
|
| 72 |
+
class DeserializedObject:
|
| 73 |
+
object: Any = ...
|
| 74 |
+
m2m_data: Dict[str, List[int]] = ...
|
| 75 |
+
deferred_fields: Mapping[Field, Any]
|
| 76 |
+
def __init__(
|
| 77 |
+
self,
|
| 78 |
+
obj: Model,
|
| 79 |
+
m2m_data: Optional[Dict[str, List[int]]] = ...,
|
| 80 |
+
deferred_fields: Optional[Mapping[Field, Any]] = ...,
|
| 81 |
+
) -> None: ...
|
| 82 |
+
def save(self, save_m2m: bool = ..., using: Optional[str] = ..., **kwargs: Any) -> None: ...
|
| 83 |
+
def save_deferred_fields(self, using: Optional[str] = ...) -> None: ...
|
| 84 |
+
|
| 85 |
+
def build_instance(Model: Type[Model], data: Dict[str, Optional[Union[date, int, str, UUID]]], db: str) -> Model: ...
|
| 86 |
+
def deserialize_m2m_values(field: ManyToManyField, field_value: Any, using: str) -> List[Any]: ...
|
| 87 |
+
def deserialize_fk_value(field: ForeignKey, field_value: Any, using: str) -> Any: ...
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/core/serializers/json.pyi
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
from typing import Any, Dict
|
| 3 |
+
|
| 4 |
+
from django.core.serializers.python import Serializer as PythonSerializer
|
| 5 |
+
|
| 6 |
+
class Serializer(PythonSerializer):
|
| 7 |
+
json_kwargs: Dict[str, Any]
|
| 8 |
+
|
| 9 |
+
def Deserializer(stream_or_string: Any, **options: Any) -> None: ...
|
| 10 |
+
|
| 11 |
+
class DjangoJSONEncoder(json.JSONEncoder):
|
| 12 |
+
allow_nan: bool
|
| 13 |
+
check_circular: bool
|
| 14 |
+
ensure_ascii: bool
|
| 15 |
+
indent: int
|
| 16 |
+
skipkeys: bool
|
| 17 |
+
sort_keys: bool
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/core/validators.pyi
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from decimal import Decimal
|
| 2 |
+
from re import RegexFlag
|
| 3 |
+
from typing import Any, Callable, Collection, Dict, List, Optional, Pattern, Tuple, Union
|
| 4 |
+
|
| 5 |
+
from django.core.files.base import File
|
| 6 |
+
|
| 7 |
+
EMPTY_VALUES: Any
|
| 8 |
+
|
| 9 |
+
_Regex = Union[str, Pattern[str]]
|
| 10 |
+
_ErrorMessage = Union[str, Any]
|
| 11 |
+
|
| 12 |
+
def _lazy_re_compile(regex: _Regex, flags: int = ...): ...
|
| 13 |
+
|
| 14 |
+
class RegexValidator:
|
| 15 |
+
regex: _Regex = ...
|
| 16 |
+
message: str = ...
|
| 17 |
+
code: str = ...
|
| 18 |
+
inverse_match: bool = ...
|
| 19 |
+
flags: int = ...
|
| 20 |
+
def __init__(
|
| 21 |
+
self,
|
| 22 |
+
regex: Optional[_Regex] = ...,
|
| 23 |
+
message: Optional[_ErrorMessage] = ...,
|
| 24 |
+
code: Optional[str] = ...,
|
| 25 |
+
inverse_match: Optional[bool] = ...,
|
| 26 |
+
flags: Optional[RegexFlag] = ...,
|
| 27 |
+
) -> None: ...
|
| 28 |
+
def __call__(self, value: Optional[str]) -> None: ...
|
| 29 |
+
|
| 30 |
+
class URLValidator(RegexValidator):
|
| 31 |
+
ul: str = ...
|
| 32 |
+
ipv4_re: str = ...
|
| 33 |
+
ipv6_re: str = ...
|
| 34 |
+
hostname_re: str = ...
|
| 35 |
+
domain_re: str = ...
|
| 36 |
+
tld_re: str = ...
|
| 37 |
+
host_re: str = ...
|
| 38 |
+
schemes: List[str] = ...
|
| 39 |
+
def __init__(self, schemes: Optional[Collection[str]] = ..., **kwargs: Any) -> None: ...
|
| 40 |
+
|
| 41 |
+
integer_validator: RegexValidator = ...
|
| 42 |
+
|
| 43 |
+
def validate_integer(value: Optional[Union[float, str]]) -> None: ...
|
| 44 |
+
|
| 45 |
+
class EmailValidator:
|
| 46 |
+
message: str = ...
|
| 47 |
+
code: str = ...
|
| 48 |
+
user_regex: Pattern = ...
|
| 49 |
+
domain_regex: Pattern = ...
|
| 50 |
+
literal_regex: Pattern = ...
|
| 51 |
+
domain_whitelist: List[str] = ...
|
| 52 |
+
def __init__(
|
| 53 |
+
self,
|
| 54 |
+
message: Optional[_ErrorMessage] = ...,
|
| 55 |
+
code: Optional[str] = ...,
|
| 56 |
+
whitelist: Optional[Collection[str]] = ...,
|
| 57 |
+
) -> None: ...
|
| 58 |
+
def __call__(self, value: Optional[str]) -> None: ...
|
| 59 |
+
def validate_domain_part(self, domain_part: str) -> bool: ...
|
| 60 |
+
|
| 61 |
+
validate_email: EmailValidator = ...
|
| 62 |
+
slug_re: Pattern = ...
|
| 63 |
+
validate_slug: RegexValidator = ...
|
| 64 |
+
slug_unicode_re: Pattern = ...
|
| 65 |
+
validate_unicode_slug: RegexValidator = ...
|
| 66 |
+
|
| 67 |
+
def validate_ipv4_address(value: str) -> None: ...
|
| 68 |
+
def validate_ipv6_address(value: str) -> None: ...
|
| 69 |
+
def validate_ipv46_address(value: str) -> None: ...
|
| 70 |
+
|
| 71 |
+
_IPValidator = Tuple[Callable[[Any], None], str]
|
| 72 |
+
ip_address_validator_map: Dict[str, _IPValidator]
|
| 73 |
+
|
| 74 |
+
def ip_address_validators(protocol: str, unpack_ipv4: bool) -> _IPValidator: ...
|
| 75 |
+
def int_list_validator(
|
| 76 |
+
sep: str = ..., message: Optional[_ErrorMessage] = ..., code: str = ..., allow_negative: bool = ...
|
| 77 |
+
) -> RegexValidator: ...
|
| 78 |
+
|
| 79 |
+
validate_comma_separated_integer_list: Any
|
| 80 |
+
|
| 81 |
+
class BaseValidator:
|
| 82 |
+
message: str = ...
|
| 83 |
+
code: str = ...
|
| 84 |
+
limit_value: Any = ...
|
| 85 |
+
def __init__(self, limit_value: Any, message: Optional[_ErrorMessage] = ...) -> None: ...
|
| 86 |
+
def __call__(self, value: Any) -> None: ...
|
| 87 |
+
def compare(self, a: Any, b: Any) -> bool: ...
|
| 88 |
+
def clean(self, x: Any) -> Any: ...
|
| 89 |
+
|
| 90 |
+
class MaxValueValidator(BaseValidator): ...
|
| 91 |
+
class MinValueValidator(BaseValidator): ...
|
| 92 |
+
class MinLengthValidator(BaseValidator): ...
|
| 93 |
+
class MaxLengthValidator(BaseValidator): ...
|
| 94 |
+
|
| 95 |
+
class DecimalValidator:
|
| 96 |
+
messages: Dict[str, str] = ...
|
| 97 |
+
max_digits: int = ...
|
| 98 |
+
decimal_places: int = ...
|
| 99 |
+
def __init__(self, max_digits: Optional[Union[int, str]], decimal_places: Optional[Union[int, str]]) -> None: ...
|
| 100 |
+
def __call__(self, value: Decimal) -> None: ...
|
| 101 |
+
|
| 102 |
+
class FileExtensionValidator:
|
| 103 |
+
message: str = ...
|
| 104 |
+
code: str = ...
|
| 105 |
+
allowed_extensions: List[str] = ...
|
| 106 |
+
def __init__(
|
| 107 |
+
self,
|
| 108 |
+
allowed_extensions: Optional[Collection[str]] = ...,
|
| 109 |
+
message: Optional[_ErrorMessage] = ...,
|
| 110 |
+
code: Optional[str] = ...,
|
| 111 |
+
) -> None: ...
|
| 112 |
+
def __call__(self, value: File) -> None: ...
|
| 113 |
+
|
| 114 |
+
def get_available_image_extensions() -> List[str]: ...
|
| 115 |
+
def validate_image_file_extension(value: File) -> None: ...
|
| 116 |
+
|
| 117 |
+
class ProhibitNullCharactersValidator:
|
| 118 |
+
message: str = ...
|
| 119 |
+
code: str = ...
|
| 120 |
+
def __init__(self, message: Optional[_ErrorMessage] = ..., code: Optional[str] = ...) -> None: ...
|
| 121 |
+
def __call__(self, value: Any) -> None: ...
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/core/wsgi.pyi
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from django.core.handlers.wsgi import WSGIHandler
|
| 2 |
+
|
| 3 |
+
def get_wsgi_application() -> WSGIHandler: ...
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/db/migrations/autodetector.pyi
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
|
| 2 |
+
|
| 3 |
+
from django.db.migrations.graph import MigrationGraph
|
| 4 |
+
from django.db.migrations.migration import Migration
|
| 5 |
+
from django.db.migrations.operations.base import Operation
|
| 6 |
+
from django.db.migrations.questioner import MigrationQuestioner
|
| 7 |
+
from django.db.migrations.state import ProjectState
|
| 8 |
+
from django.db.models.fields import Field
|
| 9 |
+
|
| 10 |
+
class MigrationAutodetector:
|
| 11 |
+
from_state: ProjectState = ...
|
| 12 |
+
to_state: ProjectState = ...
|
| 13 |
+
questioner: MigrationQuestioner = ...
|
| 14 |
+
existing_apps: Set[Any] = ...
|
| 15 |
+
def __init__(
|
| 16 |
+
self, from_state: ProjectState, to_state: ProjectState, questioner: Optional[MigrationQuestioner] = ...
|
| 17 |
+
) -> None: ...
|
| 18 |
+
def changes(
|
| 19 |
+
self,
|
| 20 |
+
graph: MigrationGraph,
|
| 21 |
+
trim_to_apps: Optional[Set[str]] = ...,
|
| 22 |
+
convert_apps: Optional[Set[str]] = ...,
|
| 23 |
+
migration_name: Optional[str] = ...,
|
| 24 |
+
) -> Dict[str, List[Migration]]: ...
|
| 25 |
+
def deep_deconstruct(self, obj: Any) -> Any: ...
|
| 26 |
+
def only_relation_agnostic_fields(
|
| 27 |
+
self, fields: List[Tuple[str, Field]]
|
| 28 |
+
) -> List[Tuple[str, List[Any], Dict[str, Union[Callable, int, str]]]]: ...
|
| 29 |
+
def check_dependency(
|
| 30 |
+
self, operation: Operation, dependency: Tuple[str, str, Optional[str], Union[bool, str]]
|
| 31 |
+
) -> bool: ...
|
| 32 |
+
def add_operation(
|
| 33 |
+
self,
|
| 34 |
+
app_label: str,
|
| 35 |
+
operation: Operation,
|
| 36 |
+
dependencies: Optional[List[Tuple[str, str, Optional[str], Union[bool, str]]]] = ...,
|
| 37 |
+
beginning: bool = ...,
|
| 38 |
+
) -> None: ...
|
| 39 |
+
def swappable_first_key(self, item: Tuple[str, str]) -> Tuple[str, str]: ...
|
| 40 |
+
renamed_models: Any = ...
|
| 41 |
+
renamed_models_rel: Any = ...
|
| 42 |
+
def generate_renamed_models(self) -> None: ...
|
| 43 |
+
def generate_created_models(self) -> None: ...
|
| 44 |
+
def generate_created_proxies(self) -> None: ...
|
| 45 |
+
def generate_deleted_models(self) -> None: ...
|
| 46 |
+
def generate_deleted_proxies(self) -> None: ...
|
| 47 |
+
renamed_fields: Any = ...
|
| 48 |
+
def generate_renamed_fields(self) -> None: ...
|
| 49 |
+
def generate_added_fields(self) -> None: ...
|
| 50 |
+
def generate_removed_fields(self) -> None: ...
|
| 51 |
+
def generate_altered_fields(self) -> None: ...
|
| 52 |
+
def create_altered_indexes(self) -> None: ...
|
| 53 |
+
def generate_added_indexes(self) -> None: ...
|
| 54 |
+
def generate_removed_indexes(self) -> None: ...
|
| 55 |
+
def generate_altered_unique_together(self) -> None: ...
|
| 56 |
+
def generate_altered_index_together(self) -> None: ...
|
| 57 |
+
def generate_altered_db_table(self) -> None: ...
|
| 58 |
+
def generate_altered_options(self) -> None: ...
|
| 59 |
+
def generate_altered_order_with_respect_to(self) -> None: ...
|
| 60 |
+
def generate_altered_managers(self) -> None: ...
|
| 61 |
+
def arrange_for_graph(
|
| 62 |
+
self, changes: Dict[str, List[Migration]], graph: MigrationGraph, migration_name: Optional[str] = ...
|
| 63 |
+
) -> Dict[str, List[Migration]]: ...
|
| 64 |
+
@classmethod
|
| 65 |
+
def suggest_name(cls, ops: List[Operation]) -> str: ...
|
| 66 |
+
@classmethod
|
| 67 |
+
def parse_number(cls, name: str) -> int: ...
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/db/migrations/operations/base.pyi
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, List
|
| 2 |
+
|
| 3 |
+
class Operation:
|
| 4 |
+
reversible: bool = ...
|
| 5 |
+
reduces_to_sql: bool = ...
|
| 6 |
+
atomic: bool = ...
|
| 7 |
+
elidable: bool = ...
|
| 8 |
+
serialization_expand_args: Any = ...
|
| 9 |
+
def deconstruct(self): ...
|
| 10 |
+
def state_forwards(self, app_label: Any, state: Any) -> None: ...
|
| 11 |
+
def database_forwards(self, app_label: Any, schema_editor: Any, from_state: Any, to_state: Any) -> None: ...
|
| 12 |
+
def database_backwards(self, app_label: Any, schema_editor: Any, from_state: Any, to_state: Any) -> None: ...
|
| 13 |
+
def describe(self): ...
|
| 14 |
+
def references_model(self, name: str, app_label: str = ...) -> bool: ...
|
| 15 |
+
def references_field(self, model_name: str, name: str, app_label: str = ...) -> bool: ...
|
| 16 |
+
def allow_migrate_model(self, connection_alias: Any, model: Any): ...
|
| 17 |
+
def reduce(self, operation: Operation, in_between: List[Operation], app_label: str = ...) -> bool: ...
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/db/migrations/operations/models.pyi
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Collection, Dict, List, Optional, Sequence, Tuple, Union
|
| 2 |
+
|
| 3 |
+
from django.db.migrations.operations.base import Operation
|
| 4 |
+
from django.db.models.indexes import Index
|
| 5 |
+
from django.db.models.manager import Manager
|
| 6 |
+
|
| 7 |
+
from django.db.models.constraints import BaseConstraint
|
| 8 |
+
from django.db.models.fields import Field
|
| 9 |
+
|
| 10 |
+
class ModelOperation(Operation):
|
| 11 |
+
name: str = ...
|
| 12 |
+
def __init__(self, name: str) -> None: ...
|
| 13 |
+
def name_lower(self) -> str: ...
|
| 14 |
+
|
| 15 |
+
class CreateModel(ModelOperation):
|
| 16 |
+
fields: Sequence[Tuple[str, Field]] = ...
|
| 17 |
+
options: Any = ...
|
| 18 |
+
bases: Optional[Sequence[Union[type, str]]] = ...
|
| 19 |
+
managers: Optional[Sequence[Tuple[str, Manager]]] = ...
|
| 20 |
+
def __init__(
|
| 21 |
+
self,
|
| 22 |
+
name: str,
|
| 23 |
+
fields: Sequence[Tuple[str, Field]],
|
| 24 |
+
options: Optional[Dict[str, Any]] = ...,
|
| 25 |
+
bases: Optional[Sequence[Union[type, str]]] = ...,
|
| 26 |
+
managers: Optional[Sequence[Tuple[str, Manager]]] = ...,
|
| 27 |
+
) -> None: ...
|
| 28 |
+
def model_to_key(self, model: str) -> List[str]: ...
|
| 29 |
+
|
| 30 |
+
class DeleteModel(ModelOperation): ...
|
| 31 |
+
|
| 32 |
+
class RenameModel(ModelOperation):
|
| 33 |
+
old_name: Any = ...
|
| 34 |
+
new_name: Any = ...
|
| 35 |
+
def __init__(self, old_name: str, new_name: str) -> None: ...
|
| 36 |
+
def old_name_lower(self) -> str: ...
|
| 37 |
+
def new_name_lower(self) -> str: ...
|
| 38 |
+
|
| 39 |
+
class AlterModelTable(ModelOperation):
|
| 40 |
+
table: Optional[str] = ...
|
| 41 |
+
def __init__(self, name: str, table: Optional[str]) -> None: ...
|
| 42 |
+
|
| 43 |
+
class ModelOptionOperation(ModelOperation): ...
|
| 44 |
+
class FieldRelatedOptionOperation(ModelOptionOperation): ...
|
| 45 |
+
|
| 46 |
+
class AlterUniqueTogether(FieldRelatedOptionOperation):
|
| 47 |
+
option_name: str = ...
|
| 48 |
+
unique_together: Collection[Sequence[str]] = ...
|
| 49 |
+
def __init__(self, name: str, unique_together: Optional[Collection[Sequence[str]]]) -> None: ...
|
| 50 |
+
|
| 51 |
+
class AlterIndexTogether(FieldRelatedOptionOperation):
|
| 52 |
+
option_name: str = ...
|
| 53 |
+
index_together: Collection[Sequence[str]] = ...
|
| 54 |
+
def __init__(self, name: str, index_together: Optional[Collection[Sequence[str]]]) -> None: ...
|
| 55 |
+
|
| 56 |
+
class AlterOrderWithRespectTo(FieldRelatedOptionOperation):
|
| 57 |
+
order_with_respect_to: str = ...
|
| 58 |
+
def __init__(self, name: str, order_with_respect_to: str) -> None: ...
|
| 59 |
+
|
| 60 |
+
class AlterModelOptions(ModelOptionOperation):
|
| 61 |
+
ALTER_OPTION_KEYS: Any = ...
|
| 62 |
+
options: Dict[str, str] = ...
|
| 63 |
+
def __init__(self, name: str, options: Dict[str, Any]) -> None: ...
|
| 64 |
+
|
| 65 |
+
class AlterModelManagers(ModelOptionOperation):
|
| 66 |
+
managers: Any = ...
|
| 67 |
+
def __init__(self, name: Any, managers: Any) -> None: ...
|
| 68 |
+
|
| 69 |
+
class IndexOperation(Operation):
|
| 70 |
+
option_name: str = ...
|
| 71 |
+
def model_name_lower(self): ...
|
| 72 |
+
|
| 73 |
+
class AddIndex(IndexOperation):
|
| 74 |
+
model_name: str = ...
|
| 75 |
+
index: Index = ...
|
| 76 |
+
def __init__(self, model_name: str, index: Union[str, Index]) -> None: ...
|
| 77 |
+
|
| 78 |
+
class RemoveIndex(IndexOperation):
|
| 79 |
+
model_name: str = ...
|
| 80 |
+
name: str = ...
|
| 81 |
+
def __init__(self, model_name: str, name: Union[str, Index]) -> None: ...
|
| 82 |
+
|
| 83 |
+
class AddConstraint(IndexOperation):
|
| 84 |
+
def __init__(self, model_name: str, constraint: BaseConstraint): ...
|
| 85 |
+
|
| 86 |
+
class RemoveConstraint(IndexOperation):
|
| 87 |
+
def __init__(self, model_name: str, name: str) -> None: ...
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/db/migrations/utils.pyi
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any
|
| 2 |
+
|
| 3 |
+
COMPILED_REGEX_TYPE: Any
|
| 4 |
+
|
| 5 |
+
class RegexObject:
|
| 6 |
+
pattern: str = ...
|
| 7 |
+
flags: int = ...
|
| 8 |
+
def __init__(self, obj: Any) -> None: ...
|
| 9 |
+
|
| 10 |
+
def get_migration_name_timestamp() -> str: ...
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/db/migrations/writer.pyi
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, List, Set, Tuple, Union, Type
|
| 2 |
+
|
| 3 |
+
from django.db.migrations.migration import Migration
|
| 4 |
+
from django.db.migrations.operations.base import Operation
|
| 5 |
+
from django.db.migrations.operations.models import CreateModel
|
| 6 |
+
from django.db.migrations.serializer import BaseSerializer
|
| 7 |
+
|
| 8 |
+
class SettingsReference(str):
|
| 9 |
+
def __init__(self, value: str, setting_name: str) -> None: ...
|
| 10 |
+
|
| 11 |
+
class OperationWriter:
|
| 12 |
+
operation: CreateModel = ...
|
| 13 |
+
buff: List[Any] = ...
|
| 14 |
+
indentation: int = ...
|
| 15 |
+
def __init__(self, operation: Operation, indentation: int = ...) -> None: ...
|
| 16 |
+
def serialize(self) -> Tuple[str, Set[str]]: ...
|
| 17 |
+
def indent(self) -> None: ...
|
| 18 |
+
def unindent(self) -> None: ...
|
| 19 |
+
def feed(self, line: str) -> None: ...
|
| 20 |
+
def render(self) -> str: ...
|
| 21 |
+
|
| 22 |
+
class MigrationWriter:
|
| 23 |
+
migration: Migration = ...
|
| 24 |
+
needs_manual_porting: bool = ...
|
| 25 |
+
def __init__(self, migration: Union[type, Migration], include_header: bool = ...) -> None: ...
|
| 26 |
+
def as_string(self) -> str: ...
|
| 27 |
+
@property
|
| 28 |
+
def basedir(self) -> str: ...
|
| 29 |
+
@property
|
| 30 |
+
def filename(self) -> str: ...
|
| 31 |
+
@property
|
| 32 |
+
def path(self) -> str: ...
|
| 33 |
+
@classmethod
|
| 34 |
+
def serialize(cls, value: Any) -> Tuple[str, Set[str]]: ...
|
| 35 |
+
@classmethod
|
| 36 |
+
def register_serializer(cls, type_: type, serializer: Type[BaseSerializer]) -> None: ...
|
| 37 |
+
@classmethod
|
| 38 |
+
def unregister_serializer(cls, type_: type) -> None: ...
|
| 39 |
+
|
| 40 |
+
MIGRATION_TEMPLATE: str
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/db/models/fields/related_lookups.pyi
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections import OrderedDict
|
| 2 |
+
from typing import Any, List, Tuple, Type, Iterable
|
| 3 |
+
|
| 4 |
+
from django.db.models.expressions import Expression
|
| 5 |
+
from django.db.models.lookups import (
|
| 6 |
+
BuiltinLookup,
|
| 7 |
+
Exact,
|
| 8 |
+
GreaterThan,
|
| 9 |
+
GreaterThanOrEqual,
|
| 10 |
+
In,
|
| 11 |
+
IsNull,
|
| 12 |
+
LessThan,
|
| 13 |
+
LessThanOrEqual,
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
from django.db.models.fields import Field
|
| 17 |
+
|
| 18 |
+
class MultiColSource:
|
| 19 |
+
alias: str
|
| 20 |
+
field: Field
|
| 21 |
+
sources: Tuple[Field, Field]
|
| 22 |
+
targets: Tuple[Field, Field]
|
| 23 |
+
contains_aggregate: bool = ...
|
| 24 |
+
output_field: Field = ...
|
| 25 |
+
def __init__(
|
| 26 |
+
self, alias: str, targets: Tuple[Field, Field], sources: Tuple[Field, Field], field: Field
|
| 27 |
+
) -> None: ...
|
| 28 |
+
def relabeled_clone(self, relabels: OrderedDict) -> MultiColSource: ...
|
| 29 |
+
def get_lookup(self, lookup: str) -> Type[BuiltinLookup]: ...
|
| 30 |
+
|
| 31 |
+
def get_normalized_value(value: Any, lhs: Expression) -> Tuple[None]: ...
|
| 32 |
+
|
| 33 |
+
class RelatedIn(In):
|
| 34 |
+
bilateral_transforms: List[Any]
|
| 35 |
+
lhs: Expression
|
| 36 |
+
rhs: Any = ...
|
| 37 |
+
def get_prep_lookup(self) -> Iterable[Any]: ...
|
| 38 |
+
|
| 39 |
+
class RelatedLookupMixin:
|
| 40 |
+
rhs: Any = ...
|
| 41 |
+
def get_prep_lookup(self) -> Any: ...
|
| 42 |
+
|
| 43 |
+
class RelatedExact(RelatedLookupMixin, Exact): ...
|
| 44 |
+
class RelatedLessThan(RelatedLookupMixin, LessThan): ...
|
| 45 |
+
class RelatedGreaterThan(RelatedLookupMixin, GreaterThan): ...
|
| 46 |
+
class RelatedGreaterThanOrEqual(RelatedLookupMixin, GreaterThanOrEqual): ...
|
| 47 |
+
class RelatedLessThanOrEqual(RelatedLookupMixin, LessThanOrEqual): ...
|
| 48 |
+
class RelatedIsNull(RelatedLookupMixin, IsNull): ...
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/db/models/fields/reverse_related.pyi
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
|
| 2 |
+
|
| 3 |
+
from django.db.models.base import Model
|
| 4 |
+
from django.db.models.fields.related import ForeignKey, OneToOneField, RelatedField
|
| 5 |
+
from django.db.models.lookups import BuiltinLookup, StartsWith
|
| 6 |
+
from django.db.models.query_utils import FilteredRelation, PathInfo
|
| 7 |
+
from django.db.models.sql.where import WhereNode
|
| 8 |
+
|
| 9 |
+
from django.db.models.fields import AutoField, Field
|
| 10 |
+
from .mixins import FieldCacheMixin
|
| 11 |
+
|
| 12 |
+
class ForeignObjectRel(FieldCacheMixin):
|
| 13 |
+
many_to_many: bool
|
| 14 |
+
many_to_one: bool
|
| 15 |
+
one_to_many: bool
|
| 16 |
+
one_to_one: bool
|
| 17 |
+
auto_created: bool = ...
|
| 18 |
+
concrete: bool = ...
|
| 19 |
+
editable: bool = ...
|
| 20 |
+
is_relation: bool = ...
|
| 21 |
+
related_model: Type[Model]
|
| 22 |
+
null: bool = ...
|
| 23 |
+
field: RelatedField = ...
|
| 24 |
+
model: Union[Type[Model], str] = ...
|
| 25 |
+
related_name: Optional[str] = ...
|
| 26 |
+
related_query_name: Optional[str] = ...
|
| 27 |
+
limit_choices_to: Optional[Union[Dict[str, Any], Callable[[], Any]]] = ...
|
| 28 |
+
parent_link: bool = ...
|
| 29 |
+
on_delete: Callable = ...
|
| 30 |
+
symmetrical: bool = ...
|
| 31 |
+
multiple: bool = ...
|
| 32 |
+
field_name: Optional[str] = ...
|
| 33 |
+
def __init__(
|
| 34 |
+
self,
|
| 35 |
+
field: RelatedField,
|
| 36 |
+
to: Union[Type[Model], str],
|
| 37 |
+
related_name: Optional[str] = ...,
|
| 38 |
+
related_query_name: Optional[str] = ...,
|
| 39 |
+
limit_choices_to: Optional[Union[Dict[str, Any], Callable[[], Any]]] = ...,
|
| 40 |
+
parent_link: bool = ...,
|
| 41 |
+
on_delete: Optional[Callable] = ...,
|
| 42 |
+
) -> None: ...
|
| 43 |
+
@property
|
| 44 |
+
def hidden(self) -> bool: ...
|
| 45 |
+
@property
|
| 46 |
+
def name(self) -> str: ...
|
| 47 |
+
@property
|
| 48 |
+
def remote_field(self) -> RelatedField: ...
|
| 49 |
+
@property
|
| 50 |
+
def target_field(self) -> AutoField: ...
|
| 51 |
+
def get_lookup(self, lookup_name: str) -> Type[BuiltinLookup]: ...
|
| 52 |
+
def get_internal_type(self) -> str: ...
|
| 53 |
+
@property
|
| 54 |
+
def db_type(self) -> Callable: ...
|
| 55 |
+
def get_choices(
|
| 56 |
+
self, include_blank: bool = ..., blank_choice: List[Tuple[str, str]] = ...
|
| 57 |
+
) -> List[Tuple[int, str]]: ...
|
| 58 |
+
def is_hidden(self) -> bool: ...
|
| 59 |
+
def get_joining_columns(self) -> Tuple: ...
|
| 60 |
+
def get_extra_restriction(
|
| 61 |
+
self, where_class: Type[WhereNode], alias: str, related_alias: str
|
| 62 |
+
) -> Optional[Union[StartsWith, WhereNode]]: ...
|
| 63 |
+
def set_field_name(self) -> None: ...
|
| 64 |
+
def get_accessor_name(self, model: Optional[Type[Model]] = ...) -> Optional[str]: ...
|
| 65 |
+
def get_path_info(self, filtered_relation: Optional[FilteredRelation] = ...) -> List[PathInfo]: ...
|
| 66 |
+
|
| 67 |
+
class ManyToOneRel(ForeignObjectRel):
|
| 68 |
+
def __init__(
|
| 69 |
+
self,
|
| 70 |
+
field: ForeignKey,
|
| 71 |
+
to: Union[Type[Model], str],
|
| 72 |
+
field_name: Optional[str],
|
| 73 |
+
related_name: Optional[str] = ...,
|
| 74 |
+
related_query_name: Optional[str] = ...,
|
| 75 |
+
limit_choices_to: Optional[Union[Dict[str, Any], Callable[[], Any]]] = ...,
|
| 76 |
+
parent_link: bool = ...,
|
| 77 |
+
on_delete: Callable = ...,
|
| 78 |
+
) -> None: ...
|
| 79 |
+
def get_related_field(self) -> Field: ...
|
| 80 |
+
|
| 81 |
+
class OneToOneRel(ManyToOneRel):
|
| 82 |
+
def __init__(
|
| 83 |
+
self,
|
| 84 |
+
field: OneToOneField,
|
| 85 |
+
to: Union[Type[Model], str],
|
| 86 |
+
field_name: Optional[str],
|
| 87 |
+
related_name: Optional[str] = ...,
|
| 88 |
+
related_query_name: Optional[str] = ...,
|
| 89 |
+
limit_choices_to: Optional[Dict[str, str]] = ...,
|
| 90 |
+
parent_link: bool = ...,
|
| 91 |
+
on_delete: Callable = ...,
|
| 92 |
+
) -> None: ...
|
| 93 |
+
|
| 94 |
+
class ManyToManyRel(ForeignObjectRel):
|
| 95 |
+
through: Optional[Union[Type[Model], str]] = ...
|
| 96 |
+
through_fields: Optional[Tuple[str, str]] = ...
|
| 97 |
+
db_constraint: bool = ...
|
| 98 |
+
def __init__(
|
| 99 |
+
self,
|
| 100 |
+
field: RelatedField,
|
| 101 |
+
to: Union[Type[Model], str],
|
| 102 |
+
related_name: Optional[str] = ...,
|
| 103 |
+
related_query_name: Optional[str] = ...,
|
| 104 |
+
limit_choices_to: Any = ...,
|
| 105 |
+
symmetrical: bool = ...,
|
| 106 |
+
through: Optional[Union[Type[Model], str]] = ...,
|
| 107 |
+
through_fields: Optional[Tuple[str, str]] = ...,
|
| 108 |
+
db_constraint: bool = ...,
|
| 109 |
+
) -> None: ...
|
| 110 |
+
def get_related_field(self) -> Field: ...
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/db/models/sql/__init__.pyi
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .query import Query as Query, RawQuery as RawQuery
|
| 2 |
+
|
| 3 |
+
from .subqueries import (
|
| 4 |
+
InsertQuery as InsertQuery,
|
| 5 |
+
AggregateQuery as AggregateQuery,
|
| 6 |
+
DeleteQuery as DeleteQuery,
|
| 7 |
+
UpdateQuery as UpdateQuery,
|
| 8 |
+
)
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/db/models/sql/constants.pyi
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, Pattern, Tuple
|
| 2 |
+
|
| 3 |
+
GET_ITERATOR_CHUNK_SIZE: int = ...
|
| 4 |
+
|
| 5 |
+
MULTI: str = ...
|
| 6 |
+
SINGLE: str = ...
|
| 7 |
+
CURSOR: str = ...
|
| 8 |
+
NO_RESULTS: str = ...
|
| 9 |
+
|
| 10 |
+
ORDER_PATTERN: Pattern = ...
|
| 11 |
+
ORDER_DIR: Dict[str, Tuple[str, str]] = ...
|
| 12 |
+
|
| 13 |
+
INNER: str = ...
|
| 14 |
+
LOUTER: str = ...
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/db/models/sql/datastructures.pyi
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections import OrderedDict
|
| 2 |
+
from typing import Any, Dict, List, Optional, Tuple, Union
|
| 3 |
+
|
| 4 |
+
from django.db.models.fields.mixins import FieldCacheMixin
|
| 5 |
+
from django.db.models.query_utils import FilteredRelation, PathInfo
|
| 6 |
+
from django.db.models.sql.compiler import SQLCompiler
|
| 7 |
+
|
| 8 |
+
class MultiJoin(Exception):
|
| 9 |
+
level: int = ...
|
| 10 |
+
names_with_path: List[Tuple[str, List[PathInfo]]] = ...
|
| 11 |
+
def __init__(self, names_pos: int, path_with_names: List[Tuple[str, List[PathInfo]]]) -> None: ...
|
| 12 |
+
|
| 13 |
+
class Empty: ...
|
| 14 |
+
|
| 15 |
+
class Join:
|
| 16 |
+
table_name: str = ...
|
| 17 |
+
parent_alias: str = ...
|
| 18 |
+
table_alias: Optional[str] = ...
|
| 19 |
+
join_type: str = ...
|
| 20 |
+
join_cols: Tuple = ...
|
| 21 |
+
join_field: FieldCacheMixin = ...
|
| 22 |
+
nullable: bool = ...
|
| 23 |
+
filtered_relation: Optional[FilteredRelation] = ...
|
| 24 |
+
def __init__(
|
| 25 |
+
self,
|
| 26 |
+
table_name: str,
|
| 27 |
+
parent_alias: str,
|
| 28 |
+
table_alias: Optional[str],
|
| 29 |
+
join_type: str,
|
| 30 |
+
join_field: FieldCacheMixin,
|
| 31 |
+
nullable: bool,
|
| 32 |
+
filtered_relation: Optional[FilteredRelation] = ...,
|
| 33 |
+
) -> None: ...
|
| 34 |
+
def as_sql(self, compiler: SQLCompiler, connection: Any) -> Tuple[str, List[Union[int, str]]]: ...
|
| 35 |
+
def relabeled_clone(self, change_map: Union[Dict[str, str], OrderedDict]) -> Join: ...
|
| 36 |
+
def equals(self, other: Union[BaseTable, Join], with_filtered_relation: bool) -> bool: ...
|
| 37 |
+
def demote(self) -> Join: ...
|
| 38 |
+
def promote(self) -> Join: ...
|
| 39 |
+
|
| 40 |
+
class BaseTable:
|
| 41 |
+
join_type: Any = ...
|
| 42 |
+
parent_alias: Any = ...
|
| 43 |
+
filtered_relation: Any = ...
|
| 44 |
+
table_name: str = ...
|
| 45 |
+
table_alias: Optional[str] = ...
|
| 46 |
+
def __init__(self, table_name: str, alias: Optional[str]) -> None: ...
|
| 47 |
+
def as_sql(self, compiler: SQLCompiler, connection: Any) -> Tuple[str, List[Any]]: ...
|
| 48 |
+
def relabeled_clone(self, change_map: OrderedDict) -> BaseTable: ...
|
| 49 |
+
def equals(self, other: Join, with_filtered_relation: bool) -> bool: ...
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/db/models/sql/subqueries.pyi
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Dict, Iterable, List, Optional, Tuple, Type, Union
|
| 2 |
+
|
| 3 |
+
from django.db.models.base import Model
|
| 4 |
+
from django.db.models.expressions import Case
|
| 5 |
+
from django.db.models.query import QuerySet
|
| 6 |
+
from django.db.models.sql.query import Query
|
| 7 |
+
from django.db.models.sql.where import WhereNode
|
| 8 |
+
|
| 9 |
+
from django.db.models.fields import Field
|
| 10 |
+
|
| 11 |
+
class DeleteQuery(Query):
|
| 12 |
+
select: Tuple
|
| 13 |
+
where_class: Type[WhereNode]
|
| 14 |
+
where: WhereNode = ...
|
| 15 |
+
def do_query(self, table: str, where: WhereNode, using: str) -> int: ...
|
| 16 |
+
def delete_batch(self, pk_list: Union[List[int], List[str]], using: str) -> int: ...
|
| 17 |
+
def delete_qs(self, query: QuerySet, using: str) -> int: ...
|
| 18 |
+
|
| 19 |
+
class UpdateQuery(Query):
|
| 20 |
+
select: Tuple
|
| 21 |
+
where_class: Type[WhereNode]
|
| 22 |
+
def __init__(self, *args: Any, **kwargs: Any) -> None: ...
|
| 23 |
+
where: WhereNode = ...
|
| 24 |
+
def update_batch(self, pk_list: List[int], values: Dict[str, Optional[int]], using: str) -> None: ...
|
| 25 |
+
def add_update_values(self, values: Dict[str, Any]) -> None: ...
|
| 26 |
+
def add_update_fields(self, values_seq: List[Tuple[Field, Optional[Type[Model]], Case]]) -> None: ...
|
| 27 |
+
def add_related_update(self, model: Type[Model], field: Field, value: Union[int, str]) -> None: ...
|
| 28 |
+
def get_related_updates(self) -> List[UpdateQuery]: ...
|
| 29 |
+
|
| 30 |
+
class InsertQuery(Query):
|
| 31 |
+
select: Tuple
|
| 32 |
+
where: WhereNode
|
| 33 |
+
where_class: Type[WhereNode]
|
| 34 |
+
fields: Iterable[Field] = ...
|
| 35 |
+
objs: List[Model] = ...
|
| 36 |
+
raw: bool = ...
|
| 37 |
+
def __init__(self, *args: Any, **kwargs: Any) -> None: ...
|
| 38 |
+
def insert_values(self, fields: Iterable[Field], objs: List[Model], raw: bool = ...) -> None: ...
|
| 39 |
+
|
| 40 |
+
class AggregateQuery(Query):
|
| 41 |
+
select: Tuple
|
| 42 |
+
sub_params: Tuple
|
| 43 |
+
where: WhereNode
|
| 44 |
+
where_class: Type[WhereNode]
|
| 45 |
+
def add_subquery(self, query: Query, using: str) -> None: ...
|
mantis_evalkit/lib/python3.10/site-packages/jedi/third_party/django-stubs/django-stubs/db/models/sql/where.pyi
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections import OrderedDict
|
| 2 |
+
from typing import Any, Dict, List, Optional, Tuple, Union
|
| 3 |
+
|
| 4 |
+
from django.db.models.expressions import Expression
|
| 5 |
+
from django.db.models.sql.compiler import SQLCompiler
|
| 6 |
+
from django.db.models.sql.query import Query
|
| 7 |
+
from django.utils import tree
|
| 8 |
+
|
| 9 |
+
AND: str
|
| 10 |
+
OR: str
|
| 11 |
+
|
| 12 |
+
class WhereNode(tree.Node):
|
| 13 |
+
connector: str
|
| 14 |
+
contains_aggregate: bool
|
| 15 |
+
contains_over_clause: bool
|
| 16 |
+
negated: bool
|
| 17 |
+
default: Any = ...
|
| 18 |
+
resolved: bool = ...
|
| 19 |
+
conditional: bool = ...
|
| 20 |
+
def split_having(self, negated: bool = ...) -> Tuple[Optional[WhereNode], Optional[WhereNode]]: ...
|
| 21 |
+
def as_sql(self, compiler: SQLCompiler, connection: Any) -> Any: ...
|
| 22 |
+
def get_group_by_cols(self) -> List[Expression]: ...
|
| 23 |
+
def relabel_aliases(self, change_map: Union[Dict[Optional[str], str], OrderedDict]) -> None: ...
|
| 24 |
+
def clone(self) -> WhereNode: ...
|
| 25 |
+
def relabeled_clone(self, change_map: Union[Dict[Optional[str], str], OrderedDict]) -> WhereNode: ...
|
| 26 |
+
def resolve_expression(self, *args: Any, **kwargs: Any) -> WhereNode: ...
|
| 27 |
+
|
| 28 |
+
class NothingNode:
|
| 29 |
+
contains_aggregate: bool = ...
|
| 30 |
+
def as_sql(self, compiler: SQLCompiler = ..., connection: Any = ...) -> Any: ...
|
| 31 |
+
|
| 32 |
+
class ExtraWhere:
|
| 33 |
+
contains_aggregate: bool = ...
|
| 34 |
+
sqls: List[str] = ...
|
| 35 |
+
params: Optional[Union[List[int], List[str]]] = ...
|
| 36 |
+
def __init__(self, sqls: List[str], params: Optional[Union[List[int], List[str]]]) -> None: ...
|
| 37 |
+
def as_sql(self, compiler: SQLCompiler = ..., connection: Any = ...) -> Tuple[str, Union[List[int], List[str]]]: ...
|
| 38 |
+
|
| 39 |
+
class SubqueryConstraint:
|
| 40 |
+
contains_aggregate: bool = ...
|
| 41 |
+
alias: str = ...
|
| 42 |
+
columns: List[str] = ...
|
| 43 |
+
targets: List[str] = ...
|
| 44 |
+
query_object: Query = ...
|
| 45 |
+
def __init__(self, alias: str, columns: List[str], targets: List[str], query_object: Query) -> None: ...
|
| 46 |
+
def as_sql(self, compiler: SQLCompiler, connection: Any) -> Tuple[str, Tuple]: ...
|
moondream/lib/python3.10/site-packages/sympy/core/__pycache__/numbers.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:664d8fdbb3bf4a5cd7cc0392aa3f4820db05b7d0f40376589a0fe53c5c6a4b46
|
| 3 |
+
size 118094
|
moondream/lib/python3.10/site-packages/sympy/polys/__pycache__/polyquinticconst.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:256cd4bffb598483da5a6dc2b2e889b8cecefcde8623a4986d929a516b9bba80
|
| 3 |
+
size 132096
|
moondream/lib/python3.10/site-packages/torch/include/ATen/Dimname.h
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
#include <ATen/core/Dimname.h>
|
moondream/lib/python3.10/site-packages/torch/include/ATen/RedispatchFunctions.h
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
moondream/lib/python3.10/site-packages/torch/include/ATen/Tensor.h
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/Tensor.h>
|
moondream/lib/python3.10/site-packages/torch/include/ATen/TensorUtils.h
ADDED
|
@@ -0,0 +1,190 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/DimVector.h>
|
| 4 |
+
#include <ATen/EmptyTensor.h>
|
| 5 |
+
#include <ATen/Tensor.h>
|
| 6 |
+
#include <ATen/TensorGeometry.h>
|
| 7 |
+
#include <ATen/Utils.h>
|
| 8 |
+
|
| 9 |
+
#include <utility>
|
| 10 |
+
|
| 11 |
+
// These functions are NOT in Utils.h, because this file has a dep on Tensor.h
|
| 12 |
+
|
| 13 |
+
#define TORCH_CHECK_TENSOR_ALL(cond, ...) \
|
| 14 |
+
TORCH_CHECK((cond)._is_all_true().item<bool>(), __VA_ARGS__);
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
// The following are utility functions for checking that arguments
|
| 19 |
+
// make sense. These are particularly useful for native functions,
|
| 20 |
+
// which do NO argument checking by default.
|
| 21 |
+
|
| 22 |
+
struct TORCH_API TensorArg {
|
| 23 |
+
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
|
| 24 |
+
const Tensor& tensor;
|
| 25 |
+
const char* name;
|
| 26 |
+
int pos; // 1-indexed
|
| 27 |
+
TensorArg(const Tensor& tensor, const char* name, int pos)
|
| 28 |
+
: tensor(tensor), name(name), pos(pos) {}
|
| 29 |
+
// Try to mitigate any possibility of dangling reference to temporaries.
|
| 30 |
+
// NOLINTNEXTLINE(cppcoreguidelines-rvalue-reference-param-not-moved)
|
| 31 |
+
TensorArg(Tensor&& tensor, const char* name, int pos) = delete;
|
| 32 |
+
const Tensor* operator->() const {
|
| 33 |
+
return &tensor;
|
| 34 |
+
}
|
| 35 |
+
const Tensor& operator*() const {
|
| 36 |
+
return tensor;
|
| 37 |
+
}
|
| 38 |
+
};
|
| 39 |
+
|
| 40 |
+
struct TORCH_API TensorGeometryArg {
|
| 41 |
+
TensorGeometry tensor;
|
| 42 |
+
const char* name;
|
| 43 |
+
int pos; // 1-indexed
|
| 44 |
+
/* implicit */ TensorGeometryArg(TensorArg arg)
|
| 45 |
+
: tensor(TensorGeometry{arg.tensor}), name(arg.name), pos(arg.pos) {}
|
| 46 |
+
TensorGeometryArg(TensorGeometry tensor, const char* name, int pos)
|
| 47 |
+
: tensor(std::move(tensor)), name(name), pos(pos) {}
|
| 48 |
+
const TensorGeometry* operator->() const {
|
| 49 |
+
return &tensor;
|
| 50 |
+
}
|
| 51 |
+
const TensorGeometry& operator*() const {
|
| 52 |
+
return tensor;
|
| 53 |
+
}
|
| 54 |
+
};
|
| 55 |
+
|
| 56 |
+
// A string describing which function did checks on its input
|
| 57 |
+
// arguments.
|
| 58 |
+
// TODO: Consider generalizing this into a call stack.
|
| 59 |
+
using CheckedFrom = const char*;
|
| 60 |
+
|
| 61 |
+
// The undefined convention: singular operators assume their arguments
|
| 62 |
+
// are defined, but functions which take multiple tensors will
|
| 63 |
+
// implicitly filter out undefined tensors (to make it easier to perform
|
| 64 |
+
// tests which should apply if the tensor is defined, and should not
|
| 65 |
+
// otherwise.)
|
| 66 |
+
//
|
| 67 |
+
// NB: This means that the n-ary operators take lists of TensorArg,
|
| 68 |
+
// not TensorGeometryArg, because the Tensor to TensorGeometry
|
| 69 |
+
// conversion will blow up if you have undefined tensors.
|
| 70 |
+
|
| 71 |
+
TORCH_API std::ostream& operator<<(
|
| 72 |
+
std::ostream& out,
|
| 73 |
+
const TensorGeometryArg& t);
|
| 74 |
+
TORCH_API void checkDim(
|
| 75 |
+
CheckedFrom c,
|
| 76 |
+
const Tensor& tensor,
|
| 77 |
+
const char* name,
|
| 78 |
+
int pos, // 1-indexed
|
| 79 |
+
int64_t dim);
|
| 80 |
+
TORCH_API void checkDim(CheckedFrom c, const TensorGeometryArg& t, int64_t dim);
|
| 81 |
+
// NB: this is an inclusive-exclusive range
|
| 82 |
+
TORCH_API void checkDimRange(
|
| 83 |
+
CheckedFrom c,
|
| 84 |
+
const TensorGeometryArg& t,
|
| 85 |
+
int64_t dim_start,
|
| 86 |
+
int64_t dim_end);
|
| 87 |
+
TORCH_API void checkSameDim(
|
| 88 |
+
CheckedFrom c,
|
| 89 |
+
const TensorGeometryArg& t1,
|
| 90 |
+
const TensorGeometryArg& t2);
|
| 91 |
+
TORCH_API void checkContiguous(CheckedFrom c, const TensorGeometryArg& t);
|
| 92 |
+
TORCH_API void checkAllContiguous(CheckedFrom c, at::ArrayRef<TensorArg> ts);
|
| 93 |
+
TORCH_API void checkSize(
|
| 94 |
+
CheckedFrom c,
|
| 95 |
+
const TensorGeometryArg& t,
|
| 96 |
+
IntArrayRef sizes);
|
| 97 |
+
TORCH_API void checkSize_symint(
|
| 98 |
+
CheckedFrom c,
|
| 99 |
+
const TensorGeometryArg& t,
|
| 100 |
+
c10::SymIntArrayRef sizes);
|
| 101 |
+
TORCH_API void checkSize(
|
| 102 |
+
CheckedFrom c,
|
| 103 |
+
const TensorGeometryArg& t,
|
| 104 |
+
int64_t dim,
|
| 105 |
+
int64_t size);
|
| 106 |
+
TORCH_API void checkSize_symint(
|
| 107 |
+
CheckedFrom c,
|
| 108 |
+
const TensorGeometryArg& t,
|
| 109 |
+
int64_t dim,
|
| 110 |
+
const c10::SymInt& size);
|
| 111 |
+
TORCH_API void checkNumel(
|
| 112 |
+
CheckedFrom c,
|
| 113 |
+
const TensorGeometryArg& t,
|
| 114 |
+
int64_t numel);
|
| 115 |
+
TORCH_API void checkSameNumel(
|
| 116 |
+
CheckedFrom c,
|
| 117 |
+
const TensorArg& t1,
|
| 118 |
+
const TensorArg& t2);
|
| 119 |
+
TORCH_API void checkAllSameNumel(CheckedFrom c, ArrayRef<TensorArg> tensors);
|
| 120 |
+
TORCH_API void checkScalarType(CheckedFrom c, const TensorArg& t, ScalarType s);
|
| 121 |
+
TORCH_API void checkScalarTypes(
|
| 122 |
+
CheckedFrom c,
|
| 123 |
+
const TensorArg& t,
|
| 124 |
+
at::ArrayRef<ScalarType> l);
|
| 125 |
+
TORCH_API void checkSameGPU(
|
| 126 |
+
CheckedFrom c,
|
| 127 |
+
const TensorArg& t1,
|
| 128 |
+
const TensorArg& t2);
|
| 129 |
+
TORCH_API void checkAllSameGPU(CheckedFrom c, ArrayRef<TensorArg> tensors);
|
| 130 |
+
TORCH_API void checkSameType(
|
| 131 |
+
CheckedFrom c,
|
| 132 |
+
const TensorArg& t1,
|
| 133 |
+
const TensorArg& t2);
|
| 134 |
+
TORCH_API void checkAllSameType(CheckedFrom c, ArrayRef<TensorArg> tensors);
|
| 135 |
+
TORCH_API void checkSameSize(
|
| 136 |
+
CheckedFrom c,
|
| 137 |
+
const TensorArg& t1,
|
| 138 |
+
const TensorArg& t2);
|
| 139 |
+
TORCH_API void checkAllSameSize(CheckedFrom c, ArrayRef<TensorArg> tensors);
|
| 140 |
+
TORCH_API void checkDefined(CheckedFrom c, const TensorArg& t);
|
| 141 |
+
TORCH_API void checkAllDefined(CheckedFrom c, at::ArrayRef<TensorArg> t);
|
| 142 |
+
|
| 143 |
+
// FixMe: does TensorArg slow things down?
|
| 144 |
+
TORCH_API void checkBackend(
|
| 145 |
+
CheckedFrom c,
|
| 146 |
+
at::ArrayRef<Tensor> t,
|
| 147 |
+
at::Backend backend);
|
| 148 |
+
|
| 149 |
+
TORCH_API void checkDeviceType(
|
| 150 |
+
CheckedFrom c,
|
| 151 |
+
at::ArrayRef<Tensor> tensors,
|
| 152 |
+
at::DeviceType device_type);
|
| 153 |
+
|
| 154 |
+
TORCH_API void checkLayout(CheckedFrom c, const Tensor& t, Layout layout);
|
| 155 |
+
|
| 156 |
+
TORCH_API void checkLayout(
|
| 157 |
+
CheckedFrom c,
|
| 158 |
+
at::ArrayRef<Tensor> tensors,
|
| 159 |
+
at::Layout layout);
|
| 160 |
+
|
| 161 |
+
// Methods for getting data_ptr if tensor is defined
|
| 162 |
+
TORCH_API void* maybe_data_ptr(const Tensor& tensor);
|
| 163 |
+
TORCH_API void* maybe_data_ptr(const TensorArg& tensor);
|
| 164 |
+
|
| 165 |
+
TORCH_API void check_dim_size(
|
| 166 |
+
const Tensor& tensor,
|
| 167 |
+
int64_t dim,
|
| 168 |
+
int64_t dim_size,
|
| 169 |
+
int64_t size);
|
| 170 |
+
|
| 171 |
+
namespace detail {
// Contiguous (row-major) strides for the given sizes.
// Declaration only; the implementation lives outside this header.
TORCH_API std::vector<int64_t> defaultStrides(IntArrayRef sizes);

// Stride computation for reshape/view given an old geometry and a new shape.
// NOTE(review): presumably returns nullopt when the reshape cannot be
// expressed as a view of the old layout — confirm at the definition.
TORCH_API c10::optional<std::vector<int64_t>> computeStride(
    IntArrayRef oldshape,
    IntArrayRef oldstride,
    IntArrayRef newshape);

// Symbolic-int overload of the above.
TORCH_API c10::optional<SymDimVector> computeStride(
    c10::SymIntArrayRef oldshape,
    c10::SymIntArrayRef oldstride,
    c10::SymIntArrayRef newshape);

// DimVector overload (avoids a heap-allocated std::vector for small ranks).
TORCH_API c10::optional<DimVector> computeStride(
    IntArrayRef oldshape,
    IntArrayRef oldstride,
    const DimVector& newshape);

} // namespace detail
|
| 190 |
+
} // namespace at
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/Dict_inl.h
ADDED
|
@@ -0,0 +1,209 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/ivalue.h>
|
| 4 |
+
#include <c10/util/hash.h>
|
| 5 |
+
|
| 6 |
+
namespace c10 {
|
| 7 |
+
namespace detail {
|
| 8 |
+
inline bool DictKeyEqualTo::operator()(const IValue& lhs, const IValue& rhs) const {
  // Tensor keys compare by identity only (following how it's done in Python);
  // all other keys go through the identity-then-value fast path
  // (see: [container equality]).
  const bool both_tensors = lhs.isTensor() && rhs.isTensor();
  return both_tensors ? lhs.is(rhs) : _fastEqualsForContainer(lhs, rhs);
}
|
| 17 |
+
}
|
| 18 |
+
|
| 19 |
+
template<class T> decltype(auto) getTypePtr();
|
| 20 |
+
std::string toString(const Type& type);
|
| 21 |
+
|
| 22 |
+
namespace impl {

// Checked cast from a type-erased GenericDict to a statically typed
// Dict<Key, Value>. Internal-asserts that the runtime key/value types stored
// on the dict match Key/Value; the underlying DictImpl is moved, not copied.
template<class Key, class Value>
Dict<Key, Value> toTypedDict(GenericDict dict) {
  TORCH_INTERNAL_ASSERT(*getTypePtr<Key>() == *dict.impl_->elementTypes.keyType, "Tried to cast a Dict<", toString(*dict.impl_->elementTypes.keyType), ", ", toString(*dict.impl_->elementTypes.valueType) ,"> to a Dict<", toString(*getTypePtr<Key>()), ", ", toString(*getTypePtr<Value>()), ">. Key types mismatch.");
  TORCH_INTERNAL_ASSERT(*getTypePtr<Value>() == *dict.impl_->elementTypes.valueType, "Tried to cast a Dict<", toString(*dict.impl_->elementTypes.keyType), ", ", toString(*dict.impl_->elementTypes.valueType) ,"> to a Dict<", toString(*getTypePtr<Key>()), ", ", toString(*getTypePtr<Value>()), ">. Value types mismatch.");

  return Dict<Key, Value>(std::move(dict.impl_));
}

// Type-erasing conversion in the other direction; always valid, so no
// runtime check is needed.
template<class Key, class Value>
GenericDict toGenericDict(Dict<Key, Value> dict) {
  return GenericDict(std::move(dict.impl_));
}
}
|
| 37 |
+
|
| 38 |
+
namespace detail {

// Hash for IValue dict keys. Only the tags handled below are hashable as
// dict keys; any other tag throws at runtime. Tensors hash by the identity
// of their TensorImpl pointer, matching DictKeyEqualTo's identity comparison.
inline size_t DictKeyHash::operator()(const IValue& ivalue) const {
  if (ivalue.isInt()) {
    return std::hash<int64_t>()(ivalue.toInt());
  } else if (ivalue.isString()) {
    return std::hash<c10::string_view>()(ivalue.toStringView());
  } else if (ivalue.isDouble()) {
    return std::hash<double>()(ivalue.toDouble());
  } else if (ivalue.isComplexDouble()) {
    return c10::hash<c10::complex<double>>()(ivalue.toComplexDouble());
  } else if (ivalue.isBool()) {
    return std::hash<bool>()(ivalue.toBool());
  } else if (ivalue.isTensor()) {
    // Identity hash — consistent with identity-based key equality.
    return std::hash<TensorImpl*>()(ivalue.toTensor().unsafeGetTensorImpl());
  } else if (ivalue.isDevice()) {
    return std::hash<Device>()(ivalue.toDevice());
  } else {
    throw std::runtime_error(
        "Can't hash IValues with tag '" + ivalue.tagKind() + "'");
  }
}

// Creates a fresh DictImpl from the current map and element types.
// Per-element IValue copy semantics apply.
inline intrusive_ptr<DictImpl> DictImpl::copy() const {
  return make_intrusive<DictImpl>(dict, elementTypes);
}

}
|
| 66 |
+
|
| 67 |
+
// Default-constructs an empty, statically typed dict; the element types are
// derived from Key/Value at compile time. The IValue-erased GenericDict must
// use the (TypePtr, TypePtr) constructor instead — enforced by the
// static_asserts below.
template<class Key, class Value>
Dict<Key, Value>::Dict()
  :Dict(make_intrusive<detail::DictImpl>(
      detail::DictImpl::dict_map_type(),
      detail::DictImpl::DictElementTypes{getTypePtr<Key>(), getTypePtr<Value>()})) {
  static_assert(!std::is_same<Key, IValue>::value, "This constructor is not valid for Dict<IValue, _>. Please use c10::impl::GenericDict(keyType, valueType) instead.");
  static_assert(!std::is_same<Value, IValue>::value, "This constructor is not valid for Dict<_, IValue>. Please use c10::impl::GenericDict(keyType, valueType) instead.");
}
|
| 75 |
+
|
| 76 |
+
// Constructs a type-erased dict whose key/value types are supplied at
// runtime. Only valid for GenericDict (Dict<IValue, IValue>) — enforced by
// the static_asserts below, mirroring the default constructor's restriction.
template<class Key, class Value>
Dict<Key, Value>::Dict(TypePtr keyType, TypePtr valueType)
  : Dict(make_intrusive<detail::DictImpl>(
      detail::DictImpl::dict_map_type(),
      detail::DictImpl::DictElementTypes {std::move(keyType), std::move(valueType)})) {
  static_assert(std::is_same<Key, IValue>::value, "This constructor is only valid for c10::impl::GenericDict.");
  static_assert(std::is_same<Value, IValue>::value, "This constructor is only valid for c10::impl::GenericDict.");
}
|
| 84 |
+
|
| 85 |
+
// Wraps an existing DictImpl; used internally by copy() and the
// toTypedDict/toGenericDict conversions.
template<class Key, class Value>
Dict<Key, Value>::Dict(c10::intrusive_ptr<detail::DictImpl>&& impl): impl_(std::move(impl)) {}

// Returns a new Dict backed by a fresh DictImpl (see DictImpl::copy);
// the original is left untouched.
template<class Key, class Value>
Dict<Key, Value> Dict<Key, Value>::copy() const {
  return Dict<Key, Value>(impl_->copy());
}
|
| 92 |
+
|
| 93 |
+
// NB: Dict has pointer semantics — impl_ is shared — so the members below
// are const even though some of them (e.g. clear) mutate the underlying
// DictImpl. Constness here is shallow.

template<class Key, class Value>
typename Dict<Key, Value>::iterator Dict<Key, Value>::begin() const {
  return iterator{impl_->dict.begin()};
}

template<class Key, class Value>
typename Dict<Key, Value>::iterator Dict<Key, Value>::end() const {
  return iterator{impl_->dict.end()};
}

// True iff the dict holds no entries.
template<class Key, class Value>
bool Dict<Key, Value>::empty() const {
  return impl_->dict.empty();
}

// Number of entries currently stored.
template<class Key, class Value>
typename Dict<Key, Value>::size_type Dict<Key, Value>::size() const {
  return impl_->dict.size();
}

// Removes all entries (mutates the shared impl; see NB above).
template<class Key, class Value>
void Dict<Key, Value>::clear() const {
  impl_->dict.clear();
}
|
| 117 |
+
|
| 118 |
+
// Inserts (key, value) if the key is not yet present. Returns an iterator to
// the (possibly pre-existing) entry plus whether an insertion happened —
// the same shape std::map::emplace returns. Arguments are perfect-forwarded
// into Key/Value constructions.
template<class Key, class Value>
template<class Key_, class Value_>
std::pair<typename Dict<Key, Value>::iterator, bool> Dict<Key, Value>::insert(Key_&& key, Value_&& value) const {
  static_assert(std::is_constructible<Key, Key_>::value, "Wrong type for the key argument of Dict::insert");
  static_assert(std::is_constructible<Value, Value_>::value, "Wrong type for the value argument of Dict::insert");
  auto inserted = impl_->dict.emplace(
      Key(std::forward<Key_>(key)),
      Value(std::forward<Value_>(value)));
  return {iterator{inserted.first}, inserted.second};
}

// Like insert, but overwrites the stored value when the key already exists.
// The bool in the result still reports whether a new entry was created.
template<class Key, class Value>
template<class Key_, class Value_>
std::pair<typename Dict<Key, Value>::iterator, bool> Dict<Key, Value>::insert_or_assign(Key_&& key, Value_&& value) const {
  static_assert(std::is_constructible<Key, Key_>::value, "Wrong type for the key argument of Dict::insert_or_assign");
  static_assert(std::is_constructible<Value, Value_>::value, "Wrong type for the value argument of Dict::insert_or_assign");
  auto inserted = impl_->dict.insert_or_assign(
      Key(std::forward<Key_>(key)),
      Value(std::forward<Value_>(value)));
  return {iterator{inserted.first}, inserted.second};
}
|
| 139 |
+
|
| 140 |
+
// Erases the entry the iterator points at (iterator must be valid and
// dereferenceable).
template<class Key, class Value>
void Dict<Key, Value>::erase(iterator iter) const {
  impl_->dict.erase(iter.entryRef_.iterator_);
}

// Erases by key; returns the count of removed entries as forwarded from the
// underlying container (0 when the key was absent).
template<class Key, class Value>
C10_NODISCARD size_t Dict<Key, Value>::erase(const Key& key) const {
  return impl_->dict.erase(key);
}

// Returns a copy of the value stored for `key`, converted from IValue to
// Value. The underlying container's at() handles the missing-key case —
// presumably by throwing; confirm against dict_map_type.
template<class Key, class Value>
Value Dict<Key, Value>::at(const Key& key) const {
  return impl_->dict.at(key).template to<Value>();
}

// Lookup; returns end() when the key is not present.
template<class Key, class Value>
typename Dict<Key, Value>::iterator Dict<Key, Value>::find(const Key& key) const {
  return iterator{impl_->dict.find(key)};
}
|
| 159 |
+
|
| 160 |
+
template<class Key, class Value>
|
| 161 |
+
bool Dict<Key, Value>::contains(const Key& key) const {
|
| 162 |
+
return end() != find(key);
|
| 163 |
+
}
|
| 164 |
+
|
| 165 |
+
// Pre-sizes the underlying container for `count` entries (forwarded to the
// container's reserve; mutates the shared impl despite being const).
template<class Key, class Value>
void Dict<Key, Value>::reserve(size_type count) const {
  impl_->dict.reserve(count);
}

// Runtime type of the keys, as recorded on the impl.
template<class Key, class Value>
TypePtr Dict<Key, Value>::keyType() const {
  return impl_->elementTypes.keyType;
}

// Runtime type of the values, as recorded on the impl.
template<class Key, class Value>
TypePtr Dict<Key, Value>::valueType() const {
  return impl_->elementTypes.valueType;
}
// "unsafe": swaps the recorded key type without revalidating the entries
// already stored — no consistency check is performed here.
template <class Key, class Value>
void Dict<Key, Value>::unsafeSetKeyType(TypePtr t) {
  impl_->elementTypes.keyType = std::move(t);
}

// "unsafe": same caveat as unsafeSetKeyType, for the value type.
template <class Key, class Value>
void Dict<Key, Value>::unsafeSetValueType(TypePtr t) {
  impl_->elementTypes.valueType = std::move(t);
}
|
| 188 |
+
|
| 189 |
+
// Value equality: identical impls short-circuit to true; otherwise delegates
// to DictImpl's operator== for element-wise comparison.
template <class Key_, class Value_>
bool operator==(const Dict<Key_, Value_>& lhs, const Dict<Key_, Value_>& rhs) {
  // Dicts with the same identity trivially compare equal.
  if (lhs.impl_ == rhs.impl_) {
    return true;
  }

  // Otherwise compare the values
  return *lhs.impl_ == *rhs.impl_;
}

template <class Key_, class Value_>
bool operator!=(const Dict<Key_, Value_>& lhs, const Dict<Key_, Value_>& rhs) {
  return !(lhs == rhs);
}

// Identity (not value) comparison — analogous to Python's `is`.
template <class Key, class Value>
bool Dict<Key, Value>::is(const Dict& rhs) const {
  return this->impl_ == rhs.impl_;
}
|
| 209 |
+
}
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/NestedIntSymNodeImpl.h
ADDED
|
@@ -0,0 +1,186 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/ConstantSymNodeImpl.h>
|
| 4 |
+
#include <c10/core/SymNodeImpl.h>
|
| 5 |
+
#include <c10/macros/Export.h>
|
| 6 |
+
#include <c10/util/Exception.h>
|
| 7 |
+
#include <c10/util/Optional.h>
|
| 8 |
+
#include <c10/util/intrusive_ptr.h>
|
| 9 |
+
#include <cstdint>
|
| 10 |
+
#include <string>
|
| 11 |
+
|
| 12 |
+
namespace c10 {
|
| 13 |
+
|
| 14 |
+
// The motivating usecase for this is to represent the ragged size structure
|
| 15 |
+
// of a jagged tensor [B, [s_0, s_1, s_2], D] as a single integer j0. This
|
| 16 |
+
// allows us to simply return [B, j0, D] if someone queries for the size of our
|
| 17 |
+
// tensor.
|
| 18 |
+
//
|
| 19 |
+
// Morally we define comparison between two nested ints to return true if
|
| 20 |
+
// that comparison holds for all corresponding elements of the arrays they
|
| 21 |
+
// represent. Comparison between a nested int and a plain int is defined
|
| 22 |
+
// similarly.
|
| 23 |
+
//
|
| 24 |
+
// To simulate this desired behavior but also avoid the O(N) cost of checking,
|
| 25 |
+
// we associate each raggedness pattern with an integer "id" that can be used as
|
| 26 |
+
// a proxy to evaluate equality. We also constrain the range of values for this
|
| 27 |
+
// as to enable inequality checks.
|
| 28 |
+
//
|
| 29 |
+
// We also support a positive integer scalar "coeff" that is used for computing
|
| 30 |
+
// strides. For example given, a [B, j0, D] tensor, it can be strided in two
|
| 31 |
+
// different ways: [D * j0, D, 1] and [j0, 1, sum(j0)]. The coeff is used to
|
| 32 |
+
// differentiate the two cases.
|
| 33 |
+
//
|
| 34 |
+
// During tracing the strides of the outputs need to be a function of the size
|
| 35 |
+
// and strides of the inputs so it is important that NestedIntSymNode itself is
|
| 36 |
+
// able to express this.
|
| 37 |
+
// SymNode implementation backing a "nested int" j0 (see file-header comment
// for the motivating jagged-tensor use case). Holds the raggedness id `val_`
// and a positive stride coefficient `coeff_`.
class TORCH_API NestedIntSymNodeImpl : public SymNodeImpl {
 public:
  // CAUTION: you should probably not be constructing these directly; please
  // the higher-level API in python instead (TODO: actually introduce that).
  explicit NestedIntSymNodeImpl(int64_t val, int64_t coeff)
      : val_(val), coeff_(coeff) {}

  bool bool_() override {
    return false;
  }

  bool is_int() override {
    return true;
  }

  bool is_float() override {
    return false;
  }

  bool is_bool() override {
    return false;
  }

  bool is_nested_int() const override {
    return true;
  }

  bool has_hint() override {
    return true;
  }

  // Wraps a plain integer as a constant SymNode so it can participate in
  // binary ops against this node.
  c10::SymNode wrap_int(int64_t num) override {
    return SymNode(c10::make_intrusive<ConstantSymNodeImpl<int64_t>>(num));
  }

  // A nested int has no single concrete integer value, so guarding on it is
  // an error. (Fix: give the check a message, consistent with guard_float /
  // guard_bool below.)
  int64_t guard_int(const char* file, int64_t line) override {
    TORCH_CHECK(false, "cannot guard on a nested int");
  }

  double guard_float(const char* file, int64_t line) override {
    TORCH_CHECK(false, "not a float");
  }

  bool guard_bool(const char* file, int64_t line) override {
    TORCH_CHECK(false, "not a bool");
  }

  // Same rationale as guard_int: there is no concrete int to extract.
  int64_t int_() override {
    TORCH_CHECK(false, "cannot extract a concrete int from a nested int");
  }

  // Human-readable form: "j<val>" or "<coeff>*j<val>".
  std::string str() override {
    if (coeff_ == 1) {
      return "j" + std::to_string(val_);
    }
    return std::to_string(coeff_) + "*j" + std::to_string(val_);
  }

  // NOTE [ Inequalities with nested int ]
  //
  // The semantics of nested int when it comes to relations is that it is
  // treated as integer known to be within a certain range,
  //
  // j0 \in [2, int64_t::max]
  //
  // allowing us to answer queries like j0 >= 1 (True), and j0 == 0 (False).
  // This is a useful default range for the raggedness pattern of a jagged
  // tensor (1) since sizes are non-negative, and (2) we need to get past 0/1
  // specialization checks.
  //
  // [ Indeterminate inequalities error out ]
  //
  // Given the semantic defined above, certain relations like j0 < 3 are thus
  // indeterminable. In our impl today, evaluating such relations error
  //
  // It may seem convenient to just define indeterminate relations to return
  // False, but the implementation we maintain in parallel using sympy does not
  // allow this.
  //
  // Sympy only allows overriding of Ge. The other relations (Lt, Gt, Le) are,
  // by consequence, all derived from Ge e.g., Lt(a, b) := !Ge(a, b). This
  // would mean that means that if we define the indeterminate j0 >= 3 to be
  // False, the also indeterminate j0 < 3 will be evaluated to be True!
  //
  // [ Coefficient are assumed positive ]
  //
  // For the purpose of computing inequalities, we consider the coefficient of
  // the nested int to be a positive integer.
  //
  // Thus, no modifications are needed to the logic since
  // j0 >= k implies coeff * j0 >= k
  //
  c10::SymNode eq(const c10::SymNode& other) override;
  c10::SymNode ne(const c10::SymNode& other) override;
  c10::SymNode ge(const c10::SymNode& other) override;
  c10::SymNode gt(const c10::SymNode& other) override;
  c10::SymNode lt(const c10::SymNode& other) override;
  c10::SymNode le(const c10::SymNode& other) override;
  c10::SymNode mul(const c10::SymNode& other) override;

  c10::optional<int64_t> nested_int() override {
    return val_;
  }

  c10::optional<int64_t> nested_int_coeff() override {
    return coeff_;
  }

  bool is_symbolic() override {
    return false;
  }

// All other binary ops are unsupported on nested ints and fail loudly.
#define DEFINE_BINARY_NOT_SUPPORTED(name)                           \
  c10::SymNode name(const c10::SymNode& other) override {           \
    TORCH_CHECK(false, #name " not supported by NestedIntSymNode"); \
  }

  DEFINE_BINARY_NOT_SUPPORTED(add)
  DEFINE_BINARY_NOT_SUPPORTED(sub)
  DEFINE_BINARY_NOT_SUPPORTED(truediv)
  DEFINE_BINARY_NOT_SUPPORTED(pow)
  DEFINE_BINARY_NOT_SUPPORTED(floordiv)
  DEFINE_BINARY_NOT_SUPPORTED(mod)
  DEFINE_BINARY_NOT_SUPPORTED(sym_min)
  DEFINE_BINARY_NOT_SUPPORTED(sym_max)
  DEFINE_BINARY_NOT_SUPPORTED(sym_and)
  DEFINE_BINARY_NOT_SUPPORTED(sym_or)

#undef DEFINE_BINARY_NOT_SUPPORTED

// Likewise for unary ops.
#define DEFINE_NOT_SUPPORTED(name)                                     \
  c10::SymNode name() override {                                       \
    TORCH_CHECK(false, #name " is not supported by NestedIntSymNode"); \
  }

  DEFINE_NOT_SUPPORTED(sym_not)
  DEFINE_NOT_SUPPORTED(ceil)
  DEFINE_NOT_SUPPORTED(floor)
  DEFINE_NOT_SUPPORTED(neg)
  DEFINE_NOT_SUPPORTED(clone)
  DEFINE_NOT_SUPPORTED(sym_float)

#undef DEFINE_NOT_SUPPORTED

 private:
  int64_t val_;   // raggedness-pattern id (the "j0" identifier)
  int64_t coeff_; // positive stride coefficient (see file-header comment)
};
|
| 185 |
+
|
| 186 |
+
} // namespace c10
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/Reduction.h
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
namespace at {
namespace Reduction {

// NB: Keep this in sync with Reduction class in torch/nn/_reduction.py
// These constants control the reduction behavior of loss functions.
// Ideally, this would be a scoped enum, but jit doesn't support that
enum Reduction {
  None, // Do not reduce
  Mean, // (Possibly weighted) mean of losses
  Sum, // Sum losses
  END // One past the last valid mode — not itself a reduction
};
} // namespace Reduction
} // namespace at
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/Scalar.h
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
#include <c10/core/Scalar.h>
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/TorchDispatchUtils.h
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/library.h>
|
| 4 |
+
#include <ATen/core/dispatch/Dispatcher.h>
|
| 5 |
+
#include <c10/util/ArrayRef.h>
|
| 6 |
+
#include <c10/util/Optional.h>
|
| 7 |
+
#include <c10/core/impl/TorchDispatchModeTLS.h>
|
| 8 |
+
|
| 9 |
+
namespace at {
namespace impl {

// Helpers for querying whether tensors carry a Python/torch-dispatch key.
// Declarations only; implementations live outside this header.
TORCH_API bool tensor_has_dispatch(const at::Tensor& t);
TORCH_API bool tensorlist_has_dispatch(at::ITensorListRef li);
TORCH_API bool tensorlist_has_dispatch(const c10::List<c10::optional<at::Tensor>>& li);
// Re-exported from TorchDispatchModeTLS for convenience at this namespace.
using c10::impl::dispatch_mode_enabled;

}}
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/blob.h
ADDED
|
@@ -0,0 +1,208 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <cstddef>
|
| 4 |
+
#include <sstream>
|
| 5 |
+
#include <type_traits>
|
| 6 |
+
#include <typeinfo>
|
| 7 |
+
#include <vector>
|
| 8 |
+
|
| 9 |
+
#include <c10/util/intrusive_ptr.h>
|
| 10 |
+
#include <c10/util/typeid.h>
|
| 11 |
+
#include <c10/macros/Macros.h>
|
| 12 |
+
|
| 13 |
+
namespace caffe2 {
|
| 14 |
+
|
| 15 |
+
class Tensor;
|
| 16 |
+
|
| 17 |
+
/**
|
| 18 |
+
* @brief Blob is a general container that hosts a typed pointer.
|
| 19 |
+
*
|
| 20 |
+
* A Blob hosts a pointer as well as its type, and takes charge of deleting it
|
| 21 |
+
* properly when the blob is deallocated or re-allocated with a new type. A blob
|
| 22 |
+
* could contain anything, although the most common case is to contain a Tensor.
|
| 23 |
+
*/
|
| 24 |
+
class TORCH_API Blob final : public c10::intrusive_ptr_target {
 public:
  /**
   * Initializes an empty Blob.
   */
  Blob() noexcept : meta_(), pointer_(nullptr), has_ownership_(false) {}
  ~Blob() override {
    Reset();
  }

  // Move leaves `other` empty (default-constructed state) via swap.
  Blob(Blob&& other) noexcept : Blob() {
    swap(other);
  }

  Blob& operator=(Blob&& other) noexcept {
    Blob(std::move(other)).swap(*this);
    return *this;
  }

  /**
   * Checks if the content stored in the blob is of type T.
   */
  template <class T>
  bool IsType() const noexcept {
    return meta_.Match<T>();
  }

  /**
   * Returns the meta info of the blob.
   */
  const TypeMeta meta() const noexcept {
    return meta_;
  }

  /**
   * Returns a printable typename of the blob.
   */
  c10::string_view TypeName() const noexcept {
    return meta_.name();
  }

  /**
   * @brief Gets the const reference of the stored object. The code checks if
   * the stored object is of the desired type.
   */
  // TODO(jerryzh): add a Get(c10::DeviceType) function?
  template <class T>
  const T& Get() const {
    TORCH_INTERNAL_ASSERT(
        IsType<T>(),
        "wrong type for the Blob instance. Blob contains ",
        meta_.name(),
        " while caller expects ",
        TypeMeta::TypeName<T>());
    // TODO: after we add Get<Tensor>(c10::DeviceType)
    // and changed all the callsites, we can add
    // a static assert here to enforce T != Tensor
    return *static_cast<const T*>(pointer_);
  }

  // Non-owning, type-unchecked access to the raw stored pointer
  // (nullptr when the blob is empty).
  const void* GetRaw() const noexcept {
    return pointer_;
  }
  void* GetRaw() noexcept {
    return pointer_;
  }

  /**
   * @brief Gets a mutable pointer to the stored object.
   *
   * If the current object is not of the right type, a new object is created
   * and the old object is freed. Note that type T should have a default
   * constructor. Otherwise, create the object yourself first, and use
   * Reset().
   */
  template <class T>
  T* GetMutable() {
    static_assert(
        std::is_default_constructible<T>::value,
        "GetMutable can't be called with non-default-constructible types. "
        "Try using specialized methods");
    if (IsType<T>()) {
      return static_cast<T*>(pointer_);
    } else {
      // TODO Re-enable logging
      // VLOG(1) << "Create new mutable object " << TypeMeta::TypeName<T>();
      return Reset<T>(new T());
    }
  }

  // Like GetMutable, but returns nullptr on a type mismatch instead of
  // replacing the stored object.
  template <class T>
  T* GetMutableOrNull() {
    if (IsType<T>()) {
      return static_cast<T*>(pointer_);
    } else {
      return nullptr;
    }
  }

  /**
   * Sets the underlying object to the allocated one. The Blob then takes over
   * the ownership of the passed in pointer. If there is already an object in
   * the Blob, the old object is freed.
   *
   * This is used when the underlying class T does not have a default ctor, or
   * complex initializations needs to be done outside the blob.
   */
  template <class T>
  T* Reset(T* allocated) {
    free_();
    meta_ = TypeMeta::Make<T>();
    pointer_ = static_cast<void*>(allocated);
    has_ownership_ = true;
    return allocated;
  }

  /**
   * Sets the underlying object to the allocated one, but does not take over
   * the ownership of the passed in pointer. If there is already an object in
   * the Blob, the old object is freed.
   *
   * Unlike Reset, this does not take over the ownership of the pointer and the
   * caller is responsible for making sure that the lifetime of the allocated
   * blob outlasts the lifetime of any access to this blob, until another Reset
   * call is made or the blob is destructed.
   */
  template <class T>
  typename std::remove_const<T>::type* ShareExternal(
      typename std::remove_const<T>::type* allocated) {
    return static_cast<T*>(ShareExternal(
        static_cast<void*>(allocated),
        TypeMeta::Make<typename std::remove_const<T>::type>()));
  }

  // Type-erased form of ShareExternal<T>; records the pointer without
  // taking ownership.
  void* ShareExternal(void* allocated, const TypeMeta meta) {
    free_();
    meta_ = meta;
    pointer_ = allocated;
    has_ownership_ = false;
    return allocated;
  }

  /**
   * Resets the Blob to an empty one.
   */
  void Reset() {
    free_();
    pointer_ = nullptr;
    meta_ = TypeMeta();
    has_ownership_ = false;
  }

  /**
   * @brief Swaps the underlying storage of two blobs.
   */
  void swap(Blob& rhs) {
    using std::swap;
    swap(meta_, rhs.meta_);
    swap(pointer_, rhs.pointer_);
    swap(has_ownership_, rhs.has_ownership_);
  }

 private:
  // Deletes the owned object via the deleter registered on meta_;
  // no-op when the blob is empty or non-owning.
  void free_() {
    if (has_ownership_ && pointer_ != nullptr) {
      (*meta_.deleteFn())(pointer_);
    }
  }

  TypeMeta meta_;       // runtime type info of the stored object
  void* pointer_;       // the stored object (nullptr when empty)
  bool has_ownership_;  // whether free_() should delete pointer_

  C10_DISABLE_COPY_AND_ASSIGN(Blob);
};
|
| 199 |
+
|
| 200 |
+
inline void swap(Blob& lhs, Blob& rhs) {
|
| 201 |
+
lhs.swap(rhs);
|
| 202 |
+
}
|
| 203 |
+
|
| 204 |
+
inline std::ostream& operator<<(std::ostream& out, const Blob& v) {
|
| 205 |
+
return out << "Blob[" << v.TypeName() << "]";
|
| 206 |
+
}
|
| 207 |
+
|
| 208 |
+
} // namespace caffe2
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/ivalue_to.h
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <string>
|
| 4 |
+
|
| 5 |
+
namespace at {
|
| 6 |
+
class Tensor;
|
| 7 |
+
} // namespace at
|
| 8 |
+
|
| 9 |
+
namespace c10 {
|
| 10 |
+
struct IValue;
|
| 11 |
+
namespace detail {
|
| 12 |
+
// Determine the return type of `IValue::to() const &`. It's a const
|
| 13 |
+
// reference when possible and a copy otherwise. It is in this
|
| 14 |
+
// separate header so that List can use it as well.
|
| 15 |
+
template<typename T>
|
| 16 |
+
struct ivalue_to_const_ref_overload_return {
|
| 17 |
+
using type = T;
|
| 18 |
+
};
|
| 19 |
+
|
| 20 |
+
template<>
|
| 21 |
+
struct ivalue_to_const_ref_overload_return<at::Tensor> {
|
| 22 |
+
using type = const at::Tensor&;
|
| 23 |
+
};
|
| 24 |
+
|
| 25 |
+
template<>
|
| 26 |
+
struct ivalue_to_const_ref_overload_return<std::string> {
|
| 27 |
+
using type = const std::string&;
|
| 28 |
+
};
|
| 29 |
+
|
| 30 |
+
template<>
|
| 31 |
+
struct ivalue_to_const_ref_overload_return<IValue> {
|
| 32 |
+
using type = const IValue&;
|
| 33 |
+
};
|
| 34 |
+
|
| 35 |
+
} // namespace detail
|
| 36 |
+
} // namespace c10
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/jit_type_base.h
ADDED
|
@@ -0,0 +1,719 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <functional>
|
| 4 |
+
#include <memory>
|
| 5 |
+
#include <string>
|
| 6 |
+
#include <utility>
|
| 7 |
+
|
| 8 |
+
#include <ATen/core/qualified_name.h>
|
| 9 |
+
#include <ATen/core/type_ptr.h>
|
| 10 |
+
#include <c10/core/SymInt.h>
|
| 11 |
+
#include <c10/core/SymFloat.h>
|
| 12 |
+
#include <c10/core/SymBool.h>
|
| 13 |
+
#include <c10/core/SymIntArrayRef.h>
|
| 14 |
+
#include <c10/macros/Macros.h>
|
| 15 |
+
#include <c10/util/ArrayRef.h>
|
| 16 |
+
#include <c10/util/Exception.h>
|
| 17 |
+
#include <c10/util/Optional.h>
|
| 18 |
+
|
| 19 |
+
namespace c10 {
|
| 20 |
+
|
| 21 |
+
#define C10_FORALL_TYPES(_) \
|
| 22 |
+
_(AnyType) \
|
| 23 |
+
_(EnumType) \
|
| 24 |
+
_(AnyEnumType) \
|
| 25 |
+
_(TensorType) \
|
| 26 |
+
_(StorageType) \
|
| 27 |
+
_(TupleType) \
|
| 28 |
+
_(ListType) \
|
| 29 |
+
_(DictType) \
|
| 30 |
+
_(NumberType) \
|
| 31 |
+
_(FloatType) \
|
| 32 |
+
_(ComplexType) \
|
| 33 |
+
_(FutureType) \
|
| 34 |
+
_(AwaitType) \
|
| 35 |
+
_(RRefType) \
|
| 36 |
+
_(IntType) \
|
| 37 |
+
_(NoneType) \
|
| 38 |
+
_(StringType) \
|
| 39 |
+
_(GeneratorType) \
|
| 40 |
+
_(QuantizerType) \
|
| 41 |
+
_(BoolType) \
|
| 42 |
+
_(OptionalType) \
|
| 43 |
+
_(VarType) \
|
| 44 |
+
_(DeviceObjType) \
|
| 45 |
+
_(StreamObjType) \
|
| 46 |
+
_(FunctionType) \
|
| 47 |
+
_(ClassType) \
|
| 48 |
+
_(PyObjectType) \
|
| 49 |
+
_(CapsuleType) \
|
| 50 |
+
_(InterfaceType) \
|
| 51 |
+
_(QSchemeType) \
|
| 52 |
+
_(ScalarTypeType) \
|
| 53 |
+
_(LayoutType) \
|
| 54 |
+
_(MemoryFormatType) \
|
| 55 |
+
_(AnyListType) \
|
| 56 |
+
_(AnyTupleType) \
|
| 57 |
+
_(AnyClassType) \
|
| 58 |
+
_(SymIntType) \
|
| 59 |
+
_(SymFloatType) \
|
| 60 |
+
_(SymBoolType) \
|
| 61 |
+
_(UnionType) \
|
| 62 |
+
_(DynamicType)
|
| 63 |
+
|
| 64 |
+
enum class TypeKind {
|
| 65 |
+
#define DEFINE_TYPE(T) T,
|
| 66 |
+
C10_FORALL_TYPES(DEFINE_TYPE)
|
| 67 |
+
#undef DEFINE_TYPE
|
| 68 |
+
};
|
| 69 |
+
|
| 70 |
+
TORCH_API const char* typeKindToString(TypeKind kind);
|
| 71 |
+
|
| 72 |
+
struct Type;
|
| 73 |
+
struct SharedType;
|
| 74 |
+
|
| 75 |
+
// Use this to customize how a Type is printed using `annotation_str()`. If
|
| 76 |
+
// c10::nullopt is returned, `annotation_str()` falls through to its default
|
| 77 |
+
// implementation.
|
| 78 |
+
using TypePrinter = std::function<c10::optional<std::string>(const Type&)>;
|
| 79 |
+
|
| 80 |
+
namespace detail {
|
| 81 |
+
template <typename T>
|
| 82 |
+
struct IsSingletonType : public std::integral_constant<bool, false> {};
|
| 83 |
+
} // namespace detail
|
| 84 |
+
#define TORCH_DECLARE_SINGLETON(Type) \
|
| 85 |
+
struct Type; \
|
| 86 |
+
namespace detail { \
|
| 87 |
+
template <> struct IsSingletonType<Type> : public std::integral_constant<bool, true> {}; \
|
| 88 |
+
}
|
| 89 |
+
|
| 90 |
+
TORCH_DECLARE_SINGLETON(AnyType);
|
| 91 |
+
TORCH_DECLARE_SINGLETON(AnyEnumType);
|
| 92 |
+
TORCH_DECLARE_SINGLETON(NumberType);
|
| 93 |
+
TORCH_DECLARE_SINGLETON(FloatType);
|
| 94 |
+
TORCH_DECLARE_SINGLETON(ComplexType);
|
| 95 |
+
TORCH_DECLARE_SINGLETON(IntType);
|
| 96 |
+
TORCH_DECLARE_SINGLETON(BoolType);
|
| 97 |
+
TORCH_DECLARE_SINGLETON(StringType);
|
| 98 |
+
TORCH_DECLARE_SINGLETON(StorageType);
|
| 99 |
+
TORCH_DECLARE_SINGLETON(NoneType);
|
| 100 |
+
TORCH_DECLARE_SINGLETON(GeneratorType);
|
| 101 |
+
TORCH_DECLARE_SINGLETON(QuantizerType);
|
| 102 |
+
TORCH_DECLARE_SINGLETON(QSchemeType);
|
| 103 |
+
TORCH_DECLARE_SINGLETON(DeviceObjType);
|
| 104 |
+
TORCH_DECLARE_SINGLETON(StreamObjType);
|
| 105 |
+
TORCH_DECLARE_SINGLETON(CapsuleType);
|
| 106 |
+
TORCH_DECLARE_SINGLETON(PyObjectType);
|
| 107 |
+
TORCH_DECLARE_SINGLETON(ScalarTypeType);
|
| 108 |
+
TORCH_DECLARE_SINGLETON(LayoutType);
|
| 109 |
+
TORCH_DECLARE_SINGLETON(MemoryFormatType);
|
| 110 |
+
TORCH_DECLARE_SINGLETON(AnyListType);
|
| 111 |
+
TORCH_DECLARE_SINGLETON(AnyTupleType);
|
| 112 |
+
TORCH_DECLARE_SINGLETON(AnyClassType);
|
| 113 |
+
|
| 114 |
+
namespace detail {
|
| 115 |
+
template <typename T, typename Enable = void>
|
| 116 |
+
struct CastReturnType {
|
| 117 |
+
using type = std::shared_ptr<T>;
|
| 118 |
+
};
|
| 119 |
+
|
| 120 |
+
template <typename T>
|
| 121 |
+
struct CastReturnType<T, typename std::enable_if<IsSingletonType<T>::value>::type> {
|
| 122 |
+
using type = SingletonTypePtr<T>;
|
| 123 |
+
};
|
| 124 |
+
|
| 125 |
+
template <typename T, typename Enable = void>
|
| 126 |
+
struct CastConstReturnType {
|
| 127 |
+
using type = std::shared_ptr<const T>;
|
| 128 |
+
};
|
| 129 |
+
|
| 130 |
+
template <typename T>
|
| 131 |
+
struct CastConstReturnType<T, typename std::enable_if<IsSingletonType<T>::value>::type> {
|
| 132 |
+
using type = SingletonTypePtr<const T>;
|
| 133 |
+
};
|
| 134 |
+
|
| 135 |
+
template <typename T>
|
| 136 |
+
struct as_shared_type {
|
| 137 |
+
using type = SharedType*;
|
| 138 |
+
};
|
| 139 |
+
|
| 140 |
+
template <typename T>
|
| 141 |
+
struct as_shared_type<const T*> {
|
| 142 |
+
using type = const SharedType *;
|
| 143 |
+
};
|
| 144 |
+
} // namespace detail
|
| 145 |
+
|
| 146 |
+
struct TORCH_API Type {
|
| 147 |
+
friend TORCH_API bool operator==(const Type& lhs, const Type& rhs);
|
| 148 |
+
private:
|
| 149 |
+
TypeKind kind_;
|
| 150 |
+
|
| 151 |
+
protected:
|
| 152 |
+
Type(TypeKind kind) : kind_(kind) {}
|
| 153 |
+
|
| 154 |
+
Type(const Type&) = default;
|
| 155 |
+
Type& operator=(const Type&) = default;
|
| 156 |
+
Type(Type&&) noexcept = default;
|
| 157 |
+
Type& operator=(Type&&) noexcept = default;
|
| 158 |
+
|
| 159 |
+
virtual std::string annotation_str_impl(TypePrinter /*printer*/) const {
|
| 160 |
+
return str();
|
| 161 |
+
}
|
| 162 |
+
// a == b
|
| 163 |
+
virtual bool equals(const Type& rhs) const = 0;
|
| 164 |
+
// a == b <=> b == a
|
| 165 |
+
virtual bool symmetric() const {
|
| 166 |
+
return true;
|
| 167 |
+
}
|
| 168 |
+
|
| 169 |
+
public:
|
| 170 |
+
template <typename T>
|
| 171 |
+
class SingletonOrSharedTypePtr {
|
| 172 |
+
public:
|
| 173 |
+
using element_type = typename std::shared_ptr<T>::element_type;
|
| 174 |
+
|
| 175 |
+
SingletonOrSharedTypePtr() = default;
|
| 176 |
+
|
| 177 |
+
/* implicit */ SingletonOrSharedTypePtr(std::shared_ptr<T> x)
|
| 178 |
+
: repr_(std::move(x)) {}
|
| 179 |
+
|
| 180 |
+
template <typename U, std::enable_if_t<std::is_convertible<U*, T*>::value, bool> = true>
|
| 181 |
+
/* implicit */ SingletonOrSharedTypePtr(std::shared_ptr<U> x)
|
| 182 |
+
: repr_(std::move(x)) {}
|
| 183 |
+
|
| 184 |
+
/* implicit */ SingletonOrSharedTypePtr(std::nullptr_t)
|
| 185 |
+
: repr_(nullptr) {}
|
| 186 |
+
|
| 187 |
+
/* implicit */ SingletonOrSharedTypePtr(SingletonTypePtr<T> p)
|
| 188 |
+
: repr_(p) {}
|
| 189 |
+
|
| 190 |
+
template <typename U, std::enable_if_t<std::is_convertible<U*, T*>::value, bool> = true>
|
| 191 |
+
/* implicit */ SingletonOrSharedTypePtr(SingletonTypePtr<U> p)
|
| 192 |
+
: repr_(SingletonTypePtr<T>(p.get())) {}
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
// We need to support construction from T* for pybind. The problem
|
| 196 |
+
// is that it's not clear if we are supposed to be taking shared
|
| 197 |
+
// ownership or not.
|
| 198 |
+
//
|
| 199 |
+
// Case 1: if T is known statically to derive from SharedType, we should use
|
| 200 |
+
// shared_from_this() and take shared_ownership.
|
| 201 |
+
//
|
| 202 |
+
// Case 2: if T is exactly Type, we need to do a dynamic_cast to
|
| 203 |
+
// check if it's a SharedType and do the right thing.
|
| 204 |
+
//
|
| 205 |
+
// Case 3: Otherwise, T is not a SharedType. (debug-check this
|
| 206 |
+
// assumption!) Use a singleton pointer.
|
| 207 |
+
|
| 208 |
+
template <typename U = T, std::enable_if_t<std::is_base_of<SharedType, U>::value, bool> = true>
|
| 209 |
+
/* implicit */ SingletonOrSharedTypePtr(T* p) : SingletonOrSharedTypePtr(static_cast<typename detail::as_shared_type<U>::type>(p)->shared_from_this()) {}
|
| 210 |
+
|
| 211 |
+
template <typename U = T, std::enable_if_t<std::is_same<Type, U>::value, bool> = true>
|
| 212 |
+
/* implicit */ SingletonOrSharedTypePtr(T* p) {
|
| 213 |
+
if (auto* shared_p = dynamic_cast<typename detail::as_shared_type<U>::type>(p)) {
|
| 214 |
+
repr_ = Repr(shared_p->shared_from_this());
|
| 215 |
+
} else {
|
| 216 |
+
repr_ = Repr(p);
|
| 217 |
+
}
|
| 218 |
+
}
|
| 219 |
+
|
| 220 |
+
template <typename U = T, std::enable_if_t<!std::is_same<Type, U>::value && !std::is_base_of<SharedType, U>::value, bool> = true>
|
| 221 |
+
/* implicit */ SingletonOrSharedTypePtr(T* p)
|
| 222 |
+
: repr_(p) {
|
| 223 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(dynamic_cast<typename detail::as_shared_type<U>::type>(p) == nullptr);
|
| 224 |
+
}
|
| 225 |
+
|
| 226 |
+
SingletonOrSharedTypePtr(const SingletonOrSharedTypePtr&) = default;
|
| 227 |
+
SingletonOrSharedTypePtr(SingletonOrSharedTypePtr&&) noexcept = default;
|
| 228 |
+
SingletonOrSharedTypePtr& operator=(const SingletonOrSharedTypePtr&) = default;
|
| 229 |
+
SingletonOrSharedTypePtr& operator=(SingletonOrSharedTypePtr&&) noexcept = default;
|
| 230 |
+
|
| 231 |
+
T* get() const {
|
| 232 |
+
return repr_.isSharedAndNonNull() ? repr_.shared_.repr_.get() : static_cast<T*>(repr_.rawRepr().first);
|
| 233 |
+
}
|
| 234 |
+
|
| 235 |
+
operator bool() const {
|
| 236 |
+
return repr_.isNonNull();
|
| 237 |
+
}
|
| 238 |
+
|
| 239 |
+
bool operator==(std::nullptr_t) const {
|
| 240 |
+
return !repr_.isNonNull();
|
| 241 |
+
}
|
| 242 |
+
|
| 243 |
+
bool operator!=(std::nullptr_t) const {
|
| 244 |
+
return repr_.isNonNull();
|
| 245 |
+
}
|
| 246 |
+
|
| 247 |
+
template <typename U = T, std::enable_if_t<!std::is_same<std::remove_const_t<U>, void>::value, bool> = true>
|
| 248 |
+
U& operator*() const {
|
| 249 |
+
return *get();
|
| 250 |
+
}
|
| 251 |
+
|
| 252 |
+
T* operator->() const {
|
| 253 |
+
return get();
|
| 254 |
+
}
|
| 255 |
+
|
| 256 |
+
private:
|
| 257 |
+
// NOTE: SharedPtrWrapper exists to work around a baffling bug in
|
| 258 |
+
// nvcc; see comment in destroy() below.
|
| 259 |
+
struct SharedPtrWrapper {
|
| 260 |
+
SharedPtrWrapper(std::shared_ptr<T> &&x)
|
| 261 |
+
: repr_(std::move(x)) {}
|
| 262 |
+
std::shared_ptr<T> repr_;
|
| 263 |
+
};
|
| 264 |
+
union Repr {
|
| 265 |
+
Repr() : Repr(nullptr) {}
|
| 266 |
+
|
| 267 |
+
explicit Repr(std::shared_ptr<T> x)
|
| 268 |
+
: shared_(std::move(x)) {}
|
| 269 |
+
|
| 270 |
+
explicit Repr(std::nullptr_t)
|
| 271 |
+
: singletonRepr_(nullptr) {}
|
| 272 |
+
|
| 273 |
+
explicit Repr(SingletonTypePtr<T> p)
|
| 274 |
+
: singletonRepr_(p.get()) {}
|
| 275 |
+
|
| 276 |
+
~Repr() {
|
| 277 |
+
destroy();
|
| 278 |
+
}
|
| 279 |
+
|
| 280 |
+
// NOTE: the only non-UB way to access our null state is through
|
| 281 |
+
// rawRepr(), because our copy operation doesn't preserve which
|
| 282 |
+
// union member is active for null pointers.
|
| 283 |
+
Repr(const Repr& rhs) {
|
| 284 |
+
if (rhs.isSharedAndNonNull()) {
|
| 285 |
+
new (&shared_) SharedPtrWrapper(rhs.shared_);
|
| 286 |
+
} else {
|
| 287 |
+
singletonRepr_.singleton_ = static_cast<T*>(rhs.rawRepr().first);
|
| 288 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.singletonRepr_.unused_ == nullptr);
|
| 289 |
+
singletonRepr_.unused_ = nullptr;
|
| 290 |
+
}
|
| 291 |
+
}
|
| 292 |
+
|
| 293 |
+
Repr(Repr&& rhs) noexcept {
|
| 294 |
+
if (rhs.isSharedAndNonNull()) {
|
| 295 |
+
new (&shared_) SharedPtrWrapper(std::move(rhs.shared_));
|
| 296 |
+
} else {
|
| 297 |
+
singletonRepr_.singleton_ = static_cast<T*>(rhs.rawRepr().first);
|
| 298 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.singletonRepr_.unused_ == nullptr);
|
| 299 |
+
singletonRepr_.unused_ = nullptr;
|
| 300 |
+
}
|
| 301 |
+
}
|
| 302 |
+
|
| 303 |
+
Repr& operator=(const Repr& rhs) {
|
| 304 |
+
if (&rhs == this) {
|
| 305 |
+
return *this;
|
| 306 |
+
}
|
| 307 |
+
if (rhs.isSharedAndNonNull()) {
|
| 308 |
+
if (isSharedAndNonNull()) {
|
| 309 |
+
shared_ = rhs.shared_;
|
| 310 |
+
} else {
|
| 311 |
+
new (&shared_) SharedPtrWrapper(rhs.shared_);
|
| 312 |
+
}
|
| 313 |
+
} else {
|
| 314 |
+
if (isSharedAndNonNull()) {
|
| 315 |
+
destroy();
|
| 316 |
+
}
|
| 317 |
+
singletonRepr_.singleton_ = static_cast<T*>(rhs.rawRepr().first);
|
| 318 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.rawRepr().nullIfSingleton_ == nullptr);
|
| 319 |
+
singletonRepr_.unused_ = nullptr;
|
| 320 |
+
}
|
| 321 |
+
return *this;
|
| 322 |
+
}
|
| 323 |
+
|
| 324 |
+
Repr& operator=(Repr&& rhs) noexcept {
|
| 325 |
+
if (&rhs == this) {
|
| 326 |
+
return *this;
|
| 327 |
+
}
|
| 328 |
+
if (rhs.isSharedAndNonNull()) {
|
| 329 |
+
if (isSharedAndNonNull()) {
|
| 330 |
+
shared_ = std::move(rhs.shared_);
|
| 331 |
+
} else {
|
| 332 |
+
new (&shared_) SharedPtrWrapper(std::move(rhs.shared_));
|
| 333 |
+
}
|
| 334 |
+
} else {
|
| 335 |
+
if (isSharedAndNonNull()) {
|
| 336 |
+
destroy();
|
| 337 |
+
}
|
| 338 |
+
singletonRepr_.singleton_ = static_cast<T*>(rhs.rawRepr().first);
|
| 339 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.rawRepr().nullIfSingleton_ == nullptr);
|
| 340 |
+
singletonRepr_.unused_ = nullptr;
|
| 341 |
+
}
|
| 342 |
+
return *this;
|
| 343 |
+
}
|
| 344 |
+
|
| 345 |
+
SharedPtrWrapper shared_;
|
| 346 |
+
|
| 347 |
+
struct SingletonRepr {
|
| 348 |
+
explicit SingletonRepr(T* s) : singleton_(s) {}
|
| 349 |
+
T* singleton_;
|
| 350 |
+
void* unused_ = nullptr;
|
| 351 |
+
} singletonRepr_;
|
| 352 |
+
struct RawRepr {
|
| 353 |
+
void* first;
|
| 354 |
+
void* nullIfSingleton_;
|
| 355 |
+
};
|
| 356 |
+
|
| 357 |
+
// It is UB to read the singleton part of Repr if it was
|
| 358 |
+
// constructed as a shared_ptr and vice versa, but memcpying out
|
| 359 |
+
// the representation is always OK, so here's an accessor to obey
|
| 360 |
+
// the letter of the law.
|
| 361 |
+
RawRepr rawRepr() const {
|
| 362 |
+
RawRepr repr{};
|
| 363 |
+
memcpy(&repr, reinterpret_cast<const char *>(this), sizeof(RawRepr));
|
| 364 |
+
return repr;
|
| 365 |
+
}
|
| 366 |
+
|
| 367 |
+
bool isNonNull() const {
|
| 368 |
+
auto repr = rawRepr();
|
| 369 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(repr.nullIfSingleton_ == nullptr || repr.first != nullptr);
|
| 370 |
+
return repr.first != nullptr;
|
| 371 |
+
}
|
| 372 |
+
|
| 373 |
+
bool isSharedAndNonNull() const {
|
| 374 |
+
return rawRepr().nullIfSingleton_ != nullptr;
|
| 375 |
+
}
|
| 376 |
+
|
| 377 |
+
private:
|
| 378 |
+
void destroy() {
|
| 379 |
+
if (isSharedAndNonNull()) {
|
| 380 |
+
// Without SharedPtrWrapper, this line would read
|
| 381 |
+
// `shared_.~shared_ptr()` and nvcc would complain with
|
| 382 |
+
// "error: expected primary-expression before '>' token"
|
| 383 |
+
// referring to the "t" in "shared_ptr". SharedPtrWrapper
|
| 384 |
+
// exists to work around this compiler bug.
|
| 385 |
+
shared_.~SharedPtrWrapper();
|
| 386 |
+
}
|
| 387 |
+
}
|
| 388 |
+
} repr_;
|
| 389 |
+
};
|
| 390 |
+
|
| 391 |
+
using TypePtr = SingletonOrSharedTypePtr<Type>;
|
| 392 |
+
using Ptr = TypePtr;
|
| 393 |
+
using ElementType = Type;
|
| 394 |
+
|
| 395 |
+
// subtyping relation. By default, we return true for the case
|
| 396 |
+
// when the type is exactly equal or if this <: T where rhs = Optional[T]
|
| 397 |
+
|
| 398 |
+
// if this returns false and the why_not stream is non-null, it contains
|
| 399 |
+
// additional details that describe why this is not a subtype of 'rhs'.
|
| 400 |
+
// This additional information should only contain details that are not
|
| 401 |
+
// obvious from the annotation_str() that describes the type. For instance it
|
| 402 |
+
// is clear that `int <: str` is false but not clear why `Foo <: InterfaceBar`
|
| 403 |
+
// might be false.
|
| 404 |
+
virtual bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const;
|
| 405 |
+
virtual bool is_module() const;
|
| 406 |
+
bool isSubtypeOf(const Type& rhs) const {
|
| 407 |
+
return isSubtypeOfExt(rhs, nullptr);
|
| 408 |
+
}
|
| 409 |
+
// Compatibility shims to accommodate existing code that passes shared_ptrs
|
| 410 |
+
// around. Ideally, we would just delete this, but it should be harmless.
|
| 411 |
+
template <typename T>
|
| 412 |
+
typename std::enable_if<std::is_base_of<Type, T>::value, bool>::type
|
| 413 |
+
isSubtypeOf(const std::shared_ptr<T>& rhs) const {
|
| 414 |
+
return isSubtypeOf(*rhs);
|
| 415 |
+
}
|
| 416 |
+
|
| 417 |
+
template <typename T>
|
| 418 |
+
typename std::enable_if<std::is_base_of<Type, T>::value, bool>::type
|
| 419 |
+
isSubtypeOf(const SingletonOrSharedTypePtr<T>& rhs) const {
|
| 420 |
+
return isSubtypeOf(*rhs);
|
| 421 |
+
}
|
| 422 |
+
|
| 423 |
+
template <typename T>
|
| 424 |
+
typename std::enable_if<std::is_base_of<Type, T>::value, bool>::type
|
| 425 |
+
isSubtypeOf(SingletonTypePtr<T> rhs) const {
|
| 426 |
+
return isSubtypeOf(*rhs);
|
| 427 |
+
}
|
| 428 |
+
|
| 429 |
+
template <typename T>
|
| 430 |
+
typename std::enable_if<std::is_base_of<Type, T>::value, bool>::type
|
| 431 |
+
isSubtypeOfExt(const SingletonOrSharedTypePtr<T>& rhs, std::ostream* why_not) const {
|
| 432 |
+
return isSubtypeOfExt(*rhs, why_not);
|
| 433 |
+
}
|
| 434 |
+
|
| 435 |
+
template <typename T>
|
| 436 |
+
typename std::enable_if<std::is_base_of<Type, T>::value, bool>::type
|
| 437 |
+
isSubtypeOfExt(const std::shared_ptr<T>& rhs, std::ostream* why_not) const {
|
| 438 |
+
return isSubtypeOfExt(*rhs, why_not);
|
| 439 |
+
}
|
| 440 |
+
|
| 441 |
+
template <typename T>
|
| 442 |
+
typename std::enable_if<std::is_base_of<Type, T>::value, bool>::type
|
| 443 |
+
isSubtypeOfExt(SingletonTypePtr<T> rhs, std::ostream* why_not) const {
|
| 444 |
+
return isSubtypeOfExt(*rhs, why_not);
|
| 445 |
+
}
|
| 446 |
+
|
| 447 |
+
// How this type will appear in FunctionSchema declarations
|
| 448 |
+
virtual std::string str() const = 0;
|
| 449 |
+
|
| 450 |
+
// How this type will appear as if it were a type annotation in Python
|
| 451 |
+
// which is sometimes different than how it appears in declarations (e.g.
|
| 452 |
+
// int[] vs List[int])
|
| 453 |
+
//
|
| 454 |
+
// Takes a custom printer that users can pass in to customize the output of
|
| 455 |
+
// this method.
|
| 456 |
+
std::string annotation_str(TypePrinter printer) const {
|
| 457 |
+
if (printer) {
|
| 458 |
+
// the printer can return nullopt to fall through to the default impl
|
| 459 |
+
if (auto renamed = printer(*this)) {
|
| 460 |
+
return *renamed;
|
| 461 |
+
}
|
| 462 |
+
}
|
| 463 |
+
return annotation_str_impl(std::move(printer));
|
| 464 |
+
}
|
| 465 |
+
std::string annotation_str() const {
|
| 466 |
+
// Overload instead of define a default value for `printer` to help
|
| 467 |
+
// debuggers out.
|
| 468 |
+
return annotation_str(nullptr);
|
| 469 |
+
}
|
| 470 |
+
|
| 471 |
+
// Returns a human readable string that includes additional information like
|
| 472 |
+
// "type is inferred rather than explicitly defined" to help construct more
|
| 473 |
+
// user-friendly messages.
|
| 474 |
+
virtual std::string repr_str() const {
|
| 475 |
+
return annotation_str();
|
| 476 |
+
}
|
| 477 |
+
|
| 478 |
+
TypeKind kind() const {
|
| 479 |
+
return kind_;
|
| 480 |
+
}
|
| 481 |
+
|
| 482 |
+
virtual bool isUnionType() const {
|
| 483 |
+
return false;
|
| 484 |
+
}
|
| 485 |
+
|
| 486 |
+
virtual bool requires_grad() const {
|
| 487 |
+
for (const auto& ct : containedTypes()) {
|
| 488 |
+
if (ct->requires_grad()) {
|
| 489 |
+
return true;
|
| 490 |
+
}
|
| 491 |
+
}
|
| 492 |
+
return false;
|
| 493 |
+
}
|
| 494 |
+
|
| 495 |
+
// Dynamically cast this object to the subclass indicated by the
|
| 496 |
+
// template variable, returning nullptr if the cast is invalid.
|
| 497 |
+
template <typename T, std::enable_if_t<!detail::IsSingletonType<T>::value, bool> = true>
|
| 498 |
+
typename detail::CastReturnType<T>::type cast() {
|
| 499 |
+
if (T::Kind == kind()) {
|
| 500 |
+
return std::static_pointer_cast<T>(static_cast<T*>(this)->shared_from_this());
|
| 501 |
+
}
|
| 502 |
+
return nullptr;
|
| 503 |
+
}
|
| 504 |
+
template <typename T, std::enable_if_t<detail::IsSingletonType<T>::value, bool> = true>
|
| 505 |
+
typename detail::CastReturnType<T>::type cast() {
|
| 506 |
+
if (T::Kind == kind()) {
|
| 507 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(this == T::get().get());
|
| 508 |
+
return typename detail::CastReturnType<T>::type(static_cast<T*>(this));
|
| 509 |
+
}
|
| 510 |
+
return nullptr;
|
| 511 |
+
}
|
| 512 |
+
template <typename T, std::enable_if_t<!detail::IsSingletonType<T>::value, bool> = true>
|
| 513 |
+
typename detail::CastConstReturnType<T>::type cast() const {
|
| 514 |
+
if (T::Kind == kind()) {
|
| 515 |
+
return std::static_pointer_cast<const T>(static_cast<const T*>(this)->shared_from_this());
|
| 516 |
+
}
|
| 517 |
+
return nullptr;
|
| 518 |
+
}
|
| 519 |
+
template <typename T, std::enable_if_t<detail::IsSingletonType<T>::value, bool> = true>
|
| 520 |
+
typename detail::CastConstReturnType<T>::type cast() const {
|
| 521 |
+
if (T::Kind == kind()) {
|
| 522 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(this == T::get().get());
|
| 523 |
+
return typename detail::CastConstReturnType<T>::type(static_cast<const T*>(this));
|
| 524 |
+
}
|
| 525 |
+
return nullptr;
|
| 526 |
+
}
|
| 527 |
+
template <typename T>
|
| 528 |
+
T* castRaw() {
|
| 529 |
+
if (T::Kind == kind()) {
|
| 530 |
+
return static_cast<T*>(this);
|
| 531 |
+
}
|
| 532 |
+
return nullptr;
|
| 533 |
+
}
|
| 534 |
+
template <typename T>
|
| 535 |
+
const T* castRaw() const {
|
| 536 |
+
if (T::Kind == kind()) {
|
| 537 |
+
return static_cast<const T*>(this);
|
| 538 |
+
}
|
| 539 |
+
return nullptr;
|
| 540 |
+
}
|
| 541 |
+
template <typename T>
|
| 542 |
+
auto expect() {
|
| 543 |
+
auto r = cast<T>();
|
| 544 |
+
AT_ASSERT(r);
|
| 545 |
+
return r;
|
| 546 |
+
}
|
| 547 |
+
template <typename T>
|
| 548 |
+
auto expect() const {
|
| 549 |
+
auto r = cast<const T>();
|
| 550 |
+
AT_ASSERT(r);
|
| 551 |
+
return r;
|
| 552 |
+
}
|
| 553 |
+
template <typename T>
|
| 554 |
+
T& expectRef() {
|
| 555 |
+
auto* r = castRaw<T>();
|
| 556 |
+
AT_ASSERT(r);
|
| 557 |
+
return *r;
|
| 558 |
+
}
|
| 559 |
+
template <typename T>
|
| 560 |
+
const T& expectRef() const {
|
| 561 |
+
auto* r = castRaw<const T>();
|
| 562 |
+
AT_ASSERT(r);
|
| 563 |
+
return *r;
|
| 564 |
+
}
|
| 565 |
+
virtual ~Type() = default;
|
| 566 |
+
virtual bool hasFreeVariables() const {
|
| 567 |
+
return false;
|
| 568 |
+
}
|
| 569 |
+
// list of types this type contains, e.g. for a List then element type of a
|
| 570 |
+
// list for a tuple, the types of the tuple elements
|
| 571 |
+
virtual at::ArrayRef<TypePtr> containedTypes() const {
|
| 572 |
+
return {};
|
| 573 |
+
}
|
| 574 |
+
virtual TypePtr containedType(size_t i) const {
|
| 575 |
+
return containedTypes().at(i);
|
| 576 |
+
}
|
| 577 |
+
virtual size_t containedTypeSize() const {
|
| 578 |
+
return containedTypes().size();
|
| 579 |
+
}
|
| 580 |
+
// create a new version of this type, replacing its contained types with
|
| 581 |
+
// contained_types
|
| 582 |
+
TypePtr withContained(std::vector<TypePtr> contained_types);
|
| 583 |
+
// per-type constructor, you only need to override this if the
|
| 584 |
+
// containedTypes() is not empty
|
| 585 |
+
virtual TypePtr createWithContained(
|
| 586 |
+
std::vector<TypePtr> /*contained_types*/) const {
|
| 587 |
+
AT_ERROR(
|
| 588 |
+
"type with contained types did not overload createWithContained: ",
|
| 589 |
+
str());
|
| 590 |
+
}
|
| 591 |
+
|
| 592 |
+
};
|
| 593 |
+
|
| 594 |
+
template <typename T>
|
| 595 |
+
using SingletonOrSharedTypePtr = Type::SingletonOrSharedTypePtr<T>;
|
| 596 |
+
|
| 597 |
+
|
| 598 |
+
template <typename T, typename U>
|
| 599 |
+
bool operator==(const SingletonOrSharedTypePtr<T>& x, const SingletonOrSharedTypePtr<U>& y) {
|
| 600 |
+
return (void*)x.get() == (void*)y.get();
|
| 601 |
+
}
|
| 602 |
+
|
| 603 |
+
template <typename T, typename U>
|
| 604 |
+
bool operator==(const SingletonOrSharedTypePtr<T>& x, const std::shared_ptr<U>& y) {
|
| 605 |
+
return (void*)x.get() == (void*)y.get();
|
| 606 |
+
}
|
| 607 |
+
|
| 608 |
+
template <typename T, typename U>
|
| 609 |
+
bool operator==(const std::shared_ptr<T>& x, const SingletonOrSharedTypePtr<U>& y) {
|
| 610 |
+
return (void*)x.get() == (void*)y.get();
|
| 611 |
+
}
|
| 612 |
+
|
| 613 |
+
template <typename T, typename U>
|
| 614 |
+
bool operator==(const SingletonOrSharedTypePtr<T>& x, const SingletonTypePtr<U>& y) {
|
| 615 |
+
return (void*)x.get() == (void*)y.get();
|
| 616 |
+
}
|
| 617 |
+
|
| 618 |
+
template <typename T, typename U>
|
| 619 |
+
bool operator==(const SingletonTypePtr<T>& x, const SingletonOrSharedTypePtr<U>& y) {
|
| 620 |
+
return (void*)x.get() == (void*)y.get();
|
| 621 |
+
}
|
| 622 |
+
|
| 623 |
+
template <typename T, typename U>
|
| 624 |
+
bool operator!=(const SingletonOrSharedTypePtr<T>& x, const SingletonOrSharedTypePtr<U>& y) {
|
| 625 |
+
return !(x == y);
|
| 626 |
+
}
|
| 627 |
+
|
| 628 |
+
template <typename T, typename U>
|
| 629 |
+
bool operator!=(const SingletonOrSharedTypePtr<T>& x, const std::shared_ptr<U>& y) {
|
| 630 |
+
return !(x == y);
|
| 631 |
+
}
|
| 632 |
+
|
| 633 |
+
template <typename T, typename U>
|
| 634 |
+
bool operator!=(const std::shared_ptr<T>& x, const SingletonOrSharedTypePtr<U>& y) {
|
| 635 |
+
return !(x == y);
|
| 636 |
+
}
|
| 637 |
+
|
| 638 |
+
template <typename T, typename U>
|
| 639 |
+
bool operator!=(const SingletonOrSharedTypePtr<T>& x, const SingletonTypePtr<U>& y) {
|
| 640 |
+
return !(x == y);
|
| 641 |
+
}
|
| 642 |
+
|
| 643 |
+
template <typename T, typename U>
|
| 644 |
+
bool operator!=(const SingletonTypePtr<T>& x, const SingletonOrSharedTypePtr<U>& y) {
|
| 645 |
+
return !(x == y);
|
| 646 |
+
}
|
| 647 |
+
|
| 648 |
+
using TypePtr = SingletonOrSharedTypePtr<Type>;
|
| 649 |
+
using ConstTypePtr = SingletonOrSharedTypePtr<const Type>;
|
| 650 |
+
|
| 651 |
+
// Explicitly enable MaybeOwned<shared_ptr<T>>, rather than allowing
|
| 652 |
+
// MaybeOwned to be used for any type right away.
|
| 653 |
+
template <typename T>
|
| 654 |
+
struct MaybeOwnedTraits<SingletonOrSharedTypePtr<T>>
|
| 655 |
+
: public MaybeOwnedTraitsGenericImpl<SingletonOrSharedTypePtr<T>> {};
|
| 656 |
+
|
| 657 |
+
// Base class for Types that are guaranteed to be owned by std::shared_ptr.
|
| 658 |
+
struct TORCH_API SharedType : public Type, public std::enable_shared_from_this<SharedType> {
|
| 659 |
+
using Type::Type;
|
| 660 |
+
};
|
| 661 |
+
|
| 662 |
+
inline TypePtr Type::withContained(std::vector<TypePtr> contained_types) {
|
| 663 |
+
auto current_contained = containedTypes();
|
| 664 |
+
// Types with no contained_types don't need this call. Check before calling!
|
| 665 |
+
//
|
| 666 |
+
// (We can't support this efficiently because types without
|
| 667 |
+
// contained types may be singletons, in which case
|
| 668 |
+
// shared_from_this will crash; we would have to provide a virtual
|
| 669 |
+
// typeptr_from_this or isSingleton.)
|
| 670 |
+
TORCH_INTERNAL_ASSERT(!current_contained.empty() && current_contained.size() == contained_types.size());
|
| 671 |
+
if (current_contained.equals(contained_types)) {
|
| 672 |
+
return std::static_pointer_cast<Type>(static_cast<SharedType *>(this)->shared_from_this());
|
| 673 |
+
}
|
| 674 |
+
return createWithContained(std::move(contained_types));
|
| 675 |
+
}
|
| 676 |
+
|
| 677 |
+
|
| 678 |
+
TORCH_API inline bool operator==(const Type& lhs, const Type& rhs) {
|
| 679 |
+
if (C10_UNLIKELY(!rhs.symmetric())) {
|
| 680 |
+
return rhs.equals(lhs);
|
| 681 |
+
}
|
| 682 |
+
return lhs.equals(rhs);
|
| 683 |
+
}
|
| 684 |
+
|
| 685 |
+
struct NamedType;
|
| 686 |
+
using NamedTypePtr = std::shared_ptr<NamedType>;
|
| 687 |
+
using ConstNamedTypePtr = std::shared_ptr<const NamedType>;
|
| 688 |
+
|
| 689 |
+
struct TORCH_API NamedType : public SharedType {
|
| 690 |
+
NamedType(TypeKind tk, c10::optional<QualifiedName> name)
|
| 691 |
+
: SharedType(tk), name_(std::move(name)) {
|
| 692 |
+
TORCH_INTERNAL_ASSERT(
|
| 693 |
+
tk == TypeKind::TupleType || tk == TypeKind::FunctionType ||
|
| 694 |
+
tk == TypeKind::ClassType || tk == TypeKind::InterfaceType ||
|
| 695 |
+
tk == TypeKind::EnumType,
|
| 696 |
+
"If you add a new kind of NamedType, ",
|
| 697 |
+
"please update the cast<NamedType> specialization and this assert");
|
| 698 |
+
}
|
| 699 |
+
|
| 700 |
+
// Fully qualified name of type
|
| 701 |
+
// Looks like: "foo.bar.Baz".
|
| 702 |
+
const c10::optional<QualifiedName>& name() const {
|
| 703 |
+
return name_;
|
| 704 |
+
}
|
| 705 |
+
|
| 706 |
+
private:
|
| 707 |
+
c10::optional<QualifiedName> name_;
|
| 708 |
+
};
|
| 709 |
+
|
| 710 |
+
} // namespace c10
|
| 711 |
+
|
| 712 |
+
namespace std {
|
| 713 |
+
template <typename T>
|
| 714 |
+
struct hash<c10::SingletonOrSharedTypePtr<T>> {
|
| 715 |
+
size_t operator()(const c10::SingletonOrSharedTypePtr<T>& x) const {
|
| 716 |
+
return std::hash<T*>()(x.get());
|
| 717 |
+
}
|
| 718 |
+
};
|
| 719 |
+
} // namespace std
|
moondream/lib/python3.10/site-packages/torch/include/ATen/core/rref_interface.h
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/util/intrusive_ptr.h>
|
| 4 |
+
#include <ATen/core/type_ptr.h>
|
| 5 |
+
|
| 6 |
+
namespace c10 {
|
| 7 |
+
|
| 8 |
+
struct Type;
|
| 9 |
+
using worker_id_t = int16_t;
|
| 10 |
+
|
| 11 |
+
// This abstract class contains only user-facing APIs, and will be shared
|
| 12 |
+
// between jit and distributed to implement TorchScript support.
|
| 13 |
+
class C10_EXPORT RRefInterface : public c10::intrusive_ptr_target {
|
| 14 |
+
public:
|
| 15 |
+
RRefInterface() = default;
|
| 16 |
+
// RRef is made NOT copyable NOT movable to prevent messing up reference
|
| 17 |
+
// counting.
|
| 18 |
+
RRefInterface(const RRefInterface& other) = delete;
|
| 19 |
+
RRefInterface(RRefInterface&& other) = delete;
|
| 20 |
+
RRefInterface& operator=(RRefInterface&& other) = delete;
|
| 21 |
+
|
| 22 |
+
~RRefInterface() override = default;
|
| 23 |
+
|
| 24 |
+
// returns the worker id of the owner
|
| 25 |
+
virtual worker_id_t owner() const = 0;
|
| 26 |
+
|
| 27 |
+
// returns the worker name of the owner
|
| 28 |
+
virtual std::string ownerName() const = 0;
|
| 29 |
+
|
| 30 |
+
// Returns true if this is the ``OwnerRRef``
|
| 31 |
+
virtual bool isOwner() const = 0;
|
| 32 |
+
|
| 33 |
+
// Returns true if this is an ``OwnerRRef`` or if this ``UserRRef`` has been
|
| 34 |
+
// confirmed by its owner.
|
| 35 |
+
virtual bool confirmedByOwner() const = 0;
|
| 36 |
+
|
| 37 |
+
virtual const TypePtr type() const = 0;
|
| 38 |
+
};
|
| 39 |
+
|
| 40 |
+
}
|
moondream/lib/python3.10/site-packages/torch/include/ATen/cudnn/Descriptors.h
ADDED
|
@@ -0,0 +1,391 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <string>
|
| 4 |
+
|
| 5 |
+
#include <ATen/cuda/CUDAContext.h>
|
| 6 |
+
#include <ATen/cuda/Exceptions.h>
|
| 7 |
+
|
| 8 |
+
#include <ATen/cudnn/cudnn-wrapper.h>
|
| 9 |
+
#include <ATen/cudnn/Utils.h>
|
| 10 |
+
#include <ATen/core/Tensor.h>
|
| 11 |
+
#include <ATen/TensorUtils.h>
|
| 12 |
+
#include <ATen/cuda/ATenCUDAGeneral.h>
|
| 13 |
+
#include <cuda.h>
|
| 14 |
+
|
| 15 |
+
#ifndef AT_PER_OPERATOR_HEADERS
|
| 16 |
+
#include <ATen/Functions.h>
|
| 17 |
+
#else
|
| 18 |
+
#include <ATen/ops/empty.h>
|
| 19 |
+
#endif
|
| 20 |
+
|
| 21 |
+
#if defined(CUDNN_VERSION) && CUDNN_VERSION >= 8907
|
| 22 |
+
#define USE_CUDNN_RNN_V8_API
|
| 23 |
+
#endif
|
| 24 |
+
|
| 25 |
+
namespace at { namespace native {
|
| 26 |
+
|
| 27 |
+
std::string cudnnTypeToString(cudnnDataType_t dtype);
|
| 28 |
+
|
| 29 |
+
// TODO: Add constructors for all of the descriptors
|
| 30 |
+
|
| 31 |
+
inline int dataSize(cudnnDataType_t dataType)
|
| 32 |
+
{
|
| 33 |
+
switch (dataType) {
|
| 34 |
+
#if defined(CUDNN_VERSION) && CUDNN_VERSION >= 8200
|
| 35 |
+
case CUDNN_DATA_BFLOAT16:
|
| 36 |
+
#endif
|
| 37 |
+
case CUDNN_DATA_HALF: return 2;
|
| 38 |
+
case CUDNN_DATA_FLOAT: return 4;
|
| 39 |
+
default: return 8;
|
| 40 |
+
}
|
| 41 |
+
}
|
| 42 |
+
|
| 43 |
+
// The stride for a size-1 dimensions is not uniquely determined; in
|
| 44 |
+
// fact, it can be anything you want, because the fact that the
|
| 45 |
+
// tensor is size 1 at this dimension means that you will never actually
|
| 46 |
+
// try advancing your pointer by this stride.
|
| 47 |
+
//
|
| 48 |
+
// However, CuDNN has a much more stringent requirement on strides:
|
| 49 |
+
// if you are passing a contiguous input, it better be the case
|
| 50 |
+
// that the stride for dim i is the product of the sizes of dims
|
| 51 |
+
// i+1 to the end. This stride is indeed uniquely determined. This
|
| 52 |
+
// function modifies 'stride' in place so this invariant holds.
|
| 53 |
+
template <typename T>
|
| 54 |
+
static inline void fixSizeOneDimStride(int dim, const T *size, T *stride, bool nhwc) {
|
| 55 |
+
int64_t z = 1;
|
| 56 |
+
int index = 0;
|
| 57 |
+
std::vector<int> permutation(dim);
|
| 58 |
+
|
| 59 |
+
if (nhwc) {
|
| 60 |
+
permutation[index++] = 1;
|
| 61 |
+
}
|
| 62 |
+
for (int d = dim-1; d > 1; d--) {
|
| 63 |
+
permutation[index++] = d;
|
| 64 |
+
}
|
| 65 |
+
if (!nhwc) {
|
| 66 |
+
permutation[index++] = 1;
|
| 67 |
+
}
|
| 68 |
+
permutation[index++] = 0;
|
| 69 |
+
for (int d : permutation) {
|
| 70 |
+
if (size[d] == 1) {
|
| 71 |
+
stride[d] = z;
|
| 72 |
+
} else {
|
| 73 |
+
z *= size[d];
|
| 74 |
+
}
|
| 75 |
+
}
|
| 76 |
+
}
|
| 77 |
+
|
| 78 |
+
template <typename T, cudnnStatus_t (*dtor)(T*)>
|
| 79 |
+
struct DescriptorDeleter {
|
| 80 |
+
void operator()(T* x) {
|
| 81 |
+
if (x != nullptr) {
|
| 82 |
+
AT_CUDNN_CHECK(dtor(x));
|
| 83 |
+
}
|
| 84 |
+
}
|
| 85 |
+
};
|
| 86 |
+
|
| 87 |
+
// A generic class for wrapping cuDNN descriptor types. All you need
|
| 88 |
+
// is to give the underlying type the Descriptor_t points to (usually,
|
| 89 |
+
// if it's cudnnTensorDescriptor_t it points to cudnnTensorStruct),
|
| 90 |
+
// the constructor and the destructor. Subclasses are responsible
|
| 91 |
+
// for defining a set() function to actually set the descriptor.
|
| 92 |
+
//
|
| 93 |
+
// Descriptors default construct to a nullptr, and have a descriptor
|
| 94 |
+
// initialized the first time you call set() or any other initializing
|
| 95 |
+
// function.
|
| 96 |
+
template <typename T, cudnnStatus_t (*ctor)(T**), cudnnStatus_t (*dtor)(T*)>
|
| 97 |
+
class TORCH_CUDA_CPP_API Descriptor {
|
| 98 |
+
public:
|
| 99 |
+
// TODO: Figure out why const-correctness doesn't work here
|
| 100 |
+
|
| 101 |
+
// Use desc() to access the underlying descriptor pointer in
|
| 102 |
+
// a read-only fashion. Most client code should use this.
|
| 103 |
+
// If the descriptor was never initialized, this will return
|
| 104 |
+
// nullptr.
|
| 105 |
+
T* desc() const { return desc_.get(); }
|
| 106 |
+
T* desc() { return desc_.get(); }
|
| 107 |
+
|
| 108 |
+
// Use mut_desc() to access the underlying descriptor pointer
|
| 109 |
+
// if you intend to modify what it points to (e.g., using
|
| 110 |
+
// cudnnSetFooDescriptor). This will ensure that the descriptor
|
| 111 |
+
// is initialized. Code in this file will use this function.
|
| 112 |
+
T* mut_desc() { init(); return desc_.get(); }
|
| 113 |
+
protected:
|
| 114 |
+
void init() {
|
| 115 |
+
if (desc_ == nullptr) {
|
| 116 |
+
T* raw_desc;
|
| 117 |
+
AT_CUDNN_CHECK(ctor(&raw_desc));
|
| 118 |
+
desc_.reset(raw_desc);
|
| 119 |
+
}
|
| 120 |
+
}
|
| 121 |
+
private:
|
| 122 |
+
std::unique_ptr<T, DescriptorDeleter<T, dtor>> desc_;
|
| 123 |
+
};
|
| 124 |
+
|
| 125 |
+
class TORCH_CUDA_CPP_API RNNDataDescriptor : public Descriptor<
|
| 126 |
+
cudnnRNNDataStruct,
|
| 127 |
+
&cudnnCreateRNNDataDescriptor,
|
| 128 |
+
&cudnnDestroyRNNDataDescriptor> {
|
| 129 |
+
public:
|
| 130 |
+
void set(const at::Tensor &t, cudnnRNNDataLayout_t layout, int maxSeqLength, int batchSize, int vectorSize, const int* seqLengthArray);
|
| 131 |
+
private:
|
| 132 |
+
void set(cudnnDataType_t dataType, cudnnRNNDataLayout_t layout, int maxSeqLength, int batchSize, int vectorSize, const int* seqLengthArray) {
|
| 133 |
+
AT_CUDNN_CHECK(cudnnSetRNNDataDescriptor(mut_desc(), dataType, layout, maxSeqLength, batchSize, vectorSize, seqLengthArray, NULL));
|
| 134 |
+
}
|
| 135 |
+
};
|
| 136 |
+
|
| 137 |
+
class TORCH_CUDA_CPP_API TensorDescriptor : public Descriptor<
|
| 138 |
+
cudnnTensorStruct,
|
| 139 |
+
&cudnnCreateTensorDescriptor,
|
| 140 |
+
&cudnnDestroyTensorDescriptor> {
|
| 141 |
+
public:
|
| 142 |
+
TensorDescriptor() = default;
|
| 143 |
+
explicit TensorDescriptor(const at::Tensor &t, size_t pad = 0) {
|
| 144 |
+
set(t, pad);
|
| 145 |
+
}
|
| 146 |
+
|
| 147 |
+
// Note [CuDNN broadcast padding]
|
| 148 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 149 |
+
// pad specifies the minimum dimensionality of the tensor descriptor
|
| 150 |
+
// we produce (it doesn't have anything to do with, e.g., convolution
|
| 151 |
+
// padding). If 't' is lower-dimensional than 'pad', the remaining
|
| 152 |
+
// dimensions (on the right) are padded with ones. This doesn't
|
| 153 |
+
// affect the underlying data layout. This is particularly useful for
|
| 154 |
+
// dealing with a peculiarity of the CuDNN API, which is that broadcasting in CuDNN is
|
| 155 |
+
// done in two steps: first, the client code is expected to pad out
|
| 156 |
+
// (the dimensions) input tensors to be the same dimension as the
|
| 157 |
+
// target broadcast, and then second, CuDNN takes of actually
|
| 158 |
+
// broadcasting size 1 dimensions.
|
| 159 |
+
|
| 160 |
+
void set(const at::Tensor &t, size_t pad = 0);
|
| 161 |
+
void set(const at::Tensor &t, at::MemoryFormat memory_format, size_t pad = 0);
|
| 162 |
+
void set(cudnnDataType_t dataType, IntArrayRef sizes, IntArrayRef strides, size_t pad = 0);
|
| 163 |
+
|
| 164 |
+
void print();
|
| 165 |
+
|
| 166 |
+
private:
|
| 167 |
+
void set(cudnnDataType_t dataType, IntArrayRef sizes, IntArrayRef strides, size_t pad, bool nhwc);
|
| 168 |
+
|
| 169 |
+
void set(cudnnDataType_t dataType, int dim, int* size, int* stride, bool nhwc) {
|
| 170 |
+
fixSizeOneDimStride<int>(dim, size, stride, nhwc);
|
| 171 |
+
AT_CUDNN_CHECK(cudnnSetTensorNdDescriptor(mut_desc(), dataType, dim, size, stride));
|
| 172 |
+
}
|
| 173 |
+
};
|
| 174 |
+
|
| 175 |
+
std::ostream& operator<<(std::ostream & out, const TensorDescriptor& d);
|
| 176 |
+
|
| 177 |
+
class TORCH_CUDA_CPP_API FilterDescriptor : public Descriptor<
|
| 178 |
+
cudnnFilterStruct,
|
| 179 |
+
&cudnnCreateFilterDescriptor,
|
| 180 |
+
&cudnnDestroyFilterDescriptor> {
|
| 181 |
+
public:
|
| 182 |
+
void set(const at::Tensor &t, int64_t pad = 0) {
|
| 183 |
+
set(t, at::MemoryFormat::Contiguous, pad);
|
| 184 |
+
}
|
| 185 |
+
|
| 186 |
+
void set(const at::Tensor &t, const at::MemoryFormat memory_format, int64_t pad = 0);
|
| 187 |
+
|
| 188 |
+
void print();
|
| 189 |
+
private:
|
| 190 |
+
void set(cudnnDataType_t dataType, int dim, int* size, cudnnTensorFormat_t filter_format) {
|
| 191 |
+
AT_CUDNN_CHECK(cudnnSetFilterNdDescriptor(mut_desc(), dataType, filter_format, dim, size));
|
| 192 |
+
}
|
| 193 |
+
};
|
| 194 |
+
|
| 195 |
+
std::ostream& operator<<(std::ostream & out, const FilterDescriptor& d);
|
| 196 |
+
|
| 197 |
+
struct TORCH_CUDA_CPP_API ConvolutionDescriptor
|
| 198 |
+
: public Descriptor<
|
| 199 |
+
cudnnConvolutionStruct,
|
| 200 |
+
&cudnnCreateConvolutionDescriptor,
|
| 201 |
+
&cudnnDestroyConvolutionDescriptor> {
|
| 202 |
+
void set(cudnnDataType_t dataType, int dim, int* pad, int* stride, int * upscale /* aka dilation */, int groups, bool allow_tf32) {
|
| 203 |
+
cudnnDataType_t mathType = dataType;
|
| 204 |
+
if (dataType == CUDNN_DATA_HALF) mathType = CUDNN_DATA_FLOAT;
|
| 205 |
+
AT_CUDNN_CHECK(cudnnSetConvolutionNdDescriptor(mut_desc(), dim, pad, stride, upscale,
|
| 206 |
+
CUDNN_CROSS_CORRELATION, mathType));
|
| 207 |
+
AT_CUDNN_CHECK(cudnnSetConvolutionGroupCount(mut_desc(), groups));
|
| 208 |
+
// See Note [behavior of cudnnFind and cudnnGet]
|
| 209 |
+
AT_CUDNN_CHECK(cudnnSetConvolutionMathType(mut_desc(), CUDNN_DEFAULT_MATH));
|
| 210 |
+
if(dataType == CUDNN_DATA_HALF) {
|
| 211 |
+
AT_CUDNN_CHECK(cudnnSetConvolutionMathType(mut_desc(), CUDNN_TENSOR_OP_MATH));
|
| 212 |
+
} else if (dataType == CUDNN_DATA_FLOAT && !allow_tf32) {
|
| 213 |
+
AT_CUDNN_CHECK(cudnnSetConvolutionMathType(mut_desc(), CUDNN_FMA_MATH));
|
| 214 |
+
}
|
| 215 |
+
}
|
| 216 |
+
};
|
| 217 |
+
|
| 218 |
+
struct TORCH_CUDA_CPP_API SpatialTransformerDescriptor
|
| 219 |
+
: public Descriptor<
|
| 220 |
+
cudnnSpatialTransformerStruct,
|
| 221 |
+
&cudnnCreateSpatialTransformerDescriptor,
|
| 222 |
+
&cudnnDestroySpatialTransformerDescriptor> {
|
| 223 |
+
void set(cudnnDataType_t dataType, int dim, int* size) {
|
| 224 |
+
AT_CUDNN_CHECK(cudnnSetSpatialTransformerNdDescriptor(mut_desc(), CUDNN_SAMPLER_BILINEAR, dataType, dim, size));
|
| 225 |
+
}
|
| 226 |
+
};
|
| 227 |
+
|
| 228 |
+
struct TORCH_CUDA_CPP_API DropoutDescriptor
|
| 229 |
+
: public Descriptor<
|
| 230 |
+
cudnnDropoutStruct,
|
| 231 |
+
&cudnnCreateDropoutDescriptor,
|
| 232 |
+
&cudnnDestroyDropoutDescriptor> {
|
| 233 |
+
at::Tensor state;
|
| 234 |
+
|
| 235 |
+
// Initialize a dropout descriptor's RNG state.
|
| 236 |
+
// WARNING: This function is very expensive, avoid calling this function!
|
| 237 |
+
void initialize_rng(cudnnHandle_t handle, float dropout, long long int seed, const TensorOptions& options) {
|
| 238 |
+
TORCH_INTERNAL_ASSERT(dropout > 0, "dropout must be nonzero; otherwise call set_no_dropout");
|
| 239 |
+
size_t state_size;
|
| 240 |
+
AT_CUDNN_CHECK(cudnnDropoutGetStatesSize(handle, &state_size));
|
| 241 |
+
AT_ASSERT(options.device().type() == kCUDA);
|
| 242 |
+
AT_ASSERT(options.dtype() == kByte);
|
| 243 |
+
state = at::empty({static_cast<int64_t>(state_size)}, options);
|
| 244 |
+
AT_CUDNN_CHECK(cudnnSetDropoutDescriptor(mut_desc(), handle, dropout, state.data_ptr(), state_size, seed));
|
| 245 |
+
}
|
| 246 |
+
|
| 247 |
+
// Restore a dropout descriptor given a dropout probability and existing RNG state.
|
| 248 |
+
void set(cudnnHandle_t handle, float dropout, at::Tensor state_) {
|
| 249 |
+
TORCH_INTERNAL_ASSERT(dropout > 0, "dropout must be nonzero; otherwise call set_no_dropout");
|
| 250 |
+
state = state_;
|
| 251 |
+
void *state_ptr = state.data_ptr();
|
| 252 |
+
size_t state_size = state.size(0);
|
| 253 |
+
// NB: The seed doesn't actually matter, so we give a dummy value
|
| 254 |
+
AT_CUDNN_CHECK(cudnnRestoreDropoutDescriptor(mut_desc(), handle, dropout, state_ptr, state_size, 0 /* seed */));
|
| 255 |
+
}
|
| 256 |
+
|
| 257 |
+
// Restore a dropout descriptor corresponding to no dropout
|
| 258 |
+
void set_no_dropout(cudnnHandle_t handle) {
|
| 259 |
+
// NB: seed doesn't matter when dropout = 0, because no random number
|
| 260 |
+
// initialization actually takes place when there is no dropout.
|
| 261 |
+
// NB: Empirically, cudnnSetDropoutDescriptor is cheap when
|
| 262 |
+
// dropout == 0
|
| 263 |
+
AT_CUDNN_CHECK(cudnnSetDropoutDescriptor(mut_desc(), handle, 0 /* dropout */, nullptr, 0 /* state_size */, 0 /* seed */));
|
| 264 |
+
}
|
| 265 |
+
};
|
| 266 |
+
|
| 267 |
+
struct TORCH_CUDA_CPP_API RNNDescriptor : public Descriptor<
|
| 268 |
+
cudnnRNNStruct,
|
| 269 |
+
&cudnnCreateRNNDescriptor,
|
| 270 |
+
&cudnnDestroyRNNDescriptor> {
|
| 271 |
+
DropoutDescriptor dropout_desc_;
|
| 272 |
+
void set(cudnnHandle_t handle,
|
| 273 |
+
#ifdef USE_CUDNN_RNN_V8_API
|
| 274 |
+
int input_size,
|
| 275 |
+
bool packed,
|
| 276 |
+
#endif
|
| 277 |
+
int hidden_size, int proj_size, int num_layers, DropoutDescriptor&& dropout_desc,
|
| 278 |
+
cudnnRNNInputMode_t input_mode, cudnnDirectionMode_t bidirectional,
|
| 279 |
+
cudnnRNNMode_t mode, cudnnDataType_t datatype, cudnnDataType_t input_type, cudnnRNNAlgo_t algo, bool allow_tf32) {
|
| 280 |
+
dropout_desc_ = std::move(dropout_desc);
|
| 281 |
+
#ifndef USE_CUDNN_RNN_V8_API
|
| 282 |
+
AT_CUDNN_CHECK(cudnnSetRNNDescriptor_v6(
|
| 283 |
+
handle,
|
| 284 |
+
mut_desc(),
|
| 285 |
+
hidden_size,
|
| 286 |
+
num_layers,
|
| 287 |
+
dropout_desc_.desc(),
|
| 288 |
+
input_mode,
|
| 289 |
+
bidirectional,
|
| 290 |
+
mode,
|
| 291 |
+
algo,
|
| 292 |
+
datatype));
|
| 293 |
+
if (proj_size != 0) {
|
| 294 |
+
AT_CUDNN_CHECK(cudnnSetRNNProjectionLayers(
|
| 295 |
+
handle,
|
| 296 |
+
/*rnnDesc=*/mut_desc(),
|
| 297 |
+
/*recProjSize=*/proj_size,
|
| 298 |
+
/*outProjSize=*/0));
|
| 299 |
+
}
|
| 300 |
+
cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties();
|
| 301 |
+
if (prop->major >= 7) {
|
| 302 |
+
if (input_type == CUDNN_DATA_HALF) {
|
| 303 |
+
cudnnSetRNNMatrixMathType(mut_desc(), CUDNN_TENSOR_OP_MATH);
|
| 304 |
+
}
|
| 305 |
+
else if (input_type == CUDNN_DATA_FLOAT && !allow_tf32) {
|
| 306 |
+
cudnnSetRNNMatrixMathType(mut_desc(), CUDNN_FMA_MATH);
|
| 307 |
+
}
|
| 308 |
+
else {
|
| 309 |
+
// Technically, as the default it's not necessary to explicitly
|
| 310 |
+
// set this.
|
| 311 |
+
cudnnSetRNNMatrixMathType(mut_desc(), CUDNN_DEFAULT_MATH);
|
| 312 |
+
}
|
| 313 |
+
}
|
| 314 |
+
#else
|
| 315 |
+
cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties();
|
| 316 |
+
auto math_type = CUDNN_DEFAULT_MATH;
|
| 317 |
+
if (prop->major >= 7) {
|
| 318 |
+
if (input_type == CUDNN_DATA_HALF) {
|
| 319 |
+
math_type = CUDNN_TENSOR_OP_MATH;
|
| 320 |
+
} else if (!allow_tf32) {
|
| 321 |
+
math_type = CUDNN_FMA_MATH;
|
| 322 |
+
}
|
| 323 |
+
}
|
| 324 |
+
AT_CUDNN_CHECK(cudnnSetRNNDescriptor_v8(
|
| 325 |
+
mut_desc(),
|
| 326 |
+
algo,
|
| 327 |
+
mode,
|
| 328 |
+
CUDNN_RNN_DOUBLE_BIAS,
|
| 329 |
+
bidirectional,
|
| 330 |
+
input_mode,
|
| 331 |
+
input_type,
|
| 332 |
+
datatype,
|
| 333 |
+
math_type,
|
| 334 |
+
input_size,
|
| 335 |
+
hidden_size,
|
| 336 |
+
proj_size ? proj_size : hidden_size,
|
| 337 |
+
num_layers,
|
| 338 |
+
dropout_desc_.desc(),
|
| 339 |
+
packed ? CUDNN_RNN_PADDED_IO_DISABLED : CUDNN_RNN_PADDED_IO_ENABLED));
|
| 340 |
+
#endif
|
| 341 |
+
}
|
| 342 |
+
};
|
| 343 |
+
|
| 344 |
+
struct TORCH_CUDA_CPP_API CTCLossDescriptor
|
| 345 |
+
: public Descriptor<
|
| 346 |
+
cudnnCTCLossStruct,
|
| 347 |
+
&cudnnCreateCTCLossDescriptor,
|
| 348 |
+
&cudnnDestroyCTCLossDescriptor> {
|
| 349 |
+
void set(cudnnDataType_t datatype) {
|
| 350 |
+
AT_CUDNN_CHECK(cudnnSetCTCLossDescriptor(mut_desc(), datatype));
|
| 351 |
+
}
|
| 352 |
+
void setEx(
|
| 353 |
+
cudnnDataType_t datatype,
|
| 354 |
+
cudnnLossNormalizationMode_t normMode,
|
| 355 |
+
cudnnNanPropagation_t gradMode) {
|
| 356 |
+
AT_CUDNN_CHECK(
|
| 357 |
+
cudnnSetCTCLossDescriptorEx(mut_desc(), datatype, normMode, gradMode));
|
| 358 |
+
}
|
| 359 |
+
};
|
| 360 |
+
|
| 361 |
+
struct TORCH_CUDA_CPP_API ActivationDescriptor
|
| 362 |
+
: public Descriptor<
|
| 363 |
+
cudnnActivationStruct,
|
| 364 |
+
&cudnnCreateActivationDescriptor,
|
| 365 |
+
&cudnnDestroyActivationDescriptor> {
|
| 366 |
+
void set(cudnnActivationMode_t mode) {
|
| 367 |
+
AT_ASSERT(
|
| 368 |
+
mode == CUDNN_ACTIVATION_RELU,
|
| 369 |
+
"TODO: support more cuDNN activation modes");
|
| 370 |
+
AT_CUDNN_CHECK(cudnnSetActivationDescriptor(
|
| 371 |
+
mut_desc(),
|
| 372 |
+
mode,
|
| 373 |
+
cudnnNanPropagation_t::CUDNN_NOT_PROPAGATE_NAN,
|
| 374 |
+
std::numeric_limits<double>::max()));
|
| 375 |
+
}
|
| 376 |
+
};
|
| 377 |
+
|
| 378 |
+
union Constant
|
| 379 |
+
{
|
| 380 |
+
float f;
|
| 381 |
+
double d;
|
| 382 |
+
Constant(cudnnDataType_t dataType, double value) {
|
| 383 |
+
if (dataType == CUDNN_DATA_HALF || dataType == CUDNN_DATA_FLOAT) {
|
| 384 |
+
f = static_cast<float>(value);
|
| 385 |
+
} else {
|
| 386 |
+
d = value;
|
| 387 |
+
}
|
| 388 |
+
}
|
| 389 |
+
};
|
| 390 |
+
|
| 391 |
+
}} // namespace
|
moondream/lib/python3.10/site-packages/torch/include/ATen/cudnn/Exceptions.h
ADDED
|
File without changes
|
moondream/lib/python3.10/site-packages/torch/include/ATen/cudnn/Handles.h
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <ATen/cudnn/Handle.h>
|
moondream/lib/python3.10/site-packages/torch/include/ATen/cudnn/Types.h
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/cudnn/cudnn-wrapper.h>
|
| 4 |
+
#include <ATen/Tensor.h>
|
| 5 |
+
|
| 6 |
+
namespace at { namespace native {
|
| 7 |
+
|
| 8 |
+
TORCH_CUDA_CPP_API cudnnDataType_t
|
| 9 |
+
getCudnnDataTypeFromScalarType(const at::ScalarType dtype);
|
| 10 |
+
cudnnDataType_t getCudnnDataType(const at::Tensor& tensor);
|
| 11 |
+
|
| 12 |
+
int64_t cudnn_version();
|
| 13 |
+
|
| 14 |
+
}} // namespace at::cudnn
|
moondream/lib/python3.10/site-packages/torch/include/ATen/cudnn/Utils.h
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/Tensor.h>
|
| 4 |
+
#include <ATen/cuda/Exceptions.h>
|
| 5 |
+
#include <ATen/cudnn/cudnn-wrapper.h>
|
| 6 |
+
#include <ATen/cudnn/Handle.h>
|
| 7 |
+
|
| 8 |
+
namespace at { namespace native {
|
| 9 |
+
|
| 10 |
+
// cuDNN has a buggy check for tensor being contiguous (that is, it does
|
| 11 |
+
// not ignore stride for dimension that is equal to 0). This function
|
| 12 |
+
// makes tensors which have zero stride contiguous, by setting the
|
| 13 |
+
// strides to 1 as cuDNN likes.
|
| 14 |
+
inline Tensor contiguousIfZeroInStrides(const Tensor& t) {
|
| 15 |
+
for (auto s : t.strides()) {
|
| 16 |
+
if (s == 0) return t.contiguous();
|
| 17 |
+
}
|
| 18 |
+
return t;
|
| 19 |
+
}
|
| 20 |
+
|
| 21 |
+
}}
|
moondream/lib/python3.10/site-packages/torch/include/ATen/cudnn/cudnn-wrapper.h
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <cudnn.h>
|
| 4 |
+
|
| 5 |
+
#define STRINGIFY(x) #x
|
| 6 |
+
#define STRING(x) STRINGIFY(x)
|
| 7 |
+
|
| 8 |
+
#if CUDNN_MAJOR < 6
|
| 9 |
+
#pragma message ("CuDNN v" STRING(CUDNN_MAJOR) " found, but need at least CuDNN v6. You can get the latest version of CuDNN from https://developer.nvidia.com/cudnn or disable CuDNN with USE_CUDNN=0")
|
| 10 |
+
#pragma message "We strongly encourage you to move to 6.0 and above."
|
| 11 |
+
#pragma message "This message is intended to annoy you enough to update."
|
| 12 |
+
#endif
|
| 13 |
+
|
| 14 |
+
#undef STRINGIFY
|
| 15 |
+
#undef STRING
|
moondream/lib/python3.10/site-packages/torch/include/ATen/detail/CUDAHooksInterface.h
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/Allocator.h>
|
| 4 |
+
#include <c10/util/Exception.h>
|
| 5 |
+
#include <c10/util/Registry.h>
|
| 6 |
+
|
| 7 |
+
#include <ATen/detail/AcceleratorHooksInterface.h>
|
| 8 |
+
|
| 9 |
+
// Forward-declares at::Generator and at::cuda::NVRTC
|
| 10 |
+
namespace at {
|
| 11 |
+
struct Generator;
|
| 12 |
+
namespace cuda {
|
| 13 |
+
struct NVRTC;
|
| 14 |
+
} // namespace cuda
|
| 15 |
+
} // namespace at
|
| 16 |
+
|
| 17 |
+
// NB: Class must live in `at` due to limitations of Registry.h.
|
| 18 |
+
namespace at {
|
| 19 |
+
|
| 20 |
+
#ifdef _MSC_VER
|
| 21 |
+
constexpr const char* CUDA_HELP =
|
| 22 |
+
"PyTorch splits its backend into two shared libraries: a CPU library "
|
| 23 |
+
"and a CUDA library; this error has occurred because you are trying "
|
| 24 |
+
"to use some CUDA functionality, but the CUDA library has not been "
|
| 25 |
+
"loaded by the dynamic linker for some reason. The CUDA library MUST "
|
| 26 |
+
"be loaded, EVEN IF you don't directly use any symbols from the CUDA library! "
|
| 27 |
+
"One common culprit is a lack of -INCLUDE:?warp_size@cuda@at@@YAHXZ "
|
| 28 |
+
"in your link arguments; many dynamic linkers will delete dynamic library "
|
| 29 |
+
"dependencies if you don't depend on any of their symbols. You can check "
|
| 30 |
+
"if this has occurred by using link on your binary to see if there is a "
|
| 31 |
+
"dependency on *_cuda.dll library.";
|
| 32 |
+
#else
|
| 33 |
+
constexpr const char* CUDA_HELP =
|
| 34 |
+
"PyTorch splits its backend into two shared libraries: a CPU library "
|
| 35 |
+
"and a CUDA library; this error has occurred because you are trying "
|
| 36 |
+
"to use some CUDA functionality, but the CUDA library has not been "
|
| 37 |
+
"loaded by the dynamic linker for some reason. The CUDA library MUST "
|
| 38 |
+
"be loaded, EVEN IF you don't directly use any symbols from the CUDA library! "
|
| 39 |
+
"One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many "
|
| 40 |
+
"dynamic linkers will delete dynamic library dependencies if you don't "
|
| 41 |
+
"depend on any of their symbols. You can check if this has occurred by "
|
| 42 |
+
"using ldd on your binary to see if there is a dependency on *_cuda.so "
|
| 43 |
+
"library.";
|
| 44 |
+
#endif
|
| 45 |
+
|
| 46 |
+
// The CUDAHooksInterface is an omnibus interface for any CUDA functionality
|
| 47 |
+
// which we may want to call into from CPU code (and thus must be dynamically
|
| 48 |
+
// dispatched, to allow for separate compilation of CUDA code). How do I
|
| 49 |
+
// decide if a function should live in this class? There are two tests:
|
| 50 |
+
//
|
| 51 |
+
// 1. Does the *implementation* of this function require linking against
|
| 52 |
+
// CUDA libraries?
|
| 53 |
+
//
|
| 54 |
+
// 2. Is this function *called* from non-CUDA ATen code?
|
| 55 |
+
//
|
| 56 |
+
// (2) should filter out many ostensible use-cases, since many times a CUDA
|
| 57 |
+
// function provided by ATen is only really ever used by actual CUDA code.
|
| 58 |
+
//
|
| 59 |
+
// TODO: Consider putting the stub definitions in another class, so that one
|
| 60 |
+
// never forgets to implement each virtual function in the real implementation
|
| 61 |
+
// in CUDAHooks. This probably doesn't buy us much though.
|
| 62 |
+
struct TORCH_API CUDAHooksInterface : AcceleratorHooksInterface {
|
| 63 |
+
// This should never actually be implemented, but it is used to
|
| 64 |
+
// squelch -Werror=non-virtual-dtor
|
| 65 |
+
virtual ~CUDAHooksInterface() override = default;
|
| 66 |
+
|
| 67 |
+
// Initialize THCState and, transitively, the CUDA state
|
| 68 |
+
virtual void initCUDA() const {
|
| 69 |
+
TORCH_CHECK(false, "Cannot initialize CUDA without ATen_cuda library. ", CUDA_HELP);
|
| 70 |
+
}
|
| 71 |
+
|
| 72 |
+
virtual const Generator& getDefaultCUDAGenerator(C10_UNUSED DeviceIndex device_index = -1) const {
|
| 73 |
+
TORCH_CHECK(false, "Cannot get default CUDA generator without ATen_cuda library. ", CUDA_HELP);
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
virtual Device getDeviceFromPtr(void* /*data*/) const {
|
| 77 |
+
TORCH_CHECK(false, "Cannot get device of pointer on CUDA without ATen_cuda library. ", CUDA_HELP);
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
virtual bool isPinnedPtr(const void* /*data*/) const {
|
| 81 |
+
return false;
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
virtual bool hasCUDA() const {
|
| 85 |
+
return false;
|
| 86 |
+
}
|
| 87 |
+
|
| 88 |
+
virtual bool hasCUDART() const {
|
| 89 |
+
return false;
|
| 90 |
+
}
|
| 91 |
+
|
| 92 |
+
virtual bool hasMAGMA() const {
|
| 93 |
+
return false;
|
| 94 |
+
}
|
| 95 |
+
|
| 96 |
+
virtual bool hasCuDNN() const {
|
| 97 |
+
return false;
|
| 98 |
+
}
|
| 99 |
+
|
| 100 |
+
virtual bool hasCuSOLVER() const {
|
| 101 |
+
return false;
|
| 102 |
+
}
|
| 103 |
+
|
| 104 |
+
virtual bool hasROCM() const {
|
| 105 |
+
return false;
|
| 106 |
+
}
|
| 107 |
+
|
| 108 |
+
virtual const at::cuda::NVRTC& nvrtc() const {
|
| 109 |
+
TORCH_CHECK(false, "NVRTC requires CUDA. ", CUDA_HELP);
|
| 110 |
+
}
|
| 111 |
+
|
| 112 |
+
virtual bool hasPrimaryContext(DeviceIndex device_index) const override {
|
| 113 |
+
TORCH_CHECK(false, "Cannot call hasPrimaryContext(", device_index, ") without ATen_cuda library. ", CUDA_HELP);
|
| 114 |
+
}
|
| 115 |
+
|
| 116 |
+
virtual DeviceIndex current_device() const {
|
| 117 |
+
return -1;
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
virtual Allocator* getPinnedMemoryAllocator() const {
|
| 121 |
+
TORCH_CHECK(false, "Pinned memory requires CUDA. ", CUDA_HELP);
|
| 122 |
+
}
|
| 123 |
+
|
| 124 |
+
virtual Allocator* getCUDADeviceAllocator() const {
|
| 125 |
+
TORCH_CHECK(false, "CUDADeviceAllocator requires CUDA. ", CUDA_HELP);
|
| 126 |
+
}
|
| 127 |
+
|
| 128 |
+
virtual bool compiledWithCuDNN() const {
|
| 129 |
+
return false;
|
| 130 |
+
}
|
| 131 |
+
|
| 132 |
+
virtual bool compiledWithMIOpen() const {
|
| 133 |
+
return false;
|
| 134 |
+
}
|
| 135 |
+
|
| 136 |
+
virtual bool supportsDilatedConvolutionWithCuDNN() const {
|
| 137 |
+
return false;
|
| 138 |
+
}
|
| 139 |
+
|
| 140 |
+
virtual bool supportsDepthwiseConvolutionWithCuDNN() const {
|
| 141 |
+
return false;
|
| 142 |
+
}
|
| 143 |
+
|
| 144 |
+
virtual bool supportsBFloat16ConvolutionWithCuDNNv8() const {
|
| 145 |
+
return false;
|
| 146 |
+
}
|
| 147 |
+
|
| 148 |
+
virtual long versionCuDNN() const {
|
| 149 |
+
TORCH_CHECK(false, "Cannot query cuDNN version without ATen_cuda library. ", CUDA_HELP);
|
| 150 |
+
}
|
| 151 |
+
|
| 152 |
+
virtual long versionCUDART() const {
|
| 153 |
+
TORCH_CHECK(false, "Cannot query CUDART version without ATen_cuda library. ", CUDA_HELP);
|
| 154 |
+
}
|
| 155 |
+
|
| 156 |
+
virtual std::string showConfig() const {
|
| 157 |
+
TORCH_CHECK(false, "Cannot query detailed CUDA version without ATen_cuda library. ", CUDA_HELP);
|
| 158 |
+
}
|
| 159 |
+
|
| 160 |
+
virtual double batchnormMinEpsilonCuDNN() const {
|
| 161 |
+
TORCH_CHECK(false,
|
| 162 |
+
"Cannot query batchnormMinEpsilonCuDNN() without ATen_cuda library. ", CUDA_HELP);
|
| 163 |
+
}
|
| 164 |
+
|
| 165 |
+
virtual int64_t cuFFTGetPlanCacheMaxSize(DeviceIndex /*device_index*/) const {
|
| 166 |
+
TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP);
|
| 167 |
+
}
|
| 168 |
+
|
| 169 |
+
virtual void cuFFTSetPlanCacheMaxSize(DeviceIndex /*device_index*/, int64_t /*max_size*/) const {
|
| 170 |
+
TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP);
|
| 171 |
+
}
|
| 172 |
+
|
| 173 |
+
virtual int64_t cuFFTGetPlanCacheSize(DeviceIndex /*device_index*/) const {
|
| 174 |
+
TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP);
|
| 175 |
+
}
|
| 176 |
+
|
| 177 |
+
virtual void cuFFTClearPlanCache(DeviceIndex /*device_index*/) const {
|
| 178 |
+
TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP);
|
| 179 |
+
}
|
| 180 |
+
|
| 181 |
+
virtual int getNumGPUs() const {
|
| 182 |
+
return 0;
|
| 183 |
+
}
|
| 184 |
+
|
| 185 |
+
virtual void deviceSynchronize(DeviceIndex /*device_index*/) const {
|
| 186 |
+
TORCH_CHECK(false, "Cannot synchronize CUDA device without ATen_cuda library. ", CUDA_HELP);
|
| 187 |
+
}
|
| 188 |
+
};
|
| 189 |
+
|
| 190 |
+
// NB: dummy argument to suppress "ISO C++11 requires at least one argument
|
| 191 |
+
// for the "..." in a variadic macro"
|
| 192 |
+
struct TORCH_API CUDAHooksArgs {};
|
| 193 |
+
|
| 194 |
+
TORCH_DECLARE_REGISTRY(CUDAHooksRegistry, CUDAHooksInterface, CUDAHooksArgs);
|
| 195 |
+
#define REGISTER_CUDA_HOOKS(clsname) \
|
| 196 |
+
C10_REGISTER_CLASS(CUDAHooksRegistry, clsname, clsname)
|
| 197 |
+
|
| 198 |
+
namespace detail {
|
| 199 |
+
TORCH_API const CUDAHooksInterface& getCUDAHooks();
|
| 200 |
+
} // namespace detail
|
| 201 |
+
} // namespace at
|
moondream/lib/python3.10/site-packages/torch/include/ATen/detail/MPSHooksInterface.h
ADDED
|
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright © 2022 Apple Inc.
|
| 2 |
+
|
| 3 |
+
#pragma once
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Allocator.h>
|
| 6 |
+
#include <ATen/core/Generator.h>
|
| 7 |
+
#include <ATen/detail/AcceleratorHooksInterface.h>
|
| 8 |
+
#include <c10/util/Exception.h>
|
| 9 |
+
#include <c10/util/Registry.h>
|
| 10 |
+
|
| 11 |
+
#include <cstddef>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
|
| 15 |
+
struct TORCH_API MPSHooksInterface : AcceleratorHooksInterface {
|
| 16 |
+
// this fails the implementation if MPSHooks functions are called, but
|
| 17 |
+
// MPS backend is not present.
|
| 18 |
+
#define FAIL_MPSHOOKS_FUNC(func) \
|
| 19 |
+
TORCH_CHECK(false, "Cannot execute ", func, "() without MPS backend.");
|
| 20 |
+
|
| 21 |
+
virtual ~MPSHooksInterface() override = default;
|
| 22 |
+
|
| 23 |
+
// Initialize the MPS library state
|
| 24 |
+
virtual void initMPS() const {
|
| 25 |
+
FAIL_MPSHOOKS_FUNC(__func__);
|
| 26 |
+
}
|
| 27 |
+
virtual bool hasMPS() const {
|
| 28 |
+
return false;
|
| 29 |
+
}
|
| 30 |
+
virtual bool isOnMacOSorNewer(unsigned major = 13, unsigned minor = 0) const {
|
| 31 |
+
FAIL_MPSHOOKS_FUNC(__func__);
|
| 32 |
+
}
|
| 33 |
+
virtual const Generator& getDefaultMPSGenerator() const {
|
| 34 |
+
FAIL_MPSHOOKS_FUNC(__func__);
|
| 35 |
+
}
|
| 36 |
+
virtual Allocator* getMPSDeviceAllocator() const {
|
| 37 |
+
FAIL_MPSHOOKS_FUNC(__func__);
|
| 38 |
+
}
|
| 39 |
+
virtual void deviceSynchronize() const {
|
| 40 |
+
FAIL_MPSHOOKS_FUNC(__func__);
|
| 41 |
+
}
|
| 42 |
+
virtual void commitStream() const {
|
| 43 |
+
FAIL_MPSHOOKS_FUNC(__func__);
|
| 44 |
+
}
|
| 45 |
+
virtual void* getCommandBuffer() const {
|
| 46 |
+
FAIL_MPSHOOKS_FUNC(__func__);
|
| 47 |
+
}
|
| 48 |
+
virtual void* getDispatchQueue() const {
|
| 49 |
+
FAIL_MPSHOOKS_FUNC(__func__);
|
| 50 |
+
}
|
| 51 |
+
virtual void emptyCache() const {
|
| 52 |
+
FAIL_MPSHOOKS_FUNC(__func__);
|
| 53 |
+
}
|
| 54 |
+
virtual size_t getCurrentAllocatedMemory() const {
|
| 55 |
+
FAIL_MPSHOOKS_FUNC(__func__);
|
| 56 |
+
}
|
| 57 |
+
virtual size_t getDriverAllocatedMemory() const {
|
| 58 |
+
FAIL_MPSHOOKS_FUNC(__func__);
|
| 59 |
+
}
|
| 60 |
+
virtual void setMemoryFraction(double /*ratio*/) const {
|
| 61 |
+
FAIL_MPSHOOKS_FUNC(__func__);
|
| 62 |
+
}
|
| 63 |
+
virtual void profilerStartTrace(const std::string& mode, bool waitUntilCompleted) const {
|
| 64 |
+
FAIL_MPSHOOKS_FUNC(__func__);
|
| 65 |
+
}
|
| 66 |
+
virtual void profilerStopTrace() const {
|
| 67 |
+
FAIL_MPSHOOKS_FUNC(__func__);
|
| 68 |
+
}
|
| 69 |
+
virtual uint32_t acquireEvent(bool enable_timing) const {
|
| 70 |
+
FAIL_MPSHOOKS_FUNC(__func__);
|
| 71 |
+
}
|
| 72 |
+
virtual void releaseEvent(uint32_t event_id) const {
|
| 73 |
+
FAIL_MPSHOOKS_FUNC(__func__);
|
| 74 |
+
}
|
| 75 |
+
virtual void recordEvent(uint32_t event_id) const {
|
| 76 |
+
FAIL_MPSHOOKS_FUNC(__func__);
|
| 77 |
+
}
|
| 78 |
+
virtual void waitForEvent(uint32_t event_id) const {
|
| 79 |
+
FAIL_MPSHOOKS_FUNC(__func__);
|
| 80 |
+
}
|
| 81 |
+
virtual void synchronizeEvent(uint32_t event_id) const {
|
| 82 |
+
FAIL_MPSHOOKS_FUNC(__func__);
|
| 83 |
+
}
|
| 84 |
+
virtual bool queryEvent(uint32_t event_id) const {
|
| 85 |
+
FAIL_MPSHOOKS_FUNC(__func__);
|
| 86 |
+
}
|
| 87 |
+
virtual double elapsedTimeOfEvents(uint32_t start_event_id, uint32_t end_event_id) const {
|
| 88 |
+
FAIL_MPSHOOKS_FUNC(__func__);
|
| 89 |
+
}
|
| 90 |
+
virtual bool hasPrimaryContext(DeviceIndex device_index) const override {
|
| 91 |
+
FAIL_MPSHOOKS_FUNC(__func__);
|
| 92 |
+
}
|
| 93 |
+
#undef FAIL_MPSHOOKS_FUNC
|
| 94 |
+
};
|
| 95 |
+
|
| 96 |
+
struct TORCH_API MPSHooksArgs {};
|
| 97 |
+
|
| 98 |
+
TORCH_DECLARE_REGISTRY(MPSHooksRegistry, MPSHooksInterface, MPSHooksArgs);
|
| 99 |
+
#define REGISTER_MPS_HOOKS(clsname) \
|
| 100 |
+
C10_REGISTER_CLASS(MPSHooksRegistry, clsname, clsname)
|
| 101 |
+
|
| 102 |
+
namespace detail {
|
| 103 |
+
TORCH_API const MPSHooksInterface& getMPSHooks();
|
| 104 |
+
|
| 105 |
+
} // namespace detail
|
| 106 |
+
} // namespace at
|
moondream/lib/python3.10/site-packages/torch/include/ATen/detail/PrivateUse1HooksInterface.h
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/Generator.h>
|
| 4 |
+
#include <ATen/detail/AcceleratorHooksInterface.h>
|
| 5 |
+
#include <c10/core/Allocator.h>
|
| 6 |
+
#include <c10/core/Device.h>
|
| 7 |
+
#include <c10/core/Storage.h>
|
| 8 |
+
#include <c10/util/Exception.h>
|
| 9 |
+
namespace at {
|
| 10 |
+
|
| 11 |
+
struct TORCH_API PrivateUse1HooksInterface : AcceleratorHooksInterface {
|
| 12 |
+
virtual ~PrivateUse1HooksInterface() override = default;
|
| 13 |
+
virtual const at::Generator& getDefaultGenerator(
|
| 14 |
+
c10::DeviceIndex device_index) {
|
| 15 |
+
TORCH_CHECK_NOT_IMPLEMENTED(
|
| 16 |
+
false,
|
| 17 |
+
"You should register `PrivateUse1HooksInterface` for PrivateUse1 before call `getDefaultGenerator`.");
|
| 18 |
+
}
|
| 19 |
+
|
| 20 |
+
virtual at::Device getDeviceFromPtr(void* data) const {
|
| 21 |
+
TORCH_CHECK_NOT_IMPLEMENTED(
|
| 22 |
+
false,
|
| 23 |
+
"You should register `PrivateUse1HooksInterface` for PrivateUse1 before call `getDeviceFromPtr`.");
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
virtual Allocator* getPinnedMemoryAllocator() const {
|
| 27 |
+
TORCH_CHECK(
|
| 28 |
+
false,
|
| 29 |
+
"You should register `PrivateUse1HooksInterface` for PrivateUse1 before call `getPinnedMemoryAllocator`.");
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
virtual bool hasPrimaryContext(DeviceIndex device_index) const override {
|
| 33 |
+
TORCH_CHECK_NOT_IMPLEMENTED(
|
| 34 |
+
false,
|
| 35 |
+
"You should register `PrivateUse1HooksInterface` for PrivateUse1 before call `hasPrimaryContext`.");
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
virtual void initPrivateUse1() const {}
|
| 39 |
+
virtual void resizePrivateUse1Bytes(const c10::Storage &storage, size_t newsize) const {
|
| 40 |
+
TORCH_CHECK_NOT_IMPLEMENTED(
|
| 41 |
+
false,
|
| 42 |
+
"You should register `PrivateUse1HooksInterface` for PrivateUse1 before call `resizePrivateUse1Bytes`.");
|
| 43 |
+
}
|
| 44 |
+
};
|
| 45 |
+
|
| 46 |
+
struct TORCH_API PrivateUse1HooksArgs {};
|
| 47 |
+
|
| 48 |
+
TORCH_API void RegisterPrivateUse1HooksInterface(
|
| 49 |
+
at::PrivateUse1HooksInterface* hook_);
|
| 50 |
+
|
| 51 |
+
TORCH_API at::PrivateUse1HooksInterface* GetPrivateUse1HooksInterface();
|
| 52 |
+
|
| 53 |
+
TORCH_API bool isPrivateUse1HooksRegistered();
|
| 54 |
+
|
| 55 |
+
namespace detail {
|
| 56 |
+
|
| 57 |
+
TORCH_API const at::PrivateUse1HooksInterface& getPrivateUse1Hooks();
|
| 58 |
+
|
| 59 |
+
} // namespace detail
|
| 60 |
+
|
| 61 |
+
} // namespace at
|
moondream/lib/python3.10/site-packages/torch/include/ATen/detail/XPUHooksInterface.h
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/Device.h>
|
| 4 |
+
#include <c10/util/Exception.h>
|
| 5 |
+
#include <ATen/core/Generator.h>
|
| 6 |
+
#include <c10/util/Registry.h>
|
| 7 |
+
|
| 8 |
+
#include <cstddef>
|
| 9 |
+
#include <functional>
|
| 10 |
+
#include <memory>
|
| 11 |
+
|
| 12 |
+
namespace at {
|
| 13 |
+
|
| 14 |
+
constexpr const char* XPU_HELP =
|
| 15 |
+
"The XPU backend requires Intel Extension for Pytorch;"
|
| 16 |
+
"this error has occurred because you are trying "
|
| 17 |
+
"to use some XPU's functionality, but the Intel Extension for Pytorch has not been "
|
| 18 |
+
"loaded for some reason. The Intel Extension for Pytorch MUST "
|
| 19 |
+
"be loaded, EVEN IF you don't directly use any symbols from that!";
|
| 20 |
+
|
| 21 |
+
struct TORCH_API XPUHooksInterface {
|
| 22 |
+
virtual ~XPUHooksInterface() {}
|
| 23 |
+
|
| 24 |
+
virtual void initXPU() const {
|
| 25 |
+
TORCH_CHECK(
|
| 26 |
+
false,
|
| 27 |
+
"Cannot initialize XPU without Intel Extension for Pytorch.",
|
| 28 |
+
XPU_HELP);
|
| 29 |
+
}
|
| 30 |
+
|
| 31 |
+
virtual bool hasXPU() const {
|
| 32 |
+
return false;
|
| 33 |
+
}
|
| 34 |
+
|
| 35 |
+
virtual std::string showConfig() const {
|
| 36 |
+
TORCH_CHECK(
|
| 37 |
+
false,
|
| 38 |
+
"Cannot query detailed XPU version without Intel Extension for Pytorch. ",
|
| 39 |
+
XPU_HELP);
|
| 40 |
+
}
|
| 41 |
+
|
| 42 |
+
virtual int32_t getGlobalIdxFromDevice(const Device& device) const {
|
| 43 |
+
TORCH_CHECK(false, "Cannot get XPU global device index without ATen_xpu library.");
|
| 44 |
+
}
|
| 45 |
+
|
| 46 |
+
virtual Generator getXPUGenerator(C10_UNUSED DeviceIndex device_index = -1) const {
|
| 47 |
+
TORCH_CHECK(false, "Cannot get XPU generator without Intel Extension for Pytorch. ", XPU_HELP);
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
virtual const Generator& getDefaultXPUGenerator(C10_UNUSED DeviceIndex device_index = -1) const {
|
| 51 |
+
TORCH_CHECK(false, "Cannot get default XPU generator without Intel Extension for Pytorch. ", XPU_HELP);
|
| 52 |
+
}
|
| 53 |
+
|
| 54 |
+
virtual DeviceIndex getNumGPUs() const {
|
| 55 |
+
return 0;
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
virtual DeviceIndex current_device() const {
|
| 59 |
+
TORCH_CHECK(false, "Cannot get current device on XPU without ATen_xpu library.");
|
| 60 |
+
}
|
| 61 |
+
|
| 62 |
+
virtual Device getDeviceFromPtr(void* /*data*/) const {
|
| 63 |
+
TORCH_CHECK(false, "Cannot get device of pointer on XPU without ATen_xpu library.");
|
| 64 |
+
}
|
| 65 |
+
|
| 66 |
+
virtual void deviceSynchronize(DeviceIndex /*device_index*/) const {
|
| 67 |
+
TORCH_CHECK(false, "Cannot synchronize XPU device without ATen_xpu library.");
|
| 68 |
+
}
|
| 69 |
+
};
|
| 70 |
+
|
| 71 |
+
struct TORCH_API XPUHooksArgs {};
|
| 72 |
+
|
| 73 |
+
C10_DECLARE_REGISTRY(XPUHooksRegistry, XPUHooksInterface, XPUHooksArgs);
|
| 74 |
+
#define REGISTER_XPU_HOOKS(clsname) \
|
| 75 |
+
C10_REGISTER_CLASS(XPUHooksRegistry, clsname, clsname)
|
| 76 |
+
|
| 77 |
+
namespace detail {
|
| 78 |
+
TORCH_API const XPUHooksInterface& getXPUHooks();
|
| 79 |
+
} // namespace detail
|
| 80 |
+
} // namespace at
|
moondream/lib/python3.10/site-packages/torch/include/ATen/functorch/ADInterpreters.h
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <ATen/functorch/Interpreter.h>
|
| 3 |
+
|
| 4 |
+
namespace at::functorch {
|
| 5 |
+
|
| 6 |
+
// These are the interpreters for our AD transforms
|
| 7 |
+
// (grad, vjp and jvp).
|
| 8 |
+
// See NOTE: [functorch interpreter stack] for more details.
|
| 9 |
+
|
| 10 |
+
struct TORCH_API GradInterpreterPtr {
|
| 11 |
+
explicit GradInterpreterPtr(const Interpreter* base): base_(base) { TORCH_INTERNAL_ASSERT(base->key() == TransformType::Grad); }
|
| 12 |
+
TransformType key() const { return base_->key(); }
|
| 13 |
+
int64_t level() const { return base_->level(); }
|
| 14 |
+
void processImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack);
|
| 15 |
+
void sendToNextInterpreterImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack, bool grad_special_case);
|
| 16 |
+
bool prevGradMode() const {
|
| 17 |
+
return std::get<GradInterpreterMeta>(base_->meta()).prevGradMode_;
|
| 18 |
+
}
|
| 19 |
+
Tensor lift(const Tensor& tensor) const;
|
| 20 |
+
private:
|
| 21 |
+
const Interpreter* base_;
|
| 22 |
+
};
|
| 23 |
+
|
| 24 |
+
struct TORCH_API JvpInterpreterPtr {
|
| 25 |
+
explicit JvpInterpreterPtr(const Interpreter* base): base_(base) { TORCH_INTERNAL_ASSERT(base->key() == TransformType::Jvp); }
|
| 26 |
+
TransformType key() const { return base_->key(); }
|
| 27 |
+
int64_t level() const { return base_->level(); }
|
| 28 |
+
void processImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack);
|
| 29 |
+
void sendToNextInterpreterImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack, bool grad_special_case);
|
| 30 |
+
bool prevFwdGradMode() const {
|
| 31 |
+
return std::get<JvpInterpreterMeta>(base_->meta()).prevFwdGradMode_;
|
| 32 |
+
}
|
| 33 |
+
Tensor lift(const Tensor& tensor) const;
|
| 34 |
+
private:
|
| 35 |
+
const Interpreter* base_;
|
| 36 |
+
};
|
| 37 |
+
|
| 38 |
+
} // namespace at::functorch
|
moondream/lib/python3.10/site-packages/torch/include/ATen/functorch/BatchRulesHelper.h
ADDED
|
@@ -0,0 +1,475 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
// All rights reserved.
|
| 3 |
+
//
|
| 4 |
+
// This source code is licensed under the BSD-style license found in the
|
| 5 |
+
// LICENSE file in the root directory of this source tree.
|
| 6 |
+
#pragma once
|
| 7 |
+
|
| 8 |
+
#include <c10/util/TypeList.h>
|
| 9 |
+
|
| 10 |
+
#include <ATen/ATen.h>
|
| 11 |
+
#include <ATen/Operators.h>
|
| 12 |
+
|
| 13 |
+
#include <ATen/functorch/DynamicLayer.h>
|
| 14 |
+
#include <ATen/functorch/TensorWrapper.h>
|
| 15 |
+
#include <ATen/functorch/BatchingMetaprogramming.h>
|
| 16 |
+
#include <ATen/functorch/LegacyVmapTransforms.h>
|
| 17 |
+
#include <ATen/functorch/BatchedFallback.h>
|
| 18 |
+
#include <ATen/functorch/PlumbingHelper.h>
|
| 19 |
+
#include <ATen/core/dispatch/Dispatcher.h>
|
| 20 |
+
#include <ATen/VmapGeneratedPlumbing.h>
|
| 21 |
+
|
| 22 |
+
#include <utility>
|
| 23 |
+
|
| 24 |
+
// This file contains helper functions for batching rules.
|
| 25 |
+
|
| 26 |
+
namespace at::functorch {
|
| 27 |
+
|
| 28 |
+
TORCH_API Tensor reshape_dim_into(int64_t src, int64_t dst, const Tensor& x);
|
| 29 |
+
TORCH_API Tensor reshape_dim_outof(int64_t src, int64_t size1, const Tensor& x);
|
| 30 |
+
|
| 31 |
+
TORCH_API Tensor reshape_dim_outof_symint(int64_t src, c10::SymInt size1, const Tensor& x);
|
| 32 |
+
|
| 33 |
+
Tensor moveBatchDimToFront(const Tensor& tensor, optional<int64_t> maybe_batch_dim);
|
| 34 |
+
int64_t rankWithoutBatchDim(const Tensor& tensor, optional<int64_t> maybe_batch_dim);
|
| 35 |
+
int64_t numelWithoutBatchDim(const Tensor& tensor, optional<int64_t> maybe_batch_dim);
|
| 36 |
+
optional<int64_t> valIfNonempty(optional<int64_t> maybe_empty, int64_t new_val);
|
| 37 |
+
int64_t getPhysicalDim(const Tensor& tensor, bool has_batch_dim, int64_t logical_dim);
|
| 38 |
+
VmapDimVector getPhysicalDims(const Tensor& tensor, bool has_batch_dim, IntArrayRef logical_dims);
|
| 39 |
+
|
| 40 |
+
void vmapIncompatibleInplaceError(const char* schema_name);
|
| 41 |
+
|
| 42 |
+
Tensor maybePadToLogicalRank(const Tensor& tensor, optional<int64_t> has_bdim, int64_t logical_rank);
|
| 43 |
+
|
| 44 |
+
void check_randomness(RandomnessType randomness);
|
| 45 |
+
void check_randomness(RandomnessType randomness, bool any_tensor_bdim);
|
| 46 |
+
|
| 47 |
+
inline Tensor ensure_has_bdim(const Tensor& tensor, bool has_bdim, c10::SymInt batch_size) {
|
| 48 |
+
if (has_bdim) {
|
| 49 |
+
return tensor;
|
| 50 |
+
}
|
| 51 |
+
const auto sizes = tensor.sym_sizes();
|
| 52 |
+
SymDimVector expanded_shape;
|
| 53 |
+
expanded_shape.reserve(sizes.size());
|
| 54 |
+
expanded_shape.emplace_back(std::move(batch_size));
|
| 55 |
+
expanded_shape.insert(expanded_shape.end(), sizes.begin(), sizes.end());
|
| 56 |
+
return tensor.expand_symint(expanded_shape);
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
#define VMAP_SUPPORT(op, batch_rule) \
|
| 60 |
+
m.impl(#op, op ## _generated_plumbing<decltype(&batch_rule), &batch_rule>);
|
| 61 |
+
|
| 62 |
+
#define VMAP_SUPPORT2(op, overload, batch_rule) \
|
| 63 |
+
m.impl(#op "." #overload, op ## _ ## overload ## _generated_plumbing<decltype(&batch_rule), &batch_rule>);
|
| 64 |
+
|
| 65 |
+
#define OP_DECOMPOSE(op) m.impl(#op, static_cast<decltype(&ATEN_FN(op))>(native::op));
|
| 66 |
+
#define OP_DECOMPOSE2(op, overload) m.impl(#op"."#overload, static_cast<decltype(&ATEN_FN2(op, overload))>(native::op));
|
| 67 |
+
|
| 68 |
+
// DO NOT USE ME DIRECTLY! Use BASIC_UNARY_BATCH_RULE to save yourself some pain
|
| 69 |
+
template <typename A, A a, typename C>
|
| 70 |
+
struct BasicUnaryBatchRuleHelper;
|
| 71 |
+
|
| 72 |
+
template <typename F, F Func, typename A, typename... T>
|
| 73 |
+
struct BasicUnaryBatchRuleHelper<F, Func, c10::guts::typelist::typelist<A, T...>> {
|
| 74 |
+
static std::tuple<Tensor,optional<int64_t>> apply(
|
| 75 |
+
const Tensor& tensor,
|
| 76 |
+
optional<int64_t> batch_dim,
|
| 77 |
+
T... extra_args) {
|
| 78 |
+
return std::make_tuple(Func(tensor, std::forward<T>(extra_args)...), batch_dim);
|
| 79 |
+
}
|
| 80 |
+
};
|
| 81 |
+
|
| 82 |
+
// USAGE: BASIC_UNARY_BATCH_RULE(at::sin)
|
| 83 |
+
// INCORRECT USAGE: BASIC_UNARY_BATCH_RULE(&at::sin)
|
| 84 |
+
// It is important that this macro is not passed a function pointer!!
|
| 85 |
+
#define BASIC_UNARY_BATCH_RULE(fn) SINGLE_ARG(\
|
| 86 |
+
BasicUnaryBatchRuleHelper<\
|
| 87 |
+
decltype(&fn),\
|
| 88 |
+
&fn,\
|
| 89 |
+
c10::guts::function_traits<decltype(fn)>::parameter_types>::apply)
|
| 90 |
+
|
| 91 |
+
#define UNARY_POINTWISE(op) \
|
| 92 |
+
VMAP_SUPPORT(op, BASIC_UNARY_BATCH_RULE(ATEN_FN(op)));
|
| 93 |
+
|
| 94 |
+
template <typename A, A a, typename C>
|
| 95 |
+
struct VariadicBdimsBatchRuleHelper;
|
| 96 |
+
|
| 97 |
+
template <typename F, F Func, typename A, typename... T>
|
| 98 |
+
struct VariadicBdimsBatchRuleHelper<F, Func, c10::guts::typelist::typelist<A, T...>> {
|
| 99 |
+
static std::tuple<Tensor,optional<int64_t>> apply(
|
| 100 |
+
const Tensor& tensor,
|
| 101 |
+
optional<int64_t> batch_dim,
|
| 102 |
+
T... extra_args) {
|
| 103 |
+
auto tensor_ = moveBatchDimToFront(tensor, batch_dim);
|
| 104 |
+
return std::make_tuple(Func(tensor_, std::forward<T>(extra_args)...), 0);
|
| 105 |
+
}
|
| 106 |
+
};
|
| 107 |
+
|
| 108 |
+
// USAGE: VARIADIC_BDIMS_BATCH_RULE(at::cholesky_inverse)
|
| 109 |
+
// INCORRECT USAGE: VARIADIC_BDIMS_BATCH_RULE(&at::cholesky_inverse)
|
| 110 |
+
// It is important that this macro is not passed a function pointer!!
|
| 111 |
+
#define VARIADIC_BDIMS_BATCH_RULE(fn) SINGLE_ARG(\
|
| 112 |
+
VariadicBdimsBatchRuleHelper<\
|
| 113 |
+
decltype(&fn),\
|
| 114 |
+
&fn,\
|
| 115 |
+
c10::guts::function_traits<decltype(fn)>::parameter_types>::apply)
|
| 116 |
+
|
| 117 |
+
#define VARIADIC_BDIMS(op) \
|
| 118 |
+
VMAP_SUPPORT(op, VARIADIC_BDIMS_BATCH_RULE(ATEN_FN(op)));
|
| 119 |
+
|
| 120 |
+
#define VARIADIC_BDIMS2(op, overload) \
|
| 121 |
+
VMAP_SUPPORT2(op, overload, VARIADIC_BDIMS_BATCH_RULE(ATEN_FN2(op, overload)));
|
| 122 |
+
|
| 123 |
+
template<class F, F Func>
|
| 124 |
+
// Generic boxed batching rule driven by a `Func` hook (supplied by an
// enclosing `template <typename F, F Func>` header that sits above this chunk):
// 1. unwraps every Tensor argument at the current vmap level,
// 2. lets `Func` transform the (tensor, bdim) pairs in place,
// 3. re-pushes the arguments (tensors replaced, non-tensors untouched),
// 4. calls the op boxed, and
// 5. re-wraps every Tensor return as batched at dim 0, current level.
// Ops that return any non-tensor value are rejected with an internal assert.
void boxed_tensor_inputs_batch_rule(const c10::OperatorHandle& op, torch::jit::Stack* stack) {
  const auto& schema = op.schema();
  const auto num_returns = schema.returns().size();
  const auto num_arguments = schema.arguments().size();

  // Prevent re-entering the batched key while we manipulate the stack.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "boxed_tensor_inputs_batch_rule");

  int64_t cur_level = maybe_layer->layerId();

  // Fast path: no argument is batched at this level -> just call the op.
  auto orig_arguments = torch::jit::last(*stack, num_arguments);
  if (std::none_of(orig_arguments.begin(), orig_arguments.end(), ivalueParticipatesInCurrentLevel)) {
    op.callBoxed(stack);
    return;
  }

  auto arguments = torch::jit::pop(*stack, num_arguments);
  std::vector<std::pair<Tensor, optional<int64_t>>> tensor_inputs;
  std::vector<int64_t> tensor_pos;  // argument indices of the tensors, in order
  for (const auto idx : c10::irange(0, num_arguments)) {
    const auto& ivalue = arguments[idx];
    if (ivalue.isTensor()) {
      auto [tensor_value, tensor_bdim] = unwrapTensorAtLevel(ivalue.toTensor(), cur_level);
      tensor_inputs.emplace_back(tensor_value, tensor_bdim);
      tensor_pos.push_back(idx);
    }
  }
  // Caller-provided hook: mutates the unwrapped (tensor, bdim) pairs in place
  // (e.g. moves batch dims to the front, pads logical ranks).
  Func(tensor_inputs);

  // Rebuild the stack in the original argument order, substituting the
  // processed tensors at their recorded positions.
  size_t tensor_idx = 0;
  TORCH_INTERNAL_ASSERT(!tensor_pos.empty());
  for (const auto arg_idx : c10::irange(0, num_arguments)) {
    if (tensor_idx >= tensor_pos.size() || (int64_t)arg_idx != tensor_pos[tensor_idx]) {
      torch::jit::push(stack, arguments[arg_idx]);
    } else {
      TORCH_INTERNAL_ASSERT(tensor_idx < tensor_inputs.size());
      torch::jit::push(stack, tensor_inputs[tensor_idx].first);
      tensor_idx++;
    }
  }

  op.callBoxed(stack);

  // Wrap each Tensor return with batch dim 0 at the current level.
  const auto returns = torch::jit::pop(*stack, num_returns);
  for (const auto& ret : returns) {
    if (ret.isTensor()) {
      torch::jit::push(stack, makeBatched(ret.toTensor(), 0, cur_level));
    } else {
      TORCH_INTERNAL_ASSERT(false, "This boxed batching rule does not currently support ops that return non-tensor values");
    }
  }
}
inline void handle_pointwise_ops(std::vector<std::pair<Tensor, optional<int64_t>>> &tensor_inputs) {
|
| 178 |
+
int64_t out_logical_rank = 0;
|
| 179 |
+
for (auto& tensor_input : tensor_inputs) {
|
| 180 |
+
int64_t cur_logical_rank = rankWithoutBatchDim(tensor_input.first, tensor_input.second);
|
| 181 |
+
out_logical_rank = std::max(out_logical_rank, cur_logical_rank);
|
| 182 |
+
}
|
| 183 |
+
for (auto& tensor_input: tensor_inputs) {
|
| 184 |
+
tensor_input.first = moveBatchDimToFront(tensor_input.first, tensor_input.second);
|
| 185 |
+
tensor_input.first = maybePadToLogicalRank(tensor_input.first, tensor_input.second, out_logical_rank);
|
| 186 |
+
}
|
| 187 |
+
}
|
| 188 |
+
|
| 189 |
+
// Register a pointwise op (or an overload of one) under the boxed
// pointwise batching rule.
#define POINTWISE_BOXED(op) \
  m.impl(#op, torch::CppFunction::makeFromBoxedFunction<boxed_tensor_inputs_batch_rule<decltype(&handle_pointwise_ops), &handle_pointwise_ops>>());

#define POINTWISE_BOXED2(op, overload) \
  m.impl(#op "." #overload, torch::CppFunction::makeFromBoxedFunction<boxed_tensor_inputs_batch_rule<decltype(&handle_pointwise_ops), &handle_pointwise_ops>>());
inline void handle_variadic_bdims(std::vector<std::pair<Tensor, optional<int64_t>>> &tensor_inputs) {
|
| 196 |
+
for (auto & tensor_input : tensor_inputs) {
|
| 197 |
+
tensor_input.first = moveBatchDimToFront(tensor_input.first, tensor_input.second);
|
| 198 |
+
}
|
| 199 |
+
}
|
| 200 |
+
|
| 201 |
+
// Register an op under the boxed variadic-bdims batching rule.
#define VARIADIC_BDIMS_BOXED(op) \
  m.impl(#op, torch::CppFunction::makeFromBoxedFunction<boxed_tensor_inputs_batch_rule<decltype(&handle_variadic_bdims), &handle_variadic_bdims>>());
// A tensor unwrapped at some vmap level together with its (optional) batch dim.
using UnpackedBatchedTensor = std::tuple<Tensor,optional<int64_t>>;
inline void find_and_unpack_tensors(
|
| 207 |
+
const torch::jit::Stack* stack,
|
| 208 |
+
int64_t num_args,
|
| 209 |
+
int64_t cur_level,
|
| 210 |
+
SmallVector<UnpackedBatchedTensor, 5>* tensors,
|
| 211 |
+
SmallVector<int64_t, 5>* tensors_pos,
|
| 212 |
+
int64_t* batch_size) {
|
| 213 |
+
|
| 214 |
+
int64_t computed_batch_size = -1;
|
| 215 |
+
int64_t args_begin = stack->size() - num_args;
|
| 216 |
+
|
| 217 |
+
for (const auto idx : c10::irange(0, num_args)) {
|
| 218 |
+
const auto& ivalue = (*stack)[args_begin + idx];
|
| 219 |
+
if (!ivalue.isTensor()) {
|
| 220 |
+
continue;
|
| 221 |
+
}
|
| 222 |
+
auto unpacked = unwrapTensorAtLevel(ivalue.toTensor(), cur_level);
|
| 223 |
+
const auto& tensor_value = std::get<0>(unpacked);
|
| 224 |
+
const auto tensor_bdim = std::get<1>(unpacked);
|
| 225 |
+
if (tensor_bdim.has_value()) {
|
| 226 |
+
auto candidate_batch_size = tensor_value.size(*tensor_bdim);
|
| 227 |
+
if (computed_batch_size == -1) {
|
| 228 |
+
computed_batch_size = candidate_batch_size;
|
| 229 |
+
}
|
| 230 |
+
TORCH_INTERNAL_ASSERT(candidate_batch_size == computed_batch_size);
|
| 231 |
+
}
|
| 232 |
+
|
| 233 |
+
tensors->push_back(std::move(unpacked));
|
| 234 |
+
tensors_pos->push_back(idx);
|
| 235 |
+
}
|
| 236 |
+
TORCH_INTERNAL_ASSERT(computed_batch_size > -1);
|
| 237 |
+
*batch_size = computed_batch_size;
|
| 238 |
+
}
|
| 239 |
+
|
| 240 |
+
inline void boxed_existing_bdim_all_batch_rule(
|
| 241 |
+
const c10::OperatorHandle& op, torch::jit::Stack* stack) {
|
| 242 |
+
const auto& schema = op.schema();
|
| 243 |
+
const auto num_returns = schema.returns().size();
|
| 244 |
+
const auto num_arguments = schema.arguments().size();
|
| 245 |
+
|
| 246 |
+
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
| 247 |
+
auto maybe_layer = maybeCurrentDynamicLayer();
|
| 248 |
+
vmap_check_escaped(maybe_layer, "boxed_existing_bdim_all_batch_rule");
|
| 249 |
+
int64_t cur_level = maybe_layer->layerId();
|
| 250 |
+
|
| 251 |
+
const auto arguments = torch::jit::last(stack, num_arguments);
|
| 252 |
+
if (std::none_of(arguments.begin(), arguments.end(), ivalueParticipatesInCurrentLevel)) {
|
| 253 |
+
op.callBoxed(stack);
|
| 254 |
+
return;
|
| 255 |
+
}
|
| 256 |
+
|
| 257 |
+
int64_t args_begin = stack->size() - num_arguments;
|
| 258 |
+
SmallVector<UnpackedBatchedTensor, 5> tensor_inputs;
|
| 259 |
+
SmallVector<int64_t, 5> tensor_pos;
|
| 260 |
+
int64_t batch_size;
|
| 261 |
+
|
| 262 |
+
find_and_unpack_tensors(
|
| 263 |
+
stack, num_arguments, cur_level,
|
| 264 |
+
&tensor_inputs, &tensor_pos, &batch_size);
|
| 265 |
+
|
| 266 |
+
// for each tensor, ensure it has a bdim and reshape it.
|
| 267 |
+
for (const auto tensor_idx : c10::irange(0, tensor_inputs.size())) {
|
| 268 |
+
const auto& value = std::get<0>(tensor_inputs[tensor_idx]);
|
| 269 |
+
auto bdim = std::get<1>(tensor_inputs[tensor_idx]);
|
| 270 |
+
auto value_ = ensure_has_bdim(value, bdim.has_value(), batch_size);
|
| 271 |
+
if (!bdim.has_value()) {
|
| 272 |
+
bdim = 0;
|
| 273 |
+
}
|
| 274 |
+
(*stack)[args_begin + tensor_pos[tensor_idx]] = reshape_dim_into(*bdim, 0, value_);
|
| 275 |
+
}
|
| 276 |
+
|
| 277 |
+
op.callBoxed(stack);
|
| 278 |
+
|
| 279 |
+
for (const auto idx : c10::irange(args_begin, args_begin + num_returns)) {
|
| 280 |
+
const auto& ret = (*stack)[idx];
|
| 281 |
+
TORCH_INTERNAL_ASSERT(ret.isTensor(),
|
| 282 |
+
"This boxed batching rule does not currently support ops that return non-tensor values");
|
| 283 |
+
(*stack)[idx] = makeBatched(reshape_dim_outof(0, batch_size, ret.toTensor()), 0, cur_level);
|
| 284 |
+
}
|
| 285 |
+
}
|
| 286 |
+
|
| 287 |
+
// Use when all tensors arguments accept one (normal) batch dim.
// This batching rule expands the batch dim on all Tensors, reshapes it into
// dim 0, calls the op, and then reshapes the batch dim out of dim 0.
// This is not the most efficient thing; if there are alternatives, please try
// to use them. Use this only as a last resort.
#define EXISTING_BDIM_ALL_BOXED(op) \
  m.impl(#op, torch::CppFunction::makeFromBoxedFunction<boxed_existing_bdim_all_batch_rule>());
template <int64_t feature_rank, int64_t contig_tensor_index=-1>
|
| 296 |
+
inline void boxed_all_tensors_have_optional_bdim(
|
| 297 |
+
const c10::OperatorHandle& op, torch::jit::Stack* stack) {
|
| 298 |
+
const auto& schema = op.schema();
|
| 299 |
+
const auto num_returns = schema.returns().size();
|
| 300 |
+
const auto num_arguments = schema.arguments().size();
|
| 301 |
+
|
| 302 |
+
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
| 303 |
+
auto maybe_layer = maybeCurrentDynamicLayer();
|
| 304 |
+
vmap_check_escaped(maybe_layer, "boxed_all_tensors_have_optional_bdim");
|
| 305 |
+
int64_t cur_level = maybe_layer->layerId();
|
| 306 |
+
|
| 307 |
+
const auto arguments = torch::jit::last(stack, num_arguments);
|
| 308 |
+
if (std::none_of(arguments.begin(), arguments.end(), ivalueParticipatesInCurrentLevel)) {
|
| 309 |
+
op.callBoxed(stack);
|
| 310 |
+
return;
|
| 311 |
+
}
|
| 312 |
+
|
| 313 |
+
int64_t args_begin = stack->size() - num_arguments;
|
| 314 |
+
SmallVector<UnpackedBatchedTensor, 5> tensor_inputs;
|
| 315 |
+
SmallVector<int64_t, 5> tensor_pos;
|
| 316 |
+
int64_t batch_size;
|
| 317 |
+
|
| 318 |
+
find_and_unpack_tensors(
|
| 319 |
+
stack, num_arguments, cur_level,
|
| 320 |
+
&tensor_inputs, &tensor_pos, &batch_size);
|
| 321 |
+
|
| 322 |
+
optional<bool> is_no_batch_dim_case;
|
| 323 |
+
|
| 324 |
+
for (const auto tensor_idx : c10::irange(0, tensor_inputs.size())) {
|
| 325 |
+
const auto& value = std::get<0>(tensor_inputs[tensor_idx]);
|
| 326 |
+
auto bdim = std::get<1>(tensor_inputs[tensor_idx]);
|
| 327 |
+
const auto logical_rank = rankWithoutBatchDim(value, bdim);
|
| 328 |
+
|
| 329 |
+
if (!is_no_batch_dim_case.has_value()) {
|
| 330 |
+
is_no_batch_dim_case = (logical_rank == feature_rank);
|
| 331 |
+
}
|
| 332 |
+
auto value_ = ensure_has_bdim(value, bdim.has_value(), batch_size);
|
| 333 |
+
if (!bdim.has_value()) {
|
| 334 |
+
bdim = 0;
|
| 335 |
+
}
|
| 336 |
+
if (*is_no_batch_dim_case) {
|
| 337 |
+
TORCH_INTERNAL_ASSERT(logical_rank == feature_rank);
|
| 338 |
+
value_ = moveBatchDimToFront(value_, bdim);
|
| 339 |
+
if (tensor_idx == contig_tensor_index) {
|
| 340 |
+
value_ = value_.contiguous();
|
| 341 |
+
}
|
| 342 |
+
(*stack)[args_begin + tensor_pos[tensor_idx]] = std::move(value_);
|
| 343 |
+
continue;
|
| 344 |
+
}
|
| 345 |
+
TORCH_INTERNAL_ASSERT(logical_rank == feature_rank + 1);
|
| 346 |
+
value_ = reshape_dim_into(*bdim, 0, value_);
|
| 347 |
+
if (tensor_idx == contig_tensor_index) {
|
| 348 |
+
value_ = value_.contiguous();
|
| 349 |
+
}
|
| 350 |
+
(*stack)[args_begin + tensor_pos[tensor_idx]] = std::move(value_);
|
| 351 |
+
}
|
| 352 |
+
|
| 353 |
+
op.callBoxed(stack);
|
| 354 |
+
|
| 355 |
+
for (const auto idx : c10::irange(args_begin, args_begin + num_returns)) {
|
| 356 |
+
const auto& ret = (*stack)[idx];
|
| 357 |
+
TORCH_INTERNAL_ASSERT(ret.isTensor(),
|
| 358 |
+
"This boxed batching rule does not currently support ops that return non-tensor values");
|
| 359 |
+
if (*is_no_batch_dim_case) {
|
| 360 |
+
(*stack)[idx] = makeBatched(ret.toTensor(), 0, cur_level);
|
| 361 |
+
} else {
|
| 362 |
+
(*stack)[idx] = makeBatched(reshape_dim_outof(0, batch_size, ret.toTensor()), 0, cur_level);
|
| 363 |
+
}
|
| 364 |
+
}
|
| 365 |
+
}
|
| 366 |
+
|
| 367 |
+
// Useful for many NN operators.
// The operator must satisfy the following:
// - All arguments must accept an optional batch dim.
// - All arguments must be the same rank
#define ALL_TENSORS_HAVE_OPTIONAL_BDIM_BOXED(feature_rank, op) \
  m.impl(#op, torch::CppFunction::makeFromBoxedFunction<boxed_all_tensors_have_optional_bdim<feature_rank>>());

// Variant that additionally forces the tensor at `contig_tensor_index`
// to be contiguous before the op is called.
#define ALL_TENSORS_HAVE_OPTIONAL_BDIM_BOXED_CONTIG1(feature_rank, op, contig_tensor_index) \
  m.impl(#op, \
         torch::CppFunction::makeFromBoxedFunction<\
             boxed_all_tensors_have_optional_bdim<\
                 feature_rank, \
                 contig_tensor_index>\
             >());
// Primary template; only the function-pointer specialization below is defined.
template <typename A, A a, typename C>
struct ExistingBdimBatchRuleHelper;
template <typename F, F Func, typename A, typename... T>
|
| 386 |
+
struct ExistingBdimBatchRuleHelper<F, Func, c10::guts::typelist::typelist<A, T...>> {
|
| 387 |
+
static std::tuple<Tensor,optional<int64_t>> apply(
|
| 388 |
+
const Tensor& self,
|
| 389 |
+
optional<int64_t> self_bdim,
|
| 390 |
+
T... extra_args) {
|
| 391 |
+
auto self_ = reshape_dim_into(*self_bdim, 0, self);
|
| 392 |
+
auto out = Func(self_, std::forward<T>(extra_args)...);
|
| 393 |
+
return std::make_tuple(reshape_dim_outof_symint(0, self.sym_sizes()[*self_bdim], out), 0);
|
| 394 |
+
}
|
| 395 |
+
};
|
| 396 |
+
|
| 397 |
+
// USAGE: EXISTING_BDIM_BATCH_RULE(at::cholesky_inverse)
// INCORRECT USAGE: EXISTING_BDIM_BATCH_RULE(&at::cholesky_inverse)
// It is important that this macro is not passed a function pointer!!
#define EXISTING_BDIM_BATCH_RULE(fn) SINGLE_ARG(\
    ExistingBdimBatchRuleHelper<\
      decltype(&fn),\
      &fn,\
      c10::guts::function_traits<decltype(fn)>::parameter_types>::apply)
// Register vmap support for an op (or overload) via the existing-bdim rule.
#define EXISTING_BDIM(op) \
  VMAP_SUPPORT(op, EXISTING_BDIM_BATCH_RULE(ATEN_FN(op)));

#define EXISTING_BDIM2(op, overload) \
  VMAP_SUPPORT2(op, overload, EXISTING_BDIM_BATCH_RULE(ATEN_FN2(op, overload)));
// Invoke a pointer-to-member-function on an object.
#define INVOKE(object,ptrToMember)  ((object).*(ptrToMember))
template <typename F, F Method, typename... ExtraArgs>
|
| 417 |
+
Tensor& unary_inplace_batch_rule(Tensor& self, optional<int64_t>, ExtraArgs... extra_args) {
|
| 418 |
+
INVOKE(self, Method)(std::forward<ExtraArgs>(extra_args)...);
|
| 419 |
+
return self;
|
| 420 |
+
}
|
| 421 |
+
|
| 422 |
+
inline int64_t get_bdim_size4(
|
| 423 |
+
const Tensor& a_value, optional<int64_t> a_bdim,
|
| 424 |
+
const Tensor& b_value, optional<int64_t> b_bdim,
|
| 425 |
+
const Tensor& c_value, optional<int64_t> c_bdim,
|
| 426 |
+
const Tensor& d_value, optional<int64_t> d_bdim) {
|
| 427 |
+
if (a_bdim)
|
| 428 |
+
return a_value.size(*a_bdim);
|
| 429 |
+
if (b_bdim)
|
| 430 |
+
return b_value.size(*b_bdim);
|
| 431 |
+
if (c_bdim)
|
| 432 |
+
return c_value.size(*c_bdim);
|
| 433 |
+
if (d_bdim)
|
| 434 |
+
return d_value.size(*d_bdim);
|
| 435 |
+
TORCH_INTERNAL_ASSERT(false);
|
| 436 |
+
}
|
| 437 |
+
|
| 438 |
+
inline int64_t get_bdim_size3(
|
| 439 |
+
const Tensor& a_value, optional<int64_t> a_bdim,
|
| 440 |
+
const Tensor& b_value, optional<int64_t> b_bdim,
|
| 441 |
+
const Tensor& c_value, optional<int64_t> c_bdim) {
|
| 442 |
+
if (a_bdim)
|
| 443 |
+
return a_value.size(*a_bdim);
|
| 444 |
+
if (b_bdim)
|
| 445 |
+
return b_value.size(*b_bdim);
|
| 446 |
+
if (c_bdim)
|
| 447 |
+
return c_value.size(*c_bdim);
|
| 448 |
+
TORCH_INTERNAL_ASSERT(false);
|
| 449 |
+
}
|
| 450 |
+
|
| 451 |
+
inline int64_t get_bdim_size2(
|
| 452 |
+
const Tensor& a_value, optional<int64_t> a_bdim,
|
| 453 |
+
const Tensor& b_value, optional<int64_t> b_bdim) {
|
| 454 |
+
if (a_bdim)
|
| 455 |
+
return a_value.size(*a_bdim);
|
| 456 |
+
if (b_bdim)
|
| 457 |
+
return b_value.size(*b_bdim);
|
| 458 |
+
TORCH_INTERNAL_ASSERT(false);
|
| 459 |
+
}
|
| 460 |
+
|
| 461 |
+
// [start, start + 1, ..., stop - 1]
|
| 462 |
+
inline VmapDimVector range(int64_t start, int64_t stop) {
|
| 463 |
+
TORCH_INTERNAL_ASSERT(stop >= start);
|
| 464 |
+
VmapDimVector dims;
|
| 465 |
+
dims.reserve(stop - start);
|
| 466 |
+
for (int64_t i = start; i < stop; i++) {
|
| 467 |
+
dims.emplace_back(i);
|
| 468 |
+
}
|
| 469 |
+
return dims;
|
| 470 |
+
}
|
| 471 |
+
std::tuple<Tensor, Tensor> _binary_pointwise_helper(
|
| 472 |
+
const Tensor& tensor, optional<int64_t> tensor_batch_dim, const Tensor& other, optional<int64_t> other_batch_dim,
|
| 473 |
+
bool do_type_promotion=true);
|
| 474 |
+
|
| 475 |
+
} // namespace at::functorch
|