Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +2 -0
- evalkit_tf437/lib/python3.10/site-packages/pydantic/aliases.py +132 -0
- evalkit_tf437/lib/python3.10/site-packages/pydantic/class_validators.py +5 -0
- evalkit_tf437/lib/python3.10/site-packages/pydantic/datetime_parse.py +5 -0
- evalkit_tf437/lib/python3.10/site-packages/pydantic/decorator.py +5 -0
- evalkit_tf437/lib/python3.10/site-packages/pydantic/functional_serializers.py +438 -0
- evalkit_tf437/lib/python3.10/site-packages/pydantic/py.typed +0 -0
- evalkit_tf437/lib/python3.10/site-packages/pydantic/tools.py +5 -0
- evalkit_tf437/lib/python3.10/site-packages/pydantic/validate_call_decorator.py +69 -0
- evalkit_tf437/lib/python3.10/site-packages/pydantic_core/__pycache__/core_schema.cpython-310.pyc +3 -0
- evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/_emoji_replace.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/_null_file.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/ansi.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/bar.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/emoji.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/file_proxy.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/filesize.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/json.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/live.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/pretty.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/progress.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/protocol.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/rule.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/scope.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/status.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/syntax.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/terminal_theme.cpython-310.pyc +0 -0
- falcon/lib/python3.10/site-packages/anyio-4.8.0.dist-info/INSTALLER +1 -0
- falcon/lib/python3.10/site-packages/anyio-4.8.0.dist-info/METADATA +104 -0
- falcon/lib/python3.10/site-packages/functorch/__pycache__/__init__.cpython-310.pyc +0 -0
- falcon/lib/python3.10/site-packages/functorch/_src/aot_autograd/__init__.py +8 -0
- falcon/lib/python3.10/site-packages/functorch/_src/make_functional/__pycache__/__init__.cpython-310.pyc +0 -0
- falcon/lib/python3.10/site-packages/functorch/compile/__pycache__/__init__.cpython-310.pyc +0 -0
- falcon/lib/python3.10/site-packages/functorch/dim/__init__.py +179 -0
- falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/__init__.cpython-310.pyc +0 -0
- falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/batch_tensor.cpython-310.pyc +0 -0
- falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/delayed_mul_tensor.cpython-310.pyc +0 -0
- falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/dim.cpython-310.pyc +0 -0
- falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/magic_trace.cpython-310.pyc +0 -0
- falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/op_properties.cpython-310.pyc +0 -0
- falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/reference.cpython-310.pyc +0 -0
- falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/tree_map.cpython-310.pyc +0 -0
- falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/wrap_type.cpython-310.pyc +0 -0
- falcon/lib/python3.10/site-packages/functorch/dim/batch_tensor.py +25 -0
- falcon/lib/python3.10/site-packages/functorch/dim/delayed_mul_tensor.py +77 -0
- falcon/lib/python3.10/site-packages/functorch/dim/dim.py +121 -0
- falcon/lib/python3.10/site-packages/functorch/dim/reference.py +645 -0
- falcon/lib/python3.10/site-packages/functorch/dim/tree_map.py +14 -0
- falcon/lib/python3.10/site-packages/functorch/dim/wrap_type.py +71 -0
.gitattributes
CHANGED
|
@@ -1258,3 +1258,5 @@ infer_4_47_1/lib/python3.10/site-packages/triton/backends/nvidia/bin/ptxas filte
|
|
| 1258 |
falcon/bin/x86_64-conda-linux-gnu-ld filter=lfs diff=lfs merge=lfs -text
|
| 1259 |
falcon/lib/python3.10/tkinter/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1260 |
falcon/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
| 1258 |
falcon/bin/x86_64-conda-linux-gnu-ld filter=lfs diff=lfs merge=lfs -text
|
| 1259 |
falcon/lib/python3.10/tkinter/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1260 |
falcon/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1261 |
+
falcon/lib/python3.10/site-packages/pysam.libs/libsasl2-7de4d792.so.3.0.0 filter=lfs diff=lfs merge=lfs -text
|
| 1262 |
+
evalkit_tf437/lib/python3.10/site-packages/pydantic_core/__pycache__/core_schema.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
evalkit_tf437/lib/python3.10/site-packages/pydantic/aliases.py
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Support for alias configurations."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import dataclasses
|
| 6 |
+
from typing import Any, Callable, Literal
|
| 7 |
+
|
| 8 |
+
from pydantic_core import PydanticUndefined
|
| 9 |
+
|
| 10 |
+
from ._internal import _internal_dataclass
|
| 11 |
+
|
| 12 |
+
__all__ = ('AliasGenerator', 'AliasPath', 'AliasChoices')
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@dataclasses.dataclass(**_internal_dataclass.slots_true)
class AliasPath:
    """Usage docs: https://docs.pydantic.dev/2.9/concepts/alias#aliaspath-and-aliaschoices

    A data class used by `validation_alias` as a convenience to create aliases.

    Attributes:
        path: A list of string or integer aliases.
    """

    path: list[int | str]

    def __init__(self, first_arg: str, *args: str | int) -> None:
        # The first element is required to be a string key; later elements may be int indices.
        self.path = [first_arg, *args]

    def convert_to_aliases(self) -> list[str | int]:
        """Converts arguments to a list of string or integer aliases.

        Returns:
            The list of aliases.
        """
        return self.path

    def search_dict_for_path(self, d: dict) -> Any:
        """Searches a dictionary for the path specified by the alias.

        Returns:
            The value at the specified path, or `PydanticUndefined` if the path is not found.
        """
        current = d
        for key in self.path:
            if isinstance(current, str):
                # disallow indexing into a str, like for AliasPath('x', 0) and x='abc'
                return PydanticUndefined
            try:
                current = current[key]
            except (KeyError, IndexError, TypeError):
                return PydanticUndefined
        return current
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
@dataclasses.dataclass(**_internal_dataclass.slots_true)
class AliasChoices:
    """Usage docs: https://docs.pydantic.dev/2.9/concepts/alias#aliaspath-and-aliaschoices

    A data class used by `validation_alias` as a convenience to create aliases.

    Attributes:
        choices: A list containing a string or `AliasPath`.
    """

    choices: list[str | AliasPath]

    def __init__(self, first_choice: str | AliasPath, *choices: str | AliasPath) -> None:
        # Require at least one choice; additional choices are tried in order during validation.
        self.choices = [first_choice, *choices]

    def convert_to_aliases(self) -> list[list[str | int]]:
        """Converts arguments to a list of lists containing string or integer aliases.

        Returns:
            The list of aliases.
        """
        result: list[list[str | int]] = []
        for choice in self.choices:
            # An AliasPath expands to its full path; a plain string becomes a one-element path.
            if isinstance(choice, AliasPath):
                result.append(choice.convert_to_aliases())
            else:
                result.append([choice])
        return result
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
@dataclasses.dataclass(**_internal_dataclass.slots_true)
class AliasGenerator:
    """Usage docs: https://docs.pydantic.dev/2.9/concepts/alias#using-an-aliasgenerator

    A data class used by `alias_generator` as a convenience to create various aliases.

    Attributes:
        alias: A callable that takes a field name and returns an alias for it.
        validation_alias: A callable that takes a field name and returns a validation alias for it.
        serialization_alias: A callable that takes a field name and returns a serialization alias for it.
    """

    alias: Callable[[str], str] | None = None
    validation_alias: Callable[[str], str | AliasPath | AliasChoices] | None = None
    serialization_alias: Callable[[str], str] | None = None

    def _generate_alias(
        self,
        alias_kind: Literal['alias', 'validation_alias', 'serialization_alias'],
        allowed_types: tuple[type[str] | type[AliasPath] | type[AliasChoices], ...],
        field_name: str,
    ) -> str | AliasPath | AliasChoices | None:
        """Generate an alias of the specified kind. Returns None if the alias generator is None.

        Raises:
            TypeError: If the alias generator produces an invalid type.
        """
        generated = None
        # Look up the generator callable stored under the matching attribute name.
        if alias_generator := getattr(self, alias_kind):
            generated = alias_generator(field_name)
            if generated and not isinstance(generated, allowed_types):
                raise TypeError(
                    f'Invalid `{alias_kind}` type. `{alias_kind}` generator must produce one of `{allowed_types}`'
                )
        return generated

    def generate_aliases(self, field_name: str) -> tuple[str | None, str | AliasPath | AliasChoices | None, str | None]:
        """Generate `alias`, `validation_alias`, and `serialization_alias` for a field.

        Returns:
            A tuple of three aliases - validation, alias, and serialization.
        """
        alias = self._generate_alias('alias', (str,), field_name)
        validation_alias = self._generate_alias('validation_alias', (str, AliasChoices, AliasPath), field_name)
        serialization_alias = self._generate_alias('serialization_alias', (str,), field_name)

        return alias, validation_alias, serialization_alias  # type: ignore
|
evalkit_tf437/lib/python3.10/site-packages/pydantic/class_validators.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""`class_validators` module is a backport module from V1."""
|
| 2 |
+
|
| 3 |
+
from ._migration import getattr_migration
|
| 4 |
+
|
| 5 |
+
__getattr__ = getattr_migration(__name__)
|
evalkit_tf437/lib/python3.10/site-packages/pydantic/datetime_parse.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""The `datetime_parse` module is a backport module from V1."""
|
| 2 |
+
|
| 3 |
+
from ._migration import getattr_migration
|
| 4 |
+
|
| 5 |
+
__getattr__ = getattr_migration(__name__)
|
evalkit_tf437/lib/python3.10/site-packages/pydantic/decorator.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""The `decorator` module is a backport module from V1."""
|
| 2 |
+
|
| 3 |
+
from ._migration import getattr_migration
|
| 4 |
+
|
| 5 |
+
__getattr__ = getattr_migration(__name__)
|
evalkit_tf437/lib/python3.10/site-packages/pydantic/functional_serializers.py
ADDED
|
@@ -0,0 +1,438 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""This module contains related classes and functions for serialization."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import dataclasses
|
| 6 |
+
from functools import partial, partialmethod
|
| 7 |
+
from typing import TYPE_CHECKING, Any, Callable, TypeVar, overload
|
| 8 |
+
|
| 9 |
+
from pydantic_core import PydanticUndefined, core_schema
|
| 10 |
+
from pydantic_core.core_schema import SerializationInfo, SerializerFunctionWrapHandler, WhenUsed
|
| 11 |
+
from typing_extensions import Annotated, Literal, TypeAlias
|
| 12 |
+
|
| 13 |
+
from . import PydanticUndefinedAnnotation
|
| 14 |
+
from ._internal import _decorators, _internal_dataclass
|
| 15 |
+
from .annotated_handlers import GetCoreSchemaHandler
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
@dataclasses.dataclass(**_internal_dataclass.slots_true, frozen=True)
class PlainSerializer:
    """Plain serializers use a function to modify the output of serialization.

    Particularly helpful for customizing serialization of annotated types, e.g.
    serializing a `list` as a space-delimited string:

    ```python
    CustomStr = Annotated[List, PlainSerializer(lambda x: ' '.join(x), return_type=str)]
    ```

    Attributes:
        func: The serializer function.
        return_type: The return type for the function. If omitted it will be inferred from the type annotation.
        when_used: Determines when this serializer should be used. Accepts a string with values `'always'`,
            `'unless-none'`, `'json'`, and `'json-unless-none'`. Defaults to 'always'.
    """

    func: core_schema.SerializerFunction
    return_type: Any = PydanticUndefined
    when_used: WhenUsed = 'always'

    def __get_pydantic_core_schema__(self, source_type: Any, handler: GetCoreSchemaHandler) -> core_schema.CoreSchema:
        """Gets the Pydantic core schema.

        Args:
            source_type: The source type.
            handler: The `GetCoreSchemaHandler` instance.

        Returns:
            The Pydantic core schema.
        """
        schema = handler(source_type)
        try:
            # Resolve the serializer's return type: explicit `return_type` wins,
            # otherwise it is inferred from the function's annotation.
            return_type = _decorators.get_function_return_type(
                self.func, self.return_type, handler._get_types_namespace()
            )
        except NameError as e:
            # Forward-reference annotations that cannot be resolved become a pydantic error.
            raise PydanticUndefinedAnnotation.from_name_error(e) from e
        return_schema = None if return_type is PydanticUndefined else handler.generate_schema(return_type)
        schema['serialization'] = core_schema.plain_serializer_function_ser_schema(
            function=self.func,
            info_arg=_decorators.inspect_annotated_serializer(self.func, 'plain'),
            return_schema=return_schema,
            when_used=self.when_used,
        )
        return schema
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
@dataclasses.dataclass(**_internal_dataclass.slots_true, frozen=True)
class WrapSerializer:
    """Wrap serializers receive the raw inputs along with a handler function that applies the standard serialization
    logic, and can modify the resulting value before returning it as the final output of serialization.

    For example, a wrap serializer can normalize timezones to UTC while still reusing
    the default `datetime` serialization logic via the handler:

    ```python
    def convert_to_utc(value, handler, info):
        partial_result = handler(value, info)
        ...  # post-process partial_result
    UTCEventDatetime = Annotated[EventDatetime, WrapSerializer(convert_to_utc)]
    ```

    Attributes:
        func: The serializer function to be wrapped.
        return_type: The return type for the function. If omitted it will be inferred from the type annotation.
        when_used: Determines when this serializer should be used. Accepts a string with values `'always'`,
            `'unless-none'`, `'json'`, and `'json-unless-none'`. Defaults to 'always'.
    """

    func: core_schema.WrapSerializerFunction
    return_type: Any = PydanticUndefined
    when_used: WhenUsed = 'always'

    def __get_pydantic_core_schema__(self, source_type: Any, handler: GetCoreSchemaHandler) -> core_schema.CoreSchema:
        """This method is used to get the Pydantic core schema of the class.

        Args:
            source_type: Source type.
            handler: Core schema handler.

        Returns:
            The generated core schema of the class.
        """
        schema = handler(source_type)
        try:
            # Resolve the serializer's return type: explicit `return_type` wins,
            # otherwise it is inferred from the function's annotation.
            return_type = _decorators.get_function_return_type(
                self.func, self.return_type, handler._get_types_namespace()
            )
        except NameError as e:
            # Forward-reference annotations that cannot be resolved become a pydantic error.
            raise PydanticUndefinedAnnotation.from_name_error(e) from e
        return_schema = None if return_type is PydanticUndefined else handler.generate_schema(return_type)
        schema['serialization'] = core_schema.wrap_serializer_function_ser_schema(
            function=self.func,
            info_arg=_decorators.inspect_annotated_serializer(self.func, 'wrap'),
            return_schema=return_schema,
            when_used=self.when_used,
        )
        return schema
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
if TYPE_CHECKING:
    # Type aliases describing the callables accepted by `field_serializer`.
    _Partial: TypeAlias = 'partial[Any] | partialmethod[Any]'

    FieldPlainSerializer: TypeAlias = 'core_schema.SerializerFunction | _Partial'
    """A field serializer method or function in `plain` mode."""

    FieldWrapSerializer: TypeAlias = 'core_schema.WrapSerializerFunction | _Partial'
    """A field serializer method or function in `wrap` mode."""

    FieldSerializer: TypeAlias = 'FieldPlainSerializer | FieldWrapSerializer'
    """A field serializer method or function."""

    _FieldPlainSerializerT = TypeVar('_FieldPlainSerializerT', bound=FieldPlainSerializer)
    _FieldWrapSerializerT = TypeVar('_FieldWrapSerializerT', bound=FieldWrapSerializer)
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
@overload
def field_serializer(
    field: str,
    /,
    *fields: str,
    mode: Literal['wrap'],
    return_type: Any = ...,
    when_used: WhenUsed = ...,
    check_fields: bool | None = ...,
) -> Callable[[_FieldWrapSerializerT], _FieldWrapSerializerT]: ...


@overload
def field_serializer(
    field: str,
    /,
    *fields: str,
    mode: Literal['plain'] = ...,
    return_type: Any = ...,
    when_used: WhenUsed = ...,
    check_fields: bool | None = ...,
) -> Callable[[_FieldPlainSerializerT], _FieldPlainSerializerT]: ...


def field_serializer(
    *fields: str,
    mode: Literal['plain', 'wrap'] = 'plain',
    return_type: Any = PydanticUndefined,
    when_used: WhenUsed = 'always',
    check_fields: bool | None = None,
) -> (
    Callable[[_FieldWrapSerializerT], _FieldWrapSerializerT]
    | Callable[[_FieldPlainSerializerT], _FieldPlainSerializerT]
):
    """Decorator that enables custom field serialization.

    See [Custom serializers](../concepts/serialization.md#custom-serializers) for more information.

    Four signatures are supported:

    - `(self, value: Any, info: FieldSerializationInfo)`
    - `(self, value: Any, nxt: SerializerFunctionWrapHandler, info: FieldSerializationInfo)`
    - `(value: Any, info: SerializationInfo)`
    - `(value: Any, nxt: SerializerFunctionWrapHandler, info: SerializationInfo)`

    Args:
        fields: Which field(s) the method should be called on.
        mode: The serialization mode.

            - `plain` means the function will be called instead of the default serialization logic,
            - `wrap` means the function will be called with an argument to optionally call the
              default serialization logic.
        return_type: Optional return type for the function, if omitted it will be inferred from the type annotation.
        when_used: Determines the serializer will be used for serialization.
        check_fields: Whether to check that the fields actually exist on the model.

    Returns:
        The decorator function.
    """

    def dec(f: FieldSerializer) -> _decorators.PydanticDescriptorProxy[Any]:
        # Attach the decorator metadata; the proxy defers binding until class creation.
        dec_info = _decorators.FieldSerializerDecoratorInfo(
            fields=fields,
            mode=mode,
            return_type=return_type,
            when_used=when_used,
            check_fields=check_fields,
        )
        return _decorators.PydanticDescriptorProxy(f, dec_info)  # pyright: ignore[reportArgumentType]

    return dec  # pyright: ignore[reportReturnType]
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
if TYPE_CHECKING:
    # The first argument in the following callables represent the `self` type:

    ModelPlainSerializerWithInfo: TypeAlias = Callable[[Any, SerializationInfo], Any]
    """A model serializer method with the `info` argument, in `plain` mode."""

    ModelPlainSerializerWithoutInfo: TypeAlias = Callable[[Any], Any]
    """A model serializer method without the `info` argument, in `plain` mode."""

    ModelPlainSerializer: TypeAlias = 'ModelPlainSerializerWithInfo | ModelPlainSerializerWithoutInfo'
    """A model serializer method in `plain` mode."""

    ModelWrapSerializerWithInfo: TypeAlias = Callable[[Any, SerializerFunctionWrapHandler, SerializationInfo], Any]
    """A model serializer method with the `info` argument, in `wrap` mode."""

    ModelWrapSerializerWithoutInfo: TypeAlias = Callable[[Any, SerializerFunctionWrapHandler], Any]
    """A model serializer method without the `info` argument, in `wrap` mode."""

    ModelWrapSerializer: TypeAlias = 'ModelWrapSerializerWithInfo | ModelWrapSerializerWithoutInfo'
    """A model serializer method in `wrap` mode."""

    ModelSerializer: TypeAlias = 'ModelPlainSerializer | ModelWrapSerializer'

    _ModelPlainSerializerT = TypeVar('_ModelPlainSerializerT', bound=ModelPlainSerializer)
    _ModelWrapSerializerT = TypeVar('_ModelWrapSerializerT', bound=ModelWrapSerializer)
|
| 313 |
+
|
| 314 |
+
|
| 315 |
+
@overload
def model_serializer(f: _ModelPlainSerializerT, /) -> _ModelPlainSerializerT: ...


@overload
def model_serializer(
    *, mode: Literal['wrap'], when_used: WhenUsed = 'always', return_type: Any = ...
) -> Callable[[_ModelWrapSerializerT], _ModelWrapSerializerT]: ...


@overload
def model_serializer(
    *,
    mode: Literal['plain'] = ...,
    when_used: WhenUsed = 'always',
    return_type: Any = ...,
) -> Callable[[_ModelPlainSerializerT], _ModelPlainSerializerT]: ...


def model_serializer(
    f: _ModelPlainSerializerT | _ModelWrapSerializerT | None = None,
    /,
    *,
    mode: Literal['plain', 'wrap'] = 'plain',
    when_used: WhenUsed = 'always',
    return_type: Any = PydanticUndefined,
) -> (
    _ModelPlainSerializerT
    | Callable[[_ModelWrapSerializerT], _ModelWrapSerializerT]
    | Callable[[_ModelPlainSerializerT], _ModelPlainSerializerT]
):
    """Decorator that enables custom model serialization.

    This is useful when a model need to be serialized in a customized manner, allowing for flexibility beyond just specific fields.

    Two signatures are supported for `mode='plain'`, which is the default:

    - `(self)`
    - `(self, info: SerializationInfo)`

    And two other signatures for `mode='wrap'`:

    - `(self, nxt: SerializerFunctionWrapHandler)`
    - `(self, nxt: SerializerFunctionWrapHandler, info: SerializationInfo)`

    See [Custom serializers](../concepts/serialization.md#custom-serializers) for more information.

    Args:
        f: The function to be decorated.
        mode: The serialization mode.

            - `'plain'` means the function will be called instead of the default serialization logic
            - `'wrap'` means the function will be called with an argument to optionally call the default
              serialization logic.
        when_used: Determines when this serializer should be used.
        return_type: The return type for the function. If omitted it will be inferred from the type annotation.

    Returns:
        The decorator function.
    """

    def dec(f: ModelSerializer) -> _decorators.PydanticDescriptorProxy[Any]:
        # Attach the decorator metadata; the proxy defers binding until class creation.
        dec_info = _decorators.ModelSerializerDecoratorInfo(mode=mode, return_type=return_type, when_used=when_used)
        return _decorators.PydanticDescriptorProxy(f, dec_info)

    # Support both bare `@model_serializer` and parameterized `@model_serializer(...)` usage.
    if f is None:
        return dec  # pyright: ignore[reportReturnType]
    else:
        return dec(f)  # pyright: ignore[reportReturnType]
|
| 406 |
+
|
| 407 |
+
|
| 408 |
+
AnyType = TypeVar('AnyType')
|
| 409 |
+
|
| 410 |
+
|
| 411 |
+
if TYPE_CHECKING:
|
| 412 |
+
SerializeAsAny = Annotated[AnyType, ...] # SerializeAsAny[list[str]] will be treated by type checkers as list[str]
|
| 413 |
+
"""Force serialization to ignore whatever is defined in the schema and instead ask the object
|
| 414 |
+
itself how it should be serialized.
|
| 415 |
+
In particular, this means that when model subclasses are serialized, fields present in the subclass
|
| 416 |
+
but not in the original schema will be included.
|
| 417 |
+
"""
|
| 418 |
+
else:
|
| 419 |
+
|
| 420 |
+
@dataclasses.dataclass(**_internal_dataclass.slots_true)
|
| 421 |
+
class SerializeAsAny: # noqa: D101
|
| 422 |
+
def __class_getitem__(cls, item: Any) -> Any:
|
| 423 |
+
return Annotated[item, SerializeAsAny()]
|
| 424 |
+
|
| 425 |
+
def __get_pydantic_core_schema__(
|
| 426 |
+
self, source_type: Any, handler: GetCoreSchemaHandler
|
| 427 |
+
) -> core_schema.CoreSchema:
|
| 428 |
+
schema = handler(source_type)
|
| 429 |
+
schema_to_update = schema
|
| 430 |
+
while schema_to_update['type'] == 'definitions':
|
| 431 |
+
schema_to_update = schema_to_update.copy()
|
| 432 |
+
schema_to_update = schema_to_update['schema']
|
| 433 |
+
schema_to_update['serialization'] = core_schema.wrap_serializer_function_ser_schema(
|
| 434 |
+
lambda x, h: h(x), schema=core_schema.any_schema()
|
| 435 |
+
)
|
| 436 |
+
return schema
|
| 437 |
+
|
| 438 |
+
__hash__ = object.__hash__
|
evalkit_tf437/lib/python3.10/site-packages/pydantic/py.typed
ADDED
|
File without changes
|
evalkit_tf437/lib/python3.10/site-packages/pydantic/tools.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""The `tools` module is a backport module from V1."""
|
| 2 |
+
|
| 3 |
+
from ._migration import getattr_migration
|
| 4 |
+
|
| 5 |
+
__getattr__ = getattr_migration(__name__)
|
evalkit_tf437/lib/python3.10/site-packages/pydantic/validate_call_decorator.py
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Decorator for validating function calls."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations as _annotations
|
| 4 |
+
|
| 5 |
+
import functools
|
| 6 |
+
from typing import TYPE_CHECKING, Any, Callable, TypeVar, overload
|
| 7 |
+
|
| 8 |
+
from ._internal import _typing_extra, _validate_call
|
| 9 |
+
|
| 10 |
+
__all__ = ('validate_call',)
|
| 11 |
+
|
| 12 |
+
if TYPE_CHECKING:
|
| 13 |
+
from .config import ConfigDict
|
| 14 |
+
|
| 15 |
+
AnyCallableT = TypeVar('AnyCallableT', bound=Callable[..., Any])
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
@overload
|
| 19 |
+
def validate_call(
|
| 20 |
+
*, config: ConfigDict | None = None, validate_return: bool = False
|
| 21 |
+
) -> Callable[[AnyCallableT], AnyCallableT]: ...
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
@overload
|
| 25 |
+
def validate_call(func: AnyCallableT, /) -> AnyCallableT: ...
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def validate_call(
|
| 29 |
+
func: AnyCallableT | None = None,
|
| 30 |
+
/,
|
| 31 |
+
*,
|
| 32 |
+
config: ConfigDict | None = None,
|
| 33 |
+
validate_return: bool = False,
|
| 34 |
+
) -> AnyCallableT | Callable[[AnyCallableT], AnyCallableT]:
|
| 35 |
+
"""Usage docs: https://docs.pydantic.dev/2.9/concepts/validation_decorator/
|
| 36 |
+
|
| 37 |
+
Returns a decorated wrapper around the function that validates the arguments and, optionally, the return value.
|
| 38 |
+
|
| 39 |
+
Usage may be either as a plain decorator `@validate_call` or with arguments `@validate_call(...)`.
|
| 40 |
+
|
| 41 |
+
Args:
|
| 42 |
+
func: The function to be decorated.
|
| 43 |
+
config: The configuration dictionary.
|
| 44 |
+
validate_return: Whether to validate the return value.
|
| 45 |
+
|
| 46 |
+
Returns:
|
| 47 |
+
The decorated function.
|
| 48 |
+
"""
|
| 49 |
+
local_ns = _typing_extra.parent_frame_namespace()
|
| 50 |
+
|
| 51 |
+
def validate(function: AnyCallableT) -> AnyCallableT:
|
| 52 |
+
if isinstance(function, (classmethod, staticmethod)):
|
| 53 |
+
name = type(function).__name__
|
| 54 |
+
raise TypeError(f'The `@{name}` decorator should be applied after `@validate_call` (put `@{name}` on top)')
|
| 55 |
+
|
| 56 |
+
validate_call_wrapper = _validate_call.ValidateCallWrapper(function, config, validate_return, local_ns)
|
| 57 |
+
|
| 58 |
+
@functools.wraps(function)
|
| 59 |
+
def wrapper_function(*args, **kwargs):
|
| 60 |
+
return validate_call_wrapper(*args, **kwargs)
|
| 61 |
+
|
| 62 |
+
wrapper_function.raw_function = function # type: ignore
|
| 63 |
+
|
| 64 |
+
return wrapper_function # type: ignore
|
| 65 |
+
|
| 66 |
+
if func:
|
| 67 |
+
return validate(func)
|
| 68 |
+
else:
|
| 69 |
+
return validate
|
evalkit_tf437/lib/python3.10/site-packages/pydantic_core/__pycache__/core_schema.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:efa173282e69e63cd89631f686772e33bc74ad1f486d50700538bedf737419ea
|
| 3 |
+
size 123086
|
evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (6.06 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/_emoji_replace.cpython-310.pyc
ADDED
|
Binary file (1.17 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/_null_file.cpython-310.pyc
ADDED
|
Binary file (3.22 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/ansi.cpython-310.pyc
ADDED
|
Binary file (5.97 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/bar.cpython-310.pyc
ADDED
|
Binary file (2.96 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/emoji.cpython-310.pyc
ADDED
|
Binary file (3.21 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/file_proxy.cpython-310.pyc
ADDED
|
Binary file (2.38 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/filesize.cpython-310.pyc
ADDED
|
Binary file (2.59 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/json.cpython-310.pyc
ADDED
|
Binary file (4.69 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/live.cpython-310.pyc
ADDED
|
Binary file (11.7 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/pretty.cpython-310.pyc
ADDED
|
Binary file (27.8 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/progress.cpython-310.pyc
ADDED
|
Binary file (54.5 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/protocol.cpython-310.pyc
ADDED
|
Binary file (1.32 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/rule.cpython-310.pyc
ADDED
|
Binary file (3.91 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/scope.cpython-310.pyc
ADDED
|
Binary file (2.95 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/status.cpython-310.pyc
ADDED
|
Binary file (4.57 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/syntax.cpython-310.pyc
ADDED
|
Binary file (26 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/terminal_theme.cpython-310.pyc
ADDED
|
Binary file (2.99 kB). View file
|
|
|
falcon/lib/python3.10/site-packages/anyio-4.8.0.dist-info/INSTALLER
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
pip
|
falcon/lib/python3.10/site-packages/anyio-4.8.0.dist-info/METADATA
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.1
|
| 2 |
+
Name: anyio
|
| 3 |
+
Version: 4.8.0
|
| 4 |
+
Summary: High level compatibility layer for multiple asynchronous event loop implementations
|
| 5 |
+
Author-email: Alex Grönholm <alex.gronholm@nextday.fi>
|
| 6 |
+
License: MIT
|
| 7 |
+
Project-URL: Documentation, https://anyio.readthedocs.io/en/latest/
|
| 8 |
+
Project-URL: Changelog, https://anyio.readthedocs.io/en/stable/versionhistory.html
|
| 9 |
+
Project-URL: Source code, https://github.com/agronholm/anyio
|
| 10 |
+
Project-URL: Issue tracker, https://github.com/agronholm/anyio/issues
|
| 11 |
+
Classifier: Development Status :: 5 - Production/Stable
|
| 12 |
+
Classifier: Intended Audience :: Developers
|
| 13 |
+
Classifier: License :: OSI Approved :: MIT License
|
| 14 |
+
Classifier: Framework :: AnyIO
|
| 15 |
+
Classifier: Typing :: Typed
|
| 16 |
+
Classifier: Programming Language :: Python
|
| 17 |
+
Classifier: Programming Language :: Python :: 3
|
| 18 |
+
Classifier: Programming Language :: Python :: 3.9
|
| 19 |
+
Classifier: Programming Language :: Python :: 3.10
|
| 20 |
+
Classifier: Programming Language :: Python :: 3.11
|
| 21 |
+
Classifier: Programming Language :: Python :: 3.12
|
| 22 |
+
Classifier: Programming Language :: Python :: 3.13
|
| 23 |
+
Requires-Python: >=3.9
|
| 24 |
+
Description-Content-Type: text/x-rst
|
| 25 |
+
License-File: LICENSE
|
| 26 |
+
Requires-Dist: exceptiongroup>=1.0.2; python_version < "3.11"
|
| 27 |
+
Requires-Dist: idna>=2.8
|
| 28 |
+
Requires-Dist: sniffio>=1.1
|
| 29 |
+
Requires-Dist: typing_extensions>=4.5; python_version < "3.13"
|
| 30 |
+
Provides-Extra: trio
|
| 31 |
+
Requires-Dist: trio>=0.26.1; extra == "trio"
|
| 32 |
+
Provides-Extra: test
|
| 33 |
+
Requires-Dist: anyio[trio]; extra == "test"
|
| 34 |
+
Requires-Dist: coverage[toml]>=7; extra == "test"
|
| 35 |
+
Requires-Dist: exceptiongroup>=1.2.0; extra == "test"
|
| 36 |
+
Requires-Dist: hypothesis>=4.0; extra == "test"
|
| 37 |
+
Requires-Dist: psutil>=5.9; extra == "test"
|
| 38 |
+
Requires-Dist: pytest>=7.0; extra == "test"
|
| 39 |
+
Requires-Dist: trustme; extra == "test"
|
| 40 |
+
Requires-Dist: truststore>=0.9.1; python_version >= "3.10" and extra == "test"
|
| 41 |
+
Requires-Dist: uvloop>=0.21; (platform_python_implementation == "CPython" and platform_system != "Windows" and python_version < "3.14") and extra == "test"
|
| 42 |
+
Provides-Extra: doc
|
| 43 |
+
Requires-Dist: packaging; extra == "doc"
|
| 44 |
+
Requires-Dist: Sphinx~=7.4; extra == "doc"
|
| 45 |
+
Requires-Dist: sphinx_rtd_theme; extra == "doc"
|
| 46 |
+
Requires-Dist: sphinx-autodoc-typehints>=1.2.0; extra == "doc"
|
| 47 |
+
|
| 48 |
+
.. image:: https://github.com/agronholm/anyio/actions/workflows/test.yml/badge.svg
|
| 49 |
+
:target: https://github.com/agronholm/anyio/actions/workflows/test.yml
|
| 50 |
+
:alt: Build Status
|
| 51 |
+
.. image:: https://coveralls.io/repos/github/agronholm/anyio/badge.svg?branch=master
|
| 52 |
+
:target: https://coveralls.io/github/agronholm/anyio?branch=master
|
| 53 |
+
:alt: Code Coverage
|
| 54 |
+
.. image:: https://readthedocs.org/projects/anyio/badge/?version=latest
|
| 55 |
+
:target: https://anyio.readthedocs.io/en/latest/?badge=latest
|
| 56 |
+
:alt: Documentation
|
| 57 |
+
.. image:: https://badges.gitter.im/gitterHQ/gitter.svg
|
| 58 |
+
:target: https://gitter.im/python-trio/AnyIO
|
| 59 |
+
:alt: Gitter chat
|
| 60 |
+
|
| 61 |
+
AnyIO is an asynchronous networking and concurrency library that works on top of either asyncio_ or
|
| 62 |
+
trio_. It implements trio-like `structured concurrency`_ (SC) on top of asyncio and works in harmony
|
| 63 |
+
with the native SC of trio itself.
|
| 64 |
+
|
| 65 |
+
Applications and libraries written against AnyIO's API will run unmodified on either asyncio_ or
|
| 66 |
+
trio_. AnyIO can also be adopted into a library or application incrementally – bit by bit, no full
|
| 67 |
+
refactoring necessary. It will blend in with the native libraries of your chosen backend.
|
| 68 |
+
|
| 69 |
+
Documentation
|
| 70 |
+
-------------
|
| 71 |
+
|
| 72 |
+
View full documentation at: https://anyio.readthedocs.io/
|
| 73 |
+
|
| 74 |
+
Features
|
| 75 |
+
--------
|
| 76 |
+
|
| 77 |
+
AnyIO offers the following functionality:
|
| 78 |
+
|
| 79 |
+
* Task groups (nurseries_ in trio terminology)
|
| 80 |
+
* High-level networking (TCP, UDP and UNIX sockets)
|
| 81 |
+
|
| 82 |
+
* `Happy eyeballs`_ algorithm for TCP connections (more robust than that of asyncio on Python
|
| 83 |
+
3.8)
|
| 84 |
+
* async/await style UDP sockets (unlike asyncio where you still have to use Transports and
|
| 85 |
+
Protocols)
|
| 86 |
+
|
| 87 |
+
* A versatile API for byte streams and object streams
|
| 88 |
+
* Inter-task synchronization and communication (locks, conditions, events, semaphores, object
|
| 89 |
+
streams)
|
| 90 |
+
* Worker threads
|
| 91 |
+
* Subprocesses
|
| 92 |
+
* Asynchronous file I/O (using worker threads)
|
| 93 |
+
* Signal handling
|
| 94 |
+
|
| 95 |
+
AnyIO also comes with its own pytest_ plugin which also supports asynchronous fixtures.
|
| 96 |
+
It even works with the popular Hypothesis_ library.
|
| 97 |
+
|
| 98 |
+
.. _asyncio: https://docs.python.org/3/library/asyncio.html
|
| 99 |
+
.. _trio: https://github.com/python-trio/trio
|
| 100 |
+
.. _structured concurrency: https://en.wikipedia.org/wiki/Structured_concurrency
|
| 101 |
+
.. _nurseries: https://trio.readthedocs.io/en/stable/reference-core.html#nurseries-and-spawning
|
| 102 |
+
.. _Happy eyeballs: https://en.wikipedia.org/wiki/Happy_Eyeballs
|
| 103 |
+
.. _pytest: https://docs.pytest.org/en/latest/
|
| 104 |
+
.. _Hypothesis: https://hypothesis.works/
|
falcon/lib/python3.10/site-packages/functorch/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (671 Bytes). View file
|
|
|
falcon/lib/python3.10/site-packages/functorch/_src/aot_autograd/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file has moved to under torch/_functorch. It is not public API.
|
| 2 |
+
# If you are not a PyTorch developer and you are relying on the following
|
| 3 |
+
# imports, please file an issue.
|
| 4 |
+
from torch._functorch.aot_autograd import (
|
| 5 |
+
aot_autograd_decompositions,
|
| 6 |
+
KNOWN_TYPES,
|
| 7 |
+
PytreeThunk,
|
| 8 |
+
)
|
falcon/lib/python3.10/site-packages/functorch/_src/make_functional/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (250 Bytes). View file
|
|
|
falcon/lib/python3.10/site-packages/functorch/compile/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.08 kB). View file
|
|
|
falcon/lib/python3.10/site-packages/functorch/dim/__init__.py
ADDED
|
@@ -0,0 +1,179 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import dis
|
| 2 |
+
import inspect
|
| 3 |
+
from typing import Sequence, Union
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
|
| 7 |
+
import functorch._C
|
| 8 |
+
from functorch._C import dim as _C
|
| 9 |
+
from .tree_map import tree_flatten, tree_map
|
| 10 |
+
from .wrap_type import wrap_type
|
| 11 |
+
|
| 12 |
+
_C._patch_tensor_class()
|
| 13 |
+
dims, DimList, dimlists = _C.dims, _C.DimList, _C.dimlists
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class DimensionMismatchError(Exception):
|
| 17 |
+
pass
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class DimensionBindError(Exception):
|
| 21 |
+
pass
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
from . import op_properties
|
| 25 |
+
|
| 26 |
+
# use dict to avoid writing C++ bindings for set
|
| 27 |
+
pointwise = dict.fromkeys(op_properties.pointwise, True)
|
| 28 |
+
|
| 29 |
+
use_c = True
|
| 30 |
+
if not use_c:
|
| 31 |
+
from . import reference
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class _Tensor:
|
| 35 |
+
# fast path around slow wrapping/unwrapping logic for simply queries used
|
| 36 |
+
# by the implementation...
|
| 37 |
+
|
| 38 |
+
@property
|
| 39 |
+
def dims(self):
|
| 40 |
+
return tuple(d for d in self._levels if isinstance(d, Dim))
|
| 41 |
+
|
| 42 |
+
def dim(self):
|
| 43 |
+
return self.ndim
|
| 44 |
+
|
| 45 |
+
if use_c:
|
| 46 |
+
__torch_function__ = classmethod(_C.__torch_function__)
|
| 47 |
+
expand = _C._instancemethod(_C.expand)
|
| 48 |
+
else:
|
| 49 |
+
__torch_function__ = reference.__torch_function__
|
| 50 |
+
expand = reference.expand
|
| 51 |
+
|
| 52 |
+
index = _C._instancemethod(_C.index)
|
| 53 |
+
|
| 54 |
+
def __repr__(self):
|
| 55 |
+
tensor, levels, ndim = self._tensor, self._levels, self.ndim
|
| 56 |
+
return f"{tensor}\nwith dims={tuple(l + ndim if isinstance(l, int) else l for l in levels)} sizes={tuple(tensor.size())}"
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
TensorLike = (_Tensor, torch.Tensor)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
class Dim(_C.Dim, _Tensor):
|
| 63 |
+
# note that _C.Dim comes before tensor because we want the Dim API for things like size to take precendence.
|
| 64 |
+
# Tensor defines format, but we want to print Dims with special formatting
|
| 65 |
+
__format__ = object.__format__
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
class Tensor(_Tensor, _C.Tensor):
|
| 69 |
+
if not use_c:
|
| 70 |
+
from_batched = staticmethod(_C.Tensor_from_batched)
|
| 71 |
+
from_positional = staticmethod(_C.Tensor_from_positional)
|
| 72 |
+
sum = _C._instancemethod(_C.Tensor_sum)
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def cat(tensors, dim, new_dim):
|
| 76 |
+
n = dims()
|
| 77 |
+
return stack(tensors, n, dim).index([n, dim], new_dim)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
if use_c:
|
| 81 |
+
_wrap = _C._wrap
|
| 82 |
+
|
| 83 |
+
def _def(name, *args, **kwargs):
|
| 84 |
+
orig = getattr(torch.Tensor, name)
|
| 85 |
+
setattr(_Tensor, name, _C._instancemethod(_wrap(orig, *args, **kwargs)))
|
| 86 |
+
|
| 87 |
+
t__getitem__ = _C._instancemethod(_C.__getitem__)
|
| 88 |
+
stack = _C.stack
|
| 89 |
+
split = _C._instancemethod(_C.split)
|
| 90 |
+
else:
|
| 91 |
+
_wrap, _def = reference._wrap, reference._def
|
| 92 |
+
t__getitem__ = reference.t__getitem__
|
| 93 |
+
stack = reference.stack
|
| 94 |
+
split = reference.split
|
| 95 |
+
|
| 96 |
+
# note: there is no python reference
|
| 97 |
+
t__setitem__ = _C._instancemethod(_C.__setitem__)
|
| 98 |
+
# this is patched in the C API because otherwise torch.Tensor will
|
| 99 |
+
# no longer be considered a sequence and things will break
|
| 100 |
+
# torch.Tensor.__getitem__ = t__getitem__
|
| 101 |
+
|
| 102 |
+
_Tensor.__getitem__ = t__getitem__
|
| 103 |
+
# torch.Tensor.__setitem__ = t__setitem__
|
| 104 |
+
_Tensor.__setitem__ = t__setitem__
|
| 105 |
+
|
| 106 |
+
torch.Tensor.split = split
|
| 107 |
+
_Tensor.split = split
|
| 108 |
+
torch.Tensor.expand = _C._instancemethod(_C.expand)
|
| 109 |
+
torch.Tensor.index = _C._instancemethod(_C.index)
|
| 110 |
+
wrap_type(use_c, _Tensor, torch.Tensor, _Tensor.__torch_function__)
|
| 111 |
+
del _Tensor.ndim
|
| 112 |
+
|
| 113 |
+
if use_c:
|
| 114 |
+
_Tensor.order = _C._instancemethod(_C.order)
|
| 115 |
+
else:
|
| 116 |
+
_Tensor.order = reference.positional
|
| 117 |
+
|
| 118 |
+
_def("mean")
|
| 119 |
+
_def("sum")
|
| 120 |
+
_def("all")
|
| 121 |
+
_def("amax")
|
| 122 |
+
_def("amin")
|
| 123 |
+
_def("aminmax")
|
| 124 |
+
_def("any")
|
| 125 |
+
_def("count_nonzero")
|
| 126 |
+
_def("logsumexp")
|
| 127 |
+
_def("nanmean")
|
| 128 |
+
_def("nansum")
|
| 129 |
+
_def("prod")
|
| 130 |
+
_def("std", keepdim_offset=2)
|
| 131 |
+
_def("var", keepdim_offset=2)
|
| 132 |
+
_def("max", single_dim=True)
|
| 133 |
+
_def("min", single_dim=True)
|
| 134 |
+
_def("argmax", single_dim=True)
|
| 135 |
+
_def("argmin", single_dim=True)
|
| 136 |
+
_def("kthvalue", single_dim=True)
|
| 137 |
+
_def("median", single_dim=True)
|
| 138 |
+
_def("nanmedian", single_dim=True)
|
| 139 |
+
_def("mode", single_dim=True)
|
| 140 |
+
_def("sort", reduce=False)
|
| 141 |
+
_def("argsort", reduce=False)
|
| 142 |
+
_def("unbind", single_dim=True)
|
| 143 |
+
_def("chunk", dim_offset=1, reduce=False)
|
| 144 |
+
_def("cummax", single_dim=True, reduce=False)
|
| 145 |
+
_def("cummin", single_dim=True, reduce=False)
|
| 146 |
+
_def("cumprod", single_dim=True, reduce=False)
|
| 147 |
+
_def("cumprod_", single_dim=True, reduce=False)
|
| 148 |
+
_def("cumsum", single_dim=True, reduce=False)
|
| 149 |
+
_def("cumsum_", single_dim=True, reduce=False)
|
| 150 |
+
_def("logcumsumexp", single_dim=True, reduce=False)
|
| 151 |
+
_def("renorm", dim_offset=1, single_dim=True, reduce=False)
|
| 152 |
+
_def("softmax", single_dim=True, reduce=False)
|
| 153 |
+
softmax = _wrap(torch.nn.functional.softmax, single_dim=True, reduce=False)
|
| 154 |
+
|
| 155 |
+
# stuff to handle in the future, because they require special
|
| 156 |
+
# binding logic for dims
|
| 157 |
+
# cross
|
| 158 |
+
# diag_embed
|
| 159 |
+
# diagonal
|
| 160 |
+
# diagonal_scatter
|
| 161 |
+
# diff
|
| 162 |
+
# nanquantile
|
| 163 |
+
# quantile
|
| 164 |
+
# roll
|
| 165 |
+
# rot90
|
| 166 |
+
# topk (new dimes on output)
|
| 167 |
+
# should these all be subsumed by inplace indexing?
|
| 168 |
+
# index_add_
|
| 169 |
+
# index_add
|
| 170 |
+
# index_copy
|
| 171 |
+
# index_copy_
|
| 172 |
+
# index_fill
|
| 173 |
+
# index_fill_
|
| 174 |
+
# index_select
|
| 175 |
+
# scatter
|
| 176 |
+
# scatter_
|
| 177 |
+
# scatter_add
|
| 178 |
+
# scatter_add_
|
| 179 |
+
# scatter_reduce
|
falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (4.17 kB). View file
|
|
|
falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/batch_tensor.cpython-310.pyc
ADDED
|
Binary file (773 Bytes). View file
|
|
|
falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/delayed_mul_tensor.cpython-310.pyc
ADDED
|
Binary file (3.18 kB). View file
|
|
|
falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/dim.cpython-310.pyc
ADDED
|
Binary file (4.11 kB). View file
|
|
|
falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/magic_trace.cpython-310.pyc
ADDED
|
Binary file (1.19 kB). View file
|
|
|
falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/op_properties.cpython-310.pyc
ADDED
|
Binary file (5.62 kB). View file
|
|
|
falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/reference.cpython-310.pyc
ADDED
|
Binary file (16.1 kB). View file
|
|
|
falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/tree_map.cpython-310.pyc
ADDED
|
Binary file (527 Bytes). View file
|
|
|
falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/wrap_type.cpython-310.pyc
ADDED
|
Binary file (1.48 kB). View file
|
|
|
falcon/lib/python3.10/site-packages/functorch/dim/batch_tensor.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
from contextlib import contextmanager
|
| 7 |
+
|
| 8 |
+
from torch._C._functorch import _vmap_add_layers, _vmap_remove_layers
|
| 9 |
+
|
| 10 |
+
_enabled = False
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
@contextmanager
|
| 14 |
+
def _enable_layers(dims):
|
| 15 |
+
global _enabled
|
| 16 |
+
assert not _enabled
|
| 17 |
+
input = sorted((d._level, d.size) for d in dims if not isinstance(d, int))
|
| 18 |
+
n = len(input)
|
| 19 |
+
try:
|
| 20 |
+
_vmap_add_layers(input)
|
| 21 |
+
_enabled = True
|
| 22 |
+
yield
|
| 23 |
+
finally:
|
| 24 |
+
_enabled = False
|
| 25 |
+
_vmap_remove_layers(n)
|
falcon/lib/python3.10/site-packages/functorch/dim/delayed_mul_tensor.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
from . import _Tensor, Tensor
|
| 9 |
+
from .reference import _dims, _enable_layers, llist, ltuple
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class DelayedMulTensor(_Tensor):
|
| 13 |
+
def __init__(self, lhs, rhs):
|
| 14 |
+
self._lhs, self._rhs = lhs, rhs
|
| 15 |
+
self._data = None
|
| 16 |
+
self._levels_data = None
|
| 17 |
+
self._has_device = lhs._has_device or rhs._has_device
|
| 18 |
+
self._batchtensor_data = None
|
| 19 |
+
self._tensor_data = None
|
| 20 |
+
|
| 21 |
+
@property
|
| 22 |
+
def _levels(self):
|
| 23 |
+
if self._levels_data is None:
|
| 24 |
+
levels = llist(self._lhs._levels)
|
| 25 |
+
for l in self._rhs._levels:
|
| 26 |
+
if l not in levels:
|
| 27 |
+
levels.append(l)
|
| 28 |
+
self._levels_data = ltuple(levels)
|
| 29 |
+
return self._levels_data
|
| 30 |
+
|
| 31 |
+
@property
|
| 32 |
+
def _batchtensor(self):
|
| 33 |
+
if self._batchtensor_data is None:
|
| 34 |
+
with _enable_layers(self._levels):
|
| 35 |
+
print("bt multiply fallback")
|
| 36 |
+
self._batchtensor_data = self._lhs._batchtensor * self._rhs._batchtensor
|
| 37 |
+
return self._batchtensor_data
|
| 38 |
+
|
| 39 |
+
@property
|
| 40 |
+
def _tensor(self):
|
| 41 |
+
if self._tensor_data is None:
|
| 42 |
+
self._tensor_data = Tensor.from_batched(
|
| 43 |
+
self._batchtensor, self._has_device
|
| 44 |
+
)._tensor
|
| 45 |
+
return self._tensor_data
|
| 46 |
+
|
| 47 |
+
@property
|
| 48 |
+
def ndim(self):
|
| 49 |
+
return self._batchtensor.ndim
|
| 50 |
+
|
| 51 |
+
@property
|
| 52 |
+
def dims(self):
|
| 53 |
+
return ltuple(super().dims)
|
| 54 |
+
|
| 55 |
+
def sum(self, dim):
|
| 56 |
+
dims = _dims(dim, 0, False, False)
|
| 57 |
+
n = ord("a")
|
| 58 |
+
all_levels = self._levels
|
| 59 |
+
|
| 60 |
+
def to_char(d):
|
| 61 |
+
return chr(n + all_levels.index(d))
|
| 62 |
+
|
| 63 |
+
plhs, levelslhs = self._lhs._tensor, self._lhs._levels
|
| 64 |
+
prhs, levelsrhs = self._rhs._tensor, self._rhs._levels
|
| 65 |
+
new_dims = tuple(d for d in self.dims if d not in dims)
|
| 66 |
+
new_levels = [l for l in self._levels if l not in dims]
|
| 67 |
+
fmt = "".join(
|
| 68 |
+
[
|
| 69 |
+
*(to_char(d) for d in levelslhs),
|
| 70 |
+
",",
|
| 71 |
+
*(to_char(d) for d in levelsrhs),
|
| 72 |
+
"->",
|
| 73 |
+
*(to_char(d) for d in new_levels),
|
| 74 |
+
]
|
| 75 |
+
)
|
| 76 |
+
result_data = torch.einsum(fmt, (plhs, prhs))
|
| 77 |
+
return Tensor.from_positional(result_data, new_levels, True)
|
falcon/lib/python3.10/site-packages/functorch/dim/dim.py
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
import dis
|
| 7 |
+
import inspect
|
| 8 |
+
|
| 9 |
+
from dataclasses import dataclass
|
| 10 |
+
from typing import Union
|
| 11 |
+
|
| 12 |
+
from . import DimList
|
| 13 |
+
|
| 14 |
+
_vmap_levels = []
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
@dataclass
class LevelInfo:
    """Bookkeeping entry for one vmap nesting level created by binding a Dim."""

    level: int  # the vmap level id returned by _vmap_increment_nesting
    alive: bool = True  # cleared when the owning Dim is garbage-collected
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class Dim:
    """A first-class dimension backed by a vmap nesting level.

    Binding a size (at construction or later through the ``size`` setter)
    pushes a new vmap level; the level is popped lazily in ``__del__`` once it
    becomes the innermost dead level.
    """

    def __init__(self, name: str, size: Union[None, int] = None):
        self.name = name
        self._size = None  # bound lazily; None means "unbound"
        self._vmap_level = None
        if size is not None:
            self.size = size

    def __del__(self):
        if self._vmap_level is not None:
            # Mark our level dead, then pop any run of dead levels that are
            # currently innermost. NOTE(review): _vmap_active_levels,
            # current_level and _vmap_decrement_nesting are not defined in
            # this module (hence the noqa markers) — presumably injected into
            # the module globals at runtime; verify against the C bindings.
            _vmap_active_levels[self._vmap_stack].alive = False  # noqa: F821
            while (
                not _vmap_levels[-1].alive
                and current_level() == _vmap_levels[-1].level  # noqa: F821
            ):
                _vmap_decrement_nesting()  # noqa: F821
                _vmap_levels.pop()

    @property
    def size(self):
        # Reading the size of an unbound dim is a programming error.
        assert self.is_bound
        return self._size

    @size.setter
    def size(self, size: int):
        from . import DimensionBindError

        if self._size is None:
            # First binding: record the size and allocate a fresh vmap level.
            self._size = size
            self._vmap_level = _vmap_increment_nesting(size, "same")  # noqa: F821
            self._vmap_stack = len(_vmap_levels)
            _vmap_levels.append(LevelInfo(self._vmap_level))

        elif self._size != size:
            # Rebinding to the same size is a silent no-op; a different size
            # is a hard error.
            raise DimensionBindError(
                f"Dim '{self}' previously bound to a dimension of size {self._size} cannot bind to a dimension of size {size}"
            )

    @property
    def is_bound(self):
        # True once a size has been assigned.
        return self._size is not None

    def __repr__(self):
        return self.name
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def extract_name(inst):
    """Return the variable name written by a STORE_FAST/STORE_NAME instruction."""
    assert inst.opname in ("STORE_FAST", "STORE_NAME")
    return inst.argval
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
_cache = {}
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def dims(lists=0):
    """Create Dim (and trailing DimList) objects named after the variables they
    are being assigned to, discovered by inspecting the caller's bytecode.

    ``lists`` is how many of the trailing assignment targets should become
    DimLists instead of Dims. The (code, lasti) call site is memoized in
    ``_cache`` so repeated executions of the same statement are cheap.
    """
    frame = inspect.currentframe()
    assert frame is not None
    calling_frame = frame.f_back
    assert calling_frame is not None
    code, lasti = calling_frame.f_code, calling_frame.f_lasti
    key = (code, lasti)
    if key not in _cache:
        # The instruction right after the CALL stores (or unpacks) the result.
        # NOTE(review): assumes 2-byte wordcode instructions — confirm on the
        # supported Python versions.
        first = lasti // 2 + 1
        instructions = list(dis.get_instructions(calling_frame.f_code))
        unpack = instructions[first]

        if unpack.opname == "STORE_FAST" or unpack.opname == "STORE_NAME":
            # just a single dim, not a list
            name = unpack.argval
            ctor = Dim if lists == 0 else DimList
            _cache[key] = lambda: ctor(name=name)
        else:
            # Tuple assignment: one STORE per target follows the unpack.
            assert unpack.opname == "UNPACK_SEQUENCE"
            ndims = unpack.argval
            names = tuple(
                extract_name(instructions[first + 1 + i]) for i in range(ndims)
            )
            first_list = len(names) - lists
            _cache[key] = lambda: tuple(
                Dim(n) if i < first_list else DimList(name=n)
                for i, n in enumerate(names)
            )
    return _cache[key]()
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def _dim_set(positional, arg):
    """Normalize ``arg`` into a tuple of dims drawn from ``positional``.

    None selects every positional dim; a single Dim or int selects one; any
    other iterable selects each of its (Dim or int) entries.
    """

    def resolve(entry):
        # Dims pass through unchanged; integers index into the positional dims.
        if isinstance(entry, Dim):
            return entry
        assert isinstance(entry, int)
        return positional[entry]

    if arg is None:
        return positional
    if isinstance(arg, (Dim, int)):
        return (resolve(arg),)
    return tuple(resolve(entry) for entry in arg)
|
falcon/lib/python3.10/site-packages/functorch/dim/reference.py
ADDED
|
@@ -0,0 +1,645 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
# reference python implementations for C ops
|
| 8 |
+
import torch
|
| 9 |
+
|
| 10 |
+
from functorch._C import dim as _C
|
| 11 |
+
from . import op_properties
|
| 12 |
+
from .batch_tensor import _enable_layers
|
| 13 |
+
from .tree_map import tree_flatten, tree_map
|
| 14 |
+
|
| 15 |
+
DimList = _C.DimList
|
| 16 |
+
import operator
|
| 17 |
+
from functools import reduce
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# use dict to avoid writing C++ bindings for set
|
| 21 |
+
pointwise = set(op_properties.pointwise)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def prod(x):
    """Return the product of the elements of ``x`` (1 for an empty iterable)."""
    result = 1
    for item in x:
        result *= item
    return result
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def _wrap_dim(d, N, keepdim):
    """Normalize one dim spec: first-class Dims pass through, ints are mapped
    to negative offsets relative to an N-dimensional tensor."""
    from . import Dim

    if isinstance(d, Dim):
        # keepdim would leave a size-1 slot where the first-class dim was,
        # which has no meaning for first-class dimensions.
        assert not keepdim, "cannot preserve first-class dimensions with keepdim=True"
        return d
    return d - N if d >= 0 else d
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def _dims(d, N, keepdim, single_dim):
    """Normalize ``d`` (one dim or a sequence of dims) into an ltuple of
    wrapped dims (see _wrap_dim)."""
    from . import Dim

    if isinstance(d, (Dim, int)):
        return ltuple((_wrap_dim(d, N, keepdim),))
    # A sequence was passed; only legal when the caller accepts multiple dims.
    assert not single_dim, f"expected a single dimension or int but found: {d}"
    return ltuple(_wrap_dim(entry, N, keepdim) for entry in d)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def _bind_dims_to_size(lhs_size, rhs, lhs_debug):
    """Bind the sizes of the Dims in ``rhs`` so their product equals ``lhs_size``.

    At most one Dim in ``rhs`` may be unbound; its size is inferred from the
    remaining quotient. Raises DimensionMismatchError when the sizes cannot be
    reconciled. ``lhs_debug`` is only used to build a readable error message.
    """
    from . import DimensionMismatchError

    not_bound = tuple((i, r) for i, r in enumerate(rhs) if not r.is_bound)
    if len(not_bound) == 1:
        # exactly one unknown: infer it from the remaining quotient
        _, d = not_bound[0]
        rhs_so_far = prod(r.size for r in rhs if r.is_bound)
        if lhs_size % rhs_so_far != 0:
            rhs_s = tuple("?" if not r.is_bound else str(r.size) for r in rhs)
            raise DimensionMismatchError(
                f"inferred dimension does not evenly fit into larger dimension: {lhs_size} vs {rhs_s}"
            )
        new_size = lhs_size // rhs_so_far
        d.size = new_size
    elif len(not_bound) > 1:
        rhs_s = tuple("?" if not r.is_bound else str(r.size) for r in rhs)
        raise DimensionMismatchError(
            f"cannot infer the size of two dimensions at once: {rhs} with sizes {rhs_s}"
        )
    else:
        # everything bound: the products must match exactly
        rhs_size = prod(r.size for r in rhs)
        if lhs_size != rhs_size:
            # bug fix: message previously read "sizes to do not match"
            raise DimensionMismatchError(
                f"Dimension sizes do not match ({lhs_size} != {rhs_size}) when matching {lhs_debug} to {rhs}"
            )
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def _tensor_levels(inp):
    """Return (plain tensor, mutable level list, has_device) for any tensor-like."""
    from . import _Tensor

    if not isinstance(inp, _Tensor):
        # Plain torch tensor: every level is positional, counted from the right.
        return inp, llist(range(-inp.ndim, 0)), True
    return inp._tensor, llist(inp._levels), inp._has_device
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def _match_levels(v, from_levels, to_levels):
|
| 86 |
+
view = []
|
| 87 |
+
permute = []
|
| 88 |
+
requires_view = False
|
| 89 |
+
size = v.size()
|
| 90 |
+
for t in to_levels:
|
| 91 |
+
try:
|
| 92 |
+
idx = from_levels.index(t)
|
| 93 |
+
permute.append(idx)
|
| 94 |
+
view.append(size[idx])
|
| 95 |
+
except ValueError:
|
| 96 |
+
view.append(1)
|
| 97 |
+
requires_view = True
|
| 98 |
+
if permute != list(range(len(permute))):
|
| 99 |
+
v = v.permute(*permute)
|
| 100 |
+
if requires_view:
|
| 101 |
+
v = v.view(*view)
|
| 102 |
+
return v
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
# make a single dimension positional but do not permute it,
|
| 106 |
+
# used to do multi-tensor operators where the dim being acted on
|
| 107 |
+
# should not physically move if possible
|
| 108 |
+
def _positional_no_permute(self, dim, expand_dim=False):
    """Make ``dim`` positional without physically moving the data.

    Returns ``(tensor, idx_batched)`` where ``idx_batched`` is the positional
    index at which ``dim`` now lives. With ``expand_dim=True`` a dim missing
    from the tensor is created at the front by expanding.
    """
    from . import Tensor

    ptensor, levels = self._tensor, llist(self._levels)
    try:
        idx = levels.index(dim)
    except ValueError:
        if not expand_dim:
            raise
        # dim not present: materialize it as a new leading axis
        idx = 0
        ptensor = ptensor.expand(dim.size, *ptensor.size())
        levels.insert(0, 0)
    # Count positional levels left of idx and renumber them to account for
    # the positional dim being inserted at idx.
    idx_batched = 0
    for i in range(idx):
        if isinstance(levels[i], int):
            levels[i] -= 1
            idx_batched += 1
    levels[idx] = -idx_batched - 1
    return Tensor.from_positional(ptensor, levels, self._has_device), idx_batched
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
def seq(a, b):
    """Level equality: Dims compare by identity, plain levels by value."""
    from . import Dim

    a_is_dim = isinstance(a, Dim)
    if a_is_dim != isinstance(b, Dim):
        return False
    return a is b if a_is_dim else a == b
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
class isin:
    """Mixin giving containers ``in`` and ``index`` based on level equality (seq)."""

    def __contains__(self, item):
        return any(seq(item, element) for element in self)

    def index(self, item):
        for position, element in enumerate(self):
            if seq(item, element):
                return position
        raise ValueError
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
class llist(isin, list):
    # list with Dim-aware membership and index (see isin)
    pass
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
class ltuple(isin, tuple):
    # tuple with Dim-aware membership and index (see isin)
    pass
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
empty_dict = {}
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
@classmethod
def __torch_function__(self, orig, cls, args, kwargs=empty_dict):
    """Reference __torch_function__ handler for first-class-dimension tensors.

    Pointwise ops run directly on the positional tensors after aligning
    levels; everything else falls back to the batched (vmap) representation.
    """
    from . import _Tensor, Tensor, TensorLike
    from .delayed_mul_tensor import DelayedMulTensor

    if orig is torch.Tensor.__mul__:
        lhs, rhs = args
        if (
            isinstance(lhs, _Tensor)
            and isinstance(rhs, _Tensor)
            and lhs.ndim == 0
            and rhs.ndim == 0
        ):
            # Defer the multiply of two 0-dim first-class tensors
            # (see DelayedMulTensor).
            return DelayedMulTensor(lhs, rhs)
    # Collect every first-class dim appearing in any argument, and remember a
    # tensor that carries a real device so device-less operands can follow it.
    all_dims = llist()
    flat_args, unflatten = tree_flatten((args, kwargs))
    device_holding_tensor = None
    for f in flat_args:
        if isinstance(f, _Tensor):
            if f._has_device:
                device_holding_tensor = f._batchtensor
            for d in f.dims:
                if d not in all_dims:
                    all_dims.append(d)

    def unwrap(t):
        # _Tensor -> its underlying batched tensor, moved to the shared device.
        if isinstance(t, _Tensor):
            r = t._batchtensor
            if device_holding_tensor is not None and not t._has_device:
                r = r.to(device=device_holding_tensor.device)
            return r
        return t

    if orig in pointwise:
        # Fast path: align every operand's levels and run orig positionally.
        result_levels = llist()
        arg_levels = llist()
        to_expand = []
        for i, f in enumerate(flat_args):
            if isinstance(f, TensorLike):
                ptensor, levels, _ = _tensor_levels(f)
                if (
                    isinstance(f, _Tensor)
                    and not f._has_device
                    and device_holding_tensor is not None
                ):
                    ptensor = ptensor.to(device=device_holding_tensor.device)
                flat_args[i] = ptensor
                for l in levels:
                    if l not in result_levels:
                        result_levels.append(l)
                to_expand.append((i, levels))

        for i, levels in to_expand:
            flat_args[i] = _match_levels(flat_args[i], levels, result_levels)
        args, kwargs = unflatten(flat_args)
        result = orig(*args, **kwargs)

        def wrap(t):
            if isinstance(t, TensorLike):
                return Tensor.from_positional(
                    t, result_levels, device_holding_tensor is not None
                )
            return t

        return tree_map(wrap, result)
    else:
        # General path: run under vmap layers for every dim that appears.

        def wrap(t):
            if isinstance(t, TensorLike):
                return Tensor.from_batched(t, device_holding_tensor is not None)
            return t

        with _enable_layers(all_dims):
            print(f"batch_tensor for {orig}")
            args, kwargs = unflatten(unwrap(f) for f in flat_args)
            result = orig(*args, **kwargs)
            # print("END", orig)
            return tree_map(wrap, result)
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
def positional(self, *dims):
    """Make the given first-class dims positional (leftmost, in order).

    Entries of ``dims`` may be Dims, DimLists, ints (existing positional
    dims), or tuples of Dims ("dim packs") which are flattened here and then
    re-viewed into a single positional dimension at the end.
    """
    from . import Dim, DimensionBindError, Tensor

    ptensor, levels = self._tensor, llist(self._levels)
    flat_dims = llist()
    view = []
    needs_view = False
    ndim = self.ndim
    for d in dims:
        if isinstance(d, DimList):
            flat_dims.extend(d)
            view.extend(e.size for e in d)
        elif isinstance(d, Dim):
            flat_dims.append(d)
            view.append(d.size)
        elif isinstance(d, int):
            d = _wrap_dim(d, ndim, False)
            flat_dims.append(d)
            view.append(ptensor.size(d))
        else:
            # a dim pack: several dims collapse into one positional dimension
            flat_dims.extend(d)
            view.append(prod(e.size for e in d))
            needs_view = True

    # Move each requested dim to position i, keeping the rest in order.
    # (bug-adjacent cleanup: removed unused local `nflat`)
    permute = list(range(len(levels)))
    for i, d in enumerate(flat_dims):
        try:
            idx = levels.index(d)
        except ValueError as e:
            raise DimensionBindError(
                f"tensor of dimensions {self.dims} does not contain dim {d}"
            ) from e
        p = permute[idx]
        del levels[idx]
        del permute[idx]
        levels.insert(i, 0)
        permute.insert(i, p)
    ptensor = ptensor.permute(*permute)
    # Renumber the remaining positional levels from the right: -1, -2, ...
    seen = 0
    for i in range(len(levels) - 1, -1, -1):
        if isinstance(levels[i], int):
            seen += 1
            levels[i] = -seen
    result = Tensor.from_positional(ptensor, levels, self._has_device)
    if needs_view:
        result = result.reshape(*view, *result.size()[len(flat_dims) :])
    return result
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
def _contains_dim(input):
    """Return whether any element of ``input`` is a first-class Dim.

    Previously fell off the end and returned None in the negative case; now
    returns an explicit bool (both are falsy, so callers are unaffected).
    """
    from . import Dim

    return any(isinstance(item, Dim) for item in input)
|
| 301 |
+
|
| 302 |
+
|
| 303 |
+
def expand(self, *sizes):
    """Tensor.expand that also accepts leading first-class Dims as the new
    dimensions to expand into."""
    if not _contains_dim(sizes):
        # Plain integer sizes: defer to the regular torch implementation.
        return self.__torch_function__(torch.Tensor.expand, None, (self, *sizes))
    dims = sizes
    # Expand by the dim sizes, keep existing dims (-1), then bind the new
    # leading positions to the given first-class dims via indexing.
    sizes = [d.size for d in dims] + [-1] * self.ndim
    self = self.expand(*sizes)
    return self[dims]
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
_not_present = object()
|
| 313 |
+
|
| 314 |
+
|
| 315 |
+
def _getarg(name, offset, args, kwargs, default):
|
| 316 |
+
if len(args) > offset:
|
| 317 |
+
return args[offset]
|
| 318 |
+
return kwargs.get(name, default)
|
| 319 |
+
|
| 320 |
+
|
| 321 |
+
def _patcharg(name, offset, args, kwargs, value):
|
| 322 |
+
if len(args) > offset:
|
| 323 |
+
args[offset] = value
|
| 324 |
+
else:
|
| 325 |
+
kwargs[name] = value
|
| 326 |
+
|
| 327 |
+
|
| 328 |
+
def _wrap(
    orig, dim_offset=0, keepdim_offset=1, dim_name="dim", single_dim=False, reduce=True
):
    """Wrap torch.Tensor method ``orig`` so it accepts first-class Dims for
    its ``dim`` argument.

    ``dim_offset``/``keepdim_offset`` are the positional offsets of those
    arguments in ``orig``'s signature; ``single_dim`` means orig accepts only
    one dim; ``reduce`` means orig removes the reduced dims (honoring keepdim).
    """
    from . import Dim, Tensor, TensorLike

    def fn(self, *args, **kwargs):
        dim = _getarg(dim_name, dim_offset, args, kwargs, _not_present)
        if dim is _not_present or (single_dim and not isinstance(dim, Dim)):
            # No first-class dim involved: run on the batched tensor directly.
            with _enable_layers(self.dims):
                print(f"dim fallback batch_tensor for {orig}")
                return Tensor.from_batched(
                    orig(self._batchtensor, *args, **kwargs), self._has_device
                )
        keepdim = (
            _getarg("keepdim", keepdim_offset, args, kwargs, False) if reduce else False
        )
        t, levels = self._tensor, llist(self._levels)
        dims = _dims(dim, self._batchtensor.ndim, keepdim, single_dim)
        dim_indices = tuple(levels.index(d) for d in dims)
        if reduce and not keepdim:
            # Reduced levels disappear from the result.
            new_levels = [l for i, l in enumerate(levels) if i not in dim_indices]
        else:
            new_levels = levels

        if len(dim_indices) == 1:
            dim_indices = dim_indices[
                0
            ]  # so that dims that really only take a single argument work...
        # args must be mutable so _patcharg can rewrite the dim argument.
        args = list(args)
        _patcharg(dim_name, dim_offset, args, kwargs, dim_indices)

        def wrap(t):
            if isinstance(t, TensorLike):
                return Tensor.from_positional(t, new_levels, self._has_device)
            return t

        with _enable_layers(new_levels):
            print(f"dim used batch_tensor for {orig}")
            r = orig(t, *args, **kwargs)
            return tree_map(wrap, r)

    return fn
|
| 370 |
+
|
| 371 |
+
|
| 372 |
+
def _def(name, *args, **kwargs):
    """Install a dim-aware wrapper of torch.Tensor.<name> onto _Tensor."""
    from . import _Tensor

    original = getattr(torch.Tensor, name)
    wrapped = _wrap(original, *args, **kwargs)
    setattr(_Tensor, name, wrapped)
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
no_slice = slice(None)
|
| 380 |
+
|
| 381 |
+
_orig_getitem = torch.Tensor.__getitem__
|
| 382 |
+
|
| 383 |
+
|
| 384 |
+
class dim_tracker:
    """Counts how many times each first-class Dim is used while building an
    indexing expression (t__getitem__ relies on count == 1 to decide whether a
    dim can be kept as a plain slice)."""

    def __init__(self):
        self.dims = llist()
        self.count = []

    def record(self, d):
        if d not in self.dims:
            self.dims.append(d)
            self.count.append(1)
        else:
            # bug fix: repeated uses were previously not counted, so every dim
            # appeared to be used exactly once and the dims_seen[d] == 1 fast
            # path in t__getitem__ could misfire for dims used more than once
            # (diagonal-style indexing).
            self.count[self.dims.index(d)] += 1

    def __getitem__(self, d):
        # number of recorded uses of d
        return self.count[self.dims.index(d)]
|
| 396 |
+
|
| 397 |
+
|
| 398 |
+
def t__getitem__(self, input):
    """__getitem__ replacement that understands first-class Dims, dim packs,
    unbound DimLists and ``...`` alongside ordinary indexing objects."""
    from . import _Tensor, Dim, DimensionBindError, DimList, Tensor, TensorLike

    # * bail to the original implementation if we have a single non-Dim tensor,
    #   or a non-tensor
    # * locate ... or an unbound dim list, determine its size, bind the dim list
    #   (remember that None does not count toward the total dim count)
    # * bind simple dims and dim-packs to their sizes, count the number of uses
    #   of each dim, produce the re-view if needed
    # * for each single-use dim index, replace with no_slice and mark that it
    #   will be added (keep track of whether we have to call super)
    # * call super if needed
    # * if we have dims to bind, bind them (it helps that ... and None were
    #   eliminated before)

    # this handles bool indexing handling, as well as some other simple cases.

    is_simple = (
        not isinstance(input, Dim)
        and not isinstance(input, (tuple, list))
        and
        # WAR for functorch bug where zero-dim tensors in getitem are not handled correctly.
        not (isinstance(input, TensorLike) and input.ndim == 0)
    )

    if is_simple:
        if isinstance(self, _Tensor):
            return _Tensor.__torch_function__(_orig_getitem, None, (self, input))
        else:
            return _orig_getitem(self, input)

    # can further optimize this case
    if not isinstance(input, tuple):
        input = [input]
    else:
        input = list(input)

    # Pass 1: count explicitly indexed dims and find the single expanding
    # object (... or an unbound DimList), if any.
    dims_indexed = 0
    expanding_object = None
    dimlists = []
    for i, s in enumerate(input):
        if s is ... or isinstance(s, DimList) and not s.is_bound:
            if expanding_object is not None:
                msg = (
                    "at most one ... or unbound dimension list can exist in indexing list but"
                    f" found 2 at offsets {i} and {expanding_object}"
                )
                raise DimensionBindError(msg)
            expanding_object = i

        if isinstance(s, DimList):
            dims_indexed += len(s) if s.is_bound else 0
            dimlists.append(i)
        elif s is not None and s is not ...:
            dims_indexed += 1

    ndim = self.ndim
    if dims_indexed > ndim:
        raise IndexError(
            f"at least {dims_indexed} indices were supplied but the tensor only has {ndim} dimensions."
        )
    if expanding_object is not None:
        # ... expands to full slices; an unbound DimList binds its length
        expanding_ndims = ndim - dims_indexed
        obj = input[expanding_object]
        if obj is ...:
            input[expanding_object : expanding_object + 1] = [
                no_slice
            ] * expanding_ndims
        else:
            obj.bind_len(expanding_ndims)
    # flatten the dimlists into the indexing
    for i in reversed(dimlists):
        input[i : i + 1] = input[i]

    # Pass 2: bind dim sizes, count dim uses, and compute the view needed to
    # split dim-packed dimensions.
    dims_indexed = 0
    requires_view = False
    size = self.size()
    view_sizes = []
    dims_seen = dim_tracker()

    def add_dims(t):
        if not isinstance(t, _Tensor):
            return
        for d in t.dims:
            dims_seen.record(d)

    add_dims(self)
    dim_packs = []
    for i, idx in enumerate(input):
        if idx is None:
            input[i] = no_slice
            view_sizes.append(1)
            requires_view = True
        else:
            sz = size[dims_indexed]
            if isinstance(idx, Dim):
                idx.size = sz
                dims_seen.record(idx)
                view_sizes.append(sz)
            elif isinstance(idx, (tuple, list)) and idx and isinstance(idx[0], Dim):
                for d in idx:
                    # bug fix: previously recorded the whole pack (`idx`) on
                    # every iteration instead of each member dim `d`
                    dims_seen.record(d)
                _bind_dims_to_size(sz, idx, f"offset {i}")
                view_sizes.extend(d.size for d in idx)
                requires_view = True
                dim_packs.append(i)
            else:
                add_dims(idx)
                view_sizes.append(sz)
            dims_indexed += 1
    if requires_view:
        self = self.view(*view_sizes)
    for i in reversed(dim_packs):
        input[i : i + 1] = input[i]

    # currently:
    # input is flat, containing either Dim, or Tensor, or something valid for standard indexing
    # self may have first-class dims as well.

    # to index:
    # drop the first class dims from self, they just become direct indices of their positions

    # figure out the dimensions of the indexing tensors: union of all the dims in the tensors in the index.
    # these dimensions will appear and need to be bound at the first place a tensor occurs

    if isinstance(self, _Tensor):
        ptensor_self, levels = self._tensor, list(self._levels)
        # indices to ptensor rather than self which has first-class dimensions
        input_it = iter(input)
        flat_inputs = [next(input_it) if isinstance(l, int) else l for l in levels]
        has_device = self._has_device
        to_pad = 0
    else:
        ptensor_self, flat_inputs = self, input
        to_pad = ptensor_self.ndim - len(flat_inputs)
        has_device = True

    result_levels = []
    index_levels = []
    tensor_insert_point = None
    to_expand = {}
    requires_getindex = False
    for i, inp in enumerate(flat_inputs):
        if isinstance(inp, Dim) and dims_seen[inp] == 1:
            # a dim used exactly once is just a named full slice
            flat_inputs[i] = no_slice
            result_levels.append(inp)
        elif isinstance(inp, TensorLike):
            requires_getindex = True
            if tensor_insert_point is None:
                tensor_insert_point = len(result_levels)
            ptensor, levels, _ = _tensor_levels(inp)
            to_expand[i] = levels
            flat_inputs[i] = ptensor
            for l in levels:
                if l not in index_levels:
                    index_levels.append(l)
        else:
            requires_getindex = True
            result_levels.append(0)

    if tensor_insert_point is not None:
        result_levels[tensor_insert_point:tensor_insert_point] = index_levels

    for i, levels in to_expand.items():
        flat_inputs[i] = _match_levels(flat_inputs[i], levels, index_levels)

    if requires_getindex:
        result = _orig_getitem(ptensor_self, flat_inputs)
    else:
        result = ptensor_self

    # renumber positional placeholders from the right: -1, -2, ...
    next_positional = -1
    if to_pad > 0:
        result_levels.extend([0] * to_pad)
    for i, r in enumerate(reversed(result_levels)):
        if isinstance(r, int):
            result_levels[-1 - i] = next_positional
            next_positional -= 1

    return Tensor.from_positional(result, result_levels, has_device)
|
| 575 |
+
|
| 576 |
+
|
| 577 |
+
# XXX - dim is optional and can be the outer-most dimension...
|
| 578 |
+
def stack(tensors, new_dim, dim=0, out=None):
    """torch.stack that introduces first-class dim ``new_dim``; ``dim`` may be
    an int or a first-class Dim giving where to insert the stacked axis."""
    if isinstance(dim, int):
        # plain positional stack, then name the new axis
        return torch.stack(tensors, dim, out).index(dim, new_dim)
    index = None
    if out is not None:
        out, index = _positional_no_permute(out, dim, expand_dim=True)
    ptensors = []
    for t in tensors:
        pt, pi = _positional_no_permute(t, dim, expand_dim=True)
        if index is not None and pi != index:
            # keep every operand's target dim at the same physical position
            pt = pt.move_dim(pi, index)
        else:
            index = pi
        ptensors.append(pt)
    pr = torch.stack(ptensors, index, out=out)
    # rebind the two adjacent positional axes to (new_dim, dim)
    return pr.index((index, index + 1), (new_dim, dim))
|
| 594 |
+
|
| 595 |
+
|
| 596 |
+
_orig_split = torch.Tensor.split
|
| 597 |
+
|
| 598 |
+
|
| 599 |
+
def split(self, split_size_or_sections, dim=0):
    """Tensor.split that also accepts first-class Dims as the section sizes
    (each resulting chunk is bound to the corresponding Dim).

    Unbound Dims in the sections have their sizes inferred by splitting the
    leftover extent into roughly equal chunks.
    """
    from . import _Tensor, Dim

    if isinstance(split_size_or_sections, int) or any(
        isinstance(t, int) for t in split_size_or_sections
    ):
        # plain integer split: mixing a Dim `dim` with int sizes is ambiguous
        if isinstance(dim, Dim):
            raise ValueError(
                "when dim is specified as a Dim object, split sizes must also be dimensions."
            )
        return _orig_split(self, split_size_or_sections, dim=dim)

    if isinstance(dim, Dim):
        assert isinstance(self, _Tensor), f"Tensor does not have dimension {dim}"
        self, dim = _positional_no_permute(self, dim)

    size = self.size(dim)
    total_bound_size = 0
    unbound = []
    sizes = []
    for i, d in enumerate(split_size_or_sections):
        if d.is_bound:
            sizes.append(d.size)
            total_bound_size += d.size
        else:
            # placeholder; filled in below once inferred
            sizes.append(0)
            unbound.append(i)

    if unbound:
        assert (
            total_bound_size <= size
        ), f"result dimensions are larger than original: {total_bound_size} vs {size} ({split_size_or_sections})"
        remaining_size = size - total_bound_size
        # ceil division: spread the leftover over the unbound dims
        chunk_size = -(-remaining_size // len(unbound))
        for u in unbound:
            sz = min(chunk_size, remaining_size)
            split_size_or_sections[u].size = sz
            sizes[u] = sz
            remaining_size -= sz
    else:
        assert (
            total_bound_size == size
        ), f"result dimensions do not match original: {total_bound_size} vs {size} ({split_size_or_sections})"
    return tuple(
        t.index(dim, d)
        for d, t in zip(split_size_or_sections, _orig_split(self, sizes, dim=dim))
    )
|
falcon/lib/python3.10/site-packages/functorch/dim/tree_map.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
from functorch._C import dim
|
| 8 |
+
|
| 9 |
+
tree_flatten = dim.tree_flatten
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def tree_map(fn, tree):
    """Apply ``fn`` to every leaf of ``tree``, preserving its structure."""
    leaves, rebuild = tree_flatten(tree)
    return rebuild(fn(leaf) for leaf in leaves)
|
falcon/lib/python3.10/site-packages/functorch/dim/wrap_type.py
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
from types import (
|
| 8 |
+
BuiltinMethodType,
|
| 9 |
+
FunctionType,
|
| 10 |
+
GetSetDescriptorType,
|
| 11 |
+
MethodDescriptorType,
|
| 12 |
+
WrapperDescriptorType,
|
| 13 |
+
)
|
| 14 |
+
|
| 15 |
+
from functorch._C import dim as _C
|
| 16 |
+
|
| 17 |
+
# Method-wrapping helper provided by the `_C` extension module.
_wrap_method = _C._wrap_method

# Descriptor types that are treated as callable methods when found in a
# class __dict__: plain Python functions plus the builtin/C-level method
# and slot-wrapper descriptor types.
FUNC_TYPES = (
    FunctionType,
    MethodDescriptorType,
    BuiltinMethodType,
    WrapperDescriptorType,
)
# Descriptor types that are treated as attribute-style accessors.
PROPERTY_TYPES = (GetSetDescriptorType, property)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def _py_wrap_method(orig, __torch_function__):
|
| 29 |
+
def impl(*args, **kwargs):
|
| 30 |
+
return __torch_function__(orig, None, args, kwargs)
|
| 31 |
+
|
| 32 |
+
return impl
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def wrap_type(use_c, to_patch, pattern, __torch_function__):
    """Patch methods and properties from *pattern*'s MRO onto *to_patch*.

    Every function-like or property-like descriptor found in the classes of
    ``pattern.mro()`` (excluding ``object``) is re-installed on *to_patch*,
    wrapped so that calls route through *__torch_function__*. When *use_c*
    is true the C wrapper is used; otherwise the pure-Python fallback.
    """
    wrap_method = _wrap_method if use_c else _py_wrap_method

    # Gather attributes over the whole MRO (most-derived class wins),
    # skipping the trailing `object` entry.
    members = {}
    for klass in reversed(pattern.mro()[:-1]):
        members.update(klass.__dict__)

    # Structural attributes that must never be replaced on the target type.
    skipped_names = {
        "__dict__",
        "__new__",
        "__init__",
        "__repr__",
        "__weakref__",
        "__doc__",
        "__module__",
        "__dir__",
    }

    def wrap_attr(descriptor):
        # Expose a wrapped getter; reads go through __torch_function__ too.
        return property(wrap_method(descriptor.__get__, __torch_function__))

    for name, member in members.items():
        if name in skipped_names:
            continue

        # Skip anything the target type already overloads itself.
        # Attributes inherited straight from `object` (e.g. `__eq__`) do
        # not count as overloads and still get patched.
        if hasattr(to_patch, name) and getattr(to_patch, name) is not getattr(
            object, name, None
        ):
            continue

        if isinstance(member, FUNC_TYPES):
            setattr(to_patch, name, wrap_method(member, __torch_function__))
        elif isinstance(member, PROPERTY_TYPES):
            setattr(to_patch, name, wrap_attr(member))
|