Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +4 -0
- parrot/lib/python3.10/site-packages/narwhals/_arrow/__init__.py +0 -0
- parrot/lib/python3.10/site-packages/narwhals/_arrow/__pycache__/namespace.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/narwhals/_arrow/dataframe.py +617 -0
- parrot/lib/python3.10/site-packages/narwhals/_arrow/expr.py +671 -0
- parrot/lib/python3.10/site-packages/narwhals/_arrow/group_by.py +168 -0
- parrot/lib/python3.10/site-packages/narwhals/_arrow/selectors.py +163 -0
- parrot/lib/python3.10/site-packages/narwhals/_arrow/series.py +1038 -0
- parrot/lib/python3.10/site-packages/narwhals/_arrow/typing.py +17 -0
- parrot/lib/python3.10/site-packages/narwhals/_dask/__init__.py +0 -0
- parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/dataframe.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/expr.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/group_by.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/namespace.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/selectors.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/typing.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/utils.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/narwhals/_dask/dataframe.py +357 -0
- parrot/lib/python3.10/site-packages/narwhals/_dask/namespace.py +331 -0
- parrot/lib/python3.10/site-packages/narwhals/_dask/typing.py +16 -0
- parrot/lib/python3.10/site-packages/narwhals/_exceptions.py +4 -0
- parrot/lib/python3.10/site-packages/narwhals/dataframe.py +0 -0
- parrot/lib/python3.10/site-packages/narwhals/group_by.py +133 -0
- parrot/lib/python3.10/site-packages/narwhals/series.py +0 -0
- parrot/lib/python3.10/site-packages/narwhals/typing.py +64 -0
- parrot/lib/python3.10/site-packages/scipy/linalg/_decomp_update.cpython-310-x86_64-linux-gnu.so +3 -0
- parrot/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_interpolation.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/scipy/optimize/__pycache__/_optimize.cpython-310.pyc +3 -0
- parrot/lib/python3.10/site-packages/scipy/stats/__pycache__/_morestats.cpython-310.pyc +3 -0
- parrot/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-Z1-cdf-sample-data.npy +3 -0
- parrot/lib/python3.10/site-packages/six-1.16.0.dist-info/INSTALLER +1 -0
- parrot/lib/python3.10/site-packages/six-1.16.0.dist-info/LICENSE +18 -0
- parrot/lib/python3.10/site-packages/six-1.16.0.dist-info/RECORD +9 -0
- parrot/lib/python3.10/site-packages/six-1.16.0.dist-info/top_level.txt +1 -0
- parrot/lib/python3.10/site-packages/sty-1.0.6.dist-info/LICENSE +204 -0
- parrot/lib/python3.10/site-packages/sty-1.0.6.dist-info/WHEEL +4 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/detail/TensorDataContainer.h +372 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/detail/static.h +65 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/cloneable.h +98 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional.h +17 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/init.h +124 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/module.h +702 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules.h +36 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options.h +18 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/linear.h +95 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/loss.h +802 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformer.h +64 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/upsampling.h +110 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/pimpl-inl.h +74 -0
.gitattributes
CHANGED
|
@@ -1717,3 +1717,7 @@ vllm/lib/python3.10/site-packages/cupy/cuda/memory_hook.cpython-310-x86_64-linux
|
|
| 1717 |
vllm/lib/python3.10/site-packages/cupy/cuda/cufft.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1718 |
vllm/lib/python3.10/site-packages/cupy/fft/_callback.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1719 |
vllm/lib/python3.10/site-packages/cupy/cuda/graph.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1717 |
vllm/lib/python3.10/site-packages/cupy/cuda/cufft.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1718 |
vllm/lib/python3.10/site-packages/cupy/fft/_callback.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1719 |
vllm/lib/python3.10/site-packages/cupy/cuda/graph.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1720 |
+
parrot/lib/python3.10/site-packages/scipy/stats/__pycache__/_morestats.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1721 |
+
parrot/lib/python3.10/site-packages/scipy/linalg/_decomp_update.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1722 |
+
parrot/lib/python3.10/site-packages/scipy/optimize/__pycache__/_optimize.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1723 |
+
vllm/lib/python3.10/site-packages/cupy/lib/_polynomial.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
parrot/lib/python3.10/site-packages/narwhals/_arrow/__init__.py
ADDED
|
File without changes
|
parrot/lib/python3.10/site-packages/narwhals/_arrow/__pycache__/namespace.cpython-310.pyc
ADDED
|
Binary file (11.2 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/narwhals/_arrow/dataframe.py
ADDED
|
@@ -0,0 +1,617 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import TYPE_CHECKING
|
| 4 |
+
from typing import Any
|
| 5 |
+
from typing import Iterable
|
| 6 |
+
from typing import Iterator
|
| 7 |
+
from typing import Literal
|
| 8 |
+
from typing import Sequence
|
| 9 |
+
from typing import overload
|
| 10 |
+
|
| 11 |
+
from narwhals._arrow.utils import broadcast_series
|
| 12 |
+
from narwhals._arrow.utils import convert_str_slice_to_int_slice
|
| 13 |
+
from narwhals._arrow.utils import select_rows
|
| 14 |
+
from narwhals._arrow.utils import translate_dtype
|
| 15 |
+
from narwhals._arrow.utils import validate_dataframe_comparand
|
| 16 |
+
from narwhals._expression_parsing import evaluate_into_exprs
|
| 17 |
+
from narwhals.dependencies import get_pyarrow
|
| 18 |
+
from narwhals.dependencies import is_numpy_array
|
| 19 |
+
from narwhals.utils import Implementation
|
| 20 |
+
from narwhals.utils import flatten
|
| 21 |
+
from narwhals.utils import generate_unique_token
|
| 22 |
+
from narwhals.utils import is_sequence_but_not_str
|
| 23 |
+
from narwhals.utils import parse_columns_to_drop
|
| 24 |
+
|
| 25 |
+
if TYPE_CHECKING:
|
| 26 |
+
import numpy as np
|
| 27 |
+
import pyarrow as pa
|
| 28 |
+
from typing_extensions import Self
|
| 29 |
+
|
| 30 |
+
from narwhals._arrow.group_by import ArrowGroupBy
|
| 31 |
+
from narwhals._arrow.namespace import ArrowNamespace
|
| 32 |
+
from narwhals._arrow.series import ArrowSeries
|
| 33 |
+
from narwhals._arrow.typing import IntoArrowExpr
|
| 34 |
+
from narwhals.dtypes import DType
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class ArrowDataFrame:
|
| 38 |
+
# --- not in the spec ---
|
| 39 |
+
def __init__(
|
| 40 |
+
self, native_dataframe: pa.Table, *, backend_version: tuple[int, ...]
|
| 41 |
+
) -> None:
|
| 42 |
+
self._native_frame = native_dataframe
|
| 43 |
+
self._implementation = Implementation.PYARROW
|
| 44 |
+
self._backend_version = backend_version
|
| 45 |
+
|
| 46 |
+
def __narwhals_namespace__(self) -> ArrowNamespace:
|
| 47 |
+
from narwhals._arrow.namespace import ArrowNamespace
|
| 48 |
+
|
| 49 |
+
return ArrowNamespace(backend_version=self._backend_version)
|
| 50 |
+
|
| 51 |
+
def __native_namespace__(self) -> Any:
|
| 52 |
+
return get_pyarrow()
|
| 53 |
+
|
| 54 |
+
def __narwhals_dataframe__(self) -> Self:
|
| 55 |
+
return self
|
| 56 |
+
|
| 57 |
+
def __narwhals_lazyframe__(self) -> Self:
|
| 58 |
+
return self
|
| 59 |
+
|
| 60 |
+
def _from_native_frame(self, df: Any) -> Self:
|
| 61 |
+
return self.__class__(df, backend_version=self._backend_version)
|
| 62 |
+
|
| 63 |
+
@property
|
| 64 |
+
def shape(self) -> tuple[int, int]:
|
| 65 |
+
return self._native_frame.shape # type: ignore[no-any-return]
|
| 66 |
+
|
| 67 |
+
def __len__(self) -> int:
|
| 68 |
+
return len(self._native_frame)
|
| 69 |
+
|
| 70 |
+
def row(self, index: int) -> tuple[Any, ...]:
|
| 71 |
+
return tuple(col[index] for col in self._native_frame)
|
| 72 |
+
|
| 73 |
+
def rows(
|
| 74 |
+
self, *, named: bool = False
|
| 75 |
+
) -> list[tuple[Any, ...]] | list[dict[str, Any]]:
|
| 76 |
+
if not named:
|
| 77 |
+
msg = "Unnamed rows are not yet supported on PyArrow tables"
|
| 78 |
+
raise NotImplementedError(msg)
|
| 79 |
+
return self._native_frame.to_pylist() # type: ignore[no-any-return]
|
| 80 |
+
|
| 81 |
+
def iter_rows(
|
| 82 |
+
self,
|
| 83 |
+
*,
|
| 84 |
+
named: bool = False,
|
| 85 |
+
buffer_size: int = 512,
|
| 86 |
+
) -> Iterator[tuple[Any, ...]] | Iterator[dict[str, Any]]:
|
| 87 |
+
df = self._native_frame
|
| 88 |
+
num_rows = df.num_rows
|
| 89 |
+
|
| 90 |
+
if not named:
|
| 91 |
+
for i in range(0, num_rows, buffer_size):
|
| 92 |
+
rows = df[i : i + buffer_size].to_pydict().values()
|
| 93 |
+
yield from zip(*rows)
|
| 94 |
+
else:
|
| 95 |
+
for i in range(0, num_rows, buffer_size):
|
| 96 |
+
yield from df[i : i + buffer_size].to_pylist()
|
| 97 |
+
|
| 98 |
+
def get_column(self, name: str) -> ArrowSeries:
|
| 99 |
+
from narwhals._arrow.series import ArrowSeries
|
| 100 |
+
|
| 101 |
+
if not isinstance(name, str):
|
| 102 |
+
msg = f"Expected str, got: {type(name)}"
|
| 103 |
+
raise TypeError(msg)
|
| 104 |
+
|
| 105 |
+
return ArrowSeries(
|
| 106 |
+
self._native_frame[name],
|
| 107 |
+
name=name,
|
| 108 |
+
backend_version=self._backend_version,
|
| 109 |
+
)
|
| 110 |
+
|
| 111 |
+
def __array__(self, dtype: Any = None, copy: bool | None = None) -> np.ndarray:
|
| 112 |
+
return self._native_frame.__array__(dtype, copy=copy)
|
| 113 |
+
|
| 114 |
+
@overload
|
| 115 |
+
def __getitem__(self, item: tuple[Sequence[int], str | int]) -> ArrowSeries: ... # type: ignore[overload-overlap]
|
| 116 |
+
|
| 117 |
+
@overload
|
| 118 |
+
def __getitem__(self, item: Sequence[int]) -> ArrowDataFrame: ...
|
| 119 |
+
|
| 120 |
+
@overload
|
| 121 |
+
def __getitem__(self, item: str) -> ArrowSeries: ...
|
| 122 |
+
|
| 123 |
+
@overload
|
| 124 |
+
def __getitem__(self, item: slice) -> ArrowDataFrame: ...
|
| 125 |
+
|
| 126 |
+
@overload
|
| 127 |
+
def __getitem__(self, item: tuple[slice, slice]) -> ArrowDataFrame: ...
|
| 128 |
+
|
| 129 |
+
def __getitem__(
|
| 130 |
+
self,
|
| 131 |
+
item: str
|
| 132 |
+
| slice
|
| 133 |
+
| Sequence[int]
|
| 134 |
+
| Sequence[str]
|
| 135 |
+
| tuple[Sequence[int], str | int]
|
| 136 |
+
| tuple[slice, str | int]
|
| 137 |
+
| tuple[slice, slice],
|
| 138 |
+
) -> ArrowSeries | ArrowDataFrame:
|
| 139 |
+
if isinstance(item, tuple):
|
| 140 |
+
item = tuple(list(i) if is_sequence_but_not_str(i) else i for i in item)
|
| 141 |
+
|
| 142 |
+
if isinstance(item, str):
|
| 143 |
+
from narwhals._arrow.series import ArrowSeries
|
| 144 |
+
|
| 145 |
+
return ArrowSeries(
|
| 146 |
+
self._native_frame[item],
|
| 147 |
+
name=item,
|
| 148 |
+
backend_version=self._backend_version,
|
| 149 |
+
)
|
| 150 |
+
elif (
|
| 151 |
+
isinstance(item, tuple)
|
| 152 |
+
and len(item) == 2
|
| 153 |
+
and is_sequence_but_not_str(item[1])
|
| 154 |
+
):
|
| 155 |
+
if len(item[1]) == 0:
|
| 156 |
+
# Return empty dataframe
|
| 157 |
+
return self._from_native_frame(self._native_frame.slice(0, 0).select([]))
|
| 158 |
+
selected_rows = select_rows(self._native_frame, item[0])
|
| 159 |
+
return self._from_native_frame(selected_rows.select(item[1]))
|
| 160 |
+
|
| 161 |
+
elif isinstance(item, tuple) and len(item) == 2:
|
| 162 |
+
if isinstance(item[1], slice):
|
| 163 |
+
columns = self.columns
|
| 164 |
+
if isinstance(item[1].start, str) or isinstance(item[1].stop, str):
|
| 165 |
+
start, stop, step = convert_str_slice_to_int_slice(item[1], columns)
|
| 166 |
+
return self._from_native_frame(
|
| 167 |
+
self._native_frame.take(item[0]).select(columns[start:stop:step])
|
| 168 |
+
)
|
| 169 |
+
if isinstance(item[1].start, int) or isinstance(item[1].stop, int):
|
| 170 |
+
return self._from_native_frame(
|
| 171 |
+
self._native_frame.take(item[0]).select(
|
| 172 |
+
columns[item[1].start : item[1].stop : item[1].step]
|
| 173 |
+
)
|
| 174 |
+
)
|
| 175 |
+
msg = f"Expected slice of integers or strings, got: {type(item[1])}" # pragma: no cover
|
| 176 |
+
raise TypeError(msg) # pragma: no cover
|
| 177 |
+
from narwhals._arrow.series import ArrowSeries
|
| 178 |
+
|
| 179 |
+
# PyArrow columns are always strings
|
| 180 |
+
col_name = item[1] if isinstance(item[1], str) else self.columns[item[1]]
|
| 181 |
+
if isinstance(item[0], str): # pragma: no cover
|
| 182 |
+
msg = "Can not slice with tuple with the first element as a str"
|
| 183 |
+
raise TypeError(msg)
|
| 184 |
+
if (isinstance(item[0], slice)) and (item[0] == slice(None)):
|
| 185 |
+
return ArrowSeries(
|
| 186 |
+
self._native_frame[col_name],
|
| 187 |
+
name=col_name,
|
| 188 |
+
backend_version=self._backend_version,
|
| 189 |
+
)
|
| 190 |
+
selected_rows = select_rows(self._native_frame, item[0])
|
| 191 |
+
return ArrowSeries(
|
| 192 |
+
selected_rows[col_name],
|
| 193 |
+
name=col_name,
|
| 194 |
+
backend_version=self._backend_version,
|
| 195 |
+
)
|
| 196 |
+
|
| 197 |
+
elif isinstance(item, slice):
|
| 198 |
+
if item.step is not None and item.step != 1:
|
| 199 |
+
msg = "Slicing with step is not supported on PyArrow tables"
|
| 200 |
+
raise NotImplementedError(msg)
|
| 201 |
+
columns = self.columns
|
| 202 |
+
if isinstance(item.start, str) or isinstance(item.stop, str):
|
| 203 |
+
start, stop, step = convert_str_slice_to_int_slice(item, columns)
|
| 204 |
+
return self._from_native_frame(
|
| 205 |
+
self._native_frame.select(columns[start:stop:step])
|
| 206 |
+
)
|
| 207 |
+
start = item.start or 0
|
| 208 |
+
stop = item.stop if item.stop is not None else len(self._native_frame)
|
| 209 |
+
return self._from_native_frame(
|
| 210 |
+
self._native_frame.slice(start, stop - start),
|
| 211 |
+
)
|
| 212 |
+
|
| 213 |
+
elif isinstance(item, Sequence) or (is_numpy_array(item) and item.ndim == 1):
|
| 214 |
+
if (
|
| 215 |
+
isinstance(item, Sequence)
|
| 216 |
+
and all(isinstance(x, str) for x in item)
|
| 217 |
+
and len(item) > 0
|
| 218 |
+
):
|
| 219 |
+
return self._from_native_frame(self._native_frame.select(item))
|
| 220 |
+
if isinstance(item, Sequence) and len(item) == 0:
|
| 221 |
+
return self._from_native_frame(self._native_frame.slice(0, 0))
|
| 222 |
+
return self._from_native_frame(self._native_frame.take(item))
|
| 223 |
+
|
| 224 |
+
else: # pragma: no cover
|
| 225 |
+
msg = f"Expected str or slice, got: {type(item)}"
|
| 226 |
+
raise TypeError(msg)
|
| 227 |
+
|
| 228 |
+
@property
|
| 229 |
+
def schema(self) -> dict[str, DType]:
|
| 230 |
+
schema = self._native_frame.schema
|
| 231 |
+
return {
|
| 232 |
+
name: translate_dtype(dtype)
|
| 233 |
+
for name, dtype in zip(schema.names, schema.types)
|
| 234 |
+
}
|
| 235 |
+
|
| 236 |
+
def collect_schema(self) -> dict[str, DType]:
|
| 237 |
+
return self.schema
|
| 238 |
+
|
| 239 |
+
@property
|
| 240 |
+
def columns(self) -> list[str]:
|
| 241 |
+
return self._native_frame.schema.names # type: ignore[no-any-return]
|
| 242 |
+
|
| 243 |
+
def select(
|
| 244 |
+
self,
|
| 245 |
+
*exprs: IntoArrowExpr,
|
| 246 |
+
**named_exprs: IntoArrowExpr,
|
| 247 |
+
) -> Self:
|
| 248 |
+
import pyarrow as pa # ignore-banned-import()
|
| 249 |
+
|
| 250 |
+
new_series = evaluate_into_exprs(self, *exprs, **named_exprs)
|
| 251 |
+
if not new_series:
|
| 252 |
+
# return empty dataframe, like Polars does
|
| 253 |
+
return self._from_native_frame(self._native_frame.__class__.from_arrays([]))
|
| 254 |
+
names = [s.name for s in new_series]
|
| 255 |
+
df = pa.Table.from_arrays(
|
| 256 |
+
broadcast_series(new_series),
|
| 257 |
+
names=names,
|
| 258 |
+
)
|
| 259 |
+
return self._from_native_frame(df)
|
| 260 |
+
|
| 261 |
+
def with_columns(
|
| 262 |
+
self,
|
| 263 |
+
*exprs: IntoArrowExpr,
|
| 264 |
+
**named_exprs: IntoArrowExpr,
|
| 265 |
+
) -> Self:
|
| 266 |
+
new_columns = evaluate_into_exprs(self, *exprs, **named_exprs)
|
| 267 |
+
new_column_name_to_new_column_map = {s.name: s for s in new_columns}
|
| 268 |
+
to_concat = []
|
| 269 |
+
output_names = []
|
| 270 |
+
# Make sure to preserve column order
|
| 271 |
+
length = len(self)
|
| 272 |
+
for name in self.columns:
|
| 273 |
+
if name in new_column_name_to_new_column_map:
|
| 274 |
+
to_concat.append(
|
| 275 |
+
validate_dataframe_comparand(
|
| 276 |
+
length=length,
|
| 277 |
+
other=new_column_name_to_new_column_map.pop(name),
|
| 278 |
+
backend_version=self._backend_version,
|
| 279 |
+
)
|
| 280 |
+
)
|
| 281 |
+
else:
|
| 282 |
+
to_concat.append(self._native_frame[name])
|
| 283 |
+
output_names.append(name)
|
| 284 |
+
for s in new_column_name_to_new_column_map:
|
| 285 |
+
to_concat.append(
|
| 286 |
+
validate_dataframe_comparand(
|
| 287 |
+
length=length,
|
| 288 |
+
other=new_column_name_to_new_column_map[s],
|
| 289 |
+
backend_version=self._backend_version,
|
| 290 |
+
)
|
| 291 |
+
)
|
| 292 |
+
output_names.append(s)
|
| 293 |
+
df = self._native_frame.__class__.from_arrays(to_concat, names=output_names)
|
| 294 |
+
return self._from_native_frame(df)
|
| 295 |
+
|
| 296 |
+
def group_by(self, *keys: str) -> ArrowGroupBy:
|
| 297 |
+
from narwhals._arrow.group_by import ArrowGroupBy
|
| 298 |
+
|
| 299 |
+
return ArrowGroupBy(self, list(keys))
|
| 300 |
+
|
| 301 |
+
def join(
|
| 302 |
+
self,
|
| 303 |
+
other: Self,
|
| 304 |
+
*,
|
| 305 |
+
how: Literal["left", "inner", "outer", "cross", "anti", "semi"] = "inner",
|
| 306 |
+
left_on: str | list[str] | None,
|
| 307 |
+
right_on: str | list[str] | None,
|
| 308 |
+
suffix: str,
|
| 309 |
+
) -> Self:
|
| 310 |
+
how_to_join_map = {
|
| 311 |
+
"anti": "left anti",
|
| 312 |
+
"semi": "left semi",
|
| 313 |
+
"inner": "inner",
|
| 314 |
+
"left": "left outer",
|
| 315 |
+
}
|
| 316 |
+
|
| 317 |
+
if how == "cross":
|
| 318 |
+
plx = self.__narwhals_namespace__()
|
| 319 |
+
key_token = generate_unique_token(
|
| 320 |
+
n_bytes=8, columns=[*self.columns, *other.columns]
|
| 321 |
+
)
|
| 322 |
+
|
| 323 |
+
return self._from_native_frame(
|
| 324 |
+
self.with_columns(**{key_token: plx.lit(0, None)})
|
| 325 |
+
._native_frame.join(
|
| 326 |
+
other.with_columns(**{key_token: plx.lit(0, None)})._native_frame,
|
| 327 |
+
keys=key_token,
|
| 328 |
+
right_keys=key_token,
|
| 329 |
+
join_type="inner",
|
| 330 |
+
right_suffix=suffix,
|
| 331 |
+
)
|
| 332 |
+
.drop([key_token]),
|
| 333 |
+
)
|
| 334 |
+
|
| 335 |
+
return self._from_native_frame(
|
| 336 |
+
self._native_frame.join(
|
| 337 |
+
other._native_frame,
|
| 338 |
+
keys=left_on,
|
| 339 |
+
right_keys=right_on,
|
| 340 |
+
join_type=how_to_join_map[how],
|
| 341 |
+
right_suffix=suffix,
|
| 342 |
+
),
|
| 343 |
+
)
|
| 344 |
+
|
| 345 |
+
def join_asof(
|
| 346 |
+
self,
|
| 347 |
+
other: Self,
|
| 348 |
+
*,
|
| 349 |
+
left_on: str | None = None,
|
| 350 |
+
right_on: str | None = None,
|
| 351 |
+
on: str | None = None,
|
| 352 |
+
by_left: str | list[str] | None = None,
|
| 353 |
+
by_right: str | list[str] | None = None,
|
| 354 |
+
by: str | list[str] | None = None,
|
| 355 |
+
strategy: Literal["backward", "forward", "nearest"] = "backward",
|
| 356 |
+
) -> Self:
|
| 357 |
+
msg = "join_asof is not yet supported on PyArrow tables"
|
| 358 |
+
raise NotImplementedError(msg)
|
| 359 |
+
|
| 360 |
+
def drop(self: Self, columns: list[str], strict: bool) -> Self: # noqa: FBT001
|
| 361 |
+
to_drop = parse_columns_to_drop(
|
| 362 |
+
compliant_frame=self, columns=columns, strict=strict
|
| 363 |
+
)
|
| 364 |
+
return self._from_native_frame(self._native_frame.drop(to_drop))
|
| 365 |
+
|
| 366 |
+
def drop_nulls(self: Self, subset: str | list[str] | None) -> Self:
|
| 367 |
+
if subset is None:
|
| 368 |
+
return self._from_native_frame(self._native_frame.drop_null())
|
| 369 |
+
subset = [subset] if isinstance(subset, str) else subset
|
| 370 |
+
plx = self.__narwhals_namespace__()
|
| 371 |
+
return self.filter(~plx.any_horizontal(plx.col(*subset).is_null()))
|
| 372 |
+
|
| 373 |
+
def sort(
|
| 374 |
+
self,
|
| 375 |
+
by: str | Iterable[str],
|
| 376 |
+
*more_by: str,
|
| 377 |
+
descending: bool | Sequence[bool] = False,
|
| 378 |
+
) -> Self:
|
| 379 |
+
flat_keys = flatten([*flatten([by]), *more_by])
|
| 380 |
+
df = self._native_frame
|
| 381 |
+
|
| 382 |
+
if isinstance(descending, bool):
|
| 383 |
+
order = "descending" if descending else "ascending"
|
| 384 |
+
sorting = [(key, order) for key in flat_keys]
|
| 385 |
+
else:
|
| 386 |
+
sorting = [
|
| 387 |
+
(key, "descending" if is_descending else "ascending")
|
| 388 |
+
for key, is_descending in zip(flat_keys, descending)
|
| 389 |
+
]
|
| 390 |
+
return self._from_native_frame(df.sort_by(sorting=sorting))
|
| 391 |
+
|
| 392 |
+
def to_pandas(self) -> Any:
|
| 393 |
+
return self._native_frame.to_pandas()
|
| 394 |
+
|
| 395 |
+
def to_numpy(self) -> Any:
|
| 396 |
+
import numpy as np # ignore-banned-import
|
| 397 |
+
|
| 398 |
+
return np.column_stack([col.to_numpy() for col in self._native_frame.columns])
|
| 399 |
+
|
| 400 |
+
def to_dict(self, *, as_series: bool) -> Any:
|
| 401 |
+
df = self._native_frame
|
| 402 |
+
|
| 403 |
+
names_and_values = zip(df.column_names, df.columns)
|
| 404 |
+
if as_series:
|
| 405 |
+
from narwhals._arrow.series import ArrowSeries
|
| 406 |
+
|
| 407 |
+
return {
|
| 408 |
+
name: ArrowSeries(col, name=name, backend_version=self._backend_version)
|
| 409 |
+
for name, col in names_and_values
|
| 410 |
+
}
|
| 411 |
+
else:
|
| 412 |
+
return {name: col.to_pylist() for name, col in names_and_values}
|
| 413 |
+
|
| 414 |
+
def with_row_index(self, name: str) -> Self:
|
| 415 |
+
import pyarrow as pa # ignore-banned-import()
|
| 416 |
+
|
| 417 |
+
df = self._native_frame
|
| 418 |
+
|
| 419 |
+
row_indices = pa.array(range(df.num_rows))
|
| 420 |
+
return self._from_native_frame(df.append_column(name, row_indices))
|
| 421 |
+
|
| 422 |
+
def filter(
|
| 423 |
+
self,
|
| 424 |
+
*predicates: IntoArrowExpr,
|
| 425 |
+
) -> Self:
|
| 426 |
+
if (
|
| 427 |
+
len(predicates) == 1
|
| 428 |
+
and isinstance(predicates[0], list)
|
| 429 |
+
and all(isinstance(x, bool) for x in predicates[0])
|
| 430 |
+
):
|
| 431 |
+
mask = predicates[0]
|
| 432 |
+
else:
|
| 433 |
+
plx = self.__narwhals_namespace__()
|
| 434 |
+
expr = plx.all_horizontal(*predicates)
|
| 435 |
+
# Safety: all_horizontal's expression only returns a single column.
|
| 436 |
+
mask = expr._call(self)[0]._native_series
|
| 437 |
+
return self._from_native_frame(self._native_frame.filter(mask))
|
| 438 |
+
|
| 439 |
+
def null_count(self) -> Self:
|
| 440 |
+
import pyarrow as pa # ignore-banned-import()
|
| 441 |
+
|
| 442 |
+
df = self._native_frame
|
| 443 |
+
names_and_values = zip(df.column_names, df.columns)
|
| 444 |
+
|
| 445 |
+
return self._from_native_frame(
|
| 446 |
+
pa.table({name: [col.null_count] for name, col in names_and_values})
|
| 447 |
+
)
|
| 448 |
+
|
| 449 |
+
def head(self, n: int) -> Self:
|
| 450 |
+
df = self._native_frame
|
| 451 |
+
if n >= 0:
|
| 452 |
+
return self._from_native_frame(df.slice(0, n))
|
| 453 |
+
else:
|
| 454 |
+
num_rows = df.num_rows
|
| 455 |
+
return self._from_native_frame(df.slice(0, max(0, num_rows + n)))
|
| 456 |
+
|
| 457 |
+
def tail(self, n: int) -> Self:
|
| 458 |
+
df = self._native_frame
|
| 459 |
+
if n >= 0:
|
| 460 |
+
num_rows = df.num_rows
|
| 461 |
+
return self._from_native_frame(df.slice(max(0, num_rows - n)))
|
| 462 |
+
else:
|
| 463 |
+
return self._from_native_frame(df.slice(abs(n)))
|
| 464 |
+
|
| 465 |
+
def lazy(self) -> Self:
|
| 466 |
+
return self
|
| 467 |
+
|
| 468 |
+
def collect(self) -> ArrowDataFrame:
|
| 469 |
+
return ArrowDataFrame(self._native_frame, backend_version=self._backend_version)
|
| 470 |
+
|
| 471 |
+
def clone(self) -> Self:
|
| 472 |
+
msg = "clone is not yet supported on PyArrow tables"
|
| 473 |
+
raise NotImplementedError(msg)
|
| 474 |
+
|
| 475 |
+
def is_empty(self: Self) -> bool:
|
| 476 |
+
return self.shape[0] == 0
|
| 477 |
+
|
| 478 |
+
def item(self: Self, row: int | None = None, column: int | str | None = None) -> Any:
|
| 479 |
+
if row is None and column is None:
|
| 480 |
+
if self.shape != (1, 1):
|
| 481 |
+
msg = (
|
| 482 |
+
"can only call `.item()` if the dataframe is of shape (1, 1),"
|
| 483 |
+
" or if explicit row/col values are provided;"
|
| 484 |
+
f" frame has shape {self.shape!r}"
|
| 485 |
+
)
|
| 486 |
+
raise ValueError(msg)
|
| 487 |
+
return self._native_frame[0][0]
|
| 488 |
+
|
| 489 |
+
elif row is None or column is None:
|
| 490 |
+
msg = "cannot call `.item()` with only one of `row` or `column`"
|
| 491 |
+
raise ValueError(msg)
|
| 492 |
+
|
| 493 |
+
_col = self.columns.index(column) if isinstance(column, str) else column
|
| 494 |
+
return self._native_frame[_col][row]
|
| 495 |
+
|
| 496 |
+
def rename(self, mapping: dict[str, str]) -> Self:
|
| 497 |
+
df = self._native_frame
|
| 498 |
+
new_cols = [mapping.get(c, c) for c in df.column_names]
|
| 499 |
+
return self._from_native_frame(df.rename_columns(new_cols))
|
| 500 |
+
|
| 501 |
+
def write_parquet(self, file: Any) -> Any:
|
| 502 |
+
import pyarrow.parquet as pp # ignore-banned-import
|
| 503 |
+
|
| 504 |
+
pp.write_table(self._native_frame, file)
|
| 505 |
+
|
| 506 |
+
def write_csv(self, file: Any) -> Any:
|
| 507 |
+
import pyarrow as pa # ignore-banned-import
|
| 508 |
+
import pyarrow.csv as pa_csv # ignore-banned-import
|
| 509 |
+
|
| 510 |
+
pa_table = self._native_frame
|
| 511 |
+
if file is None:
|
| 512 |
+
csv_buffer = pa.BufferOutputStream()
|
| 513 |
+
pa_csv.write_csv(pa_table, csv_buffer)
|
| 514 |
+
return csv_buffer.getvalue().to_pybytes().decode()
|
| 515 |
+
return pa_csv.write_csv(pa_table, file)
|
| 516 |
+
|
| 517 |
+
def is_duplicated(self: Self) -> ArrowSeries:
|
| 518 |
+
import numpy as np # ignore-banned-import
|
| 519 |
+
import pyarrow as pa # ignore-banned-import()
|
| 520 |
+
import pyarrow.compute as pc # ignore-banned-import()
|
| 521 |
+
|
| 522 |
+
from narwhals._arrow.series import ArrowSeries
|
| 523 |
+
|
| 524 |
+
df = self._native_frame
|
| 525 |
+
|
| 526 |
+
columns = self.columns
|
| 527 |
+
col_token = generate_unique_token(n_bytes=8, columns=columns)
|
| 528 |
+
row_count = (
|
| 529 |
+
df.append_column(col_token, pa.array(np.arange(len(self))))
|
| 530 |
+
.group_by(columns)
|
| 531 |
+
.aggregate([(col_token, "count")])
|
| 532 |
+
)
|
| 533 |
+
is_duplicated = pc.greater(
|
| 534 |
+
df.join(
|
| 535 |
+
row_count, keys=columns, right_keys=columns, join_type="inner"
|
| 536 |
+
).column(f"{col_token}_count"),
|
| 537 |
+
1,
|
| 538 |
+
)
|
| 539 |
+
return ArrowSeries(is_duplicated, name="", backend_version=self._backend_version)
|
| 540 |
+
|
| 541 |
+
def is_unique(self: Self) -> ArrowSeries:
    """Return a boolean ArrowSeries: True for rows that occur exactly once."""
    import pyarrow.compute as pc  # ignore-banned-import()

    from narwhals._arrow.series import ArrowSeries

    # A row is unique iff it is not duplicated.
    duplicated_mask = self.is_duplicated()._native_series
    return ArrowSeries(
        pc.invert(duplicated_mask), name="", backend_version=self._backend_version
    )
|
| 551 |
+
|
| 552 |
+
def unique(
    self: Self,
    subset: str | list[str] | None,
    *,
    keep: Literal["any", "first", "last", "none"] = "any",
    maintain_order: bool = False,
) -> Self:
    """Drop duplicate rows, judged on `subset` columns (all columns if None).

    `keep` selects which row of each duplicate group survives: "first"/"any"
    keep the earliest, "last" the latest, "none" drops every duplicated row.

    NOTE:
        The param `maintain_order` is only here for compatibility with the polars API
        and has no effect on the output.
    """
    import numpy as np  # ignore-banned-import
    import pyarrow as pa  # ignore-banned-import()
    import pyarrow.compute as pc  # ignore-banned-import()

    df = self._native_frame

    # Normalize `subset` to a non-empty list of column names.
    if isinstance(subset, str):
        subset = [subset]
    subset = subset or self.columns

    if keep in {"any", "first", "last"}:
        # Tag rows with their position, then keep the min (first/any) or
        # max (last) position within each `subset` group.
        agg_func_map = {"any": "min", "first": "min", "last": "max"}

        agg_func = agg_func_map[keep]
        col_token = generate_unique_token(n_bytes=8, columns=self.columns)
        keep_idx = (
            df.append_column(col_token, pa.array(np.arange(len(self))))
            .group_by(subset)
            .aggregate([(col_token, agg_func)])
            .column(f"{col_token}_{agg_func}")
        )

        # NOTE(review): group_by/aggregate output order is not guaranteed,
        # so surviving rows may come back in arbitrary order — consistent
        # with `maintain_order` being documented as a no-op.
        return self._from_native_frame(pc.take(df, keep_idx))

    # keep == "none": retain only rows whose `subset` values are unique.
    keep_idx = self.select(*subset).is_unique()
    return self.filter(keep_idx)
|
| 590 |
+
|
| 591 |
+
def gather_every(self: Self, n: int, offset: int = 0) -> Self:
    """Take every `n`-th row, starting from row `offset`."""
    sliced = self._native_frame[offset::n]
    return self._from_native_frame(sliced)
|
| 593 |
+
|
| 594 |
+
def to_arrow(self: Self) -> Any:
    """Return the backing pyarrow table as-is (no copy)."""
    return self._native_frame
|
| 596 |
+
|
| 597 |
+
def sample(
    self: Self,
    n: int | None = None,
    *,
    fraction: float | None = None,
    with_replacement: bool = False,
    seed: int | None = None,
) -> Self:
    """Randomly sample rows: `n` rows, or `fraction` of the frame if `n` is None.

    `seed` makes the draw reproducible; `with_replacement` allows a row to
    be drawn more than once.
    """
    import numpy as np  # ignore-banned-import
    import pyarrow.compute as pc  # ignore-banned-import()

    table = self._native_frame
    total_rows = len(self)
    sample_size = n
    if sample_size is None and fraction is not None:
        # Derive the row count from the requested fraction (truncated).
        sample_size = int(total_rows * fraction)

    generator = np.random.default_rng(seed=seed)
    row_indices = generator.choice(
        np.arange(0, total_rows), size=sample_size, replace=with_replacement
    )

    return self._from_native_frame(pc.take(table, row_indices))
|
parrot/lib/python3.10/site-packages/narwhals/_arrow/expr.py
ADDED
|
@@ -0,0 +1,671 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import TYPE_CHECKING
|
| 4 |
+
from typing import Any
|
| 5 |
+
from typing import Callable
|
| 6 |
+
from typing import Literal
|
| 7 |
+
|
| 8 |
+
from narwhals._expression_parsing import reuse_series_implementation
|
| 9 |
+
from narwhals._expression_parsing import reuse_series_namespace_implementation
|
| 10 |
+
from narwhals.utils import Implementation
|
| 11 |
+
|
| 12 |
+
if TYPE_CHECKING:
|
| 13 |
+
from typing_extensions import Self
|
| 14 |
+
|
| 15 |
+
from narwhals._arrow.dataframe import ArrowDataFrame
|
| 16 |
+
from narwhals._arrow.namespace import ArrowNamespace
|
| 17 |
+
from narwhals._arrow.series import ArrowSeries
|
| 18 |
+
from narwhals._arrow.typing import IntoArrowExpr
|
| 19 |
+
from narwhals.dtypes import DType
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class ArrowExpr:
    """Expression over pyarrow-backed frames.

    Wraps a callable that, given an ``ArrowDataFrame``, evaluates to a list
    of ``ArrowSeries``. The metadata (``depth``, ``function_name``,
    ``root_names``, ``output_names``) is used by narwhals' expression
    parsing machinery. Most operations simply defer to the equivalent
    ``ArrowSeries`` method via ``reuse_series_implementation``.
    """

    def __init__(
        self,
        call: Callable[[ArrowDataFrame], list[ArrowSeries]],
        *,
        depth: int,
        function_name: str,
        root_names: list[str] | None,
        output_names: list[str] | None,
        backend_version: tuple[int, ...],
    ) -> None:
        self._call = call
        # Fix: `self._depth` was previously assigned twice; once is enough.
        self._depth = depth
        self._function_name = function_name
        self._root_names = root_names
        self._output_names = output_names
        self._implementation = Implementation.PYARROW
        self._backend_version = backend_version

    def __repr__(self) -> str:  # pragma: no cover
        # Fix: repr previously lacked the closing parenthesis.
        return (
            f"ArrowExpr("
            f"depth={self._depth}, "
            f"function_name={self._function_name}, "
            f"root_names={self._root_names}, "
            f"output_names={self._output_names})"
        )

    @classmethod
    def from_column_names(
        cls: type[Self], *column_names: str, backend_version: tuple[int, ...]
    ) -> Self:
        """Build a depth-0 `col` expression selecting the given columns."""
        from narwhals._arrow.series import ArrowSeries

        def func(df: ArrowDataFrame) -> list[ArrowSeries]:
            return [
                ArrowSeries(
                    df._native_frame[column_name],
                    name=column_name,
                    backend_version=df._backend_version,
                )
                for column_name in column_names
            ]

        return cls(
            func,
            depth=0,
            function_name="col",
            root_names=list(column_names),
            output_names=list(column_names),
            backend_version=backend_version,
        )

    def __narwhals_namespace__(self) -> ArrowNamespace:
        from narwhals._arrow.namespace import ArrowNamespace

        return ArrowNamespace(backend_version=self._backend_version)

    def __narwhals_expr__(self) -> None: ...

    # --- comparison operators -------------------------------------------
    def __eq__(self, other: ArrowExpr | Any) -> Self:  # type: ignore[override]
        return reuse_series_implementation(self, "__eq__", other=other)

    def __ne__(self, other: ArrowExpr | Any) -> Self:  # type: ignore[override]
        return reuse_series_implementation(self, "__ne__", other=other)

    def __ge__(self, other: ArrowExpr | Any) -> Self:
        return reuse_series_implementation(self, "__ge__", other=other)

    def __gt__(self, other: ArrowExpr | Any) -> Self:
        return reuse_series_implementation(self, "__gt__", other=other)

    def __le__(self, other: ArrowExpr | Any) -> Self:
        return reuse_series_implementation(self, "__le__", other=other)

    def __lt__(self, other: ArrowExpr | Any) -> Self:
        return reuse_series_implementation(self, "__lt__", other=other)

    # --- boolean operators ----------------------------------------------
    def __and__(self, other: ArrowExpr | bool | Any) -> Self:
        return reuse_series_implementation(self, "__and__", other=other)

    def __rand__(self, other: ArrowExpr | bool | Any) -> Self:
        return reuse_series_implementation(self, "__rand__", other=other)

    def __or__(self, other: ArrowExpr | bool | Any) -> Self:
        return reuse_series_implementation(self, "__or__", other=other)

    def __ror__(self, other: ArrowExpr | bool | Any) -> Self:
        return reuse_series_implementation(self, "__ror__", other=other)

    # --- arithmetic operators -------------------------------------------
    def __add__(self, other: ArrowExpr | Any) -> Self:
        return reuse_series_implementation(self, "__add__", other)

    def __radd__(self, other: ArrowExpr | Any) -> Self:
        return reuse_series_implementation(self, "__radd__", other)

    def __sub__(self, other: ArrowExpr | Any) -> Self:
        return reuse_series_implementation(self, "__sub__", other)

    def __rsub__(self, other: ArrowExpr | Any) -> Self:
        return reuse_series_implementation(self, "__rsub__", other)

    def __mul__(self, other: ArrowExpr | Any) -> Self:
        return reuse_series_implementation(self, "__mul__", other)

    def __rmul__(self, other: ArrowExpr | Any) -> Self:
        return reuse_series_implementation(self, "__rmul__", other)

    def __pow__(self, other: ArrowExpr | Any) -> Self:
        return reuse_series_implementation(self, "__pow__", other)

    def __rpow__(self, other: ArrowExpr | Any) -> Self:
        return reuse_series_implementation(self, "__rpow__", other)

    def __floordiv__(self, other: ArrowExpr | Any) -> Self:
        return reuse_series_implementation(self, "__floordiv__", other)

    def __rfloordiv__(self, other: ArrowExpr | Any) -> Self:
        return reuse_series_implementation(self, "__rfloordiv__", other)

    def __truediv__(self, other: ArrowExpr | Any) -> Self:
        return reuse_series_implementation(self, "__truediv__", other)

    def __rtruediv__(self, other: ArrowExpr | Any) -> Self:
        return reuse_series_implementation(self, "__rtruediv__", other)

    def __mod__(self, other: ArrowExpr | Any) -> Self:
        return reuse_series_implementation(self, "__mod__", other)

    def __rmod__(self, other: ArrowExpr | Any) -> Self:
        return reuse_series_implementation(self, "__rmod__", other)

    def __invert__(self) -> Self:
        return reuse_series_implementation(self, "__invert__")

    # --- reductions (returns_scalar=True) and elementwise ops -----------
    def len(self) -> Self:
        return reuse_series_implementation(self, "len", returns_scalar=True)

    def filter(self, *predicates: IntoArrowExpr) -> Self:
        # Multiple predicates are combined with logical AND.
        plx = self.__narwhals_namespace__()
        expr = plx.all_horizontal(*predicates)
        return reuse_series_implementation(self, "filter", other=expr)

    def mean(self) -> Self:
        return reuse_series_implementation(self, "mean", returns_scalar=True)

    def count(self) -> Self:
        return reuse_series_implementation(self, "count", returns_scalar=True)

    def n_unique(self) -> Self:
        return reuse_series_implementation(self, "n_unique", returns_scalar=True)

    def std(self, ddof: int = 1) -> Self:
        return reuse_series_implementation(self, "std", ddof=ddof, returns_scalar=True)

    def cast(self, dtype: DType) -> Self:
        return reuse_series_implementation(self, "cast", dtype)

    def abs(self) -> Self:
        return reuse_series_implementation(self, "abs")

    def diff(self) -> Self:
        return reuse_series_implementation(self, "diff")

    def cum_sum(self) -> Self:
        return reuse_series_implementation(self, "cum_sum")

    def round(self, decimals: int) -> Self:
        return reuse_series_implementation(self, "round", decimals)

    def any(self) -> Self:
        return reuse_series_implementation(self, "any", returns_scalar=True)

    def min(self) -> Self:
        return reuse_series_implementation(self, "min", returns_scalar=True)

    def max(self) -> Self:
        return reuse_series_implementation(self, "max", returns_scalar=True)

    def all(self) -> Self:
        return reuse_series_implementation(self, "all", returns_scalar=True)

    def sum(self) -> Self:
        return reuse_series_implementation(self, "sum", returns_scalar=True)

    def drop_nulls(self) -> Self:
        return reuse_series_implementation(self, "drop_nulls")

    def shift(self, n: int) -> Self:
        return reuse_series_implementation(self, "shift", n)

    def alias(self, name: str) -> Self:
        # Define this one manually, so that we can
        # override `output_names` and not increase depth
        return self.__class__(
            lambda df: [series.alias(name) for series in self._call(df)],
            depth=self._depth,
            function_name=self._function_name,
            root_names=self._root_names,
            output_names=[name],
            backend_version=self._backend_version,
        )

    def null_count(self) -> Self:
        return reuse_series_implementation(self, "null_count", returns_scalar=True)

    def is_null(self) -> Self:
        return reuse_series_implementation(self, "is_null")

    def is_between(self, lower_bound: Any, upper_bound: Any, closed: str) -> Self:
        return reuse_series_implementation(
            self, "is_between", lower_bound, upper_bound, closed
        )

    def head(self, n: int) -> Self:
        return reuse_series_implementation(self, "head", n)

    def tail(self, n: int) -> Self:
        return reuse_series_implementation(self, "tail", n)

    def is_in(self, other: ArrowExpr | Any) -> Self:
        return reuse_series_implementation(self, "is_in", other)

    def arg_true(self) -> Self:
        return reuse_series_implementation(self, "arg_true")

    def sample(
        self: Self,
        n: int | None = None,
        *,
        fraction: float | None = None,
        with_replacement: bool = False,
        seed: int | None = None,
    ) -> Self:
        return reuse_series_implementation(
            self,
            "sample",
            n=n,
            fraction=fraction,
            with_replacement=with_replacement,
            seed=seed,
        )

    def fill_null(self: Self, value: Any) -> Self:
        return reuse_series_implementation(self, "fill_null", value=value)

    def is_duplicated(self: Self) -> Self:
        return reuse_series_implementation(self, "is_duplicated")

    def is_unique(self: Self) -> Self:
        return reuse_series_implementation(self, "is_unique")

    def is_first_distinct(self: Self) -> Self:
        return reuse_series_implementation(self, "is_first_distinct")

    def is_last_distinct(self: Self) -> Self:
        return reuse_series_implementation(self, "is_last_distinct")

    def unique(self: Self) -> Self:
        return reuse_series_implementation(self, "unique")

    def sort(self: Self, *, descending: bool = False, nulls_last: bool = False) -> Self:
        return reuse_series_implementation(
            self, "sort", descending=descending, nulls_last=nulls_last
        )

    def quantile(
        self,
        quantile: float,
        interpolation: Literal["nearest", "higher", "lower", "midpoint", "linear"],
    ) -> Self:
        return reuse_series_implementation(
            self, "quantile", quantile, interpolation, returns_scalar=True
        )

    def gather_every(self: Self, n: int, offset: int = 0) -> Self:
        return reuse_series_implementation(self, "gather_every", n=n, offset=offset)

    def clip(
        self: Self, lower_bound: Any | None = None, upper_bound: Any | None = None
    ) -> Self:
        return reuse_series_implementation(
            self, "clip", lower_bound=lower_bound, upper_bound=upper_bound
        )

    def over(self: Self, keys: list[str]) -> Self:
        """Evaluate this expression per group defined by `keys`, broadcast
        back to the original frame via a left join on the keys."""

        def func(df: ArrowDataFrame) -> list[ArrowSeries]:
            if self._output_names is None:
                msg = (
                    "Anonymous expressions are not supported in over.\n"
                    "Instead of `nw.all()`, try using a named expression, such as "
                    "`nw.col('a', 'b')`\n"
                )
                raise ValueError(msg)
            tmp = df.group_by(*keys).agg(self)
            tmp = df.select(*keys).join(
                tmp, how="left", left_on=keys, right_on=keys, suffix="_right"
            )
            return [tmp[name] for name in self._output_names]

        return self.__class__(
            func,
            depth=self._depth + 1,
            function_name=self._function_name + "->over",
            root_names=self._root_names,
            output_names=self._output_names,
            backend_version=self._backend_version,
        )

    def mode(self: Self) -> Self:
        return reuse_series_implementation(self, "mode")

    # --- sub-namespaces ---------------------------------------------------
    @property
    def dt(self: Self) -> ArrowExprDateTimeNamespace:
        return ArrowExprDateTimeNamespace(self)

    @property
    def str(self: Self) -> ArrowExprStringNamespace:
        return ArrowExprStringNamespace(self)

    @property
    def cat(self: Self) -> ArrowExprCatNamespace:
        return ArrowExprCatNamespace(self)

    @property
    def name(self: Self) -> ArrowExprNameNamespace:
        return ArrowExprNameNamespace(self)
|
| 350 |
+
|
| 351 |
+
|
| 352 |
+
class ArrowExprCatNamespace:
    """Categorical operations namespace, reached via ``expr.cat``."""

    def __init__(self, expr: ArrowExpr) -> None:
        self._expr = expr

    def get_categories(self) -> ArrowExpr:
        """Return the unique categories of the underlying dictionary-encoded column."""
        return reuse_series_namespace_implementation(
            self._expr, "cat", "get_categories"
        )
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
class ArrowExprDateTimeNamespace:
    """Datetime operations namespace, reached via ``expr.dt``.

    Every method delegates to the ``ArrowSeries.dt`` method of the same
    name via ``reuse_series_namespace_implementation``.
    """

    def __init__(self: Self, expr: ArrowExpr) -> None:
        self._expr = expr

    def to_string(self: Self, format: str) -> ArrowExpr:  # noqa: A002
        return reuse_series_namespace_implementation(
            self._expr, "dt", "to_string", format
        )

    # Calendar component extractors.
    def date(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(self._expr, "dt", "date")

    def year(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(self._expr, "dt", "year")

    def month(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(self._expr, "dt", "month")

    def day(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(self._expr, "dt", "day")

    def hour(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(self._expr, "dt", "hour")

    def minute(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(self._expr, "dt", "minute")

    def second(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(self._expr, "dt", "second")

    def millisecond(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(self._expr, "dt", "millisecond")

    def microsecond(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(self._expr, "dt", "microsecond")

    def nanosecond(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(self._expr, "dt", "nanosecond")

    def ordinal_day(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(self._expr, "dt", "ordinal_day")

    # Duration totals.
    def total_minutes(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(self._expr, "dt", "total_minutes")

    def total_seconds(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(self._expr, "dt", "total_seconds")

    def total_milliseconds(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(
            self._expr, "dt", "total_milliseconds"
        )

    def total_microseconds(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(
            self._expr, "dt", "total_microseconds"
        )

    def total_nanoseconds(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(
            self._expr, "dt", "total_nanoseconds"
        )
|
| 426 |
+
|
| 427 |
+
|
| 428 |
+
class ArrowExprStringNamespace:
    """String operations namespace, reached via ``expr.str``.

    Each method delegates to the ``ArrowSeries.str`` method of the same name.
    """

    def __init__(self, expr: ArrowExpr) -> None:
        self._expr = expr

    def len_chars(self) -> ArrowExpr:
        return reuse_series_namespace_implementation(self._expr, "str", "len_chars")

    def replace(
        self, pattern: str, value: str, *, literal: bool = False, n: int = 1
    ) -> ArrowExpr:
        return reuse_series_namespace_implementation(
            self._expr, "str", "replace", pattern, value, literal=literal, n=n
        )

    def replace_all(
        self, pattern: str, value: str, *, literal: bool = False
    ) -> ArrowExpr:
        return reuse_series_namespace_implementation(
            self._expr, "str", "replace_all", pattern, value, literal=literal
        )

    def strip_chars(self, characters: str | None = None) -> ArrowExpr:
        return reuse_series_namespace_implementation(
            self._expr, "str", "strip_chars", characters
        )

    def starts_with(self, prefix: str) -> ArrowExpr:
        return reuse_series_namespace_implementation(
            self._expr, "str", "starts_with", prefix
        )

    def ends_with(self, suffix: str) -> ArrowExpr:
        return reuse_series_namespace_implementation(
            self._expr, "str", "ends_with", suffix
        )

    def contains(self, pattern: str, *, literal: bool) -> ArrowExpr:
        return reuse_series_namespace_implementation(
            self._expr, "str", "contains", pattern, literal=literal
        )

    def slice(self, offset: int, length: int | None = None) -> ArrowExpr:
        return reuse_series_namespace_implementation(
            self._expr, "str", "slice", offset, length
        )

    def to_datetime(self, format: str | None = None) -> ArrowExpr:  # noqa: A002
        return reuse_series_namespace_implementation(
            self._expr, "str", "to_datetime", format
        )

    def to_uppercase(self) -> ArrowExpr:
        return reuse_series_namespace_implementation(self._expr, "str", "to_uppercase")

    def to_lowercase(self) -> ArrowExpr:
        return reuse_series_namespace_implementation(self._expr, "str", "to_lowercase")
|
| 524 |
+
|
| 525 |
+
|
| 526 |
+
class ArrowExprNameNamespace:
    """Output-name manipulation namespace, reached via ``expr.name``.

    Every public method previously duplicated the anonymous-expression
    guard and the expression-reconstruction boilerplate; both are now
    factored into private helpers. Public behavior, signatures, and error
    messages are unchanged.
    """

    def __init__(self: Self, expr: ArrowExpr) -> None:
        self._expr = expr

    def _ensure_root_names(self, method_name: str) -> list[str]:
        # Shared guard: `.name.*` requires the expression to have named roots.
        root_names = self._expr._root_names
        if root_names is None:
            msg = (
                f"Anonymous expressions are not supported in `.name.{method_name}`.\n"
                "Instead of `nw.all()`, try using a named expression, such as "
                "`nw.col('a', 'b')`\n"
            )
            raise ValueError(msg)
        return root_names

    def _with_output_names(
        self, root_names: list[str], output_names: list[str]
    ) -> ArrowExpr:
        # Rebuild the expression so each produced series is aliased to the
        # corresponding entry of `output_names`; depth/function_name are kept.
        return self._expr.__class__(
            lambda df: [
                series.alias(name)
                for series, name in zip(self._expr._call(df), output_names)
            ],
            depth=self._expr._depth,
            function_name=self._expr._function_name,
            root_names=root_names,
            output_names=output_names,
            backend_version=self._expr._backend_version,
        )

    def keep(self: Self) -> ArrowExpr:
        """Reset output names back to the root column names."""
        root_names = self._ensure_root_names("keep")
        return self._with_output_names(root_names, root_names)

    def map(self: Self, function: Callable[[str], str]) -> ArrowExpr:
        """Rename outputs by applying `function` to each root name."""
        root_names = self._ensure_root_names("map")
        output_names = [function(str(name)) for name in root_names]
        return self._with_output_names(root_names, output_names)

    def prefix(self: Self, prefix: str) -> ArrowExpr:
        """Prepend `prefix` to each output name."""
        root_names = self._ensure_root_names("prefix")
        output_names = [prefix + str(name) for name in root_names]
        return self._with_output_names(root_names, output_names)

    def suffix(self: Self, suffix: str) -> ArrowExpr:
        """Append `suffix` to each output name."""
        root_names = self._ensure_root_names("suffix")
        output_names = [str(name) + suffix for name in root_names]
        return self._with_output_names(root_names, output_names)

    def to_lowercase(self: Self) -> ArrowExpr:
        """Lowercase each output name."""
        root_names = self._ensure_root_names("to_lowercase")
        output_names = [str(name).lower() for name in root_names]
        return self._with_output_names(root_names, output_names)

    def to_uppercase(self: Self) -> ArrowExpr:
        """Uppercase each output name."""
        root_names = self._ensure_root_names("to_uppercase")
        output_names = [str(name).upper() for name in root_names]
        return self._with_output_names(root_names, output_names)
|
parrot/lib/python3.10/site-packages/narwhals/_arrow/group_by.py
ADDED
|
@@ -0,0 +1,168 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from copy import copy
|
| 4 |
+
from typing import TYPE_CHECKING
|
| 5 |
+
from typing import Any
|
| 6 |
+
from typing import Callable
|
| 7 |
+
from typing import Iterator
|
| 8 |
+
|
| 9 |
+
from narwhals._expression_parsing import is_simple_aggregation
|
| 10 |
+
from narwhals._expression_parsing import parse_into_exprs
|
| 11 |
+
from narwhals.utils import remove_prefix
|
| 12 |
+
|
| 13 |
+
if TYPE_CHECKING:
|
| 14 |
+
from narwhals._arrow.dataframe import ArrowDataFrame
|
| 15 |
+
from narwhals._arrow.expr import ArrowExpr
|
| 16 |
+
from narwhals._arrow.typing import IntoArrowExpr
|
| 17 |
+
|
| 18 |
+
POLARS_TO_ARROW_AGGREGATIONS = {
|
| 19 |
+
"len": "count",
|
| 20 |
+
"n_unique": "count_distinct",
|
| 21 |
+
"std": "stddev",
|
| 22 |
+
"var": "variance", # currently unused, we don't have `var` yet
|
| 23 |
+
}
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def get_function_name_option(function_name: str) -> Any | None:
|
| 27 |
+
"""Map specific pyarrow compute function to respective option to match polars behaviour."""
|
| 28 |
+
import pyarrow.compute as pc # ignore-banned-import
|
| 29 |
+
|
| 30 |
+
function_name_to_options = {
|
| 31 |
+
"count": pc.CountOptions(mode="all"),
|
| 32 |
+
"count_distinct": pc.CountOptions(mode="all"),
|
| 33 |
+
"stddev": pc.VarianceOptions(ddof=1),
|
| 34 |
+
"variance": pc.VarianceOptions(ddof=1),
|
| 35 |
+
}
|
| 36 |
+
return function_name_to_options.get(function_name)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class ArrowGroupBy:
|
| 40 |
+
def __init__(self, df: ArrowDataFrame, keys: list[str]) -> None:
|
| 41 |
+
import pyarrow as pa # ignore-banned-import()
|
| 42 |
+
|
| 43 |
+
self._df = df
|
| 44 |
+
self._keys = list(keys)
|
| 45 |
+
self._grouped = pa.TableGroupBy(self._df._native_frame, list(self._keys))
|
| 46 |
+
|
| 47 |
+
def agg(
|
| 48 |
+
self,
|
| 49 |
+
*aggs: IntoArrowExpr,
|
| 50 |
+
**named_aggs: IntoArrowExpr,
|
| 51 |
+
) -> ArrowDataFrame:
|
| 52 |
+
exprs = parse_into_exprs(
|
| 53 |
+
*aggs,
|
| 54 |
+
namespace=self._df.__narwhals_namespace__(),
|
| 55 |
+
**named_aggs,
|
| 56 |
+
)
|
| 57 |
+
output_names: list[str] = copy(self._keys)
|
| 58 |
+
for expr in exprs:
|
| 59 |
+
if expr._output_names is None:
|
| 60 |
+
msg = (
|
| 61 |
+
"Anonymous expressions are not supported in group_by.agg.\n"
|
| 62 |
+
"Instead of `nw.all()`, try using a named expression, such as "
|
| 63 |
+
"`nw.col('a', 'b')`\n"
|
| 64 |
+
)
|
| 65 |
+
raise ValueError(msg)
|
| 66 |
+
output_names.extend(expr._output_names)
|
| 67 |
+
|
| 68 |
+
return agg_arrow(
|
| 69 |
+
self._grouped,
|
| 70 |
+
exprs,
|
| 71 |
+
self._keys,
|
| 72 |
+
output_names,
|
| 73 |
+
self._df._from_native_frame,
|
| 74 |
+
)
|
| 75 |
+
|
| 76 |
+
def __iter__(self) -> Iterator[tuple[Any, ArrowDataFrame]]:
|
| 77 |
+
key_values = (
|
| 78 |
+
self._df.select(*self._keys)
|
| 79 |
+
.unique(subset=self._keys, keep="first")
|
| 80 |
+
.iter_rows()
|
| 81 |
+
)
|
| 82 |
+
nw_namespace = self._df.__narwhals_namespace__()
|
| 83 |
+
yield from (
|
| 84 |
+
(
|
| 85 |
+
key_value,
|
| 86 |
+
self._df.filter(
|
| 87 |
+
*[nw_namespace.col(k) == v for k, v in zip(self._keys, key_value)]
|
| 88 |
+
),
|
| 89 |
+
)
|
| 90 |
+
for key_value in key_values
|
| 91 |
+
)
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def agg_arrow(
|
| 95 |
+
grouped: Any,
|
| 96 |
+
exprs: list[ArrowExpr],
|
| 97 |
+
keys: list[str],
|
| 98 |
+
output_names: list[str],
|
| 99 |
+
from_dataframe: Callable[[Any], ArrowDataFrame],
|
| 100 |
+
) -> ArrowDataFrame:
|
| 101 |
+
import pyarrow.compute as pc # ignore-banned-import()
|
| 102 |
+
|
| 103 |
+
all_simple_aggs = True
|
| 104 |
+
for expr in exprs:
|
| 105 |
+
if not is_simple_aggregation(expr):
|
| 106 |
+
all_simple_aggs = False
|
| 107 |
+
break
|
| 108 |
+
|
| 109 |
+
if all_simple_aggs:
|
| 110 |
+
# Mapping from output name to
|
| 111 |
+
# (aggregation_args, pyarrow_output_name) # noqa: ERA001
|
| 112 |
+
simple_aggregations: dict[str, tuple[tuple[Any, ...], str]] = {}
|
| 113 |
+
for expr in exprs:
|
| 114 |
+
if expr._depth == 0:
|
| 115 |
+
# e.g. agg(nw.len()) # noqa: ERA001
|
| 116 |
+
if (
|
| 117 |
+
expr._output_names is None or expr._function_name != "len"
|
| 118 |
+
): # pragma: no cover
|
| 119 |
+
msg = "Safety assertion failed, please report a bug to https://github.com/narwhals-dev/narwhals/issues"
|
| 120 |
+
raise AssertionError(msg)
|
| 121 |
+
simple_aggregations[expr._output_names[0]] = (
|
| 122 |
+
(keys[0], "count", pc.CountOptions(mode="all")),
|
| 123 |
+
f"{keys[0]}_count",
|
| 124 |
+
)
|
| 125 |
+
continue
|
| 126 |
+
|
| 127 |
+
# e.g. agg(nw.mean('a')) # noqa: ERA001
|
| 128 |
+
if (
|
| 129 |
+
expr._depth != 1 or expr._root_names is None or expr._output_names is None
|
| 130 |
+
): # pragma: no cover
|
| 131 |
+
msg = "Safety assertion failed, please report a bug to https://github.com/narwhals-dev/narwhals/issues"
|
| 132 |
+
raise AssertionError(msg)
|
| 133 |
+
|
| 134 |
+
function_name = remove_prefix(expr._function_name, "col->")
|
| 135 |
+
function_name = POLARS_TO_ARROW_AGGREGATIONS.get(function_name, function_name)
|
| 136 |
+
|
| 137 |
+
option = get_function_name_option(function_name)
|
| 138 |
+
for root_name, output_name in zip(expr._root_names, expr._output_names):
|
| 139 |
+
simple_aggregations[output_name] = (
|
| 140 |
+
(root_name, function_name, option),
|
| 141 |
+
f"{root_name}_{function_name}",
|
| 142 |
+
)
|
| 143 |
+
|
| 144 |
+
aggs: list[Any] = []
|
| 145 |
+
name_mapping = {}
|
| 146 |
+
for output_name, (
|
| 147 |
+
aggregation_args,
|
| 148 |
+
pyarrow_output_name,
|
| 149 |
+
) in simple_aggregations.items():
|
| 150 |
+
aggs.append(aggregation_args)
|
| 151 |
+
name_mapping[pyarrow_output_name] = output_name
|
| 152 |
+
result_simple = grouped.aggregate(aggs)
|
| 153 |
+
result_simple = result_simple.rename_columns(
|
| 154 |
+
[name_mapping.get(col, col) for col in result_simple.column_names]
|
| 155 |
+
).select(output_names)
|
| 156 |
+
return from_dataframe(result_simple)
|
| 157 |
+
|
| 158 |
+
msg = (
|
| 159 |
+
"Non-trivial complex found.\n\n"
|
| 160 |
+
"Hint: you were probably trying to apply a non-elementary aggregation with a "
|
| 161 |
+
"pyarrow table.\n"
|
| 162 |
+
"Please rewrite your query such that group-by aggregations "
|
| 163 |
+
"are elementary. For example, instead of:\n\n"
|
| 164 |
+
" df.group_by('a').agg(nw.col('b').round(2).mean())\n\n"
|
| 165 |
+
"use:\n\n"
|
| 166 |
+
" df.with_columns(nw.col('b').round(2)).group_by('a').agg(nw.col('b').mean())\n\n"
|
| 167 |
+
)
|
| 168 |
+
raise ValueError(msg)
|
parrot/lib/python3.10/site-packages/narwhals/_arrow/selectors.py
ADDED
|
@@ -0,0 +1,163 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import TYPE_CHECKING
|
| 4 |
+
from typing import Any
|
| 5 |
+
from typing import NoReturn
|
| 6 |
+
|
| 7 |
+
from narwhals import dtypes
|
| 8 |
+
from narwhals._arrow.expr import ArrowExpr
|
| 9 |
+
from narwhals.utils import Implementation
|
| 10 |
+
|
| 11 |
+
if TYPE_CHECKING:
|
| 12 |
+
from typing_extensions import Self
|
| 13 |
+
|
| 14 |
+
from narwhals._arrow.dataframe import ArrowDataFrame
|
| 15 |
+
from narwhals._arrow.series import ArrowSeries
|
| 16 |
+
from narwhals.dtypes import DType
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class ArrowSelectorNamespace:
|
| 20 |
+
def __init__(self: Self, *, backend_version: tuple[int, ...]) -> None:
|
| 21 |
+
self._backend_version = backend_version
|
| 22 |
+
self._implementation = Implementation.PYARROW
|
| 23 |
+
|
| 24 |
+
def by_dtype(self: Self, dtypes: list[DType | type[DType]]) -> ArrowSelector:
|
| 25 |
+
def func(df: ArrowDataFrame) -> list[ArrowSeries]:
|
| 26 |
+
return [df[col] for col in df.columns if df.schema[col] in dtypes]
|
| 27 |
+
|
| 28 |
+
return ArrowSelector(
|
| 29 |
+
func,
|
| 30 |
+
depth=0,
|
| 31 |
+
function_name="type_selector",
|
| 32 |
+
root_names=None,
|
| 33 |
+
output_names=None,
|
| 34 |
+
backend_version=self._backend_version,
|
| 35 |
+
)
|
| 36 |
+
|
| 37 |
+
def numeric(self: Self) -> ArrowSelector:
|
| 38 |
+
return self.by_dtype(
|
| 39 |
+
[
|
| 40 |
+
dtypes.Int64,
|
| 41 |
+
dtypes.Int32,
|
| 42 |
+
dtypes.Int16,
|
| 43 |
+
dtypes.Int8,
|
| 44 |
+
dtypes.UInt64,
|
| 45 |
+
dtypes.UInt32,
|
| 46 |
+
dtypes.UInt16,
|
| 47 |
+
dtypes.UInt8,
|
| 48 |
+
dtypes.Float64,
|
| 49 |
+
dtypes.Float32,
|
| 50 |
+
],
|
| 51 |
+
)
|
| 52 |
+
|
| 53 |
+
def categorical(self: Self) -> ArrowSelector:
|
| 54 |
+
return self.by_dtype([dtypes.Categorical])
|
| 55 |
+
|
| 56 |
+
def string(self: Self) -> ArrowSelector:
|
| 57 |
+
return self.by_dtype([dtypes.String])
|
| 58 |
+
|
| 59 |
+
def boolean(self: Self) -> ArrowSelector:
|
| 60 |
+
return self.by_dtype([dtypes.Boolean])
|
| 61 |
+
|
| 62 |
+
def all(self: Self) -> ArrowSelector:
|
| 63 |
+
def func(df: ArrowDataFrame) -> list[ArrowSeries]:
|
| 64 |
+
return [df[col] for col in df.columns]
|
| 65 |
+
|
| 66 |
+
return ArrowSelector(
|
| 67 |
+
func,
|
| 68 |
+
depth=0,
|
| 69 |
+
function_name="type_selector",
|
| 70 |
+
root_names=None,
|
| 71 |
+
output_names=None,
|
| 72 |
+
backend_version=self._backend_version,
|
| 73 |
+
)
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
class ArrowSelector(ArrowExpr):
|
| 77 |
+
def __repr__(self: Self) -> str: # pragma: no cover
|
| 78 |
+
return (
|
| 79 |
+
f"ArrowSelector("
|
| 80 |
+
f"depth={self._depth}, "
|
| 81 |
+
f"function_name={self._function_name}, "
|
| 82 |
+
f"root_names={self._root_names}, "
|
| 83 |
+
f"output_names={self._output_names}"
|
| 84 |
+
)
|
| 85 |
+
|
| 86 |
+
def _to_expr(self: Self) -> ArrowExpr:
|
| 87 |
+
return ArrowExpr(
|
| 88 |
+
self._call,
|
| 89 |
+
depth=self._depth,
|
| 90 |
+
function_name=self._function_name,
|
| 91 |
+
root_names=self._root_names,
|
| 92 |
+
output_names=self._output_names,
|
| 93 |
+
backend_version=self._backend_version,
|
| 94 |
+
)
|
| 95 |
+
|
| 96 |
+
def __sub__(self: Self, other: Self | Any) -> ArrowSelector | Any:
|
| 97 |
+
if isinstance(other, ArrowSelector):
|
| 98 |
+
|
| 99 |
+
def call(df: ArrowDataFrame) -> list[ArrowSeries]:
|
| 100 |
+
lhs = self._call(df)
|
| 101 |
+
rhs = other._call(df)
|
| 102 |
+
return [x for x in lhs if x.name not in {x.name for x in rhs}]
|
| 103 |
+
|
| 104 |
+
return ArrowSelector(
|
| 105 |
+
call,
|
| 106 |
+
depth=0,
|
| 107 |
+
function_name="type_selector",
|
| 108 |
+
root_names=None,
|
| 109 |
+
output_names=None,
|
| 110 |
+
backend_version=self._backend_version,
|
| 111 |
+
)
|
| 112 |
+
else:
|
| 113 |
+
return self._to_expr() - other
|
| 114 |
+
|
| 115 |
+
def __or__(self: Self, other: Self | Any) -> ArrowSelector | Any:
|
| 116 |
+
if isinstance(other, ArrowSelector):
|
| 117 |
+
|
| 118 |
+
def call(df: ArrowDataFrame) -> list[ArrowSeries]:
|
| 119 |
+
lhs = self._call(df)
|
| 120 |
+
rhs = other._call(df)
|
| 121 |
+
return [x for x in lhs if x.name not in {x.name for x in rhs}] + rhs
|
| 122 |
+
|
| 123 |
+
return ArrowSelector(
|
| 124 |
+
call,
|
| 125 |
+
depth=0,
|
| 126 |
+
function_name="type_selector",
|
| 127 |
+
root_names=None,
|
| 128 |
+
output_names=None,
|
| 129 |
+
backend_version=self._backend_version,
|
| 130 |
+
)
|
| 131 |
+
else:
|
| 132 |
+
return self._to_expr() | other
|
| 133 |
+
|
| 134 |
+
def __and__(self: Self, other: Self | Any) -> ArrowSelector | Any:
|
| 135 |
+
if isinstance(other, ArrowSelector):
|
| 136 |
+
|
| 137 |
+
def call(df: ArrowDataFrame) -> list[ArrowSeries]:
|
| 138 |
+
lhs = self._call(df)
|
| 139 |
+
rhs = other._call(df)
|
| 140 |
+
return [x for x in lhs if x.name in {x.name for x in rhs}]
|
| 141 |
+
|
| 142 |
+
return ArrowSelector(
|
| 143 |
+
call,
|
| 144 |
+
depth=0,
|
| 145 |
+
function_name="type_selector",
|
| 146 |
+
root_names=None,
|
| 147 |
+
output_names=None,
|
| 148 |
+
backend_version=self._backend_version,
|
| 149 |
+
)
|
| 150 |
+
else:
|
| 151 |
+
return self._to_expr() & other
|
| 152 |
+
|
| 153 |
+
def __invert__(self: Self) -> ArrowSelector:
|
| 154 |
+
return ArrowSelectorNamespace(backend_version=self._backend_version).all() - self
|
| 155 |
+
|
| 156 |
+
def __rsub__(self: Self, other: Any) -> NoReturn:
|
| 157 |
+
raise NotImplementedError
|
| 158 |
+
|
| 159 |
+
def __rand__(self: Self, other: Any) -> NoReturn:
|
| 160 |
+
raise NotImplementedError
|
| 161 |
+
|
| 162 |
+
def __ror__(self: Self, other: Any) -> NoReturn:
|
| 163 |
+
raise NotImplementedError
|
parrot/lib/python3.10/site-packages/narwhals/_arrow/series.py
ADDED
|
@@ -0,0 +1,1038 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import TYPE_CHECKING
|
| 4 |
+
from typing import Any
|
| 5 |
+
from typing import Iterable
|
| 6 |
+
from typing import Iterator
|
| 7 |
+
from typing import Literal
|
| 8 |
+
from typing import Sequence
|
| 9 |
+
from typing import overload
|
| 10 |
+
|
| 11 |
+
from narwhals._arrow.utils import cast_for_truediv
|
| 12 |
+
from narwhals._arrow.utils import floordiv_compat
|
| 13 |
+
from narwhals._arrow.utils import narwhals_to_native_dtype
|
| 14 |
+
from narwhals._arrow.utils import translate_dtype
|
| 15 |
+
from narwhals._arrow.utils import validate_column_comparand
|
| 16 |
+
from narwhals.dependencies import get_pandas
|
| 17 |
+
from narwhals.dependencies import get_pyarrow
|
| 18 |
+
from narwhals.utils import Implementation
|
| 19 |
+
from narwhals.utils import generate_unique_token
|
| 20 |
+
|
| 21 |
+
if TYPE_CHECKING:
|
| 22 |
+
import pyarrow as pa
|
| 23 |
+
from typing_extensions import Self
|
| 24 |
+
|
| 25 |
+
from narwhals._arrow.dataframe import ArrowDataFrame
|
| 26 |
+
from narwhals._arrow.namespace import ArrowNamespace
|
| 27 |
+
from narwhals.dtypes import DType
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class ArrowSeries:
|
| 31 |
+
def __init__(
|
| 32 |
+
self,
|
| 33 |
+
native_series: pa.ChunkedArray,
|
| 34 |
+
*,
|
| 35 |
+
name: str,
|
| 36 |
+
backend_version: tuple[int, ...],
|
| 37 |
+
) -> None:
|
| 38 |
+
self._name = name
|
| 39 |
+
self._native_series = native_series
|
| 40 |
+
self._implementation = Implementation.PYARROW
|
| 41 |
+
self._backend_version = backend_version
|
| 42 |
+
|
| 43 |
+
def _from_native_series(self, series: Any) -> Self:
|
| 44 |
+
import pyarrow as pa # ignore-banned-import()
|
| 45 |
+
|
| 46 |
+
if isinstance(series, pa.Array):
|
| 47 |
+
series = pa.chunked_array([series])
|
| 48 |
+
return self.__class__(
|
| 49 |
+
series,
|
| 50 |
+
name=self._name,
|
| 51 |
+
backend_version=self._backend_version,
|
| 52 |
+
)
|
| 53 |
+
|
| 54 |
+
@classmethod
|
| 55 |
+
def _from_iterable(
|
| 56 |
+
cls: type[Self],
|
| 57 |
+
data: Iterable[Any],
|
| 58 |
+
name: str,
|
| 59 |
+
*,
|
| 60 |
+
backend_version: tuple[int, ...],
|
| 61 |
+
) -> Self:
|
| 62 |
+
import pyarrow as pa # ignore-banned-import()
|
| 63 |
+
|
| 64 |
+
return cls(
|
| 65 |
+
pa.chunked_array([data]),
|
| 66 |
+
name=name,
|
| 67 |
+
backend_version=backend_version,
|
| 68 |
+
)
|
| 69 |
+
|
| 70 |
+
def __narwhals_namespace__(self) -> ArrowNamespace:
|
| 71 |
+
from narwhals._arrow.namespace import ArrowNamespace
|
| 72 |
+
|
| 73 |
+
return ArrowNamespace(backend_version=self._backend_version)
|
| 74 |
+
|
| 75 |
+
def __len__(self) -> int:
|
| 76 |
+
return len(self._native_series)
|
| 77 |
+
|
| 78 |
+
def __eq__(self, other: object) -> Self: # type: ignore[override]
|
| 79 |
+
import pyarrow.compute as pc
|
| 80 |
+
|
| 81 |
+
ser = self._native_series
|
| 82 |
+
other = validate_column_comparand(other)
|
| 83 |
+
return self._from_native_series(pc.equal(ser, other))
|
| 84 |
+
|
| 85 |
+
def __ne__(self, other: object) -> Self: # type: ignore[override]
|
| 86 |
+
import pyarrow.compute as pc # ignore-banned-import()
|
| 87 |
+
|
| 88 |
+
ser = self._native_series
|
| 89 |
+
other = validate_column_comparand(other)
|
| 90 |
+
return self._from_native_series(pc.not_equal(ser, other))
|
| 91 |
+
|
| 92 |
+
def __ge__(self, other: Any) -> Self:
|
| 93 |
+
import pyarrow.compute as pc # ignore-banned-import()
|
| 94 |
+
|
| 95 |
+
ser = self._native_series
|
| 96 |
+
other = validate_column_comparand(other)
|
| 97 |
+
return self._from_native_series(pc.greater_equal(ser, other))
|
| 98 |
+
|
| 99 |
+
def __gt__(self, other: Any) -> Self:
|
| 100 |
+
import pyarrow.compute as pc # ignore-banned-import()
|
| 101 |
+
|
| 102 |
+
ser = self._native_series
|
| 103 |
+
other = validate_column_comparand(other)
|
| 104 |
+
return self._from_native_series(pc.greater(ser, other))
|
| 105 |
+
|
| 106 |
+
def __le__(self, other: Any) -> Self:
|
| 107 |
+
import pyarrow.compute as pc # ignore-banned-import()
|
| 108 |
+
|
| 109 |
+
ser = self._native_series
|
| 110 |
+
other = validate_column_comparand(other)
|
| 111 |
+
return self._from_native_series(pc.less_equal(ser, other))
|
| 112 |
+
|
| 113 |
+
def __lt__(self, other: Any) -> Self:
|
| 114 |
+
import pyarrow.compute as pc # ignore-banned-import()
|
| 115 |
+
|
| 116 |
+
ser = self._native_series
|
| 117 |
+
other = validate_column_comparand(other)
|
| 118 |
+
return self._from_native_series(pc.less(ser, other))
|
| 119 |
+
|
| 120 |
+
def __and__(self, other: Any) -> Self:
|
| 121 |
+
import pyarrow.compute as pc # ignore-banned-import()
|
| 122 |
+
|
| 123 |
+
ser = self._native_series
|
| 124 |
+
other = validate_column_comparand(other)
|
| 125 |
+
return self._from_native_series(pc.and_kleene(ser, other))
|
| 126 |
+
|
| 127 |
+
def __rand__(self, other: Any) -> Self:
|
| 128 |
+
import pyarrow.compute as pc # ignore-banned-import()
|
| 129 |
+
|
| 130 |
+
ser = self._native_series
|
| 131 |
+
other = validate_column_comparand(other)
|
| 132 |
+
return self._from_native_series(pc.and_kleene(other, ser))
|
| 133 |
+
|
| 134 |
+
def __or__(self, other: Any) -> Self:
|
| 135 |
+
import pyarrow.compute as pc # ignore-banned-import()
|
| 136 |
+
|
| 137 |
+
ser = self._native_series
|
| 138 |
+
other = validate_column_comparand(other)
|
| 139 |
+
return self._from_native_series(pc.or_kleene(ser, other))
|
| 140 |
+
|
| 141 |
+
def __ror__(self, other: Any) -> Self:
|
| 142 |
+
import pyarrow.compute as pc # ignore-banned-import()
|
| 143 |
+
|
| 144 |
+
ser = self._native_series
|
| 145 |
+
other = validate_column_comparand(other)
|
| 146 |
+
return self._from_native_series(pc.or_kleene(other, ser))
|
| 147 |
+
|
| 148 |
+
def __add__(self, other: Any) -> Self:
|
| 149 |
+
import pyarrow.compute as pc # ignore-banned-import()
|
| 150 |
+
|
| 151 |
+
other = validate_column_comparand(other)
|
| 152 |
+
return self._from_native_series(pc.add(self._native_series, other))
|
| 153 |
+
|
| 154 |
+
def __radd__(self, other: Any) -> Self:
|
| 155 |
+
return self + other # type: ignore[no-any-return]
|
| 156 |
+
|
| 157 |
+
def __sub__(self, other: Any) -> Self:
|
| 158 |
+
import pyarrow.compute as pc # ignore-banned-import()
|
| 159 |
+
|
| 160 |
+
other = validate_column_comparand(other)
|
| 161 |
+
return self._from_native_series(pc.subtract(self._native_series, other))
|
| 162 |
+
|
| 163 |
+
def __rsub__(self, other: Any) -> Self:
|
| 164 |
+
return (self - other) * (-1) # type: ignore[no-any-return]
|
| 165 |
+
|
| 166 |
+
def __mul__(self, other: Any) -> Self:
|
| 167 |
+
import pyarrow.compute as pc # ignore-banned-import()
|
| 168 |
+
|
| 169 |
+
other = validate_column_comparand(other)
|
| 170 |
+
return self._from_native_series(pc.multiply(self._native_series, other))
|
| 171 |
+
|
| 172 |
+
def __rmul__(self, other: Any) -> Self:
|
| 173 |
+
return self * other # type: ignore[no-any-return]
|
| 174 |
+
|
| 175 |
+
def __pow__(self, other: Any) -> Self:
|
| 176 |
+
import pyarrow.compute as pc # ignore-banned-import()
|
| 177 |
+
|
| 178 |
+
ser = self._native_series
|
| 179 |
+
other = validate_column_comparand(other)
|
| 180 |
+
return self._from_native_series(pc.power(ser, other))
|
| 181 |
+
|
| 182 |
+
def __rpow__(self, other: Any) -> Self:
|
| 183 |
+
import pyarrow.compute as pc # ignore-banned-import()
|
| 184 |
+
|
| 185 |
+
ser = self._native_series
|
| 186 |
+
other = validate_column_comparand(other)
|
| 187 |
+
return self._from_native_series(pc.power(other, ser))
|
| 188 |
+
|
| 189 |
+
def __floordiv__(self, other: Any) -> Self:
|
| 190 |
+
ser = self._native_series
|
| 191 |
+
other = validate_column_comparand(other)
|
| 192 |
+
return self._from_native_series(floordiv_compat(ser, other))
|
| 193 |
+
|
| 194 |
+
def __rfloordiv__(self, other: Any) -> Self:
|
| 195 |
+
ser = self._native_series
|
| 196 |
+
other = validate_column_comparand(other)
|
| 197 |
+
return self._from_native_series(floordiv_compat(other, ser))
|
| 198 |
+
|
| 199 |
+
def __truediv__(self, other: Any) -> Self:
|
| 200 |
+
import pyarrow as pa # ignore-banned-import()
|
| 201 |
+
import pyarrow.compute as pc # ignore-banned-import()
|
| 202 |
+
|
| 203 |
+
ser = self._native_series
|
| 204 |
+
other = validate_column_comparand(other)
|
| 205 |
+
if not isinstance(other, (pa.Array, pa.ChunkedArray)):
|
| 206 |
+
# scalar
|
| 207 |
+
other = pa.scalar(other)
|
| 208 |
+
return self._from_native_series(pc.divide(*cast_for_truediv(ser, other)))
|
| 209 |
+
|
| 210 |
+
def __rtruediv__(self, other: Any) -> Self:
|
| 211 |
+
import pyarrow as pa # ignore-banned-import()
|
| 212 |
+
import pyarrow.compute as pc # ignore-banned-import()
|
| 213 |
+
|
| 214 |
+
ser = self._native_series
|
| 215 |
+
other = validate_column_comparand(other)
|
| 216 |
+
if not isinstance(other, (pa.Array, pa.ChunkedArray)):
|
| 217 |
+
# scalar
|
| 218 |
+
other = pa.scalar(other)
|
| 219 |
+
return self._from_native_series(pc.divide(*cast_for_truediv(other, ser)))
|
| 220 |
+
|
| 221 |
+
def __mod__(self, other: Any) -> Self:
|
| 222 |
+
import pyarrow.compute as pc # ignore-banned-import()
|
| 223 |
+
|
| 224 |
+
ser = self._native_series
|
| 225 |
+
other = validate_column_comparand(other)
|
| 226 |
+
floor_div = (self // other)._native_series
|
| 227 |
+
res = pc.subtract(ser, pc.multiply(floor_div, other))
|
| 228 |
+
return self._from_native_series(res)
|
| 229 |
+
|
| 230 |
+
def __rmod__(self, other: Any) -> Self:
    """Reflected modulo, derived as ``b - (b // a) * a``."""
    import pyarrow.compute as pc  # ignore-banned-import()

    lhs = validate_column_comparand(other)
    quotient = (lhs // self)._native_series
    remainder = pc.subtract(lhs, pc.multiply(quotient, self._native_series))
    return self._from_native_series(remainder)
|
| 238 |
+
|
| 239 |
+
def __invert__(self) -> Self:
    """Boolean negation: ``~self``."""
    import pyarrow.compute as pc  # ignore-banned-import()

    return self._from_native_series(pc.invert(self._native_series))
|
| 243 |
+
|
| 244 |
+
def len(self) -> int:
    """Number of elements in the series."""
    return len(self._native_series)
|
| 246 |
+
|
| 247 |
+
def filter(self, other: Any) -> Self:
    """Keep the elements where the boolean mask ``other`` is True."""
    is_bool_list = isinstance(other, list) and all(
        isinstance(x, bool) for x in other
    )
    # A plain list of bools can be handed to pyarrow directly;
    # anything else goes through the usual comparand validation.
    mask = other if is_bool_list else validate_column_comparand(other)
    return self._from_native_series(self._native_series.filter(mask))
|
| 251 |
+
|
| 252 |
+
def mean(self) -> Any:
    """Arithmetic mean of the series, as a pyarrow scalar.

    Fix: the return annotation was previously ``int``, but ``pc.mean``
    always produces a floating-point pyarrow scalar, so ``Any`` is the
    honest (and backward-compatible) annotation.
    """
    import pyarrow.compute as pc  # ignore-banned-import()

    return pc.mean(self._native_series)
|
| 256 |
+
|
| 257 |
+
def min(self) -> int:
    """Minimum value of the series (pyarrow scalar)."""
    import pyarrow.compute as pc  # ignore-banned-import()

    result = pc.min(self._native_series)
    return result  # type: ignore[no-any-return]
|
| 261 |
+
|
| 262 |
+
def max(self) -> int:
    """Maximum value of the series (pyarrow scalar)."""
    import pyarrow.compute as pc  # ignore-banned-import()

    result = pc.max(self._native_series)
    return result  # type: ignore[no-any-return]
|
| 266 |
+
|
| 267 |
+
def sum(self) -> int:
    """Sum of the series (pyarrow scalar)."""
    import pyarrow.compute as pc  # ignore-banned-import()

    result = pc.sum(self._native_series)
    return result  # type: ignore[no-any-return]
|
| 271 |
+
|
| 272 |
+
def drop_nulls(self) -> ArrowSeries:
    """Return the series with all null entries removed."""
    import pyarrow.compute as pc  # ignore-banned-import()

    return self._from_native_series(pc.drop_null(self._native_series))
|
| 276 |
+
|
| 277 |
+
def shift(self, n: int) -> Self:
    """Shift values by ``n`` positions, filling vacated slots with nulls."""
    import pyarrow as pa  # ignore-banned-import()

    ca = self._native_series
    if n == 0:
        shifted = ca
    elif n > 0:
        # Prepend n nulls and drop the last n values.
        shifted = pa.concat_arrays([pa.nulls(n, ca.type), *ca[:-n].chunks])
    else:
        # Append |n| nulls and drop the first |n| values.
        shifted = pa.concat_arrays([*ca[-n:].chunks, pa.nulls(-n, ca.type)])
    return self._from_native_series(shifted)
|
| 289 |
+
|
| 290 |
+
def std(self, ddof: int = 1) -> Any:
    """Standard deviation with ``ddof`` delta degrees of freedom.

    Fix: the return annotation was previously ``int``, but ``pc.stddev``
    produces a floating-point pyarrow scalar, so ``Any`` is the honest
    (and backward-compatible) annotation.
    """
    import pyarrow.compute as pc  # ignore-banned-import()

    return pc.stddev(self._native_series, ddof=ddof)
|
| 294 |
+
|
| 295 |
+
def count(self) -> int:
    """Number of non-null elements (pyarrow scalar)."""
    import pyarrow.compute as pc  # ignore-banned-import()

    result = pc.count(self._native_series)
    return result  # type: ignore[no-any-return]
|
| 299 |
+
|
| 300 |
+
def n_unique(self) -> int:
    """Number of distinct values; ``mode="all"`` counts nulls too."""
    import pyarrow.compute as pc  # ignore-banned-import()

    distinct = pc.unique(self._native_series)
    return pc.count(distinct, mode="all")  # type: ignore[no-any-return]
|
| 305 |
+
|
| 306 |
+
def __native_namespace__(self) -> Any:  # pragma: no cover
    """Return the native backend module (pyarrow)."""
    return get_pyarrow()
|
| 308 |
+
|
| 309 |
+
@property
def name(self) -> str:
    """Name of the series."""
    return self._name
|
| 312 |
+
|
| 313 |
+
def __narwhals_series__(self) -> Self:
    """Narwhals protocol hook: this object is already a narwhals-level series."""
    return self
|
| 315 |
+
|
| 316 |
+
@overload
def __getitem__(self, idx: int) -> Any: ...

@overload
def __getitem__(self, idx: slice | Sequence[int]) -> Self: ...

def __getitem__(self, idx: int | slice | Sequence[int]) -> Any | Self:
    """Scalar for an int index; a sub-series for a slice or index sequence."""
    if isinstance(idx, int):
        return self._native_series[idx]
    if isinstance(idx, Sequence):
        # take() gathers by positional indices.
        return self._from_native_series(self._native_series.take(idx))
    return self._from_native_series(self._native_series[idx])
|
| 328 |
+
|
| 329 |
+
def scatter(self, indices: int | Sequence[int], values: Any) -> Self:
    """Return a copy with ``values`` written at positions ``indices``."""
    import numpy as np  # ignore-banned-import
    import pyarrow as pa  # ignore-banned-import
    import pyarrow.compute as pc  # ignore-banned-import

    arr = self._native_series
    # Boolean mask marking the positions to overwrite.
    mask = np.zeros(len(arr), dtype=bool)
    mask[indices] = True
    # Normalise `values` into a flat pa.Array.
    if isinstance(values, self.__class__):
        values = validate_column_comparand(values)
    if isinstance(values, pa.ChunkedArray):
        values = values.combine_chunks()
    if not isinstance(values, pa.Array):
        values = pa.array(values)
    replaced = pc.replace_with_mask(arr, mask, values.take(indices))
    return self._from_native_series(replaced)
|
| 345 |
+
|
| 346 |
+
def to_list(self) -> Any:
    """Convert the series to a plain Python list."""
    return self._native_series.to_pylist()
|
| 348 |
+
|
| 349 |
+
def __array__(self, dtype: Any = None, copy: bool | None = None) -> Any:
    """NumPy array protocol: delegate to the native pyarrow data."""
    return self._native_series.__array__(dtype=dtype, copy=copy)
|
| 351 |
+
|
| 352 |
+
def to_numpy(self) -> Any:
    """Convert the series to a NumPy array."""
    return self._native_series.to_numpy()
|
| 354 |
+
|
| 355 |
+
def alias(self, name: str) -> Self:
    """Return the same data under a new name."""
    return self.__class__(
        self._native_series, name=name, backend_version=self._backend_version
    )
|
| 361 |
+
|
| 362 |
+
@property
def dtype(self: Self) -> DType:
    """Narwhals dtype corresponding to the native pyarrow type."""
    return translate_dtype(self._native_series.type)
|
| 365 |
+
|
| 366 |
+
def abs(self) -> Self:
    """Element-wise absolute value."""
    import pyarrow.compute as pc  # ignore-banned-import()

    return self._from_native_series(pc.abs(self._native_series))
|
| 370 |
+
|
| 371 |
+
def cum_sum(self) -> Self:
    """Cumulative sum of the series."""
    import pyarrow.compute as pc  # ignore-banned-import()

    return self._from_native_series(pc.cumulative_sum(self._native_series))
|
| 375 |
+
|
| 376 |
+
def round(self, decimals: int) -> Self:
    """Round to ``decimals`` digits, halves rounding away from zero."""
    import pyarrow.compute as pc  # ignore-banned-import()

    rounded = pc.round(
        self._native_series, decimals, round_mode="half_towards_infinity"
    )
    return self._from_native_series(rounded)
|
| 382 |
+
|
| 383 |
+
def diff(self) -> Self:
    """First discrete difference between consecutive elements."""
    import pyarrow.compute as pc  # ignore-banned-import()

    # pairwise_diff needs a single contiguous array, not chunks.
    flat = self._native_series.combine_chunks()
    return self._from_native_series(pc.pairwise_diff(flat))
|
| 389 |
+
|
| 390 |
+
def any(self) -> bool:
    """True if any element is truthy (pyarrow scalar)."""
    import pyarrow.compute as pc  # ignore-banned-import()

    result = pc.any(self._native_series)
    return result  # type: ignore[no-any-return]
|
| 394 |
+
|
| 395 |
+
def all(self) -> bool:
    """True if every element is truthy (pyarrow scalar)."""
    import pyarrow.compute as pc  # ignore-banned-import()

    result = pc.all(self._native_series)
    return result  # type: ignore[no-any-return]
|
| 399 |
+
|
| 400 |
+
def is_between(
    self, lower_bound: Any, upper_bound: Any, closed: str = "both"
) -> Self:
    """Element-wise test that values lie between the two bounds.

    ``closed`` controls which bounds are inclusive: "left", "right",
    "none", or "both". Kleene logic propagates nulls.
    """
    import pyarrow.compute as pc  # ignore-banned-import()

    ser = self._native_series
    if closed == "left":
        res = pc.and_kleene(
            pc.greater_equal(ser, lower_bound), pc.less(ser, upper_bound)
        )
    elif closed == "right":
        res = pc.and_kleene(
            pc.greater(ser, lower_bound), pc.less_equal(ser, upper_bound)
        )
    elif closed == "none":
        res = pc.and_kleene(
            pc.greater(ser, lower_bound), pc.less(ser, upper_bound)
        )
    elif closed == "both":
        res = pc.and_kleene(
            pc.greater_equal(ser, lower_bound), pc.less_equal(ser, upper_bound)
        )
    else:  # pragma: no cover
        raise AssertionError
    return self._from_native_series(res)
|
| 425 |
+
|
| 426 |
+
def is_empty(self) -> bool:
    """True when the series has no elements."""
    return len(self) == 0
|
| 428 |
+
|
| 429 |
+
def is_null(self) -> Self:
    """Boolean mask that is True where elements are null."""
    return self._from_native_series(self._native_series.is_null())
|
| 432 |
+
|
| 433 |
+
def cast(self, dtype: DType) -> Self:
    """Cast the series to the given narwhals dtype."""
    import pyarrow.compute as pc  # ignore-banned-import()

    native_dtype = narwhals_to_native_dtype(dtype)
    return self._from_native_series(pc.cast(self._native_series, native_dtype))
|
| 439 |
+
|
| 440 |
+
def null_count(self: Self) -> int:
    """Number of null entries in the series."""
    return self._native_series.null_count  # type: ignore[no-any-return]
|
| 442 |
+
|
| 443 |
+
def head(self, n: int) -> Self:
    """First ``n`` rows; a negative ``n`` drops the last ``|n|`` rows."""
    ser = self._native_series
    if n < 0:
        return self._from_native_series(ser.slice(0, max(0, len(ser) + n)))
    return self._from_native_series(ser.slice(0, n))
|
| 450 |
+
|
| 451 |
+
def tail(self, n: int) -> Self:
    """Last ``n`` rows; a negative ``n`` drops the first ``|n|`` rows."""
    ser = self._native_series
    if n < 0:
        return self._from_native_series(ser.slice(abs(n)))
    return self._from_native_series(ser.slice(max(0, len(ser) - n)))
|
| 458 |
+
|
| 459 |
+
def is_in(self, other: Any) -> Self:
    """Element-wise membership test against the values in ``other``."""
    import pyarrow as pa  # ignore-banned-import()
    import pyarrow.compute as pc  # ignore-banned-import()

    return self._from_native_series(
        pc.is_in(self._native_series, value_set=pa.array(other))
    )
|
| 466 |
+
|
| 467 |
+
def arg_true(self) -> Self:
    """Positions at which the (boolean) series is True."""
    import numpy as np  # ignore-banned-import

    positions = np.flatnonzero(self._native_series)
    return self._from_iterable(
        positions, name=self.name, backend_version=self._backend_version
    )
|
| 475 |
+
|
| 476 |
+
def item(self: Self, index: int | None = None) -> Any:
    """Return one element; without an index the series must have length 1."""
    if index is not None:
        return self._native_series[index]
    if len(self) != 1:
        msg = (
            "can only call '.item()' if the Series is of length 1,"
            f" or an explicit index is provided (Series is of length {len(self)})"
        )
        raise ValueError(msg)
    return self._native_series[0]
|
| 486 |
+
|
| 487 |
+
def value_counts(
    self: Self,
    *,
    sort: bool = False,
    parallel: bool = False,
    name: str | None = None,
    normalize: bool = False,
) -> ArrowDataFrame:
    """Count occurrences of each distinct value.

    ``parallel`` is unused and exists only for API compatibility with
    other backends. With ``normalize=True`` counts are divided by the
    total to give proportions.
    """
    import pyarrow as pa  # ignore-banned-import()
    import pyarrow.compute as pc  # ignore-banned-import()

    from narwhals._arrow.dataframe import ArrowDataFrame

    index_name_ = "index" if self._name is None else self._name
    value_name_ = name or ("proportion" if normalize else "count")

    # pc.value_counts returns a struct array with "values" and "counts".
    struct_arr = pc.value_counts(self._native_series)
    values = struct_arr.field("values")
    counts = struct_arr.field("counts")

    if normalize:
        counts = pc.divide(*cast_for_truediv(counts, pc.sum(counts)))

    table = pa.Table.from_arrays([values, counts], names=[index_name_, value_name_])
    if sort:
        table = table.sort_by([(value_name_, "descending")])

    return ArrowDataFrame(table, backend_version=self._backend_version)
|
| 522 |
+
|
| 523 |
+
def zip_with(self: Self, mask: Self, other: Self) -> Self:
    """Element-wise: take from ``self`` where ``mask`` is True, else ``other``."""
    import pyarrow.compute as pc  # ignore-banned-import()

    condition = mask._native_series.combine_chunks()
    return self._from_native_series(
        pc.if_else(condition, self._native_series, other._native_series)
    )
|
| 534 |
+
|
| 535 |
+
def sample(
    self: Self,
    n: int | None = None,
    *,
    fraction: float | None = None,
    with_replacement: bool = False,
    seed: int | None = None,
) -> Self:
    """Randomly sample rows, either by count ``n`` or by ``fraction``."""
    import numpy as np  # ignore-banned-import
    import pyarrow.compute as pc  # ignore-banned-import()

    num_rows = len(self)
    if n is None and fraction is not None:
        n = int(num_rows * fraction)

    rng = np.random.default_rng(seed=seed)
    chosen = rng.choice(np.arange(0, num_rows), size=n, replace=with_replacement)
    return self._from_native_series(pc.take(self._native_series, chosen))
|
| 557 |
+
|
| 558 |
+
def fill_null(self: Self, value: Any) -> Self:
    """Replace nulls with ``value`` (wrapped as a scalar of the series' type)."""
    import pyarrow as pa  # ignore-banned-import()
    import pyarrow.compute as pc  # ignore-banned-import()

    ser = self._native_series
    return self._from_native_series(pc.fill_null(ser, pa.scalar(value, ser.type)))
|
| 566 |
+
|
| 567 |
+
def to_frame(self: Self) -> ArrowDataFrame:
    """Wrap the series as a single-column ArrowDataFrame."""
    import pyarrow as pa  # ignore-banned-import()

    from narwhals._arrow.dataframe import ArrowDataFrame

    table = pa.Table.from_arrays([self._native_series], names=[self.name])
    return ArrowDataFrame(table, backend_version=self._backend_version)
|
| 574 |
+
|
| 575 |
+
def to_pandas(self: Self) -> Any:
    """Convert to a pandas Series, preserving the name."""
    pd = get_pandas()
    return pd.Series(self._native_series, name=self.name)
|
| 578 |
+
|
| 579 |
+
def is_duplicated(self: Self) -> ArrowSeries:
    """Mask that is True where a value occurs more than once."""
    return self.to_frame().is_duplicated().alias(self.name)
|
| 581 |
+
|
| 582 |
+
def is_unique(self: Self) -> ArrowSeries:
    """Mask that is True where a value occurs exactly once."""
    return self.to_frame().is_unique().alias(self.name)
|
| 584 |
+
|
| 585 |
+
def is_first_distinct(self: Self) -> Self:
    """Mask that is True on the first occurrence of each value."""
    import numpy as np  # ignore-banned-import
    import pyarrow as pa  # ignore-banned-import()
    import pyarrow.compute as pc  # ignore-banned-import()

    row_number = pa.array(np.arange(len(self)))
    col_token = generate_unique_token(n_bytes=8, columns=[self.name])
    # Group rows by value and keep the smallest row number per group:
    # those are exactly the first occurrences.
    first_occurrences = (
        pa.Table.from_arrays([self._native_series], names=[self.name])
        .append_column(col_token, row_number)
        .group_by(self.name)
        .aggregate([(col_token, "min")])
        .column(f"{col_token}_min")
    )
    return self._from_native_series(pc.is_in(row_number, first_occurrences))
|
| 601 |
+
|
| 602 |
+
def is_last_distinct(self: Self) -> Self:
    """Mask that is True on the last occurrence of each value."""
    import numpy as np  # ignore-banned-import
    import pyarrow as pa  # ignore-banned-import()
    import pyarrow.compute as pc  # ignore-banned-import()

    row_number = pa.array(np.arange(len(self)))
    col_token = generate_unique_token(n_bytes=8, columns=[self.name])
    # Group rows by value and keep the largest row number per group:
    # those are exactly the last occurrences.
    last_occurrences = (
        pa.Table.from_arrays([self._native_series], names=[self.name])
        .append_column(col_token, row_number)
        .group_by(self.name)
        .aggregate([(col_token, "max")])
        .column(f"{col_token}_max")
    )
    return self._from_native_series(pc.is_in(row_number, last_occurrences))
|
| 618 |
+
|
| 619 |
+
def is_sorted(self: Self, *, descending: bool = False) -> bool:
    """Whether the series is monotonically sorted in the given direction."""
    if not isinstance(descending, bool):
        msg = f"argument 'descending' should be boolean, found {type(descending)}"
        raise TypeError(msg)
    import pyarrow.compute as pc  # ignore-banned-import()

    ser = self._native_series
    # Compare each element with its successor.
    compare = pc.greater_equal if descending else pc.less_equal
    return pc.all(compare(ser[:-1], ser[1:]))  # type: ignore[no-any-return]
|
| 630 |
+
|
| 631 |
+
def unique(self: Self) -> ArrowSeries:
    """Distinct values of the series."""
    import pyarrow.compute as pc  # ignore-banned-import()

    return self._from_native_series(pc.unique(self._native_series))
|
| 635 |
+
|
| 636 |
+
def sort(
    self: Self, *, descending: bool = False, nulls_last: bool = False
) -> ArrowSeries:
    """Return the values in sorted order, with configurable null placement."""
    import pyarrow.compute as pc  # ignore-banned-import()

    series = self._native_series
    indices = pc.array_sort_indices(
        series,
        order="descending" if descending else "ascending",
        null_placement="at_end" if nulls_last else "at_start",
    )
    return self._from_native_series(pc.take(series, indices))
|
| 649 |
+
|
| 650 |
+
def to_dummies(
    self: Self, *, separator: str = "_", drop_first: bool = False
) -> ArrowDataFrame:
    """One-hot encode the series into a dataframe of 0/1 columns."""
    import numpy as np  # ignore-banned-import
    import pyarrow as pa  # ignore-banned-import()

    from narwhals._arrow.dataframe import ArrowDataFrame

    encoded = self._native_series.dictionary_encode().combine_chunks()

    # One row per category, one column per element; pyarrow reads the
    # 2-D buffer back as one array per first axis, i.e. per category.
    indicator = np.zeros((len(encoded.dictionary), len(encoded)), np.uint8)
    indicator[encoded.indices, np.arange(len(encoded))] = 1
    names = [f"{self._name}{separator}{v}" for v in encoded.dictionary]

    return ArrowDataFrame(
        pa.Table.from_arrays(indicator, names=names),
        backend_version=self._backend_version,
    ).select(*sorted(names)[int(drop_first) :])
|
| 669 |
+
|
| 670 |
+
def quantile(
    self: Self,
    quantile: float,
    interpolation: Literal["nearest", "higher", "lower", "midpoint", "linear"],
) -> Any:
    """Quantile of the series under the given interpolation rule."""
    import pyarrow.compute as pc  # ignore-banned-import()

    result = pc.quantile(self._native_series, q=quantile, interpolation=interpolation)
    return result[0]
|
| 680 |
+
|
| 681 |
+
def gather_every(self: Self, n: int, offset: int = 0) -> Self:
    """Every ``n``-th element, starting at position ``offset``."""
    return self._from_native_series(self._native_series[offset::n])
|
| 683 |
+
|
| 684 |
+
def clip(
    self: Self, lower_bound: Any | None = None, upper_bound: Any | None = None
) -> Self:
    """Clamp values into ``[lower_bound, upper_bound]``.

    A ``None`` bound becomes a null scalar, which the element-wise
    min/max kernels skip by default, making that side a no-op.
    """
    import pyarrow as pa  # ignore-banned-import()
    import pyarrow.compute as pc  # ignore-banned-import()

    arr = self._native_series
    arr = pc.max_element_wise(arr, pa.scalar(lower_bound, type=arr.type))
    arr = pc.min_element_wise(arr, pa.scalar(upper_bound, type=arr.type))
    return self._from_native_series(arr)
|
| 695 |
+
|
| 696 |
+
def to_arrow(self: Self) -> pa.Array:
    """Return the underlying data as a single, chunk-combined pyarrow Array."""
    return self._native_series.combine_chunks()
|
| 698 |
+
|
| 699 |
+
def mode(self: Self) -> ArrowSeries:
    """Most frequent value(s), via value_counts filtered to the max count."""
    plx = self.__narwhals_namespace__()
    col_token = generate_unique_token(n_bytes=8, columns=[self.name])
    counts = self.value_counts(name=col_token, normalize=False)
    return counts.filter(plx.col(col_token) == plx.col(col_token).max())[self.name]
|
| 705 |
+
|
| 706 |
+
def __iter__(self: Self) -> Iterator[Any]:
    """Iterate over the elements of the native series."""
    yield from iter(self._native_series)
|
| 708 |
+
|
| 709 |
+
@property
def shape(self) -> tuple[int]:
    """Shape of the series as a 1-tuple ``(length,)``."""
    return (len(self._native_series),)
|
| 712 |
+
|
| 713 |
+
@property
def dt(self) -> ArrowSeriesDateTimeNamespace:
    """Datetime accessor namespace."""
    return ArrowSeriesDateTimeNamespace(self)
|
| 716 |
+
|
| 717 |
+
@property
def cat(self) -> ArrowSeriesCatNamespace:
    """Categorical accessor namespace."""
    return ArrowSeriesCatNamespace(self)
|
| 720 |
+
|
| 721 |
+
@property
def str(self) -> ArrowSeriesStringNamespace:
    """String accessor namespace."""
    return ArrowSeriesStringNamespace(self)
|
| 724 |
+
|
| 725 |
+
|
| 726 |
+
class ArrowSeriesDateTimeNamespace:
    """Datetime accessor (``.dt``) for ArrowSeries."""

    def __init__(self: Self, series: ArrowSeries) -> None:
        self._arrow_series = series

    def _wrap(self: Self, native: Any) -> ArrowSeries:
        # Re-wrap a native pyarrow result as an ArrowSeries.
        return self._arrow_series._from_native_series(native)

    def to_string(self: Self, format: str) -> ArrowSeries:  # noqa: A002
        """Format datetimes as strings with ``pc.strftime``."""
        import pyarrow.compute as pc  # ignore-banned-import()

        # PyArrow differs from other libraries in that %S also prints out
        # the fractional part of the second...:'(
        # https://arrow.apache.org/docs/python/generated/pyarrow.compute.strftime.html
        format = format.replace("%S.%f", "%S").replace("%S%.f", "%S")
        return self._wrap(pc.strftime(self._arrow_series._native_series, format))

    def date(self: Self) -> ArrowSeries:
        """Truncate to the calendar date (cast to ``date64``)."""
        import pyarrow as pa  # ignore-banned-import()

        return self._wrap(self._arrow_series._native_series.cast(pa.date64()))

    def year(self: Self) -> ArrowSeries:
        """Year component."""
        import pyarrow.compute as pc  # ignore-banned-import()

        return self._wrap(pc.year(self._arrow_series._native_series))

    def month(self: Self) -> ArrowSeries:
        """Month component."""
        import pyarrow.compute as pc  # ignore-banned-import()

        return self._wrap(pc.month(self._arrow_series._native_series))

    def day(self: Self) -> ArrowSeries:
        """Day-of-month component."""
        import pyarrow.compute as pc  # ignore-banned-import()

        return self._wrap(pc.day(self._arrow_series._native_series))

    def hour(self: Self) -> ArrowSeries:
        """Hour component."""
        import pyarrow.compute as pc  # ignore-banned-import()

        return self._wrap(pc.hour(self._arrow_series._native_series))

    def minute(self: Self) -> ArrowSeries:
        """Minute component."""
        import pyarrow.compute as pc  # ignore-banned-import()

        return self._wrap(pc.minute(self._arrow_series._native_series))

    def second(self: Self) -> ArrowSeries:
        """Second component."""
        import pyarrow.compute as pc  # ignore-banned-import()

        return self._wrap(pc.second(self._arrow_series._native_series))

    def millisecond(self: Self) -> ArrowSeries:
        """Millisecond component."""
        import pyarrow.compute as pc  # ignore-banned-import()

        return self._wrap(pc.millisecond(self._arrow_series._native_series))

    def microsecond(self: Self) -> ArrowSeries:
        """Microseconds within the second, including the millisecond part."""
        import pyarrow.compute as pc  # ignore-banned-import()

        arr = self._arrow_series._native_series
        return self._wrap(
            pc.add(pc.multiply(pc.millisecond(arr), 1000), pc.microsecond(arr))
        )

    def nanosecond(self: Self) -> ArrowSeries:
        """Nanoseconds within the second, including micro/millisecond parts."""
        import pyarrow.compute as pc  # ignore-banned-import()

        arr = self._arrow_series._native_series
        return self._wrap(
            pc.add(
                pc.multiply(self.microsecond()._native_series, 1000),
                pc.nanosecond(arr),
            )
        )

    def ordinal_day(self: Self) -> ArrowSeries:
        """Day of the year, via ``pc.day_of_year``."""
        import pyarrow.compute as pc  # ignore-banned-import()

        return self._wrap(pc.day_of_year(self._arrow_series._native_series))

    def total_minutes(self: Self) -> ArrowSeries:
        """Duration expressed as whole minutes."""
        import pyarrow as pa  # ignore-banned-import()
        import pyarrow.compute as pc  # ignore-banned-import()

        arr = self._arrow_series._native_series
        to_minutes = {"s": 60, "ms": 60 * 1e3, "us": 60 * 1e6, "ns": 60 * 1e9}
        factor = pa.scalar(to_minutes[arr.type.unit], type=pa.int64())
        return self._wrap(pc.cast(pc.divide(arr, factor), pa.int64()))

    def total_seconds(self: Self) -> ArrowSeries:
        """Duration expressed as whole seconds."""
        import pyarrow as pa  # ignore-banned-import()
        import pyarrow.compute as pc  # ignore-banned-import()

        arr = self._arrow_series._native_series
        to_seconds = {"s": 1, "ms": 1e3, "us": 1e6, "ns": 1e9}
        factor = pa.scalar(to_seconds[arr.type.unit], type=pa.int64())
        return self._wrap(pc.cast(pc.divide(arr, factor), pa.int64()))

    def total_milliseconds(self: Self) -> ArrowSeries:
        """Duration expressed as whole milliseconds."""
        import pyarrow as pa  # ignore-banned-import()
        import pyarrow.compute as pc  # ignore-banned-import()

        arr = self._arrow_series._native_series
        unit = arr.type.unit
        to_milli = {"s": 1e3, "ms": 1, "us": 1e3, "ns": 1e6}
        factor = pa.scalar(to_milli[unit], type=pa.int64())
        # Coarser units are scaled up; finer units are scaled down.
        op = pc.multiply if unit == "s" else pc.divide
        return self._wrap(pc.cast(op(arr, factor), pa.int64()))

    def total_microseconds(self: Self) -> ArrowSeries:
        """Duration expressed as whole microseconds."""
        import pyarrow as pa  # ignore-banned-import()
        import pyarrow.compute as pc  # ignore-banned-import()

        arr = self._arrow_series._native_series
        unit = arr.type.unit
        to_micro = {"s": 1e6, "ms": 1e3, "us": 1, "ns": 1e3}
        factor = pa.scalar(to_micro[unit], type=pa.int64())
        # Coarser units are scaled up; finer units are scaled down.
        op = pc.multiply if unit in {"s", "ms"} else pc.divide
        return self._wrap(pc.cast(op(arr, factor), pa.int64()))

    def total_nanoseconds(self: Self) -> ArrowSeries:
        """Duration expressed as whole nanoseconds."""
        import pyarrow as pa  # ignore-banned-import()
        import pyarrow.compute as pc  # ignore-banned-import()

        arr = self._arrow_series._native_series
        to_nano = {"s": 1e9, "ms": 1e6, "us": 1e3, "ns": 1}
        factor = pa.scalar(to_nano[arr.type.unit], type=pa.int64())
        return self._wrap(pc.cast(pc.multiply(arr, factor), pa.int64()))
|
| 927 |
+
|
| 928 |
+
|
| 929 |
+
class ArrowSeriesCatNamespace:
    """Categorical accessor (``.cat``) for ArrowSeries."""

    def __init__(self, series: ArrowSeries) -> None:
        self._arrow_series = series

    def get_categories(self) -> ArrowSeries:
        """Unique dictionary values across all chunks."""
        import pyarrow as pa  # ignore-banned-import()

        ca = self._arrow_series._native_series
        # TODO(Unassigned): this looks potentially expensive - is there no better way?
        # https://github.com/narwhals-dev/narwhals/issues/464
        dictionaries = [chunk.dictionary for chunk in ca.chunks]
        out = pa.chunked_array([pa.concat_arrays(dictionaries).unique()])
        return self._arrow_series._from_native_series(out)
|
| 943 |
+
|
| 944 |
+
|
| 945 |
+
class ArrowSeriesStringNamespace:
|
| 946 |
+
def __init__(self: Self, series: ArrowSeries) -> None:
|
| 947 |
+
self._arrow_series = series
|
| 948 |
+
|
| 949 |
+
def len_chars(self) -> ArrowSeries:
|
| 950 |
+
import pyarrow.compute as pc # ignore-banned-import()
|
| 951 |
+
|
| 952 |
+
return self._arrow_series._from_native_series(
|
| 953 |
+
pc.utf8_length(self._arrow_series._native_series)
|
| 954 |
+
)
|
| 955 |
+
|
| 956 |
+
def replace(
|
| 957 |
+
self, pattern: str, value: str, *, literal: bool = False, n: int = 1
|
| 958 |
+
) -> ArrowSeries:
|
| 959 |
+
import pyarrow.compute as pc # ignore-banned-import()
|
| 960 |
+
|
| 961 |
+
method = "replace_substring" if literal else "replace_substring_regex"
|
| 962 |
+
return self._arrow_series._from_native_series(
|
| 963 |
+
getattr(pc, method)(
|
| 964 |
+
self._arrow_series._native_series,
|
| 965 |
+
pattern=pattern,
|
| 966 |
+
replacement=value,
|
| 967 |
+
max_replacements=n,
|
| 968 |
+
)
|
| 969 |
+
)
|
| 970 |
+
|
| 971 |
+
def replace_all(
|
| 972 |
+
self, pattern: str, value: str, *, literal: bool = False
|
| 973 |
+
) -> ArrowSeries:
|
| 974 |
+
return self.replace(pattern, value, literal=literal, n=-1)
|
| 975 |
+
|
| 976 |
+
def strip_chars(self: Self, characters: str | None = None) -> ArrowSeries:
|
| 977 |
+
import pyarrow.compute as pc # ignore-banned-import()
|
| 978 |
+
|
| 979 |
+
whitespace = " \t\n\r\v\f"
|
| 980 |
+
return self._arrow_series._from_native_series(
|
| 981 |
+
pc.utf8_trim(
|
| 982 |
+
self._arrow_series._native_series,
|
| 983 |
+
characters or whitespace,
|
| 984 |
+
)
|
| 985 |
+
)
|
| 986 |
+
|
| 987 |
+
def starts_with(self: Self, prefix: str) -> ArrowSeries:
|
| 988 |
+
import pyarrow.compute as pc # ignore-banned-import()
|
| 989 |
+
|
| 990 |
+
return self._arrow_series._from_native_series(
|
| 991 |
+
pc.equal(self.slice(0, len(prefix))._native_series, prefix)
|
| 992 |
+
)
|
| 993 |
+
|
| 994 |
+
def ends_with(self: Self, suffix: str) -> ArrowSeries:
|
| 995 |
+
import pyarrow.compute as pc # ignore-banned-import()
|
| 996 |
+
|
| 997 |
+
return self._arrow_series._from_native_series(
|
| 998 |
+
pc.equal(self.slice(-len(suffix))._native_series, suffix)
|
| 999 |
+
)
|
| 1000 |
+
|
| 1001 |
+
def contains(self: Self, pattern: str, *, literal: bool = False) -> ArrowSeries:
|
| 1002 |
+
import pyarrow.compute as pc # ignore-banned-import()
|
| 1003 |
+
|
| 1004 |
+
check_func = pc.match_substring if literal else pc.match_substring_regex
|
| 1005 |
+
return self._arrow_series._from_native_series(
|
| 1006 |
+
check_func(self._arrow_series._native_series, pattern)
|
| 1007 |
+
)
|
| 1008 |
+
|
| 1009 |
+
def slice(self: Self, offset: int, length: int | None = None) -> ArrowSeries:
|
| 1010 |
+
import pyarrow.compute as pc # ignore-banned-import()
|
| 1011 |
+
|
| 1012 |
+
stop = offset + length if length else None
|
| 1013 |
+
return self._arrow_series._from_native_series(
|
| 1014 |
+
pc.utf8_slice_codeunits(
|
| 1015 |
+
self._arrow_series._native_series, start=offset, stop=stop
|
| 1016 |
+
),
|
| 1017 |
+
)
|
| 1018 |
+
|
| 1019 |
+
def to_datetime(self: Self, format: str | None = None) -> ArrowSeries:  # noqa: A002
    """Parse strings into microsecond-precision timestamps using `format`."""
    import pyarrow.compute as pc  # ignore-banned-import()

    parsed = pc.strptime(self._arrow_series._native_series, format=format, unit="us")
    return self._arrow_series._from_native_series(parsed)
|
| 1025 |
+
|
| 1026 |
+
def to_uppercase(self: Self) -> ArrowSeries:
    """Convert each string element to upper case."""
    import pyarrow.compute as pc  # ignore-banned-import()

    native = self._arrow_series._native_series
    return self._arrow_series._from_native_series(pc.utf8_upper(native))
|
| 1032 |
+
|
| 1033 |
+
def to_lowercase(self: Self) -> ArrowSeries:
    """Convert each string element to lower case."""
    import pyarrow.compute as pc  # ignore-banned-import()

    native = self._arrow_series._native_series
    return self._arrow_series._from_native_series(pc.utf8_lower(native))
|
parrot/lib/python3.10/site-packages/narwhals/_arrow/typing.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations # pragma: no cover
|
| 2 |
+
|
| 3 |
+
from typing import TYPE_CHECKING # pragma: no cover
|
| 4 |
+
from typing import Union # pragma: no cover
|
| 5 |
+
|
| 6 |
+
if TYPE_CHECKING:
|
| 7 |
+
import sys
|
| 8 |
+
|
| 9 |
+
if sys.version_info >= (3, 10):
|
| 10 |
+
from typing import TypeAlias
|
| 11 |
+
else:
|
| 12 |
+
from typing_extensions import TypeAlias
|
| 13 |
+
|
| 14 |
+
from narwhals._arrow.expr import ArrowExpr
|
| 15 |
+
from narwhals._arrow.series import ArrowSeries
|
| 16 |
+
|
| 17 |
+
IntoArrowExpr: TypeAlias = Union[ArrowExpr, str, int, float, ArrowSeries]
|
parrot/lib/python3.10/site-packages/narwhals/_dask/__init__.py
ADDED
|
File without changes
|
parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (166 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/dataframe.cpython-310.pyc
ADDED
|
Binary file (11.3 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/expr.cpython-310.pyc
ADDED
|
Binary file (37.6 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/group_by.cpython-310.pyc
ADDED
|
Binary file (4.47 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/namespace.cpython-310.pyc
ADDED
|
Binary file (11.6 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/selectors.cpython-310.pyc
ADDED
|
Binary file (6.54 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/typing.cpython-310.pyc
ADDED
|
Binary file (536 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (3.53 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/narwhals/_dask/dataframe.py
ADDED
|
@@ -0,0 +1,357 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import TYPE_CHECKING
|
| 4 |
+
from typing import Any
|
| 5 |
+
from typing import Iterable
|
| 6 |
+
from typing import Literal
|
| 7 |
+
from typing import Sequence
|
| 8 |
+
|
| 9 |
+
from narwhals._dask.utils import add_row_index
|
| 10 |
+
from narwhals._dask.utils import parse_exprs_and_named_exprs
|
| 11 |
+
from narwhals._pandas_like.utils import translate_dtype
|
| 12 |
+
from narwhals.dependencies import get_dask_dataframe
|
| 13 |
+
from narwhals.dependencies import get_pandas
|
| 14 |
+
from narwhals.utils import Implementation
|
| 15 |
+
from narwhals.utils import flatten
|
| 16 |
+
from narwhals.utils import generate_unique_token
|
| 17 |
+
from narwhals.utils import parse_columns_to_drop
|
| 18 |
+
from narwhals.utils import parse_version
|
| 19 |
+
|
| 20 |
+
if TYPE_CHECKING:
|
| 21 |
+
import dask.dataframe as dd
|
| 22 |
+
from typing_extensions import Self
|
| 23 |
+
|
| 24 |
+
from narwhals._dask.expr import DaskExpr
|
| 25 |
+
from narwhals._dask.group_by import DaskLazyGroupBy
|
| 26 |
+
from narwhals._dask.namespace import DaskNamespace
|
| 27 |
+
from narwhals._dask.typing import IntoDaskExpr
|
| 28 |
+
from narwhals.dtypes import DType
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class DaskLazyFrame:
|
| 32 |
+
def __init__(
|
| 33 |
+
self, native_dataframe: dd.DataFrame, *, backend_version: tuple[int, ...]
|
| 34 |
+
) -> None:
|
| 35 |
+
self._native_frame = native_dataframe
|
| 36 |
+
self._backend_version = backend_version
|
| 37 |
+
self._implementation = Implementation.DASK
|
| 38 |
+
|
| 39 |
+
def __native_namespace__(self) -> Any: # pragma: no cover
|
| 40 |
+
return get_dask_dataframe()
|
| 41 |
+
|
| 42 |
+
def __narwhals_namespace__(self) -> DaskNamespace:
|
| 43 |
+
from narwhals._dask.namespace import DaskNamespace
|
| 44 |
+
|
| 45 |
+
return DaskNamespace(backend_version=self._backend_version)
|
| 46 |
+
|
| 47 |
+
def __narwhals_lazyframe__(self) -> Self:
|
| 48 |
+
return self
|
| 49 |
+
|
| 50 |
+
def _from_native_frame(self, df: Any) -> Self:
|
| 51 |
+
return self.__class__(df, backend_version=self._backend_version)
|
| 52 |
+
|
| 53 |
+
def with_columns(self, *exprs: DaskExpr, **named_exprs: DaskExpr) -> Self:
    """Return a new frame with the given expressions added/replaced as columns."""
    new_columns = parse_exprs_and_named_exprs(self, *exprs, **named_exprs)
    return self._from_native_frame(self._native_frame.assign(**new_columns))
|
| 58 |
+
|
| 59 |
+
def collect(self) -> Any:
|
| 60 |
+
from narwhals._pandas_like.dataframe import PandasLikeDataFrame
|
| 61 |
+
|
| 62 |
+
result = self._native_frame.compute()
|
| 63 |
+
return PandasLikeDataFrame(
|
| 64 |
+
result,
|
| 65 |
+
implementation=Implementation.PANDAS,
|
| 66 |
+
backend_version=parse_version(get_pandas().__version__),
|
| 67 |
+
)
|
| 68 |
+
|
| 69 |
+
@property
|
| 70 |
+
def columns(self) -> list[str]:
|
| 71 |
+
return self._native_frame.columns.tolist() # type: ignore[no-any-return]
|
| 72 |
+
|
| 73 |
+
def filter(
    self,
    *predicates: DaskExpr,
) -> Self:
    """Keep only rows for which every predicate expression evaluates True.

    Boolean-mask filtering (a plain Python list of bools) is explicitly
    rejected for the Dask backend: a concrete mask cannot be aligned against
    a lazy, partitioned frame.
    """
    if (
        len(predicates) == 1
        and isinstance(predicates[0], list)
        and all(isinstance(x, bool) for x in predicates[0])
    ):
        msg = (
            "`LazyFrame.filter` is not supported for Dask backend with boolean masks."
        )
        raise NotImplementedError(msg)

    from narwhals._dask.namespace import DaskNamespace

    plx = DaskNamespace(backend_version=self._backend_version)
    # Combine all predicates into a single row-wise AND expression.
    expr = plx.all_horizontal(*predicates)
    # Safety: all_horizontal's expression only returns a single column.
    mask = expr._call(self)[0]
    return self._from_native_frame(self._native_frame.loc[mask])
|
| 94 |
+
|
| 95 |
+
def lazy(self) -> Self:
|
| 96 |
+
return self
|
| 97 |
+
|
| 98 |
+
def select(
    self: Self,
    *exprs: IntoDaskExpr,
    **named_exprs: IntoDaskExpr,
) -> Self:
    """Evaluate expressions and return a frame containing only their results.

    Fastpath: when every positional argument is a plain column name, this is
    a simple column projection with no expression evaluation.
    """
    import dask.dataframe as dd  # ignore-banned-import

    if exprs and all(isinstance(x, str) for x in exprs) and not named_exprs:
        # Simple slice => fastpath. Pass a *list* of labels: `exprs` is a
        # tuple, and `.loc[:, tuple]` can be misinterpreted by pandas/dask
        # (e.g. as a MultiIndex key) rather than as a column selection.
        return self._from_native_frame(self._native_frame.loc[:, list(exprs)])

    new_series = parse_exprs_and_named_exprs(self, *exprs, **named_exprs)

    if not new_series:
        # return empty dataframe, like Polars does
        import pandas as pd  # ignore-banned-import

        return self._from_native_frame(
            dd.from_pandas(pd.DataFrame(), npartitions=self._native_frame.npartitions)
        )

    if all(getattr(expr, "_returns_scalar", False) for expr in exprs) and all(
        getattr(val, "_returns_scalar", False) for val in named_exprs.values()
    ):
        # Every expression aggregates to a scalar: build a one-row frame by
        # concatenating the scalar results side-by-side.
        df = dd.concat(
            [val.to_series().rename(name) for name, val in new_series.items()], axis=1
        )
        return self._from_native_frame(df)

    # General case: materialize the new columns, then project to just them.
    df = self._native_frame.assign(**new_series).loc[:, list(new_series.keys())]
    return self._from_native_frame(df)
|
| 129 |
+
|
| 130 |
+
def drop_nulls(self: Self, subset: str | list[str] | None) -> Self:
    """Drop rows containing nulls, optionally restricted to `subset` columns."""
    if subset is None:
        return self._from_native_frame(self._native_frame.dropna())
    columns = [subset] if isinstance(subset, str) else subset
    plx = self.__narwhals_namespace__()
    # Keep rows where none of the subset columns is null.
    return self.filter(~plx.any_horizontal(plx.col(*columns).is_null()))
|
| 136 |
+
|
| 137 |
+
@property
|
| 138 |
+
def schema(self) -> dict[str, DType]:
|
| 139 |
+
return {
|
| 140 |
+
col: translate_dtype(self._native_frame.loc[:, col])
|
| 141 |
+
for col in self._native_frame.columns
|
| 142 |
+
}
|
| 143 |
+
|
| 144 |
+
def collect_schema(self) -> dict[str, DType]:
|
| 145 |
+
return self.schema
|
| 146 |
+
|
| 147 |
+
def drop(self: Self, columns: list[str], strict: bool) -> Self: # noqa: FBT001
|
| 148 |
+
to_drop = parse_columns_to_drop(
|
| 149 |
+
compliant_frame=self, columns=columns, strict=strict
|
| 150 |
+
)
|
| 151 |
+
|
| 152 |
+
return self._from_native_frame(self._native_frame.drop(columns=to_drop))
|
| 153 |
+
|
| 154 |
+
def with_row_index(self: Self, name: str) -> Self:
|
| 155 |
+
# Implementation is based on the following StackOverflow reply:
|
| 156 |
+
# https://stackoverflow.com/questions/60831518/in-dask-how-does-one-add-a-range-of-integersauto-increment-to-a-new-column/60852409#60852409
|
| 157 |
+
return self._from_native_frame(add_row_index(self._native_frame, name))
|
| 158 |
+
|
| 159 |
+
def rename(self: Self, mapping: dict[str, str]) -> Self:
|
| 160 |
+
return self._from_native_frame(self._native_frame.rename(columns=mapping))
|
| 161 |
+
|
| 162 |
+
def head(self: Self, n: int) -> Self:
    """Return the first `n` rows lazily, drawing from all partitions."""
    result = self._native_frame.head(n=n, compute=False, npartitions=-1)
    return self._from_native_frame(result)
|
| 166 |
+
|
| 167 |
+
def unique(
    self: Self,
    subset: str | list[str] | None,
    *,
    keep: Literal["any", "first", "last", "none"] = "any",
    maintain_order: bool = False,
) -> Self:
    """
    Drop duplicate rows, optionally considering only `subset` columns.

    NOTE:
        The param `maintain_order` is only here for compatibility with the polars API
        and has no effect on the output.
    """
    subset = flatten(subset) if subset else None
    native_frame = self._native_frame
    if keep == "none":
        # Keep only rows whose key occurs exactly once: compute group sizes
        # under a collision-free temporary column name, keep singleton keys,
        # then inner-join them back against the original frame.
        subset = subset or self.columns
        token = generate_unique_token(n_bytes=8, columns=subset)
        ser = native_frame.groupby(subset).size().rename(token)
        ser = ser.loc[ser == 1]
        unique = ser.reset_index().drop(columns=token)
        result = native_frame.merge(unique, on=subset, how="inner")
    else:
        # "any" has no direct dask counterpart; "first" is an acceptable stand-in.
        mapped_keep = {"any": "first"}.get(keep, keep)
        result = native_frame.drop_duplicates(subset=subset, keep=mapped_keep)
    return self._from_native_frame(result)
|
| 192 |
+
|
| 193 |
+
def sort(
    self: Self,
    by: str | Iterable[str],
    *more_by: str,
    descending: bool | Sequence[bool] = False,
) -> Self:
    """Sort by one or more columns; `descending` may be a scalar or per-key sequence."""
    keys = flatten([*flatten([by]), *more_by])
    # dask uses `ascending`, so invert the polars-style `descending` flag(s).
    if isinstance(descending, bool):
        ascending: bool | list[bool] = not descending
    else:
        ascending = [not flag for flag in descending]
    sorted_frame = self._native_frame.sort_values(keys, ascending=ascending)
    return self._from_native_frame(sorted_frame)
|
| 206 |
+
|
| 207 |
+
def join(
|
| 208 |
+
self: Self,
|
| 209 |
+
other: Self,
|
| 210 |
+
*,
|
| 211 |
+
how: Literal["left", "inner", "outer", "cross", "anti", "semi"] = "inner",
|
| 212 |
+
left_on: str | list[str] | None,
|
| 213 |
+
right_on: str | list[str] | None,
|
| 214 |
+
suffix: str,
|
| 215 |
+
) -> Self:
|
| 216 |
+
if isinstance(left_on, str):
|
| 217 |
+
left_on = [left_on]
|
| 218 |
+
if isinstance(right_on, str):
|
| 219 |
+
right_on = [right_on]
|
| 220 |
+
if how == "cross":
|
| 221 |
+
key_token = generate_unique_token(
|
| 222 |
+
n_bytes=8, columns=[*self.columns, *other.columns]
|
| 223 |
+
)
|
| 224 |
+
|
| 225 |
+
return self._from_native_frame(
|
| 226 |
+
self._native_frame.assign(**{key_token: 0})
|
| 227 |
+
.merge(
|
| 228 |
+
other._native_frame.assign(**{key_token: 0}),
|
| 229 |
+
how="inner",
|
| 230 |
+
left_on=key_token,
|
| 231 |
+
right_on=key_token,
|
| 232 |
+
suffixes=("", suffix),
|
| 233 |
+
)
|
| 234 |
+
.drop(columns=key_token),
|
| 235 |
+
)
|
| 236 |
+
|
| 237 |
+
if how == "anti":
|
| 238 |
+
indicator_token = generate_unique_token(
|
| 239 |
+
n_bytes=8, columns=[*self.columns, *other.columns]
|
| 240 |
+
)
|
| 241 |
+
|
| 242 |
+
other_native = (
|
| 243 |
+
other._native_frame.loc[:, right_on]
|
| 244 |
+
.rename( # rename to avoid creating extra columns in join
|
| 245 |
+
columns=dict(zip(right_on, left_on)) # type: ignore[arg-type]
|
| 246 |
+
)
|
| 247 |
+
.drop_duplicates()
|
| 248 |
+
)
|
| 249 |
+
df = self._native_frame.merge(
|
| 250 |
+
other_native,
|
| 251 |
+
how="outer",
|
| 252 |
+
indicator=indicator_token,
|
| 253 |
+
left_on=left_on,
|
| 254 |
+
right_on=left_on,
|
| 255 |
+
)
|
| 256 |
+
return self._from_native_frame(
|
| 257 |
+
df.loc[df[indicator_token] == "left_only"].drop(columns=[indicator_token])
|
| 258 |
+
)
|
| 259 |
+
|
| 260 |
+
if how == "semi":
|
| 261 |
+
other_native = (
|
| 262 |
+
other._native_frame.loc[:, right_on]
|
| 263 |
+
.rename( # rename to avoid creating extra columns in join
|
| 264 |
+
columns=dict(zip(right_on, left_on)) # type: ignore[arg-type]
|
| 265 |
+
)
|
| 266 |
+
.drop_duplicates() # avoids potential rows duplication from inner join
|
| 267 |
+
)
|
| 268 |
+
return self._from_native_frame(
|
| 269 |
+
self._native_frame.merge(
|
| 270 |
+
other_native,
|
| 271 |
+
how="inner",
|
| 272 |
+
left_on=left_on,
|
| 273 |
+
right_on=left_on,
|
| 274 |
+
)
|
| 275 |
+
)
|
| 276 |
+
|
| 277 |
+
if how == "left":
|
| 278 |
+
other_native = other._native_frame
|
| 279 |
+
result_native = self._native_frame.merge(
|
| 280 |
+
other_native,
|
| 281 |
+
how="left",
|
| 282 |
+
left_on=left_on,
|
| 283 |
+
right_on=right_on,
|
| 284 |
+
suffixes=("", suffix),
|
| 285 |
+
)
|
| 286 |
+
extra = []
|
| 287 |
+
for left_key, right_key in zip(left_on, right_on): # type: ignore[arg-type]
|
| 288 |
+
if right_key != left_key and right_key not in self.columns:
|
| 289 |
+
extra.append(right_key)
|
| 290 |
+
elif right_key != left_key:
|
| 291 |
+
extra.append(f"{right_key}_right")
|
| 292 |
+
return self._from_native_frame(result_native.drop(columns=extra))
|
| 293 |
+
|
| 294 |
+
return self._from_native_frame(
|
| 295 |
+
self._native_frame.merge(
|
| 296 |
+
other._native_frame,
|
| 297 |
+
left_on=left_on,
|
| 298 |
+
right_on=right_on,
|
| 299 |
+
how=how,
|
| 300 |
+
suffixes=("", suffix),
|
| 301 |
+
),
|
| 302 |
+
)
|
| 303 |
+
|
| 304 |
+
def join_asof(
|
| 305 |
+
self,
|
| 306 |
+
other: Self,
|
| 307 |
+
*,
|
| 308 |
+
left_on: str | None = None,
|
| 309 |
+
right_on: str | None = None,
|
| 310 |
+
on: str | None = None,
|
| 311 |
+
by_left: str | list[str] | None = None,
|
| 312 |
+
by_right: str | list[str] | None = None,
|
| 313 |
+
by: str | list[str] | None = None,
|
| 314 |
+
strategy: Literal["backward", "forward", "nearest"] = "backward",
|
| 315 |
+
) -> Self:
|
| 316 |
+
plx = self.__native_namespace__()
|
| 317 |
+
return self._from_native_frame(
|
| 318 |
+
plx.merge_asof(
|
| 319 |
+
self._native_frame,
|
| 320 |
+
other._native_frame,
|
| 321 |
+
left_on=left_on,
|
| 322 |
+
right_on=right_on,
|
| 323 |
+
on=on,
|
| 324 |
+
left_by=by_left,
|
| 325 |
+
right_by=by_right,
|
| 326 |
+
by=by,
|
| 327 |
+
direction=strategy,
|
| 328 |
+
suffixes=("", "_right"),
|
| 329 |
+
),
|
| 330 |
+
)
|
| 331 |
+
|
| 332 |
+
def group_by(self, *by: str) -> DaskLazyGroupBy:
|
| 333 |
+
from narwhals._dask.group_by import DaskLazyGroupBy
|
| 334 |
+
|
| 335 |
+
return DaskLazyGroupBy(self, list(by))
|
| 336 |
+
|
| 337 |
+
def tail(self: Self, n: int) -> Self:
    """Return the last `n` rows; only supported for single-partition frames."""
    frame = self._native_frame
    if frame.npartitions == 1:
        return self._from_native_frame(frame.tail(n=n, compute=False))
    msg = "`LazyFrame.tail` is not supported for Dask backend with multiple partitions."
    raise NotImplementedError(msg)
|
| 346 |
+
|
| 347 |
+
def gather_every(self: Self, n: int, offset: int) -> Self:
    """Take every `n`-th row starting at `offset`.

    Implemented by adding a temporary row-index column (under a
    collision-free name), filtering on it, then dropping it again.
    """
    row_index_token = generate_unique_token(n_bytes=8, columns=self.columns)
    pln = self.__narwhals_namespace__()
    return (
        self.with_row_index(name=row_index_token)
        .filter(
            pln.col(row_index_token) >= offset,  # type: ignore[operator]
            (pln.col(row_index_token) - offset) % n == 0,  # type: ignore[arg-type]
        )
        .drop([row_index_token], strict=False)
    )
|
parrot/lib/python3.10/site-packages/narwhals/_dask/namespace.py
ADDED
|
@@ -0,0 +1,331 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from functools import reduce
|
| 4 |
+
from typing import TYPE_CHECKING
|
| 5 |
+
from typing import Any
|
| 6 |
+
from typing import Callable
|
| 7 |
+
from typing import Iterable
|
| 8 |
+
from typing import NoReturn
|
| 9 |
+
from typing import cast
|
| 10 |
+
|
| 11 |
+
from narwhals import dtypes
|
| 12 |
+
from narwhals._dask.dataframe import DaskLazyFrame
|
| 13 |
+
from narwhals._dask.expr import DaskExpr
|
| 14 |
+
from narwhals._dask.selectors import DaskSelectorNamespace
|
| 15 |
+
from narwhals._dask.utils import reverse_translate_dtype
|
| 16 |
+
from narwhals._dask.utils import validate_comparand
|
| 17 |
+
from narwhals._expression_parsing import parse_into_exprs
|
| 18 |
+
|
| 19 |
+
if TYPE_CHECKING:
|
| 20 |
+
import dask_expr
|
| 21 |
+
|
| 22 |
+
from narwhals._dask.typing import IntoDaskExpr
|
| 23 |
+
from narwhals.dtypes import DType
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class DaskNamespace:
|
| 27 |
+
Int64 = dtypes.Int64
|
| 28 |
+
Int32 = dtypes.Int32
|
| 29 |
+
Int16 = dtypes.Int16
|
| 30 |
+
Int8 = dtypes.Int8
|
| 31 |
+
UInt64 = dtypes.UInt64
|
| 32 |
+
UInt32 = dtypes.UInt32
|
| 33 |
+
UInt16 = dtypes.UInt16
|
| 34 |
+
UInt8 = dtypes.UInt8
|
| 35 |
+
Float64 = dtypes.Float64
|
| 36 |
+
Float32 = dtypes.Float32
|
| 37 |
+
Boolean = dtypes.Boolean
|
| 38 |
+
Object = dtypes.Object
|
| 39 |
+
Unknown = dtypes.Unknown
|
| 40 |
+
Categorical = dtypes.Categorical
|
| 41 |
+
Enum = dtypes.Enum
|
| 42 |
+
String = dtypes.String
|
| 43 |
+
Datetime = dtypes.Datetime
|
| 44 |
+
Duration = dtypes.Duration
|
| 45 |
+
Date = dtypes.Date
|
| 46 |
+
|
| 47 |
+
@property
|
| 48 |
+
def selectors(self) -> DaskSelectorNamespace:
|
| 49 |
+
return DaskSelectorNamespace(backend_version=self._backend_version)
|
| 50 |
+
|
| 51 |
+
def __init__(self, *, backend_version: tuple[int, ...]) -> None:
|
| 52 |
+
self._backend_version = backend_version
|
| 53 |
+
|
| 54 |
+
def all(self) -> DaskExpr:
|
| 55 |
+
def func(df: DaskLazyFrame) -> list[Any]:
|
| 56 |
+
return [df._native_frame.loc[:, column_name] for column_name in df.columns]
|
| 57 |
+
|
| 58 |
+
return DaskExpr(
|
| 59 |
+
func,
|
| 60 |
+
depth=0,
|
| 61 |
+
function_name="all",
|
| 62 |
+
root_names=None,
|
| 63 |
+
output_names=None,
|
| 64 |
+
returns_scalar=False,
|
| 65 |
+
backend_version=self._backend_version,
|
| 66 |
+
)
|
| 67 |
+
|
| 68 |
+
def col(self, *column_names: str) -> DaskExpr:
|
| 69 |
+
return DaskExpr.from_column_names(
|
| 70 |
+
*column_names,
|
| 71 |
+
backend_version=self._backend_version,
|
| 72 |
+
)
|
| 73 |
+
|
| 74 |
+
def lit(self, value: Any, dtype: dtypes.DType | None) -> DaskExpr:
|
| 75 |
+
def convert_if_dtype(
|
| 76 |
+
series: dask_expr.Series, dtype: DType | type[DType]
|
| 77 |
+
) -> dask_expr.Series:
|
| 78 |
+
return series.astype(reverse_translate_dtype(dtype)) if dtype else series
|
| 79 |
+
|
| 80 |
+
return DaskExpr(
|
| 81 |
+
lambda df: [
|
| 82 |
+
df._native_frame.assign(lit=value)
|
| 83 |
+
.loc[:, "lit"]
|
| 84 |
+
.pipe(convert_if_dtype, dtype)
|
| 85 |
+
],
|
| 86 |
+
depth=0,
|
| 87 |
+
function_name="lit",
|
| 88 |
+
root_names=None,
|
| 89 |
+
output_names=["lit"],
|
| 90 |
+
returns_scalar=False,
|
| 91 |
+
backend_version=self._backend_version,
|
| 92 |
+
)
|
| 93 |
+
|
| 94 |
+
def min(self, *column_names: str) -> DaskExpr:
    # Column-wise minimum of the named columns.
    return DaskExpr.from_column_names(
        *column_names,
        backend_version=self._backend_version,
    ).min()

def max(self, *column_names: str) -> DaskExpr:
    # Column-wise maximum of the named columns.
    return DaskExpr.from_column_names(
        *column_names,
        backend_version=self._backend_version,
    ).max()

def mean(self, *column_names: str) -> DaskExpr:
    # Column-wise mean of the named columns.
    return DaskExpr.from_column_names(
        *column_names,
        backend_version=self._backend_version,
    ).mean()

def sum(self, *column_names: str) -> DaskExpr:
    # Column-wise sum of the named columns.
    return DaskExpr.from_column_names(
        *column_names,
        backend_version=self._backend_version,
    ).sum()
|
| 117 |
+
|
| 118 |
+
def len(self) -> DaskExpr:
|
| 119 |
+
import dask.dataframe as dd # ignore-banned-import
|
| 120 |
+
import pandas as pd # ignore-banned-import
|
| 121 |
+
|
| 122 |
+
def func(df: DaskLazyFrame) -> list[Any]:
|
| 123 |
+
if not df.columns:
|
| 124 |
+
return [
|
| 125 |
+
dd.from_pandas(
|
| 126 |
+
pd.Series([0], name="len"),
|
| 127 |
+
npartitions=df._native_frame.npartitions,
|
| 128 |
+
)
|
| 129 |
+
]
|
| 130 |
+
return [df._native_frame.loc[:, df.columns[0]].size.to_series().rename("len")]
|
| 131 |
+
|
| 132 |
+
# coverage bug? this is definitely hit
|
| 133 |
+
return DaskExpr( # pragma: no cover
|
| 134 |
+
func,
|
| 135 |
+
depth=0,
|
| 136 |
+
function_name="len",
|
| 137 |
+
root_names=None,
|
| 138 |
+
output_names=["len"],
|
| 139 |
+
returns_scalar=True,
|
| 140 |
+
backend_version=self._backend_version,
|
| 141 |
+
)
|
| 142 |
+
|
| 143 |
+
def all_horizontal(self, *exprs: IntoDaskExpr) -> DaskExpr:
    # Row-wise AND across all expressions.
    return reduce(lambda x, y: x & y, parse_into_exprs(*exprs, namespace=self))

def any_horizontal(self, *exprs: IntoDaskExpr) -> DaskExpr:
    # Row-wise OR across all expressions.
    return reduce(lambda x, y: x | y, parse_into_exprs(*exprs, namespace=self))

def sum_horizontal(self, *exprs: IntoDaskExpr) -> DaskExpr:
    # Row-wise sum; nulls are filled with 0 so they don't poison the total.
    return reduce(
        lambda x, y: x + y,
        [expr.fill_null(0) for expr in parse_into_exprs(*exprs, namespace=self)],
    )
|
| 154 |
+
|
| 155 |
+
def concat(
    self,
    items: Iterable[DaskLazyFrame],
    *,
    how: str = "vertical",
) -> DaskLazyFrame:
    """Concatenate frames vertically (matching columns) or horizontally.

    Raises:
        AssertionError: if `items` is empty, if vertical columns mismatch,
            or if horizontal concatenation would duplicate column names.
        NotImplementedError: for any other `how`.
    """
    import dask.dataframe as dd  # ignore-banned-import

    # Materialize exactly once: `items` may be a one-shot iterator, and the
    # previous `len(list(items))` emptiness check exhausted it before the
    # native frames were collected, silently concatenating nothing.
    frames = list(items)
    if not frames:
        msg = "No items to concatenate"  # pragma: no cover
        raise AssertionError(msg)
    native_frames = [i._native_frame for i in frames]
    if how == "vertical":
        if not all(
            tuple(i.columns) == tuple(native_frames[0].columns) for i in native_frames
        ):  # pragma: no cover
            msg = "unable to vstack with non-matching columns"
            raise AssertionError(msg)
        return DaskLazyFrame(
            dd.concat(native_frames, axis=0, join="inner"),
            backend_version=self._backend_version,
        )
    if how == "horizontal":
        all_column_names: list[str] = [
            column for frame in native_frames for column in frame.columns
        ]
        if len(all_column_names) != len(set(all_column_names)):  # pragma: no cover
            duplicates = [
                i for i in all_column_names if all_column_names.count(i) > 1
            ]
            msg = (
                f"Columns with name(s): {', '.join(duplicates)} "
                "have more than one occurrence"
            )
            raise AssertionError(msg)
        return DaskLazyFrame(
            dd.concat(native_frames, axis=1, join="outer"),
            backend_version=self._backend_version,
        )
    raise NotImplementedError
|
| 195 |
+
|
| 196 |
+
def mean_horizontal(self, *exprs: IntoDaskExpr) -> IntoDaskExpr:
    """Row-wise mean across expressions, ignoring nulls in both sum and count."""
    parsed = parse_into_exprs(*exprs, namespace=self)
    # Nulls contribute 0 to the numerator and 0 to the denominator.
    total = reduce(lambda acc, expr: acc + expr, (e.fill_null(0.0) for e in parsed))
    non_null_count = reduce(
        lambda acc, expr: acc + expr, ((1 - e.is_null()) for e in parsed)
    )
    return total / non_null_count
|
| 201 |
+
|
| 202 |
+
def _create_expr_from_series(self, _: Any) -> NoReturn:
|
| 203 |
+
msg = "`_create_expr_from_series` for DaskNamespace exists only for compatibility"
|
| 204 |
+
raise NotImplementedError(msg)
|
| 205 |
+
|
| 206 |
+
def _create_compliant_series(self, _: Any) -> NoReturn:
|
| 207 |
+
msg = "`_create_compliant_series` for DaskNamespace exists only for compatibility"
|
| 208 |
+
raise NotImplementedError(msg)
|
| 209 |
+
|
| 210 |
+
def _create_series_from_scalar(self, *_: Any) -> NoReturn:
|
| 211 |
+
msg = (
|
| 212 |
+
"`_create_series_from_scalar` for DaskNamespace exists only for compatibility"
|
| 213 |
+
)
|
| 214 |
+
raise NotImplementedError(msg)
|
| 215 |
+
|
| 216 |
+
def _create_expr_from_callable( # pragma: no cover
|
| 217 |
+
self,
|
| 218 |
+
func: Callable[[DaskLazyFrame], list[DaskExpr]],
|
| 219 |
+
*,
|
| 220 |
+
depth: int,
|
| 221 |
+
function_name: str,
|
| 222 |
+
root_names: list[str] | None,
|
| 223 |
+
output_names: list[str] | None,
|
| 224 |
+
) -> DaskExpr:
|
| 225 |
+
msg = (
|
| 226 |
+
"`_create_expr_from_callable` for DaskNamespace exists only for compatibility"
|
| 227 |
+
)
|
| 228 |
+
raise NotImplementedError(msg)
|
| 229 |
+
|
| 230 |
+
def when(
|
| 231 |
+
self,
|
| 232 |
+
*predicates: IntoDaskExpr,
|
| 233 |
+
) -> DaskWhen:
|
| 234 |
+
plx = self.__class__(backend_version=self._backend_version)
|
| 235 |
+
if predicates:
|
| 236 |
+
condition = plx.all_horizontal(*predicates)
|
| 237 |
+
else:
|
| 238 |
+
msg = "at least one predicate needs to be provided"
|
| 239 |
+
raise TypeError(msg)
|
| 240 |
+
|
| 241 |
+
return DaskWhen(condition, self._backend_version, returns_scalar=False)
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
class DaskWhen:
|
| 245 |
+
def __init__(
|
| 246 |
+
self,
|
| 247 |
+
condition: DaskExpr,
|
| 248 |
+
backend_version: tuple[int, ...],
|
| 249 |
+
then_value: Any = None,
|
| 250 |
+
otherwise_value: Any = None,
|
| 251 |
+
*,
|
| 252 |
+
returns_scalar: bool,
|
| 253 |
+
) -> None:
|
| 254 |
+
self._backend_version = backend_version
|
| 255 |
+
self._condition = condition
|
| 256 |
+
self._then_value = then_value
|
| 257 |
+
self._otherwise_value = otherwise_value
|
| 258 |
+
self._returns_scalar = returns_scalar
|
| 259 |
+
|
| 260 |
+
def __call__(self, df: DaskLazyFrame) -> list[Any]:
    """Evaluate the when/then/otherwise chain against `df`.

    Returns a single-element list containing the resulting dask series:
    rows where the condition holds take the `then` value, others take the
    `otherwise` value (null when no `otherwise` was given).
    """
    from narwhals._dask.namespace import DaskNamespace
    from narwhals._expression_parsing import parse_into_expr

    plx = DaskNamespace(backend_version=self._backend_version)

    condition = parse_into_expr(self._condition, namespace=plx)._call(df)[0]  # type: ignore[arg-type]
    condition = cast("dask_expr.Series", condition)
    try:
        value_series = parse_into_expr(self._then_value, namespace=plx)._call(df)[0]  # type: ignore[arg-type]
    except TypeError:
        # `self._then_value` is a scalar and can't be converted to an expression;
        # broadcast it by assigning into a frame aligned with the condition.
        _df = condition.to_frame("a")
        _df["tmp"] = self._then_value
        value_series = _df["tmp"]
    value_series = cast("dask_expr.Series", value_series)
    validate_comparand(condition, value_series)

    if self._otherwise_value is None:
        # `where` leaves non-matching rows as null.
        return [value_series.where(condition)]
    try:
        otherwise_series = parse_into_expr(
            self._otherwise_value, namespace=plx
        )._call(df)[0]  # type: ignore[arg-type]
    except TypeError:
        # `self._otherwise_value` is a scalar and can't be converted to an expression
        return [value_series.where(condition, self._otherwise_value)]
    validate_comparand(condition, otherwise_series)
    return [value_series.zip_with(condition, otherwise_series)]
|
| 289 |
+
|
| 290 |
+
def then(self, value: DaskExpr | Any) -> DaskThen:
|
| 291 |
+
self._then_value = value
|
| 292 |
+
|
| 293 |
+
return DaskThen(
|
| 294 |
+
self,
|
| 295 |
+
depth=0,
|
| 296 |
+
function_name="whenthen",
|
| 297 |
+
root_names=None,
|
| 298 |
+
output_names=None,
|
| 299 |
+
returns_scalar=self._returns_scalar,
|
| 300 |
+
backend_version=self._backend_version,
|
| 301 |
+
)
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
class DaskThen(DaskExpr):
|
| 305 |
+
def __init__(
|
| 306 |
+
self,
|
| 307 |
+
call: DaskWhen,
|
| 308 |
+
*,
|
| 309 |
+
depth: int,
|
| 310 |
+
function_name: str,
|
| 311 |
+
root_names: list[str] | None,
|
| 312 |
+
output_names: list[str] | None,
|
| 313 |
+
returns_scalar: bool,
|
| 314 |
+
backend_version: tuple[int, ...],
|
| 315 |
+
) -> None:
|
| 316 |
+
self._backend_version = backend_version
|
| 317 |
+
|
| 318 |
+
self._call = call
|
| 319 |
+
self._depth = depth
|
| 320 |
+
self._function_name = function_name
|
| 321 |
+
self._root_names = root_names
|
| 322 |
+
self._output_names = output_names
|
| 323 |
+
self._returns_scalar = returns_scalar
|
| 324 |
+
|
| 325 |
+
def otherwise(self, value: DaskExpr | Any) -> DaskExpr:
|
| 326 |
+
# type ignore because we are setting the `_call` attribute to a
|
| 327 |
+
# callable object of type `DaskWhen`, base class has the attribute as
|
| 328 |
+
# only a `Callable`
|
| 329 |
+
self._call._otherwise_value = value # type: ignore[attr-defined]
|
| 330 |
+
self._function_name = "whenotherwise"
|
| 331 |
+
return self
|
parrot/lib/python3.10/site-packages/narwhals/_dask/typing.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations # pragma: no cover
|
| 2 |
+
|
| 3 |
+
from typing import TYPE_CHECKING # pragma: no cover
|
| 4 |
+
from typing import Union # pragma: no cover
|
| 5 |
+
|
| 6 |
+
if TYPE_CHECKING:
|
| 7 |
+
import sys
|
| 8 |
+
|
| 9 |
+
if sys.version_info >= (3, 10):
|
| 10 |
+
from typing import TypeAlias
|
| 11 |
+
else:
|
| 12 |
+
from typing_extensions import TypeAlias
|
| 13 |
+
|
| 14 |
+
from narwhals._dask.expr import DaskExpr
|
| 15 |
+
|
| 16 |
+
IntoDaskExpr: TypeAlias = Union[DaskExpr, str]
|
parrot/lib/python3.10/site-packages/narwhals/_exceptions.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class ColumnNotFoundError(Exception): ...
|
parrot/lib/python3.10/site-packages/narwhals/dataframe.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
parrot/lib/python3.10/site-packages/narwhals/group_by.py
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import TYPE_CHECKING
|
| 4 |
+
from typing import Any
|
| 5 |
+
from typing import Generic
|
| 6 |
+
from typing import Iterable
|
| 7 |
+
from typing import Iterator
|
| 8 |
+
from typing import TypeVar
|
| 9 |
+
from typing import cast
|
| 10 |
+
|
| 11 |
+
from narwhals.dataframe import DataFrame
|
| 12 |
+
from narwhals.dataframe import LazyFrame
|
| 13 |
+
from narwhals.utils import tupleify
|
| 14 |
+
|
| 15 |
+
if TYPE_CHECKING:
|
| 16 |
+
from narwhals.typing import IntoExpr
|
| 17 |
+
|
| 18 |
+
DataFrameT = TypeVar("DataFrameT")
|
| 19 |
+
LazyFrameT = TypeVar("LazyFrameT")
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class GroupBy(Generic[DataFrameT]):
|
| 23 |
+
def __init__(self, df: DataFrameT, *keys: str) -> None:
|
| 24 |
+
self._df = cast(DataFrame[Any], df)
|
| 25 |
+
self._keys = keys
|
| 26 |
+
self._grouped = self._df._compliant_frame.group_by(*self._keys)
|
| 27 |
+
|
| 28 |
+
def agg(
|
| 29 |
+
self, *aggs: IntoExpr | Iterable[IntoExpr], **named_aggs: IntoExpr
|
| 30 |
+
) -> DataFrameT:
|
| 31 |
+
"""
|
| 32 |
+
Compute aggregations for each group of a group by operation.
|
| 33 |
+
|
| 34 |
+
Arguments:
|
| 35 |
+
aggs: Aggregations to compute for each group of the group by operation,
|
| 36 |
+
specified as positional arguments.
|
| 37 |
+
|
| 38 |
+
named_aggs: Additional aggregations, specified as keyword arguments.
|
| 39 |
+
|
| 40 |
+
Examples:
|
| 41 |
+
Group by one column or by multiple columns and call `agg` to compute
|
| 42 |
+
the grouped sum of another column.
|
| 43 |
+
|
| 44 |
+
>>> import pandas as pd
|
| 45 |
+
>>> import polars as pl
|
| 46 |
+
>>> import narwhals as nw
|
| 47 |
+
>>> df_pd = pd.DataFrame(
|
| 48 |
+
... {
|
| 49 |
+
... "a": ["a", "b", "a", "b", "c"],
|
| 50 |
+
... "b": [1, 2, 1, 3, 3],
|
| 51 |
+
... "c": [5, 4, 3, 2, 1],
|
| 52 |
+
... }
|
| 53 |
+
... )
|
| 54 |
+
>>> df_pl = pl.DataFrame(
|
| 55 |
+
... {
|
| 56 |
+
... "a": ["a", "b", "a", "b", "c"],
|
| 57 |
+
... "b": [1, 2, 1, 3, 3],
|
| 58 |
+
... "c": [5, 4, 3, 2, 1],
|
| 59 |
+
... }
|
| 60 |
+
... )
|
| 61 |
+
|
| 62 |
+
We define library agnostic functions:
|
| 63 |
+
|
| 64 |
+
>>> @nw.narwhalify
|
| 65 |
+
... def func(df):
|
| 66 |
+
... return df.group_by("a").agg(nw.col("b").sum()).sort("a")
|
| 67 |
+
|
| 68 |
+
>>> @nw.narwhalify
|
| 69 |
+
... def func_mult_col(df):
|
| 70 |
+
... return df.group_by("a", "b").agg(nw.sum("c")).sort("a", "b")
|
| 71 |
+
|
| 72 |
+
We can then pass either pandas or Polars to `func` and `func_mult_col`:
|
| 73 |
+
|
| 74 |
+
>>> func(df_pd)
|
| 75 |
+
a b
|
| 76 |
+
0 a 2
|
| 77 |
+
1 b 5
|
| 78 |
+
2 c 3
|
| 79 |
+
>>> func(df_pl)
|
| 80 |
+
shape: (3, 2)
|
| 81 |
+
┌─────┬─────┐
|
| 82 |
+
│ a ┆ b │
|
| 83 |
+
│ --- ┆ --- │
|
| 84 |
+
│ str ┆ i64 │
|
| 85 |
+
╞═════╪═════╡
|
| 86 |
+
│ a ┆ 2 │
|
| 87 |
+
│ b ┆ 5 │
|
| 88 |
+
│ c ┆ 3 │
|
| 89 |
+
└─────┴─────┘
|
| 90 |
+
>>> func_mult_col(df_pd)
|
| 91 |
+
a b c
|
| 92 |
+
0 a 1 8
|
| 93 |
+
1 b 2 4
|
| 94 |
+
2 b 3 2
|
| 95 |
+
3 c 3 1
|
| 96 |
+
>>> func_mult_col(df_pl)
|
| 97 |
+
shape: (4, 3)
|
| 98 |
+
┌─────┬─────┬─────┐
|
| 99 |
+
│ a ┆ b ┆ c │
|
| 100 |
+
│ --- ┆ --- ┆ --- │
|
| 101 |
+
│ str ┆ i64 ┆ i64 │
|
| 102 |
+
╞═════╪═════╪═════╡
|
| 103 |
+
│ a ┆ 1 ┆ 8 │
|
| 104 |
+
│ b ┆ 2 ┆ 4 │
|
| 105 |
+
│ b ┆ 3 ┆ 2 │
|
| 106 |
+
│ c ┆ 3 ┆ 1 │
|
| 107 |
+
└─────┴─────┴─────┘
|
| 108 |
+
"""
|
| 109 |
+
aggs, named_aggs = self._df._flatten_and_extract(*aggs, **named_aggs)
|
| 110 |
+
return self._df._from_compliant_dataframe( # type: ignore[return-value]
|
| 111 |
+
self._grouped.agg(*aggs, **named_aggs),
|
| 112 |
+
)
|
| 113 |
+
|
| 114 |
+
def __iter__(self) -> Iterator[tuple[Any, DataFrameT]]:
|
| 115 |
+
yield from ( # type: ignore[misc]
|
| 116 |
+
(tupleify(key), self._df._from_compliant_dataframe(df))
|
| 117 |
+
for (key, df) in self._grouped.__iter__()
|
| 118 |
+
)
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
class LazyGroupBy(Generic[LazyFrameT]):
|
| 122 |
+
def __init__(self, df: LazyFrameT, *keys: str) -> None:
|
| 123 |
+
self._df = cast(LazyFrame[Any], df)
|
| 124 |
+
self._keys = keys
|
| 125 |
+
self._grouped = self._df._compliant_frame.group_by(*self._keys)
|
| 126 |
+
|
| 127 |
+
def agg(
|
| 128 |
+
self, *aggs: IntoExpr | Iterable[IntoExpr], **named_aggs: IntoExpr
|
| 129 |
+
) -> LazyFrameT:
|
| 130 |
+
aggs, named_aggs = self._df._flatten_and_extract(*aggs, **named_aggs)
|
| 131 |
+
return self._df._from_compliant_dataframe( # type: ignore[return-value]
|
| 132 |
+
self._grouped.agg(*aggs, **named_aggs),
|
| 133 |
+
)
|
parrot/lib/python3.10/site-packages/narwhals/series.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
parrot/lib/python3.10/site-packages/narwhals/typing.py
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import TYPE_CHECKING
|
| 4 |
+
from typing import Any
|
| 5 |
+
from typing import Protocol
|
| 6 |
+
from typing import TypeVar
|
| 7 |
+
from typing import Union
|
| 8 |
+
|
| 9 |
+
if TYPE_CHECKING:
|
| 10 |
+
import sys
|
| 11 |
+
|
| 12 |
+
if sys.version_info >= (3, 10):
|
| 13 |
+
from typing import TypeAlias
|
| 14 |
+
else:
|
| 15 |
+
from typing_extensions import TypeAlias
|
| 16 |
+
|
| 17 |
+
from narwhals.dataframe import DataFrame
|
| 18 |
+
from narwhals.dataframe import LazyFrame
|
| 19 |
+
from narwhals.expr import Expr
|
| 20 |
+
from narwhals.series import Series
|
| 21 |
+
|
| 22 |
+
# All dataframes supported by Narwhals have a
|
| 23 |
+
# `columns` property. Their similarities don't extend
|
| 24 |
+
# _that_ much further unfortunately...
|
| 25 |
+
class NativeFrame(Protocol):
|
| 26 |
+
@property
|
| 27 |
+
def columns(self) -> Any: ...
|
| 28 |
+
|
| 29 |
+
def join(self, *args: Any, **kwargs: Any) -> Any: ...
|
| 30 |
+
|
| 31 |
+
class DataFrameLike(Protocol):
|
| 32 |
+
def __dataframe__(self, *args: Any, **kwargs: Any) -> Any: ...
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
IntoExpr: TypeAlias = Union["Expr", str, "Series"]
|
| 36 |
+
"""Anything which can be converted to an expression."""
|
| 37 |
+
|
| 38 |
+
IntoDataFrame: TypeAlias = Union["NativeFrame", "DataFrame[Any]", "DataFrameLike"]
|
| 39 |
+
"""Anything which can be converted to a Narwhals DataFrame."""
|
| 40 |
+
|
| 41 |
+
IntoFrame: TypeAlias = Union[
|
| 42 |
+
"NativeFrame", "DataFrame[Any]", "LazyFrame[Any]", "DataFrameLike"
|
| 43 |
+
]
|
| 44 |
+
"""Anything which can be converted to a Narwhals DataFrame or LazyFrame."""
|
| 45 |
+
|
| 46 |
+
Frame: TypeAlias = Union["DataFrame[Any]", "LazyFrame[Any]"]
|
| 47 |
+
"""DataFrame or LazyFrame"""
|
| 48 |
+
|
| 49 |
+
# TypeVars for some of the above
|
| 50 |
+
IntoFrameT = TypeVar("IntoFrameT", bound="IntoFrame")
|
| 51 |
+
IntoDataFrameT = TypeVar("IntoDataFrameT", bound="IntoDataFrame")
|
| 52 |
+
FrameT = TypeVar("FrameT", "DataFrame[Any]", "LazyFrame[Any]")
|
| 53 |
+
DataFrameT = TypeVar("DataFrameT", bound="DataFrame[Any]")
|
| 54 |
+
|
| 55 |
+
__all__ = [
|
| 56 |
+
"IntoExpr",
|
| 57 |
+
"IntoDataFrame",
|
| 58 |
+
"IntoDataFrameT",
|
| 59 |
+
"IntoFrame",
|
| 60 |
+
"IntoFrameT",
|
| 61 |
+
"Frame",
|
| 62 |
+
"FrameT",
|
| 63 |
+
"DataFrameT",
|
| 64 |
+
]
|
parrot/lib/python3.10/site-packages/scipy/linalg/_decomp_update.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7a6204b7f449307731e48304a53bb75d6ae9c04988273ebb536c2c8a561798a1
|
| 3 |
+
size 368608
|
parrot/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_interpolation.cpython-310.pyc
ADDED
|
Binary file (28.6 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/scipy/optimize/__pycache__/_optimize.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:765aafc800a8969959920f235bf84e3153f5f0155e4e8ca2345540b5c8e629f0
|
| 3 |
+
size 114363
|
parrot/lib/python3.10/site-packages/scipy/stats/__pycache__/_morestats.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:bd86e44c637fad170c336f3f0b7283971c06e4e1f8ba47f95bd4920d611984a6
|
| 3 |
+
size 174170
|
parrot/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-Z1-cdf-sample-data.npy
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:cf18c1f2d65a232bf2c7121282df31bf2a8be827afafc4ed810ed37457ee898a
|
| 3 |
+
size 183728
|
parrot/lib/python3.10/site-packages/six-1.16.0.dist-info/INSTALLER
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
pip
|
parrot/lib/python3.10/site-packages/six-1.16.0.dist-info/LICENSE
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Copyright (c) 2010-2020 Benjamin Peterson
|
| 2 |
+
|
| 3 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
| 4 |
+
this software and associated documentation files (the "Software"), to deal in
|
| 5 |
+
the Software without restriction, including without limitation the rights to
|
| 6 |
+
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
| 7 |
+
the Software, and to permit persons to whom the Software is furnished to do so,
|
| 8 |
+
subject to the following conditions:
|
| 9 |
+
|
| 10 |
+
The above copyright notice and this permission notice shall be included in all
|
| 11 |
+
copies or substantial portions of the Software.
|
| 12 |
+
|
| 13 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 14 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
| 15 |
+
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
| 16 |
+
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
| 17 |
+
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
| 18 |
+
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
parrot/lib/python3.10/site-packages/six-1.16.0.dist-info/RECORD
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__pycache__/six.cpython-310.pyc,,
|
| 2 |
+
six-1.16.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
| 3 |
+
six-1.16.0.dist-info/LICENSE,sha256=i7hQxWWqOJ_cFvOkaWWtI9gq3_YPI5P8J2K2MYXo5sk,1066
|
| 4 |
+
six-1.16.0.dist-info/METADATA,sha256=VQcGIFCAEmfZcl77E5riPCN4v2TIsc_qtacnjxKHJoI,1795
|
| 5 |
+
six-1.16.0.dist-info/RECORD,,
|
| 6 |
+
six-1.16.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 7 |
+
six-1.16.0.dist-info/WHEEL,sha256=Z-nyYpwrcSqxfdux5Mbn_DQ525iP7J2DG3JgGvOYyTQ,110
|
| 8 |
+
six-1.16.0.dist-info/top_level.txt,sha256=_iVH_iYEtEXnD8nYGQYpYFUvkUW9sEO1GYbkeKSAais,4
|
| 9 |
+
six.py,sha256=TOOfQi7nFGfMrIvtdr6wX4wyHH8M7aknmuLfo2cBBrM,34549
|
parrot/lib/python3.10/site-packages/six-1.16.0.dist-info/top_level.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
six
|
parrot/lib/python3.10/site-packages/sty-1.0.6.dist-info/LICENSE
ADDED
|
@@ -0,0 +1,204 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
Apache License
|
| 3 |
+
Version 2.0, January 2004
|
| 4 |
+
http://www.apache.org/licenses/
|
| 5 |
+
|
| 6 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 7 |
+
|
| 8 |
+
1. Definitions.
|
| 9 |
+
|
| 10 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 11 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 12 |
+
|
| 13 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 14 |
+
the copyright owner that is granting the License.
|
| 15 |
+
|
| 16 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 17 |
+
other entities that control, are controlled by, or are under common
|
| 18 |
+
control with that entity. For the purposes of this definition,
|
| 19 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 20 |
+
direction or management of such entity, whether by contract or
|
| 21 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 22 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 23 |
+
|
| 24 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 25 |
+
exercising permissions granted by this License.
|
| 26 |
+
|
| 27 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 28 |
+
including but not limited to software source code, documentation
|
| 29 |
+
source, and configuration files.
|
| 30 |
+
|
| 31 |
+
"Object" form shall mean any form resulting from mechanical
|
| 32 |
+
transformation or translation of a Source form, including but
|
| 33 |
+
not limited to compiled object code, generated documentation,
|
| 34 |
+
and conversions to other media types.
|
| 35 |
+
|
| 36 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 37 |
+
Object form, made available under the License, as indicated by a
|
| 38 |
+
copyright notice that is included in or attached to the work
|
| 39 |
+
(an example is provided in the Appendix below).
|
| 40 |
+
|
| 41 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 42 |
+
form, that is based on (or derived from) the Work and for which the
|
| 43 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 44 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 45 |
+
of this License, Derivative Works shall not include works that remain
|
| 46 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 47 |
+
the Work and Derivative Works thereof.
|
| 48 |
+
|
| 49 |
+
"Contribution" shall mean any work of authorship, including
|
| 50 |
+
the original version of the Work and any modifications or additions
|
| 51 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 52 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 53 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 54 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 55 |
+
means any form of electronic, verbal, or written communication sent
|
| 56 |
+
to the Licensor or its representatives, including but not limited to
|
| 57 |
+
communication on electronic mailing lists, source code control systems,
|
| 58 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 59 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 60 |
+
excluding communication that is conspicuously marked or otherwise
|
| 61 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 62 |
+
|
| 63 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 64 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 65 |
+
subsequently incorporated within the Work.
|
| 66 |
+
|
| 67 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 68 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 69 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 70 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 71 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 72 |
+
Work and such Derivative Works in Source or Object form.
|
| 73 |
+
|
| 74 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 75 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 76 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 77 |
+
(except as stated in this section) patent license to make, have made,
|
| 78 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 79 |
+
where such license applies only to those patent claims licensable
|
| 80 |
+
by such Contributor that are necessarily infringed by their
|
| 81 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 82 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 83 |
+
institute patent litigation against any entity (including a
|
| 84 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 85 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 86 |
+
or contributory patent infringement, then any patent licenses
|
| 87 |
+
granted to You under this License for that Work shall terminate
|
| 88 |
+
as of the date such litigation is filed.
|
| 89 |
+
|
| 90 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 91 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 92 |
+
modifications, and in Source or Object form, provided that You
|
| 93 |
+
meet the following conditions:
|
| 94 |
+
|
| 95 |
+
(a) You must give any other recipients of the Work or
|
| 96 |
+
Derivative Works a copy of this License; and
|
| 97 |
+
|
| 98 |
+
(b) You must cause any modified files to carry prominent notices
|
| 99 |
+
stating that You changed the files; and
|
| 100 |
+
|
| 101 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 102 |
+
that You distribute, all copyright, patent, trademark, and
|
| 103 |
+
attribution notices from the Source form of the Work,
|
| 104 |
+
excluding those notices that do not pertain to any part of
|
| 105 |
+
the Derivative Works; and
|
| 106 |
+
|
| 107 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 108 |
+
distribution, then any Derivative Works that You distribute must
|
| 109 |
+
include a readable copy of the attribution notices contained
|
| 110 |
+
within such NOTICE file, excluding those notices that do not
|
| 111 |
+
pertain to any part of the Derivative Works, in at least one
|
| 112 |
+
of the following places: within a NOTICE text file distributed
|
| 113 |
+
as part of the Derivative Works; within the Source form or
|
| 114 |
+
documentation, if provided along with the Derivative Works; or,
|
| 115 |
+
within a display generated by the Derivative Works, if and
|
| 116 |
+
wherever such third-party notices normally appear. The contents
|
| 117 |
+
of the NOTICE file are for informational purposes only and
|
| 118 |
+
do not modify the License. You may add Your own attribution
|
| 119 |
+
notices within Derivative Works that You distribute, alongside
|
| 120 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 121 |
+
that such additional attribution notices cannot be construed
|
| 122 |
+
as modifying the License.
|
| 123 |
+
|
| 124 |
+
You may add Your own copyright statement to Your modifications and
|
| 125 |
+
may provide additional or different license terms and conditions
|
| 126 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 127 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 128 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 129 |
+
the conditions stated in this License.
|
| 130 |
+
|
| 131 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 132 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 133 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 134 |
+
this License, without any additional terms or conditions.
|
| 135 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 136 |
+
the terms of any separate license agreement you may have executed
|
| 137 |
+
with Licensor regarding such Contributions.
|
| 138 |
+
|
| 139 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 140 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 141 |
+
except as required for reasonable and customary use in describing the
|
| 142 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 143 |
+
|
| 144 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 145 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 146 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 147 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 148 |
+
implied, including, without limitation, any warranties or conditions
|
| 149 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 150 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 151 |
+
appropriateness of using or redistributing the Work and assume any
|
| 152 |
+
risks associated with Your exercise of permissions under this License.
|
| 153 |
+
|
| 154 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 155 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 156 |
+
unless required by applicable law (such as deliberate and grossly
|
| 157 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 158 |
+
liable to You for damages, including any direct, indirect, special,
|
| 159 |
+
incidental, or consequential damages of any character arising as a
|
| 160 |
+
result of this License or out of the use or inability to use the
|
| 161 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 162 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 163 |
+
other commercial damages or losses), even if such Contributor
|
| 164 |
+
has been advised of the possibility of such damages.
|
| 165 |
+
|
| 166 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 167 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 168 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 169 |
+
or other liability obligations and/or rights consistent with this
|
| 170 |
+
License. However, in accepting such obligations, You may act only
|
| 171 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 172 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 173 |
+
defend, and hold each Contributor harmless for any liability
|
| 174 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 175 |
+
of your accepting any such warranty or additional liability.
|
| 176 |
+
|
| 177 |
+
END OF TERMS AND CONDITIONS
|
| 178 |
+
|
| 179 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 180 |
+
|
| 181 |
+
To apply the Apache License to your work, attach the following
|
| 182 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
| 183 |
+
replaced with your own identifying information. (Don't include
|
| 184 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 185 |
+
comment syntax for the file format. We also recommend that a
|
| 186 |
+
file or class name and description of purpose be included on the
|
| 187 |
+
same "printed page" as the copyright notice for easier
|
| 188 |
+
identification within third-party archives.
|
| 189 |
+
|
| 190 |
+
Copyright 2018-present Felix Meyer-Wolters
|
| 191 |
+
|
| 192 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 193 |
+
you may not use this file except in compliance with the License.
|
| 194 |
+
You may obtain a copy of the License at
|
| 195 |
+
|
| 196 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 197 |
+
|
| 198 |
+
Unless required by applicable law or agreed to in writing, software
|
| 199 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 200 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 201 |
+
See the License for the specific language governing permissions and
|
| 202 |
+
limitations under the License.
|
| 203 |
+
|
| 204 |
+
|
parrot/lib/python3.10/site-packages/sty-1.0.6.dist-info/WHEEL
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Wheel-Version: 1.0
|
| 2 |
+
Generator: poetry-core 1.8.1
|
| 3 |
+
Root-Is-Purelib: true
|
| 4 |
+
Tag: py3-none-any
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/detail/TensorDataContainer.h
ADDED
|
@@ -0,0 +1,372 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/Dispatch.h>
|
| 4 |
+
#include <ATen/ScalarOps.h>
|
| 5 |
+
#include <ATen/core/Tensor.h>
|
| 6 |
+
#include <ATen/core/grad_mode.h>
|
| 7 |
+
|
| 8 |
+
#include <c10/util/irange.h>
|
| 9 |
+
|
| 10 |
+
#ifndef AT_PER_OPERATOR_HEADERS
|
| 11 |
+
#include <ATen/Functions.h>
|
| 12 |
+
#else
|
| 13 |
+
#include <ATen/ops/empty.h>
|
| 14 |
+
#include <ATen/ops/tensor.h>
|
| 15 |
+
#endif
|
| 16 |
+
|
| 17 |
+
#include <initializer_list>
|
| 18 |
+
|
| 19 |
+
namespace torch {
|
| 20 |
+
|
| 21 |
+
namespace detail {
|
| 22 |
+
|
| 23 |
+
enum class TensorDataContainerType { Scalar, InitList, Tensor };
|
| 24 |
+
|
| 25 |
+
struct TensorDataContainer;
|
| 26 |
+
|
| 27 |
+
inline std::ostream& operator<<(
|
| 28 |
+
std::ostream& stream,
|
| 29 |
+
const TensorDataContainer& tensor_data_container);
|
| 30 |
+
|
| 31 |
+
// FIXME: There is no `operator<<` overload for `at::kBFloat16` type,
|
| 32 |
+
// and we need to convert it to `float` type using `operator float()` function
|
| 33 |
+
// defined in `c10/util/BFloat16.h`.
|
| 34 |
+
// Tracking issue: https://github.com/pytorch/pytorch/issues/28845
|
| 35 |
+
inline std::ostream& operator<<(std::ostream& stream, c10::BFloat16 value) {
|
| 36 |
+
stream << static_cast<float>(value);
|
| 37 |
+
return stream;
|
| 38 |
+
}
|
| 39 |
+
|
| 40 |
+
// Maps the scalar type deduced from user-provided data to the dtype the
// resulting tensor should actually get, matching Python `torch.tensor`:
//   - integer literals (`kInt`/`kLong`)   -> always `kLong` (int64_t)
//   - floating literals (`kFloat`/`kDouble`) -> `torch::get_default_dtype()`
//   - everything else (bool, half, complex, ...) -> unchanged
inline c10::ScalarType compute_desired_dtype(c10::ScalarType scalar_type) {
  const bool is_integral_literal =
      scalar_type == at::kInt || scalar_type == at::kLong;
  if (is_integral_literal) {
    // C++ `torch::tensor` with an integer type or an `at::ArrayRef` /
    // `std::vector` / (nested) braced-init-list of integer types always
    // produces an `at::kLong` tensor, mirroring Python `torch.tensor`.
    return at::kLong;
  }
  const bool is_floating_literal =
      scalar_type == at::kFloat || scalar_type == at::kDouble;
  if (is_floating_literal) {
    // Floating-point inputs follow the globally configured default dtype,
    // again mirroring Python `torch.tensor`.
    return at::typeMetaToScalarType(at::get_default_dtype());
  }
  return scalar_type;
}
|
| 57 |
+
|
| 58 |
+
// We use `TensorDataContainer` to support converting the following data
|
| 59 |
+
// container types into the equivalent Tensor:
|
| 60 |
+
//
|
| 61 |
+
// 1. Arbitrarily nested braced-init-list (e.g. `{{1, 2}, {3, 4}}`).
|
| 62 |
+
// 2. `at::ArrayRef` of supported tensor data types.
|
| 63 |
+
// 3. `std::vector` of supported tensor data types.
|
| 64 |
+
//
|
| 65 |
+
// At any time, a `TensorDataContainer` object represents one of the following:
|
| 66 |
+
//
|
| 67 |
+
// 1. A scalar with value `scalar()` and type `scalar_type()`.
|
| 68 |
+
// 2. A Tensor represented in `std::initializer_list<TensorDataContainer>` form,
|
| 69 |
+
// with value `init_list()`, Tensor scalar type `scalar_type()`, and Tensor
|
| 70 |
+
// sizes `sizes()`.
|
| 71 |
+
// 3. A Tensor represented in `at::Tensor` form, with value `tensor()`, scalar
|
| 72 |
+
// type `scalar_type()`,
|
| 73 |
+
// and Tensor sizes `sizes()`.
|
| 74 |
+
//
|
| 75 |
+
// All the infrastructure here is mostly to support converting an arbitrarily
|
| 76 |
+
// nested braced-init-list to the equivalent Tensor successfully. Consider the
|
| 77 |
+
// following example:
|
| 78 |
+
//
|
| 79 |
+
// `torch::tensor({{1}, {2}})`
|
| 80 |
+
//
|
| 81 |
+
// this will call into the `torch::tensor` function:
|
| 82 |
+
//
|
| 83 |
+
// `at::Tensor tensor(detail::TensorDataContainer tensor_data_container, const
|
| 84 |
+
// at::TensorOptions& options = {})`
|
| 85 |
+
//
|
| 86 |
+
// the compiler will first try to convert `{{1}, {2}}` to `TensorDataContainer`
|
| 87 |
+
// type:
|
| 88 |
+
//
|
| 89 |
+
// `TensorDataContainer({{1}, {2}})`
|
| 90 |
+
//
|
| 91 |
+
// which matches to the
|
| 92 |
+
// `TensorDataContainer(std::initializer_list<TensorDataContainer>)`
|
| 93 |
+
// constructor, and in an attempt to convert `{1}` and `{2}` to
|
| 94 |
+
// `TensorDataContainer`, it calls the following:
|
| 95 |
+
//
|
| 96 |
+
// `TensorDataContainer({1})` (same call path happens for `{2}`, and we'll just
|
| 97 |
+
// focus on `{1}` here)
|
| 98 |
+
//
|
| 99 |
+
// At this point, theoretically there are two plausible ways for `{1}` to be
|
| 100 |
+
// matched to one of the constructors of `TensorDataContainer`:
|
| 101 |
+
//
|
| 102 |
+
// 1. It can be a list-initialization of a scalar value, thus matching
|
| 103 |
+
// `TensorDataContainer(int value)`.
|
| 104 |
+
// 2. It can be converted to `std::initializer_list<TensorDataContainer>`, thus
|
| 105 |
+
// matching
|
| 106 |
+
// `TensorDataContainer(std::initializer_list<TensorDataContainer>)`.
|
| 107 |
+
//
|
| 108 |
+
// How does the compiler decide which one to choose? According to
|
| 109 |
+
// `https://en.cppreference.com/w/cpp/language/list_initialization`,
|
| 110 |
+
// braced-init-list always prefers the constructor that takes
|
| 111 |
+
// `std::initializer_list`. Hence we happily move forward with constructor #2,
|
| 112 |
+
// and it calls the following:
|
| 113 |
+
//
|
| 114 |
+
// `TensorDataContainer(1)`
|
| 115 |
+
//
|
| 116 |
+
// Now it matches `TensorDataContainer(int value)`, which stores `1` as a scalar
|
| 117 |
+
// value. All is good.
|
| 118 |
+
struct TensorDataContainer {
  // NOTE: For tensors with zero-size dimensions (e.g. `torch::tensor({{},
  // {}})`), the innermost empty braced-init-list `{}` matches the default
  // constructor of the innermost `TensorDataContainer`.
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  TensorDataContainer()
      : sizes_({0}),
        // NOTE: In Python, the dtype of tensors with zero-size dimensions (e.g.
        // `torch.tensor([[], []])`) depends on the value of
        // `torch.get_default_dtype()`, and we should do the same for the C++
        // equivalent.
        scalar_type_(at::typeMetaToScalarType(at::get_default_dtype())),
        type_(TensorDataContainerType::InitList) {}
  // Scalar constructors: one overload per supported scalar type, generated by
  // the AT_FORALL_* macro invocations below. Each stores the value in
  // `scalar_` and records the matching ScalarType; `sizes_` stays empty
  // because a scalar is a 0-dim tensor.
#define TENSOR(T, S)                            \
  TensorDataContainer(T value)                  \
      : sizes_(),                               \
        scalar_type_(at::k##S),                 \
        type_(TensorDataContainerType::Scalar), \
        scalar_(value) {}
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TENSOR)
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  AT_FORALL_COMPLEX_TYPES(TENSOR)
#undef TENSOR
  // Nested braced-init-list constructor: takes the scalar type of the first
  // element, verifies every element agrees in sizes and scalar type, then
  // prepends this level's element count to the child sizes.
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  TensorDataContainer(std::initializer_list<TensorDataContainer> init_list)
      : sizes_(),
        scalar_type_(init_list.begin()->scalar_type()),
        type_(TensorDataContainerType::InitList),
        init_list_(init_list) {
    const TensorDataContainer& first_elem = *(init_list.begin());
    for (const auto& elem : init_list) {
      TORCH_CHECK(
          elem.sizes() == first_elem.sizes(),
          "Expected all sub-lists to have sizes: ",
          first_elem.sizes(),
          " (e.g. ",
          first_elem,
          "), ",
          "but got sub-list ",
          elem,
          " with sizes: ",
          elem.sizes());
      TORCH_CHECK(
          elem.scalar_type() == first_elem.scalar_type(),
          "Expected all elements of the tensor to have the same scalar type: ",
          first_elem.scalar_type(),
          ", but got element of scalar type: ",
          elem.scalar_type());
    }
    sizes_.reserve(first_elem.sizes().size() + 1);
    sizes_.push_back(init_list.size());
    sizes_.insert(
        sizes_.end(), first_elem.sizes().begin(), first_elem.sizes().end());
  }

  // `at::ArrayRef` constructors (one per scalar type, macro-generated): these
  // eagerly materialize a 1-D CPU tensor from the values. The `kBool` branch
  // omits an explicit dtype and lets `at::tensor` infer it — presumably
  // because of how the bool overload is dispatched; TODO confirm.
#define TENSOR(T, S)                                                          \
  TensorDataContainer(at::ArrayRef<T> values)                                 \
      : sizes_({(int64_t)values.size()}),                                     \
        scalar_type_(at::k##S),                                               \
        type_(TensorDataContainerType::Tensor) {                              \
    at::AutoDispatchBelowAutograd mode;                                       \
    if (scalar_type_ == at::kBool) {                                          \
      tensor_ = at::tensor(values, at::TensorOptions().device(at::kCPU));     \
    } else {                                                                  \
      tensor_ = at::tensor(values, at::dtype(scalar_type_).device(at::kCPU)); \
    }                                                                         \
  }
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TENSOR)
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  AT_FORALL_COMPLEX_TYPES(TENSOR)
#undef TENSOR

  // NOTE: We need to handle `std::vector` explicitly instead of relying on an
  // implicit conversion to `at::ArrayRef`, otherwise the following error can be
  // thrown when calling `torch::tensor(std::vector<int>({1, 2}))`:
  // ```
  // error: no matching function for call to 'tensor(const std::vector<int>&)'
  // no known conversion for argument 1 from 'const std::vector<int>' to
  // 'torch::detail::TensorDataContainer'
  // ```
  //
  // NOTE: `torch::tensor(std::vector<bool>)` is not supported for now, because
  // ArrayRef<bool> cannot be constructed from a std::vector<bool> bitfield.
#define TENSOR(T, S)                                \
  TensorDataContainer(const std::vector<T>& values) \
      : TensorDataContainer(at::ArrayRef<T>(values)) {}
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  AT_FORALL_SCALAR_TYPES_AND2(Half, BFloat16, TENSOR)
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  AT_FORALL_COMPLEX_TYPES(TENSOR)
#undef TENSOR

  // True when this container holds a single scalar value.
  bool is_scalar() const {
    return type_ == TensorDataContainerType::Scalar;
  }

  // The stored scalar; only valid when `is_scalar()` is true (checked).
  const c10::Scalar& scalar() const {
    TORCH_CHECK(
        is_scalar(),
        "Can only call `scalar()` on a TensorDataContainer that has `is_scalar() == true`");
    return scalar_;
  }

  // True when this container holds a (possibly nested) braced-init-list.
  bool is_init_list() const {
    return type_ == TensorDataContainerType::InitList;
  }

  // The stored init-list; only valid when `is_init_list()` is true (checked).
  const std::initializer_list<TensorDataContainer>& init_list() const {
    TORCH_CHECK(
        is_init_list(),
        "Can only call `init_list()` on a TensorDataContainer that has `is_init_list() == true`");
    return init_list_;
  }

  // True when this container holds an already-materialized `at::Tensor`
  // (i.e. it was built from an ArrayRef/std::vector constructor).
  bool is_tensor() const {
    return type_ == TensorDataContainerType::Tensor;
  }

  // The stored tensor; only valid when `is_tensor()` is true (checked).
  const at::Tensor& tensor() const {
    TORCH_CHECK(
        is_tensor(),
        "Can only call `tensor()` on a TensorDataContainer that has `is_tensor() == true`");
    return tensor_;
  }

  // Sizes of the tensor this container represents (empty for a scalar).
  const std::vector<int64_t>& sizes() const {
    return sizes_;
  }

  // Scalar type deduced from the data, before the dtype adjustment applied in
  // `convert_to_tensor` via `compute_desired_dtype`.
  const c10::ScalarType& scalar_type() const {
    return scalar_type_;
  }

  // Materializes this container as an `at::Tensor` honoring `options`. If the
  // caller did not request a dtype, one is derived from the data with
  // `compute_desired_dtype`.
  at::Tensor convert_to_tensor(at::TensorOptions options) const {
    if (!options.has_dtype()) {
      options = options.dtype(compute_desired_dtype(scalar_type_));
    }

    if (is_scalar()) {
      at::AutoDispatchBelowAutograd mode;
      return at::scalar_tensor(scalar_, options);
    } else if (is_init_list()) {
      // NOTE: Here we explicitly choose to initialize the tensor on CPU first,
      // fill each element of the tensor, and then move the tensor to the
      // desired device. For CUDA device, this approach only involves 1 CUDA
      // kernel launch, and is much faster than initializing the tensor on CUDA
      // first and then filling each element of it (which involves `N` CUDA
      // kernel launches where `N` is the number of the elements in the tensor).
      at::Tensor tensor = ([&]() {
        at::AutoDispatchBelowAutograd mode;
        return at::empty(sizes_, options.device(at::kCPU));
      })();
      fill_tensor(tensor);
      return tensor.to(options.device());
    } else if (is_tensor()) {
      auto output = tensor_.to(options);
      TORCH_CHECK(
          !tensor_.is_complex() || output.is_complex(),
          "can not do torch::tensor(complex, dtype=non-complex) because complex can not be casted to real number without loss of information");
      return output;
    } else {
      TORCH_INTERNAL_ASSERT(false, "Invalid TensorDataContainer type");
    }
  }

  // Streams a braced-init-list-style rendering of the data; used by the
  // `operator<<` overload and by the TORCH_CHECK messages in the init-list
  // constructor above.
  void pretty_print_recursive(std::ostream& stream) const {
    if (is_scalar()) {
      AT_DISPATCH_ALL_TYPES_AND3(
          at::kBool,
          at::kHalf,
          at::kBFloat16,
          scalar_type_,
          "TensorDataContainer_pretty_print_scalar",
          [&] { stream << scalar_.to<scalar_t>(); });
    } else if (is_init_list()) {
      stream << "{";
      for (const TensorDataContainer* it = init_list_.begin();
           it != init_list_.end();
           it++) {
        stream << *it;
        if (std::next(it) != init_list_.end())
          stream << ", ";
      }
      stream << "}";
    } else if (is_tensor()) {
      // Tensors here come from the ArrayRef constructors, which build 1-D
      // tensors, so iterating the first dimension covers every element.
      stream << "{";
      for (const auto i : c10::irange(tensor_.sizes()[0])) {
        AT_DISPATCH_ALL_TYPES_AND3(
            at::kBool,
            at::kHalf,
            at::kBFloat16,
            scalar_type_,
            "TensorDataContainer_pretty_print_tensor_item",
            [&] { stream << tensor_[i].item<scalar_t>(); });
        if (i != tensor_.sizes()[0] - 1)
          stream << ", ";
      }
      stream << "}";
    } else {
      TORCH_INTERNAL_ASSERT(false, "Invalid TensorDataContainer type");
    }
  }

 private:
  // Recursively copies this container's data into `tensor`, which must have
  // been pre-allocated with matching sizes (see `convert_to_tensor`).
  void fill_tensor(at::Tensor& tensor) const {
    if (is_scalar()) {
      TORCH_INTERNAL_ASSERT(
          tensor.dim() == 0,
          "Expected a 0-dim Tensor, but got Tensor with dimensions: ",
          tensor.dim());
      at::NoGradGuard guard;
      tensor.fill_(scalar_);
    } else if (is_init_list()) {
      TORCH_INTERNAL_ASSERT(
          tensor.sizes()[0] == (int64_t)init_list_.size(),
          "Expected a Tensor with size ",
          init_list_.size(),
          " in its first dimension, but got Tensor with size ",
          tensor.sizes()[0],
          " in its first dimension");
      size_t index = 0;
      for (const auto& elem : init_list_) {
        at::Tensor slice = tensor[index];
        elem.fill_tensor(slice);
        index++;
      }
    } else if (is_tensor()) {
      TORCH_INTERNAL_ASSERT(
          false,
          "TensorDataContainer is already a Tensor type, `fill_tensor` should not be called");
    } else {
      TORCH_INTERNAL_ASSERT(false, "Invalid TensorDataContainer type");
    }
  }

  std::vector<int64_t> sizes_;
  c10::ScalarType scalar_type_;
  TensorDataContainerType type_;
  c10::Scalar scalar_;
  // NOTE(review): `std::initializer_list` refers to a temporary backing array;
  // this appears safe only because a TensorDataContainer is consumed within
  // the full-expression that created it (e.g. inside `torch::tensor(...)`) —
  // do not store these objects. TODO confirm against callers.
  std::initializer_list<TensorDataContainer> init_list_;
  at::Tensor tensor_;
};
|
| 362 |
+
|
| 363 |
+
inline std::ostream& operator<<(
|
| 364 |
+
std::ostream& stream,
|
| 365 |
+
const TensorDataContainer& tensor_data_container) {
|
| 366 |
+
tensor_data_container.pretty_print_recursive(stream);
|
| 367 |
+
return stream;
|
| 368 |
+
}
|
| 369 |
+
|
| 370 |
+
} // namespace detail
|
| 371 |
+
|
| 372 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/detail/static.h
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/utils/variadic.h>
|
| 4 |
+
#include <torch/types.h>
|
| 5 |
+
|
| 6 |
+
#include <cstdint>
|
| 7 |
+
#include <type_traits>
|
| 8 |
+
|
| 9 |
+
namespace torch {
|
| 10 |
+
namespace nn {
|
| 11 |
+
class Module;
|
| 12 |
+
} // namespace nn
|
| 13 |
+
} // namespace torch
|
| 14 |
+
|
| 15 |
+
namespace torch {
|
| 16 |
+
namespace detail {
|
| 17 |
+
/// Detects at compile time whether a type `T` declares a `forward()` method.
/// The result is exposed as the boolean constant `has_forward<T>::value`.
template <typename T>
struct has_forward {
  // Two marker types whose sizes differ, so `sizeof` can reveal which
  // overload of `test` the compiler picked.
  using yes = int8_t;
  using no = int16_t;

  // Overload #1 is viable only when `&U::forward` is a well-formed
  // expression, i.e. `U` declares a (non-overloaded) `forward`. Overload #2
  // takes an ellipsis parameter, which the standard ranks below every other
  // conversion sequence during overload resolution (a standard conversion
  // beats a user-defined conversion, which beats an ellipsis conversion), so
  // it is chosen only when #1 is not viable. Passing `nullptr` satisfies #1
  // because `decltype(&U::forward)` is a pointer type.
  template <typename U>
  static yes test(decltype(&U::forward));
  template <typename U>
  static no test(...);

  // True exactly when overload #1 was selected for `T`.
  static constexpr bool value = sizeof(test<T>(nullptr)) == sizeof(yes);
};
|
| 43 |
+
|
| 44 |
+
/// Returns true when none of `Ts` is a mutable (non-const) lvalue reference;
/// const lvalue references, rvalue references, and value types all pass.
/// An empty pack yields `true`.
///
/// NOTE: Implemented with a C++17 fold expression instead of the previous
/// recursive template with a `void`-sentinel specialization. Behavior is
/// unchanged for every instantiation: `check_not_lvalue_references<>()` and
/// `check_not_lvalue_references<void>()` both still return true.
template <typename... Ts>
constexpr bool check_not_lvalue_references() {
  return (
      ... &&
      (!std::is_lvalue_reference<Ts>::value ||
       std::is_const<typename std::remove_reference<Ts>::type>::value));
}
|
| 55 |
+
|
| 56 |
+
/// A type trait whose `value` member is true if `M` derives from `Module`.
|
| 57 |
+
template <typename M>
|
| 58 |
+
using is_module =
|
| 59 |
+
std::is_base_of<torch::nn::Module, typename std::decay<M>::type>;
|
| 60 |
+
|
| 61 |
+
template <typename M, typename T = void>
|
| 62 |
+
using enable_if_module_t =
|
| 63 |
+
typename std::enable_if<is_module<M>::value, T>::type;
|
| 64 |
+
} // namespace detail
|
| 65 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/cloneable.h
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/nn/module.h>
|
| 4 |
+
#include <torch/types.h>
|
| 5 |
+
#include <torch/utils.h>
|
| 6 |
+
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Exception.h>
|
| 9 |
+
|
| 10 |
+
#include <memory>
|
| 11 |
+
#include <utility>
|
| 12 |
+
|
| 13 |
+
namespace torch {
|
| 14 |
+
namespace nn {
|
| 15 |
+
/// The `clone()` method in the base `Module` class does not have knowledge of
|
| 16 |
+
/// the concrete runtime type of its subclasses. Therefore, `clone()` must
|
| 17 |
+
/// either be called from within the subclass, or from a base class that has
|
| 18 |
+
/// knowledge of the concrete type. `Cloneable` uses the CRTP to gain
|
| 19 |
+
/// knowledge of the subclass' static type and provide an implementation of the
|
| 20 |
+
/// `clone()` method. We do not want to use this pattern in the base class,
|
| 21 |
+
/// because then storing a module would always require templatizing it.
|
| 22 |
+
template <typename Derived>
// NOLINTNEXTLINE(bugprone-exception-escape)
class Cloneable : public Module {
 public:
  using Module::Module;

  /// `reset()` must perform initialization of all members with reference
  /// semantics, most importantly parameters, buffers and submodules.
  virtual void reset() = 0;

  /// Performs a recursive "deep copy" of the `Module`, such that all parameters
  /// and submodules in the cloned module are different from those in the
  /// original module.
  ///
  /// Implementation: copy-construct `Derived`, clear all reference-semantic
  /// members (parameters, buffers, children), re-create them via `reset()`,
  /// then copy the original values over (moving them to `device` if given).
  std::shared_ptr<Module> clone(
      const optional<Device>& device = nullopt) const override {
    // The parameter/buffer copies below must not be recorded by autograd.
    NoGradGuard no_grad;

    const auto& self = static_cast<const Derived&>(*this);
    auto copy = std::make_shared<Derived>(self);
    copy->parameters_.clear();
    copy->buffers_.clear();
    copy->children_.clear();
    // `reset()` is expected to re-register every parameter, buffer, and
    // submodule; the TORCH_CHECKs below verify that it did.
    copy->reset();
    TORCH_CHECK(
        copy->parameters_.size() == parameters_.size(),
        "The cloned module does not have the same number of "
        "parameters as the original module after calling reset(). "
        "Are you sure you called register_parameter() inside reset() "
        "and not the constructor?");
    for (const auto& parameter : named_parameters(/*recurse=*/false)) {
      auto& tensor = *parameter;
      // `to(*device)` already yields a fresh tensor when the device differs;
      // otherwise clone explicitly so storage is not shared with the original.
      auto data = device && tensor.device() != *device
          ? tensor.to(*device)
          : autograd::Variable(tensor).clone();
      copy->parameters_[parameter.key()].set_data(data);
    }
    TORCH_CHECK(
        copy->buffers_.size() == buffers_.size(),
        "The cloned module does not have the same number of "
        "buffers as the original module after calling reset(). "
        "Are you sure you called register_buffer() inside reset() "
        "and not the constructor?");
    for (const auto& buffer : named_buffers(/*recurse=*/false)) {
      auto& tensor = *buffer;
      auto data = device && tensor.device() != *device
          ? tensor.to(*device)
          : autograd::Variable(tensor).clone();
      copy->buffers_[buffer.key()].set_data(data);
    }
    TORCH_CHECK(
        copy->children_.size() == children_.size(),
        "The cloned module does not have the same number of "
        "child modules as the original module after calling reset(). "
        "Are you sure you called register_module() inside reset() "
        "and not the constructor?");
    for (const auto& child : children_) {
      copy->children_[child.key()]->clone_(*child.value(), device);
    }
    return copy;
  }

 private:
  // Type-erased entry point used when cloning a parent module: replaces this
  // submodule's state with a clone of `other`.
  void clone_(Module& other, const optional<Device>& device) final {
    // Here we are *pretty* certain that `other's` type is `Derived` (because it
    // was registered under the same name as `this`), but you never know what
    // crazy things `reset()` does, so `dynamic_cast` just to be safe.
    auto clone = std::dynamic_pointer_cast<Derived>(other.clone(device));
    TORCH_CHECK(
        clone != nullptr,
        "Attempted to clone submodule, but it is of a "
        "different type than the submodule it was to be cloned into");
    static_cast<Derived&>(*this) = *clone;
  }
};
|
| 96 |
+
|
| 97 |
+
} // namespace nn
|
| 98 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional.h
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/nn/functional/batchnorm.h>
|
| 4 |
+
#include <torch/nn/functional/conv.h>
|
| 5 |
+
#include <torch/nn/functional/distance.h>
|
| 6 |
+
#include <torch/nn/functional/dropout.h>
|
| 7 |
+
#include <torch/nn/functional/embedding.h>
|
| 8 |
+
#include <torch/nn/functional/fold.h>
|
| 9 |
+
#include <torch/nn/functional/instancenorm.h>
|
| 10 |
+
#include <torch/nn/functional/linear.h>
|
| 11 |
+
#include <torch/nn/functional/loss.h>
|
| 12 |
+
#include <torch/nn/functional/normalization.h>
|
| 13 |
+
#include <torch/nn/functional/padding.h>
|
| 14 |
+
#include <torch/nn/functional/pixelshuffle.h>
|
| 15 |
+
#include <torch/nn/functional/pooling.h>
|
| 16 |
+
#include <torch/nn/functional/upsampling.h>
|
| 17 |
+
#include <torch/nn/functional/vision.h>
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/init.h
ADDED
|
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/Export.h>
|
| 4 |
+
#include <torch/enum.h>
|
| 5 |
+
#include <torch/types.h>
|
| 6 |
+
|
| 7 |
+
namespace torch {
|
| 8 |
+
namespace nn {
|
| 9 |
+
namespace init {
|
| 10 |
+
|
| 11 |
+
using NonlinearityType = std::variant<
|
| 12 |
+
enumtype::kLinear,
|
| 13 |
+
enumtype::kConv1D,
|
| 14 |
+
enumtype::kConv2D,
|
| 15 |
+
enumtype::kConv3D,
|
| 16 |
+
enumtype::kConvTranspose1D,
|
| 17 |
+
enumtype::kConvTranspose2D,
|
| 18 |
+
enumtype::kConvTranspose3D,
|
| 19 |
+
enumtype::kSigmoid,
|
| 20 |
+
enumtype::kTanh,
|
| 21 |
+
enumtype::kReLU,
|
| 22 |
+
enumtype::kLeakyReLU>;
|
| 23 |
+
|
| 24 |
+
using FanModeType = std::variant<enumtype::kFanIn, enumtype::kFanOut>;
|
| 25 |
+
|
| 26 |
+
} // namespace init
|
| 27 |
+
} // namespace nn
|
| 28 |
+
|
| 29 |
+
namespace nn {
|
| 30 |
+
namespace init {
|
| 31 |
+
|
| 32 |
+
/// Return the recommended gain value for the given nonlinearity function.
|
| 33 |
+
TORCH_API double calculate_gain(
|
| 34 |
+
NonlinearityType nonlinearity,
|
| 35 |
+
double param = 0.01);
|
| 36 |
+
|
| 37 |
+
/// Fills the given `tensor` with the provided `value` in-place, and returns it.
|
| 38 |
+
/// No gradient will be recorded for this operation.
|
| 39 |
+
TORCH_API Tensor constant_(Tensor tensor, Scalar value);
|
| 40 |
+
|
| 41 |
+
/// Fills the given `tensor` with the Dirac delta function in-place, and returns
|
| 42 |
+
/// it. No gradient will be recorded for this operation.
|
| 43 |
+
TORCH_API Tensor dirac_(Tensor tensor);
|
| 44 |
+
|
| 45 |
+
/// Fills the given 2-dimensional `matrix` with an identity matrix.
|
| 46 |
+
/// No gradient will be recorded for this operation.
|
| 47 |
+
TORCH_API Tensor eye_(Tensor matrix);
|
| 48 |
+
|
| 49 |
+
/// Fills the given 2-dimensional `matrix` with values drawn from a normal
|
| 50 |
+
/// distribution parameterized by `mean` and `std`.
|
| 51 |
+
/// No gradient will be recorded for this operation.
|
| 52 |
+
TORCH_API Tensor normal_(Tensor tensor, double mean = 0, double std = 1);
|
| 53 |
+
|
| 54 |
+
/// Fills the given `tensor` with ones.
|
| 55 |
+
/// No gradient will be recorded for this operation.
|
| 56 |
+
TORCH_API Tensor ones_(Tensor tensor);
|
| 57 |
+
|
| 58 |
+
/// Fills the input `Tensor` with a (semi) orthogonal matrix, as described in
|
| 59 |
+
/// "Exact solutions to the nonlinear dynamics of learning in deep linear neural
|
| 60 |
+
/// networks" - Saxe, A. et al. (2013). The input tensor must have at least 2
|
| 61 |
+
/// dimensions, and for tensors with more than 2 dimensions the trailing
|
| 62 |
+
/// dimensions are flattened.
|
| 63 |
+
/// No gradient will be recorded for this operation.
|
| 64 |
+
TORCH_API Tensor orthogonal_(Tensor tensor, double gain = 1.0);
|
| 65 |
+
|
| 66 |
+
/// Fills the 2D input `Tensor` as a sparse matrix, where the
|
| 67 |
+
/// non-zero elements will be drawn from a centered normal distribution
|
| 68 |
+
/// with the given standard deviation `std`, as described in "Deep learning via
|
| 69 |
+
/// Hessian-free optimization" - Martens, J. (2010). The `sparsity` is a real
|
| 70 |
+
/// value between 0 and 1 that controls the fraction of elements in each column
|
| 71 |
+
/// to be set to zero.
|
| 72 |
+
/// No gradient will be recorded for this operation.
|
| 73 |
+
TORCH_API Tensor sparse_(Tensor tensor, double sparsity, double std = 0.01);
|
| 74 |
+
|
| 75 |
+
/// Fills the given 2-dimensional `matrix` with values drawn from a uniform
|
| 76 |
+
/// distribution parameterized by `low` and `high`.
|
| 77 |
+
/// No gradient will be recorded for this operation.
|
| 78 |
+
TORCH_API Tensor uniform_(Tensor tensor, double low = 0, double high = 1);
|
| 79 |
+
|
| 80 |
+
/// Fills the input `Tensor` with values according to the method
|
| 81 |
+
/// described in "Delving deep into rectifiers: Surpassing human-level
|
| 82 |
+
/// performance on ImageNet classification" - He, K. et al. (2015), using a
|
| 83 |
+
/// normal distribution. Also known as He initialization.
|
| 84 |
+
/// No gradient will be recorded for this operation.
|
| 85 |
+
TORCH_API Tensor kaiming_normal_(
|
| 86 |
+
Tensor tensor,
|
| 87 |
+
double a = 0,
|
| 88 |
+
FanModeType mode = torch::kFanIn,
|
| 89 |
+
NonlinearityType nonlinearity = torch::kLeakyReLU);
|
| 90 |
+
|
| 91 |
+
/// Fills the input `Tensor` with values according to the method
|
| 92 |
+
/// described in "Delving deep into rectifiers: Surpassing human-level
|
| 93 |
+
/// performance on ImageNet classification" - He, K. et al. (2015), using a
|
| 94 |
+
/// uniform distribution. Also known as He initialization.
|
| 95 |
+
/// No gradient will be recorded for this operation.
|
| 96 |
+
TORCH_API Tensor kaiming_uniform_(
|
| 97 |
+
Tensor tensor,
|
| 98 |
+
double a = 0,
|
| 99 |
+
FanModeType mode = torch::kFanIn,
|
| 100 |
+
NonlinearityType nonlinearity = torch::kLeakyReLU);
|
| 101 |
+
|
| 102 |
+
/// Fills the input `Tensor` with values according to the method
|
| 103 |
+
/// described in "Understanding the difficulty of training deep feedforward
|
| 104 |
+
/// neural networks" - Glorot, X. & Bengio, Y. (2010). Values are scaled by the
|
| 105 |
+
/// `gain` parameter. No gradient will be recorded for this operation.
|
| 106 |
+
TORCH_API Tensor xavier_normal_(Tensor tensor, double gain = 1.0);
|
| 107 |
+
|
| 108 |
+
/// Fills the input `Tensor` with values according to the method
|
| 109 |
+
/// described in "Understanding the difficulty of training deep feedforward
|
| 110 |
+
/// neural networks" - Glorot, X. & Bengio, Y. (2010), using a uniform
|
| 111 |
+
/// distribution. Values are scaled by the `gain` parameter
|
| 112 |
+
/// No gradient will be recorded for this operation.
|
| 113 |
+
TORCH_API Tensor xavier_uniform_(Tensor tensor, double gain = 1.0);
|
| 114 |
+
|
| 115 |
+
/// Fills the given `tensor` with zeros.
|
| 116 |
+
/// No gradient will be recorded for this operation.
|
| 117 |
+
TORCH_API Tensor zeros_(Tensor tensor);
|
| 118 |
+
|
| 119 |
+
TORCH_API std::tuple<int64_t, int64_t> _calculate_fan_in_and_fan_out(
|
| 120 |
+
const Tensor& tensor);
|
| 121 |
+
|
| 122 |
+
} // namespace init
|
| 123 |
+
} // namespace nn
|
| 124 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/module.h
ADDED
|
@@ -0,0 +1,702 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/nn/modules/container/any_module_holder.h>
|
| 4 |
+
#include <torch/nn/modules/container/any_value.h>
|
| 5 |
+
#include <torch/nn/pimpl.h>
|
| 6 |
+
#include <torch/ordered_dict.h>
|
| 7 |
+
#include <torch/serialize/archive.h>
|
| 8 |
+
#include <torch/types.h>
|
| 9 |
+
|
| 10 |
+
#include <ATen/ATen.h>
|
| 11 |
+
|
| 12 |
+
#include <functional>
|
| 13 |
+
#include <iosfwd>
|
| 14 |
+
#include <map>
|
| 15 |
+
#include <memory>
|
| 16 |
+
#include <string>
|
| 17 |
+
#include <type_traits>
|
| 18 |
+
|
| 19 |
+
namespace torch {
|
| 20 |
+
namespace nn {
|
| 21 |
+
|
| 22 |
+
/// The base class for all modules in PyTorch.
|
| 23 |
+
///
|
| 24 |
+
/// \rst
|
| 25 |
+
/// .. note::
|
| 26 |
+
/// The design and implementation of this class is largely based on the Python
|
| 27 |
+
/// API. You may want to consult the python documentation for
|
| 28 |
+
/// :py:class:`pytorch:torch.nn.Module` for further clarification on certain
|
| 29 |
+
/// methods or behavior.
|
| 30 |
+
/// \endrst
|
| 31 |
+
///
|
| 32 |
+
/// A `Module` is an abstraction over the implementation of some function or
|
| 33 |
+
/// algorithm, possibly associated with some persistent data. A `Module` may
|
| 34 |
+
/// contain further `Module`s ("submodules"), each with their own
|
| 35 |
+
/// implementation, persistent data and further submodules. `Module`s can thus
|
| 36 |
+
/// be said to form a recursive tree structure. A `Module` is registered as a
|
| 37 |
+
/// submodule to another `Module` by calling `register_module()`, typically from
|
| 38 |
+
/// within a parent module's constructor.
|
| 39 |
+
///
|
| 40 |
+
/// A distinction is made between three kinds of persistent data that may be
|
| 41 |
+
/// associated with a `Module`:
|
| 42 |
+
///
|
| 43 |
+
/// 1. *Parameters*: tensors that record gradients, typically weights updated
|
| 44 |
+
/// during the backward step (e.g. the `weight` of a `Linear` module),
|
| 45 |
+
/// 2. *Buffers*: tensors that do not record gradients, typically updated during
|
| 46 |
+
/// the forward step, such as running statistics (e.g. `mean` and `variance`
|
| 47 |
+
/// in the `BatchNorm` module),
|
| 48 |
+
/// 3. Any additional state, not necessarily tensors, required for the
|
| 49 |
+
/// implementation or configuration of a `Module`.
|
| 50 |
+
///
|
| 51 |
+
/// The first two kinds of state are special in that they may be registered
|
| 52 |
+
/// with the `Module` system to allow convenient access and batch configuration.
|
| 53 |
+
/// For example, registered parameters in any `Module` may be iterated over via
|
| 54 |
+
/// the `parameters()` accessor. Further, changing the data type of a `Module`'s
|
| 55 |
+
/// registered parameters can be done conveniently via `Module::to()`, e.g.
|
| 56 |
+
/// `module->to(torch::kCUDA)` to move all parameters to GPU memory. Lastly,
|
| 57 |
+
/// registered parameters and buffers are handled specially during a `clone()`
|
| 58 |
+
/// operation, which performs a deepcopy of a cloneable `Module` hierarchy.
|
| 59 |
+
///
|
| 60 |
+
/// Parameters are registered with a `Module` via `register_parameter`. Buffers
|
| 61 |
+
/// are registered separately via `register_buffer`. These methods are part of
|
| 62 |
+
/// the public API of `Module` and are typically invoked from within a
|
| 63 |
+
/// concrete `Module`s constructor.
|
| 64 |
+
class TORCH_API Module : public std::enable_shared_from_this<Module> {
|
| 65 |
+
public:
|
| 66 |
+
using ModuleApplyFunction = std::function<void(Module&)>;
|
| 67 |
+
using ConstModuleApplyFunction = std::function<void(const Module&)>;
|
| 68 |
+
using NamedModuleApplyFunction =
|
| 69 |
+
std::function<void(const std::string&, Module&)>;
|
| 70 |
+
using ConstNamedModuleApplyFunction =
|
| 71 |
+
std::function<void(const std::string&, const Module&)>;
|
| 72 |
+
using ModulePointerApplyFunction =
|
| 73 |
+
std::function<void(const std::shared_ptr<Module>&)>;
|
| 74 |
+
using NamedModulePointerApplyFunction =
|
| 75 |
+
std::function<void(const std::string&, const std::shared_ptr<Module>&)>;
|
| 76 |
+
|
| 77 |
+
/// Tells the base `Module` about the name of the submodule.
|
| 78 |
+
explicit Module(std::string name);
|
| 79 |
+
|
| 80 |
+
/// Constructs the module without immediate knowledge of the submodule's name.
|
| 81 |
+
/// The name of the submodule is inferred via RTTI (if possible) the first
|
| 82 |
+
/// time `.name()` is invoked.
|
| 83 |
+
Module();
|
| 84 |
+
Module(const Module&) = default;
|
| 85 |
+
Module& operator=(const Module&) = default;
|
| 86 |
+
Module(Module&&) noexcept = default;
|
| 87 |
+
Module& operator=(Module&&) noexcept = default;
|
| 88 |
+
|
| 89 |
+
virtual ~Module() = default;
|
| 90 |
+
|
| 91 |
+
/// Returns the name of the `Module`.
|
| 92 |
+
///
|
| 93 |
+
/// A `Module` has an associated `name`, which is a string representation of
|
| 94 |
+
/// the kind of concrete `Module` it represents, such as `"Linear"` for the
|
| 95 |
+
/// `Linear` module. Under most circumstances, this name is automatically
|
| 96 |
+
/// inferred via runtime type information (RTTI). In the unusual circumstance
|
| 97 |
+
/// that you have this feature disabled, you may want to manually name your
|
| 98 |
+
/// `Module`s by passing the string name to the `Module` base class'
|
| 99 |
+
/// constructor.
|
| 100 |
+
const std::string& name() const noexcept;
|
| 101 |
+
|
| 102 |
+
/// Performs a recursive deep copy of the module and all its registered
|
| 103 |
+
/// parameters, buffers and submodules.
|
| 104 |
+
///
|
| 105 |
+
/// Optionally, this method sets the current device
|
| 106 |
+
/// to the one supplied before cloning. If no device is given, each
|
| 107 |
+
/// parameter and buffer will be moved to the device of its source.
|
| 108 |
+
///
|
| 109 |
+
/// \rst
|
| 110 |
+
/// .. attention::
|
| 111 |
+
/// Attempting to call the `clone()` method inherited from the base `Module`
|
| 112 |
+
/// class (the one documented here) will fail. To inherit an actual
|
| 113 |
+
/// implementation of `clone()`, you must subclass `Cloneable`. `Cloneable`
|
| 114 |
+
/// is templatized on the concrete module type, and can thus properly copy a
|
| 115 |
+
/// `Module`. This method is provided on the base class' API solely for an
|
| 116 |
+
/// easier-to-use polymorphic interface.
|
| 117 |
+
/// \endrst
|
| 118 |
+
virtual std::shared_ptr<Module> clone(
|
| 119 |
+
const optional<Device>& device = nullopt) const;
|
| 120 |
+
|
| 121 |
+
/// Applies the `function` to the `Module` and recursively to every submodule.
|
| 122 |
+
/// The function must accept a `Module&`.
|
| 123 |
+
///
|
| 124 |
+
/// \rst
|
| 125 |
+
/// .. code-block:: cpp
|
| 126 |
+
/// MyModule module;
|
| 127 |
+
/// module->apply([](nn::Module& module) {
|
| 128 |
+
/// std::cout << module.name() << std::endl;
|
| 129 |
+
/// });
|
| 130 |
+
/// \endrst
|
| 131 |
+
void apply(const ModuleApplyFunction& function);
|
| 132 |
+
|
| 133 |
+
/// Applies the `function` to the `Module` and recursively to every submodule.
|
| 134 |
+
/// The function must accept a `const Module&`.
|
| 135 |
+
///
|
| 136 |
+
/// \rst
|
| 137 |
+
/// .. code-block:: cpp
|
| 138 |
+
/// MyModule module;
|
| 139 |
+
/// module->apply([](const nn::Module& module) {
|
| 140 |
+
/// std::cout << module.name() << std::endl;
|
| 141 |
+
/// });
|
| 142 |
+
/// \endrst
|
| 143 |
+
void apply(const ConstModuleApplyFunction& function) const;
|
| 144 |
+
|
| 145 |
+
/// Applies the `function` to the `Module` and recursively to every submodule.
|
| 146 |
+
/// The function must accept a `const std::string&` for the key of the module,
|
| 147 |
+
/// and a `Module&`. The key of the module itself is the empty string. If
|
| 148 |
+
/// `name_prefix` is given, it is prepended to every key as
|
| 149 |
+
/// `<name_prefix>.<key>` (and just `name_prefix` for the module itself).
|
| 150 |
+
///
|
| 151 |
+
/// \rst
|
| 152 |
+
/// .. code-block:: cpp
|
| 153 |
+
/// MyModule module;
|
| 154 |
+
/// module->apply([](const std::string& key, nn::Module& module) {
|
| 155 |
+
/// std::cout << key << ": " << module.name() << std::endl;
|
| 156 |
+
/// });
|
| 157 |
+
/// \endrst
|
| 158 |
+
void apply(
|
| 159 |
+
const NamedModuleApplyFunction& function,
|
| 160 |
+
const std::string& name_prefix = std::string());
|
| 161 |
+
|
| 162 |
+
/// Applies the `function` to the `Module` and recursively to every submodule.
|
| 163 |
+
/// The function must accept a `const std::string&` for the key of the module,
|
| 164 |
+
/// and a `const Module&`. The key of the module itself is the empty string.
|
| 165 |
+
/// If `name_prefix` is given, it is prepended to every key as
|
| 166 |
+
/// `<name_prefix>.<key>` (and just `name_prefix` for the module itself).
|
| 167 |
+
///
|
| 168 |
+
/// \rst
|
| 169 |
+
/// .. code-block:: cpp
|
| 170 |
+
/// MyModule module;
|
| 171 |
+
/// module->apply([](const std::string& key, const nn::Module& module) {
|
| 172 |
+
/// std::cout << key << ": " << module.name() << std::endl;
|
| 173 |
+
/// });
|
| 174 |
+
/// \endrst
|
| 175 |
+
void apply(
|
| 176 |
+
const ConstNamedModuleApplyFunction& function,
|
| 177 |
+
const std::string& name_prefix = std::string()) const;
|
| 178 |
+
|
| 179 |
+
/// Applies the `function` to the `Module` and recursively to every submodule.
|
| 180 |
+
/// The function must accept a `const std::shared_ptr<Module>&`.
|
| 181 |
+
///
|
| 182 |
+
/// \rst
|
| 183 |
+
/// .. code-block:: cpp
|
| 184 |
+
/// MyModule module;
|
| 185 |
+
/// module->apply([](const std::shared_ptr<nn::Module>& module) {
|
| 186 |
+
/// std::cout << module->name() << std::endl;
|
| 187 |
+
/// });
|
| 188 |
+
/// \endrst
|
| 189 |
+
void apply(const ModulePointerApplyFunction& function) const;
|
| 190 |
+
|
| 191 |
+
/// Applies the `function` to the `Module` and recursively to every submodule.
|
| 192 |
+
/// The function must accept a `const std::string&` for the key of the module,
|
| 193 |
+
/// and a `const std::shared_ptr<Module>&`. The key of the module itself is
|
| 194 |
+
/// the empty string. If `name_prefix` is given, it is prepended to every key
|
| 195 |
+
/// as
|
| 196 |
+
/// `<name_prefix>.<key>` (and just `name_prefix` for the module itself).
|
| 197 |
+
///
|
| 198 |
+
/// \rst
|
| 199 |
+
/// .. code-block:: cpp
|
| 200 |
+
/// MyModule module;
|
| 201 |
+
/// module->apply([](const std::string& key,
|
| 202 |
+
/// const std::shared_ptr<nn::Module>& module) {
|
| 203 |
+
/// std::cout << key << ": " << module->name() << std::endl;
|
| 204 |
+
/// });
|
| 205 |
+
/// \endrst
|
| 206 |
+
void apply(
|
| 207 |
+
const NamedModulePointerApplyFunction& function,
|
| 208 |
+
const std::string& name_prefix = std::string()) const;
|
| 209 |
+
|
| 210 |
+
/// Returns the parameters of this `Module` and if `recurse` is true, also
|
| 211 |
+
/// recursively of every submodule.
|
| 212 |
+
std::vector<Tensor> parameters(bool recurse = true) const;
|
| 213 |
+
|
| 214 |
+
/// Returns an `OrderedDict` with the parameters of this `Module` along with
|
| 215 |
+
/// their keys, and if `recurse` is true also recursively of every submodule.
|
| 216 |
+
OrderedDict<std::string, Tensor> named_parameters(bool recurse = true) const;
|
| 217 |
+
|
| 218 |
+
/// Returns the buffers of this `Module` and if `recurse` is true, also
|
| 219 |
+
/// recursively of every submodule.
|
| 220 |
+
std::vector<Tensor> buffers(bool recurse = true) const;
|
| 221 |
+
|
| 222 |
+
/// Returns an `OrderedDict` with the buffers of this `Module` along with
|
| 223 |
+
/// their keys, and if `recurse` is true also recursively of every submodule.
|
| 224 |
+
OrderedDict<std::string, Tensor> named_buffers(bool recurse = true) const;
|
| 225 |
+
|
| 226 |
+
/// Returns the submodules of this `Module` (the entire submodule hierarchy)
|
| 227 |
+
/// and if `include_self` is true, also inserts a `shared_ptr` to this module
|
| 228 |
+
/// in the first position.
|
| 229 |
+
///
|
| 230 |
+
/// \rst
|
| 231 |
+
/// .. warning::
|
| 232 |
+
/// Only pass `include_self` as `true` if this `Module` is stored in a
|
| 233 |
+
/// `shared_ptr`! Otherwise an exception will be thrown. You may still call
|
| 234 |
+
/// this method with `include_self` set to false if your `Module` is not
|
| 235 |
+
/// stored in a `shared_ptr`.
|
| 236 |
+
/// \endrst
|
| 237 |
+
std::vector<std::shared_ptr<Module>> modules(bool include_self = true) const;
|
| 238 |
+
|
| 239 |
+
/// Returns an `OrderedDict` of the submodules of this `Module` (the entire
|
| 240 |
+
/// submodule hierarchy) and their keys, and if `include_self` is true, also
|
| 241 |
+
/// inserts a `shared_ptr` to this module in the first position. If
|
| 242 |
+
/// `name_prefix` is given, it is prepended to every key as
|
| 243 |
+
/// `<name_prefix>.<key>` (and just `name_prefix` for the module itself).
|
| 244 |
+
///
|
| 245 |
+
/// \rst
|
| 246 |
+
/// .. warning::
|
| 247 |
+
/// Only pass `include_self` as `true` if this `Module` is stored in a
|
| 248 |
+
/// `shared_ptr`! Otherwise an exception will be thrown. You may still call
|
| 249 |
+
/// this method with `include_self` set to false if your `Module` is not
|
| 250 |
+
/// stored in a `shared_ptr`.
|
| 251 |
+
/// \endrst
|
| 252 |
+
OrderedDict<std::string, std::shared_ptr<Module>> named_modules(
|
| 253 |
+
const std::string& name_prefix = std::string(),
|
| 254 |
+
bool include_self = true) const;
|
| 255 |
+
|
| 256 |
+
/// Returns the direct submodules of this `Module`.
|
| 257 |
+
std::vector<std::shared_ptr<Module>> children() const;
|
| 258 |
+
|
| 259 |
+
/// Returns an `OrderedDict` of the direct submodules of this `Module` and
|
| 260 |
+
/// their keys.
|
| 261 |
+
OrderedDict<std::string, std::shared_ptr<Module>> named_children() const;
|
| 262 |
+
|
| 263 |
+
/// Enables "training" mode.
|
| 264 |
+
virtual void train(bool on = true);
|
| 265 |
+
|
| 266 |
+
/// Calls train(false) to enable "eval" mode.
|
| 267 |
+
/// Do not override this method, override `train()` instead.
|
| 268 |
+
void eval();
|
| 269 |
+
|
| 270 |
+
/// True if the module is in training mode.
|
| 271 |
+
///
|
| 272 |
+
/// Every `Module` has a boolean associated with it that determines whether
|
| 273 |
+
/// the `Module` is currently in *training* mode (set via `.train()`) or in
|
| 274 |
+
/// *evaluation* (inference) mode (set via `.eval()`). This property is
|
| 275 |
+
/// exposed via `is_training()`, and may be used by the implementation of a
|
| 276 |
+
/// concrete module to modify its runtime behavior. See the `BatchNorm` or
|
| 277 |
+
/// `Dropout` modules for examples of `Module`s that use different code paths
|
| 278 |
+
/// depending on this property.
|
| 279 |
+
virtual bool is_training() const noexcept;
|
| 280 |
+
|
| 281 |
+
/// Recursively casts all parameters to the given `dtype` and `device`.
|
| 282 |
+
///
|
| 283 |
+
/// If `non_blocking` is true and the source is in pinned memory and
|
| 284 |
+
/// destination is on the GPU or vice versa, the copy is performed
|
| 285 |
+
/// asynchronously with respect to the host. Otherwise, the argument has no
|
| 286 |
+
/// effect.
|
| 287 |
+
virtual void to(
|
| 288 |
+
torch::Device device,
|
| 289 |
+
torch::Dtype dtype,
|
| 290 |
+
bool non_blocking = false);
|
| 291 |
+
|
| 292 |
+
/// Recursively casts all parameters to the given dtype.
|
| 293 |
+
///
|
| 294 |
+
/// If `non_blocking` is true and the source is in pinned memory and
|
| 295 |
+
/// destination is on the GPU or vice versa, the copy is performed
|
| 296 |
+
/// asynchronously with respect to the host. Otherwise, the argument has no
|
| 297 |
+
/// effect.
|
| 298 |
+
virtual void to(torch::Dtype dtype, bool non_blocking = false);
|
| 299 |
+
|
| 300 |
+
/// Recursively moves all parameters to the given device.
|
| 301 |
+
///
|
| 302 |
+
/// If `non_blocking` is true and the source is in pinned memory and
|
| 303 |
+
/// destination is on the GPU or vice versa, the copy is performed
|
| 304 |
+
/// asynchronously with respect to the host. Otherwise, the argument has no
|
| 305 |
+
/// effect.
|
| 306 |
+
virtual void to(torch::Device device, bool non_blocking = false);
|
| 307 |
+
|
| 308 |
+
/// Recursively zeros out the `grad` value of each registered parameter.
|
| 309 |
+
virtual void zero_grad(bool set_to_none = true);
|
| 310 |
+
|
| 311 |
+
/// Attempts to cast this `Module` to the given `ModuleType`.
|
| 312 |
+
///
|
| 313 |
+
/// This method is useful when calling `apply()`.
|
| 314 |
+
/// \rst
|
| 315 |
+
/// .. code-block:: cpp
|
| 316 |
+
///
|
| 317 |
+
/// void initialize_weights(nn::Module& module) {
|
| 318 |
+
/// torch::NoGradGuard no_grad;
|
| 319 |
+
/// if (auto* linear = module.as<nn::Linear>()) {
|
| 320 |
+
/// linear->weight.normal_(0.0, 0.02);
|
| 321 |
+
/// }
|
| 322 |
+
/// }
|
| 323 |
+
///
|
| 324 |
+
/// MyModule module;
|
| 325 |
+
/// module->apply(initialize_weights);
|
| 326 |
+
/// \endrst
|
| 327 |
+
template <typename ModuleType>
|
| 328 |
+
typename ModuleType::ContainedType* as() noexcept;
|
| 329 |
+
|
| 330 |
+
/// Attempts to cast this `Module` to the given `ModuleType`.
|
| 331 |
+
///
|
| 332 |
+
/// This method is useful when calling `apply()`.
|
| 333 |
+
/// \rst
|
| 334 |
+
/// .. code-block:: cpp
|
| 335 |
+
/// void initialize_weights(nn::Module& module) {
|
| 336 |
+
/// torch::NoGradGuard no_grad;
|
| 337 |
+
/// if (auto* linear = module.as<nn::Linear>()) {
|
| 338 |
+
/// linear->weight.normal_(0.0, 0.02);
|
| 339 |
+
/// }
|
| 340 |
+
/// }
|
| 341 |
+
///
|
| 342 |
+
/// MyModule module;
|
| 343 |
+
/// module->apply(initialize_weights);
|
| 344 |
+
/// \endrst
|
| 345 |
+
template <typename ModuleType>
|
| 346 |
+
const typename ModuleType::ContainedType* as() const noexcept;
|
| 347 |
+
|
| 348 |
+
/// Attempts to cast this `Module` to the given `ModuleType`.
|
| 349 |
+
///
|
| 350 |
+
/// This method is useful when calling `apply()`.
|
| 351 |
+
/// \rst
|
| 352 |
+
/// .. code-block:: cpp
|
| 353 |
+
///
|
| 354 |
+
/// void initialize_weights(nn::Module& module) {
|
| 355 |
+
/// torch::NoGradGuard no_grad;
|
| 356 |
+
/// if (auto* linear = module.as<nn::Linear>()) {
|
| 357 |
+
/// linear->weight.normal_(0.0, 0.02);
|
| 358 |
+
/// }
|
| 359 |
+
/// }
|
| 360 |
+
///
|
| 361 |
+
/// MyModule module;
|
| 362 |
+
/// module.apply(initialize_weights);
|
| 363 |
+
/// \endrst
|
| 364 |
+
template <
|
| 365 |
+
typename ModuleType,
|
| 366 |
+
typename = torch::detail::disable_if_module_holder_t<ModuleType>>
|
| 367 |
+
ModuleType* as() noexcept;
|
| 368 |
+
|
| 369 |
+
/// Attempts to cast this `Module` to the given `ModuleType`.
|
| 370 |
+
///
|
| 371 |
+
/// This method is useful when calling `apply()`.
|
| 372 |
+
/// \rst
|
| 373 |
+
/// .. code-block:: cpp
|
| 374 |
+
///
|
| 375 |
+
/// void initialize_weights(nn::Module& module) {
|
| 376 |
+
/// torch::NoGradGuard no_grad;
|
| 377 |
+
/// if (auto* linear = module.as<nn::Linear>()) {
|
| 378 |
+
/// linear->weight.normal_(0.0, 0.02);
|
| 379 |
+
/// }
|
| 380 |
+
/// }
|
| 381 |
+
///
|
| 382 |
+
/// MyModule module;
|
| 383 |
+
/// module.apply(initialize_weights);
|
| 384 |
+
/// \endrst
|
| 385 |
+
template <
|
| 386 |
+
typename ModuleType,
|
| 387 |
+
typename = torch::detail::disable_if_module_holder_t<ModuleType>>
|
| 388 |
+
const ModuleType* as() const noexcept;
|
| 389 |
+
|
| 390 |
+
/// Serializes the `Module` into the given `OutputArchive`.
|
| 391 |
+
///
|
| 392 |
+
/// If the `Module` contains unserializable submodules (e.g.
|
| 393 |
+
/// `nn::Functional`), those submodules are skipped when serializing.
|
| 394 |
+
virtual void save(serialize::OutputArchive& archive) const;
|
| 395 |
+
|
| 396 |
+
/// Deserializes the `Module` from the given `InputArchive`.
|
| 397 |
+
///
|
| 398 |
+
/// If the `Module` contains unserializable submodules (e.g.
|
| 399 |
+
/// `nn::Functional`), we don't check the existence of those submodules in the
|
| 400 |
+
/// `InputArchive` when deserializing.
|
| 401 |
+
virtual void load(serialize::InputArchive& archive);
|
| 402 |
+
|
| 403 |
+
/// Streams a pretty representation of the `Module` into the given `stream`.
|
| 404 |
+
/// By default, this representation will be the name of the module (taken from
|
| 405 |
+
/// `name()`), followed by a recursive pretty print of all of the `Module`'s
|
| 406 |
+
/// submodules.
|
| 407 |
+
///
|
| 408 |
+
/// Override this method to change the pretty print. The input
|
| 409 |
+
/// `stream` should be returned from the method, to allow easy chaining.
|
| 410 |
+
virtual void pretty_print(std::ostream& stream) const;
|
| 411 |
+
|
| 412 |
+
/// Returns whether the `Module` is serializable.
|
| 413 |
+
virtual bool is_serializable() const;
|
| 414 |
+
|
| 415 |
+
/// Registers a parameter with this `Module`.
|
| 416 |
+
///
|
| 417 |
+
/// A parameter should be any gradient-recording tensor used in the
|
| 418 |
+
/// implementation of your `Module`. Registering it makes it available to
|
| 419 |
+
/// methods such as `parameters()`, `clone()` or `to().`
|
| 420 |
+
///
|
| 421 |
+
/// Note that registering an undefined Tensor (e.g.
|
| 422 |
+
/// `module.register_parameter("param", Tensor())`) is allowed, and is
|
| 423 |
+
/// equivalent to `module.register_parameter("param", None)` in Python API.
|
| 424 |
+
///
|
| 425 |
+
/// \rst
|
| 426 |
+
/// .. code-block:: cpp
|
| 427 |
+
///
|
| 428 |
+
/// MyModule::MyModule() {
|
| 429 |
+
/// weight_ = register_parameter("weight", torch::randn({A, B}));
|
| 430 |
+
/// }
|
| 431 |
+
/// \endrst
|
| 432 |
+
Tensor& register_parameter(
|
| 433 |
+
std::string name,
|
| 434 |
+
Tensor tensor,
|
| 435 |
+
bool requires_grad = true);
|
| 436 |
+
|
| 437 |
+
/// Registers a buffer with this `Module`.
|
| 438 |
+
///
|
| 439 |
+
/// A buffer is intended to be state in your module that does not record
|
| 440 |
+
/// gradients, such as running statistics. Registering it makes it available
|
| 441 |
+
/// to methods such as `buffers()`, `clone()` or `to().
|
| 442 |
+
///
|
| 443 |
+
/// \rst
|
| 444 |
+
/// .. code-block:: cpp
|
| 445 |
+
///
|
| 446 |
+
/// MyModule::MyModule() {
|
| 447 |
+
/// mean_ = register_buffer("mean", torch::empty({num_features_}));
|
| 448 |
+
/// }
|
| 449 |
+
/// \endrst
|
| 450 |
+
Tensor& register_buffer(std::string name, Tensor tensor);
|
| 451 |
+
|
| 452 |
+
/// Registers a submodule with this `Module`.
|
| 453 |
+
///
|
| 454 |
+
/// Registering a module makes it available to methods such as `modules()`,
|
| 455 |
+
/// `clone()` or `to()`.
|
| 456 |
+
///
|
| 457 |
+
/// \rst
|
| 458 |
+
/// .. code-block:: cpp
|
| 459 |
+
///
|
| 460 |
+
/// MyModule::MyModule() {
|
| 461 |
+
/// submodule_ = register_module("linear", torch::nn::Linear(3, 4));
|
| 462 |
+
/// }
|
| 463 |
+
/// \endrst
|
| 464 |
+
template <typename ModuleType>
|
| 465 |
+
std::shared_ptr<ModuleType> register_module(
|
| 466 |
+
std::string name,
|
| 467 |
+
std::shared_ptr<ModuleType> module);
|
| 468 |
+
|
| 469 |
+
/// Registers a submodule with this `Module`.
|
| 470 |
+
///
|
| 471 |
+
/// This method deals with `ModuleHolder`s.
|
| 472 |
+
///
|
| 473 |
+
/// Registering a module makes it available to methods such as `modules()`,
|
| 474 |
+
/// `clone()` or `to()`.
|
| 475 |
+
///
|
| 476 |
+
/// \rst
|
| 477 |
+
/// .. code-block:: cpp
|
| 478 |
+
///
|
| 479 |
+
/// MyModule::MyModule() {
|
| 480 |
+
/// submodule_ = register_module("linear", torch::nn::Linear(3, 4));
|
| 481 |
+
/// }
|
| 482 |
+
/// \endrst
|
| 483 |
+
template <typename ModuleType>
|
| 484 |
+
std::shared_ptr<ModuleType> register_module(
|
| 485 |
+
std::string name,
|
| 486 |
+
ModuleHolder<ModuleType> module_holder);
|
| 487 |
+
|
| 488 |
+
/// Replaces a registered submodule with this `Module`.
|
| 489 |
+
///
|
| 490 |
+
/// This takes care of the registration, if you used submodule members, you
|
| 491 |
+
/// should
|
| 492 |
+
// assign the submodule as well, i.e. use as
|
| 493 |
+
/// module->submodule_ = module->replace_module("linear",
|
| 494 |
+
/// torch::nn::Linear(3, 4));
|
| 495 |
+
/// It only works when a module of the name is already registered.
|
| 496 |
+
///
|
| 497 |
+
/// This is useful for replacing a module after initialization, e.g.
|
| 498 |
+
/// for finetuning.
|
| 499 |
+
template <typename ModuleType>
|
| 500 |
+
std::shared_ptr<ModuleType> replace_module(
|
| 501 |
+
const std::string& name,
|
| 502 |
+
std::shared_ptr<ModuleType> module);
|
| 503 |
+
|
| 504 |
+
/// Replaces a registered submodule with this `Module`.
|
| 505 |
+
/// This method deals with `ModuleHolder`s.
|
| 506 |
+
///
|
| 507 |
+
/// This takes care of the registration, if you used submodule members, you
|
| 508 |
+
/// should
|
| 509 |
+
// assign the submodule as well, i.e. use as
|
| 510 |
+
/// module->submodule_ = module->replace_module("linear", linear_holder);
|
| 511 |
+
/// It only works when a module of the name is already registered.
|
| 512 |
+
///
|
| 513 |
+
/// This is useful for replacing a module after initialization, e.g.
|
| 514 |
+
/// for finetuning.
|
| 515 |
+
template <typename ModuleType>
|
| 516 |
+
std::shared_ptr<ModuleType> replace_module(
|
| 517 |
+
const std::string& name,
|
| 518 |
+
ModuleHolder<ModuleType> module_holder);
|
| 519 |
+
|
| 520 |
+
/// Unregisters a submodule from this `Module`. If there is no such module
|
| 521 |
+
/// with `name` an exception is thrown.
|
| 522 |
+
void unregister_module(const std::string& name);
|
| 523 |
+
|
| 524 |
+
protected:
|
| 525 |
+
/// The following three functions allow a module with default arguments in its
|
| 526 |
+
/// forward method to be used in a Sequential module.
|
| 527 |
+
/// You should NEVER override these functions manually. Instead, you should
|
| 528 |
+
/// use the `FORWARD_HAS_DEFAULT_ARGS` macro.
|
| 529 |
+
virtual bool _forward_has_default_args() {
|
| 530 |
+
return false;
|
| 531 |
+
}
|
| 532 |
+
|
| 533 |
+
virtual unsigned int _forward_num_required_args() {
|
| 534 |
+
TORCH_CHECK(
|
| 535 |
+
false,
|
| 536 |
+
"torch::nn::Module subclass that has default arguments in `forward` method ",
|
| 537 |
+
"must override `_forward_num_required_args` method. Please use ",
|
| 538 |
+
"`FORWARD_HAS_DEFAULT_ARGS` macro to do so.");
|
| 539 |
+
}
|
| 540 |
+
|
| 541 |
+
virtual std::vector<AnyValue> _forward_populate_default_args(
|
| 542 |
+
std::vector<AnyValue>&& arguments) {
|
| 543 |
+
TORCH_CHECK(
|
| 544 |
+
false,
|
| 545 |
+
"torch::nn::Module subclass that has default arguments in `forward` method ",
|
| 546 |
+
"must override `_forward_populate_default_args` method. Please use ",
|
| 547 |
+
"`FORWARD_HAS_DEFAULT_ARGS` macro to do so.");
|
| 548 |
+
}
|
| 549 |
+
|
| 550 |
+
/// The registered parameters of this `Module`.
|
| 551 |
+
/// Inorder to access parameters_ in ParameterDict and ParameterList
|
| 552 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 553 |
+
OrderedDict<std::string, Tensor> parameters_;
|
| 554 |
+
|
| 555 |
+
private:
|
| 556 |
+
// Friend classes.
|
| 557 |
+
|
| 558 |
+
template <typename Derived>
|
| 559 |
+
friend class Cloneable;
|
| 560 |
+
|
| 561 |
+
template <typename ModuleType, typename... ArgumentTypes>
|
| 562 |
+
friend struct AnyModuleHolder;
|
| 563 |
+
|
| 564 |
+
/// Pretty prints the given `Module` into the `ostream`.
|
| 565 |
+
TORCH_API friend std::ostream& operator<<(
|
| 566 |
+
std::ostream& stream,
|
| 567 |
+
const nn::Module& module);
|
| 568 |
+
|
| 569 |
+
// data parallel using this method to configure gradient edges during the
|
| 570 |
+
// replicate step.
|
| 571 |
+
template <typename ModuleType>
|
| 572 |
+
friend void replicate_grad_edges(
|
| 573 |
+
const std::shared_ptr<Module>& module,
|
| 574 |
+
const std::vector<std::shared_ptr<ModuleType>>& replicas,
|
| 575 |
+
const std::vector<Device>& devices);
|
| 576 |
+
|
| 577 |
+
// Private methods.
|
| 578 |
+
|
| 579 |
+
/// Used in the implementation of `Cloneable`.
|
| 580 |
+
virtual void clone_(Module& other, const optional<Device>& device);
|
| 581 |
+
|
| 582 |
+
/// The implementation of the various `to()` methods.
|
| 583 |
+
template <typename... Ts>
|
| 584 |
+
void to_impl(Ts&&... ts);
|
| 585 |
+
|
| 586 |
+
/// Implements pretty printing the module hierarchy.
|
| 587 |
+
void pretty_print_recursive(
|
| 588 |
+
std::ostream& stream,
|
| 589 |
+
const std::string& indentation) const;
|
| 590 |
+
|
| 591 |
+
/// Applies the `function` to every submodule recursively, starting at this
|
| 592 |
+
/// `Module`'s children (thus not including the module itself).
|
| 593 |
+
void apply_to_submodules(
|
| 594 |
+
const NamedModulePointerApplyFunction& function,
|
| 595 |
+
const std::string& name_prefix = std::string()) const;
|
| 596 |
+
|
| 597 |
+
/// Returns a shared_ptr to `this` in a safe (checked) way.
|
| 598 |
+
std::shared_ptr<Module> shared_from_this_checked() const;
|
| 599 |
+
|
| 600 |
+
/// The registered buffers of this `Module`.
|
| 601 |
+
OrderedDict<std::string, Tensor> buffers_;
|
| 602 |
+
|
| 603 |
+
/// The registered (direct) submodules of this `Module`.
|
| 604 |
+
OrderedDict<std::string, std::shared_ptr<Module>> children_;
|
| 605 |
+
|
| 606 |
+
/// The module's name (e.g. "LSTM").
|
| 607 |
+
mutable optional<std::string> name_;
|
| 608 |
+
|
| 609 |
+
/// Whether the module is in training mode.
|
| 610 |
+
bool is_training_{true};
|
| 611 |
+
};
|
| 612 |
+
|
| 613 |
+
/// Serialize a `Module` pointer into an `OutputArchive`.
|
| 614 |
+
TORCH_API serialize::OutputArchive& operator<<(
|
| 615 |
+
serialize::OutputArchive& archive,
|
| 616 |
+
const std::shared_ptr<nn::Module>& module);
|
| 617 |
+
|
| 618 |
+
/// Deserializes a `Module` from an `InputArchive`.
|
| 619 |
+
TORCH_API serialize::InputArchive& operator>>(
|
| 620 |
+
serialize::InputArchive& archive,
|
| 621 |
+
const std::shared_ptr<nn::Module>& module);
|
| 622 |
+
|
| 623 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ nn::Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 624 |
+
|
| 625 |
+
template <typename ModuleType>
|
| 626 |
+
typename ModuleType::ContainedType* Module::as() noexcept {
|
| 627 |
+
// Use the contained type of the `ModuleHolder`, e.g. `LinearImpl` for
|
| 628 |
+
// `Linear`, since `LinearImpl` inherits `nn::Module`.
|
| 629 |
+
return as<typename ModuleType::ContainedType>();
|
| 630 |
+
}
|
| 631 |
+
|
| 632 |
+
template <typename ModuleType>
|
| 633 |
+
const typename ModuleType::ContainedType* Module::as() const noexcept {
|
| 634 |
+
// Use the contained type of the `ModuleHolder`, e.g. `LinearImpl` for
|
| 635 |
+
// `Linear`, since `LinearImpl` inherits `nn::Module`.
|
| 636 |
+
return as<typename ModuleType::ContainedType>();
|
| 637 |
+
}
|
| 638 |
+
|
| 639 |
+
template <typename ModuleType, typename>
|
| 640 |
+
ModuleType* Module::as() noexcept {
|
| 641 |
+
return dynamic_cast<ModuleType*>(this);
|
| 642 |
+
}
|
| 643 |
+
|
| 644 |
+
template <typename ModuleType, typename>
|
| 645 |
+
const ModuleType* Module::as() const noexcept {
|
| 646 |
+
return dynamic_cast<const ModuleType*>(this);
|
| 647 |
+
}
|
| 648 |
+
|
| 649 |
+
template <typename ModuleType>
|
| 650 |
+
std::shared_ptr<ModuleType> Module::register_module(
|
| 651 |
+
std::string name,
|
| 652 |
+
std::shared_ptr<ModuleType> module) {
|
| 653 |
+
TORCH_CHECK(!name.empty(), "Submodule name must not be empty");
|
| 654 |
+
TORCH_CHECK(
|
| 655 |
+
name.find('.') == std::string::npos,
|
| 656 |
+
"Submodule name must not contain a dot (got '",
|
| 657 |
+
name,
|
| 658 |
+
"')");
|
| 659 |
+
auto& base_module = children_.insert(std::move(name), std::move(module));
|
| 660 |
+
return std::dynamic_pointer_cast<ModuleType>(base_module);
|
| 661 |
+
}
|
| 662 |
+
|
| 663 |
+
template <typename ModuleType>
|
| 664 |
+
std::shared_ptr<ModuleType> Module::register_module(
|
| 665 |
+
std::string name,
|
| 666 |
+
ModuleHolder<ModuleType> module_holder) {
|
| 667 |
+
return register_module(std::move(name), module_holder.ptr());
|
| 668 |
+
}
|
| 669 |
+
|
| 670 |
+
template <typename ModuleType>
|
| 671 |
+
std::shared_ptr<ModuleType> Module::replace_module(
|
| 672 |
+
const std::string& name,
|
| 673 |
+
std::shared_ptr<ModuleType> module) {
|
| 674 |
+
auto& base_module = (children_[name] = std::move(module));
|
| 675 |
+
return std::dynamic_pointer_cast<ModuleType>(base_module);
|
| 676 |
+
}
|
| 677 |
+
|
| 678 |
+
template <typename ModuleType>
|
| 679 |
+
std::shared_ptr<ModuleType> Module::replace_module(
|
| 680 |
+
const std::string& name,
|
| 681 |
+
ModuleHolder<ModuleType> module_holder) {
|
| 682 |
+
return replace_module(name, module_holder.ptr());
|
| 683 |
+
}
|
| 684 |
+
|
| 685 |
+
template <typename... Ts>
|
| 686 |
+
void Module::to_impl(Ts&&... ts) {
|
| 687 |
+
// First call `to()` on every child module.
|
| 688 |
+
for (auto& child : children_) {
|
| 689 |
+
child.value()->to(ts...);
|
| 690 |
+
}
|
| 691 |
+
// Then move every parameter to the new dtype/device.
|
| 692 |
+
for (auto& parameter : named_parameters(/*recurse=*/false)) {
|
| 693 |
+
parameter->set_data(autograd::Variable(*parameter).to(ts...));
|
| 694 |
+
}
|
| 695 |
+
// Then move every buffer to the new dtype/device.
|
| 696 |
+
for (auto& buffer : named_buffers(/*recurse=*/false)) {
|
| 697 |
+
buffer->set_data(autograd::Variable(*buffer).to(ts...));
|
| 698 |
+
}
|
| 699 |
+
}
|
| 700 |
+
|
| 701 |
+
} // namespace nn
|
| 702 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules.h
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// Common
|
| 4 |
+
#include <torch/nn/modules/common.h>
|
| 5 |
+
|
| 6 |
+
// Containers
|
| 7 |
+
#include <torch/nn/modules/container/any.h>
|
| 8 |
+
#include <torch/nn/modules/container/functional.h>
|
| 9 |
+
#include <torch/nn/modules/container/moduledict.h>
|
| 10 |
+
#include <torch/nn/modules/container/modulelist.h>
|
| 11 |
+
#include <torch/nn/modules/container/named_any.h>
|
| 12 |
+
#include <torch/nn/modules/container/parameterdict.h>
|
| 13 |
+
#include <torch/nn/modules/container/parameterlist.h>
|
| 14 |
+
#include <torch/nn/modules/container/sequential.h>
|
| 15 |
+
|
| 16 |
+
// Layers
|
| 17 |
+
#include <torch/nn/modules/activation.h>
|
| 18 |
+
#include <torch/nn/modules/adaptive.h>
|
| 19 |
+
#include <torch/nn/modules/batchnorm.h>
|
| 20 |
+
#include <torch/nn/modules/conv.h>
|
| 21 |
+
#include <torch/nn/modules/distance.h>
|
| 22 |
+
#include <torch/nn/modules/dropout.h>
|
| 23 |
+
#include <torch/nn/modules/embedding.h>
|
| 24 |
+
#include <torch/nn/modules/fold.h>
|
| 25 |
+
#include <torch/nn/modules/instancenorm.h>
|
| 26 |
+
#include <torch/nn/modules/linear.h>
|
| 27 |
+
#include <torch/nn/modules/loss.h>
|
| 28 |
+
#include <torch/nn/modules/normalization.h>
|
| 29 |
+
#include <torch/nn/modules/padding.h>
|
| 30 |
+
#include <torch/nn/modules/pixelshuffle.h>
|
| 31 |
+
#include <torch/nn/modules/pooling.h>
|
| 32 |
+
#include <torch/nn/modules/rnn.h>
|
| 33 |
+
#include <torch/nn/modules/transformer.h>
|
| 34 |
+
#include <torch/nn/modules/transformercoder.h>
|
| 35 |
+
#include <torch/nn/modules/transformerlayer.h>
|
| 36 |
+
#include <torch/nn/modules/upsampling.h>
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options.h
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/nn/options/batchnorm.h>
|
| 4 |
+
#include <torch/nn/options/conv.h>
|
| 5 |
+
#include <torch/nn/options/dropout.h>
|
| 6 |
+
#include <torch/nn/options/fold.h>
|
| 7 |
+
#include <torch/nn/options/linear.h>
|
| 8 |
+
#include <torch/nn/options/loss.h>
|
| 9 |
+
#include <torch/nn/options/normalization.h>
|
| 10 |
+
#include <torch/nn/options/padding.h>
|
| 11 |
+
#include <torch/nn/options/pixelshuffle.h>
|
| 12 |
+
#include <torch/nn/options/pooling.h>
|
| 13 |
+
#include <torch/nn/options/rnn.h>
|
| 14 |
+
#include <torch/nn/options/transformer.h>
|
| 15 |
+
#include <torch/nn/options/transformercoder.h>
|
| 16 |
+
#include <torch/nn/options/transformerlayer.h>
|
| 17 |
+
#include <torch/nn/options/upsampling.h>
|
| 18 |
+
#include <torch/nn/options/vision.h>
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/linear.h
ADDED
|
@@ -0,0 +1,95 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/arg.h>
|
| 4 |
+
#include <torch/csrc/Export.h>
|
| 5 |
+
#include <torch/types.h>
|
| 6 |
+
|
| 7 |
+
namespace torch {
|
| 8 |
+
namespace nn {
|
| 9 |
+
|
| 10 |
+
/// Options for the `Linear` module.
|
| 11 |
+
///
|
| 12 |
+
/// Example:
|
| 13 |
+
/// ```
|
| 14 |
+
/// Linear model(LinearOptions(5, 2).bias(false));
|
| 15 |
+
/// ```
|
| 16 |
+
struct TORCH_API LinearOptions {
|
| 17 |
+
LinearOptions(int64_t in_features, int64_t out_features);
|
| 18 |
+
/// size of each input sample
|
| 19 |
+
TORCH_ARG(int64_t, in_features);
|
| 20 |
+
|
| 21 |
+
/// size of each output sample
|
| 22 |
+
TORCH_ARG(int64_t, out_features);
|
| 23 |
+
|
| 24 |
+
/// If set to false, the layer will not learn an additive bias. Default: true
|
| 25 |
+
TORCH_ARG(bool, bias) = true;
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
// ============================================================================
|
| 29 |
+
|
| 30 |
+
/// Options for the `Flatten` module.
|
| 31 |
+
///
|
| 32 |
+
/// Example:
|
| 33 |
+
/// ```
|
| 34 |
+
/// Flatten model(FlattenOptions().start_dim(2).end_dim(4));
|
| 35 |
+
/// ```
|
| 36 |
+
struct TORCH_API FlattenOptions {
|
| 37 |
+
/// first dim to flatten
|
| 38 |
+
TORCH_ARG(int64_t, start_dim) = 1;
|
| 39 |
+
/// last dim to flatten
|
| 40 |
+
TORCH_ARG(int64_t, end_dim) = -1;
|
| 41 |
+
};
|
| 42 |
+
|
| 43 |
+
// ============================================================================
|
| 44 |
+
|
| 45 |
+
/// Options for the `Unflatten` module.
|
| 46 |
+
///
|
| 47 |
+
/// Note: If input tensor is named, use dimname and namedshape arguments.
|
| 48 |
+
///
|
| 49 |
+
/// Example:
|
| 50 |
+
/// ```
|
| 51 |
+
/// Unflatten unnamed_model(UnflattenOptions(0, {2, 2}));
|
| 52 |
+
/// Unflatten named_model(UnflattenOptions("B", {{"B1", 2}, {"B2", 2}}));
|
| 53 |
+
/// ```
|
| 54 |
+
struct TORCH_API UnflattenOptions {
|
| 55 |
+
typedef std::vector<std::pair<std::string, int64_t>> namedshape_t;
|
| 56 |
+
|
| 57 |
+
UnflattenOptions(int64_t dim, std::vector<int64_t> sizes);
|
| 58 |
+
UnflattenOptions(const char* dimname, namedshape_t namedshape);
|
| 59 |
+
UnflattenOptions(std::string dimname, namedshape_t namedshape);
|
| 60 |
+
|
| 61 |
+
/// dim to unflatten
|
| 62 |
+
TORCH_ARG(int64_t, dim);
|
| 63 |
+
/// name of dim to unflatten, for use with named tensors
|
| 64 |
+
TORCH_ARG(std::string, dimname);
|
| 65 |
+
/// new shape of unflattened dim
|
| 66 |
+
TORCH_ARG(std::vector<int64_t>, sizes);
|
| 67 |
+
/// new shape of unflattened dim with names, for use with named tensors
|
| 68 |
+
TORCH_ARG(namedshape_t, namedshape);
|
| 69 |
+
};
|
| 70 |
+
|
| 71 |
+
// ============================================================================
|
| 72 |
+
|
| 73 |
+
/// Options for the `Bilinear` module.
|
| 74 |
+
///
|
| 75 |
+
/// Example:
|
| 76 |
+
/// ```
|
| 77 |
+
/// Bilinear model(BilinearOptions(3, 2, 4).bias(false));
|
| 78 |
+
/// ```
|
| 79 |
+
struct TORCH_API BilinearOptions {
|
| 80 |
+
BilinearOptions(
|
| 81 |
+
int64_t in1_features,
|
| 82 |
+
int64_t in2_features,
|
| 83 |
+
int64_t out_features);
|
| 84 |
+
/// The number of features in input 1 (columns of the input1 matrix).
|
| 85 |
+
TORCH_ARG(int64_t, in1_features);
|
| 86 |
+
/// The number of features in input 2 (columns of the input2 matrix).
|
| 87 |
+
TORCH_ARG(int64_t, in2_features);
|
| 88 |
+
/// The number of output features to produce (columns of the output matrix).
|
| 89 |
+
TORCH_ARG(int64_t, out_features);
|
| 90 |
+
/// Whether to learn and add a bias after the bilinear transformation.
|
| 91 |
+
TORCH_ARG(bool, bias) = true;
|
| 92 |
+
};
|
| 93 |
+
|
| 94 |
+
} // namespace nn
|
| 95 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/loss.h
ADDED
|
@@ -0,0 +1,802 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/arg.h>
|
| 4 |
+
#include <torch/csrc/Export.h>
|
| 5 |
+
#include <torch/enum.h>
|
| 6 |
+
#include <torch/types.h>
|
| 7 |
+
|
| 8 |
+
namespace torch {
|
| 9 |
+
namespace nn {
|
| 10 |
+
|
| 11 |
+
/// Options for the `L1Loss` module.
|
| 12 |
+
///
|
| 13 |
+
/// Example:
|
| 14 |
+
/// ```
|
| 15 |
+
/// L1Loss model(L1LossOptions(torch::kNone));
|
| 16 |
+
/// ```
|
| 17 |
+
struct TORCH_API L1LossOptions {
|
| 18 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
| 19 |
+
reduction_t;
|
| 20 |
+
|
| 21 |
+
TORCH_OPTIONS_CTOR_VARIANT_ARG3(L1LossOptions, reduction, kNone, kMean, kSum)
|
| 22 |
+
|
| 23 |
+
/// Specifies the reduction to apply to the output.
|
| 24 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
| 25 |
+
};
|
| 26 |
+
|
| 27 |
+
namespace functional {
|
| 28 |
+
/// Options for `torch::nn::functional::l1_loss`.
|
| 29 |
+
///
|
| 30 |
+
/// See the documentation for `torch::nn::L1LossOptions` class to learn what
|
| 31 |
+
/// arguments are supported.
|
| 32 |
+
///
|
| 33 |
+
/// Example:
|
| 34 |
+
/// ```
|
| 35 |
+
/// namespace F = torch::nn::functional;
|
| 36 |
+
/// F::l1_loss(input, target, F::L1LossFuncOptions(torch::kNone));
|
| 37 |
+
/// ```
|
| 38 |
+
using L1LossFuncOptions = L1LossOptions;
|
| 39 |
+
} // namespace functional
|
| 40 |
+
|
| 41 |
+
// ============================================================================
|
| 42 |
+
|
| 43 |
+
/// Options for the `KLDivLoss` module.
|
| 44 |
+
///
|
| 45 |
+
/// Example:
|
| 46 |
+
/// ```
|
| 47 |
+
/// KLDivLoss
|
| 48 |
+
/// model(KLDivLossOptions().reduction(torch::kNone).log_target(false));
|
| 49 |
+
/// ```
|
| 50 |
+
struct TORCH_API KLDivLossOptions {
|
| 51 |
+
typedef std::variant<
|
| 52 |
+
enumtype::kNone,
|
| 53 |
+
enumtype::kBatchMean,
|
| 54 |
+
enumtype::kSum,
|
| 55 |
+
enumtype::kMean>
|
| 56 |
+
reduction_t;
|
| 57 |
+
|
| 58 |
+
TORCH_OPTIONS_CTOR_VARIANT_ARG4(
|
| 59 |
+
KLDivLossOptions,
|
| 60 |
+
reduction,
|
| 61 |
+
kNone,
|
| 62 |
+
kBatchMean,
|
| 63 |
+
kSum,
|
| 64 |
+
kMean)
|
| 65 |
+
|
| 66 |
+
/// Specifies the reduction to apply to the output.
|
| 67 |
+
/// ``'none'`` | ``'batchmean'`` | ``'sum'`` | ``'mean'``. Default: ``'mean'``
|
| 68 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
| 69 |
+
|
| 70 |
+
/// Specifies whether `target` is accepted in the log space. Default: False
|
| 71 |
+
TORCH_ARG(bool, log_target) = false;
|
| 72 |
+
};
|
| 73 |
+
|
| 74 |
+
namespace functional {
|
| 75 |
+
/// Options for `torch::nn::functional::kl_div`.
|
| 76 |
+
///
|
| 77 |
+
/// See the documentation for `torch::nn::KLDivLossOptions` class to learn what
|
| 78 |
+
/// arguments are supported.
|
| 79 |
+
///
|
| 80 |
+
/// Example:
|
| 81 |
+
/// ```
|
| 82 |
+
/// namespace F = torch::nn::functional;
|
| 83 |
+
/// F::kl_div(input, target,
|
| 84 |
+
/// F::KLDivFuncOptions().reduction(torch::kNone).log_target(false));
|
| 85 |
+
/// ```
|
| 86 |
+
using KLDivFuncOptions = KLDivLossOptions;
|
| 87 |
+
} // namespace functional
|
| 88 |
+
|
| 89 |
+
// ============================================================================
|
| 90 |
+
|
| 91 |
+
/// Options for the `MSELoss` module.
|
| 92 |
+
///
|
| 93 |
+
/// Example:
|
| 94 |
+
/// ```
|
| 95 |
+
/// MSELoss model(MSELossOptions(torch::kNone));
|
| 96 |
+
/// ```
|
| 97 |
+
struct TORCH_API MSELossOptions {
|
| 98 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
| 99 |
+
reduction_t;
|
| 100 |
+
|
| 101 |
+
TORCH_OPTIONS_CTOR_VARIANT_ARG3(MSELossOptions, reduction, kNone, kMean, kSum)
|
| 102 |
+
|
| 103 |
+
/// Specifies the reduction to apply to the output.
|
| 104 |
+
/// ``'none'`` | ``'mean'`` | ``'sum'``. Default: ``'mean'``
|
| 105 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
| 106 |
+
};
|
| 107 |
+
|
| 108 |
+
namespace functional {
|
| 109 |
+
/// Options for `torch::nn::functional::mse_loss`.
|
| 110 |
+
///
|
| 111 |
+
/// See the documentation for `torch::nn::MSELossOptions` class to learn what
|
| 112 |
+
/// arguments are supported.
|
| 113 |
+
///
|
| 114 |
+
/// Example:
|
| 115 |
+
/// ```
|
| 116 |
+
/// namespace F = torch::nn::functional;
|
| 117 |
+
/// F::mse_loss(input, target, F::MSELossFuncOptions(torch::kNone));
|
| 118 |
+
/// ```
|
| 119 |
+
using MSELossFuncOptions = MSELossOptions;
|
| 120 |
+
} // namespace functional
|
| 121 |
+
|
| 122 |
+
// ============================================================================
|
| 123 |
+
|
| 124 |
+
/// Options for the `BCELoss` module.
|
| 125 |
+
///
|
| 126 |
+
/// Example:
|
| 127 |
+
/// ```
|
| 128 |
+
/// BCELoss model(BCELossOptions().reduction(torch::kNone).weight(weight));
|
| 129 |
+
/// ```
|
| 130 |
+
struct TORCH_API BCELossOptions {
|
| 131 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
| 132 |
+
reduction_t;
|
| 133 |
+
|
| 134 |
+
/// A manual rescaling weight given to the loss of each batch element.
|
| 135 |
+
TORCH_ARG(Tensor, weight) = {};
|
| 136 |
+
/// Specifies the reduction to apply to the output.
|
| 137 |
+
/// ``'none'`` | ``'mean'`` | ``'sum'``. Default: ``'mean'``
|
| 138 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
| 139 |
+
};
|
| 140 |
+
|
| 141 |
+
namespace functional {
|
| 142 |
+
/// Options for `torch::nn::functional::binary_cross_entropy`.
|
| 143 |
+
///
|
| 144 |
+
/// See the documentation for `torch::nn::BCELossOptions` class to learn what
|
| 145 |
+
/// arguments are supported.
|
| 146 |
+
///
|
| 147 |
+
/// Example:
|
| 148 |
+
/// ```
|
| 149 |
+
/// namespace F = torch::nn::functional;
|
| 150 |
+
/// F::binary_cross_entropy(input, target,
|
| 151 |
+
/// F::BinaryCrossEntropyFuncOptions().weight(weight));
|
| 152 |
+
/// ```
|
| 153 |
+
using BinaryCrossEntropyFuncOptions = BCELossOptions;
|
| 154 |
+
} // namespace functional
|
| 155 |
+
|
| 156 |
+
// ============================================================================
|
| 157 |
+
|
| 158 |
+
/// Options for the `HingeEmbeddingLoss` module.
|
| 159 |
+
///
|
| 160 |
+
/// Example:
|
| 161 |
+
/// ```
|
| 162 |
+
/// HingeEmbeddingLoss
|
| 163 |
+
/// model(HingeEmbeddingLossOptions().margin(4).reduction(torch::kNone));
|
| 164 |
+
/// ```
|
| 165 |
+
struct TORCH_API HingeEmbeddingLossOptions {
|
| 166 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
| 167 |
+
reduction_t;
|
| 168 |
+
|
| 169 |
+
/// Specifies the threshold for which the distance of a negative sample must
|
| 170 |
+
/// reach in order to incur zero loss. Default: 1
|
| 171 |
+
TORCH_ARG(double, margin) = 1.0;
|
| 172 |
+
/// Specifies the reduction to apply to the output. Default: Mean
|
| 173 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
| 174 |
+
};
|
| 175 |
+
|
| 176 |
+
namespace functional {
|
| 177 |
+
/// Options for `torch::nn::functional::hinge_embedding_loss`.
|
| 178 |
+
///
|
| 179 |
+
/// See the documentation for `torch::nn::HingeEmbeddingLossOptions` class to
|
| 180 |
+
/// learn what arguments are supported.
|
| 181 |
+
///
|
| 182 |
+
/// Example:
|
| 183 |
+
/// ```
|
| 184 |
+
/// namespace F = torch::nn::functional;
|
| 185 |
+
/// F::hinge_embedding_loss(input, target,
|
| 186 |
+
/// F::HingeEmbeddingLossFuncOptions().margin(2));
|
| 187 |
+
/// ```
|
| 188 |
+
using HingeEmbeddingLossFuncOptions = HingeEmbeddingLossOptions;
|
| 189 |
+
} // namespace functional
|
| 190 |
+
|
| 191 |
+
// ============================================================================
|
| 192 |
+
|
| 193 |
+
/// Options for the `MultiMarginLoss` module.
|
| 194 |
+
///
|
| 195 |
+
/// Example:
|
| 196 |
+
/// ```
|
| 197 |
+
/// MultiMarginLoss model(MultiMarginLossOptions().margin(2).weight(weight));
|
| 198 |
+
/// ```
|
| 199 |
+
struct TORCH_API MultiMarginLossOptions {
|
| 200 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
| 201 |
+
reduction_t;
|
| 202 |
+
|
| 203 |
+
/// Has a default value of :math:`1`. :math:`1` and :math:`2`
|
| 204 |
+
/// are the only supported values.
|
| 205 |
+
TORCH_ARG(int64_t, p) = 1;
|
| 206 |
+
/// Has a default value of :math:`1`.
|
| 207 |
+
TORCH_ARG(double, margin) = 1.0;
|
| 208 |
+
/// A manual rescaling weight given to each
|
| 209 |
+
/// class. If given, it has to be a Tensor of size `C`. Otherwise, it is
|
| 210 |
+
/// treated as if having all ones.
|
| 211 |
+
TORCH_ARG(Tensor, weight) = Tensor();
|
| 212 |
+
/// Specifies the reduction to apply to the output:
|
| 213 |
+
/// ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be
|
| 214 |
+
/// applied,
|
| 215 |
+
/// ``'mean'``: the sum of the output will be divided by the number of
|
| 216 |
+
/// elements in the output, ``'sum'``: the output will be summed. Default:
|
| 217 |
+
/// ``'mean'``
|
| 218 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
| 219 |
+
};
|
| 220 |
+
|
| 221 |
+
namespace functional {
|
| 222 |
+
/// Options for `torch::nn::functional::multi_margin_loss`.
|
| 223 |
+
///
|
| 224 |
+
/// See the documentation for `torch::nn::MultiMarginLossOptions` class to learn
|
| 225 |
+
/// what arguments are supported.
|
| 226 |
+
///
|
| 227 |
+
/// Example:
|
| 228 |
+
/// ```
|
| 229 |
+
/// namespace F = torch::nn::functional;
|
| 230 |
+
/// F::multi_margin_loss(input, target,
|
| 231 |
+
/// F::MultiMarginLossFuncOptions().margin(2).weight(weight));
|
| 232 |
+
/// ```
|
| 233 |
+
using MultiMarginLossFuncOptions = MultiMarginLossOptions;
|
| 234 |
+
} // namespace functional
|
| 235 |
+
|
| 236 |
+
// ============================================================================
|
| 237 |
+
|
| 238 |
+
/// Options for the `CosineEmbeddingLoss` module.
|
| 239 |
+
///
|
| 240 |
+
/// Example:
|
| 241 |
+
/// ```
|
| 242 |
+
/// CosineEmbeddingLoss model(CosineEmbeddingLossOptions().margin(0.5));
|
| 243 |
+
/// ```
|
| 244 |
+
struct TORCH_API CosineEmbeddingLossOptions {
|
| 245 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
| 246 |
+
reduction_t;
|
| 247 |
+
|
| 248 |
+
/// Specifies the threshold for which the distance of a negative sample must
|
| 249 |
+
/// reach in order to incur zero loss. Should be a number from -1 to 1, 0
|
| 250 |
+
/// to 0.5 is suggested. Default: 0.0
|
| 251 |
+
TORCH_ARG(double, margin) = 0.0;
|
| 252 |
+
/// Specifies the reduction to apply to the output. Default: Mean
|
| 253 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
| 254 |
+
};
|
| 255 |
+
|
| 256 |
+
namespace functional {
|
| 257 |
+
/// Options for `torch::nn::functional::cosine_embedding_loss`.
|
| 258 |
+
///
|
| 259 |
+
/// See the documentation for `torch::nn::CosineEmbeddingLossOptions` class to
|
| 260 |
+
/// learn what arguments are supported.
|
| 261 |
+
///
|
| 262 |
+
/// Example:
|
| 263 |
+
/// ```
|
| 264 |
+
/// namespace F = torch::nn::functional;
|
| 265 |
+
/// F::cosine_embedding_loss(input1, input2, target,
|
| 266 |
+
/// F::CosineEmbeddingLossFuncOptions().margin(0.5));
|
| 267 |
+
/// ```
|
| 268 |
+
using CosineEmbeddingLossFuncOptions = CosineEmbeddingLossOptions;
|
| 269 |
+
} // namespace functional
|
| 270 |
+
|
| 271 |
+
// ============================================================================
|
| 272 |
+
|
| 273 |
+
/// Options for the `MultiLabelMarginLoss` module.
|
| 274 |
+
///
|
| 275 |
+
/// Example:
|
| 276 |
+
/// ```
|
| 277 |
+
/// MultiLabelMarginLoss model(MultiLabelMarginLossOptions(torch::kNone));
|
| 278 |
+
/// ```
|
| 279 |
+
struct TORCH_API MultiLabelMarginLossOptions {
|
| 280 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
| 281 |
+
reduction_t;
|
| 282 |
+
|
| 283 |
+
TORCH_OPTIONS_CTOR_VARIANT_ARG3(
|
| 284 |
+
MultiLabelMarginLossOptions,
|
| 285 |
+
reduction,
|
| 286 |
+
kNone,
|
| 287 |
+
kMean,
|
| 288 |
+
kSum)
|
| 289 |
+
|
| 290 |
+
/// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.
|
| 291 |
+
/// 'none': no reduction will be applied, 'mean': the sum of the output will
|
| 292 |
+
/// be divided by the number of elements in the output, 'sum': the output will
|
| 293 |
+
/// be summed. Default: 'mean'
|
| 294 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
| 295 |
+
};
|
| 296 |
+
|
| 297 |
+
namespace functional {
|
| 298 |
+
/// Options for `torch::nn::functional::multilabel_margin_loss`.
|
| 299 |
+
///
|
| 300 |
+
/// See the documentation for `torch::nn::MultiLabelMarginLossOptions` class to
|
| 301 |
+
/// learn what arguments are supported.
|
| 302 |
+
///
|
| 303 |
+
/// Example:
|
| 304 |
+
/// ```
|
| 305 |
+
/// namespace F = torch::nn::functional;
|
| 306 |
+
/// F::multilabel_margin_loss(input, target,
|
| 307 |
+
/// F::MultilabelMarginLossFuncOptions(torch::kNone));
|
| 308 |
+
/// ```
|
| 309 |
+
using MultilabelMarginLossFuncOptions = MultiLabelMarginLossOptions;
|
| 310 |
+
} // namespace functional
|
| 311 |
+
|
| 312 |
+
// ============================================================================
|
| 313 |
+
|
| 314 |
+
/// Options for the `SoftMarginLoss` module.
|
| 315 |
+
///
|
| 316 |
+
/// Example:
|
| 317 |
+
/// ```
|
| 318 |
+
/// SoftMarginLoss model(SoftMarginLossOptions(torch::kNone));
|
| 319 |
+
/// ```
|
| 320 |
+
struct TORCH_API SoftMarginLossOptions {
|
| 321 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
| 322 |
+
reduction_t;
|
| 323 |
+
|
| 324 |
+
TORCH_OPTIONS_CTOR_VARIANT_ARG3(
|
| 325 |
+
SoftMarginLossOptions,
|
| 326 |
+
reduction,
|
| 327 |
+
kNone,
|
| 328 |
+
kMean,
|
| 329 |
+
kSum)
|
| 330 |
+
|
| 331 |
+
/// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.
|
| 332 |
+
/// 'none': no reduction will be applied, 'mean': the sum of the output will
|
| 333 |
+
/// be divided by the number of elements in the output, 'sum': the output will
|
| 334 |
+
/// be summed. Default: 'mean'
|
| 335 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
| 336 |
+
};
|
| 337 |
+
|
| 338 |
+
namespace functional {
|
| 339 |
+
/// Options for `torch::nn::functional::soft_margin_loss`.
|
| 340 |
+
///
|
| 341 |
+
/// See the documentation for `torch::nn::SoftMarginLossOptions` class to learn
|
| 342 |
+
/// what arguments are supported.
|
| 343 |
+
///
|
| 344 |
+
/// Example:
|
| 345 |
+
/// ```
|
| 346 |
+
/// namespace F = torch::nn::functional;
|
| 347 |
+
/// F::soft_margin_loss(input, target,
|
| 348 |
+
/// F::SoftMarginLossFuncOptions(torch::kNone));
|
| 349 |
+
/// ```
|
| 350 |
+
using SoftMarginLossFuncOptions = SoftMarginLossOptions;
|
| 351 |
+
} // namespace functional
|
| 352 |
+
|
| 353 |
+
// ============================================================================
|
| 354 |
+
|
| 355 |
+
/// Options for the `MultiLabelSoftMarginLoss` module.
|
| 356 |
+
///
|
| 357 |
+
/// Example:
|
| 358 |
+
/// ```
|
| 359 |
+
/// MultiLabelSoftMarginLoss
|
| 360 |
+
/// model(MultiLabelSoftMarginLossOptions().reduction(torch::kNone).weight(weight));
|
| 361 |
+
/// ```
|
| 362 |
+
struct TORCH_API MultiLabelSoftMarginLossOptions {
|
| 363 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
| 364 |
+
reduction_t;
|
| 365 |
+
|
| 366 |
+
/// A manual rescaling weight given to each
|
| 367 |
+
/// class. If given, it has to be a Tensor of size `C`. Otherwise, it is
|
| 368 |
+
/// treated as if having all ones.
|
| 369 |
+
TORCH_ARG(Tensor, weight) = Tensor();
|
| 370 |
+
|
| 371 |
+
/// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.
|
| 372 |
+
/// 'none': no reduction will be applied, 'mean': the sum of the output will
|
| 373 |
+
/// be divided by the number of elements in the output, 'sum': the output will
|
| 374 |
+
/// be summed. Default: 'mean'
|
| 375 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
| 376 |
+
};
|
| 377 |
+
|
| 378 |
+
namespace functional {
|
| 379 |
+
/// Options for `torch::nn::functional::multilabel_soft_margin_loss`.
|
| 380 |
+
///
|
| 381 |
+
/// See the documentation for `torch::nn::MultiLabelSoftMarginLossOptions` class
|
| 382 |
+
/// to learn what arguments are supported.
|
| 383 |
+
///
|
| 384 |
+
/// Example:
|
| 385 |
+
/// ```
|
| 386 |
+
/// namespace F = torch::nn::functional;
|
| 387 |
+
/// F::multilabel_soft_margin_loss(input, target,
|
| 388 |
+
/// F::MultilabelSoftMarginLossFuncOptions().reduction(torch::kNone).weight(weight));
|
| 389 |
+
/// ```
|
| 390 |
+
using MultilabelSoftMarginLossFuncOptions = MultiLabelSoftMarginLossOptions;
|
| 391 |
+
} // namespace functional
|
| 392 |
+
|
| 393 |
+
// ============================================================================
|
| 394 |
+
|
| 395 |
+
/// Options for the `TripletMarginLoss` module.
|
| 396 |
+
///
|
| 397 |
+
/// Example:
|
| 398 |
+
/// ```
|
| 399 |
+
/// TripletMarginLoss
|
| 400 |
+
/// model(TripletMarginLossOptions().margin(3).p(2).eps(1e-06).swap(false));
|
| 401 |
+
/// ```
|
| 402 |
+
struct TORCH_API TripletMarginLossOptions {
|
| 403 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
| 404 |
+
reduction_t;
|
| 405 |
+
|
| 406 |
+
/// Specifies the threshold for which the distance of a negative sample must
|
| 407 |
+
/// reach in order to incur zero loss. Default: 1
|
| 408 |
+
TORCH_ARG(double, margin) = 1.0;
|
| 409 |
+
/// Specifies the norm degree for pairwise distance. Default: 2
|
| 410 |
+
TORCH_ARG(double, p) = 2.0;
|
| 411 |
+
TORCH_ARG(double, eps) = 1e-6;
|
| 412 |
+
/// The distance swap is described in detail in the paper Learning shallow
|
| 413 |
+
/// convolutional feature descriptors with triplet losses by V. Balntas,
|
| 414 |
+
/// E. Riba et al. Default: False
|
| 415 |
+
TORCH_ARG(bool, swap) = false;
|
| 416 |
+
/// Specifies the reduction to apply to the output. Default: Mean
|
| 417 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
| 418 |
+
};
|
| 419 |
+
|
| 420 |
+
namespace functional {
|
| 421 |
+
/// Options for `torch::nn::functional::triplet_margin_loss`.
|
| 422 |
+
///
|
| 423 |
+
/// See the documentation for `torch::nn::TripletMarginLossOptions` class to
|
| 424 |
+
/// learn what arguments are supported.
|
| 425 |
+
///
|
| 426 |
+
/// Example:
|
| 427 |
+
/// ```
|
| 428 |
+
/// namespace F = torch::nn::functional;
|
| 429 |
+
/// F::triplet_margin_loss(anchor, positive, negative,
|
| 430 |
+
/// F::TripletMarginLossFuncOptions().margin(1.0));
|
| 431 |
+
/// ```
|
| 432 |
+
using TripletMarginLossFuncOptions = TripletMarginLossOptions;
|
| 433 |
+
} // namespace functional
|
| 434 |
+
|
| 435 |
+
// ============================================================================
|
| 436 |
+
|
| 437 |
+
/// Options for the `TripletMarginWithDistanceLoss` module.
|
| 438 |
+
///
|
| 439 |
+
/// Example:
|
| 440 |
+
/// ```
|
| 441 |
+
/// TripletMarginWithDistanceLoss
|
| 442 |
+
/// model(TripletMarginWithDistanceLossOptions().margin(3).swap(false));
|
| 443 |
+
/// ```
|
| 444 |
+
struct TORCH_API TripletMarginWithDistanceLossOptions {
|
| 445 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
| 446 |
+
reduction_t;
|
| 447 |
+
typedef std::function<Tensor(const Tensor&, const Tensor&)>
|
| 448 |
+
distance_function_t;
|
| 449 |
+
|
| 450 |
+
/// Specifies a nonnegative, real-valued function that quantifies the
|
| 451 |
+
/// closeness of two tensors. If not specified, `F::pairwise_distance` will
|
| 452 |
+
/// be used. Default: nullopt
|
| 453 |
+
TORCH_ARG(c10::optional<distance_function_t>, distance_function) =
|
| 454 |
+
c10::nullopt;
|
| 455 |
+
/// Specifies a nonnegative margin representing the minimum difference
|
| 456 |
+
/// between the positive and negative distances required for the loss to be 0.
|
| 457 |
+
/// Larger margins penalize cases where the negative examples are not distance
|
| 458 |
+
/// enough from the anchors, relative to the positives. Default: 1
|
| 459 |
+
TORCH_ARG(double, margin) = 1.0;
|
| 460 |
+
/// Whether to use the distance swap described in the paper Learning shallow
|
| 461 |
+
/// convolutional feature descriptors with triplet losses by V. Balntas,
|
| 462 |
+
/// E. Riba et al. If True, and if the positive example is closer to the
|
| 463 |
+
/// negative example than the anchor is, swaps the positive example and the
|
| 464 |
+
/// anchor in the loss computation. Default: False
|
| 465 |
+
TORCH_ARG(bool, swap) = false;
|
| 466 |
+
/// Specifies the reduction to apply to the output. Default: Mean
|
| 467 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
| 468 |
+
};
|
| 469 |
+
|
| 470 |
+
namespace functional {
|
| 471 |
+
/// Options for `torch::nn::functional::triplet_margin_with_distance_loss`.
|
| 472 |
+
///
|
| 473 |
+
/// See the documentation for `torch::nn::TripletMarginWithDistanceLossOptions`
|
| 474 |
+
/// class to learn what arguments are supported.
|
| 475 |
+
///
|
| 476 |
+
/// Example:
|
| 477 |
+
/// ```
|
| 478 |
+
/// namespace F = torch::nn::functional;
|
| 479 |
+
/// F::triplet_margin_with_distance_loss(anchor, positive, negative,
|
| 480 |
+
/// F::TripletMarginWithDistanceLossFuncOptions().margin(1.0));
|
| 481 |
+
/// ```
|
| 482 |
+
using TripletMarginWithDistanceLossFuncOptions =
|
| 483 |
+
TripletMarginWithDistanceLossOptions;
|
| 484 |
+
} // namespace functional
|
| 485 |
+
|
| 486 |
+
// ============================================================================
|
| 487 |
+
|
| 488 |
+
/// Options for the `CTCLoss` module.
|
| 489 |
+
///
|
| 490 |
+
/// Example:
|
| 491 |
+
/// ```
|
| 492 |
+
/// CTCLoss
|
| 493 |
+
/// model(CTCLossOptions().blank(42).zero_infinity(false).reduction(torch::kSum));
|
| 494 |
+
/// ```
|
| 495 |
+
struct TORCH_API CTCLossOptions {
|
| 496 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
| 497 |
+
reduction_t;
|
| 498 |
+
|
| 499 |
+
/// blank label. Default `0`.
|
| 500 |
+
TORCH_ARG(int64_t, blank) = 0;
|
| 501 |
+
/// Specifies the reduction to apply to the output. Default: Mean
|
| 502 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
| 503 |
+
/// Whether to zero infinite losses and the associated gradients.
|
| 504 |
+
/// Default: `false`. Infinite losses mainly occur when the inputs are
|
| 505 |
+
/// too short to be aligned to the targets.
|
| 506 |
+
TORCH_ARG(bool, zero_infinity) = false;
|
| 507 |
+
};
|
| 508 |
+
|
| 509 |
+
namespace functional {
|
| 510 |
+
/// Options for `torch::nn::functional::ctc_loss`.
|
| 511 |
+
///
|
| 512 |
+
/// See the documentation for `torch::nn::CTCLossOptions` class to learn what
|
| 513 |
+
/// arguments are supported.
|
| 514 |
+
///
|
| 515 |
+
/// Example:
|
| 516 |
+
/// ```
|
| 517 |
+
/// namespace F = torch::nn::functional;
|
| 518 |
+
/// F::ctc_loss(log_probs, targets, input_lengths, target_lengths,
|
| 519 |
+
/// F::CTCLossFuncOptions().reduction(torch::kNone));
|
| 520 |
+
/// ```
|
| 521 |
+
using CTCLossFuncOptions = CTCLossOptions;
|
| 522 |
+
} // namespace functional
|
| 523 |
+
|
| 524 |
+
// ============================================================================
|
| 525 |
+
|
| 526 |
+
/// Options for the `SmoothL1Loss` module.
|
| 527 |
+
///
|
| 528 |
+
/// Example:
|
| 529 |
+
/// ```
|
| 530 |
+
/// SmoothL1Loss model(SmoothL1LossOptions().reduction(torch::kNone).beta(0.5));
|
| 531 |
+
/// ```
|
| 532 |
+
struct TORCH_API SmoothL1LossOptions {
|
| 533 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
| 534 |
+
reduction_t;
|
| 535 |
+
|
| 536 |
+
TORCH_OPTIONS_CTOR_VARIANT_ARG3(
|
| 537 |
+
SmoothL1LossOptions,
|
| 538 |
+
reduction,
|
| 539 |
+
kNone,
|
| 540 |
+
kMean,
|
| 541 |
+
kSum)
|
| 542 |
+
|
| 543 |
+
/// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.
|
| 544 |
+
/// 'none': no reduction will be applied, 'mean': the sum of the output will
|
| 545 |
+
/// be divided by the number of elements in the output, 'sum': the output will
|
| 546 |
+
/// be summed. Default: 'mean'
|
| 547 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
| 548 |
+
/// Specifies the threshold at which to change between L1 and L2 loss.
|
| 549 |
+
/// If beta is not specified, a value of 1.0 will be used.
|
| 550 |
+
/// Default: nullopt
|
| 551 |
+
TORCH_ARG(c10::optional<double>, beta) = c10::nullopt;
|
| 552 |
+
};
|
| 553 |
+
|
| 554 |
+
namespace functional {
|
| 555 |
+
/// Options for `torch::nn::functional::smooth_l1_loss`.
|
| 556 |
+
///
|
| 557 |
+
/// See the documentation for `torch::nn::SmoothL1LossOptions` class to learn
|
| 558 |
+
/// what arguments are supported.
|
| 559 |
+
///
|
| 560 |
+
/// Example:
|
| 561 |
+
/// ```
|
| 562 |
+
/// namespace F = torch::nn::functional;
|
| 563 |
+
/// F::smooth_l1_loss(input, target, F::SmoothL1LossFuncOptions(torch::kNone));
|
| 564 |
+
/// ```
|
| 565 |
+
using SmoothL1LossFuncOptions = SmoothL1LossOptions;
|
| 566 |
+
} // namespace functional
|
| 567 |
+
|
| 568 |
+
// ============================================================================
|
| 569 |
+
|
| 570 |
+
/// Options for the `HuberLoss` module.
|
| 571 |
+
///
|
| 572 |
+
/// Example:
|
| 573 |
+
/// ```
|
| 574 |
+
/// HuberLoss model(HuberLossOptions().reduction(torch::kNone).delta(0.5));
|
| 575 |
+
/// ```
|
| 576 |
+
struct TORCH_API HuberLossOptions {
|
| 577 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
| 578 |
+
reduction_t;
|
| 579 |
+
|
| 580 |
+
TORCH_OPTIONS_CTOR_VARIANT_ARG3(
|
| 581 |
+
HuberLossOptions,
|
| 582 |
+
reduction,
|
| 583 |
+
kNone,
|
| 584 |
+
kMean,
|
| 585 |
+
kSum)
|
| 586 |
+
|
| 587 |
+
/// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.
|
| 588 |
+
/// 'none': no reduction will be applied, 'mean': the sum of the output will
|
| 589 |
+
/// be divided by the number of elements in the output, 'sum': the output will
|
| 590 |
+
/// be summed. Default: 'mean'
|
| 591 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
| 592 |
+
/// Specifies the threshold at which to change between L1 and L2 loss.
|
| 593 |
+
/// Default: 1.0
|
| 594 |
+
TORCH_ARG(double, delta) = 1.0;
|
| 595 |
+
};
|
| 596 |
+
|
| 597 |
+
namespace functional {
|
| 598 |
+
/// Options for `torch::nn::functional::huber_loss`.
|
| 599 |
+
///
|
| 600 |
+
/// See the documentation for `torch::nn::HuberLossOptions` class to learn what
|
| 601 |
+
/// arguments are supported.
|
| 602 |
+
///
|
| 603 |
+
/// Example:
|
| 604 |
+
/// ```
|
| 605 |
+
/// namespace F = torch::nn::functional;
|
| 606 |
+
/// F::huber_loss(input, target, F::HuberLossFuncOptions(torch::kNone));
|
| 607 |
+
/// ```
|
| 608 |
+
using HuberLossFuncOptions = HuberLossOptions;
|
| 609 |
+
} // namespace functional
|
| 610 |
+
|
| 611 |
+
// ============================================================================
|
| 612 |
+
|
| 613 |
+
/// Options for the `PoissonNLLLoss` module.
|
| 614 |
+
///
|
| 615 |
+
/// Example:
|
| 616 |
+
/// ```
|
| 617 |
+
/// PoissonNLLLoss
|
| 618 |
+
/// model(PoissonNLLLossOptions().log_input(false).full(true).eps(0.42).reduction(torch::kSum));
|
| 619 |
+
/// ```
|
| 620 |
+
struct TORCH_API PoissonNLLLossOptions {
|
| 621 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
| 622 |
+
reduction_t;
|
| 623 |
+
|
| 624 |
+
/// if true the loss is computed as `exp(input) - target * input`,
|
| 625 |
+
/// if false the loss is `input - target * log(input + eps)`.
|
| 626 |
+
TORCH_ARG(bool, log_input) = true;
|
| 627 |
+
/// whether to compute full loss, i.e. to add the Stirling approximation term
|
| 628 |
+
/// target * log(target) - target + 0.5 * log(2 * pi * target).
|
| 629 |
+
TORCH_ARG(bool, full) = false;
|
| 630 |
+
/// Small value to avoid evaluation of `log(0)` when `log_input = false`.
|
| 631 |
+
/// Default: 1e-8
|
| 632 |
+
TORCH_ARG(double, eps) = 1e-8;
|
| 633 |
+
/// Specifies the reduction to apply to the output. Default: Mean
|
| 634 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
| 635 |
+
};
|
| 636 |
+
|
| 637 |
+
namespace functional {
|
| 638 |
+
/// Options for `torch::nn::functional::poisson_nll_loss`.
|
| 639 |
+
///
|
| 640 |
+
/// See the documentation for `torch::nn::PoissonNLLLossOptions` class to learn
|
| 641 |
+
/// what arguments are supported.
|
| 642 |
+
///
|
| 643 |
+
/// Example:
|
| 644 |
+
/// ```
|
| 645 |
+
/// namespace F = torch::nn::functional;
|
| 646 |
+
/// F::poisson_nll_loss(input, target,
|
| 647 |
+
/// F::PoissonNLLLossFuncOptions().reduction(torch::kNone));
|
| 648 |
+
/// ```
|
| 649 |
+
using PoissonNLLLossFuncOptions = PoissonNLLLossOptions;
|
| 650 |
+
} // namespace functional
|
| 651 |
+
|
| 652 |
+
// ============================================================================
|
| 653 |
+
|
| 654 |
+
/// Options for the `MarginRankingLoss` module.
|
| 655 |
+
///
|
| 656 |
+
/// Example:
|
| 657 |
+
/// ```
|
| 658 |
+
/// MarginRankingLoss
|
| 659 |
+
/// model(MarginRankingLossOptions().margin(0.5).reduction(torch::kSum));
|
| 660 |
+
/// ```
|
| 661 |
+
struct TORCH_API MarginRankingLossOptions {
|
| 662 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
| 663 |
+
reduction_t;
|
| 664 |
+
|
| 665 |
+
/// Has a default value of `0`.
|
| 666 |
+
TORCH_ARG(double, margin) = 0;
|
| 667 |
+
/// Specifies the reduction to apply to the output. Default: Mean
|
| 668 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
| 669 |
+
};
|
| 670 |
+
|
| 671 |
+
namespace functional {
|
| 672 |
+
/// Options for `torch::nn::functional::margin_ranking_loss`.
|
| 673 |
+
///
|
| 674 |
+
/// See the documentation for `torch::nn::MarginRankingLossOptions` class to
|
| 675 |
+
/// learn what arguments are supported.
|
| 676 |
+
///
|
| 677 |
+
/// Example:
|
| 678 |
+
/// ```
|
| 679 |
+
/// namespace F = torch::nn::functional;
|
| 680 |
+
/// F::margin_ranking_loss(input1, input2, target,
|
| 681 |
+
/// F::MarginRankingLossFuncOptions().margin(0.5).reduction(torch::kSum));
|
| 682 |
+
/// ```
|
| 683 |
+
using MarginRankingLossFuncOptions = MarginRankingLossOptions;
|
| 684 |
+
} // namespace functional
|
| 685 |
+
|
| 686 |
+
// ============================================================================
|
| 687 |
+
|
| 688 |
+
/// Options for the `NLLLoss` module.
|
| 689 |
+
///
|
| 690 |
+
/// Example:
|
| 691 |
+
/// ```
|
| 692 |
+
/// NLLLoss model(NLLLossOptions().ignore_index(-100).reduction(torch::kMean));
|
| 693 |
+
/// ```
|
| 694 |
+
struct TORCH_API NLLLossOptions {
|
| 695 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
| 696 |
+
reduction_t;
|
| 697 |
+
|
| 698 |
+
/// A manual rescaling weight given to each
|
| 699 |
+
/// class. If given, it has to be a Tensor of size `C`. Otherwise, it is
|
| 700 |
+
/// treated as if having all ones.
|
| 701 |
+
TORCH_ARG(Tensor, weight) = {};
|
| 702 |
+
/// Specifies a target value that is ignored
|
| 703 |
+
/// and does not contribute to the input gradient.
|
| 704 |
+
TORCH_ARG(int64_t, ignore_index) = -100;
|
| 705 |
+
/// Specifies the reduction to apply to the output. Default: Mean
|
| 706 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
| 707 |
+
};
|
| 708 |
+
|
| 709 |
+
namespace functional {
|
| 710 |
+
/// Options for `torch::nn::functional::nll_loss`.
|
| 711 |
+
///
|
| 712 |
+
/// See the documentation for `torch::nn::NLLLossOptions` class to learn what
|
| 713 |
+
/// arguments are supported.
|
| 714 |
+
///
|
| 715 |
+
/// Example:
|
| 716 |
+
/// ```
|
| 717 |
+
/// namespace F = torch::nn::functional;
|
| 718 |
+
/// F::nll_loss(input, target,
|
| 719 |
+
/// F::NLLLossFuncOptions().ignore_index(-100).reduction(torch::kMean));
|
| 720 |
+
/// ```
|
| 721 |
+
using NLLLossFuncOptions = NLLLossOptions;
|
| 722 |
+
} // namespace functional
|
| 723 |
+
|
| 724 |
+
// ============================================================================
|
| 725 |
+
|
| 726 |
+
/// Options for the `CrossEntropyLoss` module.
|
| 727 |
+
///
|
| 728 |
+
/// Example:
|
| 729 |
+
/// ```
|
| 730 |
+
/// CrossEntropyLoss
|
| 731 |
+
/// model(CrossEntropyLossOptions().ignore_index(-100).reduction(torch::kMean));
|
| 732 |
+
/// ```
|
| 733 |
+
struct TORCH_API CrossEntropyLossOptions {
|
| 734 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
| 735 |
+
reduction_t;
|
| 736 |
+
|
| 737 |
+
/// A manual rescaling weight given to each class. If given, has to be a
|
| 738 |
+
/// Tensor of size C
|
| 739 |
+
TORCH_ARG(Tensor, weight) = {};
|
| 740 |
+
/// Specifies a target value that is ignored
|
| 741 |
+
/// and does not contribute to the input gradient.
|
| 742 |
+
TORCH_ARG(int64_t, ignore_index) = -100;
|
| 743 |
+
/// Specifies the reduction to apply to the output. Default: Mean
|
| 744 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
| 745 |
+
/// Specifies the amount of smoothing when computing the loss. Default: 0.0
|
| 746 |
+
TORCH_ARG(double, label_smoothing) = 0.0;
|
| 747 |
+
};
|
| 748 |
+
|
| 749 |
+
namespace functional {
|
| 750 |
+
/// Options for `torch::nn::functional::cross_entropy`.
|
| 751 |
+
///
|
| 752 |
+
/// See the documentation for `torch::nn::CrossEntropyLossOptions` class to
|
| 753 |
+
/// learn what arguments are supported.
|
| 754 |
+
///
|
| 755 |
+
/// Example:
|
| 756 |
+
/// ```
|
| 757 |
+
/// namespace F = torch::nn::functional;
|
| 758 |
+
/// F::cross_entropy(input, target,
|
| 759 |
+
/// F::CrossEntropyFuncOptions().ignore_index(-100).reduction(torch::kMean));
|
| 760 |
+
/// ```
|
| 761 |
+
using CrossEntropyFuncOptions = CrossEntropyLossOptions;
|
| 762 |
+
} // namespace functional
|
| 763 |
+
|
| 764 |
+
// ============================================================================
|
| 765 |
+
|
| 766 |
+
/// Options for the `BCEWithLogitsLoss` module.
|
| 767 |
+
///
|
| 768 |
+
/// Example:
|
| 769 |
+
/// ```
|
| 770 |
+
/// BCEWithLogitsLoss
|
| 771 |
+
/// model(BCEWithLogitsLossOptions().reduction(torch::kNone).weight(weight));
|
| 772 |
+
/// ```
|
| 773 |
+
struct TORCH_API BCEWithLogitsLossOptions {
|
| 774 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
| 775 |
+
reduction_t;
|
| 776 |
+
/// A manual rescaling weight given to the loss of each batch element.
|
| 777 |
+
/// If given, has to be a Tensor of size `nbatch`.
|
| 778 |
+
TORCH_ARG(Tensor, weight) = {};
|
| 779 |
+
/// Specifies the reduction to apply to the output. Default: Mean
|
| 780 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
| 781 |
+
/// A weight of positive examples.
|
| 782 |
+
/// Must be a vector with length equal to the number of classes.
|
| 783 |
+
TORCH_ARG(Tensor, pos_weight) = {};
|
| 784 |
+
};
|
| 785 |
+
|
| 786 |
+
namespace functional {
|
| 787 |
+
/// Options for `torch::nn::functional::binary_cross_entropy_with_logits`.
|
| 788 |
+
///
|
| 789 |
+
/// See the documentation for `torch::nn::BCEWithLogitsLossOptions` class to
|
| 790 |
+
/// learn what arguments are supported.
|
| 791 |
+
///
|
| 792 |
+
/// Example:
|
| 793 |
+
/// ```
|
| 794 |
+
/// namespace F = torch::nn::functional;
|
| 795 |
+
/// F::binary_cross_entropy_with_logits(input, target,
|
| 796 |
+
/// F::BinaryCrossEntropyWithLogitsFuncOptions().pos_weight(pos_weight).reduction(torch::kSum));
|
| 797 |
+
/// ```
|
| 798 |
+
using BinaryCrossEntropyWithLogitsFuncOptions = BCEWithLogitsLossOptions;
|
| 799 |
+
} // namespace functional
|
| 800 |
+
|
| 801 |
+
} // namespace nn
|
| 802 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformer.h
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/arg.h>
|
| 4 |
+
#include <torch/csrc/Export.h>
|
| 5 |
+
#include <torch/enum.h>
|
| 6 |
+
#include <torch/types.h>
|
| 7 |
+
|
| 8 |
+
#include <torch/nn/modules/container/any.h>
|
| 9 |
+
#include <torch/nn/options/transformerlayer.h>
|
| 10 |
+
|
| 11 |
+
namespace torch {
|
| 12 |
+
namespace nn {
|
| 13 |
+
|
| 14 |
+
/// Options for the `Transformer` module
|
| 15 |
+
///
|
| 16 |
+
/// Example:
|
| 17 |
+
/// ```
|
| 18 |
+
/// TransformerOptions options;
|
| 19 |
+
/// TransformerOptions options(16, 4);
|
| 20 |
+
/// auto options = TransformerOptions().d_model(4).nhead(2).dropout(0.0);
|
| 21 |
+
/// ```
|
| 22 |
+
struct TORCH_API TransformerOptions {
|
| 23 |
+
// The following constructors are commonly used
|
| 24 |
+
// Please don't add more unless it is proved as a common usage
|
| 25 |
+
TransformerOptions() = default;
|
| 26 |
+
TransformerOptions(int64_t d_model, int64_t nhead);
|
| 27 |
+
TransformerOptions(
|
| 28 |
+
int64_t d_model,
|
| 29 |
+
int64_t nhead,
|
| 30 |
+
int64_t num_encoder_layers,
|
| 31 |
+
int64_t num_decoder_layers);
|
| 32 |
+
|
| 33 |
+
/// the number of expected features in the encoder/decoder inputs
|
| 34 |
+
/// (default=512)
|
| 35 |
+
TORCH_ARG(int64_t, d_model) = 512;
|
| 36 |
+
|
| 37 |
+
/// the number of heads in the multiheadattention models (default=8)
|
| 38 |
+
TORCH_ARG(int64_t, nhead) = 8;
|
| 39 |
+
|
| 40 |
+
/// the number of sub-encoder-layers in the encoder (default=6)
|
| 41 |
+
TORCH_ARG(int64_t, num_encoder_layers) = 6;
|
| 42 |
+
|
| 43 |
+
/// the number of sub-decoder-layers in the decoder (default=6)
|
| 44 |
+
TORCH_ARG(int64_t, num_decoder_layers) = 6;
|
| 45 |
+
|
| 46 |
+
/// the dimension of the feedforward network model (default=2048)
|
| 47 |
+
TORCH_ARG(int64_t, dim_feedforward) = 2048;
|
| 48 |
+
|
| 49 |
+
/// the dropout value (default=0.1)
|
| 50 |
+
TORCH_ARG(double, dropout) = 0.1;
|
| 51 |
+
|
| 52 |
+
/// the activation function of encoder/decoder intermediate layer
|
| 53 |
+
/// (default=``torch::kReLU``)
|
| 54 |
+
TORCH_ARG(activation_t, activation) = torch::kReLU;
|
| 55 |
+
|
| 56 |
+
/// custom encoder (default=None)
|
| 57 |
+
TORCH_ARG(AnyModule, custom_encoder);
|
| 58 |
+
|
| 59 |
+
/// custom decoder (default=None)
|
| 60 |
+
TORCH_ARG(AnyModule, custom_decoder);
|
| 61 |
+
};
|
| 62 |
+
|
| 63 |
+
} // namespace nn
|
| 64 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/upsampling.h
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/arg.h>
|
| 4 |
+
#include <torch/csrc/Export.h>
|
| 5 |
+
#include <torch/enum.h>
|
| 6 |
+
#include <torch/expanding_array.h>
|
| 7 |
+
#include <torch/types.h>
|
| 8 |
+
|
| 9 |
+
#include <vector>
|
| 10 |
+
|
| 11 |
+
namespace torch {
|
| 12 |
+
namespace nn {
|
| 13 |
+
|
| 14 |
+
/// Options for the `Upsample` module.
|
| 15 |
+
///
|
| 16 |
+
/// Example:
|
| 17 |
+
/// ```
|
| 18 |
+
/// Upsample
|
| 19 |
+
/// model(UpsampleOptions().scale_factor(std::vector<double>({3})).mode(torch::kLinear).align_corners(false));
|
| 20 |
+
/// ```
|
| 21 |
+
struct TORCH_API UpsampleOptions {
|
| 22 |
+
/// output spatial sizes.
|
| 23 |
+
TORCH_ARG(c10::optional<std::vector<int64_t>>, size) = c10::nullopt;
|
| 24 |
+
|
| 25 |
+
/// multiplier for spatial size.
|
| 26 |
+
TORCH_ARG(c10::optional<std::vector<double>>, scale_factor) = c10::nullopt;
|
| 27 |
+
|
| 28 |
+
/// the upsampling algorithm: one of "nearest", "linear", "bilinear",
|
| 29 |
+
/// "bicubic" and "trilinear". Default: "nearest"
|
| 30 |
+
typedef std::variant<
|
| 31 |
+
enumtype::kNearest,
|
| 32 |
+
enumtype::kLinear,
|
| 33 |
+
enumtype::kBilinear,
|
| 34 |
+
enumtype::kBicubic,
|
| 35 |
+
enumtype::kTrilinear>
|
| 36 |
+
mode_t;
|
| 37 |
+
TORCH_ARG(mode_t, mode) = torch::kNearest;
|
| 38 |
+
|
| 39 |
+
/// if "True", the corner pixels of the input and output tensors are
|
| 40 |
+
/// aligned, and thus preserving the values at those pixels. This only has
|
| 41 |
+
/// effect when :attr:`mode` is "linear", "bilinear", "bicubic", or
|
| 42 |
+
/// "trilinear". Default: "False"
|
| 43 |
+
TORCH_ARG(c10::optional<bool>, align_corners) = c10::nullopt;
|
| 44 |
+
};
|
| 45 |
+
|
| 46 |
+
namespace functional {
|
| 47 |
+
|
| 48 |
+
/// Options for `torch::nn::functional::interpolate`.
|
| 49 |
+
///
|
| 50 |
+
/// Example:
|
| 51 |
+
/// ```
|
| 52 |
+
/// namespace F = torch::nn::functional;
|
| 53 |
+
/// F::interpolate(input,
|
| 54 |
+
/// F::InterpolateFuncOptions().size(std::vector<int64_t>({4})).mode(torch::kNearest));
|
| 55 |
+
/// ```
|
| 56 |
+
struct TORCH_API InterpolateFuncOptions {
|
| 57 |
+
typedef std::variant<
|
| 58 |
+
enumtype::kNearest,
|
| 59 |
+
enumtype::kLinear,
|
| 60 |
+
enumtype::kBilinear,
|
| 61 |
+
enumtype::kBicubic,
|
| 62 |
+
enumtype::kTrilinear,
|
| 63 |
+
enumtype::kArea,
|
| 64 |
+
enumtype::kNearestExact>
|
| 65 |
+
mode_t;
|
| 66 |
+
|
| 67 |
+
/// output spatial sizes.
|
| 68 |
+
TORCH_ARG(c10::optional<std::vector<int64_t>>, size) = c10::nullopt;
|
| 69 |
+
|
| 70 |
+
/// multiplier for spatial size.
|
| 71 |
+
TORCH_ARG(c10::optional<std::vector<double>>, scale_factor) = c10::nullopt;
|
| 72 |
+
|
| 73 |
+
/// the upsampling algorithm: one of "nearest", "linear", "bilinear",
|
| 74 |
+
/// "bicubic", "trilinear", "area", "nearest-exact". Default: "nearest"
|
| 75 |
+
TORCH_ARG(mode_t, mode) = torch::kNearest;
|
| 76 |
+
|
| 77 |
+
/// Geometrically, we consider the pixels of the input and output as squares
|
| 78 |
+
/// rather than points. If set to "True", the input and output tensors are
|
| 79 |
+
/// aligned by the center points of their corner pixels, preserving the values
|
| 80 |
+
/// at the corner pixels. If set to "False", the input and output tensors
|
| 81 |
+
/// are aligned by the corner points of their corner pixels, and the
|
| 82 |
+
/// interpolation uses edge value padding for out-of-boundary values, making
|
| 83 |
+
/// this operation *independent* of input size when :attr:`scale_factor` is
|
| 84 |
+
/// kept the same. This only has an effect when :attr:`mode` is "linear",
|
| 85 |
+
/// "bilinear", "bicubic" or "trilinear". Default: "False"
|
| 86 |
+
TORCH_ARG(c10::optional<bool>, align_corners) = c10::nullopt;
|
| 87 |
+
|
| 88 |
+
/// recompute the scale_factor for use in the
|
| 89 |
+
/// interpolation calculation. When `scale_factor` is passed as a parameter,
|
| 90 |
+
/// it is used to compute the `output_size`. If `recompute_scale_factor` is
|
| 91 |
+
/// `true` or not specified, a new `scale_factor` will be computed based on
|
| 92 |
+
/// the output and input sizes for use in the interpolation computation (i.e.
|
| 93 |
+
/// the computation will be identical to if the computed `output_size` were
|
| 94 |
+
/// passed-in explicitly). Otherwise, the passed-in `scale_factor` will be
|
| 95 |
+
/// used in the interpolation computation. Note that when `scale_factor` is
|
| 96 |
+
/// floating-point, the recomputed scale_factor may differ from the one passed
|
| 97 |
+
/// in due to rounding and precision issues.
|
| 98 |
+
TORCH_ARG(c10::optional<bool>, recompute_scale_factor) = c10::nullopt;
|
| 99 |
+
|
| 100 |
+
/// flag to apply anti-aliasing. Using anti-alias
|
| 101 |
+
/// option together with :attr:`align_corners` equals "False", interpolation
|
| 102 |
+
/// result would match Pillow result for downsampling operation. Supported
|
| 103 |
+
/// modes: "bilinear". Default: "False".
|
| 104 |
+
TORCH_ARG(bool, antialias) = false;
|
| 105 |
+
};
|
| 106 |
+
|
| 107 |
+
} // namespace functional
|
| 108 |
+
|
| 109 |
+
} // namespace nn
|
| 110 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/pimpl-inl.h
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// This class exists only to do SFINAE on abstract types `T` that are really
|
| 2 |
+
// `ModuleHolder<ModuleType>`, because there's no good way to say that `T` is a
|
| 3 |
+
// `ModuleHolder` over some unknown type `ModuleType`. With this, you can do
|
| 4 |
+
// `enable_if_t<is_base_of_v<ModuleHolderIndicator, T>>`.
|
| 5 |
+
struct ModuleHolderIndicator {};
|
| 6 |
+
|
| 7 |
+
// A type trait that is true for types that are `ModuleHolder`s.
|
| 8 |
+
template <typename T>
|
| 9 |
+
using is_module_holder = std::is_base_of<ModuleHolderIndicator, decay_t<T>>;
|
| 10 |
+
|
| 11 |
+
template <typename T>
|
| 12 |
+
using disable_if_module_holder_t = disable_if_t<is_module_holder<T>::value>;
|
| 13 |
+
|
| 14 |
+
// A collection of templates that answer the question whether a type `T` is a
|
| 15 |
+
// `ModuleHolder`, and if so whether its contained type is of type `C`. This is
|
| 16 |
+
// tricky because it is hard to short circuit in template metaprogramming. A
|
| 17 |
+
// naive and incorrect solution to this problem would be something like
|
| 18 |
+
// `disable_if<is_module_holder<T>::value && typename T::ContainedType == C>`.
|
| 19 |
+
// This would disable all types that are not `ModuleHolder`s, because even
|
| 20 |
+
// though the `is_module_holder<T>::value` may be `false` for such types the
|
| 21 |
+
// `T::ContainedType` access would be ill-formed and thus fail the whole
|
| 22 |
+
// expression by the rules of SFINAE. Instead we have to use template
|
| 23 |
+
// specialization to statically branch on the first condition
|
| 24 |
+
// (`is_module_holder<T>`) and are only then allowed to query
|
| 25 |
+
// `T::ContainedType` in the branch for which the condition was true.
|
| 26 |
+
|
| 27 |
+
// Base template.
|
| 28 |
+
template <bool is_module_holder_value, typename T, typename C>
|
| 29 |
+
struct is_module_holder_of_impl;
|
| 30 |
+
|
| 31 |
+
// False branch. `T` is not a `ModuleHolder` and thus not a `ModuleHolder` with
|
| 32 |
+
// contained type `C`.
|
| 33 |
+
template <typename T, typename C>
|
| 34 |
+
struct is_module_holder_of_impl<false, T, C> : std::false_type {};
|
| 35 |
+
|
| 36 |
+
// True branch. `T` is a `ModuleHolder` and thus we can legit access its
|
| 37 |
+
// `ContainedType` and compare it against `C`.
|
| 38 |
+
template <typename T, typename C>
|
| 39 |
+
struct is_module_holder_of_impl<true, T, C>
|
| 40 |
+
: std::is_same<typename T::ContainedType, C> {};
|
| 41 |
+
|
| 42 |
+
// Helper template.
|
| 43 |
+
template <typename T, typename C>
|
| 44 |
+
struct is_module_holder_of : is_module_holder_of_impl<
|
| 45 |
+
is_module_holder<T>::value,
|
| 46 |
+
decay_t<T>,
|
| 47 |
+
decay_t<C>> {};
|
| 48 |
+
|
| 49 |
+
// A collection of templates that allow deducing the return type of the
|
| 50 |
+
// `forward()` method, but only if a module actually has a `forward()` method,
|
| 51 |
+
// and otherwise deduces to the type `void`.
|
| 52 |
+
|
| 53 |
+
template <bool has_forward_value, typename C, typename... Args>
|
| 54 |
+
struct return_type_of_forward_impl;
|
| 55 |
+
|
| 56 |
+
template <typename C, typename... Args>
|
| 57 |
+
struct return_type_of_forward_impl<true, C, Args...> {
|
| 58 |
+
using type = decltype(::std::declval<C>().forward(::std::declval<Args>()...));
|
| 59 |
+
};
|
| 60 |
+
|
| 61 |
+
template <typename C, typename... Args>
|
| 62 |
+
struct return_type_of_forward_impl<false, C, Args...> {
|
| 63 |
+
using type = void;
|
| 64 |
+
};
|
| 65 |
+
|
| 66 |
+
template <typename C, typename... Args>
|
| 67 |
+
using return_type_of_forward = return_type_of_forward_impl<
|
| 68 |
+
torch::detail::has_forward<C>::value,
|
| 69 |
+
C,
|
| 70 |
+
Args...>;
|
| 71 |
+
|
| 72 |
+
template <typename C, typename... Args>
|
| 73 |
+
using return_type_of_forward_t =
|
| 74 |
+
typename return_type_of_forward<C, Args...>::type;
|