diff --git a/.gitattributes b/.gitattributes index ac838963a42e315415d8c1d506bbf06947ac1c17..e1b205df5fb1b75a9925292a697cbbbe6e487598 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1717,3 +1717,7 @@ vllm/lib/python3.10/site-packages/cupy/cuda/memory_hook.cpython-310-x86_64-linux vllm/lib/python3.10/site-packages/cupy/cuda/cufft.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text vllm/lib/python3.10/site-packages/cupy/fft/_callback.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text vllm/lib/python3.10/site-packages/cupy/cuda/graph.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +parrot/lib/python3.10/site-packages/scipy/stats/__pycache__/_morestats.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +parrot/lib/python3.10/site-packages/scipy/linalg/_decomp_update.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +parrot/lib/python3.10/site-packages/scipy/optimize/__pycache__/_optimize.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +vllm/lib/python3.10/site-packages/cupy/lib/_polynomial.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/parrot/lib/python3.10/site-packages/narwhals/_arrow/__init__.py b/parrot/lib/python3.10/site-packages/narwhals/_arrow/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/parrot/lib/python3.10/site-packages/narwhals/_arrow/__pycache__/namespace.cpython-310.pyc b/parrot/lib/python3.10/site-packages/narwhals/_arrow/__pycache__/namespace.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..077a04e44534b6d7eb5b54054209a4d86fe1474d Binary files /dev/null and b/parrot/lib/python3.10/site-packages/narwhals/_arrow/__pycache__/namespace.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/narwhals/_arrow/dataframe.py b/parrot/lib/python3.10/site-packages/narwhals/_arrow/dataframe.py new file mode 100644 
index 0000000000000000000000000000000000000000..89919c7634e147889efe0dd52caebf76cc0608c4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/narwhals/_arrow/dataframe.py @@ -0,0 +1,617 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Any +from typing import Iterable +from typing import Iterator +from typing import Literal +from typing import Sequence +from typing import overload + +from narwhals._arrow.utils import broadcast_series +from narwhals._arrow.utils import convert_str_slice_to_int_slice +from narwhals._arrow.utils import select_rows +from narwhals._arrow.utils import translate_dtype +from narwhals._arrow.utils import validate_dataframe_comparand +from narwhals._expression_parsing import evaluate_into_exprs +from narwhals.dependencies import get_pyarrow +from narwhals.dependencies import is_numpy_array +from narwhals.utils import Implementation +from narwhals.utils import flatten +from narwhals.utils import generate_unique_token +from narwhals.utils import is_sequence_but_not_str +from narwhals.utils import parse_columns_to_drop + +if TYPE_CHECKING: + import numpy as np + import pyarrow as pa + from typing_extensions import Self + + from narwhals._arrow.group_by import ArrowGroupBy + from narwhals._arrow.namespace import ArrowNamespace + from narwhals._arrow.series import ArrowSeries + from narwhals._arrow.typing import IntoArrowExpr + from narwhals.dtypes import DType + + +class ArrowDataFrame: + # --- not in the spec --- + def __init__( + self, native_dataframe: pa.Table, *, backend_version: tuple[int, ...] 
+ ) -> None: + self._native_frame = native_dataframe + self._implementation = Implementation.PYARROW + self._backend_version = backend_version + + def __narwhals_namespace__(self) -> ArrowNamespace: + from narwhals._arrow.namespace import ArrowNamespace + + return ArrowNamespace(backend_version=self._backend_version) + + def __native_namespace__(self) -> Any: + return get_pyarrow() + + def __narwhals_dataframe__(self) -> Self: + return self + + def __narwhals_lazyframe__(self) -> Self: + return self + + def _from_native_frame(self, df: Any) -> Self: + return self.__class__(df, backend_version=self._backend_version) + + @property + def shape(self) -> tuple[int, int]: + return self._native_frame.shape # type: ignore[no-any-return] + + def __len__(self) -> int: + return len(self._native_frame) + + def row(self, index: int) -> tuple[Any, ...]: + return tuple(col[index] for col in self._native_frame) + + def rows( + self, *, named: bool = False + ) -> list[tuple[Any, ...]] | list[dict[str, Any]]: + if not named: + msg = "Unnamed rows are not yet supported on PyArrow tables" + raise NotImplementedError(msg) + return self._native_frame.to_pylist() # type: ignore[no-any-return] + + def iter_rows( + self, + *, + named: bool = False, + buffer_size: int = 512, + ) -> Iterator[tuple[Any, ...]] | Iterator[dict[str, Any]]: + df = self._native_frame + num_rows = df.num_rows + + if not named: + for i in range(0, num_rows, buffer_size): + rows = df[i : i + buffer_size].to_pydict().values() + yield from zip(*rows) + else: + for i in range(0, num_rows, buffer_size): + yield from df[i : i + buffer_size].to_pylist() + + def get_column(self, name: str) -> ArrowSeries: + from narwhals._arrow.series import ArrowSeries + + if not isinstance(name, str): + msg = f"Expected str, got: {type(name)}" + raise TypeError(msg) + + return ArrowSeries( + self._native_frame[name], + name=name, + backend_version=self._backend_version, + ) + + def __array__(self, dtype: Any = None, copy: bool | None = 
None) -> np.ndarray: + return self._native_frame.__array__(dtype, copy=copy) + + @overload + def __getitem__(self, item: tuple[Sequence[int], str | int]) -> ArrowSeries: ... # type: ignore[overload-overlap] + + @overload + def __getitem__(self, item: Sequence[int]) -> ArrowDataFrame: ... + + @overload + def __getitem__(self, item: str) -> ArrowSeries: ... + + @overload + def __getitem__(self, item: slice) -> ArrowDataFrame: ... + + @overload + def __getitem__(self, item: tuple[slice, slice]) -> ArrowDataFrame: ... + + def __getitem__( + self, + item: str + | slice + | Sequence[int] + | Sequence[str] + | tuple[Sequence[int], str | int] + | tuple[slice, str | int] + | tuple[slice, slice], + ) -> ArrowSeries | ArrowDataFrame: + if isinstance(item, tuple): + item = tuple(list(i) if is_sequence_but_not_str(i) else i for i in item) + + if isinstance(item, str): + from narwhals._arrow.series import ArrowSeries + + return ArrowSeries( + self._native_frame[item], + name=item, + backend_version=self._backend_version, + ) + elif ( + isinstance(item, tuple) + and len(item) == 2 + and is_sequence_but_not_str(item[1]) + ): + if len(item[1]) == 0: + # Return empty dataframe + return self._from_native_frame(self._native_frame.slice(0, 0).select([])) + selected_rows = select_rows(self._native_frame, item[0]) + return self._from_native_frame(selected_rows.select(item[1])) + + elif isinstance(item, tuple) and len(item) == 2: + if isinstance(item[1], slice): + columns = self.columns + if isinstance(item[1].start, str) or isinstance(item[1].stop, str): + start, stop, step = convert_str_slice_to_int_slice(item[1], columns) + return self._from_native_frame( + self._native_frame.take(item[0]).select(columns[start:stop:step]) + ) + if isinstance(item[1].start, int) or isinstance(item[1].stop, int): + return self._from_native_frame( + self._native_frame.take(item[0]).select( + columns[item[1].start : item[1].stop : item[1].step] + ) + ) + msg = f"Expected slice of integers or strings, got: 
{type(item[1])}" # pragma: no cover + raise TypeError(msg) # pragma: no cover + from narwhals._arrow.series import ArrowSeries + + # PyArrow columns are always strings + col_name = item[1] if isinstance(item[1], str) else self.columns[item[1]] + if isinstance(item[0], str): # pragma: no cover + msg = "Can not slice with tuple with the first element as a str" + raise TypeError(msg) + if (isinstance(item[0], slice)) and (item[0] == slice(None)): + return ArrowSeries( + self._native_frame[col_name], + name=col_name, + backend_version=self._backend_version, + ) + selected_rows = select_rows(self._native_frame, item[0]) + return ArrowSeries( + selected_rows[col_name], + name=col_name, + backend_version=self._backend_version, + ) + + elif isinstance(item, slice): + if item.step is not None and item.step != 1: + msg = "Slicing with step is not supported on PyArrow tables" + raise NotImplementedError(msg) + columns = self.columns + if isinstance(item.start, str) or isinstance(item.stop, str): + start, stop, step = convert_str_slice_to_int_slice(item, columns) + return self._from_native_frame( + self._native_frame.select(columns[start:stop:step]) + ) + start = item.start or 0 + stop = item.stop if item.stop is not None else len(self._native_frame) + return self._from_native_frame( + self._native_frame.slice(start, stop - start), + ) + + elif isinstance(item, Sequence) or (is_numpy_array(item) and item.ndim == 1): + if ( + isinstance(item, Sequence) + and all(isinstance(x, str) for x in item) + and len(item) > 0 + ): + return self._from_native_frame(self._native_frame.select(item)) + if isinstance(item, Sequence) and len(item) == 0: + return self._from_native_frame(self._native_frame.slice(0, 0)) + return self._from_native_frame(self._native_frame.take(item)) + + else: # pragma: no cover + msg = f"Expected str or slice, got: {type(item)}" + raise TypeError(msg) + + @property + def schema(self) -> dict[str, DType]: + schema = self._native_frame.schema + return { + name: 
translate_dtype(dtype) + for name, dtype in zip(schema.names, schema.types) + } + + def collect_schema(self) -> dict[str, DType]: + return self.schema + + @property + def columns(self) -> list[str]: + return self._native_frame.schema.names # type: ignore[no-any-return] + + def select( + self, + *exprs: IntoArrowExpr, + **named_exprs: IntoArrowExpr, + ) -> Self: + import pyarrow as pa # ignore-banned-import() + + new_series = evaluate_into_exprs(self, *exprs, **named_exprs) + if not new_series: + # return empty dataframe, like Polars does + return self._from_native_frame(self._native_frame.__class__.from_arrays([])) + names = [s.name for s in new_series] + df = pa.Table.from_arrays( + broadcast_series(new_series), + names=names, + ) + return self._from_native_frame(df) + + def with_columns( + self, + *exprs: IntoArrowExpr, + **named_exprs: IntoArrowExpr, + ) -> Self: + new_columns = evaluate_into_exprs(self, *exprs, **named_exprs) + new_column_name_to_new_column_map = {s.name: s for s in new_columns} + to_concat = [] + output_names = [] + # Make sure to preserve column order + length = len(self) + for name in self.columns: + if name in new_column_name_to_new_column_map: + to_concat.append( + validate_dataframe_comparand( + length=length, + other=new_column_name_to_new_column_map.pop(name), + backend_version=self._backend_version, + ) + ) + else: + to_concat.append(self._native_frame[name]) + output_names.append(name) + for s in new_column_name_to_new_column_map: + to_concat.append( + validate_dataframe_comparand( + length=length, + other=new_column_name_to_new_column_map[s], + backend_version=self._backend_version, + ) + ) + output_names.append(s) + df = self._native_frame.__class__.from_arrays(to_concat, names=output_names) + return self._from_native_frame(df) + + def group_by(self, *keys: str) -> ArrowGroupBy: + from narwhals._arrow.group_by import ArrowGroupBy + + return ArrowGroupBy(self, list(keys)) + + def join( + self, + other: Self, + *, + how: 
Literal["left", "inner", "outer", "cross", "anti", "semi"] = "inner", + left_on: str | list[str] | None, + right_on: str | list[str] | None, + suffix: str, + ) -> Self: + how_to_join_map = { + "anti": "left anti", + "semi": "left semi", + "inner": "inner", + "left": "left outer", + } + + if how == "cross": + plx = self.__narwhals_namespace__() + key_token = generate_unique_token( + n_bytes=8, columns=[*self.columns, *other.columns] + ) + + return self._from_native_frame( + self.with_columns(**{key_token: plx.lit(0, None)}) + ._native_frame.join( + other.with_columns(**{key_token: plx.lit(0, None)})._native_frame, + keys=key_token, + right_keys=key_token, + join_type="inner", + right_suffix=suffix, + ) + .drop([key_token]), + ) + + return self._from_native_frame( + self._native_frame.join( + other._native_frame, + keys=left_on, + right_keys=right_on, + join_type=how_to_join_map[how], + right_suffix=suffix, + ), + ) + + def join_asof( + self, + other: Self, + *, + left_on: str | None = None, + right_on: str | None = None, + on: str | None = None, + by_left: str | list[str] | None = None, + by_right: str | list[str] | None = None, + by: str | list[str] | None = None, + strategy: Literal["backward", "forward", "nearest"] = "backward", + ) -> Self: + msg = "join_asof is not yet supported on PyArrow tables" + raise NotImplementedError(msg) + + def drop(self: Self, columns: list[str], strict: bool) -> Self: # noqa: FBT001 + to_drop = parse_columns_to_drop( + compliant_frame=self, columns=columns, strict=strict + ) + return self._from_native_frame(self._native_frame.drop(to_drop)) + + def drop_nulls(self: Self, subset: str | list[str] | None) -> Self: + if subset is None: + return self._from_native_frame(self._native_frame.drop_null()) + subset = [subset] if isinstance(subset, str) else subset + plx = self.__narwhals_namespace__() + return self.filter(~plx.any_horizontal(plx.col(*subset).is_null())) + + def sort( + self, + by: str | Iterable[str], + *more_by: str, + 
descending: bool | Sequence[bool] = False, + ) -> Self: + flat_keys = flatten([*flatten([by]), *more_by]) + df = self._native_frame + + if isinstance(descending, bool): + order = "descending" if descending else "ascending" + sorting = [(key, order) for key in flat_keys] + else: + sorting = [ + (key, "descending" if is_descending else "ascending") + for key, is_descending in zip(flat_keys, descending) + ] + return self._from_native_frame(df.sort_by(sorting=sorting)) + + def to_pandas(self) -> Any: + return self._native_frame.to_pandas() + + def to_numpy(self) -> Any: + import numpy as np # ignore-banned-import + + return np.column_stack([col.to_numpy() for col in self._native_frame.columns]) + + def to_dict(self, *, as_series: bool) -> Any: + df = self._native_frame + + names_and_values = zip(df.column_names, df.columns) + if as_series: + from narwhals._arrow.series import ArrowSeries + + return { + name: ArrowSeries(col, name=name, backend_version=self._backend_version) + for name, col in names_and_values + } + else: + return {name: col.to_pylist() for name, col in names_and_values} + + def with_row_index(self, name: str) -> Self: + import pyarrow as pa # ignore-banned-import() + + df = self._native_frame + + row_indices = pa.array(range(df.num_rows)) + return self._from_native_frame(df.append_column(name, row_indices)) + + def filter( + self, + *predicates: IntoArrowExpr, + ) -> Self: + if ( + len(predicates) == 1 + and isinstance(predicates[0], list) + and all(isinstance(x, bool) for x in predicates[0]) + ): + mask = predicates[0] + else: + plx = self.__narwhals_namespace__() + expr = plx.all_horizontal(*predicates) + # Safety: all_horizontal's expression only returns a single column. 
+ mask = expr._call(self)[0]._native_series + return self._from_native_frame(self._native_frame.filter(mask)) + + def null_count(self) -> Self: + import pyarrow as pa # ignore-banned-import() + + df = self._native_frame + names_and_values = zip(df.column_names, df.columns) + + return self._from_native_frame( + pa.table({name: [col.null_count] for name, col in names_and_values}) + ) + + def head(self, n: int) -> Self: + df = self._native_frame + if n >= 0: + return self._from_native_frame(df.slice(0, n)) + else: + num_rows = df.num_rows + return self._from_native_frame(df.slice(0, max(0, num_rows + n))) + + def tail(self, n: int) -> Self: + df = self._native_frame + if n >= 0: + num_rows = df.num_rows + return self._from_native_frame(df.slice(max(0, num_rows - n))) + else: + return self._from_native_frame(df.slice(abs(n))) + + def lazy(self) -> Self: + return self + + def collect(self) -> ArrowDataFrame: + return ArrowDataFrame(self._native_frame, backend_version=self._backend_version) + + def clone(self) -> Self: + msg = "clone is not yet supported on PyArrow tables" + raise NotImplementedError(msg) + + def is_empty(self: Self) -> bool: + return self.shape[0] == 0 + + def item(self: Self, row: int | None = None, column: int | str | None = None) -> Any: + if row is None and column is None: + if self.shape != (1, 1): + msg = ( + "can only call `.item()` if the dataframe is of shape (1, 1)," + " or if explicit row/col values are provided;" + f" frame has shape {self.shape!r}" + ) + raise ValueError(msg) + return self._native_frame[0][0] + + elif row is None or column is None: + msg = "cannot call `.item()` with only one of `row` or `column`" + raise ValueError(msg) + + _col = self.columns.index(column) if isinstance(column, str) else column + return self._native_frame[_col][row] + + def rename(self, mapping: dict[str, str]) -> Self: + df = self._native_frame + new_cols = [mapping.get(c, c) for c in df.column_names] + return 
self._from_native_frame(df.rename_columns(new_cols)) + + def write_parquet(self, file: Any) -> Any: + import pyarrow.parquet as pp # ignore-banned-import + + pp.write_table(self._native_frame, file) + + def write_csv(self, file: Any) -> Any: + import pyarrow as pa # ignore-banned-import + import pyarrow.csv as pa_csv # ignore-banned-import + + pa_table = self._native_frame + if file is None: + csv_buffer = pa.BufferOutputStream() + pa_csv.write_csv(pa_table, csv_buffer) + return csv_buffer.getvalue().to_pybytes().decode() + return pa_csv.write_csv(pa_table, file) + + def is_duplicated(self: Self) -> ArrowSeries: + import numpy as np # ignore-banned-import + import pyarrow as pa # ignore-banned-import() + import pyarrow.compute as pc # ignore-banned-import() + + from narwhals._arrow.series import ArrowSeries + + df = self._native_frame + + columns = self.columns + col_token = generate_unique_token(n_bytes=8, columns=columns) + row_count = ( + df.append_column(col_token, pa.array(np.arange(len(self)))) + .group_by(columns) + .aggregate([(col_token, "count")]) + ) + is_duplicated = pc.greater( + df.join( + row_count, keys=columns, right_keys=columns, join_type="inner" + ).column(f"{col_token}_count"), + 1, + ) + return ArrowSeries(is_duplicated, name="", backend_version=self._backend_version) + + def is_unique(self: Self) -> ArrowSeries: + import pyarrow.compute as pc # ignore-banned-import() + + from narwhals._arrow.series import ArrowSeries + + is_duplicated = self.is_duplicated()._native_series + + return ArrowSeries( + pc.invert(is_duplicated), name="", backend_version=self._backend_version + ) + + def unique( + self: Self, + subset: str | list[str] | None, + *, + keep: Literal["any", "first", "last", "none"] = "any", + maintain_order: bool = False, + ) -> Self: + """ + NOTE: + The param `maintain_order` is only here for compatibility with the polars API + and has no effect on the output. 
+ """ + import numpy as np # ignore-banned-import + import pyarrow as pa # ignore-banned-import() + import pyarrow.compute as pc # ignore-banned-import() + + df = self._native_frame + + if isinstance(subset, str): + subset = [subset] + subset = subset or self.columns + + if keep in {"any", "first", "last"}: + agg_func_map = {"any": "min", "first": "min", "last": "max"} + + agg_func = agg_func_map[keep] + col_token = generate_unique_token(n_bytes=8, columns=self.columns) + keep_idx = ( + df.append_column(col_token, pa.array(np.arange(len(self)))) + .group_by(subset) + .aggregate([(col_token, agg_func)]) + .column(f"{col_token}_{agg_func}") + ) + + return self._from_native_frame(pc.take(df, keep_idx)) + + keep_idx = self.select(*subset).is_unique() + return self.filter(keep_idx) + + def gather_every(self: Self, n: int, offset: int = 0) -> Self: + return self._from_native_frame(self._native_frame[offset::n]) + + def to_arrow(self: Self) -> Any: + return self._native_frame + + def sample( + self: Self, + n: int | None = None, + *, + fraction: float | None = None, + with_replacement: bool = False, + seed: int | None = None, + ) -> Self: + import numpy as np # ignore-banned-import + import pyarrow.compute as pc # ignore-banned-import() + + frame = self._native_frame + num_rows = len(self) + if n is None and fraction is not None: + n = int(num_rows * fraction) + + rng = np.random.default_rng(seed=seed) + idx = np.arange(0, num_rows) + mask = rng.choice(idx, size=n, replace=with_replacement) + + return self._from_native_frame(pc.take(frame, mask)) diff --git a/parrot/lib/python3.10/site-packages/narwhals/_arrow/expr.py b/parrot/lib/python3.10/site-packages/narwhals/_arrow/expr.py new file mode 100644 index 0000000000000000000000000000000000000000..1aceb576fff97d9fc80a86ed962583592a8d07e2 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/narwhals/_arrow/expr.py @@ -0,0 +1,671 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing 
import Any +from typing import Callable +from typing import Literal + +from narwhals._expression_parsing import reuse_series_implementation +from narwhals._expression_parsing import reuse_series_namespace_implementation +from narwhals.utils import Implementation + +if TYPE_CHECKING: + from typing_extensions import Self + + from narwhals._arrow.dataframe import ArrowDataFrame + from narwhals._arrow.namespace import ArrowNamespace + from narwhals._arrow.series import ArrowSeries + from narwhals._arrow.typing import IntoArrowExpr + from narwhals.dtypes import DType + + +class ArrowExpr: + def __init__( + self, + call: Callable[[ArrowDataFrame], list[ArrowSeries]], + *, + depth: int, + function_name: str, + root_names: list[str] | None, + output_names: list[str] | None, + backend_version: tuple[int, ...], + ) -> None: + self._call = call + self._depth = depth + self._function_name = function_name + self._root_names = root_names + self._depth = depth + self._output_names = output_names + self._implementation = Implementation.PYARROW + self._backend_version = backend_version + + def __repr__(self) -> str: # pragma: no cover + return ( + f"ArrowExpr(" + f"depth={self._depth}, " + f"function_name={self._function_name}, " + f"root_names={self._root_names}, " + f"output_names={self._output_names}" + ) + + @classmethod + def from_column_names( + cls: type[Self], *column_names: str, backend_version: tuple[int, ...] 
+ ) -> Self: + from narwhals._arrow.series import ArrowSeries + + def func(df: ArrowDataFrame) -> list[ArrowSeries]: + return [ + ArrowSeries( + df._native_frame[column_name], + name=column_name, + backend_version=df._backend_version, + ) + for column_name in column_names + ] + + return cls( + func, + depth=0, + function_name="col", + root_names=list(column_names), + output_names=list(column_names), + backend_version=backend_version, + ) + + def __narwhals_namespace__(self) -> ArrowNamespace: + from narwhals._arrow.namespace import ArrowNamespace + + return ArrowNamespace(backend_version=self._backend_version) + + def __narwhals_expr__(self) -> None: ... + + def __eq__(self, other: ArrowExpr | Any) -> Self: # type: ignore[override] + return reuse_series_implementation(self, "__eq__", other=other) + + def __ne__(self, other: ArrowExpr | Any) -> Self: # type: ignore[override] + return reuse_series_implementation(self, "__ne__", other=other) + + def __ge__(self, other: ArrowExpr | Any) -> Self: + return reuse_series_implementation(self, "__ge__", other=other) + + def __gt__(self, other: ArrowExpr | Any) -> Self: + return reuse_series_implementation(self, "__gt__", other=other) + + def __le__(self, other: ArrowExpr | Any) -> Self: + return reuse_series_implementation(self, "__le__", other=other) + + def __lt__(self, other: ArrowExpr | Any) -> Self: + return reuse_series_implementation(self, "__lt__", other=other) + + def __and__(self, other: ArrowExpr | bool | Any) -> Self: + return reuse_series_implementation(self, "__and__", other=other) + + def __rand__(self, other: ArrowExpr | bool | Any) -> Self: + return reuse_series_implementation(self, "__rand__", other=other) + + def __or__(self, other: ArrowExpr | bool | Any) -> Self: + return reuse_series_implementation(self, "__or__", other=other) + + def __ror__(self, other: ArrowExpr | bool | Any) -> Self: + return reuse_series_implementation(self, "__ror__", other=other) + + def __add__(self, other: ArrowExpr | Any) -> 
Self: + return reuse_series_implementation(self, "__add__", other) + + def __radd__(self, other: ArrowExpr | Any) -> Self: + return reuse_series_implementation(self, "__radd__", other) + + def __sub__(self, other: ArrowExpr | Any) -> Self: + return reuse_series_implementation(self, "__sub__", other) + + def __rsub__(self, other: ArrowExpr | Any) -> Self: + return reuse_series_implementation(self, "__rsub__", other) + + def __mul__(self, other: ArrowExpr | Any) -> Self: + return reuse_series_implementation(self, "__mul__", other) + + def __rmul__(self, other: ArrowExpr | Any) -> Self: + return reuse_series_implementation(self, "__rmul__", other) + + def __pow__(self, other: ArrowExpr | Any) -> Self: + return reuse_series_implementation(self, "__pow__", other) + + def __rpow__(self, other: ArrowExpr | Any) -> Self: + return reuse_series_implementation(self, "__rpow__", other) + + def __floordiv__(self, other: ArrowExpr | Any) -> Self: + return reuse_series_implementation(self, "__floordiv__", other) + + def __rfloordiv__(self, other: ArrowExpr | Any) -> Self: + return reuse_series_implementation(self, "__rfloordiv__", other) + + def __truediv__(self, other: ArrowExpr | Any) -> Self: + return reuse_series_implementation(self, "__truediv__", other) + + def __rtruediv__(self, other: ArrowExpr | Any) -> Self: + return reuse_series_implementation(self, "__rtruediv__", other) + + def __mod__(self, other: ArrowExpr | Any) -> Self: + return reuse_series_implementation(self, "__mod__", other) + + def __rmod__(self, other: ArrowExpr | Any) -> Self: + return reuse_series_implementation(self, "__rmod__", other) + + def __invert__(self) -> Self: + return reuse_series_implementation(self, "__invert__") + + def len(self) -> Self: + return reuse_series_implementation(self, "len", returns_scalar=True) + + def filter(self, *predicates: IntoArrowExpr) -> Self: + plx = self.__narwhals_namespace__() + expr = plx.all_horizontal(*predicates) + return reuse_series_implementation(self, 
"filter", other=expr) + + def mean(self) -> Self: + return reuse_series_implementation(self, "mean", returns_scalar=True) + + def count(self) -> Self: + return reuse_series_implementation(self, "count", returns_scalar=True) + + def n_unique(self) -> Self: + return reuse_series_implementation(self, "n_unique", returns_scalar=True) + + def std(self, ddof: int = 1) -> Self: + return reuse_series_implementation(self, "std", ddof=ddof, returns_scalar=True) + + def cast(self, dtype: DType) -> Self: + return reuse_series_implementation(self, "cast", dtype) + + def abs(self) -> Self: + return reuse_series_implementation(self, "abs") + + def diff(self) -> Self: + return reuse_series_implementation(self, "diff") + + def cum_sum(self) -> Self: + return reuse_series_implementation(self, "cum_sum") + + def round(self, decimals: int) -> Self: + return reuse_series_implementation(self, "round", decimals) + + def any(self) -> Self: + return reuse_series_implementation(self, "any", returns_scalar=True) + + def min(self) -> Self: + return reuse_series_implementation(self, "min", returns_scalar=True) + + def max(self) -> Self: + return reuse_series_implementation(self, "max", returns_scalar=True) + + def all(self) -> Self: + return reuse_series_implementation(self, "all", returns_scalar=True) + + def sum(self) -> Self: + return reuse_series_implementation(self, "sum", returns_scalar=True) + + def drop_nulls(self) -> Self: + return reuse_series_implementation(self, "drop_nulls") + + def shift(self, n: int) -> Self: + return reuse_series_implementation(self, "shift", n) + + def alias(self, name: str) -> Self: + # Define this one manually, so that we can + # override `output_names` and not increase depth + return self.__class__( + lambda df: [series.alias(name) for series in self._call(df)], + depth=self._depth, + function_name=self._function_name, + root_names=self._root_names, + output_names=[name], + backend_version=self._backend_version, + ) + + def null_count(self) -> Self: + 
    # NOTE(review): this chunk begins mid-method; the `return` below is the
    # tail of a method whose `def` lies before this view (null_count, judging
    # by its arguments). Kept verbatim.
    return reuse_series_implementation(self, "null_count", returns_scalar=True)

    # The methods below all delegate to the same-named method on ArrowSeries
    # via `reuse_series_implementation`, applying it to each series the
    # expression produces.

    def is_null(self) -> Self:
        return reuse_series_implementation(self, "is_null")

    def is_between(self, lower_bound: Any, upper_bound: Any, closed: str) -> Self:
        return reuse_series_implementation(
            self, "is_between", lower_bound, upper_bound, closed
        )

    def head(self, n: int) -> Self:
        return reuse_series_implementation(self, "head", n)

    def tail(self, n: int) -> Self:
        return reuse_series_implementation(self, "tail", n)

    def is_in(self, other: ArrowExpr | Any) -> Self:
        return reuse_series_implementation(self, "is_in", other)

    def arg_true(self) -> Self:
        return reuse_series_implementation(self, "arg_true")

    def sample(
        self: Self,
        n: int | None = None,
        *,
        fraction: float | None = None,
        with_replacement: bool = False,
        seed: int | None = None,
    ) -> Self:
        return reuse_series_implementation(
            self,
            "sample",
            n=n,
            fraction=fraction,
            with_replacement=with_replacement,
            seed=seed,
        )

    def fill_null(self: Self, value: Any) -> Self:
        return reuse_series_implementation(self, "fill_null", value=value)

    def is_duplicated(self: Self) -> Self:
        return reuse_series_implementation(self, "is_duplicated")

    def is_unique(self: Self) -> Self:
        return reuse_series_implementation(self, "is_unique")

    def is_first_distinct(self: Self) -> Self:
        return reuse_series_implementation(self, "is_first_distinct")

    def is_last_distinct(self: Self) -> Self:
        return reuse_series_implementation(self, "is_last_distinct")

    def unique(self: Self) -> Self:
        return reuse_series_implementation(self, "unique")

    def sort(self: Self, *, descending: bool = False, nulls_last: bool = False) -> Self:
        return reuse_series_implementation(
            self, "sort", descending=descending, nulls_last=nulls_last
        )

    def quantile(
        self,
        quantile: float,
        interpolation: Literal["nearest", "higher", "lower", "midpoint", "linear"],
    ) -> Self:
        # Aggregation: yields a scalar per series rather than a new series.
        return reuse_series_implementation(
            self, "quantile", quantile, interpolation, returns_scalar=True
        )

    def gather_every(self: Self, n: int, offset: int = 0) -> Self:
        return reuse_series_implementation(self, "gather_every", n=n, offset=offset)

    def clip(
        self: Self, lower_bound: Any | None = None, upper_bound: Any | None = None
    ) -> Self:
        return reuse_series_implementation(
            self, "clip", lower_bound=lower_bound, upper_bound=upper_bound
        )

    def over(self: Self, keys: list[str]) -> Self:
        """Evaluate this (aggregating) expression per group defined by `keys`.

        Implemented by aggregating per group, then left-joining the result
        back onto the original rows so each row receives its group's value.
        """

        def func(df: ArrowDataFrame) -> list[ArrowSeries]:
            if self._output_names is None:
                msg = (
                    "Anonymous expressions are not supported in over.\n"
                    "Instead of `nw.all()`, try using a named expression, such as "
                    "`nw.col('a', 'b')`\n"
                )
                raise ValueError(msg)
            tmp = df.group_by(*keys).agg(self)
            # Broadcast group-level results back to row level via a left join.
            tmp = df.select(*keys).join(
                tmp, how="left", left_on=keys, right_on=keys, suffix="_right"
            )
            return [tmp[name] for name in self._output_names]

        return self.__class__(
            func,
            depth=self._depth + 1,
            function_name=self._function_name + "->over",
            root_names=self._root_names,
            output_names=self._output_names,
            backend_version=self._backend_version,
        )

    def mode(self: Self) -> Self:
        return reuse_series_implementation(self, "mode")

    # Accessor namespaces, mirroring polars' `expr.dt` / `expr.str` / etc.

    @property
    def dt(self: Self) -> ArrowExprDateTimeNamespace:
        return ArrowExprDateTimeNamespace(self)

    @property
    def str(self: Self) -> ArrowExprStringNamespace:
        return ArrowExprStringNamespace(self)

    @property
    def cat(self: Self) -> ArrowExprCatNamespace:
        return ArrowExprCatNamespace(self)

    @property
    def name(self: Self) -> ArrowExprNameNamespace:
        return ArrowExprNameNamespace(self)


class ArrowExprCatNamespace:
    """Categorical accessor (`expr.cat`); delegates to ArrowSeries.cat."""

    def __init__(self, expr: ArrowExpr) -> None:
        self._expr = expr

    def get_categories(self) -> ArrowExpr:
        return reuse_series_namespace_implementation(
            self._expr,
            "cat",
            "get_categories",
        )


class ArrowExprDateTimeNamespace:
    """Datetime accessor (`expr.dt`); each method delegates to the
    same-named method on the ArrowSeries `dt` namespace."""

    def __init__(self: Self, expr: ArrowExpr) -> None:
        self._expr = expr

    def to_string(self: Self, format: str) -> ArrowExpr:  # noqa: A002
        return reuse_series_namespace_implementation(
            self._expr, "dt", "to_string", format
        )

    def date(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(self._expr, "dt", "date")

    def year(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(self._expr, "dt", "year")

    def month(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(self._expr, "dt", "month")

    def day(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(self._expr, "dt", "day")

    def hour(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(self._expr, "dt", "hour")

    def minute(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(self._expr, "dt", "minute")

    def second(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(self._expr, "dt", "second")

    def millisecond(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(self._expr, "dt", "millisecond")

    def microsecond(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(self._expr, "dt", "microsecond")

    def nanosecond(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(self._expr, "dt", "nanosecond")

    def ordinal_day(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(self._expr, "dt", "ordinal_day")

    def total_minutes(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(self._expr, "dt", "total_minutes")

    def total_seconds(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(self._expr, "dt", "total_seconds")

    def total_milliseconds(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(
            self._expr, "dt", "total_milliseconds"
        )

    def total_microseconds(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(
            self._expr, "dt", "total_microseconds"
        )

    def total_nanoseconds(self: Self) -> ArrowExpr:
        return reuse_series_namespace_implementation(
            self._expr, "dt", "total_nanoseconds"
        )


class ArrowExprStringNamespace:
    """String accessor (`expr.str`); each method delegates to the
    same-named method on the ArrowSeries `str` namespace."""

    def __init__(self, expr: ArrowExpr) -> None:
        self._expr = expr

    def len_chars(self) -> ArrowExpr:
        return reuse_series_namespace_implementation(self._expr, "str", "len_chars")

    def replace(
        self,
        pattern: str,
        value: str,
        *,
        literal: bool = False,
        n: int = 1,
    ) -> ArrowExpr:
        return reuse_series_namespace_implementation(
            self._expr,
            "str",
            "replace",
            pattern,
            value,
            literal=literal,
            n=n,
        )

    def replace_all(
        self,
        pattern: str,
        value: str,
        *,
        literal: bool = False,
    ) -> ArrowExpr:
        return reuse_series_namespace_implementation(
            self._expr,
            "str",
            "replace_all",
            pattern,
            value,
            literal=literal,
        )

    def strip_chars(self, characters: str | None = None) -> ArrowExpr:
        return reuse_series_namespace_implementation(
            self._expr,
            "str",
            "strip_chars",
            characters,
        )

    def starts_with(self, prefix: str) -> ArrowExpr:
        return reuse_series_namespace_implementation(
            self._expr,
            "str",
            "starts_with",
            prefix,
        )

    def ends_with(self, suffix: str) -> ArrowExpr:
        return reuse_series_namespace_implementation(
            self._expr,
            "str",
            "ends_with",
            suffix,
        )

    def contains(self, pattern: str, *, literal: bool) -> ArrowExpr:
        return reuse_series_namespace_implementation(
            self._expr, "str", "contains", pattern, literal=literal
        )

    def slice(self, offset: int, length: int | None = None) -> ArrowExpr:
        return reuse_series_namespace_implementation(
            self._expr, "str", "slice", offset, length
        )

    def to_datetime(self, format: str | None = None) -> ArrowExpr:  # noqa: A002
        return reuse_series_namespace_implementation(
            self._expr,
            "str",
            "to_datetime",
            format,
        )

    def to_uppercase(self) -> ArrowExpr:
        return reuse_series_namespace_implementation(
            self._expr,
            "str",
            "to_uppercase",
        )

    def to_lowercase(self) -> ArrowExpr:
        return reuse_series_namespace_implementation(
            self._expr,
            "str",
            "to_lowercase",
        )


class ArrowExprNameNamespace:
    """Name accessor (`expr.name`).

    Every method rebuilds the expression with aliased output series.
    All of them require known root names, so anonymous expressions
    (e.g. `nw.all()`) raise ValueError.
    """

    def __init__(self: Self, expr: ArrowExpr) -> None:
        self._expr = expr

    def keep(self: Self) -> ArrowExpr:
        root_names = self._expr._root_names

        if root_names is None:
            msg = (
                "Anonymous expressions are not supported in `.name.keep`.\n"
                "Instead of `nw.all()`, try using a named expression, such as "
                "`nw.col('a', 'b')`\n"
            )
            raise ValueError(msg)

        return self._expr.__class__(
            lambda df: [
                series.alias(name)
                for series, name in zip(self._expr._call(df), root_names)
            ],
            depth=self._expr._depth,
            function_name=self._expr._function_name,
            root_names=root_names,
            output_names=root_names,
            backend_version=self._expr._backend_version,
        )

    def map(self: Self, function: Callable[[str], str]) -> ArrowExpr:
        root_names = self._expr._root_names

        if root_names is None:
            msg = (
                "Anonymous expressions are not supported in `.name.map`.\n"
                "Instead of `nw.all()`, try using a named expression, such as "
                "`nw.col('a', 'b')`\n"
            )
            raise ValueError(msg)

        output_names = [function(str(name)) for name in root_names]

        return self._expr.__class__(
            lambda df: [
                series.alias(name)
                for series, name in zip(self._expr._call(df), output_names)
            ],
            depth=self._expr._depth,
            function_name=self._expr._function_name,
            root_names=root_names,
            output_names=output_names,
            backend_version=self._expr._backend_version,
        )

    def prefix(self: Self, prefix: str) -> ArrowExpr:
        root_names = self._expr._root_names
        if root_names is None:
            msg = (
                "Anonymous expressions are not supported in `.name.prefix`.\n"
                "Instead of `nw.all()`, try using a named expression, such as "
                "`nw.col('a', 'b')`\n"
            )
            raise ValueError(msg)

        output_names = [prefix + str(name) for name in root_names]
        return self._expr.__class__(
            lambda df: [
                series.alias(name)
                for series, name in zip(self._expr._call(df), output_names)
            ],
            depth=self._expr._depth,
            function_name=self._expr._function_name,
            root_names=root_names,
            output_names=output_names,
            backend_version=self._expr._backend_version,
        )

    def suffix(self: Self, suffix: str) -> ArrowExpr:
        root_names = self._expr._root_names
        if root_names is None:
            msg = (
                "Anonymous expressions are not supported in `.name.suffix`.\n"
                "Instead of `nw.all()`, try using a named expression, such as "
                "`nw.col('a', 'b')`\n"
            )
            raise ValueError(msg)

        output_names = [str(name) + suffix for name in root_names]

        return self._expr.__class__(
            lambda df: [
                series.alias(name)
                for series, name in zip(self._expr._call(df), output_names)
            ],
            depth=self._expr._depth,
            function_name=self._expr._function_name,
            root_names=root_names,
            output_names=output_names,
            backend_version=self._expr._backend_version,
        )

    def to_lowercase(self: Self) -> ArrowExpr:
        root_names = self._expr._root_names

        if root_names is None:
            msg = (
                "Anonymous expressions are not supported in `.name.to_lowercase`.\n"
                "Instead of `nw.all()`, try using a named expression, such as "
                "`nw.col('a', 'b')`\n"
            )
            raise ValueError(msg)
        output_names = [str(name).lower() for name in root_names]

        return self._expr.__class__(
            lambda df: [
                series.alias(name)
                for series, name in zip(self._expr._call(df), output_names)
            ],
            depth=self._expr._depth,
            function_name=self._expr._function_name,
            root_names=root_names,
            output_names=output_names,
            backend_version=self._expr._backend_version,
        )

    def to_uppercase(self: Self) -> ArrowExpr:
        root_names = self._expr._root_names

        if root_names is None:
            msg = (
                "Anonymous expressions are not supported in `.name.to_uppercase`.\n"
                "Instead of `nw.all()`, try using a named expression, such as "
                "`nw.col('a', 'b')`\n"
            )
            # NOTE(review): the statement below continues past this chunk
            # boundary (`ValueError(msg)` follows in the next span).
            raise
            # NOTE(review): the fragment below completes the
            # `name.to_uppercase` raise started in the previous chunk.
            ValueError(msg)
        output_names = [str(name).upper() for name in root_names]

        return self._expr.__class__(
            lambda df: [
                series.alias(name)
                for series, name in zip(self._expr._call(df), output_names)
            ],
            depth=self._expr._depth,
            function_name=self._expr._function_name,
            root_names=root_names,
            output_names=output_names,
            backend_version=self._expr._backend_version,
        )
diff --git a/parrot/lib/python3.10/site-packages/narwhals/_arrow/group_by.py b/parrot/lib/python3.10/site-packages/narwhals/_arrow/group_by.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c7b204853bcbde9da87161104316f7fb0f5a2db
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/narwhals/_arrow/group_by.py
@@ -0,0 +1,168 @@
from __future__ import annotations

from copy import copy
from typing import TYPE_CHECKING
from typing import Any
from typing import Callable
from typing import Iterator

from narwhals._expression_parsing import is_simple_aggregation
from narwhals._expression_parsing import parse_into_exprs
from narwhals.utils import remove_prefix

if TYPE_CHECKING:
    from narwhals._arrow.dataframe import ArrowDataFrame
    from narwhals._arrow.expr import ArrowExpr
    from narwhals._arrow.typing import IntoArrowExpr

# Polars aggregation name -> pyarrow.compute aggregation name.
POLARS_TO_ARROW_AGGREGATIONS = {
    "len": "count",
    "n_unique": "count_distinct",
    "std": "stddev",
    "var": "variance",  # currently unused, we don't have `var` yet
}


def get_function_name_option(function_name: str) -> Any | None:
    """Map specific pyarrow compute function to respective option to match polars behaviour."""
    import pyarrow.compute as pc  # ignore-banned-import

    # e.g. polars counts nulls (`mode="all"`) and uses ddof=1 for std/var.
    function_name_to_options = {
        "count": pc.CountOptions(mode="all"),
        "count_distinct": pc.CountOptions(mode="all"),
        "stddev": pc.VarianceOptions(ddof=1),
        "variance": pc.VarianceOptions(ddof=1),
    }
    return function_name_to_options.get(function_name)


class ArrowGroupBy:
    """Group-by object wrapping `pyarrow.TableGroupBy`."""

    def __init__(self, df: ArrowDataFrame, keys: list[str]) -> None:
        import pyarrow as pa  # ignore-banned-import()

        self._df = df
        self._keys = list(keys)
        self._grouped = pa.TableGroupBy(self._df._native_frame, list(self._keys))

    def agg(
        self,
        *aggs: IntoArrowExpr,
        **named_aggs: IntoArrowExpr,
    ) -> ArrowDataFrame:
        """Aggregate each group; each expression must have known output names."""
        exprs = parse_into_exprs(
            *aggs,
            namespace=self._df.__narwhals_namespace__(),
            **named_aggs,
        )
        # Result columns: group keys first, then each expression's outputs.
        output_names: list[str] = copy(self._keys)
        for expr in exprs:
            if expr._output_names is None:
                msg = (
                    "Anonymous expressions are not supported in group_by.agg.\n"
                    "Instead of `nw.all()`, try using a named expression, such as "
                    "`nw.col('a', 'b')`\n"
                )
                raise ValueError(msg)
            output_names.extend(expr._output_names)

        return agg_arrow(
            self._grouped,
            exprs,
            self._keys,
            output_names,
            self._df._from_native_frame,
        )

    def __iter__(self) -> Iterator[tuple[Any, ArrowDataFrame]]:
        """Yield `(key_value, sub_frame)` pairs, one per distinct key combination."""
        key_values = (
            self._df.select(*self._keys)
            .unique(subset=self._keys, keep="first")
            .iter_rows()
        )
        nw_namespace = self._df.__narwhals_namespace__()
        yield from (
            (
                key_value,
                self._df.filter(
                    *[nw_namespace.col(k) == v for k, v in zip(self._keys, key_value)]
                ),
            )
            for key_value in key_values
        )


def agg_arrow(
    grouped: Any,
    exprs: list[ArrowExpr],
    keys: list[str],
    output_names: list[str],
    from_dataframe: Callable[[Any], ArrowDataFrame],
) -> ArrowDataFrame:
    """Run the aggregations through pyarrow's native group-by.

    Only "simple" (elementary, column-wise) aggregations are supported;
    anything else raises ValueError with a rewrite hint.
    """
    import pyarrow.compute as pc  # ignore-banned-import()

    all_simple_aggs = True
    for expr in exprs:
        if not is_simple_aggregation(expr):
            all_simple_aggs = False
            break

    if all_simple_aggs:
        # Mapping from output name to
        # (aggregation_args, pyarrow_output_name)  # noqa: ERA001
        simple_aggregations: dict[str, tuple[tuple[Any, ...], str]] = {}
        for expr in exprs:
            if expr._depth == 0:
                # e.g. agg(nw.len())  # noqa: ERA001
                if (
                    expr._output_names is None or expr._function_name != "len"
                ):  # pragma: no cover
                    msg = "Safety assertion failed, please report a bug to https://github.com/narwhals-dev/narwhals/issues"
                    raise AssertionError(msg)
                # `len` is implemented as a count over the first key column.
                simple_aggregations[expr._output_names[0]] = (
                    (keys[0], "count", pc.CountOptions(mode="all")),
                    f"{keys[0]}_count",
                )
                continue

            # e.g. agg(nw.mean('a'))  # noqa: ERA001
            if (
                expr._depth != 1 or expr._root_names is None or expr._output_names is None
            ):  # pragma: no cover
                msg = "Safety assertion failed, please report a bug to https://github.com/narwhals-dev/narwhals/issues"
                raise AssertionError(msg)

            function_name = remove_prefix(expr._function_name, "col->")
            function_name = POLARS_TO_ARROW_AGGREGATIONS.get(function_name, function_name)

            option = get_function_name_option(function_name)
            for root_name, output_name in zip(expr._root_names, expr._output_names):
                simple_aggregations[output_name] = (
                    (root_name, function_name, option),
                    f"{root_name}_{function_name}",
                )

        aggs: list[Any] = []
        name_mapping = {}
        for output_name, (
            aggregation_args,
            pyarrow_output_name,
        ) in simple_aggregations.items():
            aggs.append(aggregation_args)
            name_mapping[pyarrow_output_name] = output_name
        result_simple = grouped.aggregate(aggs)
        # pyarrow names results `<col>_<func>`; rename back to requested names
        # and restore the requested column order.
        result_simple = result_simple.rename_columns(
            [name_mapping.get(col, col) for col in result_simple.column_names]
        ).select(output_names)
        return from_dataframe(result_simple)

    msg = (
        "Non-trivial complex found.\n\n"
        "Hint: you were probably trying to apply a non-elementary aggregation with a "
        "pyarrow table.\n"
        "Please rewrite your query such that group-by aggregations "
        "are elementary. For example, instead of:\n\n"
        " df.group_by('a').agg(nw.col('b').round(2).mean())\n\n"
        "use:\n\n"
        " df.with_columns(nw.col('b').round(2)).group_by('a').agg(nw.col('b').mean())\n\n"
    )
    raise ValueError(msg)
diff --git a/parrot/lib/python3.10/site-packages/narwhals/_arrow/selectors.py b/parrot/lib/python3.10/site-packages/narwhals/_arrow/selectors.py
new file mode 100644
index 0000000000000000000000000000000000000000..569724c45802915c353d8c72079e7d47c41c698d
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/narwhals/_arrow/selectors.py
@@ -0,0 +1,163 @@
from __future__ import annotations

from typing import TYPE_CHECKING
from typing import Any
from typing import NoReturn

from narwhals import dtypes
from narwhals._arrow.expr import ArrowExpr
from narwhals.utils import Implementation

if TYPE_CHECKING:
    from typing_extensions import Self

    from narwhals._arrow.dataframe import ArrowDataFrame
    from narwhals._arrow.series import ArrowSeries
    from narwhals.dtypes import DType


class ArrowSelectorNamespace:
    """Factory for column selectors (`cs.numeric()`, `cs.string()`, ...)."""

    def __init__(self: Self, *, backend_version: tuple[int, ...]) -> None:
        self._backend_version = backend_version
        self._implementation = Implementation.PYARROW

    def by_dtype(self: Self, dtypes: list[DType | type[DType]]) -> ArrowSelector:
        """Select every column whose dtype is in `dtypes`."""

        def func(df: ArrowDataFrame) -> list[ArrowSeries]:
            return [df[col] for col in df.columns if df.schema[col] in dtypes]

        return ArrowSelector(
            func,
            depth=0,
            function_name="type_selector",
            root_names=None,
            output_names=None,
            backend_version=self._backend_version,
        )

    def numeric(self: Self) -> ArrowSelector:
        return self.by_dtype(
            [
                dtypes.Int64,
                dtypes.Int32,
                dtypes.Int16,
                dtypes.Int8,
                dtypes.UInt64,
                dtypes.UInt32,
                dtypes.UInt16,
                dtypes.UInt8,
                dtypes.Float64,
                dtypes.Float32,
            ],
        )

    def categorical(self: Self) -> ArrowSelector:
        return self.by_dtype([dtypes.Categorical])

    def string(self: Self) -> ArrowSelector:
        # NOTE(review): expression continues past this chunk boundary
        # (`self.by_dtype([dtypes.String])` follows in the next span).
        return
        # NOTE(review): the line below completes `string()` started in the
        # previous chunk.
        self.by_dtype([dtypes.String])

    def boolean(self: Self) -> ArrowSelector:
        return self.by_dtype([dtypes.Boolean])

    def all(self: Self) -> ArrowSelector:
        """Select every column."""

        def func(df: ArrowDataFrame) -> list[ArrowSeries]:
            return [df[col] for col in df.columns]

        return ArrowSelector(
            func,
            depth=0,
            function_name="type_selector",
            root_names=None,
            output_names=None,
            backend_version=self._backend_version,
        )


class ArrowSelector(ArrowExpr):
    """An ArrowExpr that supports set algebra (`-`, `|`, `&`, `~`) on the
    selected columns; selector op non-selector falls back to expression
    arithmetic via `_to_expr`."""

    def __repr__(self: Self) -> str:  # pragma: no cover
        # NOTE(review): the rendered repr is missing a closing ")" —
        # cosmetic only; left unchanged here as it is a runtime string.
        return (
            f"ArrowSelector("
            f"depth={self._depth}, "
            f"function_name={self._function_name}, "
            f"root_names={self._root_names}, "
            f"output_names={self._output_names}"
        )

    def _to_expr(self: Self) -> ArrowExpr:
        # Downgrade to a plain expression (used when combined with non-selectors).
        return ArrowExpr(
            self._call,
            depth=self._depth,
            function_name=self._function_name,
            root_names=self._root_names,
            output_names=self._output_names,
            backend_version=self._backend_version,
        )

    def __sub__(self: Self, other: Self | Any) -> ArrowSelector | Any:
        # Set difference: columns selected by self but not by other.
        if isinstance(other, ArrowSelector):

            def call(df: ArrowDataFrame) -> list[ArrowSeries]:
                lhs = self._call(df)
                rhs = other._call(df)
                return [x for x in lhs if x.name not in {x.name for x in rhs}]

            return ArrowSelector(
                call,
                depth=0,
                function_name="type_selector",
                root_names=None,
                output_names=None,
                backend_version=self._backend_version,
            )
        else:
            return self._to_expr() - other

    def __or__(self: Self, other: Self | Any) -> ArrowSelector | Any:
        # Set union: self-only columns first, then all of other's.
        if isinstance(other, ArrowSelector):

            def call(df: ArrowDataFrame) -> list[ArrowSeries]:
                lhs = self._call(df)
                rhs = other._call(df)
                return [x for x in lhs if x.name not in {x.name for x in rhs}] + rhs

            return ArrowSelector(
                call,
                depth=0,
                function_name="type_selector",
                root_names=None,
                output_names=None,
                backend_version=self._backend_version,
            )
        else:
            return self._to_expr() | other

    def __and__(self: Self, other: Self | Any) -> ArrowSelector | Any:
        # Set intersection: columns selected by both.
        if isinstance(other, ArrowSelector):

            def call(df: ArrowDataFrame) -> list[ArrowSeries]:
                lhs = self._call(df)
                rhs = other._call(df)
                return [x for x in lhs if x.name in {x.name for x in rhs}]

            return ArrowSelector(
                call,
                depth=0,
                function_name="type_selector",
                root_names=None,
                output_names=None,
                backend_version=self._backend_version,
            )
        else:
            return self._to_expr() & other

    def __invert__(self: Self) -> ArrowSelector:
        # Complement: everything except this selector's columns.
        return ArrowSelectorNamespace(backend_version=self._backend_version).all() - self

    # Reflected set ops are deliberately unsupported.
    def __rsub__(self: Self, other: Any) -> NoReturn:
        raise NotImplementedError

    def __rand__(self: Self, other: Any) -> NoReturn:
        raise NotImplementedError

    def __ror__(self: Self, other: Any) -> NoReturn:
        raise NotImplementedError
diff --git a/parrot/lib/python3.10/site-packages/narwhals/_arrow/series.py b/parrot/lib/python3.10/site-packages/narwhals/_arrow/series.py
new file mode 100644
index 0000000000000000000000000000000000000000..8c86437917386536b13830e794f24f9aaf72687d
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/narwhals/_arrow/series.py
@@ -0,0 +1,1038 @@
from __future__ import annotations

from typing import TYPE_CHECKING
from typing import Any
from typing import Iterable
from typing import Iterator
from typing import Literal
from typing import Sequence
from typing import overload

from narwhals._arrow.utils import cast_for_truediv
from narwhals._arrow.utils import floordiv_compat
from narwhals._arrow.utils import narwhals_to_native_dtype
from narwhals._arrow.utils import translate_dtype
from narwhals._arrow.utils import validate_column_comparand
from narwhals.dependencies import get_pandas
from narwhals.dependencies import get_pyarrow
from narwhals.utils import Implementation
from narwhals.utils import generate_unique_token

if TYPE_CHECKING:
    import pyarrow as pa
    from typing_extensions import Self

    from narwhals._arrow.dataframe import ArrowDataFrame
    # NOTE(review): import statement continues past this chunk boundary
    # (`ArrowNamespace` follows in the next span).
    from narwhals._arrow.namespace import
    # NOTE(review): the two lines below complete the TYPE_CHECKING import
    # block started in the previous chunk.
    ArrowNamespace
    from narwhals.dtypes import DType


class ArrowSeries:
    """A narwhals Series backed by a `pyarrow.ChunkedArray`.

    The pyarrow name is not used: the narwhals name is tracked separately
    in `self._name`, so renaming is free (see `alias`).
    """

    def __init__(
        self,
        native_series: pa.ChunkedArray,
        *,
        name: str,
        backend_version: tuple[int, ...],
    ) -> None:
        self._name = name
        self._native_series = native_series
        self._implementation = Implementation.PYARROW
        self._backend_version = backend_version

    def _from_native_series(self, series: Any) -> Self:
        """Wrap a pyarrow Array/ChunkedArray, preserving name and backend version."""
        import pyarrow as pa  # ignore-banned-import()

        # Normalise plain Arrays to ChunkedArray so the wrapped type is uniform.
        if isinstance(series, pa.Array):
            series = pa.chunked_array([series])
        return self.__class__(
            series,
            name=self._name,
            backend_version=self._backend_version,
        )

    @classmethod
    def _from_iterable(
        cls: type[Self],
        data: Iterable[Any],
        name: str,
        *,
        backend_version: tuple[int, ...],
    ) -> Self:
        """Build a series from any Python iterable."""
        import pyarrow as pa  # ignore-banned-import()

        return cls(
            pa.chunked_array([data]),
            name=name,
            backend_version=backend_version,
        )

    def __narwhals_namespace__(self) -> ArrowNamespace:
        from narwhals._arrow.namespace import ArrowNamespace

        return ArrowNamespace(backend_version=self._backend_version)

    def __len__(self) -> int:
        return len(self._native_series)

    # --- comparisons and boolean/arithmetic operators ----------------------
    # Each delegates to pyarrow.compute; `validate_column_comparand` unwraps
    # an ArrowSeries operand (or passes scalars through). Note that `__eq__`
    # / `__ne__` return a boolean series, not a bool, hence the type: ignore.

    def __eq__(self, other: object) -> Self:  # type: ignore[override]
        import pyarrow.compute as pc  # ignore-banned-import()

        ser = self._native_series
        other = validate_column_comparand(other)
        return self._from_native_series(pc.equal(ser, other))

    def __ne__(self, other: object) -> Self:  # type: ignore[override]
        import pyarrow.compute as pc  # ignore-banned-import()

        ser = self._native_series
        other = validate_column_comparand(other)
        return self._from_native_series(pc.not_equal(ser, other))

    def __ge__(self, other: Any) -> Self:
        import pyarrow.compute as pc  # ignore-banned-import()

        ser = self._native_series
        other = validate_column_comparand(other)
        return self._from_native_series(pc.greater_equal(ser, other))

    def __gt__(self, other: Any) -> Self:
        import pyarrow.compute as pc  # ignore-banned-import()

        ser = self._native_series
        other = validate_column_comparand(other)
        return self._from_native_series(pc.greater(ser, other))

    def __le__(self, other: Any) -> Self:
        import pyarrow.compute as pc  # ignore-banned-import()

        ser = self._native_series
        other = validate_column_comparand(other)
        return self._from_native_series(pc.less_equal(ser, other))

    def __lt__(self, other: Any) -> Self:
        import pyarrow.compute as pc  # ignore-banned-import()

        ser = self._native_series
        other = validate_column_comparand(other)
        return self._from_native_series(pc.less(ser, other))

    def __and__(self, other: Any) -> Self:
        import pyarrow.compute as pc  # ignore-banned-import()

        ser = self._native_series
        other = validate_column_comparand(other)
        # Kleene logic: null-aware and/or, matching polars semantics.
        return self._from_native_series(pc.and_kleene(ser, other))

    def __rand__(self, other: Any) -> Self:
        import pyarrow.compute as pc  # ignore-banned-import()

        ser = self._native_series
        other = validate_column_comparand(other)
        return self._from_native_series(pc.and_kleene(other, ser))

    def __or__(self, other: Any) -> Self:
        import pyarrow.compute as pc  # ignore-banned-import()

        ser = self._native_series
        other = validate_column_comparand(other)
        return self._from_native_series(pc.or_kleene(ser, other))

    def __ror__(self, other: Any) -> Self:
        import pyarrow.compute as pc  # ignore-banned-import()

        ser = self._native_series
        other = validate_column_comparand(other)
        return self._from_native_series(pc.or_kleene(other, ser))

    def __add__(self, other: Any) -> Self:
        import pyarrow.compute as pc  # ignore-banned-import()

        other = validate_column_comparand(other)
        return self._from_native_series(pc.add(self._native_series, other))

    def __radd__(self, other: Any) -> Self:
        return self + other  # type: ignore[no-any-return]

    def __sub__(self, other: Any) -> Self:
        import pyarrow.compute as pc  # ignore-banned-import()

        other = validate_column_comparand(other)
        return self._from_native_series(pc.subtract(self._native_series, other))

    def __rsub__(self, other: Any) -> Self:
        # other - self == -(self - other)
        return (self - other) * (-1)  # type: ignore[no-any-return]

    def __mul__(self, other: Any) -> Self:
        import pyarrow.compute as pc  # ignore-banned-import()

        other = validate_column_comparand(other)
        return self._from_native_series(pc.multiply(self._native_series, other))

    def __rmul__(self, other: Any) -> Self:
        return self * other  # type: ignore[no-any-return]

    def __pow__(self, other: Any) -> Self:
        import pyarrow.compute as pc  # ignore-banned-import()

        ser = self._native_series
        other = validate_column_comparand(other)
        return self._from_native_series(pc.power(ser, other))

    def __rpow__(self, other: Any) -> Self:
        import pyarrow.compute as pc  # ignore-banned-import()

        ser = self._native_series
        other = validate_column_comparand(other)
        return self._from_native_series(pc.power(other, ser))

    def __floordiv__(self, other: Any) -> Self:
        # floordiv_compat reproduces Python floor-division semantics, which
        # differ from pyarrow's truncating integer division for negatives.
        ser = self._native_series
        other = validate_column_comparand(other)
        return self._from_native_series(floordiv_compat(ser, other))

    def __rfloordiv__(self, other: Any) -> Self:
        ser = self._native_series
        other = validate_column_comparand(other)
        return self._from_native_series(floordiv_compat(other, ser))

    def __truediv__(self, other: Any) -> Self:
        import pyarrow as pa  # ignore-banned-import()
        import pyarrow.compute as pc  # ignore-banned-import()

        ser = self._native_series
        other = validate_column_comparand(other)
        if not isinstance(other, (pa.Array, pa.ChunkedArray)):
            # scalar
            other = pa.scalar(other)
        # cast_for_truediv promotes ints to floats so `/` is true division.
        return self._from_native_series(pc.divide(*cast_for_truediv(ser, other)))

    def __rtruediv__(self, other: Any) -> Self:
        import pyarrow as pa  # ignore-banned-import()
        import pyarrow.compute as pc  # ignore-banned-import()

        ser = self._native_series
        other = validate_column_comparand(other)
        if not isinstance(other, (pa.Array, pa.ChunkedArray)):
            # scalar
            other = pa.scalar(other)
        return self._from_native_series(pc.divide(*cast_for_truediv(other, ser)))

    def __mod__(self, other: Any) -> Self:
        import pyarrow.compute as pc  # ignore-banned-import()

        # a % b == a - (a // b) * b, reusing floor-division semantics above.
        ser = self._native_series
        other = validate_column_comparand(other)
        floor_div = (self // other)._native_series
        res = pc.subtract(ser, pc.multiply(floor_div, other))
        return self._from_native_series(res)

    def __rmod__(self, other: Any) -> Self:
        import pyarrow.compute as pc  # ignore-banned-import()

        ser = self._native_series
        other = validate_column_comparand(other)
        floor_div = (other // self)._native_series
        res = pc.subtract(other, pc.multiply(floor_div, ser))
        return self._from_native_series(res)

    def __invert__(self) -> Self:
        import pyarrow.compute as pc  # ignore-banned-import()

        return self._from_native_series(pc.invert(self._native_series))

    # --- reductions and element-wise operations ----------------------------

    def len(self) -> int:
        return len(self._native_series)

    def filter(self, other: Any) -> Self:
        # A plain list[bool] mask is passed straight to pyarrow; anything
        # else (e.g. an ArrowSeries mask) is unwrapped first.
        if not (isinstance(other, list) and all(isinstance(x, bool) for x in other)):
            other = validate_column_comparand(other)
        return self._from_native_series(self._native_series.filter(other))

    # NOTE(review): the reductions below are annotated `-> int` but return
    # pyarrow scalars (hence the `type: ignore` comments) — annotations kept
    # as-is to match the vendored upstream source.

    def mean(self) -> int:
        import pyarrow.compute as pc  # ignore-banned-import()

        return pc.mean(self._native_series)  # type: ignore[no-any-return]

    def min(self) -> int:
        import pyarrow.compute as pc  # ignore-banned-import()

        return pc.min(self._native_series)  # type: ignore[no-any-return]

    def max(self) -> int:
        import pyarrow.compute as pc  # ignore-banned-import()

        return pc.max(self._native_series)  # type: ignore[no-any-return]

    def sum(self) -> int:
        import pyarrow.compute as pc  # ignore-banned-import()

        return pc.sum(self._native_series)  # type: ignore[no-any-return]

    def drop_nulls(self) -> ArrowSeries:
        import pyarrow.compute as pc  # ignore-banned-import()

        return self._from_native_series(pc.drop_null(self._native_series))

    def shift(self, n: int) -> Self:
        """Shift values by `n` positions, padding the gap with nulls."""
        import pyarrow as pa  # ignore-banned-import()

        ca = self._native_series

        if n > 0:
            result = pa.concat_arrays([pa.nulls(n, ca.type), *ca[:-n].chunks])
        elif n < 0:
            result = pa.concat_arrays([*ca[-n:].chunks, pa.nulls(-n, ca.type)])
        else:
            result = ca
        return self._from_native_series(result)

    def std(self, ddof: int = 1) -> int:
        import pyarrow.compute as pc  # ignore-banned-import()

        return pc.stddev(self._native_series, ddof=ddof)  # type: ignore[no-any-return]

    def count(self) -> int:
        import pyarrow.compute as pc  # ignore-banned-import()

        return pc.count(self._native_series)  # type: ignore[no-any-return]

    def n_unique(self) -> int:
        import pyarrow.compute as pc  # ignore-banned-import()

        # mode="all" counts a null group as a distinct value.
        unique_values = pc.unique(self._native_series)
        return pc.count(unique_values, mode="all")  # type: ignore[no-any-return]

    def __native_namespace__(self) -> Any:  # pragma: no cover
        return get_pyarrow()

    @property
    def name(self) -> str:
        return self._name

    def __narwhals_series__(self) -> Self:
        return self

    @overload
    def __getitem__(self, idx: int) -> Any: ...

    @overload
    def __getitem__(self, idx: slice | Sequence[int]) -> Self: ...

    def __getitem__(self, idx: int | slice | Sequence[int]) -> Any | Self:
        # int -> scalar; sequence of indices -> take; slice -> native slicing.
        if isinstance(idx, int):
            return self._native_series[idx]
        if isinstance(idx, Sequence):
            return self._from_native_series(self._native_series.take(idx))
        return self._from_native_series(self._native_series[idx])

    def scatter(self, indices: int | Sequence[int], values: Any) -> Self:
        """Return a copy with `values` written at `indices` (out-of-place)."""
        import numpy as np  # ignore-banned-import
        import pyarrow as pa  # ignore-banned-import
        import pyarrow.compute as pc  # ignore-banned-import

        ca = self._native_series
        # Boolean mask marking the positions to overwrite.
        mask = np.zeros(len(ca), dtype=bool)
        mask[indices] = True
        if isinstance(values, self.__class__):
            values = validate_column_comparand(values)
        if isinstance(values, pa.ChunkedArray):
            values = values.combine_chunks()
        if not isinstance(values, pa.Array):
            values = pa.array(values)
        result = pc.replace_with_mask(ca, mask, values.take(indices))
        return self._from_native_series(result)

    def to_list(self) -> Any:
        return self._native_series.to_pylist()

    def __array__(self, dtype: Any = None, copy: bool | None = None) -> Any:
        return self._native_series.__array__(dtype=dtype, copy=copy)

    def to_numpy(self) -> Any:
        return self._native_series.to_numpy()

    def alias(self, name: str) -> Self:
        # Only the narwhals-level name changes; the native data is shared.
        return self.__class__(
            self._native_series,
            name=name,
            backend_version=self._backend_version,
        )

    @property
    def dtype(self: Self) -> DType:
        return translate_dtype(self._native_series.type)

    def abs(self) -> Self:
        import pyarrow.compute as pc  # ignore-banned-import()

        return self._from_native_series(pc.abs(self._native_series))

    def cum_sum(self) -> Self:
        import pyarrow.compute as pc  # ignore-banned-import()

        return self._from_native_series(pc.cumulative_sum(self._native_series))

    def round(self, decimals: int) -> Self:
        import pyarrow.compute as pc  # ignore-banned-import()

        # half_towards_infinity matches polars' rounding of .5 away from zero.
        return self._from_native_series(
            pc.round(self._native_series, decimals, round_mode="half_towards_infinity")
        )

    def diff(self) -> Self:
        import pyarrow.compute as pc  # ignore-banned-import()

        # pairwise_diff requires a contiguous array, hence combine_chunks.
        return self._from_native_series(
            pc.pairwise_diff(self._native_series.combine_chunks())
        )

    def any(self) -> bool:
        import pyarrow.compute as pc  # ignore-banned-import()

        return pc.any(self._native_series)  # type: ignore[no-any-return]

    def all(self) -> bool:
        import pyarrow.compute as pc  # ignore-banned-import()

        return pc.all(self._native_series)  # type: ignore[no-any-return]

    def is_between(
        self, lower_bound: Any, upper_bound: Any, closed: str = "both"
    ) -> Self:
        """Elementwise bounds check; `closed` picks which endpoints are inclusive."""
        import pyarrow.compute as pc  # ignore-banned-import()

        ser = self._native_series
        if closed == "left":
            ge = pc.greater_equal(ser, lower_bound)
            lt = pc.less(ser, upper_bound)
            res = pc.and_kleene(ge, lt)
        elif closed == "right":
            gt = pc.greater(ser, lower_bound)
            le = pc.less_equal(ser, upper_bound)
            res = pc.and_kleene(gt, le)
        elif closed == "none":
            gt = pc.greater(ser, lower_bound)
            lt = pc.less(ser, upper_bound)
            res = pc.and_kleene(gt, lt)
        elif closed == "both":
            ge = pc.greater_equal(ser, lower_bound)
            le = pc.less_equal(ser, upper_bound)
            res = pc.and_kleene(ge, le)
        else:  # pragma: no cover
            raise AssertionError
        return self._from_native_series(res)

    def is_empty(self) -> bool:
        return len(self) == 0

    def is_null(self) -> Self:
        ser = self._native_series
        return self._from_native_series(ser.is_null())

    def cast(self, dtype: DType) -> Self:
        import pyarrow.compute as pc  # ignore-banned-import()

        ser = self._native_series
        dtype = narwhals_to_native_dtype(dtype)
        return self._from_native_series(pc.cast(ser, dtype))

    def null_count(self: Self) -> int:
        return self._native_series.null_count  # type: ignore[no-any-return]

    def head(self, n: int) -> Self:
        # Negative n means "all but the last |n| rows", like polars.
        ser = self._native_series
        if n >= 0:
            return self._from_native_series(ser.slice(0, n))
        else:
            num_rows = len(ser)
            return self._from_native_series(ser.slice(0, max(0, num_rows + n)))
+ + def tail(self, n: int) -> Self: + ser = self._native_series + if n >= 0: + num_rows = len(ser) + return self._from_native_series(ser.slice(max(0, num_rows - n))) + else: + return self._from_native_series(ser.slice(abs(n))) + + def is_in(self, other: Any) -> Self: + import pyarrow as pa # ignore-banned-import() + import pyarrow.compute as pc # ignore-banned-import() + + value_set = pa.array(other) + ser = self._native_series + return self._from_native_series(pc.is_in(ser, value_set=value_set)) + + def arg_true(self) -> Self: + import numpy as np # ignore-banned-import + + ser = self._native_series + res = np.flatnonzero(ser) + return self._from_iterable( + res, name=self.name, backend_version=self._backend_version + ) + + def item(self: Self, index: int | None = None) -> Any: + if index is None: + if len(self) != 1: + msg = ( + "can only call '.item()' if the Series is of length 1," + f" or an explicit index is provided (Series is of length {len(self)})" + ) + raise ValueError(msg) + return self._native_series[0] + return self._native_series[index] + + def value_counts( + self: Self, + *, + sort: bool = False, + parallel: bool = False, + name: str | None = None, + normalize: bool = False, + ) -> ArrowDataFrame: + """Parallel is unused, exists for compatibility""" + import pyarrow as pa # ignore-banned-import() + import pyarrow.compute as pc # ignore-banned-import() + + from narwhals._arrow.dataframe import ArrowDataFrame + + index_name_ = "index" if self._name is None else self._name + value_name_ = name or ("proportion" if normalize else "count") + + val_count = pc.value_counts(self._native_series) + values = val_count.field("values") + counts = val_count.field("counts") + + if normalize: + counts = pc.divide(*cast_for_truediv(counts, pc.sum(counts))) + + val_count = pa.Table.from_arrays( + [values, counts], names=[index_name_, value_name_] + ) + + if sort: + val_count = val_count.sort_by([(value_name_, "descending")]) + + return ArrowDataFrame( + val_count, + 
backend_version=self._backend_version, + ) + + def zip_with(self: Self, mask: Self, other: Self) -> Self: + import pyarrow.compute as pc # ignore-banned-import() + + mask = mask._native_series.combine_chunks() + return self._from_native_series( + pc.if_else( + mask, + self._native_series, + other._native_series, + ) + ) + + def sample( + self: Self, + n: int | None = None, + *, + fraction: float | None = None, + with_replacement: bool = False, + seed: int | None = None, + ) -> Self: + import numpy as np # ignore-banned-import + import pyarrow.compute as pc # ignore-banned-import() + + ser = self._native_series + num_rows = len(self) + + if n is None and fraction is not None: + n = int(num_rows * fraction) + + rng = np.random.default_rng(seed=seed) + idx = np.arange(0, num_rows) + mask = rng.choice(idx, size=n, replace=with_replacement) + + return self._from_native_series(pc.take(ser, mask)) + + def fill_null(self: Self, value: Any) -> Self: + import pyarrow as pa # ignore-banned-import() + import pyarrow.compute as pc # ignore-banned-import() + + ser = self._native_series + dtype = ser.type + + return self._from_native_series(pc.fill_null(ser, pa.scalar(value, dtype))) + + def to_frame(self: Self) -> ArrowDataFrame: + import pyarrow as pa # ignore-banned-import() + + from narwhals._arrow.dataframe import ArrowDataFrame + + df = pa.Table.from_arrays([self._native_series], names=[self.name]) + return ArrowDataFrame(df, backend_version=self._backend_version) + + def to_pandas(self: Self) -> Any: + pd = get_pandas() + return pd.Series(self._native_series, name=self.name) + + def is_duplicated(self: Self) -> ArrowSeries: + return self.to_frame().is_duplicated().alias(self.name) + + def is_unique(self: Self) -> ArrowSeries: + return self.to_frame().is_unique().alias(self.name) + + def is_first_distinct(self: Self) -> Self: + import numpy as np # ignore-banned-import + import pyarrow as pa # ignore-banned-import() + import pyarrow.compute as pc # ignore-banned-import() + 
+ row_number = pa.array(np.arange(len(self))) + col_token = generate_unique_token(n_bytes=8, columns=[self.name]) + first_distinct_index = ( + pa.Table.from_arrays([self._native_series], names=[self.name]) + .append_column(col_token, row_number) + .group_by(self.name) + .aggregate([(col_token, "min")]) + .column(f"{col_token}_min") + ) + + return self._from_native_series(pc.is_in(row_number, first_distinct_index)) + + def is_last_distinct(self: Self) -> Self: + import numpy as np # ignore-banned-import + import pyarrow as pa # ignore-banned-import() + import pyarrow.compute as pc # ignore-banned-import() + + row_number = pa.array(np.arange(len(self))) + col_token = generate_unique_token(n_bytes=8, columns=[self.name]) + last_distinct_index = ( + pa.Table.from_arrays([self._native_series], names=[self.name]) + .append_column(col_token, row_number) + .group_by(self.name) + .aggregate([(col_token, "max")]) + .column(f"{col_token}_max") + ) + + return self._from_native_series(pc.is_in(row_number, last_distinct_index)) + + def is_sorted(self: Self, *, descending: bool = False) -> bool: + if not isinstance(descending, bool): + msg = f"argument 'descending' should be boolean, found {type(descending)}" + raise TypeError(msg) + import pyarrow.compute as pc # ignore-banned-import() + + ser = self._native_series + if descending: + return pc.all(pc.greater_equal(ser[:-1], ser[1:])) # type: ignore[no-any-return] + else: + return pc.all(pc.less_equal(ser[:-1], ser[1:])) # type: ignore[no-any-return] + + def unique(self: Self) -> ArrowSeries: + import pyarrow.compute as pc # ignore-banned-import() + + return self._from_native_series(pc.unique(self._native_series)) + + def sort( + self: Self, *, descending: bool = False, nulls_last: bool = False + ) -> ArrowSeries: + import pyarrow.compute as pc # ignore-banned-import() + + series = self._native_series + order = "descending" if descending else "ascending" + null_placement = "at_end" if nulls_last else "at_start" + sorted_indices = 
pc.array_sort_indices( + series, order=order, null_placement=null_placement + ) + + return self._from_native_series(pc.take(series, sorted_indices)) + + def to_dummies( + self: Self, *, separator: str = "_", drop_first: bool = False + ) -> ArrowDataFrame: + import numpy as np # ignore-banned-import + import pyarrow as pa # ignore-banned-import() + + from narwhals._arrow.dataframe import ArrowDataFrame + + series = self._native_series + da = series.dictionary_encode().combine_chunks() + + columns = np.zeros((len(da.dictionary), len(da)), np.uint8) + columns[da.indices, np.arange(len(da))] = 1 + names = [f"{self._name}{separator}{v}" for v in da.dictionary] + + return ArrowDataFrame( + pa.Table.from_arrays(columns, names=names), + backend_version=self._backend_version, + ).select(*sorted(names)[int(drop_first) :]) + + def quantile( + self: Self, + quantile: float, + interpolation: Literal["nearest", "higher", "lower", "midpoint", "linear"], + ) -> Any: + import pyarrow.compute as pc # ignore-banned-import() + + return pc.quantile(self._native_series, q=quantile, interpolation=interpolation)[ + 0 + ] + + def gather_every(self: Self, n: int, offset: int = 0) -> Self: + return self._from_native_series(self._native_series[offset::n]) + + def clip( + self: Self, lower_bound: Any | None = None, upper_bound: Any | None = None + ) -> Self: + import pyarrow as pa # ignore-banned-import() + import pyarrow.compute as pc # ignore-banned-import() + + arr = self._native_series + arr = pc.max_element_wise(arr, pa.scalar(lower_bound, type=arr.type)) + arr = pc.min_element_wise(arr, pa.scalar(upper_bound, type=arr.type)) + + return self._from_native_series(arr) + + def to_arrow(self: Self) -> pa.Array: + return self._native_series.combine_chunks() + + def mode(self: Self) -> ArrowSeries: + plx = self.__narwhals_namespace__() + col_token = generate_unique_token(n_bytes=8, columns=[self.name]) + return self.value_counts(name=col_token, normalize=False).filter( + plx.col(col_token) == 
plx.col(col_token).max() + )[self.name] + + def __iter__(self: Self) -> Iterator[Any]: + yield from self._native_series.__iter__() + + @property + def shape(self) -> tuple[int]: + return (len(self._native_series),) + + @property + def dt(self) -> ArrowSeriesDateTimeNamespace: + return ArrowSeriesDateTimeNamespace(self) + + @property + def cat(self) -> ArrowSeriesCatNamespace: + return ArrowSeriesCatNamespace(self) + + @property + def str(self) -> ArrowSeriesStringNamespace: + return ArrowSeriesStringNamespace(self) + + +class ArrowSeriesDateTimeNamespace: + def __init__(self: Self, series: ArrowSeries) -> None: + self._arrow_series = series + + def to_string(self: Self, format: str) -> ArrowSeries: # noqa: A002 + import pyarrow.compute as pc # ignore-banned-import() + + # PyArrow differs from other libraries in that %S also prints out + # the fractional part of the second...:'( + # https://arrow.apache.org/docs/python/generated/pyarrow.compute.strftime.html + format = format.replace("%S.%f", "%S").replace("%S%.f", "%S") + return self._arrow_series._from_native_series( + pc.strftime(self._arrow_series._native_series, format) + ) + + def date(self: Self) -> ArrowSeries: + import pyarrow as pa # ignore-banned-import() + + return self._arrow_series._from_native_series( + self._arrow_series._native_series.cast(pa.date64()) + ) + + def year(self: Self) -> ArrowSeries: + import pyarrow.compute as pc # ignore-banned-import() + + return self._arrow_series._from_native_series( + pc.year(self._arrow_series._native_series) + ) + + def month(self: Self) -> ArrowSeries: + import pyarrow.compute as pc # ignore-banned-import() + + return self._arrow_series._from_native_series( + pc.month(self._arrow_series._native_series) + ) + + def day(self: Self) -> ArrowSeries: + import pyarrow.compute as pc # ignore-banned-import() + + return self._arrow_series._from_native_series( + pc.day(self._arrow_series._native_series) + ) + + def hour(self: Self) -> ArrowSeries: + import 
pyarrow.compute as pc # ignore-banned-import() + + return self._arrow_series._from_native_series( + pc.hour(self._arrow_series._native_series) + ) + + def minute(self: Self) -> ArrowSeries: + import pyarrow.compute as pc # ignore-banned-import() + + return self._arrow_series._from_native_series( + pc.minute(self._arrow_series._native_series) + ) + + def second(self: Self) -> ArrowSeries: + import pyarrow.compute as pc # ignore-banned-import() + + return self._arrow_series._from_native_series( + pc.second(self._arrow_series._native_series) + ) + + def millisecond(self: Self) -> ArrowSeries: + import pyarrow.compute as pc # ignore-banned-import() + + return self._arrow_series._from_native_series( + pc.millisecond(self._arrow_series._native_series) + ) + + def microsecond(self: Self) -> ArrowSeries: + import pyarrow.compute as pc # ignore-banned-import() + + arr = self._arrow_series._native_series + result = pc.add(pc.multiply(pc.millisecond(arr), 1000), pc.microsecond(arr)) + + return self._arrow_series._from_native_series(result) + + def nanosecond(self: Self) -> ArrowSeries: + import pyarrow.compute as pc # ignore-banned-import() + + arr = self._arrow_series._native_series + result = pc.add( + pc.multiply(self.microsecond()._native_series, 1000), pc.nanosecond(arr) + ) + return self._arrow_series._from_native_series(result) + + def ordinal_day(self: Self) -> ArrowSeries: + import pyarrow.compute as pc # ignore-banned-import() + + return self._arrow_series._from_native_series( + pc.day_of_year(self._arrow_series._native_series) + ) + + def total_minutes(self: Self) -> ArrowSeries: + import pyarrow as pa # ignore-banned-import() + import pyarrow.compute as pc # ignore-banned-import() + + arr = self._arrow_series._native_series + unit = arr.type.unit + + unit_to_minutes_factor = { + "s": 60, # seconds + "ms": 60 * 1e3, # milli + "us": 60 * 1e6, # micro + "ns": 60 * 1e9, # nano + } + + factor = pa.scalar(unit_to_minutes_factor[unit], type=pa.int64()) + return 
self._arrow_series._from_native_series( + pc.cast(pc.divide(arr, factor), pa.int64()) + ) + + def total_seconds(self: Self) -> ArrowSeries: + import pyarrow as pa # ignore-banned-import() + import pyarrow.compute as pc # ignore-banned-import() + + arr = self._arrow_series._native_series + unit = arr.type.unit + + unit_to_seconds_factor = { + "s": 1, # seconds + "ms": 1e3, # milli + "us": 1e6, # micro + "ns": 1e9, # nano + } + factor = pa.scalar(unit_to_seconds_factor[unit], type=pa.int64()) + + return self._arrow_series._from_native_series( + pc.cast(pc.divide(arr, factor), pa.int64()) + ) + + def total_milliseconds(self: Self) -> ArrowSeries: + import pyarrow as pa # ignore-banned-import() + import pyarrow.compute as pc # ignore-banned-import() + + arr = self._arrow_series._native_series + unit = arr.type.unit + + unit_to_milli_factor = { + "s": 1e3, # seconds + "ms": 1, # milli + "us": 1e3, # micro + "ns": 1e6, # nano + } + + factor = pa.scalar(unit_to_milli_factor[unit], type=pa.int64()) + + if unit == "s": + return self._arrow_series._from_native_series( + pc.cast(pc.multiply(arr, factor), pa.int64()) + ) + + return self._arrow_series._from_native_series( + pc.cast(pc.divide(arr, factor), pa.int64()) + ) + + def total_microseconds(self: Self) -> ArrowSeries: + import pyarrow as pa # ignore-banned-import() + import pyarrow.compute as pc # ignore-banned-import() + + arr = self._arrow_series._native_series + unit = arr.type.unit + + unit_to_micro_factor = { + "s": 1e6, # seconds + "ms": 1e3, # milli + "us": 1, # micro + "ns": 1e3, # nano + } + + factor = pa.scalar(unit_to_micro_factor[unit], type=pa.int64()) + + if unit in {"s", "ms"}: + return self._arrow_series._from_native_series( + pc.cast(pc.multiply(arr, factor), pa.int64()) + ) + return self._arrow_series._from_native_series( + pc.cast(pc.divide(arr, factor), pa.int64()) + ) + + def total_nanoseconds(self: Self) -> ArrowSeries: + import pyarrow as pa # ignore-banned-import() + import pyarrow.compute as pc # 
ignore-banned-import() + + arr = self._arrow_series._native_series + unit = arr.type.unit + + unit_to_nano_factor = { + "s": 1e9, # seconds + "ms": 1e6, # milli + "us": 1e3, # micro + "ns": 1, # nano + } + + factor = pa.scalar(unit_to_nano_factor[unit], type=pa.int64()) + + return self._arrow_series._from_native_series( + pc.cast(pc.multiply(arr, factor), pa.int64()) + ) + + +class ArrowSeriesCatNamespace: + def __init__(self, series: ArrowSeries) -> None: + self._arrow_series = series + + def get_categories(self) -> ArrowSeries: + import pyarrow as pa # ignore-banned-import() + + ca = self._arrow_series._native_series + # TODO(Unassigned): this looks potentially expensive - is there no better way? + # https://github.com/narwhals-dev/narwhals/issues/464 + out = pa.chunked_array( + [pa.concat_arrays([x.dictionary for x in ca.chunks]).unique()] + ) + return self._arrow_series._from_native_series(out) + + +class ArrowSeriesStringNamespace: + def __init__(self: Self, series: ArrowSeries) -> None: + self._arrow_series = series + + def len_chars(self) -> ArrowSeries: + import pyarrow.compute as pc # ignore-banned-import() + + return self._arrow_series._from_native_series( + pc.utf8_length(self._arrow_series._native_series) + ) + + def replace( + self, pattern: str, value: str, *, literal: bool = False, n: int = 1 + ) -> ArrowSeries: + import pyarrow.compute as pc # ignore-banned-import() + + method = "replace_substring" if literal else "replace_substring_regex" + return self._arrow_series._from_native_series( + getattr(pc, method)( + self._arrow_series._native_series, + pattern=pattern, + replacement=value, + max_replacements=n, + ) + ) + + def replace_all( + self, pattern: str, value: str, *, literal: bool = False + ) -> ArrowSeries: + return self.replace(pattern, value, literal=literal, n=-1) + + def strip_chars(self: Self, characters: str | None = None) -> ArrowSeries: + import pyarrow.compute as pc # ignore-banned-import() + + whitespace = " \t\n\r\v\f" + return 
self._arrow_series._from_native_series( + pc.utf8_trim( + self._arrow_series._native_series, + characters or whitespace, + ) + ) + + def starts_with(self: Self, prefix: str) -> ArrowSeries: + import pyarrow.compute as pc # ignore-banned-import() + + return self._arrow_series._from_native_series( + pc.equal(self.slice(0, len(prefix))._native_series, prefix) + ) + + def ends_with(self: Self, suffix: str) -> ArrowSeries: + import pyarrow.compute as pc # ignore-banned-import() + + return self._arrow_series._from_native_series( + pc.equal(self.slice(-len(suffix))._native_series, suffix) + ) + + def contains(self: Self, pattern: str, *, literal: bool = False) -> ArrowSeries: + import pyarrow.compute as pc # ignore-banned-import() + + check_func = pc.match_substring if literal else pc.match_substring_regex + return self._arrow_series._from_native_series( + check_func(self._arrow_series._native_series, pattern) + ) + + def slice(self: Self, offset: int, length: int | None = None) -> ArrowSeries: + import pyarrow.compute as pc # ignore-banned-import() + + stop = offset + length if length else None + return self._arrow_series._from_native_series( + pc.utf8_slice_codeunits( + self._arrow_series._native_series, start=offset, stop=stop + ), + ) + + def to_datetime(self: Self, format: str | None = None) -> ArrowSeries: # noqa: A002 + import pyarrow.compute as pc # ignore-banned-import() + + return self._arrow_series._from_native_series( + pc.strptime(self._arrow_series._native_series, format=format, unit="us") + ) + + def to_uppercase(self: Self) -> ArrowSeries: + import pyarrow.compute as pc # ignore-banned-import() + + return self._arrow_series._from_native_series( + pc.utf8_upper(self._arrow_series._native_series), + ) + + def to_lowercase(self: Self) -> ArrowSeries: + import pyarrow.compute as pc # ignore-banned-import() + + return self._arrow_series._from_native_series( + pc.utf8_lower(self._arrow_series._native_series), + ) diff --git 
a/parrot/lib/python3.10/site-packages/narwhals/_arrow/typing.py b/parrot/lib/python3.10/site-packages/narwhals/_arrow/typing.py new file mode 100644 index 0000000000000000000000000000000000000000..ab68e044ee9771ed89b15d83a9b71c5b47e6b94d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/narwhals/_arrow/typing.py @@ -0,0 +1,17 @@ +from __future__ import annotations # pragma: no cover + +from typing import TYPE_CHECKING # pragma: no cover +from typing import Union # pragma: no cover + +if TYPE_CHECKING: + import sys + + if sys.version_info >= (3, 10): + from typing import TypeAlias + else: + from typing_extensions import TypeAlias + + from narwhals._arrow.expr import ArrowExpr + from narwhals._arrow.series import ArrowSeries + + IntoArrowExpr: TypeAlias = Union[ArrowExpr, str, int, float, ArrowSeries] diff --git a/parrot/lib/python3.10/site-packages/narwhals/_dask/__init__.py b/parrot/lib/python3.10/site-packages/narwhals/_dask/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..272b8701f77f67ee3eee254d84d468f04e03124b Binary files /dev/null and b/parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/dataframe.cpython-310.pyc b/parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/dataframe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f159e4f8f84b9b00754816cae1f10fc05b8ae19 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/dataframe.cpython-310.pyc differ diff --git 
a/parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/expr.cpython-310.pyc b/parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/expr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2152be54d246e907acb53abc41ee77bc72e23ae Binary files /dev/null and b/parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/expr.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/group_by.cpython-310.pyc b/parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/group_by.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e55ac782a9a30448ee37f27e660bd775037d23b Binary files /dev/null and b/parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/group_by.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/namespace.cpython-310.pyc b/parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/namespace.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f94f860550a458c992fd297de8727c37451b6977 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/namespace.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/selectors.cpython-310.pyc b/parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/selectors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..43f1df71cdfc0fe143f516dbc3903909a9e79b22 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/selectors.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/typing.cpython-310.pyc b/parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/typing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..958e7fba9984585ee7566c28a4fe8e4dc0c501a9 Binary files 
/dev/null and b/parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/typing.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/utils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fac10b438f13059287a6ac168db47134cf94fb6b Binary files /dev/null and b/parrot/lib/python3.10/site-packages/narwhals/_dask/__pycache__/utils.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/narwhals/_dask/dataframe.py b/parrot/lib/python3.10/site-packages/narwhals/_dask/dataframe.py new file mode 100644 index 0000000000000000000000000000000000000000..ac10ac2b84d6e17f712cef6abaf7ba258efe8c26 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/narwhals/_dask/dataframe.py @@ -0,0 +1,357 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Any +from typing import Iterable +from typing import Literal +from typing import Sequence + +from narwhals._dask.utils import add_row_index +from narwhals._dask.utils import parse_exprs_and_named_exprs +from narwhals._pandas_like.utils import translate_dtype +from narwhals.dependencies import get_dask_dataframe +from narwhals.dependencies import get_pandas +from narwhals.utils import Implementation +from narwhals.utils import flatten +from narwhals.utils import generate_unique_token +from narwhals.utils import parse_columns_to_drop +from narwhals.utils import parse_version + +if TYPE_CHECKING: + import dask.dataframe as dd + from typing_extensions import Self + + from narwhals._dask.expr import DaskExpr + from narwhals._dask.group_by import DaskLazyGroupBy + from narwhals._dask.namespace import DaskNamespace + from narwhals._dask.typing import IntoDaskExpr + from narwhals.dtypes import DType + + +class DaskLazyFrame: + def __init__( + self, native_dataframe: dd.DataFrame, *, backend_version: tuple[int, ...] 
+ ) -> None: + self._native_frame = native_dataframe + self._backend_version = backend_version + self._implementation = Implementation.DASK + + def __native_namespace__(self) -> Any: # pragma: no cover + return get_dask_dataframe() + + def __narwhals_namespace__(self) -> DaskNamespace: + from narwhals._dask.namespace import DaskNamespace + + return DaskNamespace(backend_version=self._backend_version) + + def __narwhals_lazyframe__(self) -> Self: + return self + + def _from_native_frame(self, df: Any) -> Self: + return self.__class__(df, backend_version=self._backend_version) + + def with_columns(self, *exprs: DaskExpr, **named_exprs: DaskExpr) -> Self: + df = self._native_frame + new_series = parse_exprs_and_named_exprs(self, *exprs, **named_exprs) + df = df.assign(**new_series) + return self._from_native_frame(df) + + def collect(self) -> Any: + from narwhals._pandas_like.dataframe import PandasLikeDataFrame + + result = self._native_frame.compute() + return PandasLikeDataFrame( + result, + implementation=Implementation.PANDAS, + backend_version=parse_version(get_pandas().__version__), + ) + + @property + def columns(self) -> list[str]: + return self._native_frame.columns.tolist() # type: ignore[no-any-return] + + def filter( + self, + *predicates: DaskExpr, + ) -> Self: + if ( + len(predicates) == 1 + and isinstance(predicates[0], list) + and all(isinstance(x, bool) for x in predicates[0]) + ): + msg = ( + "`LazyFrame.filter` is not supported for Dask backend with boolean masks." + ) + raise NotImplementedError(msg) + + from narwhals._dask.namespace import DaskNamespace + + plx = DaskNamespace(backend_version=self._backend_version) + expr = plx.all_horizontal(*predicates) + # Safety: all_horizontal's expression only returns a single column. 
+ mask = expr._call(self)[0] + return self._from_native_frame(self._native_frame.loc[mask]) + + def lazy(self) -> Self: + return self + + def select( + self: Self, + *exprs: IntoDaskExpr, + **named_exprs: IntoDaskExpr, + ) -> Self: + import dask.dataframe as dd # ignore-banned-import + + if exprs and all(isinstance(x, str) for x in exprs) and not named_exprs: + # This is a simple slice => fastpath! + return self._from_native_frame(self._native_frame.loc[:, exprs]) + + new_series = parse_exprs_and_named_exprs(self, *exprs, **named_exprs) + + if not new_series: + # return empty dataframe, like Polars does + import pandas as pd # ignore-banned-import + + return self._from_native_frame( + dd.from_pandas(pd.DataFrame(), npartitions=self._native_frame.npartitions) + ) + + if all(getattr(expr, "_returns_scalar", False) for expr in exprs) and all( + getattr(val, "_returns_scalar", False) for val in named_exprs.values() + ): + df = dd.concat( + [val.to_series().rename(name) for name, val in new_series.items()], axis=1 + ) + return self._from_native_frame(df) + + df = self._native_frame.assign(**new_series).loc[:, list(new_series.keys())] + return self._from_native_frame(df) + + def drop_nulls(self: Self, subset: str | list[str] | None) -> Self: + if subset is None: + return self._from_native_frame(self._native_frame.dropna()) + subset = [subset] if isinstance(subset, str) else subset + plx = self.__narwhals_namespace__() + return self.filter(~plx.any_horizontal(plx.col(*subset).is_null())) + + @property + def schema(self) -> dict[str, DType]: + return { + col: translate_dtype(self._native_frame.loc[:, col]) + for col in self._native_frame.columns + } + + def collect_schema(self) -> dict[str, DType]: + return self.schema + + def drop(self: Self, columns: list[str], strict: bool) -> Self: # noqa: FBT001 + to_drop = parse_columns_to_drop( + compliant_frame=self, columns=columns, strict=strict + ) + + return self._from_native_frame(self._native_frame.drop(columns=to_drop)) + + 
    def with_row_index(self: Self, name: str) -> Self:
        """Add a 0-based row-index column called `name`.

        Implementation is based on the following StackOverflow reply:
        https://stackoverflow.com/questions/60831518/in-dask-how-does-one-add-a-range-of-integersauto-increment-to-a-new-column/60852409#60852409
        """
        return self._from_native_frame(add_row_index(self._native_frame, name))

    def rename(self: Self, mapping: dict[str, str]) -> Self:
        """Rename columns according to `mapping` (old name -> new name)."""
        return self._from_native_frame(self._native_frame.rename(columns=mapping))

    def head(self: Self, n: int) -> Self:
        """Return the first `n` rows, staying lazy (`compute=False`).

        `npartitions=-1` lets dask look across all partitions for the rows.
        """
        return self._from_native_frame(
            self._native_frame.head(n=n, compute=False, npartitions=-1)
        )

    def unique(
        self: Self,
        subset: str | list[str] | None,
        *,
        keep: Literal["any", "first", "last", "none"] = "any",
        maintain_order: bool = False,
    ) -> Self:
        """
        NOTE:
            The param `maintain_order` is only here for compatibility with the polars API
            and has no effect on the output.
        """
        subset = flatten(subset) if subset else None
        native_frame = self._native_frame
        if keep == "none":
            # keep="none" drops *every* duplicated row: count group sizes under a
            # collision-free temporary column name, keep only size-1 groups, and
            # inner-join back to recover the original rows.
            subset = subset or self.columns
            token = generate_unique_token(n_bytes=8, columns=subset)
            ser = native_frame.groupby(subset).size().rename(token)
            ser = ser.loc[ser == 1]
            unique = ser.reset_index().drop(columns=token)
            result = native_frame.merge(unique, on=subset, how="inner")
        else:
            # "any" maps to pandas/dask "first"; "first"/"last" pass through.
            mapped_keep = {"any": "first"}.get(keep, keep)
            result = native_frame.drop_duplicates(subset=subset, keep=mapped_keep)
        return self._from_native_frame(result)

    def sort(
        self: Self,
        by: str | Iterable[str],
        *more_by: str,
        descending: bool | Sequence[bool] = False,
    ) -> Self:
        """Sort by one or more keys; `descending` may be a single bool or one per key."""
        flat_keys = flatten([*flatten([by]), *more_by])
        df = self._native_frame
        # dask/pandas speak "ascending", polars speaks "descending" - invert.
        if isinstance(descending, bool):
            ascending: bool | list[bool] = not descending
        else:
            ascending = [not d for d in descending]
        return self._from_native_frame(df.sort_values(flat_keys, ascending=ascending))

    def join(
        self: Self,
        other: Self,
        *,
        how: Literal["left", "inner", "outer", "cross",
"anti", "semi"] = "inner",
        left_on: str | list[str] | None,
        right_on: str | list[str] | None,
        suffix: str,
    ) -> Self:
        """Join `other` onto this frame with polars-style `how` semantics.

        cross/anti/semi are emulated on top of pandas-style `merge`; see the
        per-branch comments below.
        """
        # Normalise single key names to lists so zip/merge below are uniform.
        if isinstance(left_on, str):
            left_on = [left_on]
        if isinstance(right_on, str):
            right_on = [right_on]
        if how == "cross":
            # Emulate a cross join: inner-join on a constant dummy column whose
            # name is guaranteed not to collide with either frame's columns.
            key_token = generate_unique_token(
                n_bytes=8, columns=[*self.columns, *other.columns]
            )

            return self._from_native_frame(
                self._native_frame.assign(**{key_token: 0})
                .merge(
                    other._native_frame.assign(**{key_token: 0}),
                    how="inner",
                    left_on=key_token,
                    right_on=key_token,
                    suffixes=("", suffix),
                )
                .drop(columns=key_token),
            )

        if how == "anti":
            # Emulate an anti join: outer merge with `indicator`, then keep rows
            # that appear only on the left side.
            indicator_token = generate_unique_token(
                n_bytes=8, columns=[*self.columns, *other.columns]
            )

            other_native = (
                other._native_frame.loc[:, right_on]
                .rename(  # rename to avoid creating extra columns in join
                    columns=dict(zip(right_on, left_on))  # type: ignore[arg-type]
                )
                .drop_duplicates()
            )
            df = self._native_frame.merge(
                other_native,
                how="outer",
                indicator=indicator_token,
                left_on=left_on,
                right_on=left_on,
            )
            return self._from_native_frame(
                df.loc[df[indicator_token] == "left_only"].drop(columns=[indicator_token])
            )

        if how == "semi":
            # Emulate a semi join: inner-join against the de-duplicated key
            # columns of `other`, so left rows are kept but never multiplied.
            other_native = (
                other._native_frame.loc[:, right_on]
                .rename(  # rename to avoid creating extra columns in join
                    columns=dict(zip(right_on, left_on))  # type: ignore[arg-type]
                )
                .drop_duplicates()  # avoids potential rows duplication from inner join
            )
            return self._from_native_frame(
                self._native_frame.merge(
                    other_native,
                    how="inner",
                    left_on=left_on,
                    right_on=left_on,
                )
            )

        if how == "left":
            other_native = other._native_frame
            result_native = self._native_frame.merge(
                other_native,
                how="left",
                left_on=left_on,
                right_on=right_on,
                suffixes=("", suffix),
            )
            # pandas keeps the right-hand key columns; collect them for dropping
            # to match polars' left-join output shape.
            extra = []
            for left_key, right_key in zip(left_on, right_on):  # type: ignore[arg-type]
                if right_key != left_key and right_key not in self.columns:
                    extra.append(right_key)
                elif right_key != left_key:
                    # key also exists on the left: merge suffixed it.
                    extra.append(f"{right_key}_right")
            return self._from_native_frame(result_native.drop(columns=extra))

        # "inner"/"outer": delegate directly to the native merge.
        return self._from_native_frame(
            self._native_frame.merge(
                other._native_frame,
                left_on=left_on,
                right_on=right_on,
                how=how,
                suffixes=("", suffix),
            ),
        )

    def join_asof(
        self,
        other: Self,
        *,
        left_on: str | None = None,
        right_on: str | None = None,
        on: str | None = None,
        by_left: str | list[str] | None = None,
        by_right: str | list[str] | None = None,
        by: str | list[str] | None = None,
        strategy: Literal["backward", "forward", "nearest"] = "backward",
    ) -> Self:
        """As-of (nearest-key) join, delegating to the native `merge_asof`."""
        plx = self.__native_namespace__()
        return self._from_native_frame(
            plx.merge_asof(
                self._native_frame,
                other._native_frame,
                left_on=left_on,
                right_on=right_on,
                on=on,
                left_by=by_left,
                right_by=by_right,
                by=by,
                direction=strategy,
                suffixes=("", "_right"),
            ),
        )

    def group_by(self, *by: str) -> DaskLazyGroupBy:
        """Start a lazy group-by over the given key columns."""
        from narwhals._dask.group_by import DaskLazyGroupBy

        return DaskLazyGroupBy(self, list(by))

    def tail(self: Self, n: int) -> Self:
        """Return the last `n` rows; only supported for single-partition frames."""
        native_frame = self._native_frame
        n_partitions = native_frame.npartitions

        if n_partitions == 1:
            return self._from_native_frame(self._native_frame.tail(n=n, compute=False))
        else:
            msg = "`LazyFrame.tail` is not supported for Dask backend with multiple partitions."
            raise NotImplementedError(msg)

    def gather_every(self: Self, n: int, offset: int) -> Self:
        """Take every `n`-th row starting at `offset`.

        Implemented by adding a temporary row index (collision-free name),
        filtering on it, then dropping it again.
        """
        row_index_token = generate_unique_token(n_bytes=8, columns=self.columns)
        pln = self.__narwhals_namespace__()
        return (
            self.with_row_index(name=row_index_token)
            .filter(
                pln.col(row_index_token) >= offset,  # type: ignore[operator]
                (pln.col(row_index_token) - offset) % n == 0,  # type: ignore[arg-type]
            )
            .drop([row_index_token], strict=False)
        )
diff --git a/parrot/lib/python3.10/site-packages/narwhals/_dask/namespace.py b/parrot/lib/python3.10/site-packages/narwhals/_dask/namespace.py
new file mode 100644
index 0000000000000000000000000000000000000000..1668ee3235ef21c3a1c8e87befa56bd4009324c2
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/narwhals/_dask/namespace.py
@@ -0,0 +1,331 @@
from __future__ import annotations

from functools import reduce
from typing import TYPE_CHECKING
from typing import Any
from typing import Callable
from typing import Iterable
from typing import NoReturn
from typing import cast

from narwhals import dtypes
from narwhals._dask.dataframe import DaskLazyFrame
from narwhals._dask.expr import DaskExpr
from narwhals._dask.selectors import DaskSelectorNamespace
from narwhals._dask.utils import reverse_translate_dtype
from narwhals._dask.utils import validate_comparand
from narwhals._expression_parsing import parse_into_exprs

if TYPE_CHECKING:
    import dask_expr

    from narwhals._dask.typing import IntoDaskExpr
    from narwhals.dtypes import DType


class DaskNamespace:
    """Expression namespace for the Dask backend (mirrors the `nw.*` top level)."""

    # narwhals dtype classes re-exported so `plx.Int64` etc. resolve here.
    Int64 = dtypes.Int64
    Int32 = dtypes.Int32
    Int16 = dtypes.Int16
    Int8 = dtypes.Int8
    UInt64 = dtypes.UInt64
    UInt32 = dtypes.UInt32
    UInt16 = dtypes.UInt16
    UInt8 = dtypes.UInt8
    Float64 = dtypes.Float64
    Float32 = dtypes.Float32
    Boolean = dtypes.Boolean
    Object = dtypes.Object
    Unknown = dtypes.Unknown
    Categorical = dtypes.Categorical
    Enum = dtypes.Enum
    String = dtypes.String
    Datetime = dtypes.Datetime
    Duration = dtypes.Duration
    Date = dtypes.Date

    @property
    def selectors(self) -> DaskSelectorNamespace:
        """Column-selector sub-namespace (e.g. `plx.selectors.numeric()`)."""
        return DaskSelectorNamespace(backend_version=self._backend_version)

    def __init__(self, *, backend_version: tuple[int, ...]) -> None:
        # backend_version: the installed dask version, used for feature gating.
        self._backend_version = backend_version

    def all(self) -> DaskExpr:
        """Expression selecting every column of the frame."""

        def func(df: DaskLazyFrame) -> list[Any]:
            return [df._native_frame.loc[:, column_name] for column_name in df.columns]

        return DaskExpr(
            func,
            depth=0,
            function_name="all",
            root_names=None,
            output_names=None,
            returns_scalar=False,
            backend_version=self._backend_version,
        )

    def col(self, *column_names: str) -> DaskExpr:
        """Expression selecting the named column(s)."""
        return DaskExpr.from_column_names(
            *column_names,
            backend_version=self._backend_version,
        )

    def lit(self, value: Any, dtype: dtypes.DType | None) -> DaskExpr:
        """Literal expression: a column named "lit" filled with `value`.

        If `dtype` is given the column is cast to the corresponding dask dtype.
        """

        def convert_if_dtype(
            series: dask_expr.Series, dtype: DType | type[DType]
        ) -> dask_expr.Series:
            return series.astype(reverse_translate_dtype(dtype)) if dtype else series

        return DaskExpr(
            # assign broadcasts the scalar across the frame's index.
            lambda df: [
                df._native_frame.assign(lit=value)
                .loc[:, "lit"]
                .pipe(convert_if_dtype, dtype)
            ],
            depth=0,
            function_name="lit",
            root_names=None,
            output_names=["lit"],
            returns_scalar=False,
            backend_version=self._backend_version,
        )

    def min(self, *column_names: str) -> DaskExpr:
        """Shorthand for `col(*names).min()`."""
        return DaskExpr.from_column_names(
            *column_names,
            backend_version=self._backend_version,
        ).min()

    def max(self, *column_names: str) -> DaskExpr:
        """Shorthand for `col(*names).max()`."""
        return DaskExpr.from_column_names(
            *column_names,
            backend_version=self._backend_version,
        ).max()

    def mean(self, *column_names: str) -> DaskExpr:
        """Shorthand for `col(*names).mean()`."""
        return DaskExpr.from_column_names(
            *column_names,
            backend_version=self._backend_version,
        ).mean()

    def sum(self, *column_names: str) -> DaskExpr:
        """Shorthand for `col(*names).sum()`."""
        return DaskExpr.from_column_names(
            *column_names,
            backend_version=self._backend_version,
        ).sum()

    def len(self) -> DaskExpr:
        """Expression returning the frame's row count as a column named "len"."""
        import dask.dataframe as dd  #
ignore-banned-import + import pandas as pd # ignore-banned-import + + def func(df: DaskLazyFrame) -> list[Any]: + if not df.columns: + return [ + dd.from_pandas( + pd.Series([0], name="len"), + npartitions=df._native_frame.npartitions, + ) + ] + return [df._native_frame.loc[:, df.columns[0]].size.to_series().rename("len")] + + # coverage bug? this is definitely hit + return DaskExpr( # pragma: no cover + func, + depth=0, + function_name="len", + root_names=None, + output_names=["len"], + returns_scalar=True, + backend_version=self._backend_version, + ) + + def all_horizontal(self, *exprs: IntoDaskExpr) -> DaskExpr: + return reduce(lambda x, y: x & y, parse_into_exprs(*exprs, namespace=self)) + + def any_horizontal(self, *exprs: IntoDaskExpr) -> DaskExpr: + return reduce(lambda x, y: x | y, parse_into_exprs(*exprs, namespace=self)) + + def sum_horizontal(self, *exprs: IntoDaskExpr) -> DaskExpr: + return reduce( + lambda x, y: x + y, + [expr.fill_null(0) for expr in parse_into_exprs(*exprs, namespace=self)], + ) + + def concat( + self, + items: Iterable[DaskLazyFrame], + *, + how: str = "vertical", + ) -> DaskLazyFrame: + import dask.dataframe as dd # ignore-banned-import + + if len(list(items)) == 0: + msg = "No items to concatenate" # pragma: no cover + raise AssertionError(msg) + native_frames = [i._native_frame for i in items] + if how == "vertical": + if not all( + tuple(i.columns) == tuple(native_frames[0].columns) for i in native_frames + ): # pragma: no cover + msg = "unable to vstack with non-matching columns" + raise AssertionError(msg) + return DaskLazyFrame( + dd.concat(native_frames, axis=0, join="inner"), + backend_version=self._backend_version, + ) + if how == "horizontal": + all_column_names: list[str] = [ + column for frame in native_frames for column in frame.columns + ] + if len(all_column_names) != len(set(all_column_names)): # pragma: no cover + duplicates = [ + i for i in all_column_names if all_column_names.count(i) > 1 + ] + msg = ( + 
f"Columns with name(s): {', '.join(duplicates)} " + "have more than one occurrence" + ) + raise AssertionError(msg) + return DaskLazyFrame( + dd.concat(native_frames, axis=1, join="outer"), + backend_version=self._backend_version, + ) + raise NotImplementedError + + def mean_horizontal(self, *exprs: IntoDaskExpr) -> IntoDaskExpr: + dask_exprs = parse_into_exprs(*exprs, namespace=self) + total = reduce(lambda x, y: x + y, (e.fill_null(0.0) for e in dask_exprs)) + n_non_zero = reduce(lambda x, y: x + y, ((1 - e.is_null()) for e in dask_exprs)) + return total / n_non_zero + + def _create_expr_from_series(self, _: Any) -> NoReturn: + msg = "`_create_expr_from_series` for DaskNamespace exists only for compatibility" + raise NotImplementedError(msg) + + def _create_compliant_series(self, _: Any) -> NoReturn: + msg = "`_create_compliant_series` for DaskNamespace exists only for compatibility" + raise NotImplementedError(msg) + + def _create_series_from_scalar(self, *_: Any) -> NoReturn: + msg = ( + "`_create_series_from_scalar` for DaskNamespace exists only for compatibility" + ) + raise NotImplementedError(msg) + + def _create_expr_from_callable( # pragma: no cover + self, + func: Callable[[DaskLazyFrame], list[DaskExpr]], + *, + depth: int, + function_name: str, + root_names: list[str] | None, + output_names: list[str] | None, + ) -> DaskExpr: + msg = ( + "`_create_expr_from_callable` for DaskNamespace exists only for compatibility" + ) + raise NotImplementedError(msg) + + def when( + self, + *predicates: IntoDaskExpr, + ) -> DaskWhen: + plx = self.__class__(backend_version=self._backend_version) + if predicates: + condition = plx.all_horizontal(*predicates) + else: + msg = "at least one predicate needs to be provided" + raise TypeError(msg) + + return DaskWhen(condition, self._backend_version, returns_scalar=False) + + +class DaskWhen: + def __init__( + self, + condition: DaskExpr, + backend_version: tuple[int, ...], + then_value: Any = None, + otherwise_value: Any = 
 None,
        *,
        returns_scalar: bool,
    ) -> None:
        self._backend_version = backend_version
        self._condition = condition
        self._then_value = then_value
        self._otherwise_value = otherwise_value
        self._returns_scalar = returns_scalar

    def __call__(self, df: DaskLazyFrame) -> list[Any]:
        """Evaluate the chain against `df`, returning a single result series.

        Scalars for then/otherwise are detected by the TypeError raised when
        they fail to parse as expressions.
        """
        from narwhals._dask.namespace import DaskNamespace
        from narwhals._expression_parsing import parse_into_expr

        plx = DaskNamespace(backend_version=self._backend_version)

        condition = parse_into_expr(self._condition, namespace=plx)._call(df)[0]  # type: ignore[arg-type]
        condition = cast("dask_expr.Series", condition)
        try:
            value_series = parse_into_expr(self._then_value, namespace=plx)._call(df)[0]  # type: ignore[arg-type]
        except TypeError:
            # `self._otherwise_value` is a scalar and can't be converted to an expression
            # - broadcast it across the condition's index via a temp frame.
            _df = condition.to_frame("a")
            _df["tmp"] = self._then_value
            value_series = _df["tmp"]
        value_series = cast("dask_expr.Series", value_series)
        validate_comparand(condition, value_series)

        if self._otherwise_value is None:
            # No otherwise: rows failing the condition become null.
            return [value_series.where(condition)]
        try:
            otherwise_series = parse_into_expr(
                self._otherwise_value, namespace=plx
            )._call(df)[0]  # type: ignore[arg-type]
        except TypeError:
            # `self._otherwise_value` is a scalar and can't be converted to an expression
            return [value_series.where(condition, self._otherwise_value)]
        validate_comparand(condition, otherwise_series)
        return [value_series.zip_with(condition, otherwise_series)]

    def then(self, value: DaskExpr | Any) -> DaskThen:
        """Record the then-value and return a DaskThen expression wrapper."""
        self._then_value = value

        return DaskThen(
            self,
            depth=0,
            function_name="whenthen",
            root_names=None,
            output_names=None,
            returns_scalar=self._returns_scalar,
            backend_version=self._backend_version,
        )


class DaskThen(DaskExpr):
    """Expression produced by `when(...).then(...)`; `_call` is the DaskWhen itself."""

    def __init__(
        self,
        call: DaskWhen,
        *,
        depth: int,
        function_name: str,
        root_names: list[str] | None,
        output_names: list[str] | None,
        returns_scalar: bool,
        backend_version:
 tuple[int, ...],
    ) -> None:
        self._backend_version = backend_version

        self._call = call
        self._depth = depth
        self._function_name = function_name
        self._root_names = root_names
        self._output_names = output_names
        self._returns_scalar = returns_scalar

    def otherwise(self, value: DaskExpr | Any) -> DaskExpr:
        """Record the otherwise-value on the underlying DaskWhen and return self."""
        # type ignore because we are setting the `_call` attribute to a
        # callable object of type `DaskWhen`, base class has the attribute as
        # only a `Callable`
        self._call._otherwise_value = value  # type: ignore[attr-defined]
        self._function_name = "whenotherwise"
        return self
diff --git a/parrot/lib/python3.10/site-packages/narwhals/_dask/typing.py b/parrot/lib/python3.10/site-packages/narwhals/_dask/typing.py
new file mode 100644
index 0000000000000000000000000000000000000000..23719eac00073efabafcc8342503f74450a29295
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/narwhals/_dask/typing.py
@@ -0,0 +1,16 @@
from __future__ import annotations  # pragma: no cover

from typing import TYPE_CHECKING  # pragma: no cover
from typing import Union  # pragma: no cover

if TYPE_CHECKING:
    import sys

    if sys.version_info >= (3, 10):
        from typing import TypeAlias
    else:
        from typing_extensions import TypeAlias

    from narwhals._dask.expr import DaskExpr

    # Anything accepted where a Dask expression is expected: an expression or a
    # column name.
    IntoDaskExpr: TypeAlias = Union[DaskExpr, str]
diff --git a/parrot/lib/python3.10/site-packages/narwhals/_exceptions.py b/parrot/lib/python3.10/site-packages/narwhals/_exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..189954516c3e284f4976a0619bff51e47221232b
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/narwhals/_exceptions.py
@@ -0,0 +1,4 @@
from __future__ import annotations


# Raised when a referenced column does not exist in the frame.
class ColumnNotFoundError(Exception): ...
diff --git a/parrot/lib/python3.10/site-packages/narwhals/dataframe.py b/parrot/lib/python3.10/site-packages/narwhals/dataframe.py
new file mode 100644
index 0000000000000000000000000000000000000000..581c8617d27b7ed958b4bcb0c30ff4a2fdc7935d
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/narwhals/dataframe.py
@@ -0,0 +1,4161 @@
from __future__ import annotations

from typing import TYPE_CHECKING
from typing import Any
from typing import Callable
from typing import Generic
from typing import Iterable
from typing import Iterator
from typing import Literal
from typing import Sequence
from typing import TypeVar
from typing import overload

from narwhals.dependencies import get_polars
from narwhals.dependencies import is_numpy_array
from narwhals.schema import Schema
from narwhals.translate import to_native
from narwhals.utils import flatten
from narwhals.utils import is_sequence_but_not_str
from narwhals.utils import parse_version

if TYPE_CHECKING:
    from io import BytesIO
    from pathlib import Path

    import numpy as np
    import pandas as pd
    import pyarrow as pa
    from typing_extensions import Self

    from narwhals.group_by import GroupBy
    from narwhals.group_by import LazyGroupBy
    from narwhals.series import Series
    from narwhals.typing import IntoExpr
    from narwhals.typing import IntoFrame

FrameT = TypeVar("FrameT", bound="IntoFrame")


class BaseFrame(Generic[FrameT]):
    """Shared implementation for DataFrame and LazyFrame.

    Wraps a backend-specific "compliant" frame and delegates each operation to
    it, re-wrapping the result.
    """

    # The backend-specific frame (pandas-like, dask, arrow, ...).
    _compliant_frame: Any
    # "full" for fully-supported backends, "interchange" for metadata-only ones.
    _level: Literal["full", "interchange"]

    def __native_namespace__(self) -> Any:
        return self._compliant_frame.__native_namespace__()

    def __narwhals_namespace__(self) -> Any:
        return self._compliant_frame.__narwhals_namespace__()

    def _from_compliant_dataframe(self, df: Any) -> Self:
        # construct, preserving properties
        return self.__class__(  # type: ignore[call-arg]
            df,
            level=self._level,
        )

    def _flatten_and_extract(self, *args: Any, **kwargs: Any) -> Any:
        """Process `args` and `kwargs`, extracting underlying objects as we go."""
        args = [self._extract_compliant(v) for v in flatten(args)]  # type: ignore[assignment]
        kwargs = {k: self._extract_compliant(v) for k, v in kwargs.items()}
        return args, kwargs

    def _extract_compliant(self, arg: Any) -> Any:
        """Unwrap narwhals frames/series/exprs into their backend objects.

        Raises:
            TypeError: if a raw polars object is passed where a narwhals
                object was expected (a common user mistake).
        """
        from narwhals.expr import Expr
        from narwhals.series import Series

        if isinstance(arg, BaseFrame):
            return arg._compliant_frame
        if isinstance(arg, Series):
            return arg._compliant_series
        if isinstance(arg, Expr):
            # Materialise the expression in this frame's backend namespace.
            return arg._call(self.__narwhals_namespace__())
        if get_polars() is not None and "polars" in str(type(arg)):
            msg = (
                f"Expected Narwhals object, got: {type(arg)}.\n\n"
                "Perhaps you:\n"
                "- Forgot a `nw.from_native` somewhere?\n"
                "- Used `pl.col` instead of `nw.col`?"
            )
            raise TypeError(msg)
        return arg

    @property
    def schema(self) -> Schema:
        return Schema(self._compliant_frame.schema.items())

    def collect_schema(self) -> Schema:
        native_schema = dict(self._compliant_frame.collect_schema())

        return Schema(native_schema)

    def pipe(self, function: Callable[[Any], Self], *args: Any, **kwargs: Any) -> Self:
        # Apply `function` to self; enables method-chaining of free functions.
        return function(self, *args, **kwargs)

    def with_row_index(self, name: str = "index") -> Self:
        return self._from_compliant_dataframe(
            self._compliant_frame.with_row_index(name),
        )

    def drop_nulls(self: Self, subset: str | list[str] | None = None) -> Self:
        return self._from_compliant_dataframe(
            self._compliant_frame.drop_nulls(subset=subset),
        )

    @property
    def columns(self) -> list[str]:
        return self._compliant_frame.columns  # type: ignore[no-any-return]

    def lazy(self) -> LazyFrame[Any]:
        return LazyFrame(
            self._compliant_frame.lazy(),
            level=self._level,
        )

    def with_columns(
        self, *exprs: IntoExpr | Iterable[IntoExpr], **named_exprs: IntoExpr
    ) -> Self:
        exprs, named_exprs = self._flatten_and_extract(*exprs, **named_exprs)
        return self._from_compliant_dataframe(
            self._compliant_frame.with_columns(*exprs, **named_exprs),
        )

    def select(
        self,
        *exprs: IntoExpr | Iterable[IntoExpr],
        **named_exprs: IntoExpr,
    ) -> Self:
        exprs, named_exprs = self._flatten_and_extract(*exprs, **named_exprs)
        return self._from_compliant_dataframe(
            self._compliant_frame.select(*exprs, **named_exprs),
        )

    def rename(self, mapping: dict[str, str]) -> Self:
        return self._from_compliant_dataframe(self._compliant_frame.rename(mapping))

    def head(self, n: int) -> Self:
        return self._from_compliant_dataframe(self._compliant_frame.head(n))

    def tail(self, n: int) -> Self:
        return self._from_compliant_dataframe(self._compliant_frame.tail(n))

    def drop(self, *columns: Iterable[str], strict: bool) -> Self:
        return self._from_compliant_dataframe(
            self._compliant_frame.drop(columns, strict=strict)
        )

    def unique(
        self,
        subset: str | list[str] | None = None,
        *,
        keep: Literal["any", "first", "last", "none"] = "any",
        maintain_order: bool = False,
    ) -> Self:
        return self._from_compliant_dataframe(
            self._compliant_frame.unique(
                subset=subset, keep=keep, maintain_order=maintain_order
            )
        )

    def filter(self, *predicates: IntoExpr | Iterable[IntoExpr] | list[bool]) -> Self:
        # A single plain list of bools is passed through as a boolean mask;
        # anything else is treated as expressions and extracted first.
        if not (
            len(predicates) == 1
            and isinstance(predicates[0], list)
            and all(isinstance(x, bool) for x in predicates[0])
        ):
            predicates, _ = self._flatten_and_extract(*predicates)
        return self._from_compliant_dataframe(
            self._compliant_frame.filter(*predicates),
        )

    def sort(
        self,
        by: str | Iterable[str],
        *more_by: str,
        descending: bool | Sequence[bool] = False,
    ) -> Self:
        return self._from_compliant_dataframe(
            self._compliant_frame.sort(by, *more_by, descending=descending)
        )

    def join(
        self,
        other: Self,
        on: str | list[str] | None = None,
        how: Literal["inner", "left", "cross", "semi", "anti"] = "inner",
        *,
        left_on: str | list[str] | None = None,
        right_on: str | list[str] | None = None,
        suffix: str = "_right",
    ) -> Self:
        """Join with `other`, validating the key-argument combinations first.

        Raises:
            NotImplementedError: for an unsupported `how`.
            ValueError: for invalid combinations of `on`/`left_on`/`right_on`.
        """
        _supported_joins = ("inner", "left", "cross", "anti", "semi")

        if how not in _supported_joins:
            msg = f"Only the following join strategies are supported: {_supported_joins}; found '{how}'."
            raise NotImplementedError(msg)

        if how == "cross" and (
            left_on is not None or right_on is not None or on is not None
        ):
            msg = "Can not pass `left_on`, `right_on` or `on` keys for cross join"
            raise ValueError(msg)

        if how != "cross" and (on is None and (left_on is None or right_on is None)):
            msg = f"Either (`left_on` and `right_on`) or `on` keys should be specified for {how}."
            raise ValueError(msg)

        if how != "cross" and (
            on is not None and (left_on is not None or right_on is not None)
        ):
            msg = f"If `on` is specified, `left_on` and `right_on` should be None for {how}."
            raise ValueError(msg)

        if on is not None:
            # `on` is shorthand for identical keys on both sides.
            left_on = right_on = on

        return self._from_compliant_dataframe(
            self._compliant_frame.join(
                self._extract_compliant(other),
                how=how,
                left_on=left_on,
                right_on=right_on,
                suffix=suffix,
            )
        )

    def clone(self) -> Self:
        return self._from_compliant_dataframe(self._compliant_frame.clone())

    def gather_every(self: Self, n: int, offset: int = 0) -> Self:
        return self._from_compliant_dataframe(
            self._compliant_frame.gather_every(n=n, offset=offset)
        )

    def join_asof(
        self,
        other: Self,
        *,
        left_on: str | None = None,
        right_on: str | None = None,
        on: str | None = None,
        by_left: str | list[str] | None = None,
        by_right: str | list[str] | None = None,
        by: str | list[str] | None = None,
        strategy: Literal["backward", "forward", "nearest"] = "backward",
    ) -> Self:
        """As-of join with `other`, validating the argument combinations first.

        Raises:
            NotImplementedError: for an unsupported `strategy`.
            ValueError: for invalid combinations of the key/by arguments.
        """
        _supported_strategies = ("backward", "forward", "nearest")

        if strategy not in _supported_strategies:
            msg = f"Only the following strategies are supported: {_supported_strategies}; found '{strategy}'."
            raise NotImplementedError(msg)

        if (on is None) and (left_on is None or right_on is None):
            msg = "Either (`left_on` and `right_on`) or `on` keys should be specified."
            raise ValueError(msg)
        if (on is not None) and (left_on is not None or right_on is not None):
            msg = "If `on` is specified, `left_on` and `right_on` should be None."
            raise ValueError(msg)
        if (by is None) and (
            (by_left is None and by_right is not None)
            or (by_left is not None and by_right is None)
        ):
            msg = (
                "Can not specify only `by_left` or `by_right`, you need to specify both."
            )
            raise ValueError(msg)
        if (by is not None) and (by_left is not None or by_right is not None):
            msg = "If `by` is specified, `by_left` and `by_right` should be None."
            raise ValueError(msg)
        if on is not None:
            return self._from_compliant_dataframe(
                self._compliant_frame.join_asof(
                    self._extract_compliant(other),
                    on=on,
                    by_left=by_left,
                    by_right=by_right,
                    by=by,
                    strategy=strategy,
                )
            )
        return self._from_compliant_dataframe(
            self._compliant_frame.join_asof(
                self._extract_compliant(other),
                left_on=left_on,
                right_on=right_on,
                by_left=by_left,
                by_right=by_right,
                by=by,
                strategy=strategy,
            )
        )


class DataFrame(BaseFrame[FrameT]):
    """
    Narwhals DataFrame, backed by a native dataframe.

    The native dataframe might be pandas.DataFrame, polars.DataFrame, ...

    This class is not meant to be instantiated directly - instead, use
    `narwhals.from_native`.
+ """ + + def __init__( + self, + df: Any, + *, + level: Literal["full", "interchange"], + ) -> None: + self._level: Literal["full", "interchange"] = level + if hasattr(df, "__narwhals_dataframe__"): + self._compliant_frame: Any = df.__narwhals_dataframe__() + else: # pragma: no cover + msg = f"Expected an object which implements `__narwhals_dataframe__`, got: {type(df)}" + raise AssertionError(msg) + + def __len__(self) -> Any: + return self._compliant_frame.__len__() + + def __array__(self, dtype: Any = None, copy: bool | None = None) -> np.ndarray: + return self._compliant_frame.__array__(dtype, copy=copy) + + def __repr__(self) -> str: # pragma: no cover + header = " Narwhals DataFrame " + length = len(header) + return ( + "┌" + + "─" * length + + "┐\n" + + f"|{header}|\n" + + "| Use `.to_native` to see native output |\n" + + "└" + + "─" * length + + "┘" + ) + + def __arrow_c_stream__(self, requested_schema: object | None = None) -> object: + """ + Export a DataFrame via the Arrow PyCapsule Interface. + + - if the underlying dataframe implements the interface, it'll return that + - else, it'll call `to_arrow` and then defer to PyArrow's implementation + + See [PyCapsule Interface](https://arrow.apache.org/docs/dev/format/CDataInterface/PyCapsuleInterface.html) + for more. 
+ """ + native_frame = self._compliant_frame._native_frame + if hasattr(native_frame, "__arrow_c_stream__"): + return native_frame.__arrow_c_stream__(requested_schema=requested_schema) + try: + import pyarrow as pa # ignore-banned-import + except ModuleNotFoundError as exc: # pragma: no cover + msg = f"PyArrow>=14.0.0 is required for `DataFrame.__arrow_c_stream__` for object of type {type(native_frame)}" + raise ModuleNotFoundError(msg) from exc + if parse_version(pa.__version__) < (14, 0): # pragma: no cover + msg = f"PyArrow>=14.0.0 is required for `DataFrame.__arrow_c_stream__` for object of type {type(native_frame)}" + raise ModuleNotFoundError(msg) from None + pa_table = self.to_arrow() + return pa_table.__arrow_c_stream__(requested_schema=requested_schema) + + def lazy(self) -> LazyFrame[Any]: + """ + Lazify the DataFrame (if possible). + + If a library does not support lazy execution, then this is a no-op. + + Examples: + Construct pandas and Polars DataFrames: + + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> df = {"foo": [1, 2, 3], "bar": [6.0, 7.0, 8.0], "ham": ["a", "b", "c"]} + >>> df_pd = pd.DataFrame(df) + >>> df_pl = pl.DataFrame(df) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(df): + ... return df.lazy() + + Note that then, pandas dataframe stay eager, but Polars DataFrame becomes a Polars LazyFrame: + + >>> func(df_pd) + foo bar ham + 0 1 6.0 a + 1 2 7.0 b + 2 3 8.0 c + >>> func(df_pl) + + """ + return super().lazy() + + def to_native(self) -> FrameT: + """ + Convert Narwhals DataFrame to native one. + + Returns: + Object of class that user started with. 
+ + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import pyarrow as pa + >>> import narwhals as nw + >>> data = {"foo": [1, 2, 3], "bar": [6.0, 7.0, 8.0], "ham": ["a", "b", "c"]} + >>> df_pd = pd.DataFrame(data) + >>> df_pl = pl.DataFrame(data) + >>> df_pa = pa.table(data) + + Calling `to_native` on a Narwhals DataFrame returns the native object: + + >>> nw.from_native(df_pd).to_native() + foo bar ham + 0 1 6.0 a + 1 2 7.0 b + 2 3 8.0 c + >>> nw.from_native(df_pl).to_native() + shape: (3, 3) + ┌─────┬─────┬─────┐ + │ foo ┆ bar ┆ ham │ + │ --- ┆ --- ┆ --- │ + │ i64 ┆ f64 ┆ str │ + ╞═════╪═════╪═════╡ + │ 1 ┆ 6.0 ┆ a │ + │ 2 ┆ 7.0 ┆ b │ + │ 3 ┆ 8.0 ┆ c │ + └─────┴─────┴─────┘ + >>> nw.from_native(df_pa).to_native() + pyarrow.Table + foo: int64 + bar: double + ham: string + ---- + foo: [[1,2,3]] + bar: [[6,7,8]] + ham: [["a","b","c"]] + """ + + return self._compliant_frame._native_frame # type: ignore[no-any-return] + + def to_pandas(self) -> pd.DataFrame: + """ + Convert this DataFrame to a pandas DataFrame. + + Examples: + Construct pandas, Polars (eager) and PyArrow DataFrames: + + >>> import pandas as pd + >>> import polars as pl + >>> import pyarrow as pa + >>> import narwhals as nw + >>> df = {"foo": [1, 2, 3], "bar": [6.0, 7.0, 8.0], "ham": ["a", "b", "c"]} + >>> df_pd = pd.DataFrame(df) + >>> df_pl = pl.DataFrame(df) + >>> df_pa = pa.table(df) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(df): + ... return df.to_pandas() + + We can then pass any supported library such as pandas, Polars (eager), or PyArrow to `func`: + + >>> func(df_pd) + foo bar ham + 0 1 6.0 a + 1 2 7.0 b + 2 3 8.0 c + >>> func(df_pl) + foo bar ham + 0 1 6.0 a + 1 2 7.0 b + 2 3 8.0 c + >>> func(df_pa) + foo bar ham + 0 1 6.0 a + 1 2 7.0 b + 2 3 8.0 c + + + """ + return self._compliant_frame.to_pandas() + + def write_csv(self, file: str | Path | BytesIO | None = None) -> Any: + r""" + Write dataframe to comma-separated values (CSV) file. 
+ + Examples: + Construct pandas and Polars DataFrames: + + >>> import pandas as pd + >>> import polars as pl + >>> import pyarrow as pa + >>> import narwhals as nw + >>> df = {"foo": [1, 2, 3], "bar": [6.0, 7.0, 8.0], "ham": ["a", "b", "c"]} + >>> df_pd = pd.DataFrame(df) + >>> df_pl = pl.DataFrame(df) + >>> df_pa = pa.table(df) + + We define a library agnostic function: + + >>> def func(df): + ... df = nw.from_native(df) + ... return df.write_csv() + + We can pass any supported library such as pandas, Polars or PyArrow to `func`: + + >>> func(df_pd) # doctest: +SKIP + 'foo,bar,ham\n1,6.0,a\n2,7.0,b\n3,8.0,c\n' + >>> func(df_pl) # doctest: +SKIP + 'foo,bar,ham\n1,6.0,a\n2,7.0,b\n3,8.0,c\n' + >>> func(df_pa) # doctest: +SKIP + 'foo,bar,ham\n1,6.0,a\n2,7.0,b\n3,8.0,c\n' + + If we had passed a file name to `write_csv`, it would have been + written to that file. + """ + return self._compliant_frame.write_csv(file) + + def write_parquet(self, file: str | Path | BytesIO) -> Any: + """ + Write dataframe to parquet file. + + Examples: + Construct pandas and Polars DataFrames: + + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> df = {"foo": [1, 2, 3], "bar": [6.0, 7.0, 8.0], "ham": ["a", "b", "c"]} + >>> df_pd = pd.DataFrame(df) + >>> df_pl = pl.DataFrame(df) + + We define a library agnostic function: + + >>> def func(df): + ... df = nw.from_native(df) + ... df.write_parquet("foo.parquet") + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd) # doctest:+SKIP + >>> func(df_pl) # doctest:+SKIP + """ + self._compliant_frame.write_parquet(file) + + def to_numpy(self) -> np.ndarray: + """ + Convert this DataFrame to a NumPy ndarray. 
+ + Examples: + Construct pandas and polars DataFrames: + + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> df = {"foo": [1, 2, 3], "bar": [6.5, 7.0, 8.5], "ham": ["a", "b", "c"]} + >>> df_pd = pd.DataFrame(df) + >>> df_pl = pl.DataFrame(df) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(df): + ... return df.to_numpy() + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd) + array([[1, 6.5, 'a'], + [2, 7.0, 'b'], + [3, 8.5, 'c']], dtype=object) + >>> func(df_pl) + array([[1, 6.5, 'a'], + [2, 7.0, 'b'], + [3, 8.5, 'c']], dtype=object) + """ + return self._compliant_frame.to_numpy() + + @property + def shape(self) -> tuple[int, int]: + """ + Get the shape of the DataFrame. + + Examples: + Construct pandas and polars DataFrames: + + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> df = {"foo": [1, 2, 3, 4, 5]} + >>> df_pd = pd.DataFrame(df) + >>> df_pl = pl.DataFrame(df) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(df): + ... return df.shape + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd) + (5, 1) + >>> func(df_pl) + (5, 1) + """ + return self._compliant_frame.shape # type: ignore[no-any-return] + + def get_column(self, name: str) -> Series: + """ + Get a single column by name. + + Notes: + Although `name` is typed as `str`, pandas does allow non-string column + names, and they will work when passed to this function if the + `narwhals.DataFrame` is backed by a pandas dataframe with non-string + columns. This function can only be used to extract a column by name, so + there is no risk of ambiguity. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> data = {"a": [1, 2], "b": [3, 4]} + >>> df_pd = pd.DataFrame(data) + >>> df_pl = pl.DataFrame(data) + + We define a library agnostic function: + + >>> @nw.narwhalify(eager_only=True) + ... 
def func(df): + ... name = df.columns[0] + ... return df.get_column(name) + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd) + 0 1 + 1 2 + Name: a, dtype: int64 + >>> func(df_pl) # doctest:+NORMALIZE_WHITESPACE + shape: (2,) + Series: 'a' [i64] + [ + 1 + 2 + ] + """ + from narwhals.series import Series + + return Series( + self._compliant_frame.get_column(name), + level=self._level, + ) + + @overload + def __getitem__(self, item: tuple[Sequence[int], slice]) -> Self: ... + @overload + def __getitem__(self, item: tuple[Sequence[int], Sequence[int]]) -> Self: ... + @overload + def __getitem__(self, item: tuple[slice, Sequence[int]]) -> Self: ... + @overload + def __getitem__(self, item: tuple[Sequence[int], str]) -> Series: ... # type: ignore[overload-overlap] + @overload + def __getitem__(self, item: tuple[slice, str]) -> Series: ... # type: ignore[overload-overlap] + @overload + def __getitem__(self, item: tuple[Sequence[int], Sequence[str]]) -> Self: ... + @overload + def __getitem__(self, item: tuple[slice, Sequence[str]]) -> Self: ... + @overload + def __getitem__(self, item: tuple[Sequence[int], int]) -> Series: ... # type: ignore[overload-overlap] + @overload + def __getitem__(self, item: tuple[slice, int]) -> Series: ... # type: ignore[overload-overlap] + + @overload + def __getitem__(self, item: Sequence[int]) -> Self: ... + + @overload + def __getitem__(self, item: str) -> Series: ... # type: ignore[overload-overlap] + + @overload + def __getitem__(self, item: Sequence[str]) -> Self: ... + + @overload + def __getitem__(self, item: slice) -> Self: ... + + @overload + def __getitem__(self, item: tuple[slice, slice]) -> Self: ... 
+ + def __getitem__( + self, + item: str + | slice + | Sequence[int] + | Sequence[str] + | tuple[Sequence[int], str | int] + | tuple[slice, str | int] + | tuple[slice | Sequence[int], Sequence[int] | Sequence[str] | slice] + | tuple[slice, slice], + ) -> Series | Self: + """ + Extract column or slice of DataFrame. + + Arguments: + item: How to slice dataframe. What happens depends on what is passed. It's easiest + to explain by example. Suppose we have a Dataframe `df`: + + - `df['a']` extracts column `'a'` and returns a `Series`. + - `df[0:2]` extracts the first two rows and returns a `DataFrame`. + - `df[0:2, 'a']` extracts the first two rows from column `'a'` and returns + a `Series`. + - `df[0:2, 0]` extracts the first two rows from the first column and returns + a `Series`. + - `df[[0, 1], [0, 1, 2]]` extracts the first two rows and the first three columns + and returns a `DataFrame` + - `df[:, [0, 1, 2]]` extracts all rows from the first three columns and returns a + `DataFrame`. + - `df[:, ['a', 'c']]` extracts all rows and columns `'a'` and `'c'` and returns a + `DataFrame`. + - `df[['a', 'c']]` extracts all rows and columns `'a'` and `'c'` and returns a + `DataFrame`. + - `df[0: 2, ['a', 'c']]` extracts the first two rows and columns `'a'` and `'c'` and + returns a `DataFrame` + - `df[:, 0: 2]` extracts all rows from the first two columns and returns a `DataFrame` + - `df[:, 'a': 'c']` extracts all rows and all columns positioned between `'a'` and `'c'` + _inclusive_ and returns a `DataFrame`. For example, if the columns are + `'a', 'd', 'c', 'b'`, then that would extract columns `'a'`, `'d'`, and `'c'`. + + Notes: + - Integers are always interpreted as positions + - Strings are always interpreted as column names. + + In contrast with Polars, pandas allows non-string column names. + If you don't know whether the column name you're trying to extract + is definitely a string (e.g. `df[df.columns[0]]`) then you should + use `DataFrame.get_column` instead. 
+ + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import pyarrow as pa + >>> import narwhals as nw + >>> data = {"a": [1, 2], "b": [3, 4]} + >>> df_pd = pd.DataFrame(data) + >>> df_pl = pl.DataFrame(data) + >>> df_pa = pa.table(data) + + We define a library agnostic function: + + >>> @nw.narwhalify(eager_only=True) + ... def func(df): + ... return df["a"] + + We can then pass either pandas, Polars or PyArrow to `func`: + + >>> func(df_pd) + 0 1 + 1 2 + Name: a, dtype: int64 + >>> func(df_pl) # doctest:+NORMALIZE_WHITESPACE + shape: (2,) + Series: 'a' [i64] + [ + 1 + 2 + ] + >>> func(df_pa) # doctest:+ELLIPSIS + + [ + [ + 1, + 2 + ] + ] + + """ + if isinstance(item, int): + item = [item] + if ( + isinstance(item, tuple) + and len(item) == 2 + and (isinstance(item[0], (str, int))) + ): + msg = ( + f"Expected str or slice, got: {type(item)}.\n\n" + "Hint: if you were trying to get a single element out of a " + "dataframe, use `DataFrame.item`." + ) + raise TypeError(msg) + if ( + isinstance(item, tuple) + and len(item) == 2 + and (is_sequence_but_not_str(item[1]) or isinstance(item[1], slice)) + ): + if item[1] == slice(None) and item[0] == slice(None): + return self + if item[1] == slice(None): + return self._from_compliant_dataframe(self._compliant_frame[item[0]]) + return self._from_compliant_dataframe(self._compliant_frame[item]) + if isinstance(item, str) or (isinstance(item, tuple) and len(item) == 2): + from narwhals.series import Series + + return Series( + self._compliant_frame[item], + level=self._level, + ) + + elif ( + is_sequence_but_not_str(item) + or isinstance(item, slice) + or (is_numpy_array(item) and item.ndim == 1) + ): + return self._from_compliant_dataframe(self._compliant_frame[item]) + + else: + msg = f"Expected str or slice, got: {type(item)}" + raise TypeError(msg) + + def __contains__(self, key: str) -> bool: + return key in self.columns + + @overload + def to_dict(self, *, as_series: Literal[True] = ...) 
-> dict[str, Series]: ... + @overload + def to_dict(self, *, as_series: Literal[False]) -> dict[str, list[Any]]: ... + @overload + def to_dict(self, *, as_series: bool) -> dict[str, Series] | dict[str, list[Any]]: ... + def to_dict( + self, *, as_series: bool = True + ) -> dict[str, Series] | dict[str, list[Any]]: + """ + Convert DataFrame to a dictionary mapping column name to values. + + Arguments: + as_series: If set to true ``True``, then the values are Narwhals Series, + otherwise the values are Any. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import pyarrow as pa + >>> import narwhals as nw + >>> df = { + ... "A": [1, 2, 3, 4, 5], + ... "fruits": ["banana", "banana", "apple", "apple", "banana"], + ... "B": [5, 4, 3, 2, 1], + ... "animals": ["beetle", "fly", "beetle", "beetle", "beetle"], + ... "optional": [28, 300, None, 2, -30], + ... } + >>> df_pd = pd.DataFrame(df) + >>> df_pl = pl.DataFrame(df) + >>> df_pa = pa.table(df) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(df): + ... 
return df.to_dict(as_series=False) + + We can then pass either pandas, Polars or PyArrow to `func`: + + >>> func(df_pd) + {'A': [1, 2, 3, 4, 5], 'fruits': ['banana', 'banana', 'apple', 'apple', 'banana'], 'B': [5, 4, 3, 2, 1], 'animals': ['beetle', 'fly', 'beetle', 'beetle', 'beetle'], 'optional': [28.0, 300.0, nan, 2.0, -30.0]} + >>> func(df_pl) + {'A': [1, 2, 3, 4, 5], 'fruits': ['banana', 'banana', 'apple', 'apple', 'banana'], 'B': [5, 4, 3, 2, 1], 'animals': ['beetle', 'fly', 'beetle', 'beetle', 'beetle'], 'optional': [28, 300, None, 2, -30]} + >>> func(df_pa) + {'A': [1, 2, 3, 4, 5], 'fruits': ['banana', 'banana', 'apple', 'apple', 'banana'], 'B': [5, 4, 3, 2, 1], 'animals': ['beetle', 'fly', 'beetle', 'beetle', 'beetle'], 'optional': [28, 300, None, 2, -30]} + """ + from narwhals.series import Series + + if as_series: + return { + key: Series( + value, + level=self._level, + ) + for key, value in self._compliant_frame.to_dict( + as_series=as_series + ).items() + } + return self._compliant_frame.to_dict(as_series=as_series) # type: ignore[no-any-return] + + def row(self, index: int) -> tuple[Any, ...]: + """ + Get values at given row. + + !!!note + You should NEVER use this method to iterate over a DataFrame; + if you require row-iteration you should strongly prefer use of iter_rows() instead. + + Arguments: + index: Row number. + + Notes: + cuDF doesn't support this method. + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> data = {"a": [1, 2, 3], "b": [4, 5, 6]} + >>> df_pd = pd.DataFrame(data) + >>> df_pl = pl.DataFrame(data) + + Let's define a library-agnostic function to get the second row. + + >>> @nw.narwhalify + ... def func(df): + ... 
return df.row(1) + + We can then pass pandas / Polars / any other supported library: + + >>> func(df_pd) + (2, 5) + >>> func(df_pl) + (2, 5) + """ + return self._compliant_frame.row(index) # type: ignore[no-any-return] + + # inherited + def pipe(self, function: Callable[[Any], Self], *args: Any, **kwargs: Any) -> Self: + """ + Pipe function call. + + Examples: + >>> import polars as pl + >>> import pandas as pd + >>> import narwhals as nw + >>> data = {"a": [1, 2, 3], "ba": [4, 5, 6]} + >>> df_pd = pd.DataFrame(data) + >>> df_pl = pl.DataFrame(data) + + Let's define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(df): + ... return df.pipe( + ... lambda _df: _df.select([x for x in _df.columns if len(x) == 1]) + ... ) + + We can then pass either pandas or Polars: + + >>> func(df_pd) + a + 0 1 + 1 2 + 2 3 + >>> func(df_pl) + shape: (3, 1) + ┌─────┐ + │ a │ + │ --- │ + │ i64 │ + ╞═════╡ + │ 1 │ + │ 2 │ + │ 3 │ + └─────┘ + """ + return super().pipe(function, *args, **kwargs) + + def drop_nulls(self: Self, subset: str | list[str] | None = None) -> Self: + """ + Drop null values. + + Arguments: + subset: Column name(s) for which null values are considered. If set to None + (default), use all columns. + + Notes: + pandas and Polars handle null values differently. Polars distinguishes + between NaN and Null, whereas pandas doesn't. + + Examples: + >>> import polars as pl + >>> import pandas as pd + >>> import narwhals as nw + >>> data = {"a": [1.0, 2.0, None], "ba": [1.0, None, 2.0]} + >>> df_pd = pd.DataFrame(data) + >>> df_pl = pl.DataFrame(data) + + Let's define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(df): + ... 
return df.drop_nulls() + + We can then pass either pandas or Polars: + + >>> func(df_pd) + a ba + 0 1.0 1.0 + >>> func(df_pl) + shape: (1, 2) + ┌─────┬─────┐ + │ a ┆ ba │ + │ --- ┆ --- │ + │ f64 ┆ f64 │ + ╞═════╪═════╡ + │ 1.0 ┆ 1.0 │ + └─────┴─────┘ + """ + return super().drop_nulls(subset=subset) + + def with_row_index(self, name: str = "index") -> Self: + """ + Insert column which enumerates rows. + + Examples: + Construct pandas as polars DataFrames: + + >>> import polars as pl + >>> import pandas as pd + >>> import narwhals as nw + >>> data = {"a": [1, 2, 3], "b": [4, 5, 6]} + >>> df_pd = pd.DataFrame(data) + >>> df_pl = pl.DataFrame(data) + + Let's define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(df): + ... return df.with_row_index() + + We can then pass either pandas or Polars: + + >>> func(df_pd) + index a b + 0 0 1 4 + 1 1 2 5 + 2 2 3 6 + >>> func(df_pl) + shape: (3, 3) + ┌───────┬─────┬─────┐ + │ index ┆ a ┆ b │ + │ --- ┆ --- ┆ --- │ + │ u32 ┆ i64 ┆ i64 │ + ╞═══════╪═════╪═════╡ + │ 0 ┆ 1 ┆ 4 │ + │ 1 ┆ 2 ┆ 5 │ + │ 2 ┆ 3 ┆ 6 │ + └───────┴─────┴─────┘ + """ + return super().with_row_index(name) + + @property + def schema(self) -> Schema: + r""" + Get an ordered mapping of column names to their data type. + + Examples: + >>> import polars as pl + >>> import pandas as pd + >>> import narwhals as nw + >>> data = { + ... "foo": [1, 2, 3], + ... "bar": [6.0, 7.0, 8.0], + ... "ham": ["a", "b", "c"], + ... } + >>> df_pd = pd.DataFrame(data) + >>> df_pl = pl.DataFrame(data) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(df): + ... 
return df.schema + + You can pass either pandas or Polars to `func`: + + >>> df_pd_schema = func(df_pd) + >>> df_pd_schema # doctest:+SKIP + Schema({'foo': Int64, 'bar': Float64, 'ham', String}) + + >>> df_pl_schema = func(df_pl) + >>> df_pl_schema # doctest:+SKIP + Schema({'foo': Int64, 'bar': Float64, 'ham', String}) + """ + return super().schema + + def collect_schema(self: Self) -> Schema: + r""" + Get an ordered mapping of column names to their data type. + + Examples: + >>> import polars as pl + >>> import pandas as pd + >>> import narwhals as nw + >>> data = { + ... "foo": [1, 2, 3], + ... "bar": [6.0, 7.0, 8.0], + ... "ham": ["a", "b", "c"], + ... } + >>> df_pd = pd.DataFrame(data) + >>> df_pl = pl.DataFrame(data) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(df): + ... return df.collect_schema() + + You can pass either pandas or Polars to `func`: + + >>> df_pd_schema = func(df_pd) + >>> df_pd_schema # doctest:+SKIP + Schema({'foo': Int64, 'bar': Float64, 'ham', String}) + + >>> df_pl_schema = func(df_pl) + >>> df_pl_schema # doctest:+SKIP + Schema({'foo': Int64, 'bar': Float64, 'ham', String}) + """ + return super().collect_schema() + + @property + def columns(self) -> list[str]: + """ + Get column names. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import pyarrow as pa + >>> import narwhals as nw + >>> df = {"foo": [1, 2, 3], "bar": [6.0, 7.0, 8.0], "ham": ["a", "b", "c"]} + >>> df_pd = pd.DataFrame(df) + >>> df_pl = pl.DataFrame(df) + >>> df_pa = pa.table(df) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(df): + ... 
return df.columns + + We can pass any supported library such as pandas, Polars, or PyArrow to `func`: + + >>> func(df_pd) + ['foo', 'bar', 'ham'] + >>> func(df_pl) + ['foo', 'bar', 'ham'] + >>> func(df_pa) + ['foo', 'bar', 'ham'] + """ + return super().columns + + @overload + def rows( + self, + *, + named: Literal[False], + ) -> list[tuple[Any, ...]]: ... + + @overload + def rows( + self, + *, + named: Literal[True], + ) -> list[dict[str, Any]]: ... + + @overload + def rows( + self, + *, + named: bool, + ) -> list[tuple[Any, ...]] | list[dict[str, Any]]: ... + + def rows( + self, + *, + named: bool = False, + ) -> list[tuple[Any, ...]] | list[dict[str, Any]]: + """ + Returns all data in the DataFrame as a list of rows of python-native values. + + Arguments: + named: By default, each row is returned as a tuple of values given + in the same order as the frame columns. Setting named=True will + return rows of dictionaries instead. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> df = {"foo": [1, 2, 3], "bar": [6.0, 7.0, 8.0], "ham": ["a", "b", "c"]} + >>> df_pd = pd.DataFrame(df) + >>> df_pl = pl.DataFrame(df) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(df, *, named): + ... return df.rows(named=named) + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd, named=False) + [(1, 6.0, 'a'), (2, 7.0, 'b'), (3, 8.0, 'c')] + >>> func(df_pd, named=True) + [{'foo': 1, 'bar': 6.0, 'ham': 'a'}, {'foo': 2, 'bar': 7.0, 'ham': 'b'}, {'foo': 3, 'bar': 8.0, 'ham': 'c'}] + >>> func(df_pl, named=False) + [(1, 6.0, 'a'), (2, 7.0, 'b'), (3, 8.0, 'c')] + >>> func(df_pl, named=True) + [{'foo': 1, 'bar': 6.0, 'ham': 'a'}, {'foo': 2, 'bar': 7.0, 'ham': 'b'}, {'foo': 3, 'bar': 8.0, 'ham': 'c'}] + """ + return self._compliant_frame.rows(named=named) # type: ignore[no-any-return] + + @overload + def iter_rows( + self, *, named: Literal[False], buffer_size: int = ... 
+ ) -> Iterator[tuple[Any, ...]]: ... + + @overload + def iter_rows( + self, *, named: Literal[True], buffer_size: int = ... + ) -> Iterator[dict[str, Any]]: ... + + @overload + def iter_rows( + self, *, named: bool, buffer_size: int = ... + ) -> Iterator[tuple[Any, ...]] | Iterator[dict[str, Any]]: ... + + def iter_rows( + self, *, named: bool = False, buffer_size: int = 512 + ) -> Iterator[tuple[Any, ...]] | Iterator[dict[str, Any]]: + """ + Returns an iterator over the DataFrame of rows of python-native values. + + Arguments: + named: By default, each row is returned as a tuple of values given + in the same order as the frame columns. Setting named=True will + return rows of dictionaries instead. + buffer_size: Determines the number of rows that are buffered + internally while iterating over the data. + See https://docs.pola.rs/api/python/stable/reference/dataframe/api/polars.DataFrame.iter_rows.html + + Notes: + cuDF doesn't support this method. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> df = {"foo": [1, 2, 3], "bar": [6.0, 7.0, 8.0], "ham": ["a", "b", "c"]} + >>> df_pd = pd.DataFrame(df) + >>> df_pl = pl.DataFrame(df) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(df, *, named): + ... 
return df.iter_rows(named=named) + + We can then pass either pandas or Polars to `func`: + + >>> [row for row in func(df_pd, named=False)] + [(1, 6.0, 'a'), (2, 7.0, 'b'), (3, 8.0, 'c')] + >>> [row for row in func(df_pd, named=True)] + [{'foo': 1, 'bar': 6.0, 'ham': 'a'}, {'foo': 2, 'bar': 7.0, 'ham': 'b'}, {'foo': 3, 'bar': 8.0, 'ham': 'c'}] + >>> [row for row in func(df_pl, named=False)] + [(1, 6.0, 'a'), (2, 7.0, 'b'), (3, 8.0, 'c')] + >>> [row for row in func(df_pl, named=True)] + [{'foo': 1, 'bar': 6.0, 'ham': 'a'}, {'foo': 2, 'bar': 7.0, 'ham': 'b'}, {'foo': 3, 'bar': 8.0, 'ham': 'c'}] + """ + return self._compliant_frame.iter_rows(named=named, buffer_size=buffer_size) # type: ignore[no-any-return] + + def with_columns( + self, *exprs: IntoExpr | Iterable[IntoExpr], **named_exprs: IntoExpr + ) -> Self: + r""" + Add columns to this DataFrame. + + Added columns will replace existing columns with the same name. + + Arguments: + *exprs: Column(s) to add, specified as positional arguments. + Accepts expression input. Strings are parsed as column names, other + non-expression inputs are parsed as literals. + + **named_exprs: Additional columns to add, specified as keyword arguments. + The columns will be renamed to the keyword used. + + Returns: + DataFrame: A new DataFrame with the columns added. + + Note: + Creating a new DataFrame using this method does not create a new copy of + existing data. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> df = { + ... "a": [1, 2, 3, 4], + ... "b": [0.5, 4, 10, 13], + ... "c": [True, True, False, True], + ... } + >>> df_pd = pd.DataFrame(df) + >>> df_pl = pl.DataFrame(df) + + Let's define a dataframe-agnostic function in which we pass an expression + to add it as a new column: + + >>> @nw.narwhalify + ... def func(df): + ... 
return df.with_columns((nw.col("a") * 2).alias("a*2")) + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd) + a b c a*2 + 0 1 0.5 True 2 + 1 2 4.0 True 4 + 2 3 10.0 False 6 + 3 4 13.0 True 8 + >>> func(df_pl) + shape: (4, 4) + ┌─────┬──────┬───────┬─────┐ + │ a ┆ b ┆ c ┆ a*2 │ + │ --- ┆ --- ┆ --- ┆ --- │ + │ i64 ┆ f64 ┆ bool ┆ i64 │ + ╞═════╪══════╪═══════╪═════╡ + │ 1 ┆ 0.5 ┆ true ┆ 2 │ + │ 2 ┆ 4.0 ┆ true ┆ 4 │ + │ 3 ┆ 10.0 ┆ false ┆ 6 │ + │ 4 ┆ 13.0 ┆ true ┆ 8 │ + └─────┴──────┴───────┴─────┘ + """ + return super().with_columns(*exprs, **named_exprs) + + def select( + self, + *exprs: IntoExpr | Iterable[IntoExpr], + **named_exprs: IntoExpr, + ) -> Self: + r""" + Select columns from this DataFrame. + + Arguments: + *exprs: Column(s) to select, specified as positional arguments. + Accepts expression input. Strings are parsed as column names, + other non-expression inputs are parsed as literals. + + **named_exprs: Additional columns to select, specified as keyword arguments. + The columns will be renamed to the keyword used. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> df = { + ... "foo": [1, 2, 3], + ... "bar": [6, 7, 8], + ... "ham": ["a", "b", "c"], + ... } + >>> df_pd = pd.DataFrame(df) + >>> df_pl = pl.DataFrame(df) + + Let's define a dataframe-agnostic function in which we pass the name of a + column to select that column. + + >>> @nw.narwhalify + ... def func(df): + ... return df.select("foo") + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd) + foo + 0 1 + 1 2 + 2 3 + >>> func(df_pl) + shape: (3, 1) + ┌─────┐ + │ foo │ + │ --- │ + │ i64 │ + ╞═════╡ + │ 1 │ + │ 2 │ + │ 3 │ + └─────┘ + + Multiple columns can be selected by passing a list of column names. + + >>> @nw.narwhalify + ... def func(df): + ... 
return df.select(["foo", "bar"]) + >>> func(df_pd) + foo bar + 0 1 6 + 1 2 7 + 2 3 8 + >>> func(df_pl) + shape: (3, 2) + ┌─────┬─────┐ + │ foo ┆ bar │ + │ --- ┆ --- │ + │ i64 ┆ i64 │ + ╞═════╪═════╡ + │ 1 ┆ 6 │ + │ 2 ┆ 7 │ + │ 3 ┆ 8 │ + └─────┴─────┘ + + Multiple columns can also be selected using positional arguments instead of a + list. Expressions are also accepted. + + >>> @nw.narwhalify + ... def func(df): + ... return df.select(nw.col("foo"), nw.col("bar") + 1) + >>> func(df_pd) + foo bar + 0 1 7 + 1 2 8 + 2 3 9 + >>> func(df_pl) + shape: (3, 2) + ┌─────┬─────┐ + │ foo ┆ bar │ + │ --- ┆ --- │ + │ i64 ┆ i64 │ + ╞═════╪═════╡ + │ 1 ┆ 7 │ + │ 2 ┆ 8 │ + │ 3 ┆ 9 │ + └─────┴─────┘ + + Use keyword arguments to easily name your expression inputs. + + >>> @nw.narwhalify + ... def func(df): + ... return df.select(threshold=nw.col("foo") * 2) + >>> func(df_pd) + threshold + 0 2 + 1 4 + 2 6 + >>> func(df_pl) + shape: (3, 1) + ┌───────────┐ + │ threshold │ + │ --- │ + │ i64 │ + ╞═══════════╡ + │ 2 │ + │ 4 │ + │ 6 │ + └───────────┘ + """ + return super().select(*exprs, **named_exprs) + + def rename(self, mapping: dict[str, str]) -> Self: + """ + Rename column names. + + Arguments: + mapping: Key value pairs that map from old name to new name. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> df = {"foo": [1, 2, 3], "bar": [6, 7, 8], "ham": ["a", "b", "c"]} + >>> df_pd = pd.DataFrame(df) + >>> df_pl = pl.DataFrame(df) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(df): + ... 
return df.rename({"foo": "apple"}) + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd) + apple bar ham + 0 1 6 a + 1 2 7 b + 2 3 8 c + >>> func(df_pl) + shape: (3, 3) + ┌───────┬─────┬─────┐ + │ apple ┆ bar ┆ ham │ + │ --- ┆ --- ┆ --- │ + │ i64 ┆ i64 ┆ str │ + ╞═══════╪═════╪═════╡ + │ 1 ┆ 6 ┆ a │ + │ 2 ┆ 7 ┆ b │ + │ 3 ┆ 8 ┆ c │ + └───────┴─────┴─────┘ + """ + return super().rename(mapping) + + def head(self, n: int = 5) -> Self: + """ + Get the first `n` rows. + + Arguments: + n: Number of rows to return. If a negative value is passed, return all rows + except the last `abs(n)`. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> df = { + ... "foo": [1, 2, 3, 4, 5], + ... "bar": [6, 7, 8, 9, 10], + ... "ham": ["a", "b", "c", "d", "e"], + ... } + >>> df_pd = pd.DataFrame(df) + >>> df_pl = pl.DataFrame(df) + + Let's define a dataframe-agnostic function that gets the first 3 rows. + + >>> @nw.narwhalify + ... def func(df): + ... return df.head(3) + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd) + foo bar ham + 0 1 6 a + 1 2 7 b + 2 3 8 c + >>> func(df_pl) + shape: (3, 3) + ┌─────┬─────┬─────┐ + │ foo ┆ bar ┆ ham │ + │ --- ┆ --- ┆ --- │ + │ i64 ┆ i64 ┆ str │ + ╞═════╪═════╪═════╡ + │ 1 ┆ 6 ┆ a │ + │ 2 ┆ 7 ┆ b │ + │ 3 ┆ 8 ┆ c │ + └─────┴─────┴─────┘ + """ + return super().head(n) + + def tail(self, n: int = 5) -> Self: + """ + Get the last `n` rows. + + Arguments: + n: Number of rows to return. If a negative value is passed, return all rows + except the first `abs(n)`. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> df = { + ... "foo": [1, 2, 3, 4, 5], + ... "bar": [6, 7, 8, 9, 10], + ... "ham": ["a", "b", "c", "d", "e"], + ... } + >>> df_pd = pd.DataFrame(df) + >>> df_pl = pl.DataFrame(df) + + Let's define a dataframe-agnostic function that gets the last 3 rows. + + >>> @nw.narwhalify + ... def func(df): + ... 
return df.tail(3) + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd) + foo bar ham + 2 3 8 c + 3 4 9 d + 4 5 10 e + >>> func(df_pl) + shape: (3, 3) + ┌─────┬─────┬─────┐ + │ foo ┆ bar ┆ ham │ + │ --- ┆ --- ┆ --- │ + │ i64 ┆ i64 ┆ str │ + ╞═════╪═════╪═════╡ + │ 3 ┆ 8 ┆ c │ + │ 4 ┆ 9 ┆ d │ + │ 5 ┆ 10 ┆ e │ + └─────┴─────┴─────┘ + """ + return super().tail(n) + + def drop(self, *columns: str | Iterable[str], strict: bool = True) -> Self: + """ + Remove columns from the dataframe. + + Arguments: + *columns: Names of the columns that should be removed from the dataframe. + strict: Validate that all column names exist in the schema and throw an + exception if a column name does not exist in the schema. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> data = {"foo": [1, 2, 3], "bar": [6.0, 7.0, 8.0], "ham": ["a", "b", "c"]} + >>> df_pd = pd.DataFrame(data) + >>> df_pl = pl.DataFrame(data) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(df): + ... return df.drop("ham") + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd) + foo bar + 0 1 6.0 + 1 2 7.0 + 2 3 8.0 + >>> func(df_pl) + shape: (3, 2) + ┌─────┬─────┐ + │ foo ┆ bar │ + │ --- ┆ --- │ + │ i64 ┆ f64 │ + ╞═════╪═════╡ + │ 1 ┆ 6.0 │ + │ 2 ┆ 7.0 │ + │ 3 ┆ 8.0 │ + └─────┴─────┘ + + Use positional arguments to drop multiple columns. + + >>> @nw.narwhalify + ... def func(df): + ... return df.drop("foo", "ham") + + >>> func(df_pd) + bar + 0 6.0 + 1 7.0 + 2 8.0 + >>> func(df_pl) + shape: (3, 1) + ┌─────┐ + │ bar │ + │ --- │ + │ f64 │ + ╞═════╡ + │ 6.0 │ + │ 7.0 │ + │ 8.0 │ + └─────┘ + """ + return super().drop(*flatten(columns), strict=strict) + + def unique( + self, + subset: str | list[str] | None = None, + *, + keep: Literal["any", "first", "last", "none"] = "any", + maintain_order: bool = False, + ) -> Self: + """ + Drop duplicate rows from this dataframe. 
+ + Arguments: + subset: Column name(s) to consider when identifying duplicate rows. + keep: {'first', 'last', 'any', 'none'} + Which of the duplicate rows to keep. + + * 'any': Does not give any guarantee of which row is kept. + This allows more optimizations. + * 'none': Don't keep duplicate rows. + * 'first': Keep first unique row. + * 'last': Keep last unique row. + maintain_order: Keep the same order as the original DataFrame. This is more + expensive to compute. Settings this to `True` blocks the possibility + to run on the streaming engine for polars. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> data = { + ... "foo": [1, 2, 3, 1], + ... "bar": ["a", "a", "a", "a"], + ... "ham": ["b", "b", "b", "b"], + ... } + >>> df_pd = pd.DataFrame(data) + >>> df_pl = pl.DataFrame(data) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(df): + ... return df.unique(["bar", "ham"]) + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd) + foo bar ham + 0 1 a b + >>> func(df_pl) + shape: (1, 3) + ┌─────┬─────┬─────┐ + │ foo ┆ bar ┆ ham │ + │ --- ┆ --- ┆ --- │ + │ i64 ┆ str ┆ str │ + ╞═════╪═════╪═════╡ + │ 1 ┆ a ┆ b │ + └─────┴─────┴─────┘ + """ + return super().unique(subset, keep=keep, maintain_order=maintain_order) + + def filter(self, *predicates: IntoExpr | Iterable[IntoExpr] | list[bool]) -> Self: + r""" + Filter the rows in the DataFrame based on one or more predicate expressions. + + The original order of the remaining rows is preserved. + + Arguments: + *predicates: Expression(s) that evaluates to a boolean Series. Can + also be a (single!) boolean list. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> df = { + ... "foo": [1, 2, 3], + ... "bar": [6, 7, 8], + ... "ham": ["a", "b", "c"], + ... 
} + >>> df_pd = pd.DataFrame(df) + >>> df_pl = pl.DataFrame(df) + + Let's define a dataframe-agnostic function in which we filter on + one condition. + + >>> @nw.narwhalify + ... def func(df): + ... return df.filter(nw.col("foo") > 1) + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd) + foo bar ham + 1 2 7 b + 2 3 8 c + >>> func(df_pl) + shape: (2, 3) + ┌─────┬─────┬─────┐ + │ foo ┆ bar ┆ ham │ + │ --- ┆ --- ┆ --- │ + │ i64 ┆ i64 ┆ str │ + ╞═════╪═════╪═════╡ + │ 2 ┆ 7 ┆ b │ + │ 3 ┆ 8 ┆ c │ + └─────┴─────┴─────┘ + + Filter on multiple conditions, combined with and/or operators: + + >>> @nw.narwhalify + ... def func(df): + ... return df.filter((nw.col("foo") < 3) & (nw.col("ham") == "a")) + >>> func(df_pd) + foo bar ham + 0 1 6 a + >>> func(df_pl) + shape: (1, 3) + ┌─────┬─────┬─────┐ + │ foo ┆ bar ┆ ham │ + │ --- ┆ --- ┆ --- │ + │ i64 ┆ i64 ┆ str │ + ╞═════╪═════╪═════╡ + │ 1 ┆ 6 ┆ a │ + └─────┴─────┴─────┘ + + >>> @nw.narwhalify + ... def func(df): + ... return df.filter((nw.col("foo") == 1) | (nw.col("ham") == "c")) + >>> func(df_pd) + foo bar ham + 0 1 6 a + 2 3 8 c + >>> func(df_pl) + shape: (2, 3) + ┌─────┬─────┬─────┐ + │ foo ┆ bar ┆ ham │ + │ --- ┆ --- ┆ --- │ + │ i64 ┆ i64 ┆ str │ + ╞═════╪═════╪═════╡ + │ 1 ┆ 6 ┆ a │ + │ 3 ┆ 8 ┆ c │ + └─────┴─────┴─────┘ + + Provide multiple filters using `*args` syntax: + + >>> @nw.narwhalify + ... def func(df): + ... dframe = df.filter( + ... nw.col("foo") <= 2, + ... ~nw.col("ham").is_in(["b", "c"]), + ... ) + ... return dframe + >>> func(df_pd) + foo bar ham + 0 1 6 a + >>> func(df_pl) + shape: (1, 3) + ┌─────┬─────┬─────┐ + │ foo ┆ bar ┆ ham │ + │ --- ┆ --- ┆ --- │ + │ i64 ┆ i64 ┆ str │ + ╞═════╪═════╪═════╡ + │ 1 ┆ 6 ┆ a │ + └─────┴─────┴─────┘ + """ + return super().filter(*predicates) + + def group_by(self, *keys: str | Iterable[str]) -> GroupBy[Self]: + r""" + Start a group by operation. + + Arguments: + *keys: Column(s) to group by. Accepts multiple columns names as a list. 
+ + Returns: + GroupBy: Object which can be used to perform aggregations. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> df = { + ... "a": ["a", "b", "a", "b", "c"], + ... "b": [1, 2, 1, 3, 3], + ... "c": [5, 4, 3, 2, 1], + ... } + >>> df_pd = pd.DataFrame(df) + >>> df_pl = pl.DataFrame(df) + + Let's define a dataframe-agnostic function in which we group by one column + and call `agg` to compute the grouped sum of another column. + + >>> @nw.narwhalify + ... def func(df): + ... return df.group_by("a").agg(nw.col("b").sum()).sort("a") + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd) + a b + 0 a 2 + 1 b 5 + 2 c 3 + >>> func(df_pl) + shape: (3, 2) + ┌─────┬─────┐ + │ a ┆ b │ + │ --- ┆ --- │ + │ str ┆ i64 │ + ╞═════╪═════╡ + │ a ┆ 2 │ + │ b ┆ 5 │ + │ c ┆ 3 │ + └─────┴─────┘ + + Group by multiple columns by passing a list of column names. + + >>> @nw.narwhalify + ... def func(df): + ... return df.group_by(["a", "b"]).agg(nw.max("c")).sort("a", "b") + >>> func(df_pd) + a b c + 0 a 1 5 + 1 b 2 4 + 2 b 3 2 + 3 c 3 1 + >>> func(df_pl) + shape: (4, 3) + ┌─────┬─────┬─────┐ + │ a ┆ b ┆ c │ + │ --- ┆ --- ┆ --- │ + │ str ┆ i64 ┆ i64 │ + ╞═════╪═════╪═════╡ + │ a ┆ 1 ┆ 5 │ + │ b ┆ 2 ┆ 4 │ + │ b ┆ 3 ┆ 2 │ + │ c ┆ 3 ┆ 1 │ + └─────┴─────┴─────┘ + """ + from narwhals.group_by import GroupBy + + return GroupBy(self, *flatten(keys)) + + def sort( + self, + by: str | Iterable[str], + *more_by: str, + descending: bool | Sequence[bool] = False, + ) -> Self: + r""" + Sort the dataframe by the given columns. + + Arguments: + by: Column(s) names to sort by. + + *more_by: Additional columns to sort by, specified as positional + arguments. + + descending: Sort in descending order. When sorting by multiple + columns, can be specified per column by passing a + sequence of booleans. + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> data = { + ... 
"a": [1, 2, None], + ... "b": [6.0, 5.0, 4.0], + ... "c": ["a", "c", "b"], + ... } + >>> df_pd = pd.DataFrame(data) + >>> df_pl = pl.DataFrame(data) + + Let's define a dataframe-agnostic function in which we sort by multiple + columns in different orders + + >>> @nw.narwhalify + ... def func(df): + ... return df.sort("c", "a", descending=[False, True]) + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd) + a b c + 0 1.0 6.0 a + 2 NaN 4.0 b + 1 2.0 5.0 c + >>> func(df_pl) + shape: (3, 3) + ┌──────┬─────┬─────┐ + │ a ┆ b ┆ c │ + │ --- ┆ --- ┆ --- │ + │ i64 ┆ f64 ┆ str │ + ╞══════╪═════╪═════╡ + │ 1 ┆ 6.0 ┆ a │ + │ null ┆ 4.0 ┆ b │ + │ 2 ┆ 5.0 ┆ c │ + └──────┴─────┴─────┘ + """ + return super().sort(by, *more_by, descending=descending) + + def join( + self, + other: Self, + on: str | list[str] | None = None, + how: Literal["inner", "left", "cross", "semi", "anti"] = "inner", + *, + left_on: str | list[str] | None = None, + right_on: str | list[str] | None = None, + suffix: str = "_right", + ) -> Self: + r""" + Join in SQL-like fashion. + + Arguments: + other: Lazy DataFrame to join with. + on: Name(s) of the join columns in both DataFrames. If set, `left_on` and + `right_on` should be None. + how: Join strategy. + + * *inner*: Returns rows that have matching values in both tables. + * *cross*: Returns the Cartesian product of rows from both tables. + * *semi*: Filter rows that have a match in the right table. + * *anti*: Filter rows that do not have a match in the right table. + left_on: Join column of the left DataFrame. + right_on: Join column of the right DataFrame. + suffix: Suffix to append to columns with a duplicate name. + + Returns: + A new joined DataFrame + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> data = { + ... "foo": [1, 2, 3], + ... "bar": [6.0, 7.0, 8.0], + ... "ham": ["a", "b", "c"], + ... } + >>> data_other = { + ... "apple": ["x", "y", "z"], + ... 
"ham": ["a", "b", "d"], + ... } + + >>> df_pd = pd.DataFrame(data) + >>> other_pd = pd.DataFrame(data_other) + + >>> df_pl = pl.DataFrame(data) + >>> other_pl = pl.DataFrame(data_other) + + Let's define a dataframe-agnostic function in which we join over "ham" column: + + >>> @nw.narwhalify + ... def join_on_ham(df, other_any): + ... return df.join(other_any, left_on="ham", right_on="ham") + + We can now pass either pandas or Polars to the function: + + >>> join_on_ham(df_pd, other_pd) + foo bar ham apple + 0 1 6.0 a x + 1 2 7.0 b y + + >>> join_on_ham(df_pl, other_pl) + shape: (2, 4) + ┌─────┬─────┬─────┬───────┐ + │ foo ┆ bar ┆ ham ┆ apple │ + │ --- ┆ --- ┆ --- ┆ --- │ + │ i64 ┆ f64 ┆ str ┆ str │ + ╞═════╪═════╪═════╪═══════╡ + │ 1 ┆ 6.0 ┆ a ┆ x │ + │ 2 ┆ 7.0 ┆ b ┆ y │ + └─────┴─────┴─────┴───────┘ + """ + return super().join( + other, how=how, left_on=left_on, right_on=right_on, on=on, suffix=suffix + ) + + def join_asof( + self, + other: Self, + *, + left_on: str | None = None, + right_on: str | None = None, + on: str | None = None, + by_left: str | list[str] | None = None, + by_right: str | list[str] | None = None, + by: str | list[str] | None = None, + strategy: Literal["backward", "forward", "nearest"] = "backward", + ) -> Self: + """ + Perform an asof join. + + This is similar to a left-join except that we match on nearest key rather than equal keys. + + Both DataFrames must be sorted by the asof_join key. + + Arguments: + other: DataFrame to join with. + + left_on: Name(s) of the left join column(s). + + right_on: Name(s) of the right join column(s). + + on: Join column of both DataFrames. If set, left_on and right_on should be None. + + by_left: join on these columns before doing asof join + + by_right: join on these columns before doing asof join + + by: join on these columns before doing asof join + + strategy: Join strategy. The default is "backward". 
+ + * *backward*: selects the last row in the right DataFrame whose "on" key is less than or equal to the left's key. + * *forward*: selects the first row in the right DataFrame whose "on" key is greater than or equal to the left's key. + * *nearest*: search selects the last row in the right DataFrame whose value is nearest to the left's key. + + Returns: + A new joined DataFrame + + Examples: + >>> from datetime import datetime + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> data_gdp = { + ... "datetime": [ + ... datetime(2016, 1, 1), + ... datetime(2017, 1, 1), + ... datetime(2018, 1, 1), + ... datetime(2019, 1, 1), + ... datetime(2020, 1, 1), + ... ], + ... "gdp": [4164, 4411, 4566, 4696, 4827], + ... } + >>> data_population = { + ... "datetime": [ + ... datetime(2016, 3, 1), + ... datetime(2018, 8, 1), + ... datetime(2019, 1, 1), + ... ], + ... "population": [82.19, 82.66, 83.12], + ... } + >>> gdp_pd = pd.DataFrame(data_gdp) + >>> population_pd = pd.DataFrame(data_population) + + >>> gdp_pl = pl.DataFrame(data_gdp).sort("datetime") + >>> population_pl = pl.DataFrame(data_population).sort("datetime") + + Let's define a dataframe-agnostic function in which we join over "datetime" column: + + >>> @nw.narwhalify + ... def join_asof_datetime(df, other_any, strategy): + ... 
return df.join_asof(other_any, on="datetime", strategy=strategy) + + We can now pass either pandas or Polars to the function: + + >>> join_asof_datetime(population_pd, gdp_pd, strategy="backward") + datetime population gdp + 0 2016-03-01 82.19 4164 + 1 2018-08-01 82.66 4566 + 2 2019-01-01 83.12 4696 + + >>> join_asof_datetime(population_pl, gdp_pl, strategy="backward") + shape: (3, 3) + ┌─────────────────────┬────────────┬──────┐ + │ datetime ┆ population ┆ gdp │ + │ --- ┆ --- ┆ --- │ + │ datetime[μs] ┆ f64 ┆ i64 │ + ╞═════════════════════╪════════════╪══════╡ + │ 2016-03-01 00:00:00 ┆ 82.19 ┆ 4164 │ + │ 2018-08-01 00:00:00 ┆ 82.66 ┆ 4566 │ + │ 2019-01-01 00:00:00 ┆ 83.12 ┆ 4696 │ + └─────────────────────┴────────────┴──────┘ + + Here is a real-world times-series example that uses `by` argument. + + >>> from datetime import datetime + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> data_quotes = { + ... "datetime": [ + ... datetime(2016, 5, 25, 13, 30, 0, 23), + ... datetime(2016, 5, 25, 13, 30, 0, 23), + ... datetime(2016, 5, 25, 13, 30, 0, 30), + ... datetime(2016, 5, 25, 13, 30, 0, 41), + ... datetime(2016, 5, 25, 13, 30, 0, 48), + ... datetime(2016, 5, 25, 13, 30, 0, 49), + ... datetime(2016, 5, 25, 13, 30, 0, 72), + ... datetime(2016, 5, 25, 13, 30, 0, 75), + ... ], + ... "ticker": [ + ... "GOOG", + ... "MSFT", + ... "MSFT", + ... "MSFT", + ... "GOOG", + ... "AAPL", + ... "GOOG", + ... "MSFT", + ... ], + ... "bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01], + ... "ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03], + ... } + >>> data_trades = { + ... "datetime": [ + ... datetime(2016, 5, 25, 13, 30, 0, 23), + ... datetime(2016, 5, 25, 13, 30, 0, 38), + ... datetime(2016, 5, 25, 13, 30, 0, 48), + ... datetime(2016, 5, 25, 13, 30, 0, 48), + ... datetime(2016, 5, 25, 13, 30, 0, 48), + ... ], + ... "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"], + ... 
"price": [51.95, 51.95, 720.77, 720.92, 98.0], + ... "quantity": [75, 155, 100, 100, 100], + ... } + >>> quotes_pd = pd.DataFrame(data_quotes) + >>> trades_pd = pd.DataFrame(data_trades) + >>> quotes_pl = pl.DataFrame(data_quotes).sort("datetime") + >>> trades_pl = pl.DataFrame(data_trades).sort("datetime") + + Let's define a dataframe-agnostic function in which we join over "datetime" and by "ticker" columns: + + >>> @nw.narwhalify + ... def join_asof_datetime_by_ticker(df, other_any): + ... return df.join_asof(other_any, on="datetime", by="ticker") + + We can now pass either pandas or Polars to the function: + + >>> join_asof_datetime_by_ticker(trades_pd, quotes_pd) + datetime ticker price quantity bid ask + 0 2016-05-25 13:30:00.000023 MSFT 51.95 75 51.95 51.96 + 1 2016-05-25 13:30:00.000038 MSFT 51.95 155 51.97 51.98 + 2 2016-05-25 13:30:00.000048 GOOG 720.77 100 720.50 720.93 + 3 2016-05-25 13:30:00.000048 GOOG 720.92 100 720.50 720.93 + 4 2016-05-25 13:30:00.000048 AAPL 98.00 100 NaN NaN + + >>> join_asof_datetime_by_ticker(trades_pl, quotes_pl) + shape: (5, 6) + ┌────────────────────────────┬────────┬────────┬──────────┬───────┬────────┐ + │ datetime ┆ ticker ┆ price ┆ quantity ┆ bid ┆ ask │ + │ --- ┆ --- ┆ --- ┆ --- ┆ --- ┆ --- │ + │ datetime[μs] ┆ str ┆ f64 ┆ i64 ┆ f64 ┆ f64 │ + ╞════════════════════════════╪════════╪════════╪══════════╪═══════╪════════╡ + │ 2016-05-25 13:30:00.000023 ┆ MSFT ┆ 51.95 ┆ 75 ┆ 51.95 ┆ 51.96 │ + │ 2016-05-25 13:30:00.000038 ┆ MSFT ┆ 51.95 ┆ 155 ┆ 51.97 ┆ 51.98 │ + │ 2016-05-25 13:30:00.000048 ┆ GOOG ┆ 720.77 ┆ 100 ┆ 720.5 ┆ 720.93 │ + │ 2016-05-25 13:30:00.000048 ┆ GOOG ┆ 720.92 ┆ 100 ┆ 720.5 ┆ 720.93 │ + │ 2016-05-25 13:30:00.000048 ┆ AAPL ┆ 98.0 ┆ 100 ┆ null ┆ null │ + └────────────────────────────┴────────┴────────┴──────────┴───────┴────────┘ + """ + return super().join_asof( + other, + left_on=left_on, + right_on=right_on, + on=on, + by_left=by_left, + by_right=by_right, + by=by, + strategy=strategy, + ) + + # --- 
descriptive --- + def is_duplicated(self: Self) -> Series: + r""" + Get a mask of all duplicated rows in this DataFrame. + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> df_pd = pd.DataFrame( + ... { + ... "a": [1, 2, 3, 1], + ... "b": ["x", "y", "z", "x"], + ... } + ... ) + >>> df_pl = pl.DataFrame( + ... { + ... "a": [1, 2, 3, 1], + ... "b": ["x", "y", "z", "x"], + ... } + ... ) + + Let's define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(df): + ... return df.is_duplicated() + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd) # doctest: +NORMALIZE_WHITESPACE + 0 True + 1 False + 2 False + 3 True + dtype: bool + + >>> func(df_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (4,) + Series: '' [bool] + [ + true + false + false + true + ] + """ + from narwhals.series import Series + + return Series( + self._compliant_frame.is_duplicated(), + level=self._level, + ) + + def is_empty(self: Self) -> bool: + r""" + Check if the dataframe is empty. + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + + Let's define a dataframe-agnostic function that filters rows in which "foo" + values are greater than 10, and then checks if the result is empty or not: + + >>> @nw.narwhalify + ... def func(df): + ... return df.filter(nw.col("foo") > 10).is_empty() + + We can then pass either pandas or Polars to `func`: + + >>> df_pd = pd.DataFrame({"foo": [1, 2, 3], "bar": [4, 5, 6]}) + >>> df_pl = pl.DataFrame({"foo": [1, 2, 3], "bar": [4, 5, 6]}) + >>> func(df_pd), func(df_pl) + (True, True) + + >>> df_pd = pd.DataFrame({"foo": [100, 2, 3], "bar": [4, 5, 6]}) + >>> df_pl = pl.DataFrame({"foo": [100, 2, 3], "bar": [4, 5, 6]}) + >>> func(df_pd), func(df_pl) + (False, False) + """ + return self._compliant_frame.is_empty() # type: ignore[no-any-return] + + def is_unique(self: Self) -> Series: + r""" + Get a mask of all unique rows in this DataFrame. 
+ + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> df_pd = pd.DataFrame( + ... { + ... "a": [1, 2, 3, 1], + ... "b": ["x", "y", "z", "x"], + ... } + ... ) + >>> df_pl = pl.DataFrame( + ... { + ... "a": [1, 2, 3, 1], + ... "b": ["x", "y", "z", "x"], + ... } + ... ) + + Let's define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(df): + ... return df.is_unique() + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd) # doctest: +NORMALIZE_WHITESPACE + 0 False + 1 True + 2 True + 3 False + dtype: bool + + >>> func(df_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (4,) + Series: '' [bool] + [ + false + true + true + false + ] + """ + from narwhals.series import Series + + return Series( + self._compliant_frame.is_unique(), + level=self._level, + ) + + def null_count(self: Self) -> Self: + r""" + Create a new DataFrame that shows the null counts per column. + + Notes: + pandas and Polars handle null values differently. Polars distinguishes + between NaN and Null, whereas pandas doesn't. + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> df_pd = pd.DataFrame( + ... { + ... "foo": [1, None, 3], + ... "bar": [6, 7, None], + ... "ham": ["a", "b", "c"], + ... } + ... ) + >>> df_pl = pl.DataFrame( + ... { + ... "foo": [1, None, 3], + ... "bar": [6, 7, None], + ... "ham": ["a", "b", "c"], + ... } + ... ) + + Let's define a dataframe-agnostic function that returns the null count of + each columns: + + >>> @nw.narwhalify + ... def func(df): + ... 
return df.null_count() + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd) + foo bar ham + 0 1 1 0 + + >>> func(df_pl) + shape: (1, 3) + ┌─────┬─────┬─────┐ + │ foo ┆ bar ┆ ham │ + │ --- ┆ --- ┆ --- │ + │ u32 ┆ u32 ┆ u32 │ + ╞═════╪═════╪═════╡ + │ 1 ┆ 1 ┆ 0 │ + └─────┴─────┴─────┘ + """ + return self._from_compliant_dataframe(self._compliant_frame.null_count()) + + def item(self: Self, row: int | None = None, column: int | str | None = None) -> Any: + r""" + Return the DataFrame as a scalar, or return the element at the given row/column. + + Notes: + If row/col not provided, this is equivalent to df[0,0], with a check that the shape is (1,1). + With row/col, this is equivalent to df[row,col]. + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> data = {"a": [1, 2, 3], "b": [4, 5, 6]} + >>> df_pd = pd.DataFrame(data) + >>> df_pl = pl.DataFrame(data) + + Let's define a dataframe-agnostic function that returns item at given row/column + + >>> @nw.narwhalify + ... def func(df, row, column): + ... return df.item(row, column) + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd, 1, 1), func(df_pd, 2, "b") # doctest:+SKIP + (5, 6) + + >>> func(df_pl, 1, 1), func(df_pl, 2, "b") + (5, 6) + """ + return self._compliant_frame.item(row=row, column=column) + + def clone(self) -> Self: + r""" + Create a copy of this DataFrame. + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> data = {"a": [1, 2], "b": [3, 4]} + >>> df_pd = pd.DataFrame(data) + >>> df_pl = pl.DataFrame(data) + + Let's define a dataframe-agnostic function in which we clone the DataFrame: + + >>> @nw.narwhalify + ... def func(df): + ... 
return df.clone() + + >>> func(df_pd) + a b + 0 1 3 + 1 2 4 + + >>> func(df_pl) + shape: (2, 2) + ┌─────┬─────┐ + │ a ┆ b │ + │ --- ┆ --- │ + │ i64 ┆ i64 │ + ╞═════╪═════╡ + │ 1 ┆ 3 │ + │ 2 ┆ 4 │ + └─────┴─────┘ + """ + return super().clone() + + def gather_every(self: Self, n: int, offset: int = 0) -> Self: + r""" + Take every nth row in the DataFrame and return as a new DataFrame. + + Arguments: + n: Gather every *n*-th row. + offset: Starting index. + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> data = {"a": [1, 2, 3, 4], "b": [5, 6, 7, 8]} + >>> df_pd = pd.DataFrame(data) + >>> df_pl = pl.DataFrame(data) + + Let's define a dataframe-agnostic function in which gather every 2 rows, + starting from a offset of 1: + + >>> @nw.narwhalify + ... def func(df): + ... return df.gather_every(n=2, offset=1) + + >>> func(df_pd) + a b + 1 2 6 + 3 4 8 + + >>> func(df_pl) + shape: (2, 2) + ┌─────┬─────┐ + │ a ┆ b │ + │ --- ┆ --- │ + │ i64 ┆ i64 │ + ╞═════╪═════╡ + │ 2 ┆ 6 │ + │ 4 ┆ 8 │ + └─────┴─────┘ + """ + return super().gather_every(n=n, offset=offset) + + def to_arrow(self: Self) -> pa.Table: + r""" + Convert to arrow table. + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> data = {"foo": [1, 2, 3], "bar": ["a", "b", "c"]} + >>> df_pd = pd.DataFrame(data) + >>> df_pl = pl.DataFrame(data) + + Let's define a dataframe-agnostic function that converts to arrow table: + + >>> @nw.narwhalify + ... def func(df): + ... 
return df.to_arrow()

        >>> func(df_pd)  # doctest:+SKIP
        pyarrow.Table
        foo: int64
        bar: string
        ----
        foo: [[1,2,3]]
        bar: [["a","b","c"]]

        >>> func(df_pl)  # doctest:+NORMALIZE_WHITESPACE
        pyarrow.Table
        foo: int64
        bar: large_string
        ----
        foo: [[1,2,3]]
        bar: [["a","b","c"]]
        """
        return self._compliant_frame.to_arrow()

    def sample(
        self: Self,
        n: int | None = None,
        *,
        fraction: float | None = None,
        with_replacement: bool = False,
        seed: int | None = None,
    ) -> Self:
        r"""
        Sample from this DataFrame.

        Arguments:
            n: Number of items to return. Cannot be used with fraction.
            fraction: Fraction of items to return. Cannot be used with n.
            with_replacement: Allow values to be sampled more than once.
            seed: Seed for the random number generator. If set to None (default), a random
                seed is generated for each sample operation.

        Notes:
            The results may not be consistent across libraries.

        Examples:
            >>> import narwhals as nw
            >>> import pandas as pd
            >>> import polars as pl
            >>> data = {"a": [1, 2, 3, 4], "b": ["x", "y", "x", "y"]}
            >>> df_pd = pd.DataFrame(data)
            >>> df_pl = pl.DataFrame(data)

            We define a library agnostic function:

            >>> @nw.narwhalify
            ... def func(df):
            ...     return df.sample(n=2, seed=123)

            We can then pass either pandas or Polars to `func`:

            >>> func(df_pd)
               a  b
            3  4  y
            0  1  x
            >>> func(df_pl)
            shape: (2, 2)
            ┌─────┬─────┐
            │ a   ┆ b   │
            │ --- ┆ --- │
            │ i64 ┆ str │
            ╞═════╪═════╡
            │ 2   ┆ y   │
            │ 3   ┆ x   │
            └─────┴─────┘

            As you can see, by using the same seed, the result will be consistent within
            the same backend, but not necessarily across different backends.
        """
        # Delegate sampling to the backend-specific ("compliant") frame, then
        # re-wrap so the caller keeps getting a Narwhals DataFrame.
        return self._from_compliant_dataframe(
            self._compliant_frame.sample(
                n=n, fraction=fraction, with_replacement=with_replacement, seed=seed
            )
        )


class LazyFrame(BaseFrame[FrameT]):
    """
    Narwhals LazyFrame, backed by a native lazyframe.

    The native dataframe might be pandas.DataFrame, polars.LazyFrame, ...

    This class is not meant to be instantiated directly - instead, use
    `narwhals.from_native`.
    """

    def __init__(
        self,
        df: Any,
        *,
        level: Literal["full", "interchange"],
    ) -> None:
        # `level` records how much of the Narwhals API the wrapped object
        # supports: "full" for complete backends, "interchange" for objects
        # that only implement the dataframe interchange protocol.
        self._level = level
        if hasattr(df, "__narwhals_lazyframe__"):
            # Objects implementing the `__narwhals_lazyframe__` protocol hand
            # us their backend-specific ("compliant") frame to delegate to.
            self._compliant_frame: Any = df.__narwhals_lazyframe__()
        else:  # pragma: no cover
            msg = f"Expected Polars LazyFrame or an object that implements `__narwhals_lazyframe__`, got: {type(df)}"
            raise AssertionError(msg)

    def __repr__(self) -> str:  # pragma: no cover
        # Draw a small box around a fixed banner instead of materializing any
        # data (collecting a LazyFrame just for repr could be expensive).
        # NOTE(review): runs of spaces were collapsed in this source; the
        # `header` literal is presumably right-padded so the box borders match
        # the width of the "Use `.to_native` ..." row — confirm against
        # upstream before relying on the exact output.
        header = " Narwhals LazyFrame "
        length = len(header)
        return (
            "┌"
            + "─" * length
            + "┐\n"
            + f"|{header}|\n"
            + "| Use `.to_native` to see native output |\n"
            + "└"
            + "─" * length
            + "┘"
        )

    def __getitem__(self, item: str | slice) -> Series | Self:
        # A LazyFrame has no materialized rows, so positional/label access is
        # deliberately unsupported; collect() first to slice.
        msg = "Slicing is not supported on LazyFrame"
        raise TypeError(msg)

    def collect(self) -> DataFrame[Any]:
        r"""
        Materialize this LazyFrame into a DataFrame.

        Returns:
            DataFrame

        Examples:
            >>> import narwhals as nw
            >>> import polars as pl
            >>> lf_pl = pl.LazyFrame(
            ...     {
            ...         "a": ["a", "b", "a", "b", "b", "c"],
            ...         "b": [1, 2, 3, 4, 5, 6],
            ...         "c": [6, 5, 4, 3, 2, 1],
            ...     }
            ... )
            >>> lf = nw.from_native(lf_pl)
            >>> lf
            ┌───────────────────────────────────────┐
            | Narwhals LazyFrame |
            | Use `.to_native` to see native output |
            └───────────────────────────────────────┘
            >>> df = lf.group_by("a").agg(nw.all().sum()).collect()
            >>> df.to_native().sort("a")
            shape: (3, 3)
            ┌─────┬─────┬─────┐
            │ a   ┆ b   ┆ c   │
            │ --- ┆ --- ┆ --- │
            │ str ┆ i64 ┆ i64 │
            ╞═════╪═════╪═════╡
            │ a   ┆ 4   ┆ 10  │
            │ b   ┆ 11  ┆ 10  │
            │ c   ┆ 6   ┆ 1   │
            └─────┴─────┴─────┘
        """
        # Collection happens on the compliant frame; the result is eager, so
        # it is wrapped in a (strongly-typed) Narwhals DataFrame.
        return DataFrame(
            self._compliant_frame.collect(),
            level=self._level,
        )

    def to_native(self) -> FrameT:
        """
        Convert Narwhals LazyFrame to native one.
+ + Returns: + Object of class that user started with. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import pyarrow as pa + >>> import narwhals as nw + >>> data = {"foo": [1, 2, 3], "bar": [6.0, 7.0, 8.0], "ham": ["a", "b", "c"]} + >>> df_pd = pd.DataFrame(data) + >>> df_pl = pl.LazyFrame(data) + >>> df_pa = pa.table(data) + + Calling `to_native` on a Narwhals DataFrame returns the native object: + + >>> nw.from_native(df_pd).lazy().to_native() + foo bar ham + 0 1 6.0 a + 1 2 7.0 b + 2 3 8.0 c + >>> nw.from_native(df_pl).to_native().collect() + shape: (3, 3) + ┌─────┬─────┬─────┐ + │ foo ┆ bar ┆ ham │ + │ --- ┆ --- ┆ --- │ + │ i64 ┆ f64 ┆ str │ + ╞═════╪═════╪═════╡ + │ 1 ┆ 6.0 ┆ a │ + │ 2 ┆ 7.0 ┆ b │ + │ 3 ┆ 8.0 ┆ c │ + └─────┴─────┴─────┘ + """ + + return to_native(narwhals_object=self, strict=True) + + # inherited + def pipe(self, function: Callable[[Any], Self], *args: Any, **kwargs: Any) -> Self: + """ + Pipe function call. + + Examples: + >>> import polars as pl + >>> import pandas as pd + >>> import narwhals as nw + >>> data = {"a": [1, 2, 3], "ba": [4, 5, 6]} + >>> df_pd = pd.DataFrame(data) + >>> df_pl = pl.LazyFrame(data) + + Let's define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(df): + ... return df.pipe(lambda _df: _df.select("a")) + + We can then pass either pandas or Polars: + + >>> func(df_pd) + a + 0 1 + 1 2 + 2 3 + >>> func(df_pl).collect() + shape: (3, 1) + ┌─────┐ + │ a │ + │ --- │ + │ i64 │ + ╞═════╡ + │ 1 │ + │ 2 │ + │ 3 │ + └─────┘ + """ + return super().pipe(function, *args, **kwargs) + + def drop_nulls(self: Self, subset: str | list[str] | None = None) -> Self: + """ + Drop null values. + + Arguments: + subset: Column name(s) for which null values are considered. If set to None + (default), use all columns. + + Notes: + pandas and Polars handle null values differently. Polars distinguishes + between NaN and Null, whereas pandas doesn't. 
+ + Examples: + >>> import polars as pl + >>> import pandas as pd + >>> import narwhals as nw + >>> data = {"a": [1.0, 2.0, None], "ba": [1.0, None, 2.0]} + >>> df_pd = pd.DataFrame(data) + >>> df_pl = pl.LazyFrame(data) + + Let's define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(df): + ... return df.drop_nulls() + + We can then pass either pandas or Polars: + + >>> func(df_pd) + a ba + 0 1.0 1.0 + >>> func(df_pl).collect() + shape: (1, 2) + ┌─────┬─────┐ + │ a ┆ ba │ + │ --- ┆ --- │ + │ f64 ┆ f64 │ + ╞═════╪═════╡ + │ 1.0 ┆ 1.0 │ + └─────┴─────┘ + """ + return super().drop_nulls(subset=subset) + + def with_row_index(self, name: str = "index") -> Self: + """ + Insert column which enumerates rows. + + Examples: + >>> import polars as pl + >>> import pandas as pd + >>> import narwhals as nw + >>> data = {"a": [1, 2, 3], "b": [4, 5, 6]} + >>> df_pd = pd.DataFrame(data) + >>> df_pl = pl.LazyFrame(data) + + Let's define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(df): + ... return df.with_row_index() + + We can then pass either pandas or Polars: + + >>> func(df_pd) + index a b + 0 0 1 4 + 1 1 2 5 + 2 2 3 6 + >>> func(df_pl).collect() + shape: (3, 3) + ┌───────┬─────┬─────┐ + │ index ┆ a ┆ b │ + │ --- ┆ --- ┆ --- │ + │ u32 ┆ i64 ┆ i64 │ + ╞═══════╪═════╪═════╡ + │ 0 ┆ 1 ┆ 4 │ + │ 1 ┆ 2 ┆ 5 │ + │ 2 ┆ 3 ┆ 6 │ + └───────┴─────┴─────┘ + """ + return super().with_row_index(name) + + @property + def schema(self) -> Schema: + r""" + Get an ordered mapping of column names to their data type. + + Examples: + >>> import polars as pl + >>> import narwhals as nw + >>> lf_pl = pl.LazyFrame( + ... { + ... "foo": [1, 2, 3], + ... "bar": [6.0, 7.0, 8.0], + ... "ham": ["a", "b", "c"], + ... } + ... 
)
            >>> lf = nw.from_native(lf_pl)
            >>> lf.schema  # doctest:+SKIP
            Schema({'foo': Int64, 'bar': Float64, 'ham': String})
        """
        return super().schema

    def collect_schema(self: Self) -> Schema:
        r"""
        Get an ordered mapping of column names to their data type.

        Examples:
            >>> import polars as pl
            >>> import narwhals as nw
            >>> lf_pl = pl.LazyFrame(
            ...     {
            ...         "foo": [1, 2, 3],
            ...         "bar": [6.0, 7.0, 8.0],
            ...         "ham": ["a", "b", "c"],
            ...     }
            ... )
            >>> lf = nw.from_native(lf_pl)
            >>> lf.collect_schema()  # doctest:+SKIP
            Schema({'foo': Int64, 'bar': Float64, 'ham': String})
        """
        return super().collect_schema()

    @property
    def columns(self) -> list[str]:
        r"""
        Get column names.

        Examples:
            >>> import pandas as pd
            >>> import polars as pl
            >>> import narwhals as nw
            >>> df = {"foo": [1, 2, 3], "bar": [6.0, 7.0, 8.0], "ham": ["a", "b", "c"]}
            >>> df_pd = pd.DataFrame(df)
            >>> lf_pl = pl.LazyFrame(df)

            We define a library agnostic function:

            >>> @nw.narwhalify
            ... def func(df):
            ...     return df.columns

            We can then pass either pandas or Polars to `func`:

            >>> func(df_pd)
            ['foo', 'bar', 'ham']
            >>> func(lf_pl)  # doctest: +SKIP
            ['foo', 'bar', 'ham']
        """
        return super().columns

    def with_columns(
        self, *exprs: IntoExpr | Iterable[IntoExpr], **named_exprs: IntoExpr
    ) -> Self:
        r"""
        Add columns to this LazyFrame.

        Added columns will replace existing columns with the same name.

        Arguments:
            *exprs: Column(s) to add, specified as positional arguments.
                Accepts expression input. Strings are parsed as column names, other
                non-expression inputs are parsed as literals.

            **named_exprs: Additional columns to add, specified as keyword arguments.
                The columns will be renamed to the keyword used.

        Returns:
            LazyFrame: A new LazyFrame with the columns added.

        Note:
            Creating a new LazyFrame using this method does not create a new copy of
            existing data.
+ + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> df = { + ... "a": [1, 2, 3, 4], + ... "b": [0.5, 4, 10, 13], + ... "c": [True, True, False, True], + ... } + >>> df_pd = pd.DataFrame(df) + >>> df_pl = pl.DataFrame(df) + >>> lf_pl = pl.LazyFrame(df) + + Let's define a dataframe-agnostic function in which we pass an expression + to add it as a new column: + + >>> @nw.narwhalify + ... def func(df): + ... return df.with_columns((nw.col("a") * 2).alias("2a")) + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd) + a b c 2a + 0 1 0.5 True 2 + 1 2 4.0 True 4 + 2 3 10.0 False 6 + 3 4 13.0 True 8 + >>> func(df_pl) + shape: (4, 4) + ┌─────┬──────┬───────┬─────┐ + │ a ┆ b ┆ c ┆ 2a │ + │ --- ┆ --- ┆ --- ┆ --- │ + │ i64 ┆ f64 ┆ bool ┆ i64 │ + ╞═════╪══════╪═══════╪═════╡ + │ 1 ┆ 0.5 ┆ true ┆ 2 │ + │ 2 ┆ 4.0 ┆ true ┆ 4 │ + │ 3 ┆ 10.0 ┆ false ┆ 6 │ + │ 4 ┆ 13.0 ┆ true ┆ 8 │ + └─────┴──────┴───────┴─────┘ + >>> func(lf_pl).collect() + shape: (4, 4) + ┌─────┬──────┬───────┬─────┐ + │ a ┆ b ┆ c ┆ 2a │ + │ --- ┆ --- ┆ --- ┆ --- │ + │ i64 ┆ f64 ┆ bool ┆ i64 │ + ╞═════╪══════╪═══════╪═════╡ + │ 1 ┆ 0.5 ┆ true ┆ 2 │ + │ 2 ┆ 4.0 ┆ true ┆ 4 │ + │ 3 ┆ 10.0 ┆ false ┆ 6 │ + │ 4 ┆ 13.0 ┆ true ┆ 8 │ + └─────┴──────┴───────┴─────┘ + """ + return super().with_columns(*exprs, **named_exprs) + + def select( + self, + *exprs: IntoExpr | Iterable[IntoExpr], + **named_exprs: IntoExpr, + ) -> Self: + r""" + Select columns from this LazyFrame. + + Arguments: + *exprs: Column(s) to select, specified as positional arguments. + Accepts expression input. Strings are parsed as column names. + **named_exprs: Additional columns to select, specified as keyword arguments. + The columns will be renamed to the keyword used. + + Notes: + If you'd like to select a column whose name isn't a string (for example, + if you're working with pandas) then you should explicitly use `nw.col` instead + of just passing the column name. 
For example, to select a column named + `0` use `df.select(nw.col(0))`, not `df.select(0)`. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> df = { + ... "foo": [1, 2, 3], + ... "bar": [6, 7, 8], + ... "ham": ["a", "b", "c"], + ... } + >>> df_pd = pd.DataFrame(df) + >>> df_pl = pl.DataFrame(df) + >>> lf_pl = pl.LazyFrame(df) + + Let's define a dataframe-agnostic function in which we pass the name of a + column to select that column. + + >>> @nw.narwhalify + ... def func(df): + ... return df.select("foo") + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd) + foo + 0 1 + 1 2 + 2 3 + >>> func(df_pl) + shape: (3, 1) + ┌─────┐ + │ foo │ + │ --- │ + │ i64 │ + ╞═════╡ + │ 1 │ + │ 2 │ + │ 3 │ + └─────┘ + >>> func(lf_pl).collect() + shape: (3, 1) + ┌─────┐ + │ foo │ + │ --- │ + │ i64 │ + ╞═════╡ + │ 1 │ + │ 2 │ + │ 3 │ + └─────┘ + + Multiple columns can be selected by passing a list of column names. + + >>> @nw.narwhalify + ... def func(df): + ... return df.select(["foo", "bar"]) + >>> func(df_pd) + foo bar + 0 1 6 + 1 2 7 + 2 3 8 + >>> func(df_pl) + shape: (3, 2) + ┌─────┬─────┐ + │ foo ┆ bar │ + │ --- ┆ --- │ + │ i64 ┆ i64 │ + ╞═════╪═════╡ + │ 1 ┆ 6 │ + │ 2 ┆ 7 │ + │ 3 ┆ 8 │ + └─────┴─────┘ + >>> func(lf_pl).collect() + shape: (3, 2) + ┌─────┬─────┐ + │ foo ┆ bar │ + │ --- ┆ --- │ + │ i64 ┆ i64 │ + ╞═════╪═════╡ + │ 1 ┆ 6 │ + │ 2 ┆ 7 │ + │ 3 ┆ 8 │ + └─────┴─────┘ + + Multiple columns can also be selected using positional arguments instead of a + list. Expressions are also accepted. + + >>> @nw.narwhalify + ... def func(df): + ... 
return df.select(nw.col("foo"), nw.col("bar") + 1) + >>> func(df_pd) + foo bar + 0 1 7 + 1 2 8 + 2 3 9 + >>> func(df_pl) + shape: (3, 2) + ┌─────┬─────┐ + │ foo ┆ bar │ + │ --- ┆ --- │ + │ i64 ┆ i64 │ + ╞═════╪═════╡ + │ 1 ┆ 7 │ + │ 2 ┆ 8 │ + │ 3 ┆ 9 │ + └─────┴─────┘ + >>> func(lf_pl).collect() + shape: (3, 2) + ┌─────┬─────┐ + │ foo ┆ bar │ + │ --- ┆ --- │ + │ i64 ┆ i64 │ + ╞═════╪═════╡ + │ 1 ┆ 7 │ + │ 2 ┆ 8 │ + │ 3 ┆ 9 │ + └─────┴─────┘ + + Use keyword arguments to easily name your expression inputs. + + >>> @nw.narwhalify + ... def func(df): + ... return df.select(threshold=nw.col("foo") * 2) + >>> func(df_pd) + threshold + 0 2 + 1 4 + 2 6 + >>> func(df_pl) + shape: (3, 1) + ┌───────────┐ + │ threshold │ + │ --- │ + │ i64 │ + ╞═══════════╡ + │ 2 │ + │ 4 │ + │ 6 │ + └───────────┘ + >>> func(lf_pl).collect() + shape: (3, 1) + ┌───────────┐ + │ threshold │ + │ --- │ + │ i64 │ + ╞═══════════╡ + │ 2 │ + │ 4 │ + │ 6 │ + └───────────┘ + """ + return super().select(*exprs, **named_exprs) + + def rename(self, mapping: dict[str, str]) -> Self: + r""" + Rename column names. + + Arguments: + mapping: Key value pairs that map from old name to new name, or a + function that takes the old name as input and returns the + new name. + + Notes: + If existing names are swapped (e.g. 'A' points to 'B' and 'B' + points to 'A'), polars will block projection and predicate + pushdowns at this node. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> data = {"foo": [1, 2, 3], "bar": [6, 7, 8], "ham": ["a", "b", "c"]} + >>> df_pd = pd.DataFrame(data) + >>> lf_pl = pl.LazyFrame(data) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(df): + ... 
return df.rename({"foo": "apple"}) + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd) + apple bar ham + 0 1 6 a + 1 2 7 b + 2 3 8 c + >>> func(lf_pl).collect() + shape: (3, 3) + ┌───────┬─────┬─────┐ + │ apple ┆ bar ┆ ham │ + │ --- ┆ --- ┆ --- │ + │ i64 ┆ i64 ┆ str │ + ╞═══════╪═════╪═════╡ + │ 1 ┆ 6 ┆ a │ + │ 2 ┆ 7 ┆ b │ + │ 3 ┆ 8 ┆ c │ + └───────┴─────┴─────┘ + """ + return super().rename(mapping) + + def head(self, n: int = 5) -> Self: + r""" + Get the first `n` rows. + + Arguments: + n: Number of rows to return. + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> data = { + ... "a": [1, 2, 3, 4, 5, 6], + ... "b": [7, 8, 9, 10, 11, 12], + ... } + >>> df_pd = pd.DataFrame(data) + >>> df_pl = pl.DataFrame(data) + >>> lf_pl = pl.LazyFrame(data) + + Let's define a dataframe-agnostic function that gets the first 3 rows. + + >>> @nw.narwhalify + ... def func(df): + ... return df.head(3) + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd) + a b + 0 1 7 + 1 2 8 + 2 3 9 + >>> func(df_pl) + shape: (3, 2) + ┌─────┬─────┐ + │ a ┆ b │ + │ --- ┆ --- │ + │ i64 ┆ i64 │ + ╞═════╪═════╡ + │ 1 ┆ 7 │ + │ 2 ┆ 8 │ + │ 3 ┆ 9 │ + └─────┴─────┘ + >>> func(lf_pl).collect() + shape: (3, 2) + ┌─────┬─────┐ + │ a ┆ b │ + │ --- ┆ --- │ + │ i64 ┆ i64 │ + ╞═════╪═════╡ + │ 1 ┆ 7 │ + │ 2 ┆ 8 │ + │ 3 ┆ 9 │ + └─────┴─────┘ + """ + return super().head(n) + + def tail(self, n: int = 5) -> Self: + r""" + Get the last `n` rows. + + Arguments: + n: Number of rows to return. + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> data = { + ... "a": [1, 2, 3, 4, 5, 6], + ... "b": [7, 8, 9, 10, 11, 12], + ... } + >>> df_pd = pd.DataFrame(data) + >>> df_pl = pl.DataFrame(data) + >>> lf_pl = pl.LazyFrame(data) + + Let's define a dataframe-agnostic function that gets the last 3 rows. + + >>> @nw.narwhalify + ... def func(df): + ... 
return df.tail(3) + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd) + a b + 3 4 10 + 4 5 11 + 5 6 12 + >>> func(df_pl) + shape: (3, 2) + ┌─────┬─────┐ + │ a ┆ b │ + │ --- ┆ --- │ + │ i64 ┆ i64 │ + ╞═════╪═════╡ + │ 4 ┆ 10 │ + │ 5 ┆ 11 │ + │ 6 ┆ 12 │ + └─────┴─────┘ + >>> func(lf_pl).collect() + shape: (3, 2) + ┌─────┬─────┐ + │ a ┆ b │ + │ --- ┆ --- │ + │ i64 ┆ i64 │ + ╞═════╪═════╡ + │ 4 ┆ 10 │ + │ 5 ┆ 11 │ + │ 6 ┆ 12 │ + └─────┴─────┘ + """ + return super().tail(n) + + def drop(self, *columns: str | Iterable[str], strict: bool = True) -> Self: + r""" + Remove columns from the LazyFrame. + + Arguments: + *columns: Names of the columns that should be removed from the dataframe. + strict: Validate that all column names exist in the schema and throw an + exception if a column name does not exist in the schema. + + Warning: + `strict` argument is ignored for `polars<1.0.0`. + + Please consider upgrading to a newer version or pass to eager mode. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> data = {"foo": [1, 2, 3], "bar": [6.0, 7.0, 8.0], "ham": ["a", "b", "c"]} + >>> df_pd = pd.DataFrame(data) + >>> lf_pl = pl.LazyFrame(data) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(df): + ... return df.drop("ham") + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd) + foo bar + 0 1 6.0 + 1 2 7.0 + 2 3 8.0 + >>> func(lf_pl).collect() + shape: (3, 2) + ┌─────┬─────┐ + │ foo ┆ bar │ + │ --- ┆ --- │ + │ i64 ┆ f64 │ + ╞═════╪═════╡ + │ 1 ┆ 6.0 │ + │ 2 ┆ 7.0 │ + │ 3 ┆ 8.0 │ + └─────┴─────┘ + + Use positional arguments to drop multiple columns. + + >>> @nw.narwhalify + ... def func(df): + ... 
return df.drop("foo", "ham") + + >>> func(df_pd) + bar + 0 6.0 + 1 7.0 + 2 8.0 + >>> func(lf_pl).collect() + shape: (3, 1) + ┌─────┐ + │ bar │ + │ --- │ + │ f64 │ + ╞═════╡ + │ 6.0 │ + │ 7.0 │ + │ 8.0 │ + └─────┘ + """ + return super().drop(*flatten(columns), strict=strict) + + def unique( + self, + subset: str | list[str] | None = None, + *, + keep: Literal["any", "first", "last", "none"] = "any", + maintain_order: bool = False, + ) -> Self: + """ + Drop duplicate rows from this LazyFrame. + + Arguments: + subset: Column name(s) to consider when identifying duplicate rows. + If set to `None`, use all columns. + keep: {'first', 'last', 'any', 'none'} + Which of the duplicate rows to keep. + + * 'any': Does not give any guarantee of which row is kept. + This allows more optimizations. + * 'none': Don't keep duplicate rows. + * 'first': Keep first unique row. + * 'last': Keep last unique row. + maintain_order: Keep the same order as the original DataFrame. This is more + expensive to compute. Settings this to `True` blocks the possibility + to run on the streaming engine for polars. + + Returns: + LazyFrame: LazyFrame with unique rows. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> data = { + ... "foo": [1, 2, 3, 1], + ... "bar": ["a", "a", "a", "a"], + ... "ham": ["b", "b", "b", "b"], + ... } + >>> df_pd = pd.DataFrame(data) + >>> lf_pl = pl.LazyFrame(data) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(df): + ... 
return df.unique(["bar", "ham"]) + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd) + foo bar ham + 0 1 a b + >>> func(lf_pl).collect() + shape: (1, 3) + ┌─────┬─────┬─────┐ + │ foo ┆ bar ┆ ham │ + │ --- ┆ --- ┆ --- │ + │ i64 ┆ str ┆ str │ + ╞═════╪═════╪═════╡ + │ 1 ┆ a ┆ b │ + └─────┴─────┴─────┘ + """ + return super().unique(subset, keep=keep, maintain_order=maintain_order) + + def filter(self, *predicates: IntoExpr | Iterable[IntoExpr] | list[bool]) -> Self: + r""" + Filter the rows in the LazyFrame based on a predicate expression. + + The original order of the remaining rows is preserved. + + Arguments: + *predicates: Expression that evaluates to a boolean Series. Can + also be a (single!) boolean list. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> data = { + ... "foo": [1, 2, 3], + ... "bar": [6, 7, 8], + ... "ham": ["a", "b", "c"], + ... } + >>> df_pd = pd.DataFrame(data) + >>> df_pl = pl.DataFrame(data) + >>> lf_pl = pl.LazyFrame(data) + + Let's define a dataframe-agnostic function in which we filter on + one condition. + + >>> @nw.narwhalify + ... def func(df): + ... return df.filter(nw.col("foo") > 1) + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd) + foo bar ham + 1 2 7 b + 2 3 8 c + >>> func(df_pl) + shape: (2, 3) + ┌─────┬─────┬─────┐ + │ foo ┆ bar ┆ ham │ + │ --- ┆ --- ┆ --- │ + │ i64 ┆ i64 ┆ str │ + ╞═════╪═════╪═════╡ + │ 2 ┆ 7 ┆ b │ + │ 3 ┆ 8 ┆ c │ + └─────┴─────┴─────┘ + >>> func(lf_pl).collect() + shape: (2, 3) + ┌─────┬─────┬─────┐ + │ foo ┆ bar ┆ ham │ + │ --- ┆ --- ┆ --- │ + │ i64 ┆ i64 ┆ str │ + ╞═════╪═════╪═════╡ + │ 2 ┆ 7 ┆ b │ + │ 3 ┆ 8 ┆ c │ + └─────┴─────┴─────┘ + + Filter on multiple conditions: + + >>> @nw.narwhalify + ... def func(df): + ... 
return df.filter((nw.col("foo") < 3) & (nw.col("ham") == "a")) + >>> func(df_pd) + foo bar ham + 0 1 6 a + >>> func(df_pl) + shape: (1, 3) + ┌─────┬─────┬─────┐ + │ foo ┆ bar ┆ ham │ + │ --- ┆ --- ┆ --- │ + │ i64 ┆ i64 ┆ str │ + ╞═════╪═════╪═════╡ + │ 1 ┆ 6 ┆ a │ + └─────┴─────┴─────┘ + >>> func(lf_pl).collect() + shape: (1, 3) + ┌─────┬─────┬─────┐ + │ foo ┆ bar ┆ ham │ + │ --- ┆ --- ┆ --- │ + │ i64 ┆ i64 ┆ str │ + ╞═════╪═════╪═════╡ + │ 1 ┆ 6 ┆ a │ + └─────┴─────┴─────┘ + + Provide multiple filters using `*args` syntax: + + >>> @nw.narwhalify + ... def func(df): + ... dframe = df.filter( + ... nw.col("foo") == 1, + ... nw.col("ham") == "a", + ... ) + ... return dframe + >>> func(df_pd) + foo bar ham + 0 1 6 a + >>> func(df_pl) + shape: (1, 3) + ┌─────┬─────┬─────┐ + │ foo ┆ bar ┆ ham │ + │ --- ┆ --- ┆ --- │ + │ i64 ┆ i64 ┆ str │ + ╞═════╪═════╪═════╡ + │ 1 ┆ 6 ┆ a │ + └─────┴─────┴─────┘ + >>> func(lf_pl).collect() + shape: (1, 3) + ┌─────┬─────┬─────┐ + │ foo ┆ bar ┆ ham │ + │ --- ┆ --- ┆ --- │ + │ i64 ┆ i64 ┆ str │ + ╞═════╪═════╪═════╡ + │ 1 ┆ 6 ┆ a │ + └─────┴─────┴─────┘ + + Filter on an OR condition: + + >>> @nw.narwhalify + ... def func(df): + ... return df.filter((nw.col("foo") == 1) | (nw.col("ham") == "c")) + >>> func(df_pd) + foo bar ham + 0 1 6 a + 2 3 8 c + >>> func(df_pl) + shape: (2, 3) + ┌─────┬─────┬─────┐ + │ foo ┆ bar ┆ ham │ + │ --- ┆ --- ┆ --- │ + │ i64 ┆ i64 ┆ str │ + ╞═════╪═════╪═════╡ + │ 1 ┆ 6 ┆ a │ + │ 3 ┆ 8 ┆ c │ + └─────┴─────┴─────┘ + >>> func(lf_pl).collect() + shape: (2, 3) + ┌─────┬─────┬─────┐ + │ foo ┆ bar ┆ ham │ + │ --- ┆ --- ┆ --- │ + │ i64 ┆ i64 ┆ str │ + ╞═════╪═════╪═════╡ + │ 1 ┆ 6 ┆ a │ + │ 3 ┆ 8 ┆ c │ + └─────┴─────┴─────┘ + """ + return super().filter(*predicates) + + def group_by(self, *keys: str | Iterable[str]) -> LazyGroupBy[Self]: + r""" + Start a group by operation. + + Arguments: + *keys: + Column(s) to group by. Accepts expression input. Strings are + parsed as column names. 
+ + Examples: + Group by one column and call `agg` to compute the grouped sum of + another column. + + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> df = { + ... "a": ["a", "b", "a", "b", "c"], + ... "b": [1, 2, 1, 3, 3], + ... "c": [5, 4, 3, 2, 1], + ... } + >>> df_pd = pd.DataFrame(df) + >>> df_pl = pl.DataFrame(df) + >>> lf_pl = pl.LazyFrame(df) + + Let's define a dataframe-agnostic function in which we group by one column + and call `agg` to compute the grouped sum of another column. + + >>> @nw.narwhalify + ... def func(df): + ... return df.group_by("a").agg(nw.col("b").sum()).sort("a") + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd) + a b + 0 a 2 + 1 b 5 + 2 c 3 + >>> func(df_pl) + shape: (3, 2) + ┌─────┬─────┐ + │ a ┆ b │ + │ --- ┆ --- │ + │ str ┆ i64 │ + ╞═════╪═════╡ + │ a ┆ 2 │ + │ b ┆ 5 │ + │ c ┆ 3 │ + └─────┴─────┘ + >>> func(lf_pl).collect() + shape: (3, 2) + ┌─────┬─────┐ + │ a ┆ b │ + │ --- ┆ --- │ + │ str ┆ i64 │ + ╞═════╪═════╡ + │ a ┆ 2 │ + │ b ┆ 5 │ + │ c ┆ 3 │ + └─────┴─────┘ + + Group by multiple columns by passing a list of column names. + + >>> @nw.narwhalify + ... def func(df): + ... 
return df.group_by(["a", "b"]).agg(nw.max("c")).sort(["a", "b"]) + >>> func(df_pd) + a b c + 0 a 1 5 + 1 b 2 4 + 2 b 3 2 + 3 c 3 1 + >>> func(df_pl) + shape: (4, 3) + ┌─────┬─────┬─────┐ + │ a ┆ b ┆ c │ + │ --- ┆ --- ┆ --- │ + │ str ┆ i64 ┆ i64 │ + ╞═════╪═════╪═════╡ + │ a ┆ 1 ┆ 5 │ + │ b ┆ 2 ┆ 4 │ + │ b ┆ 3 ┆ 2 │ + │ c ┆ 3 ┆ 1 │ + └─────┴─────┴─────┘ + >>> func(lf_pl).collect() + shape: (4, 3) + ┌─────┬─────┬─────┐ + │ a ┆ b ┆ c │ + │ --- ┆ --- ┆ --- │ + │ str ┆ i64 ┆ i64 │ + ╞═════╪═════╪═════╡ + │ a ┆ 1 ┆ 5 │ + │ b ┆ 2 ┆ 4 │ + │ b ┆ 3 ┆ 2 │ + │ c ┆ 3 ┆ 1 │ + └─────┴─────┴─────┘ + """ + from narwhals.group_by import LazyGroupBy + + return LazyGroupBy(self, *flatten(keys)) + + def sort( + self, + by: str | Iterable[str], + *more_by: str, + descending: bool | Sequence[bool] = False, + ) -> Self: + r""" + Sort the LazyFrame by the given columns. + + Arguments: + by: Column(s) to sort by. Accepts expression input. Strings are + parsed as column names. + + *more_by: Additional columns to sort by, specified as positional + arguments. + + descending: Sort in descending order. When sorting by multiple + columns, can be specified per column by passing a + sequence of booleans. + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> data = { + ... "a": [1, 2, None], + ... "b": [6.0, 5.0, 4.0], + ... "c": ["a", "c", "b"], + ... } + >>> df_pd = pd.DataFrame(data) + >>> df_lf = pl.LazyFrame(data) + + Let's define a dataframe-agnostic function in which we sort by multiple + columns in different orders + + >>> @nw.narwhalify + ... def func(df): + ... 
return df.sort("c", "a", descending=[False, True]) + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd) + a b c + 0 1.0 6.0 a + 2 NaN 4.0 b + 1 2.0 5.0 c + >>> func(df_lf).collect() + shape: (3, 3) + ┌──────┬─────┬─────┐ + │ a ┆ b ┆ c │ + │ --- ┆ --- ┆ --- │ + │ i64 ┆ f64 ┆ str │ + ╞══════╪═════╪═════╡ + │ 1 ┆ 6.0 ┆ a │ + │ null ┆ 4.0 ┆ b │ + │ 2 ┆ 5.0 ┆ c │ + └──────┴─────┴─────┘ + """ + return super().sort(by, *more_by, descending=descending) + + def join( + self, + other: Self, + on: str | list[str] | None = None, + how: Literal["inner", "left", "cross", "semi", "anti"] = "inner", + *, + left_on: str | list[str] | None = None, + right_on: str | list[str] | None = None, + suffix: str = "_right", + ) -> Self: + r""" + Add a join operation to the Logical Plan. + + Arguments: + other: Lazy DataFrame to join with. + on: Name(s) of the join columns in both DataFrames. If set, `left_on` and + `right_on` should be None. + how: Join strategy. + + * *inner*: Returns rows that have matching values in both tables. + * *cross*: Returns the Cartesian product of rows from both tables. + * *semi*: Filter rows that have a match in the right table. + * *anti*: Filter rows that do not have a match in the right table. + left_on: Join column of the left DataFrame. + right_on: Join column of the right DataFrame. + suffix: Suffix to append to columns with a duplicate name. + + Returns: + A new joined LazyFrame + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> data = { + ... "foo": [1, 2, 3], + ... "bar": [6.0, 7.0, 8.0], + ... "ham": ["a", "b", "c"], + ... } + >>> data_other = { + ... "apple": ["x", "y", "z"], + ... "ham": ["a", "b", "d"], + ... } + + >>> df_pd = pd.DataFrame(data) + >>> other_pd = pd.DataFrame(data_other) + + >>> df_pl = pl.LazyFrame(data) + >>> other_pl = pl.LazyFrame(data_other) + + Let's define a dataframe-agnostic function in which we join over "ham" column: + + >>> @nw.narwhalify + ... 
def join_on_ham(df, other_any): + ... return df.join(other_any, left_on="ham", right_on="ham") + + We can now pass either pandas or Polars to the function: + + >>> join_on_ham(df_pd, other_pd) + foo bar ham apple + 0 1 6.0 a x + 1 2 7.0 b y + + >>> join_on_ham(df_pl, other_pl).collect() + shape: (2, 4) + ┌─────┬─────┬─────┬───────┐ + │ foo ┆ bar ┆ ham ┆ apple │ + │ --- ┆ --- ┆ --- ┆ --- │ + │ i64 ┆ f64 ┆ str ┆ str │ + ╞═════╪═════╪═════╪═══════╡ + │ 1 ┆ 6.0 ┆ a ┆ x │ + │ 2 ┆ 7.0 ┆ b ┆ y │ + └─────┴─────┴─────┴───────┘ + """ + return super().join( + other, how=how, left_on=left_on, right_on=right_on, on=on, suffix=suffix + ) + + def join_asof( + self, + other: Self, + *, + left_on: str | None = None, + right_on: str | None = None, + on: str | None = None, + by_left: str | list[str] | None = None, + by_right: str | list[str] | None = None, + by: str | list[str] | None = None, + strategy: Literal["backward", "forward", "nearest"] = "backward", + ) -> Self: + """ + Perform an asof join. + + This is similar to a left-join except that we match on nearest key rather than equal keys. + + Both DataFrames must be sorted by the asof_join key. + + Arguments: + other: DataFrame to join with. + + left_on: Name(s) of the left join column(s). + + right_on: Name(s) of the right join column(s). + + on: Join column of both DataFrames. If set, left_on and right_on should be None. + + by_left: join on these columns before doing asof join + + by_right: join on these columns before doing asof join + + by: join on these columns before doing asof join + + strategy: Join strategy. The default is "backward". + + * *backward*: selects the last row in the right DataFrame whose "on" key is less than or equal to the left's key. + * *forward*: selects the first row in the right DataFrame whose "on" key is greater than or equal to the left's key. + * *nearest*: search selects the last row in the right DataFrame whose value is nearest to the left's key. 
+ + Returns: + A new joined DataFrame + + Examples: + >>> from datetime import datetime + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> data_gdp = { + ... "datetime": [ + ... datetime(2016, 1, 1), + ... datetime(2017, 1, 1), + ... datetime(2018, 1, 1), + ... datetime(2019, 1, 1), + ... datetime(2020, 1, 1), + ... ], + ... "gdp": [4164, 4411, 4566, 4696, 4827], + ... } + >>> data_population = { + ... "datetime": [ + ... datetime(2016, 3, 1), + ... datetime(2018, 8, 1), + ... datetime(2019, 1, 1), + ... ], + ... "population": [82.19, 82.66, 83.12], + ... } + >>> gdp_pd = pd.DataFrame(data_gdp) + >>> population_pd = pd.DataFrame(data_population) + >>> gdp_pl = pl.LazyFrame(data_gdp).sort("datetime") + >>> population_pl = pl.LazyFrame(data_population).sort("datetime") + + Let's define a dataframe-agnostic function in which we join over "datetime" column: + + >>> @nw.narwhalify + ... def join_asof_datetime(df, other_any, strategy): + ... return df.join_asof(other_any, on="datetime", strategy=strategy) + + We can now pass either pandas or Polars to the function: + + >>> join_asof_datetime(population_pd, gdp_pd, strategy="backward") + datetime population gdp + 0 2016-03-01 82.19 4164 + 1 2018-08-01 82.66 4566 + 2 2019-01-01 83.12 4696 + + >>> join_asof_datetime(population_pl, gdp_pl, strategy="backward").collect() + shape: (3, 3) + ┌─────────────────────┬────────────┬──────┐ + │ datetime ┆ population ┆ gdp │ + │ --- ┆ --- ┆ --- │ + │ datetime[μs] ┆ f64 ┆ i64 │ + ╞═════════════════════╪════════════╪══════╡ + │ 2016-03-01 00:00:00 ┆ 82.19 ┆ 4164 │ + │ 2018-08-01 00:00:00 ┆ 82.66 ┆ 4566 │ + │ 2019-01-01 00:00:00 ┆ 83.12 ┆ 4696 │ + └─────────────────────┴────────────┴──────┘ + + Here is a real-world times-series example that uses `by` argument. + + >>> from datetime import datetime + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> data_quotes = { + ... "datetime": [ + ... 
datetime(2016, 5, 25, 13, 30, 0, 23), + ... datetime(2016, 5, 25, 13, 30, 0, 23), + ... datetime(2016, 5, 25, 13, 30, 0, 30), + ... datetime(2016, 5, 25, 13, 30, 0, 41), + ... datetime(2016, 5, 25, 13, 30, 0, 48), + ... datetime(2016, 5, 25, 13, 30, 0, 49), + ... datetime(2016, 5, 25, 13, 30, 0, 72), + ... datetime(2016, 5, 25, 13, 30, 0, 75), + ... ], + ... "ticker": [ + ... "GOOG", + ... "MSFT", + ... "MSFT", + ... "MSFT", + ... "GOOG", + ... "AAPL", + ... "GOOG", + ... "MSFT", + ... ], + ... "bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01], + ... "ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03], + ... } + >>> data_trades = { + ... "datetime": [ + ... datetime(2016, 5, 25, 13, 30, 0, 23), + ... datetime(2016, 5, 25, 13, 30, 0, 38), + ... datetime(2016, 5, 25, 13, 30, 0, 48), + ... datetime(2016, 5, 25, 13, 30, 0, 48), + ... datetime(2016, 5, 25, 13, 30, 0, 48), + ... ], + ... "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"], + ... "price": [51.95, 51.95, 720.77, 720.92, 98.0], + ... "quantity": [75, 155, 100, 100, 100], + ... } + >>> quotes_pd = pd.DataFrame(data_quotes) + >>> trades_pd = pd.DataFrame(data_trades) + >>> quotes_pl = pl.LazyFrame(data_quotes).sort("datetime") + >>> trades_pl = pl.LazyFrame(data_trades).sort("datetime") + + Let's define a dataframe-agnostic function in which we join over "datetime" and by "ticker" columns: + + >>> @nw.narwhalify + ... def join_asof_datetime_by_ticker(df, other_any): + ... 
return df.join_asof(other_any, on="datetime", by="ticker") + + We can now pass either pandas or Polars to the function: + + >>> join_asof_datetime_by_ticker(trades_pd, quotes_pd) + datetime ticker price quantity bid ask + 0 2016-05-25 13:30:00.000023 MSFT 51.95 75 51.95 51.96 + 1 2016-05-25 13:30:00.000038 MSFT 51.95 155 51.97 51.98 + 2 2016-05-25 13:30:00.000048 GOOG 720.77 100 720.50 720.93 + 3 2016-05-25 13:30:00.000048 GOOG 720.92 100 720.50 720.93 + 4 2016-05-25 13:30:00.000048 AAPL 98.00 100 NaN NaN + + >>> join_asof_datetime_by_ticker(trades_pl, quotes_pl).collect() + shape: (5, 6) + ┌────────────────────────────┬────────┬────────┬──────────┬───────┬────────┐ + │ datetime ┆ ticker ┆ price ┆ quantity ┆ bid ┆ ask │ + │ --- ┆ --- ┆ --- ┆ --- ┆ --- ┆ --- │ + │ datetime[μs] ┆ str ┆ f64 ┆ i64 ┆ f64 ┆ f64 │ + ╞════════════════════════════╪════════╪════════╪══════════╪═══════╪════════╡ + │ 2016-05-25 13:30:00.000023 ┆ MSFT ┆ 51.95 ┆ 75 ┆ 51.95 ┆ 51.96 │ + │ 2016-05-25 13:30:00.000038 ┆ MSFT ┆ 51.95 ┆ 155 ┆ 51.97 ┆ 51.98 │ + │ 2016-05-25 13:30:00.000048 ┆ GOOG ┆ 720.77 ┆ 100 ┆ 720.5 ┆ 720.93 │ + │ 2016-05-25 13:30:00.000048 ┆ GOOG ┆ 720.92 ┆ 100 ┆ 720.5 ┆ 720.93 │ + │ 2016-05-25 13:30:00.000048 ┆ AAPL ┆ 98.0 ┆ 100 ┆ null ┆ null │ + └────────────────────────────┴────────┴────────┴──────────┴───────┴────────┘ + """ + return super().join_asof( + other, + left_on=left_on, + right_on=right_on, + on=on, + by_left=by_left, + by_right=by_right, + by=by, + strategy=strategy, + ) + + def clone(self) -> Self: + r""" + Create a copy of this DataFrame. + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> data = {"a": [1, 2], "b": [3, 4]} + >>> df_pd = pd.DataFrame(data) + >>> df_pl = pl.LazyFrame(data) + + Let's define a dataframe-agnostic function in which we copy the DataFrame: + + >>> @nw.narwhalify + ... def func(df): + ... 
return df.clone() + + >>> func(df_pd) + a b + 0 1 3 + 1 2 4 + + >>> func(df_pl).collect() + shape: (2, 2) + ┌─────┬─────┐ + │ a ┆ b │ + │ --- ┆ --- │ + │ i64 ┆ i64 │ + ╞═════╪═════╡ + │ 1 ┆ 3 │ + │ 2 ┆ 4 │ + └─────┴─────┘ + """ + return super().clone() + + def lazy(self) -> Self: + """ + Lazify the DataFrame (if possible). + + If a library does not support lazy execution, then this is a no-op. + + Examples: + Construct pandas and Polars objects: + + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> df = {"foo": [1, 2, 3], "bar": [6.0, 7.0, 8.0], "ham": ["a", "b", "c"]} + >>> df_pd = pd.DataFrame(df) + >>> df_pl = pl.LazyFrame(df) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(df): + ... return df.lazy() + + Note that then, pandas dataframe stay eager, and the Polars LazyFrame stays lazy: + + >>> func(df_pd) + foo bar ham + 0 1 6.0 a + 1 2 7.0 b + 2 3 8.0 c + >>> func(df_pl) + + """ + return super().lazy() # type: ignore[return-value] + + def gather_every(self: Self, n: int, offset: int = 0) -> Self: + r""" + Take every nth row in the DataFrame and return as a new DataFrame. + + Arguments: + n: Gather every *n*-th row. + offset: Starting index. + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> data = {"a": [1, 2, 3, 4], "b": [5, 6, 7, 8]} + >>> df_pd = pd.DataFrame(data) + >>> lf_pl = pl.LazyFrame(data) + + Let's define a dataframe-agnostic function in which gather every 2 rows, + starting from a offset of 1: + + >>> @nw.narwhalify + ... def func(df): + ... 
return df.gather_every(n=2, offset=1) + + >>> func(df_pd) + a b + 1 2 6 + 3 4 8 + + >>> func(lf_pl).collect() + shape: (2, 2) + ┌─────┬─────┐ + │ a ┆ b │ + │ --- ┆ --- │ + │ i64 ┆ i64 │ + ╞═════╪═════╡ + │ 2 ┆ 6 │ + │ 4 ┆ 8 │ + └─────┴─────┘ + """ + return super().gather_every(n=n, offset=offset) diff --git a/parrot/lib/python3.10/site-packages/narwhals/group_by.py b/parrot/lib/python3.10/site-packages/narwhals/group_by.py new file mode 100644 index 0000000000000000000000000000000000000000..797442e3c8feac4ece846344a994a059cf80f965 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/narwhals/group_by.py @@ -0,0 +1,133 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Any +from typing import Generic +from typing import Iterable +from typing import Iterator +from typing import TypeVar +from typing import cast + +from narwhals.dataframe import DataFrame +from narwhals.dataframe import LazyFrame +from narwhals.utils import tupleify + +if TYPE_CHECKING: + from narwhals.typing import IntoExpr + +DataFrameT = TypeVar("DataFrameT") +LazyFrameT = TypeVar("LazyFrameT") + + +class GroupBy(Generic[DataFrameT]): + def __init__(self, df: DataFrameT, *keys: str) -> None: + self._df = cast(DataFrame[Any], df) + self._keys = keys + self._grouped = self._df._compliant_frame.group_by(*self._keys) + + def agg( + self, *aggs: IntoExpr | Iterable[IntoExpr], **named_aggs: IntoExpr + ) -> DataFrameT: + """ + Compute aggregations for each group of a group by operation. + + Arguments: + aggs: Aggregations to compute for each group of the group by operation, + specified as positional arguments. + + named_aggs: Additional aggregations, specified as keyword arguments. + + Examples: + Group by one column or by multiple columns and call `agg` to compute + the grouped sum of another column. + + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> df_pd = pd.DataFrame( + ... { + ... "a": ["a", "b", "a", "b", "c"], + ... 
"b": [1, 2, 1, 3, 3], + ... "c": [5, 4, 3, 2, 1], + ... } + ... ) + >>> df_pl = pl.DataFrame( + ... { + ... "a": ["a", "b", "a", "b", "c"], + ... "b": [1, 2, 1, 3, 3], + ... "c": [5, 4, 3, 2, 1], + ... } + ... ) + + We define library agnostic functions: + + >>> @nw.narwhalify + ... def func(df): + ... return df.group_by("a").agg(nw.col("b").sum()).sort("a") + + >>> @nw.narwhalify + ... def func_mult_col(df): + ... return df.group_by("a", "b").agg(nw.sum("c")).sort("a", "b") + + We can then pass either pandas or Polars to `func` and `func_mult_col`: + + >>> func(df_pd) + a b + 0 a 2 + 1 b 5 + 2 c 3 + >>> func(df_pl) + shape: (3, 2) + ┌─────┬─────┐ + │ a ┆ b │ + │ --- ┆ --- │ + │ str ┆ i64 │ + ╞═════╪═════╡ + │ a ┆ 2 │ + │ b ┆ 5 │ + │ c ┆ 3 │ + └─────┴─────┘ + >>> func_mult_col(df_pd) + a b c + 0 a 1 8 + 1 b 2 4 + 2 b 3 2 + 3 c 3 1 + >>> func_mult_col(df_pl) + shape: (4, 3) + ┌─────┬─────┬─────┐ + │ a ┆ b ┆ c │ + │ --- ┆ --- ┆ --- │ + │ str ┆ i64 ┆ i64 │ + ╞═════╪═════╪═════╡ + │ a ┆ 1 ┆ 8 │ + │ b ┆ 2 ┆ 4 │ + │ b ┆ 3 ┆ 2 │ + │ c ┆ 3 ┆ 1 │ + └─────┴─────┴─────┘ + """ + aggs, named_aggs = self._df._flatten_and_extract(*aggs, **named_aggs) + return self._df._from_compliant_dataframe( # type: ignore[return-value] + self._grouped.agg(*aggs, **named_aggs), + ) + + def __iter__(self) -> Iterator[tuple[Any, DataFrameT]]: + yield from ( # type: ignore[misc] + (tupleify(key), self._df._from_compliant_dataframe(df)) + for (key, df) in self._grouped.__iter__() + ) + + +class LazyGroupBy(Generic[LazyFrameT]): + def __init__(self, df: LazyFrameT, *keys: str) -> None: + self._df = cast(LazyFrame[Any], df) + self._keys = keys + self._grouped = self._df._compliant_frame.group_by(*self._keys) + + def agg( + self, *aggs: IntoExpr | Iterable[IntoExpr], **named_aggs: IntoExpr + ) -> LazyFrameT: + aggs, named_aggs = self._df._flatten_and_extract(*aggs, **named_aggs) + return self._df._from_compliant_dataframe( # type: ignore[return-value] + self._grouped.agg(*aggs, **named_aggs), + ) diff 
--git a/parrot/lib/python3.10/site-packages/narwhals/series.py b/parrot/lib/python3.10/site-packages/narwhals/series.py new file mode 100644 index 0000000000000000000000000000000000000000..ae78eef766b610dd7f4d29020d4d5abaea91eb69 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/narwhals/series.py @@ -0,0 +1,3757 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Any +from typing import Callable +from typing import Iterator +from typing import Literal +from typing import Sequence +from typing import overload + +from narwhals.utils import parse_version + +if TYPE_CHECKING: + import numpy as np + import pandas as pd + import pyarrow as pa + from typing_extensions import Self + + from narwhals.dataframe import DataFrame + from narwhals.dtypes import DType + + +class Series: + """ + Narwhals Series, backed by a native series. + + The native series might be pandas.Series, polars.Series, ... + + This class is not meant to be instantiated directly - instead, use + `narwhals.from_native`, making sure to pass `allow_series=True` or + `series_only=True`. + """ + + def __init__( + self, + series: Any, + *, + level: Literal["full", "interchange"], + ) -> None: + self._level = level + if hasattr(series, "__narwhals_series__"): + self._compliant_series = series.__narwhals_series__() + else: # pragma: no cover + msg = f"Expected Polars Series or an object which implements `__narwhals_series__`, got: {type(series)}." + raise AssertionError(msg) + + def __array__(self, dtype: Any = None, copy: bool | None = None) -> np.ndarray: + return self._compliant_series.__array__(dtype=dtype, copy=copy) + + @overload + def __getitem__(self, idx: int) -> Any: ... + + @overload + def __getitem__(self, idx: slice | Sequence[int]) -> Self: ... 
+ + def __getitem__(self, idx: int | slice | Sequence[int]) -> Any | Self: + if isinstance(idx, int): + return self._compliant_series[idx] + return self._from_compliant_series(self._compliant_series[idx]) + + def __native_namespace__(self) -> Any: + return self._compliant_series.__native_namespace__() + + def __arrow_c_stream__(self, requested_schema: object | None = None) -> object: + """ + Export a Series via the Arrow PyCapsule Interface. + + Narwhals doesn't implement anything itself here: + + - if the underlying series implements the interface, it'll return that + - else, it'll call `to_arrow` and then defer to PyArrow's implementation + + See [PyCapsule Interface](https://arrow.apache.org/docs/dev/format/CDataInterface/PyCapsuleInterface.html) + for more. + """ + native_series = self._compliant_series._native_series + if hasattr(native_series, "__arrow_c_stream__"): + return native_series.__arrow_c_stream__(requested_schema=requested_schema) + try: + import pyarrow as pa # ignore-banned-import + except ModuleNotFoundError as exc: # pragma: no cover + msg = f"PyArrow>=16.0.0 is required for `Series.__arrow_c_stream__` for object of type {type(native_series)}" + raise ModuleNotFoundError(msg) from exc + if parse_version(pa.__version__) < (16, 0): # pragma: no cover + msg = f"PyArrow>=16.0.0 is required for `Series.__arrow_c_stream__` for object of type {type(native_series)}" + raise ModuleNotFoundError(msg) + ca = pa.chunked_array([self.to_arrow()]) + return ca.__arrow_c_stream__(requested_schema=requested_schema) + + def to_native(self) -> Any: + """ + Convert Narwhals series to native series. + + Returns: + Series of class that user started with. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> s = [1, 2, 3] + >>> s_pd = pd.Series(s) + >>> s_pl = pl.Series(s) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... 
return s.to_native() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 1 + 1 2 + 2 3 + dtype: int64 + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (3,) + Series: '' [i64] + [ + 1 + 2 + 3 + ] + """ + return self._compliant_series._native_series + + def scatter(self, indices: int | Sequence[int], values: Any) -> Self: + """ + Set value(s) at given position(s). + + Arguments: + indices: Position(s) to set items at. + values: Values to set. + + Warning: + For some libraries (pandas, Polars), this method operates in-place, + whereas for others (PyArrow) it doesn't! + We recommend being careful with it, and not relying on the + in-placeness. For example, a valid use case is when updating + a column in an eager dataframe, see the example below. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> data = {"a": [1, 2, 3], "b": [4, 5, 6]} + >>> df_pd = pd.DataFrame(data) + >>> df_pl = pl.DataFrame(data) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(df): + ... return df.with_columns(df["a"].scatter([0, 1], [999, 888])) + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd) + a b + 0 999 4 + 1 888 5 + 2 3 6 + >>> func(df_pl) + shape: (3, 2) + ┌─────┬─────┐ + │ a ┆ b │ + │ --- ┆ --- │ + │ i64 ┆ i64 │ + ╞═════╪═════╡ + │ 999 ┆ 4 │ + │ 888 ┆ 5 │ + │ 3 ┆ 6 │ + └─────┴─────┘ + """ + return self._from_compliant_series( + self._compliant_series.scatter(indices, self._extract_native(values)) + ) + + @property + def shape(self) -> tuple[int]: + """ + Get the shape of the Series. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> s = [1, 2, 3] + >>> s_pd = pd.Series(s) + >>> s_pl = pl.Series(s) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... 
return s.shape + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + (3,) + >>> func(s_pl) + (3,) + """ + return self._compliant_series.shape # type: ignore[no-any-return] + + def _extract_native(self, arg: Any) -> Any: + from narwhals.series import Series + + if isinstance(arg, Series): + return arg._compliant_series + return arg + + def _from_compliant_series(self, series: Any) -> Self: + return self.__class__( + series, + level=self._level, + ) + + def pipe(self, function: Callable[[Any], Self], *args: Any, **kwargs: Any) -> Self: + """ + Pipe function call. + + Examples: + >>> import polars as pl + >>> import pandas as pd + >>> import narwhals as nw + >>> s_pd = pd.Series([1, 2, 3, 4]) + >>> s_pl = pl.Series([1, 2, 3, 4]) + + Lets define a function to pipe into + >>> @nw.narwhalify + ... def func(s): + ... return s.pipe(lambda x: x + 2) + + Now apply it to the series + + >>> func(s_pd) + 0 3 + 1 4 + 2 5 + 3 6 + dtype: int64 + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (4,) + Series: '' [i64] + [ + 3 + 4 + 5 + 6 + ] + + + """ + return function(self, *args, **kwargs) + + def __repr__(self) -> str: # pragma: no cover + header = " Narwhals Series " + length = len(header) + return ( + "┌" + + "─" * length + + "┐\n" + + f"|{header}|\n" + + "| Use `.to_native()` to see native output |\n" + + "└" + + "─" * length + + "┘" + ) + + def __len__(self) -> int: + return len(self._compliant_series) + + def len(self) -> int: + r""" + Return the number of elements in the Series. + + Null values count towards the total. + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> data = [1, 2, None] + >>> s_pd = pd.Series(data) + >>> s_pl = pl.Series(data) + + Let's define a dataframe-agnostic function that computes the len of the series: + + >>> @nw.narwhalify + ... def func(s): + ... 
return s.len() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 3 + >>> func(s_pl) + 3 + """ + return len(self._compliant_series) + + @property + def dtype(self: Self) -> DType: + """ + Get the data type of the Series. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> s = [1, 2, 3] + >>> s_pd = pd.Series(s) + >>> s_pl = pl.Series(s) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.dtype + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + Int64 + >>> func(s_pl) + Int64 + """ + return self._compliant_series.dtype # type: ignore[no-any-return] + + @property + def name(self) -> str: + """ + Get the name of the Series. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> s = [1, 2, 3] + >>> s_pd = pd.Series(s, name="foo") + >>> s_pl = pl.Series("foo", s) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.name + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 'foo' + >>> func(s_pl) + 'foo' + """ + return self._compliant_series.name # type: ignore[no-any-return] + + def cast( + self, + dtype: Any, + ) -> Self: + """ + Cast between data types. + + Arguments: + dtype: Data type that the object will be cast into. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> s = [True, False, True] + >>> s_pd = pd.Series(s) + >>> s_pl = pl.Series(s) + + We define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... 
return s.cast(nw.Int64) + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 1 + 1 0 + 2 1 + dtype: int64 + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (3,) + Series: '' [i64] + [ + 1 + 0 + 1 + ] + """ + return self._from_compliant_series(self._compliant_series.cast(dtype)) + + def to_frame(self) -> DataFrame[Any]: + """ + Convert to dataframe. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> s = [1, 2, 3] + >>> s_pd = pd.Series(s, name="a") + >>> s_pl = pl.Series("a", s) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.to_frame() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + a + 0 1 + 1 2 + 2 3 + >>> func(s_pl) + shape: (3, 1) + ┌─────┐ + │ a │ + │ --- │ + │ i64 │ + ╞═════╡ + │ 1 │ + │ 2 │ + │ 3 │ + └─────┘ + """ + from narwhals.dataframe import DataFrame + + return DataFrame( + self._compliant_series.to_frame(), + level=self._level, + ) + + def to_list(self) -> list[Any]: + """ + Convert to list. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> s = [1, 2, 3] + >>> s_pd = pd.Series(s, name="a") + >>> s_pl = pl.Series("a", s) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.to_list() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + [1, 2, 3] + >>> func(s_pl) + [1, 2, 3] + """ + return self._compliant_series.to_list() # type: ignore[no-any-return] + + def mean(self) -> Any: + """ + Reduce this Series to the mean value. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> s = [1, 2, 3] + >>> s_pd = pd.Series(s) + >>> s_pl = pl.Series(s) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... 
return s.mean() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) # doctest:+SKIP + np.float64(2.0) + >>> func(s_pl) + 2.0 + """ + return self._compliant_series.mean() + + def count(self) -> Any: + """ + Returns the number of non-null elements in the Series. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> s = [1, 2, 3] + >>> s_pd = pd.Series(s) + >>> s_pl = pl.Series(s) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.count() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) # doctest:+SKIP + np.int64(3) + >>> func(s_pl) + 3 + + """ + return self._compliant_series.count() + + def any(self) -> Any: + """ + Return whether any of the values in the Series are True. + + Notes: + Only works on Series of data type Boolean. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> s = [False, True, False] + >>> s_pd = pd.Series(s) + >>> s_pl = pl.Series(s) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.any() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) # doctest:+SKIP + np.True_ + >>> func(s_pl) + True + """ + return self._compliant_series.any() + + def all(self) -> Any: + """ + Return whether all values in the Series are True. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> s = [True, False, True] + >>> s_pd = pd.Series(s) + >>> s_pl = pl.Series(s) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.all() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) # doctest:+SKIP + np.False_ + >>> func(s_pl) + False + + """ + return self._compliant_series.all() + + def min(self) -> Any: + """ + Get the minimal value in this Series. 
+ + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> s = [1, 2, 3] + >>> s_pd = pd.Series(s) + >>> s_pl = pl.Series(s) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.min() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) # doctest:+SKIP + np.int64(1) + >>> func(s_pl) + 1 + """ + return self._compliant_series.min() + + def max(self) -> Any: + """ + Get the maximum value in this Series. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> s = [1, 2, 3] + >>> s_pd = pd.Series(s) + >>> s_pl = pl.Series(s) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.max() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) # doctest:+SKIP + np.int64(3) + >>> func(s_pl) + 3 + """ + return self._compliant_series.max() + + def sum(self) -> Any: + """ + Reduce this Series to the sum value. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> s = [1, 2, 3] + >>> s_pd = pd.Series(s) + >>> s_pl = pl.Series(s) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.sum() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) # doctest:+SKIP + np.int64(6) + >>> func(s_pl) + 6 + """ + return self._compliant_series.sum() + + def std(self, *, ddof: int = 1) -> Any: + """ + Get the standard deviation of this Series. + + Arguments: + ddof: “Delta Degrees of Freedom”: the divisor used in the calculation is N - ddof, + where N represents the number of elements. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> s = [1, 2, 3] + >>> s_pd = pd.Series(s) + >>> s_pl = pl.Series(s) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... 
return s.std() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) # doctest:+SKIP + np.float64(1.0) + >>> func(s_pl) + 1.0 + """ + return self._compliant_series.std(ddof=ddof) + + def clip( + self, lower_bound: Any | None = None, upper_bound: Any | None = None + ) -> Self: + r""" + Clip values in the Series. + + Arguments: + lower_bound: Lower bound value. + upper_bound: Upper bound value. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> + >>> s = [1, 2, 3] + >>> s_pd = pd.Series(s) + >>> s_pl = pl.Series(s) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func_lower(s): + ... return s.clip(2) + + We can then pass either pandas or Polars to `func_lower`: + + >>> func_lower(s_pd) + 0 2 + 1 2 + 2 3 + dtype: int64 + >>> func_lower(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (3,) + Series: '' [i64] + [ + 2 + 2 + 3 + ] + + We define another library agnostic function: + + >>> @nw.narwhalify + ... def func_upper(s): + ... return s.clip(upper_bound=2) + + We can then pass either pandas or Polars to `func_upper`: + + >>> func_upper(s_pd) + 0 1 + 1 2 + 2 2 + dtype: int64 + >>> func_upper(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (3,) + Series: '' [i64] + [ + 1 + 2 + 2 + ] + + We can have both at the same time + + >>> s = [-1, 1, -3, 3, -5, 5] + >>> s_pd = pd.Series(s) + >>> s_pl = pl.Series(s) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... 
return s.clip(-1, 3) + + We can pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 -1 + 1 1 + 2 -1 + 3 3 + 4 -1 + 5 3 + dtype: int64 + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (6,) + Series: '' [i64] + [ + -1 + 1 + -1 + 3 + -1 + 3 + ] + """ + return self._from_compliant_series( + self._compliant_series.clip(lower_bound=lower_bound, upper_bound=upper_bound) + ) + + def is_in(self, other: Any) -> Self: + """ + Check if the elements of this Series are in the other sequence. + + Arguments: + other: Sequence of primitive type. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> s_pd = pd.Series([1, 2, 3]) + >>> s_pl = pl.Series([1, 2, 3]) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.is_in([3, 2, 8]) + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 False + 1 True + 2 True + dtype: bool + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (3,) + Series: '' [bool] + [ + false + true + true + ] + """ + return self._from_compliant_series( + self._compliant_series.is_in(self._extract_native(other)) + ) + + def arg_true(self) -> Self: + """ + Find elements where boolean Series is True. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> data = [1, None, None, 2] + >>> s_pd = pd.Series(data, name="a") + >>> s_pl = pl.Series("a", data) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.is_null().arg_true() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 1 1 + 2 2 + Name: a, dtype: int64 + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (2,) + Series: 'a' [u32] + [ + 1 + 2 + ] + """ + return self._from_compliant_series(self._compliant_series.arg_true()) + + def drop_nulls(self) -> Self: + """ + Drop all null values. + + Notes: + pandas and Polars handle null values differently. 
Polars distinguishes + between NaN and Null, whereas pandas doesn't. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import numpy as np + >>> import narwhals as nw + >>> s_pd = pd.Series([2, 4, None, 3, 5]) + >>> s_pl = pl.Series("a", [2, 4, None, 3, 5]) + + Now define a dataframe-agnostic function with a `column` argument for the column to evaluate : + + >>> @nw.narwhalify + ... def func(s): + ... return s.drop_nulls() + + Then we can pass either Series (polars or pandas) to `func`: + + >>> func(s_pd) + 0 2.0 + 1 4.0 + 3 3.0 + 4 5.0 + dtype: float64 + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (4,) + Series: 'a' [i64] + [ + 2 + 4 + 3 + 5 + ] + """ + return self._from_compliant_series(self._compliant_series.drop_nulls()) + + def abs(self) -> Self: + """ + Calculate the absolute value of each element. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> s = [2, -4, 3] + >>> s_pd = pd.Series(s) + >>> s_pl = pl.Series(s) + + We define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.abs() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 2 + 1 4 + 2 3 + dtype: int64 + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (3,) + Series: '' [i64] + [ + 2 + 4 + 3 + ] + """ + return self._from_compliant_series(self._compliant_series.abs()) + + def cum_sum(self) -> Self: + """ + Calculate the cumulative sum. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> s = [2, 4, 3] + >>> s_pd = pd.Series(s) + >>> s_pl = pl.Series(s) + + We define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... 
return s.cum_sum() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 2 + 1 6 + 2 9 + dtype: int64 + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (3,) + Series: '' [i64] + [ + 2 + 6 + 9 + ] + """ + return self._from_compliant_series(self._compliant_series.cum_sum()) + + def unique(self) -> Self: + """ + Returns unique values + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> s = [2, 4, 4, 6] + >>> s_pd = pd.Series(s) + >>> s_pl = pl.Series(s) + + Let's define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.unique() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 2 + 1 4 + 2 6 + dtype: int64 + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (3,) + Series: '' [i64] + [ + 2 + 4 + 6 + ] + """ + return self._from_compliant_series(self._compliant_series.unique()) + + def diff(self) -> Self: + """ + Calculate the difference with the previous element, for each element. + + Notes: + pandas may change the dtype here, for example when introducing missing + values in an integer column. To ensure, that the dtype doesn't change, + you may want to use `fill_null` and `cast`. For example, to calculate + the diff and fill missing values with `0` in a Int64 column, you could + do: + + s.diff().fill_null(0).cast(nw.Int64) + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> s = [2, 4, 3] + >>> s_pd = pd.Series(s) + >>> s_pl = pl.Series(s) + + We define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... 
return s.diff() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 NaN + 1 2.0 + 2 -1.0 + dtype: float64 + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (3,) + Series: '' [i64] + [ + null + 2 + -1 + ] + """ + return self._from_compliant_series(self._compliant_series.diff()) + + def shift(self, n: int) -> Self: + """ + Shift values by `n` positions. + + Arguments: + n: Number of indices to shift forward. If a negative value is passed, + values are shifted in the opposite direction instead. + + Notes: + pandas may change the dtype here, for example when introducing missing + values in an integer column. To ensure, that the dtype doesn't change, + you may want to use `fill_null` and `cast`. For example, to shift + and fill missing values with `0` in a Int64 column, you could + do: + + s.shift(1).fill_null(0).cast(nw.Int64) + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> s = [2, 4, 3] + >>> s_pd = pd.Series(s) + >>> s_pl = pl.Series(s) + + We define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.shift(1) + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 NaN + 1 2.0 + 2 4.0 + dtype: float64 + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (3,) + Series: '' [i64] + [ + null + 2 + 4 + ] + """ + return self._from_compliant_series(self._compliant_series.shift(n)) + + def sample( + self: Self, + n: int | None = None, + *, + fraction: float | None = None, + with_replacement: bool = False, + seed: int | None = None, + ) -> Self: + """ + Sample randomly from this Series. + + Arguments: + n: Number of items to return. Cannot be used with fraction. + fraction: Fraction of items to return. Cannot be used with n. + with_replacement: Allow values to be sampled more than once. + seed: Seed for the random number generator. If set to None (default), a random + seed is generated for each sample operation. 
+ + Notes: + The `sample` method returns a Series with a specified number of + randomly selected items chosen from this Series. + The results are not consistent across libraries. + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + + >>> s_pd = pd.Series([1, 2, 3, 4]) + >>> s_pl = pl.Series([1, 2, 3, 4]) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.sample(fraction=1.0, with_replacement=True) + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) # doctest:+SKIP + a + 2 3 + 1 2 + 3 4 + 3 4 + >>> func(s_pl) # doctest:+SKIP + shape: (4,) + Series: '' [i64] + [ + 1 + 4 + 3 + 4 + ] + """ + return self._from_compliant_series( + self._compliant_series.sample( + n=n, fraction=fraction, with_replacement=with_replacement, seed=seed + ) + ) + + def alias(self, name: str) -> Self: + """ + Rename the Series. + + Arguments: + name: The new name. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> s = [1, 2, 3] + >>> s_pd = pd.Series(s, name="foo") + >>> s_pl = pl.Series("foo", s) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.alias("bar") + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 1 + 1 2 + 2 3 + Name: bar, dtype: int64 + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (3,) + Series: 'bar' [i64] + [ + 1 + 2 + 3 + ] + """ + return self._from_compliant_series(self._compliant_series.alias(name=name)) + + def sort(self, *, descending: bool = False, nulls_last: bool = False) -> Self: + """ + Sort this Series. Place null values first. + + Arguments: + descending: Sort in descending order. + nulls_last: Place null values last instead of first. 
+ + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> s = [5, None, 1, 2] + >>> s_pd = pd.Series(s) + >>> s_pl = pl.Series(s) + + We define library agnostic functions: + + >>> @nw.narwhalify + ... def func(s): + ... return s.sort() + + >>> @nw.narwhalify + ... def func_descend(s): + ... return s.sort(descending=True) + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 1 NaN + 2 1.0 + 3 2.0 + 0 5.0 + dtype: float64 + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (4,) + Series: '' [i64] + [ + null + 1 + 2 + 5 + ] + >>> func_descend(s_pd) + 1 NaN + 0 5.0 + 3 2.0 + 2 1.0 + dtype: float64 + >>> func_descend(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (4,) + Series: '' [i64] + [ + null + 5 + 2 + 1 + ] + """ + return self._from_compliant_series( + self._compliant_series.sort(descending=descending, nulls_last=nulls_last) + ) + + def is_null(self) -> Self: + """ + Returns a boolean Series indicating which values are null. + + Notes: + pandas and Polars handle null values differently. Polars distinguishes + between NaN and Null, whereas pandas doesn't. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> s = [1, 2, None] + >>> s_pd = pd.Series(s) + >>> s_pl = pl.Series(s) + + We define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.is_null() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 False + 1 False + 2 True + dtype: bool + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (3,) + Series: '' [bool] + [ + false + false + true + ] + """ + return self._from_compliant_series(self._compliant_series.is_null()) + + def fill_null(self, value: Any) -> Self: + """ + Fill null values using the specified value. + + Arguments: + value: Value used to fill null values. + + Notes: + pandas and Polars handle null values differently. 
Polars distinguishes + between NaN and Null, whereas pandas doesn't. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> s = [1, 2, None] + >>> s_pd = pd.Series(s) + >>> s_pl = pl.Series(s) + + We define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.fill_null(5) + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 1.0 + 1 2.0 + 2 5.0 + dtype: float64 + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (3,) + Series: '' [i64] + [ + 1 + 2 + 5 + ] + """ + return self._from_compliant_series(self._compliant_series.fill_null(value)) + + def is_between( + self, lower_bound: Any, upper_bound: Any, closed: str = "both" + ) -> Self: + """ + Get a boolean mask of the values that are between the given lower/upper bounds. + + Arguments: + lower_bound: Lower bound value. + + upper_bound: Upper bound value. + + closed: Define which sides of the interval are closed (inclusive). + + Notes: + If the value of the `lower_bound` is greater than that of the `upper_bound`, + then the values will be False, as no value can satisfy the condition. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> s_pd = pd.Series([1, 2, 3, 4, 5]) + >>> s_pl = pl.Series([1, 2, 3, 4, 5]) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.is_between(2, 4, "right") + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 False + 1 False + 2 True + 3 True + 4 False + dtype: bool + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (5,) + Series: '' [bool] + [ + false + false + true + true + false + ] + """ + return self._from_compliant_series( + self._compliant_series.is_between(lower_bound, upper_bound, closed=closed) + ) + + def n_unique(self) -> int: + """ + Count the number of unique values. 
+ + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> s = [1, 2, 2, 3] + >>> s_pd = pd.Series(s) + >>> s_pl = pl.Series(s) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.n_unique() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 3 + >>> func(s_pl) + 3 + """ + return self._compliant_series.n_unique() # type: ignore[no-any-return] + + def to_numpy(self) -> np.ndarray: + """ + Convert to numpy. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> s = [1, 2, 3] + >>> s_pd = pd.Series(s, name="a") + >>> s_pl = pl.Series("a", s) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.to_numpy() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + array([1, 2, 3]...) + >>> func(s_pl) + array([1, 2, 3]...) + """ + return self._compliant_series.to_numpy() + + def to_pandas(self) -> pd.Series: + """ + Convert to pandas. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> s = [1, 2, 3] + >>> s_pd = pd.Series(s, name="a") + >>> s_pl = pl.Series("a", s) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... 
return s.to_pandas() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 1 + 1 2 + 2 3 + Name: a, dtype: int64 + >>> func(s_pl) + 0 1 + 1 2 + 2 3 + Name: a, dtype: int64 + """ + return self._compliant_series.to_pandas() + + def __add__(self, other: object) -> Self: + return self._from_compliant_series( + self._compliant_series.__add__(self._extract_native(other)) + ) + + def __radd__(self, other: object) -> Self: + return self._from_compliant_series( + self._compliant_series.__radd__(self._extract_native(other)) + ) + + def __sub__(self, other: object) -> Self: + return self._from_compliant_series( + self._compliant_series.__sub__(self._extract_native(other)) + ) + + def __rsub__(self, other: object) -> Self: + return self._from_compliant_series( + self._compliant_series.__rsub__(self._extract_native(other)) + ) + + def __mul__(self, other: object) -> Self: + return self._from_compliant_series( + self._compliant_series.__mul__(self._extract_native(other)) + ) + + def __rmul__(self, other: object) -> Self: + return self._from_compliant_series( + self._compliant_series.__rmul__(self._extract_native(other)) + ) + + def __truediv__(self, other: object) -> Self: + return self._from_compliant_series( + self._compliant_series.__truediv__(self._extract_native(other)) + ) + + def __rtruediv__(self, other: object) -> Self: + return self._from_compliant_series( + self._compliant_series.__rtruediv__(self._extract_native(other)) + ) + + def __floordiv__(self, other: object) -> Self: + return self._from_compliant_series( + self._compliant_series.__floordiv__(self._extract_native(other)) + ) + + def __rfloordiv__(self, other: object) -> Self: + return self._from_compliant_series( + self._compliant_series.__rfloordiv__(self._extract_native(other)) + ) + + def __pow__(self, other: object) -> Self: + return self._from_compliant_series( + self._compliant_series.__pow__(self._extract_native(other)) + ) + + def __rpow__(self, other: object) -> Self: + return 
self._from_compliant_series( + self._compliant_series.__rpow__(self._extract_native(other)) + ) + + def __mod__(self, other: object) -> Self: + return self._from_compliant_series( + self._compliant_series.__mod__(self._extract_native(other)) + ) + + def __rmod__(self, other: object) -> Self: + return self._from_compliant_series( + self._compliant_series.__rmod__(self._extract_native(other)) + ) + + def __eq__(self, other: object) -> Self: # type: ignore[override] + return self._from_compliant_series( + self._compliant_series.__eq__(self._extract_native(other)) + ) + + def __ne__(self, other: object) -> Self: # type: ignore[override] + return self._from_compliant_series( + self._compliant_series.__ne__(self._extract_native(other)) + ) + + def __gt__(self, other: Any) -> Self: + return self._from_compliant_series( + self._compliant_series.__gt__(self._extract_native(other)) + ) + + def __ge__(self, other: Any) -> Self: + return self._from_compliant_series( + self._compliant_series.__ge__(self._extract_native(other)) + ) + + def __lt__(self, other: Any) -> Self: + return self._from_compliant_series( + self._compliant_series.__lt__(self._extract_native(other)) + ) + + def __le__(self, other: Any) -> Self: + return self._from_compliant_series( + self._compliant_series.__le__(self._extract_native(other)) + ) + + def __and__(self, other: Any) -> Self: + return self._from_compliant_series( + self._compliant_series.__and__(self._extract_native(other)) + ) + + def __or__(self, other: Any) -> Self: + return self._from_compliant_series( + self._compliant_series.__or__(self._extract_native(other)) + ) + + # unary + def __invert__(self) -> Self: + return self._from_compliant_series(self._compliant_series.__invert__()) + + def filter(self, other: Any) -> Self: + """ + Filter elements in the Series based on a condition. 
+ + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> s = [4, 10, 15, 34, 50] + >>> s_pd = pd.Series(s) + >>> s_pl = pl.Series(s) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.filter(s > 10) + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 2 15 + 3 34 + 4 50 + dtype: int64 + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (3,) + Series: '' [i64] + [ + 15 + 34 + 50 + ] + """ + return self._from_compliant_series( + self._compliant_series.filter(self._extract_native(other)) + ) + + # --- descriptive --- + def is_duplicated(self: Self) -> Self: + r""" + Get a mask of all duplicated rows in the Series. + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> s_pd = pd.Series([1, 2, 3, 1]) + >>> s_pl = pl.Series([1, 2, 3, 1]) + + Let's define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.is_duplicated() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) # doctest: +NORMALIZE_WHITESPACE + 0 True + 1 False + 2 False + 3 True + dtype: bool + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (4,) + Series: '' [bool] + [ + true + false + false + true + ] + """ + return self._from_compliant_series(self._compliant_series.is_duplicated()) + + def is_empty(self: Self) -> bool: + r""" + Check if the series is empty. + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + + Let's define a dataframe-agnostic function that filters rows in which "foo" + values are greater than 10, and then checks if the result is empty or not: + + >>> @nw.narwhalify + ... def func(s): + ... 
return s.filter(s > 10).is_empty() + + We can then pass either pandas or Polars to `func`: + + >>> s_pd = pd.Series([1, 2, 3]) + >>> s_pl = pl.Series([1, 2, 3]) + >>> func(s_pd), func(s_pl) + (True, True) + + >>> s_pd = pd.Series([100, 2, 3]) + >>> s_pl = pl.Series([100, 2, 3]) + >>> func(s_pd), func(s_pl) + (False, False) + """ + return self._compliant_series.is_empty() # type: ignore[no-any-return] + + def is_unique(self: Self) -> Self: + r""" + Get a mask of all unique rows in the Series. + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> s_pd = pd.Series([1, 2, 3, 1]) + >>> s_pl = pl.Series([1, 2, 3, 1]) + + Let's define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.is_unique() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) # doctest: +NORMALIZE_WHITESPACE + 0 False + 1 True + 2 True + 3 False + dtype: bool + + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (4,) + Series: '' [bool] + [ + false + true + true + false + ] + """ + return self._from_compliant_series(self._compliant_series.is_unique()) + + def null_count(self: Self) -> int: + r""" + Create a new Series that shows the null counts per column. + + Notes: + pandas and Polars handle null values differently. Polars distinguishes + between NaN and Null, whereas pandas doesn't. + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> s_pd = pd.Series([1, None, 3]) + >>> s_pl = pl.Series([1, None, None]) + + Let's define a dataframe-agnostic function that returns the null count of + the series: + + >>> @nw.narwhalify + ... def func(s): + ... 
return s.null_count() + + We can then pass either pandas or Polars to `func`: + >>> func(s_pd) # doctest:+SKIP + 1 + >>> func(s_pl) + 2 + """ + return self._compliant_series.null_count() # type: ignore[no-any-return] + + def is_first_distinct(self: Self) -> Self: + r""" + Return a boolean mask indicating the first occurrence of each distinct value. + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> s_pd = pd.Series([1, 1, 2, 3, 2]) + >>> s_pl = pl.Series([1, 1, 2, 3, 2]) + + Let's define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.is_first_distinct() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) # doctest: +NORMALIZE_WHITESPACE + 0 True + 1 False + 2 True + 3 True + 4 False + dtype: bool + + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (5,) + Series: '' [bool] + [ + true + false + true + true + false + ] + """ + return self._from_compliant_series(self._compliant_series.is_first_distinct()) + + def is_last_distinct(self: Self) -> Self: + r""" + Return a boolean mask indicating the last occurrence of each distinct value. + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> s_pd = pd.Series([1, 1, 2, 3, 2]) + >>> s_pl = pl.Series([1, 1, 2, 3, 2]) + + Let's define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.is_last_distinct() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) # doctest: +NORMALIZE_WHITESPACE + 0 False + 1 True + 2 False + 3 True + 4 True + dtype: bool + + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (5,) + Series: '' [bool] + [ + false + true + false + true + true + ] + """ + return self._from_compliant_series(self._compliant_series.is_last_distinct()) + + def is_sorted(self: Self, *, descending: bool = False) -> bool: + r""" + Check if the Series is sorted. 
+ + Arguments: + descending: Check if the Series is sorted in descending order. + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> unsorted_data = [1, 3, 2] + >>> sorted_data = [3, 2, 1] + + Let's define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(s, descending=False): + ... return s.is_sorted(descending=descending) + + We can then pass either pandas or Polars to `func`: + + >>> func(pl.Series(unsorted_data)) + False + >>> func(pl.Series(sorted_data), descending=True) + True + >>> func(pd.Series(unsorted_data)) + False + >>> func(pd.Series(sorted_data), descending=True) + True + """ + return self._compliant_series.is_sorted(descending=descending) # type: ignore[no-any-return] + + def value_counts( + self: Self, + *, + sort: bool = False, + parallel: bool = False, + name: str | None = None, + normalize: bool = False, + ) -> DataFrame[Any]: + r""" + Count the occurrences of unique values. + + Arguments: + sort: Sort the output by count in descending order. If set to False (default), + the order of the output is random. + parallel: Execute the computation in parallel. Used for Polars only. + name: Give the resulting count column a specific name; if `normalize` is True + defaults to "proportion", otherwise defaults to "count". + normalize: If true gives relative frequencies of the unique values + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> s_pd = pd.Series([1, 1, 2, 3, 2], name="s") + >>> s_pl = pl.Series(values=[1, 1, 2, 3, 2], name="s") + + Let's define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... 
return s.value_counts(sort=True) + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) # doctest: +NORMALIZE_WHITESPACE + s count + 0 1 2 + 1 2 2 + 2 3 1 + + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (3, 2) + ┌─────┬───────┐ + │ s ┆ count │ + │ --- ┆ --- │ + │ i64 ┆ u32 │ + ╞═════╪═══════╡ + │ 1 ┆ 2 │ + │ 2 ┆ 2 │ + │ 3 ┆ 1 │ + └─────┴───────┘ + """ + from narwhals.dataframe import DataFrame + + return DataFrame( + self._compliant_series.value_counts( + sort=sort, parallel=parallel, name=name, normalize=normalize + ), + level=self._level, + ) + + def quantile( + self, + quantile: float, + interpolation: Literal["nearest", "higher", "lower", "midpoint", "linear"], + ) -> Any: + """ + Get quantile value of the series. + + Note: + pandas and Polars may have implementation differences for a given interpolation method. + + Arguments: + quantile : float + Quantile between 0.0 and 1.0. + interpolation : {'nearest', 'higher', 'lower', 'midpoint', 'linear'} + Interpolation method. + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> data = list(range(50)) + >>> s_pd = pd.Series(data) + >>> s_pl = pl.Series(data) + + Let's define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return [ + ... s.quantile(quantile=q, interpolation="nearest") + ... for q in (0.1, 0.25, 0.5, 0.75, 0.9) + ... ] + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) # doctest: +SKIP + [5, 12, 24, 37, 44] + + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + [5.0, 12.0, 25.0, 37.0, 44.0] + """ + return self._compliant_series.quantile( + quantile=quantile, interpolation=interpolation + ) + + def zip_with(self: Self, mask: Self, other: Self) -> Self: + """ + Take values from self or other based on the given mask. + + Where mask evaluates true, take values from self. Where mask evaluates false, + take values from other. 
+ + Arguments: + mask: Boolean Series + other: Series of same type. + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> s1_pl = pl.Series([1, 2, 3, 4, 5]) + >>> s2_pl = pl.Series([5, 4, 3, 2, 1]) + >>> mask_pl = pl.Series([True, False, True, False, True]) + >>> s1_pd = pd.Series([1, 2, 3, 4, 5]) + >>> s2_pd = pd.Series([5, 4, 3, 2, 1]) + >>> mask_pd = pd.Series([True, False, True, False, True]) + + Let's define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(s1_any, mask_any, s2_any): + ... return s1_any.zip_with(mask_any, s2_any) + + We can then pass either pandas or Polars to `func`: + + >>> func(s1_pl, mask_pl, s2_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (5,) + Series: '' [i64] + [ + 1 + 4 + 3 + 2 + 5 + ] + >>> func(s1_pd, mask_pd, s2_pd) + 0 1 + 1 4 + 2 3 + 3 2 + 4 5 + dtype: int64 + """ + return self._from_compliant_series( + self._compliant_series.zip_with( + self._extract_native(mask), self._extract_native(other) + ) + ) + + def item(self: Self, index: int | None = None) -> Any: + r""" + Return the Series as a scalar, or return the element at the given index. + + If no index is provided, this is equivalent to `s[0]`, with a check + that the shape is (1,). With an index, this is equivalent to `s[index]`. + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + + Let's define a dataframe-agnostic function that returns item at given index + + >>> @nw.narwhalify + ... def func(s, index=None): + ... return s.item(index) + + We can then pass either pandas or Polars to `func`: + + >>> func(pl.Series("a", [1]), None), func(pd.Series([1]), None) # doctest:+SKIP + (1, 1) + + >>> func(pl.Series("a", [9, 8, 7]), -1), func(pl.Series([9, 8, 7]), -2) + (7, 8) + """ + return self._compliant_series.item(index=index) + + def head(self: Self, n: int = 10) -> Self: + r""" + Get the first `n` rows. + + Arguments: + n: Number of rows to return. 
+ + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> data = list(range(10)) + >>> s_pd = pd.Series(data) + >>> s_pl = pl.Series(data) + + Let's define a dataframe-agnostic function that returns the first 3 rows: + + >>> @nw.narwhalify + ... def func(s): + ... return s.head(3) + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) # doctest: +NORMALIZE_WHITESPACE + 0 0 + 1 1 + 2 2 + dtype: int64 + + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (3,) + Series: '' [i64] + [ + 0 + 1 + 2 + ] + """ + return self._from_compliant_series(self._compliant_series.head(n)) + + def tail(self: Self, n: int = 10) -> Self: + r""" + Get the last `n` rows. + + Arguments: + n: Number of rows to return. + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> data = list(range(10)) + >>> s_pd = pd.Series(data) + >>> s_pl = pl.Series(data) + + Let's define a dataframe-agnostic function that returns the last 3 rows: + + >>> @nw.narwhalify + ... def func(s): + ... return s.tail(3) + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) # doctest: +NORMALIZE_WHITESPACE + 7 7 + 8 8 + 9 9 + dtype: int64 + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (3,) + Series: '' [i64] + [ + 7 + 8 + 9 + ] + """ + return self._from_compliant_series(self._compliant_series.tail(n)) + + def round(self: Self, decimals: int = 0) -> Self: + r""" + Round underlying floating point data by `decimals` digits. + + Arguments: + decimals: Number of decimals to round by. + + Notes: + For values exactly halfway between rounded decimal values pandas behaves differently than Polars and Arrow. + + pandas rounds to the nearest even value (e.g. -0.5 and 0.5 round to 0.0, 1.5 and 2.5 round to 2.0, 3.5 and + 4.5 to 4.0, etc..). + + Polars and Arrow round away from 0 (e.g. -0.5 to -1.0, 0.5 to 1.0, 1.5 to 2.0, 2.5 to 3.0, etc..). 
+ + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> data = [1.12345, 2.56789, 3.901234] + >>> s_pd = pd.Series(data) + >>> s_pl = pl.Series(data) + + Let's define a dataframe-agnostic function that rounds to the first decimal: + + >>> @nw.narwhalify + ... def func(s): + ... return s.round(1) + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) # doctest: +NORMALIZE_WHITESPACE + 0 1.1 + 1 2.6 + 2 3.9 + dtype: float64 + + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (3,) + Series: '' [f64] + [ + 1.1 + 2.6 + 3.9 + ] + """ + return self._from_compliant_series(self._compliant_series.round(decimals)) + + def to_dummies( + self: Self, *, separator: str = "_", drop_first: bool = False + ) -> DataFrame[Any]: + r""" + Get dummy/indicator variables. + + Arguments: + separator: Separator/delimiter used when generating column names. + drop_first: Remove the first category from the variable being encoded. + + Notes: + pandas and Polars handle null values differently. Polars distinguishes + between NaN and Null, whereas pandas doesn't. + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> data = [1, 2, 3] + >>> s_pd = pd.Series(data, name="a") + >>> s_pl = pl.Series("a", data) + + Let's define a dataframe-agnostic function that rounds to the first decimal: + + >>> @nw.narwhalify + ... def func(s, drop_first: bool = False): + ... 
return s.to_dummies(drop_first=drop_first) + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + a_1 a_2 a_3 + 0 1 0 0 + 1 0 1 0 + 2 0 0 1 + + >>> func(s_pd, drop_first=True) + a_2 a_3 + 0 0 0 + 1 1 0 + 2 0 1 + + >>> func(s_pl) + shape: (3, 3) + ┌─────┬─────┬─────┐ + │ a_1 ┆ a_2 ┆ a_3 │ + │ --- ┆ --- ┆ --- │ + │ u8 ┆ u8 ┆ u8 │ + ╞═════╪═════╪═════╡ + │ 1 ┆ 0 ┆ 0 │ + │ 0 ┆ 1 ┆ 0 │ + │ 0 ┆ 0 ┆ 1 │ + └─────┴─────┴─────┘ + >>> func(s_pl, drop_first=True) + shape: (3, 2) + ┌─────┬─────┐ + │ a_2 ┆ a_3 │ + │ --- ┆ --- │ + │ u8 ┆ u8 │ + ╞═════╪═════╡ + │ 0 ┆ 0 │ + │ 1 ┆ 0 │ + │ 0 ┆ 1 │ + └─────┴─────┘ + """ + from narwhals.dataframe import DataFrame + + return DataFrame( + self._compliant_series.to_dummies(separator=separator, drop_first=drop_first), + level=self._level, + ) + + def gather_every(self: Self, n: int, offset: int = 0) -> Self: + r""" + Take every nth value in the Series and return as new Series. + + Arguments: + n: Gather every *n*-th row. + offset: Starting index. + + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> data = [1, 2, 3, 4] + >>> s_pd = pd.Series(name="a", data=data) + >>> s_pl = pl.Series(name="a", values=data) + + Let's define a dataframe-agnostic function in which gather every 2 rows, + starting from a offset of 1: + + >>> @nw.narwhalify + ... def func(s): + ... return s.gather_every(n=2, offset=1) + + >>> func(s_pd) + 1 2 + 3 4 + Name: a, dtype: int64 + + >>> func(s_pl) # doctest:+NORMALIZE_WHITESPACE + shape: (2,) + Series: 'a' [i64] + [ + 2 + 4 + ] + """ + return self._from_compliant_series( + self._compliant_series.gather_every(n=n, offset=offset) + ) + + def to_arrow(self: Self) -> pa.Array: + r""" + Convert to arrow. 
+ + Examples: + >>> import narwhals as nw + >>> import pandas as pd + >>> import polars as pl + >>> data = [1, 2, 3, 4] + >>> s_pd = pd.Series(name="a", data=data) + >>> s_pl = pl.Series(name="a", values=data) + + Let's define a dataframe-agnostic function that converts to arrow: + + >>> @nw.narwhalify + ... def func(s): + ... return s.to_arrow() + + >>> func(s_pd) # doctest:+NORMALIZE_WHITESPACE + + [ + 1, + 2, + 3, + 4 + ] + + >>> func(s_pl) # doctest:+NORMALIZE_WHITESPACE + + [ + 1, + 2, + 3, + 4 + ] + """ + return self._compliant_series.to_arrow() + + def mode(self: Self) -> Self: + r""" + Compute the most occurring value(s). + + Can return multiple values. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + + >>> data = [1, 1, 2, 2, 3] + >>> s_pd = pd.Series(name="a", data=data) + >>> s_pl = pl.Series(name="a", values=data) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.mode().sort() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 1 + 1 2 + Name: a, dtype: int64 + + >>> func(s_pl) # doctest:+NORMALIZE_WHITESPACE + shape: (2,) + Series: 'a' [i64] + [ + 1 + 2 + ] + """ + return self._from_compliant_series(self._compliant_series.mode()) + + def __iter__(self: Self) -> Iterator[Any]: + yield from self._compliant_series.__iter__() + + @property + def str(self) -> SeriesStringNamespace: + return SeriesStringNamespace(self) + + @property + def dt(self) -> SeriesDateTimeNamespace: + return SeriesDateTimeNamespace(self) + + @property + def cat(self) -> SeriesCatNamespace: + return SeriesCatNamespace(self) + + +class SeriesCatNamespace: + def __init__(self, series: Series) -> None: + self._narwhals_series = series + + def get_categories(self) -> Series: + """ + Get unique categories from column. 
+ + Examples: + Let's create some series: + + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> data = ["apple", "mango", "mango"] + >>> s_pd = pd.Series(data, dtype="category") + >>> s_pl = pl.Series(data, dtype=pl.Categorical) + + We define a dataframe-agnostic function to get unique categories + from column 'fruits': + + >>> @nw.narwhalify(series_only=True) + ... def func(s): + ... return s.cat.get_categories() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 apple + 1 mango + dtype: object + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (2,) + Series: '' [str] + [ + "apple" + "mango" + ] + """ + return self._narwhals_series._from_compliant_series( + self._narwhals_series._compliant_series.cat.get_categories() + ) + + +class SeriesStringNamespace: + def __init__(self, series: Series) -> None: + self._narwhals_series = series + + def len_chars(self) -> Series: + r""" + Return the length of each string as the number of characters. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> data = ["foo", "Café", "345", "東京", None] + >>> s_pd = pd.Series(data) + >>> s_pl = pl.Series(data) + + We define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.str.len_chars() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 3.0 + 1 4.0 + 2 3.0 + 3 2.0 + 4 NaN + dtype: float64 + + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (5,) + Series: '' [u32] + [ + 3 + 4 + 3 + 2 + null + ] + """ + return self._narwhals_series._from_compliant_series( + self._narwhals_series._compliant_series.str.len_chars() + ) + + def replace( + self, pattern: str, value: str, *, literal: bool = False, n: int = 1 + ) -> Series: + r""" + Replace first matching regex/literal substring with a new string value. + + Arguments: + pattern: A valid regular expression pattern. 
+ value: String that will replace the matched substring. + literal: Treat `pattern` as a literal string. + n: Number of matches to replace. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> data = ["123abc", "abc abc123"] + >>> s_pd = pd.Series(data) + >>> s_pl = pl.Series(data) + + We define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... s = s.str.replace("abc", "") + ... return s.to_list() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + ['123', ' abc123'] + + >>> func(s_pl) + ['123', ' abc123'] + """ + return self._narwhals_series._from_compliant_series( + self._narwhals_series._compliant_series.str.replace( + pattern, value, literal=literal, n=n + ) + ) + + def replace_all(self, pattern: str, value: str, *, literal: bool = False) -> Series: + r""" + Replace all matching regex/literal substring with a new string value. + + Arguments: + pattern: A valid regular expression pattern. + value: String that will replace the matched substring. + literal: Treat `pattern` as a literal string. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> data = ["123abc", "abc abc123"] + >>> s_pd = pd.Series(data) + >>> s_pl = pl.Series(data) + + We define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... s = s.str.replace_all("abc", "") + ... return s.to_list() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + ['123', ' 123'] + + >>> func(s_pl) + ['123', ' 123'] + """ + return self._narwhals_series._from_compliant_series( + self._narwhals_series._compliant_series.str.replace_all( + pattern, value, literal=literal + ) + ) + + def strip_chars(self, characters: str | None = None) -> Series: + r""" + Remove leading and trailing characters. + + Arguments: + characters: The set of characters to be removed. 
All combinations of this set of characters will be stripped from the start and end of the string. If set to None (default), all leading and trailing whitespace is removed instead. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> data = ["apple", "\nmango"] + >>> s_pd = pd.Series(data) + >>> s_pl = pl.Series(data) + + We define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... s = s.str.strip_chars() + ... return s.to_list() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + ['apple', 'mango'] + + >>> func(s_pl) + ['apple', 'mango'] + """ + return self._narwhals_series._from_compliant_series( + self._narwhals_series._compliant_series.str.strip_chars(characters) + ) + + def starts_with(self, prefix: str) -> Series: + r""" + Check if string values start with a substring. + + Arguments: + prefix: prefix substring + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> data = ["apple", "mango", None] + >>> s_pd = pd.Series(data) + >>> s_pl = pl.Series(data) + + We define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.str.starts_with("app") + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 True + 1 False + 2 None + dtype: object + + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (3,) + Series: '' [bool] + [ + true + false + null + ] + """ + return self._narwhals_series._from_compliant_series( + self._narwhals_series._compliant_series.str.starts_with(prefix) + ) + + def ends_with(self, suffix: str) -> Series: + r""" + Check if string values end with a substring. 
+ + Arguments: + suffix: suffix substring + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> data = ["apple", "mango", None] + >>> s_pd = pd.Series(data) + >>> s_pl = pl.Series(data) + + We define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.str.ends_with("ngo") + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 False + 1 True + 2 None + dtype: object + + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (3,) + Series: '' [bool] + [ + false + true + null + ] + """ + return self._narwhals_series._from_compliant_series( + self._narwhals_series._compliant_series.str.ends_with(suffix) + ) + + def contains(self, pattern: str, *, literal: bool = False) -> Series: + r""" + Check if string contains a substring that matches a pattern. + + Arguments: + pattern: A Character sequence or valid regular expression pattern. + literal: If True, treats the pattern as a literal string. + If False, assumes the pattern is a regular expression. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> pets = ["cat", "dog", "rabbit and parrot", "dove", None] + >>> s_pd = pd.Series(pets) + >>> s_pl = pl.Series(pets) + + We define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.str.contains("parrot|dove") + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 False + 1 False + 2 True + 3 True + 4 None + dtype: object + + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (5,) + Series: '' [bool] + [ + false + false + true + true + null + ] + """ + return self._narwhals_series._from_compliant_series( + self._narwhals_series._compliant_series.str.contains(pattern, literal=literal) + ) + + def slice(self, offset: int, length: int | None = None) -> Series: + r""" + Create subslices of the string values of a Series. + + Arguments: + offset: Start index. 
Negative indexing is supported. + length: Length of the slice. If set to `None` (default), the slice is taken to the + end of the string. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> data = ["pear", None, "papaya", "dragonfruit"] + >>> s_pd = pd.Series(data) + >>> s_pl = pl.Series(data) + + We define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.str.slice(4, length=3) + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) # doctest: +NORMALIZE_WHITESPACE + 0 + 1 None + 2 ya + 3 onf + dtype: object + + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (4,) + Series: '' [str] + [ + "" + null + "ya" + "onf" + ] + + Using negative indexes: + + >>> @nw.narwhalify + ... def func(s): + ... return s.str.slice(-3) + + >>> func(s_pd) # doctest: +NORMALIZE_WHITESPACE + 0 ear + 1 None + 2 aya + 3 uit + dtype: object + + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (4,) + Series: '' [str] + [ + "ear" + null + "aya" + "uit" + ] + """ + return self._narwhals_series._from_compliant_series( + self._narwhals_series._compliant_series.str.slice( + offset=offset, length=length + ) + ) + + def head(self, n: int = 5) -> Series: + r""" + Take the first n elements of each string. + + Arguments: + n: Number of elements to take. Negative indexing is supported (see note (1.)) + + Notes: + 1. When the `n` input is negative, `head` returns characters up to the n-th from the end of the string. + For example, if `n = -3`, then all characters except the last three are returned. + 2. If the length of the string has fewer than `n` characters, the full string is returned. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> lyrics = ["Atatata", "taata", "taatatata", "zukkyun"] + >>> s_pd = pd.Series(lyrics) + >>> s_pl = pl.Series(lyrics) + + We define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... 
def func(s): + ... return s.str.head() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 Atata + 1 taata + 2 taata + 3 zukky + dtype: object + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (4,) + Series: '' [str] + [ + "Atata" + "taata" + "taata" + "zukky" + ] + """ + return self._narwhals_series._from_compliant_series( + self._narwhals_series._compliant_series.str.slice(0, n) + ) + + def tail(self, n: int = 5) -> Series: + r""" + Take the last n elements of each string. + + Arguments: + n: Number of elements to take. Negative indexing is supported (see note (1.)) + + Notes: + 1. When the `n` input is negative, `tail` returns characters starting from the n-th from the beginning of + the string. For example, if `n = -3`, then all characters except the first three are returned. + 2. If the length of the string has fewer than `n` characters, the full string is returned. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> lyrics = ["Atatata", "taata", "taatatata", "zukkyun"] + >>> s_pd = pd.Series(lyrics) + >>> s_pl = pl.Series(lyrics) + + We define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.str.tail() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 atata + 1 taata + 2 atata + 3 kkyun + dtype: object + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (4,) + Series: '' [str] + [ + "atata" + "taata" + "atata" + "kkyun" + ] + """ + return self._narwhals_series._from_compliant_series( + self._narwhals_series._compliant_series.str.slice(-n) + ) + + def to_uppercase(self) -> Series: + r""" + Transform string to uppercase variant. + + Notes: + The PyArrow backend will convert 'ß' to 'ẞ' instead of 'SS'. + For more info see: https://github.com/apache/arrow/issues/34599 + There may be other unicode-edge-case-related variations across implementations. 
+ + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> data = {"fruits": ["apple", "mango", None]} + >>> df_pd = pd.DataFrame(data) + >>> df_pl = pl.DataFrame(data) + + We define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(df): + ... return df.with_columns(upper_col=nw.col("fruits").str.to_uppercase()) + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd) # doctest: +NORMALIZE_WHITESPACE + fruits upper_col + 0 apple APPLE + 1 mango MANGO + 2 None None + + >>> func(df_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (3, 2) + ┌────────┬───────────┐ + │ fruits ┆ upper_col │ + │ --- ┆ --- │ + │ str ┆ str │ + ╞════════╪═══════════╡ + │ apple ┆ APPLE │ + │ mango ┆ MANGO │ + │ null ┆ null │ + └────────┴───────────┘ + + """ + return self._narwhals_series._from_compliant_series( + self._narwhals_series._compliant_series.str.to_uppercase() + ) + + def to_lowercase(self) -> Series: + r""" + Transform string to lowercase variant. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> data = {"fruits": ["APPLE", "MANGO", None]} + >>> df_pd = pd.DataFrame(data) + >>> df_pl = pl.DataFrame(data) + + We define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(df): + ... 
return df.with_columns(lower_col=nw.col("fruits").str.to_lowercase()) + + We can then pass either pandas or Polars to `func`: + + >>> func(df_pd) # doctest: +NORMALIZE_WHITESPACE + fruits lower_col + 0 APPLE apple + 1 MANGO mango + 2 None None + + + >>> func(df_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (3, 2) + ┌────────┬───────────┐ + │ fruits ┆ lower_col │ + │ --- ┆ --- │ + │ str ┆ str │ + ╞════════╪═══════════╡ + │ APPLE ┆ apple │ + │ MANGO ┆ mango │ + │ null ┆ null │ + └────────┴───────────┘ + """ + return self._narwhals_series._from_compliant_series( + self._narwhals_series._compliant_series.str.to_lowercase() + ) + + +class SeriesDateTimeNamespace: + def __init__(self, series: Series) -> None: + self._narwhals_series = series + + def date(self) -> Series: + """ + Get the date in a datetime series. + + Raises: + NotImplementedError: If pandas default backend is being used. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> from datetime import datetime + >>> import narwhals as nw + >>> dates = [datetime(2012, 1, 7, 10, 20), datetime(2023, 3, 10, 11, 32)] + >>> s_pd = pd.Series(dates).convert_dtypes( + ... dtype_backend="pyarrow" + ... ) # doctest:+SKIP + >>> s_pl = pl.Series(dates) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.dt.date() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) # doctest:+SKIP + 0 2012-01-07 + 1 2023-03-10 + dtype: date32[day][pyarrow] + + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (2,) + Series: '' [date] + [ + 2012-01-07 + 2023-03-10 + ] + """ + return self._narwhals_series._from_compliant_series( + self._narwhals_series._compliant_series.dt.date() + ) + + def year(self) -> Series: + """ + Get the year in a datetime series. 
+ + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> from datetime import datetime + >>> import narwhals as nw + >>> dates = [datetime(2012, 1, 7), datetime(2023, 3, 10)] + >>> s_pd = pd.Series(dates) + >>> s_pl = pl.Series(dates) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.dt.year() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 2012 + 1 2023 + dtype: int... + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (2,) + Series: '' [i32] + [ + 2012 + 2023 + ] + """ + return self._narwhals_series._from_compliant_series( + self._narwhals_series._compliant_series.dt.year() + ) + + def month(self) -> Series: + """ + Gets the month in a datetime series. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> from datetime import datetime + >>> import narwhals as nw + >>> dates = [datetime(2023, 2, 1), datetime(2023, 8, 3)] + >>> s_pd = pd.Series(dates) + >>> s_pl = pl.Series(dates) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.dt.month() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 2 + 1 8 + dtype: int... + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (2,) + Series: '' [i8] + [ + 2 + 8 + ] + """ + return self._narwhals_series._from_compliant_series( + self._narwhals_series._compliant_series.dt.month() + ) + + def day(self) -> Series: + """ + Extracts the day in a datetime series. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> from datetime import datetime + >>> import narwhals as nw + >>> dates = [datetime(2022, 1, 1), datetime(2022, 1, 5)] + >>> s_pd = pd.Series(dates) + >>> s_pl = pl.Series(dates) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.dt.day() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 1 + 1 5 + dtype: int... 
+ >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (2,) + Series: '' [i8] + [ + 1 + 5 + ] + """ + return self._narwhals_series._from_compliant_series( + self._narwhals_series._compliant_series.dt.day() + ) + + def hour(self) -> Series: + """ + Extracts the hour in a datetime series. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> from datetime import datetime + >>> import narwhals as nw + >>> dates = [datetime(2022, 1, 1, 5, 3), datetime(2022, 1, 5, 9, 12)] + >>> s_pd = pd.Series(dates) + >>> s_pl = pl.Series(dates) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.dt.hour() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 5 + 1 9 + dtype: int... + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (2,) + Series: '' [i8] + [ + 5 + 9 + ] + """ + return self._narwhals_series._from_compliant_series( + self._narwhals_series._compliant_series.dt.hour() + ) + + def minute(self) -> Series: + """ + Extracts the minute in a datetime series. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> from datetime import datetime + >>> import narwhals as nw + >>> dates = [datetime(2022, 1, 1, 5, 3), datetime(2022, 1, 5, 9, 12)] + >>> s_pd = pd.Series(dates) + >>> s_pl = pl.Series(dates) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.dt.minute() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 3 + 1 12 + dtype: int... + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (2,) + Series: '' [i8] + [ + 3 + 12 + ] + """ + return self._narwhals_series._from_compliant_series( + self._narwhals_series._compliant_series.dt.minute() + ) + + def second(self) -> Series: + """ + Extracts the second(s) in a datetime series. 
+ + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> from datetime import datetime + >>> import narwhals as nw + >>> dates = [datetime(2022, 1, 1, 5, 3, 10), datetime(2022, 1, 5, 9, 12, 4)] + >>> s_pd = pd.Series(dates) + >>> s_pl = pl.Series(dates) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.dt.second() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 10 + 1 4 + dtype: int... + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (2,) + Series: '' [i8] + [ + 10 + 4 + ] + """ + return self._narwhals_series._from_compliant_series( + self._narwhals_series._compliant_series.dt.second() + ) + + def millisecond(self) -> Series: + """ + Extracts the milliseconds in a datetime series. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> from datetime import datetime + >>> import narwhals as nw + >>> dates = [ + ... datetime(2023, 5, 21, 12, 55, 10, 400000), + ... datetime(2023, 5, 21, 12, 55, 10, 600000), + ... datetime(2023, 5, 21, 12, 55, 10, 800000), + ... datetime(2023, 5, 21, 12, 55, 11, 0), + ... datetime(2023, 5, 21, 12, 55, 11, 200000), + ... ] + + >>> s_pd = pd.Series(dates) + >>> s_pl = pl.Series(dates) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.dt.millisecond().alias("datetime") + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 400 + 1 600 + 2 800 + 3 0 + 4 200 + Name: datetime, dtype: int... + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (5,) + Series: 'datetime' [i32] + [ + 400 + 600 + 800 + 0 + 200 + ] + """ + return self._narwhals_series._from_compliant_series( + self._narwhals_series._compliant_series.dt.millisecond() + ) + + def microsecond(self) -> Series: + """ + Extracts the microseconds in a datetime series. 
+ + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> from datetime import datetime + >>> import narwhals as nw + >>> dates = [ + ... datetime(2023, 5, 21, 12, 55, 10, 400000), + ... datetime(2023, 5, 21, 12, 55, 10, 600000), + ... datetime(2023, 5, 21, 12, 55, 10, 800000), + ... datetime(2023, 5, 21, 12, 55, 11, 0), + ... datetime(2023, 5, 21, 12, 55, 11, 200000), + ... ] + + >>> s_pd = pd.Series(dates) + >>> s_pl = pl.Series(dates) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.dt.microsecond().alias("datetime") + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 400000 + 1 600000 + 2 800000 + 3 0 + 4 200000 + Name: datetime, dtype: int... + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (5,) + Series: 'datetime' [i32] + [ + 400000 + 600000 + 800000 + 0 + 200000 + ] + """ + return self._narwhals_series._from_compliant_series( + self._narwhals_series._compliant_series.dt.microsecond() + ) + + def nanosecond(self) -> Series: + """ + Extracts the nanosecond(s) in a date series. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> from datetime import datetime + >>> import narwhals as nw + >>> dates = [ + ... datetime(2022, 1, 1, 5, 3, 10, 500000), + ... datetime(2022, 1, 5, 9, 12, 4, 60000), + ... ] + >>> s_pd = pd.Series(dates) + >>> s_pl = pl.Series(dates) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.dt.nanosecond() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 500000000 + 1 60000000 + dtype: int... + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (2,) + Series: '' [i32] + [ + 500000000 + 60000000 + ] + """ + return self._narwhals_series._from_compliant_series( + self._narwhals_series._compliant_series.dt.nanosecond() + ) + + def ordinal_day(self) -> Series: + """ + Get ordinal day. 
+ + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> from datetime import datetime + >>> import narwhals as nw + >>> data = [datetime(2020, 1, 1), datetime(2020, 8, 3)] + >>> s_pd = pd.Series(data) + >>> s_pl = pl.Series(data) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.dt.ordinal_day() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 1 + 1 216 + dtype: int32 + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (2,) + Series: '' [i16] + [ + 1 + 216 + ] + """ + return self._narwhals_series._from_compliant_series( + self._narwhals_series._compliant_series.dt.ordinal_day() + ) + + def total_minutes(self) -> Series: + """ + Get total minutes. + + Notes: + The function outputs the total minutes in the int dtype by default, + however, pandas may change the dtype to float when there are missing values, + consider using `fill_null()` in this case. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> from datetime import timedelta + >>> import narwhals as nw + >>> data = [timedelta(minutes=10), timedelta(minutes=20, seconds=40)] + >>> s_pd = pd.Series(data) + >>> s_pl = pl.Series(data) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.dt.total_minutes() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 10 + 1 20 + dtype: int... + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (2,) + Series: '' [i64] + [ + 10 + 20 + ] + """ + return self._narwhals_series._from_compliant_series( + self._narwhals_series._compliant_series.dt.total_minutes() + ) + + def total_seconds(self) -> Series: + """ + Get total seconds. + + Notes: + The function outputs the total seconds in the int dtype by default, + however, pandas may change the dtype to float when there are missing values, + consider using `fill_null()` in this case. 
+ + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> from datetime import timedelta + >>> import narwhals as nw + >>> data = [timedelta(seconds=10), timedelta(seconds=20, milliseconds=40)] + >>> s_pd = pd.Series(data) + >>> s_pl = pl.Series(data) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.dt.total_seconds() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 10 + 1 20 + dtype: int... + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (2,) + Series: '' [i64] + [ + 10 + 20 + ] + """ + return self._narwhals_series._from_compliant_series( + self._narwhals_series._compliant_series.dt.total_seconds() + ) + + def total_milliseconds(self) -> Series: + """ + Get total milliseconds. + + Notes: + The function outputs the total milliseconds in the int dtype by default, + however, pandas may change the dtype to float when there are missing values, + consider using `fill_null()` in this case. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> from datetime import timedelta + >>> import narwhals as nw + >>> data = [ + ... timedelta(milliseconds=10), + ... timedelta(milliseconds=20, microseconds=40), + ... ] + >>> s_pd = pd.Series(data) + >>> s_pl = pl.Series(data) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.dt.total_milliseconds() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 10 + 1 20 + dtype: int... + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (2,) + Series: '' [i64] + [ + 10 + 20 + ] + """ + return self._narwhals_series._from_compliant_series( + self._narwhals_series._compliant_series.dt.total_milliseconds() + ) + + def total_microseconds(self) -> Series: + """ + Get total microseconds. 
+ + Notes: + The function outputs the total microseconds in the int dtype by default, + however, pandas may change the dtype to float when there are missing values, + consider using `fill_null()` in this case. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> from datetime import timedelta + >>> import narwhals as nw + >>> data = [ + ... timedelta(microseconds=10), + ... timedelta(milliseconds=1, microseconds=200), + ... ] + >>> s_pd = pd.Series(data) + >>> s_pl = pl.Series(data) + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.dt.total_microseconds() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 10 + 1 1200 + dtype: int... + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (2,) + Series: '' [i64] + [ + 10 + 1200 + ] + """ + return self._narwhals_series._from_compliant_series( + self._narwhals_series._compliant_series.dt.total_microseconds() + ) + + def total_nanoseconds(self) -> Series: + """ + Get total nanoseconds. + + Notes: + The function outputs the total nanoseconds in the int dtype by default, + however, pandas may change the dtype to float when there are missing values, + consider using `fill_null()` in this case. + + Examples: + >>> import pandas as pd + >>> import polars as pl + >>> from datetime import timedelta + >>> import narwhals as nw + >>> data = ["2024-01-01 00:00:00.000000001", "2024-01-01 00:00:00.000000002"] + >>> s_pd = pd.to_datetime(pd.Series(data)) + >>> s_pl = pl.Series(data).str.to_datetime(time_unit="ns") + + We define a library agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... 
return s.diff().dt.total_nanoseconds() + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 NaN + 1 1.0 + dtype: float64 + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (2,) + Series: '' [i64] + [ + null + 1 + ] + """ + return self._narwhals_series._from_compliant_series( + self._narwhals_series._compliant_series.dt.total_nanoseconds() + ) + + def to_string(self, format: str) -> Series: # noqa: A002 + """ + Convert a Date/Time/Datetime series into a String series with the given format. + + Notes: + Unfortunately, different libraries interpret format directives a bit + differently. + + - Chrono, the library used by Polars, uses `"%.f"` for fractional seconds, + whereas pandas and Python stdlib use `".%f"`. + - PyArrow interprets `"%S"` as "seconds, including fractional seconds" + whereas most other tools interpret it as "just seconds, as 2 digits". + + Therefore, we make the following adjustments: + + - for pandas-like libraries, we replace `"%S.%f"` with `"%S%.f"`. + - for PyArrow, we replace `"%S.%f"` with `"%S"`. + + Workarounds like these don't make us happy, and we try to avoid them as + much as possible, but here we feel like it's the best compromise. + + If you just want to format a date/datetime Series as a local datetime + string, and have it work as consistently as possible across libraries, + we suggest using: + + - `"%Y-%m-%dT%H:%M:%S%.f"` for datetimes + - `"%Y-%m-%d"` for dates + + though note that, even then, different tools may return a different number + of trailing zeros. Nonetheless, this is probably consistent enough for + most applications. + + If you have an application where this is not enough, please open an issue + and let us know. + + Examples: + >>> from datetime import datetime + >>> import pandas as pd + >>> import polars as pl + >>> import narwhals as nw + >>> data = [ + ... datetime(2020, 3, 1), + ... datetime(2020, 4, 1), + ... datetime(2020, 5, 1), + ... 
] + >>> s_pd = pd.Series(data) + >>> s_pl = pl.Series(data) + + We define a dataframe-agnostic function: + + >>> @nw.narwhalify + ... def func(s): + ... return s.dt.to_string("%Y/%m/%d") + + We can then pass either pandas or Polars to `func`: + + >>> func(s_pd) + 0 2020/03/01 + 1 2020/04/01 + 2 2020/05/01 + dtype: object + + >>> func(s_pl) # doctest: +NORMALIZE_WHITESPACE + shape: (3,) + Series: '' [str] + [ + "2020/03/01" + "2020/04/01" + "2020/05/01" + ] + """ + return self._narwhals_series._from_compliant_series( + self._narwhals_series._compliant_series.dt.to_string(format) + ) diff --git a/parrot/lib/python3.10/site-packages/narwhals/typing.py b/parrot/lib/python3.10/site-packages/narwhals/typing.py new file mode 100644 index 0000000000000000000000000000000000000000..ecc89a4b25b228f2e8f2e2ce565162aa76707135 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/narwhals/typing.py @@ -0,0 +1,64 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Any +from typing import Protocol +from typing import TypeVar +from typing import Union + +if TYPE_CHECKING: + import sys + + if sys.version_info >= (3, 10): + from typing import TypeAlias + else: + from typing_extensions import TypeAlias + + from narwhals.dataframe import DataFrame + from narwhals.dataframe import LazyFrame + from narwhals.expr import Expr + from narwhals.series import Series + + # All dataframes supported by Narwhals have a + # `columns` property. Their similarities don't extend + # _that_ much further unfortunately... + class NativeFrame(Protocol): + @property + def columns(self) -> Any: ... + + def join(self, *args: Any, **kwargs: Any) -> Any: ... + + class DataFrameLike(Protocol): + def __dataframe__(self, *args: Any, **kwargs: Any) -> Any: ... 
+ + +IntoExpr: TypeAlias = Union["Expr", str, "Series"] +"""Anything which can be converted to an expression.""" + +IntoDataFrame: TypeAlias = Union["NativeFrame", "DataFrame[Any]", "DataFrameLike"] +"""Anything which can be converted to a Narwhals DataFrame.""" + +IntoFrame: TypeAlias = Union[ + "NativeFrame", "DataFrame[Any]", "LazyFrame[Any]", "DataFrameLike" +] +"""Anything which can be converted to a Narwhals DataFrame or LazyFrame.""" + +Frame: TypeAlias = Union["DataFrame[Any]", "LazyFrame[Any]"] +"""DataFrame or LazyFrame""" + +# TypeVars for some of the above +IntoFrameT = TypeVar("IntoFrameT", bound="IntoFrame") +IntoDataFrameT = TypeVar("IntoDataFrameT", bound="IntoDataFrame") +FrameT = TypeVar("FrameT", "DataFrame[Any]", "LazyFrame[Any]") +DataFrameT = TypeVar("DataFrameT", bound="DataFrame[Any]") + +__all__ = [ + "IntoExpr", + "IntoDataFrame", + "IntoDataFrameT", + "IntoFrame", + "IntoFrameT", + "Frame", + "FrameT", + "DataFrameT", +] diff --git a/parrot/lib/python3.10/site-packages/scipy/linalg/_decomp_update.cpython-310-x86_64-linux-gnu.so b/parrot/lib/python3.10/site-packages/scipy/linalg/_decomp_update.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..33ddc007a3fdd7f99841f7ee5e0078a7d75d4110 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/linalg/_decomp_update.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a6204b7f449307731e48304a53bb75d6ae9c04988273ebb536c2c8a561798a1 +size 368608 diff --git a/parrot/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_interpolation.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_interpolation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..463250216c2febf6a062ce9232c2f3211ba8b1e4 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_interpolation.cpython-310.pyc differ diff --git 
a/parrot/lib/python3.10/site-packages/scipy/optimize/__pycache__/_optimize.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/optimize/__pycache__/_optimize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af24e674baa5732daf7d7b905f6b4513588e60f0 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/optimize/__pycache__/_optimize.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:765aafc800a8969959920f235bf84e3153f5f0155e4e8ca2345540b5c8e629f0 +size 114363 diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/__pycache__/_morestats.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/stats/__pycache__/_morestats.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8946866697432dffa0f7f7a1ab75e254c290725d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/__pycache__/_morestats.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd86e44c637fad170c336f3f0b7283971c06e4e1f8ba47f95bd4920d611984a6 +size 174170 diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-Z1-cdf-sample-data.npy b/parrot/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-Z1-cdf-sample-data.npy new file mode 100644 index 0000000000000000000000000000000000000000..adda664a7b5442fc0977ddbaa572c864ddd31f08 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-Z1-cdf-sample-data.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf18c1f2d65a232bf2c7121282df31bf2a8be827afafc4ed810ed37457ee898a +size 183728 diff --git a/parrot/lib/python3.10/site-packages/six-1.16.0.dist-info/INSTALLER b/parrot/lib/python3.10/site-packages/six-1.16.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ 
b/parrot/lib/python3.10/site-packages/six-1.16.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/parrot/lib/python3.10/site-packages/six-1.16.0.dist-info/LICENSE b/parrot/lib/python3.10/site-packages/six-1.16.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..de6633112c1f9951fd688e1fb43457a1ec11d6d8 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/six-1.16.0.dist-info/LICENSE @@ -0,0 +1,18 @@ +Copyright (c) 2010-2020 Benjamin Peterson + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/parrot/lib/python3.10/site-packages/six-1.16.0.dist-info/RECORD b/parrot/lib/python3.10/site-packages/six-1.16.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..cf6a630d965162930ae0f852e1e6dc801169ba45 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/six-1.16.0.dist-info/RECORD @@ -0,0 +1,9 @@ +__pycache__/six.cpython-310.pyc,, +six-1.16.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +six-1.16.0.dist-info/LICENSE,sha256=i7hQxWWqOJ_cFvOkaWWtI9gq3_YPI5P8J2K2MYXo5sk,1066 +six-1.16.0.dist-info/METADATA,sha256=VQcGIFCAEmfZcl77E5riPCN4v2TIsc_qtacnjxKHJoI,1795 +six-1.16.0.dist-info/RECORD,, +six-1.16.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +six-1.16.0.dist-info/WHEEL,sha256=Z-nyYpwrcSqxfdux5Mbn_DQ525iP7J2DG3JgGvOYyTQ,110 +six-1.16.0.dist-info/top_level.txt,sha256=_iVH_iYEtEXnD8nYGQYpYFUvkUW9sEO1GYbkeKSAais,4 +six.py,sha256=TOOfQi7nFGfMrIvtdr6wX4wyHH8M7aknmuLfo2cBBrM,34549 diff --git a/parrot/lib/python3.10/site-packages/six-1.16.0.dist-info/top_level.txt b/parrot/lib/python3.10/site-packages/six-1.16.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..ffe2fce498955b628014618b28c6bcf152466a4a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/six-1.16.0.dist-info/top_level.txt @@ -0,0 +1 @@ +six diff --git a/parrot/lib/python3.10/site-packages/sty-1.0.6.dist-info/LICENSE b/parrot/lib/python3.10/site-packages/sty-1.0.6.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..8794eafdefd0b412d442e4c09df92f704c1d610d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/sty-1.0.6.dist-info/LICENSE @@ -0,0 +1,204 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018-present Felix Meyer-Wolters + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + diff --git a/parrot/lib/python3.10/site-packages/sty-1.0.6.dist-info/WHEEL b/parrot/lib/python3.10/site-packages/sty-1.0.6.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..7c881525d384f1537e81e8a783c8433a748a7089 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/sty-1.0.6.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: poetry-core 1.8.1 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/detail/TensorDataContainer.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/detail/TensorDataContainer.h new file mode 100644 index 0000000000000000000000000000000000000000..8faf9db66c8ce5bc3f14b8914f0cf616bddad8c0 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/detail/TensorDataContainer.h @@ -0,0 +1,372 @@ +#pragma once + +#include +#include +#include +#include + +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#include +#endif + +#include + +namespace torch { + +namespace detail { + +enum class TensorDataContainerType { Scalar, InitList, Tensor }; + +struct TensorDataContainer; + +inline std::ostream& operator<<( + std::ostream& stream, + const TensorDataContainer& tensor_data_container); + +// FIXME: There is no `operator<<` overload for `at::kBFloat16` type, +// and we need to convert it to `float` type using `operator float()` function +// defined in `c10/util/BFloat16.h`. 
+// Tracking issue: https://github.com/pytorch/pytorch/issues/28845 +inline std::ostream& operator<<(std::ostream& stream, c10::BFloat16 value) { + stream << static_cast(value); + return stream; +} + +inline c10::ScalarType compute_desired_dtype(c10::ScalarType scalar_type) { + if (scalar_type == at::kInt || scalar_type == at::kLong) { + // C++ `torch::tensor` with an integer type or an `at::ArrayRef` / + // `std::vector` / (nested) braced-init-list of integer types always + // produces a tensor of dtype `at::kLong` (aka. int64_t), matching Python + // `torch.tensor` behavior. + return at::kLong; + } else if (scalar_type == at::kFloat || scalar_type == at::kDouble) { + // C++ `torch::tensor` with a floating-point type or an `at::ArrayRef` / + // `std::vector` / (nested) braced-init-list of floating-point types always + // produces a tensor of dtype `torch::get_default_dtype()`, matching Python + // `torch.tensor` behavior. + return at::typeMetaToScalarType(at::get_default_dtype()); + } else { + return scalar_type; + } +} + +// We use `TensorDataContainer` to support converting the following data +// container types into the equivalent Tensor: +// +// 1. Arbitrarily nested braced-init-list (e.g. `{{1, 2}, {3, 4}}`). +// 2. `at::ArrayRef` of supported tensor data types. +// 3. `std::vector` of supported tensor data types. +// +// At any time, a `TensorDataContainer` object represents one of the following: +// +// 1. A scalar with value `scalar()` and type `scalar_type()`. +// 2. A Tensor represented in `std::initializer_list` form, +// with value `init_list()`, Tensor scalar type `scalar_type()`, and Tensor +// sizes `sizes()`. +// 3. A Tensor represented in `at::Tensor` form, with value `tensor()`, scalar +// type `scalar_type()`, +// and Tensor sizes `sizes()`. +// +// All the infrastructure here is mostly to support converting an arbitrarily +// nested braced-init-list to the equivalent Tensor successfully. 
Consider the +// following example: +// +// `torch::tensor({{1}, {2}})` +// +// this will call into the `torch::tensor` function: +// +// `at::Tensor tensor(detail::TensorDataContainer tensor_data_container, const +// at::TensorOptions& options = {})` +// +// the compiler will first try to convert `{{1}, {2}}` to `TensorDataContainer` +// type: +// +// `TensorDataContainer({{1}, {2}})` +// +// which matches to the +// `TensorDataContainer(std::initializer_list)` +// constructor, and in an attempt to convert `{1}` and `{2}` to +// `TensorDataContainer`, it calls the following: +// +// `TensorDataContainer({1})` (same call path happens for `{2}`, and we'll just +// focus on `{1}` here) +// +// At this point, theoretically there are two plausible ways for `{1}` to be +// matched to one of the constructors of `TensorDataContainer`: +// +// 1. It can be a list-initialization of a scalar value, thus matching +// `TensorDataContainer(int value)`. +// 2. It can be converted to `std::initializer_list`, thus +// matching +// `TensorDataContainer(std::initializer_list)`. +// +// How does the compiler decide which one to choose? According to +// `https://en.cppreference.com/w/cpp/language/list_initialization`, +// braced-init-list always prefers the constructor that takes +// `std::initializer_list`. Hence we happily move forward with constructor #2, +// and it calls the following: +// +// `TensorDataContainer(1)` +// +// Now it matches `TensorDataContainer(int value)`, which stores `1` as a scalar +// value. All is good. +struct TensorDataContainer { + // NOTE: For tensors with zero-size dimensions (e.g. `torch::tensor({{}, + // {}})`), the innermost empty braced-init-list `{}` matches the default + // constructor of the innermost `TensorDataContainer`. + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + TensorDataContainer() + : sizes_({0}), + // NOTE: In Python, the dtype of tensors with zero-size dimensions (e.g. 
+ // `torch.tensor([[], []])`) depends on the value of + // `torch.get_default_dtype()`, and we should do the same for the C++ + // equivalent. + scalar_type_(at::typeMetaToScalarType(at::get_default_dtype())), + type_(TensorDataContainerType::InitList) {} +#define TENSOR(T, S) \ + TensorDataContainer(T value) \ + : sizes_(), \ + scalar_type_(at::k##S), \ + type_(TensorDataContainerType::Scalar), \ + scalar_(value) {} + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TENSOR) + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + AT_FORALL_COMPLEX_TYPES(TENSOR) +#undef TENSOR + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + TensorDataContainer(std::initializer_list init_list) + : sizes_(), + scalar_type_(init_list.begin()->scalar_type()), + type_(TensorDataContainerType::InitList), + init_list_(init_list) { + const TensorDataContainer& first_elem = *(init_list.begin()); + for (const auto& elem : init_list) { + TORCH_CHECK( + elem.sizes() == first_elem.sizes(), + "Expected all sub-lists to have sizes: ", + first_elem.sizes(), + " (e.g. 
", + first_elem, + "), ", + "but got sub-list ", + elem, + " with sizes: ", + elem.sizes()); + TORCH_CHECK( + elem.scalar_type() == first_elem.scalar_type(), + "Expected all elements of the tensor to have the same scalar type: ", + first_elem.scalar_type(), + ", but got element of scalar type: ", + elem.scalar_type()); + } + sizes_.reserve(first_elem.sizes().size() + 1); + sizes_.push_back(init_list.size()); + sizes_.insert( + sizes_.end(), first_elem.sizes().begin(), first_elem.sizes().end()); + } + +#define TENSOR(T, S) \ + TensorDataContainer(at::ArrayRef values) \ + : sizes_({(int64_t)values.size()}), \ + scalar_type_(at::k##S), \ + type_(TensorDataContainerType::Tensor) { \ + at::AutoDispatchBelowAutograd mode; \ + if (scalar_type_ == at::kBool) { \ + tensor_ = at::tensor(values, at::TensorOptions().device(at::kCPU)); \ + } else { \ + tensor_ = at::tensor(values, at::dtype(scalar_type_).device(at::kCPU)); \ + } \ + } + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TENSOR) + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + AT_FORALL_COMPLEX_TYPES(TENSOR) +#undef TENSOR + + // NOTE: We need to handle `std::vector` explicitly instead of relying on an + // implicit conversion to `at::ArrayRef`, otherwise the following error can be + // thrown when calling `torch::tensor(std::vector({1, 2}))`: + // ``` + // error: no matching function for call to 'tensor(const std::vector&)' + // no known conversion for argument 1 from 'const std::vector' to + // 'torch::detail::TensorDataContainer' + // ``` + // + // NOTE: `torch::tensor(std::vector)` is not supported for now, because + // ArrayRef cannot be constructed from a std::vector bitfield. 
+#define TENSOR(T, S) \ + TensorDataContainer(const std::vector& values) \ + : TensorDataContainer(at::ArrayRef(values)) {} + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + AT_FORALL_SCALAR_TYPES_AND2(Half, BFloat16, TENSOR) + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + AT_FORALL_COMPLEX_TYPES(TENSOR) +#undef TENSOR + + bool is_scalar() const { + return type_ == TensorDataContainerType::Scalar; + } + + const c10::Scalar& scalar() const { + TORCH_CHECK( + is_scalar(), + "Can only call `scalar()` on a TensorDataContainer that has `is_scalar() == true`"); + return scalar_; + } + + bool is_init_list() const { + return type_ == TensorDataContainerType::InitList; + } + + const std::initializer_list& init_list() const { + TORCH_CHECK( + is_init_list(), + "Can only call `init_list()` on a TensorDataContainer that has `is_init_list() == true`"); + return init_list_; + } + + bool is_tensor() const { + return type_ == TensorDataContainerType::Tensor; + } + + const at::Tensor& tensor() const { + TORCH_CHECK( + is_tensor(), + "Can only call `tensor()` on a TensorDataContainer that has `is_tensor() == true`"); + return tensor_; + } + + const std::vector& sizes() const { + return sizes_; + } + + const c10::ScalarType& scalar_type() const { + return scalar_type_; + } + + at::Tensor convert_to_tensor(at::TensorOptions options) const { + if (!options.has_dtype()) { + options = options.dtype(compute_desired_dtype(scalar_type_)); + } + + if (is_scalar()) { + at::AutoDispatchBelowAutograd mode; + return at::scalar_tensor(scalar_, options); + } else if (is_init_list()) { + // NOTE: Here we explicitly choose to initialize the tensor on CPU first, + // fill each element of the tensor, and then move the tensor to the + // desired device. 
For CUDA device, this approach only involves 1 CUDA + // kernel launch, and is much faster than initializing the tensor on CUDA + // first and then filling each element of it (which involves `N` CUDA + // kernel launches where `N` is the number of the elements in the tensor). + at::Tensor tensor = ([&]() { + at::AutoDispatchBelowAutograd mode; + return at::empty(sizes_, options.device(at::kCPU)); + })(); + fill_tensor(tensor); + return tensor.to(options.device()); + } else if (is_tensor()) { + auto output = tensor_.to(options); + TORCH_CHECK( + !tensor_.is_complex() || output.is_complex(), + "can not do torch::tensor(complex, dtype=non-complex) because complex can not be casted to real number without loss of information"); + return output; + } else { + TORCH_INTERNAL_ASSERT(false, "Invalid TensorDataContainer type"); + } + } + + void pretty_print_recursive(std::ostream& stream) const { + if (is_scalar()) { + AT_DISPATCH_ALL_TYPES_AND3( + at::kBool, + at::kHalf, + at::kBFloat16, + scalar_type_, + "TensorDataContainer_pretty_print_scalar", + [&] { stream << scalar_.to(); }); + } else if (is_init_list()) { + stream << "{"; + for (const TensorDataContainer* it = init_list_.begin(); + it != init_list_.end(); + it++) { + stream << *it; + if (std::next(it) != init_list_.end()) + stream << ", "; + } + stream << "}"; + } else if (is_tensor()) { + stream << "{"; + for (const auto i : c10::irange(tensor_.sizes()[0])) { + AT_DISPATCH_ALL_TYPES_AND3( + at::kBool, + at::kHalf, + at::kBFloat16, + scalar_type_, + "TensorDataContainer_pretty_print_tensor_item", + [&] { stream << tensor_[i].item(); }); + if (i != tensor_.sizes()[0] - 1) + stream << ", "; + } + stream << "}"; + } else { + TORCH_INTERNAL_ASSERT(false, "Invalid TensorDataContainer type"); + } + } + + private: + void fill_tensor(at::Tensor& tensor) const { + if (is_scalar()) { + TORCH_INTERNAL_ASSERT( + tensor.dim() == 0, + "Expected a 0-dim Tensor, but got Tensor with dimensions: ", + tensor.dim()); + at::NoGradGuard 
guard; + tensor.fill_(scalar_); + } else if (is_init_list()) { + TORCH_INTERNAL_ASSERT( + tensor.sizes()[0] == (int64_t)init_list_.size(), + "Expected a Tensor with size ", + init_list_.size(), + " in its first dimension, but got Tensor with size ", + tensor.sizes()[0], + " in its first dimension"); + size_t index = 0; + for (const auto& elem : init_list_) { + at::Tensor slice = tensor[index]; + elem.fill_tensor(slice); + index++; + } + } else if (is_tensor()) { + TORCH_INTERNAL_ASSERT( + false, + "TensorDataContainer is already a Tensor type, `fill_tensor` should not be called"); + } else { + TORCH_INTERNAL_ASSERT(false, "Invalid TensorDataContainer type"); + } + } + + std::vector sizes_; + c10::ScalarType scalar_type_; + TensorDataContainerType type_; + c10::Scalar scalar_; + std::initializer_list init_list_; + at::Tensor tensor_; +}; + +inline std::ostream& operator<<( + std::ostream& stream, + const TensorDataContainer& tensor_data_container) { + tensor_data_container.pretty_print_recursive(stream); + return stream; +} + +} // namespace detail + +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/detail/static.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/detail/static.h new file mode 100644 index 0000000000000000000000000000000000000000..c85fc7fff4b4d56171c6add8f82ea99ba74242bb --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/detail/static.h @@ -0,0 +1,65 @@ +#pragma once + +#include +#include + +#include +#include + +namespace torch { +namespace nn { +class Module; +} // namespace nn +} // namespace torch + +namespace torch { +namespace detail { +/// Detects if a type T has a forward() method. +template +struct has_forward { + // Declare two types with differing size. + using yes = int8_t; + using no = int16_t; + + // Here we declare two functions. 
The first is only enabled if `&U::forward` + // is well-formed and returns the `yes` type. In C++, the ellipsis parameter + // type (`...`) always puts the function at the bottom of overload resolution. + // This is specified in the standard as: 1) A standard conversion sequence is + // always better than a user-defined conversion sequence or an ellipsis + // conversion sequence. 2) A user-defined conversion sequence is always better + // than an ellipsis conversion sequence This means that if the first overload + // is viable, it will be preferred over the second as long as we pass any + // convertible type. The type of `&U::forward` is a pointer type, so we can + // pass e.g. 0. + template + static yes test(decltype(&U::forward)); + template + static no test(...); + + // Finally we test statically whether the size of the type returned by the + // selected overload is the size of the `yes` type. + static constexpr bool value = (sizeof(test(nullptr)) == sizeof(yes)); +}; + +template +constexpr bool check_not_lvalue_references() { + return (!std::is_lvalue_reference::value || + std::is_const::type>::value) && + check_not_lvalue_references(); +} + +template <> +inline constexpr bool check_not_lvalue_references() { + return true; +} + +/// A type trait whose `value` member is true if `M` derives from `Module`. 
+template +using is_module = + std::is_base_of::type>; + +template +using enable_if_module_t = + typename std::enable_if::value, T>::type; +} // namespace detail +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/cloneable.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/cloneable.h new file mode 100644 index 0000000000000000000000000000000000000000..aaf30d90974b11bd97dfa7617bc78faf13ded068 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/cloneable.h @@ -0,0 +1,98 @@ +#pragma once + +#include +#include +#include + +#include +#include + +#include +#include + +namespace torch { +namespace nn { +/// The `clone()` method in the base `Module` class does not have knowledge of +/// the concrete runtime type of its subclasses. Therefore, `clone()` must +/// either be called from within the subclass, or from a base class that has +/// knowledge of the concrete type. `Cloneable` uses the CRTP to gain +/// knowledge of the subclass' static type and provide an implementation of the +/// `clone()` method. We do not want to use this pattern in the base class, +/// because then storing a module would always require templatizing it. +template +// NOLINTNEXTLINE(bugprone-exception-escape) +class Cloneable : public Module { + public: + using Module::Module; + + /// `reset()` must perform initialization of all members with reference + /// semantics, most importantly parameters, buffers and submodules. + virtual void reset() = 0; + + /// Performs a recursive "deep copy" of the `Module`, such that all parameters + /// and submodules in the cloned module are different from those in the + /// original module. 
+ std::shared_ptr clone( + const optional& device = nullopt) const override { + NoGradGuard no_grad; + + const auto& self = static_cast(*this); + auto copy = std::make_shared(self); + copy->parameters_.clear(); + copy->buffers_.clear(); + copy->children_.clear(); + copy->reset(); + TORCH_CHECK( + copy->parameters_.size() == parameters_.size(), + "The cloned module does not have the same number of " + "parameters as the original module after calling reset(). " + "Are you sure you called register_parameter() inside reset() " + "and not the constructor?"); + for (const auto& parameter : named_parameters(/*recurse=*/false)) { + auto& tensor = *parameter; + auto data = device && tensor.device() != *device + ? tensor.to(*device) + : autograd::Variable(tensor).clone(); + copy->parameters_[parameter.key()].set_data(data); + } + TORCH_CHECK( + copy->buffers_.size() == buffers_.size(), + "The cloned module does not have the same number of " + "buffers as the original module after calling reset(). " + "Are you sure you called register_buffer() inside reset() " + "and not the constructor?"); + for (const auto& buffer : named_buffers(/*recurse=*/false)) { + auto& tensor = *buffer; + auto data = device && tensor.device() != *device + ? tensor.to(*device) + : autograd::Variable(tensor).clone(); + copy->buffers_[buffer.key()].set_data(data); + } + TORCH_CHECK( + copy->children_.size() == children_.size(), + "The cloned module does not have the same number of " + "child modules as the original module after calling reset(). 
" + "Are you sure you called register_module() inside reset() " + "and not the constructor?"); + for (const auto& child : children_) { + copy->children_[child.key()]->clone_(*child.value(), device); + } + return copy; + } + + private: + void clone_(Module& other, const optional& device) final { + // Here we are *pretty* certain that `other's` type is `Derived` (because it + // was registered under the same name as `this`), but you never know what + // crazy things `reset()` does, so `dynamic_cast` just to be safe. + auto clone = std::dynamic_pointer_cast(other.clone(device)); + TORCH_CHECK( + clone != nullptr, + "Attempted to clone submodule, but it is of a " + "different type than the submodule it was to be cloned into"); + static_cast(*this) = *clone; + } +}; + +} // namespace nn +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional.h new file mode 100644 index 0000000000000000000000000000000000000000..b148edc68173f4d11cf58e042902edf3c508afff --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional.h @@ -0,0 +1,17 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/init.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/init.h new file mode 100644 index 0000000000000000000000000000000000000000..d08d785f1dade72ccc4bc28a48c1d8014c257c6c --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/init.h @@ -0,0 +1,124 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace nn { +namespace init { + +using 
NonlinearityType = std::variant< + enumtype::kLinear, + enumtype::kConv1D, + enumtype::kConv2D, + enumtype::kConv3D, + enumtype::kConvTranspose1D, + enumtype::kConvTranspose2D, + enumtype::kConvTranspose3D, + enumtype::kSigmoid, + enumtype::kTanh, + enumtype::kReLU, + enumtype::kLeakyReLU>; + +using FanModeType = std::variant; + +} // namespace init +} // namespace nn + +namespace nn { +namespace init { + +/// Return the recommended gain value for the given nonlinearity function. +TORCH_API double calculate_gain( + NonlinearityType nonlinearity, + double param = 0.01); + +/// Fills the given `tensor` with the provided `value` in-place, and returns it. +/// No gradient will be recorded for this operation. +TORCH_API Tensor constant_(Tensor tensor, Scalar value); + +/// Fills the given `tensor` with the Dirac delta function in-place, and returns +/// it. No gradient will be recorded for this operation. +TORCH_API Tensor dirac_(Tensor tensor); + +/// Fills the given 2-dimensional `matrix` with an identity matrix. +/// No gradient will be recorded for this operation. +TORCH_API Tensor eye_(Tensor matrix); + +/// Fills the given 2-dimensional `matrix` with values drawn from a normal +/// distribution parameterized by `mean` and `std`. +/// No gradient will be recorded for this operation. +TORCH_API Tensor normal_(Tensor tensor, double mean = 0, double std = 1); + +/// Fills the given `tensor` with ones. +/// No gradient will be recorded for this operation. +TORCH_API Tensor ones_(Tensor tensor); + +/// Fills the input `Tensor` with a (semi) orthogonal matrix, as described in +/// "Exact solutions to the nonlinear dynamics of learning in deep linear neural +/// networks" - Saxe, A. et al. (2013). The input tensor must have at least 2 +/// dimensions, and for tensors with more than 2 dimensions the trailing +/// dimensions are flattened. +/// No gradient will be recorded for this operation. 
+TORCH_API Tensor orthogonal_(Tensor tensor, double gain = 1.0); + +/// Fills the 2D input `Tensor` as a sparse matrix, where the +/// non-zero elements will be drawn from a centered normal distribution +/// with the given standard deviation `std`, as described in "Deep learning via +/// Hessian-free optimization" - Martens, J. (2010). The `sparsity` is a real +/// value between 0 and 1 that controls the fraction of elements in each column +/// to be set to zero. +/// No gradient will be recorded for this operation. +TORCH_API Tensor sparse_(Tensor tensor, double sparsity, double std = 0.01); + +/// Fills the given 2-dimensional `matrix` with values drawn from a uniform +/// distribution parameterized by `low` and `high`. +/// No gradient will be recorded for this operation. +TORCH_API Tensor uniform_(Tensor tensor, double low = 0, double high = 1); + +/// Fills the input `Tensor` with values according to the method +/// described in "Delving deep into rectifiers: Surpassing human-level +/// performance on ImageNet classification" - He, K. et al. (2015), using a +/// normal distribution. Also known as He initialization. +/// No gradient will be recorded for this operation. +TORCH_API Tensor kaiming_normal_( + Tensor tensor, + double a = 0, + FanModeType mode = torch::kFanIn, + NonlinearityType nonlinearity = torch::kLeakyReLU); + +/// Fills the input `Tensor` with values according to the method +/// described in "Delving deep into rectifiers: Surpassing human-level +/// performance on ImageNet classification" - He, K. et al. (2015), using a +/// uniform distribution. Also known as He initialization. +/// No gradient will be recorded for this operation. 
+TORCH_API Tensor kaiming_uniform_( + Tensor tensor, + double a = 0, + FanModeType mode = torch::kFanIn, + NonlinearityType nonlinearity = torch::kLeakyReLU); + +/// Fills the input `Tensor` with values according to the method +/// described in "Understanding the difficulty of training deep feedforward +/// neural networks" - Glorot, X. & Bengio, Y. (2010). Values are scaled by the +/// `gain` parameter. No gradient will be recorded for this operation. +TORCH_API Tensor xavier_normal_(Tensor tensor, double gain = 1.0); + +/// Fills the input `Tensor` with values according to the method +/// described in "Understanding the difficulty of training deep feedforward +/// neural networks" - Glorot, X. & Bengio, Y. (2010), using a uniform +/// distribution. Values are scaled by the `gain` parameter +/// No gradient will be recorded for this operation. +TORCH_API Tensor xavier_uniform_(Tensor tensor, double gain = 1.0); + +/// Fills the given `tensor` with zeros. +/// No gradient will be recorded for this operation. +TORCH_API Tensor zeros_(Tensor tensor); + +TORCH_API std::tuple _calculate_fan_in_and_fan_out( + const Tensor& tensor); + +} // namespace init +} // namespace nn +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/module.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/module.h new file mode 100644 index 0000000000000000000000000000000000000000..de8d243533a787b1ed10ea5c90ef3286756177cd --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/module.h @@ -0,0 +1,702 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace nn { + +/// The base class for all modules in PyTorch. +/// +/// \rst +/// .. 
note:: +/// The design and implementation of this class is largely based on the Python +/// API. You may want to consult the python documentation for +/// :py:class:`pytorch:torch.nn.Module` for further clarification on certain +/// methods or behavior. +/// \endrst +/// +/// A `Module` is an abstraction over the implementation of some function or +/// algorithm, possibly associated with some persistent data. A `Module` may +/// contain further `Module`s ("submodules"), each with their own +/// implementation, persistent data and further submodules. `Module`s can thus +/// be said to form a recursive tree structure. A `Module` is registered as a +/// submodule to another `Module` by calling `register_module()`, typically from +/// within a parent module's constructor. +/// +/// A distinction is made between three kinds of persistent data that may be +/// associated with a `Module`: +/// +/// 1. *Parameters*: tensors that record gradients, typically weights updated +/// during the backward step (e.g. the `weight` of a `Linear` module), +/// 2. *Buffers*: tensors that do not record gradients, typically updated during +/// the forward step, such as running statistics (e.g. `mean` and `variance` +/// in the `BatchNorm` module), +/// 3. Any additional state, not necessarily tensors, required for the +/// implementation or configuration of a `Module`. +/// +/// The first two kinds of state are special in that they may be registered +/// with the `Module` system to allow convenient access and batch configuration. +/// For example, registered parameters in any `Module` may be iterated over via +/// the `parameters()` accessor. Further, changing the data type of a `Module`'s +/// registered parameters can be done conveniently via `Module::to()`, e.g. +/// `module->to(torch::kCUDA)` to move all parameters to GPU memory. 
Lastly, +/// registered parameters and buffers are handled specially during a `clone()` +/// operation, which performs a deepcopy of a cloneable `Module` hierarchy. +/// +/// Parameters are registered with a `Module` via `register_parameter`. Buffers +/// are registered separately via `register_buffer`. These methods are part of +/// the public API of `Module` and are typically invoked from within a +/// concrete `Module`s constructor. +class TORCH_API Module : public std::enable_shared_from_this { + public: + using ModuleApplyFunction = std::function; + using ConstModuleApplyFunction = std::function; + using NamedModuleApplyFunction = + std::function; + using ConstNamedModuleApplyFunction = + std::function; + using ModulePointerApplyFunction = + std::function&)>; + using NamedModulePointerApplyFunction = + std::function&)>; + + /// Tells the base `Module` about the name of the submodule. + explicit Module(std::string name); + + /// Constructs the module without immediate knowledge of the submodule's name. + /// The name of the submodule is inferred via RTTI (if possible) the first + /// time `.name()` is invoked. + Module(); + Module(const Module&) = default; + Module& operator=(const Module&) = default; + Module(Module&&) noexcept = default; + Module& operator=(Module&&) noexcept = default; + + virtual ~Module() = default; + + /// Returns the name of the `Module`. + /// + /// A `Module` has an associated `name`, which is a string representation of + /// the kind of concrete `Module` it represents, such as `"Linear"` for the + /// `Linear` module. Under most circumstances, this name is automatically + /// inferred via runtime type information (RTTI). In the unusual circumstance + /// that you have this feature disabled, you may want to manually name your + /// `Module`s by passing the string name to the `Module` base class' + /// constructor. 
+ const std::string& name() const noexcept; + + /// Performs a recursive deep copy of the module and all its registered + /// parameters, buffers and submodules. + /// + /// Optionally, this method sets the current device + /// to the one supplied before cloning. If no device is given, each + /// parameter and buffer will be moved to the device of its source. + /// + /// \rst + /// .. attention:: + /// Attempting to call the `clone()` method inherited from the base `Module` + /// class (the one documented here) will fail. To inherit an actual + /// implementation of `clone()`, you must subclass `Cloneable`. `Cloneable` + /// is templatized on the concrete module type, and can thus properly copy a + /// `Module`. This method is provided on the base class' API solely for an + /// easier-to-use polymorphic interface. + /// \endrst + virtual std::shared_ptr clone( + const optional& device = nullopt) const; + + /// Applies the `function` to the `Module` and recursively to every submodule. + /// The function must accept a `Module&`. + /// + /// \rst + /// .. code-block:: cpp + /// MyModule module; + /// module->apply([](nn::Module& module) { + /// std::cout << module.name() << std::endl; + /// }); + /// \endrst + void apply(const ModuleApplyFunction& function); + + /// Applies the `function` to the `Module` and recursively to every submodule. + /// The function must accept a `const Module&`. + /// + /// \rst + /// .. code-block:: cpp + /// MyModule module; + /// module->apply([](const nn::Module& module) { + /// std::cout << module.name() << std::endl; + /// }); + /// \endrst + void apply(const ConstModuleApplyFunction& function) const; + + /// Applies the `function` to the `Module` and recursively to every submodule. + /// The function must accept a `const std::string&` for the key of the module, + /// and a `Module&`. The key of the module itself is the empty string. 
If + /// `name_prefix` is given, it is prepended to every key as + /// `.` (and just `name_prefix` for the module itself). + /// + /// \rst + /// .. code-block:: cpp + /// MyModule module; + /// module->apply([](const std::string& key, nn::Module& module) { + /// std::cout << key << ": " << module.name() << std::endl; + /// }); + /// \endrst + void apply( + const NamedModuleApplyFunction& function, + const std::string& name_prefix = std::string()); + + /// Applies the `function` to the `Module` and recursively to every submodule. + /// The function must accept a `const std::string&` for the key of the module, + /// and a `const Module&`. The key of the module itself is the empty string. + /// If `name_prefix` is given, it is prepended to every key as + /// `.` (and just `name_prefix` for the module itself). + /// + /// \rst + /// .. code-block:: cpp + /// MyModule module; + /// module->apply([](const std::string& key, const nn::Module& module) { + /// std::cout << key << ": " << module.name() << std::endl; + /// }); + /// \endrst + void apply( + const ConstNamedModuleApplyFunction& function, + const std::string& name_prefix = std::string()) const; + + /// Applies the `function` to the `Module` and recursively to every submodule. + /// The function must accept a `const std::shared_ptr&`. + /// + /// \rst + /// .. code-block:: cpp + /// MyModule module; + /// module->apply([](const std::shared_ptr& module) { + /// std::cout << module->name() << std::endl; + /// }); + /// \endrst + void apply(const ModulePointerApplyFunction& function) const; + + /// Applies the `function` to the `Module` and recursively to every submodule. + /// The function must accept a `const std::string&` for the key of the module, + /// and a `const std::shared_ptr&`. The key of the module itself is + /// the empty string. If `name_prefix` is given, it is prepended to every key + /// as + /// `.` (and just `name_prefix` for the module itself). + /// + /// \rst + /// .. 
code-block:: cpp + /// MyModule module; + /// module->apply([](const std::string& key, + /// const std::shared_ptr& module) { + /// std::cout << key << ": " << module->name() << std::endl; + /// }); + /// \endrst + void apply( + const NamedModulePointerApplyFunction& function, + const std::string& name_prefix = std::string()) const; + + /// Returns the parameters of this `Module` and if `recurse` is true, also + /// recursively of every submodule. + std::vector parameters(bool recurse = true) const; + + /// Returns an `OrderedDict` with the parameters of this `Module` along with + /// their keys, and if `recurse` is true also recursively of every submodule. + OrderedDict named_parameters(bool recurse = true) const; + + /// Returns the buffers of this `Module` and if `recurse` is true, also + /// recursively of every submodule. + std::vector buffers(bool recurse = true) const; + + /// Returns an `OrderedDict` with the buffers of this `Module` along with + /// their keys, and if `recurse` is true also recursively of every submodule. + OrderedDict named_buffers(bool recurse = true) const; + + /// Returns the submodules of this `Module` (the entire submodule hierarchy) + /// and if `include_self` is true, also inserts a `shared_ptr` to this module + /// in the first position. + /// + /// \rst + /// .. warning:: + /// Only pass `include_self` as `true` if this `Module` is stored in a + /// `shared_ptr`! Otherwise an exception will be thrown. You may still call + /// this method with `include_self` set to false if your `Module` is not + /// stored in a `shared_ptr`. + /// \endrst + std::vector> modules(bool include_self = true) const; + + /// Returns an `OrderedDict` of the submodules of this `Module` (the entire + /// submodule hierarchy) and their keys, and if `include_self` is true, also + /// inserts a `shared_ptr` to this module in the first position. 
If + /// `name_prefix` is given, it is prepended to every key as + /// `.` (and just `name_prefix` for the module itself). + /// + /// \rst + /// .. warning:: + /// Only pass `include_self` as `true` if this `Module` is stored in a + /// `shared_ptr`! Otherwise an exception will be thrown. You may still call + /// this method with `include_self` set to false if your `Module` is not + /// stored in a `shared_ptr`. + /// \endrst + OrderedDict> named_modules( + const std::string& name_prefix = std::string(), + bool include_self = true) const; + + /// Returns the direct submodules of this `Module`. + std::vector> children() const; + + /// Returns an `OrderedDict` of the direct submodules of this `Module` and + /// their keys. + OrderedDict> named_children() const; + + /// Enables "training" mode. + virtual void train(bool on = true); + + /// Calls train(false) to enable "eval" mode. + /// Do not override this method, override `train()` instead. + void eval(); + + /// True if the module is in training mode. + /// + /// Every `Module` has a boolean associated with it that determines whether + /// the `Module` is currently in *training* mode (set via `.train()`) or in + /// *evaluation* (inference) mode (set via `.eval()`). This property is + /// exposed via `is_training()`, and may be used by the implementation of a + /// concrete module to modify its runtime behavior. See the `BatchNorm` or + /// `Dropout` modules for examples of `Module`s that use different code paths + /// depending on this property. + virtual bool is_training() const noexcept; + + /// Recursively casts all parameters to the given `dtype` and `device`. + /// + /// If `non_blocking` is true and the source is in pinned memory and + /// destination is on the GPU or vice versa, the copy is performed + /// asynchronously with respect to the host. Otherwise, the argument has no + /// effect. 
+ virtual void to( + torch::Device device, + torch::Dtype dtype, + bool non_blocking = false); + + /// Recursively casts all parameters to the given dtype. + /// + /// If `non_blocking` is true and the source is in pinned memory and + /// destination is on the GPU or vice versa, the copy is performed + /// asynchronously with respect to the host. Otherwise, the argument has no + /// effect. + virtual void to(torch::Dtype dtype, bool non_blocking = false); + + /// Recursively moves all parameters to the given device. + /// + /// If `non_blocking` is true and the source is in pinned memory and + /// destination is on the GPU or vice versa, the copy is performed + /// asynchronously with respect to the host. Otherwise, the argument has no + /// effect. + virtual void to(torch::Device device, bool non_blocking = false); + + /// Recursively zeros out the `grad` value of each registered parameter. + virtual void zero_grad(bool set_to_none = true); + + /// Attempts to cast this `Module` to the given `ModuleType`. + /// + /// This method is useful when calling `apply()`. + /// \rst + /// .. code-block:: cpp + /// + /// void initialize_weights(nn::Module& module) { + /// torch::NoGradGuard no_grad; + /// if (auto* linear = module.as()) { + /// linear->weight.normal_(0.0, 0.02); + /// } + /// } + /// + /// MyModule module; + /// module->apply(initialize_weights); + /// \endrst + template + typename ModuleType::ContainedType* as() noexcept; + + /// Attempts to cast this `Module` to the given `ModuleType`. + /// + /// This method is useful when calling `apply()`. + /// \rst + /// .. 
code-block:: cpp + /// void initialize_weights(nn::Module& module) { + /// torch::NoGradGuard no_grad; + /// if (auto* linear = module.as()) { + /// linear->weight.normal_(0.0, 0.02); + /// } + /// } + /// + /// MyModule module; + /// module->apply(initialize_weights); + /// \endrst + template + const typename ModuleType::ContainedType* as() const noexcept; + + /// Attempts to cast this `Module` to the given `ModuleType`. + /// + /// This method is useful when calling `apply()`. + /// \rst + /// .. code-block:: cpp + /// + /// void initialize_weights(nn::Module& module) { + /// torch::NoGradGuard no_grad; + /// if (auto* linear = module.as()) { + /// linear->weight.normal_(0.0, 0.02); + /// } + /// } + /// + /// MyModule module; + /// module.apply(initialize_weights); + /// \endrst + template < + typename ModuleType, + typename = torch::detail::disable_if_module_holder_t> + ModuleType* as() noexcept; + + /// Attempts to cast this `Module` to the given `ModuleType`. + /// + /// This method is useful when calling `apply()`. + /// \rst + /// .. code-block:: cpp + /// + /// void initialize_weights(nn::Module& module) { + /// torch::NoGradGuard no_grad; + /// if (auto* linear = module.as()) { + /// linear->weight.normal_(0.0, 0.02); + /// } + /// } + /// + /// MyModule module; + /// module.apply(initialize_weights); + /// \endrst + template < + typename ModuleType, + typename = torch::detail::disable_if_module_holder_t> + const ModuleType* as() const noexcept; + + /// Serializes the `Module` into the given `OutputArchive`. + /// + /// If the `Module` contains unserializable submodules (e.g. + /// `nn::Functional`), those submodules are skipped when serializing. + virtual void save(serialize::OutputArchive& archive) const; + + /// Deserializes the `Module` from the given `InputArchive`. + /// + /// If the `Module` contains unserializable submodules (e.g. 
+ /// `nn::Functional`), we don't check the existence of those submodules in the + /// `InputArchive` when deserializing. + virtual void load(serialize::InputArchive& archive); + + /// Streams a pretty representation of the `Module` into the given `stream`. + /// By default, this representation will be the name of the module (taken from + /// `name()`), followed by a recursive pretty print of all of the `Module`'s + /// submodules. + /// + /// Override this method to change the pretty print. The input + /// `stream` should be returned from the method, to allow easy chaining. + virtual void pretty_print(std::ostream& stream) const; + + /// Returns whether the `Module` is serializable. + virtual bool is_serializable() const; + + /// Registers a parameter with this `Module`. + /// + /// A parameter should be any gradient-recording tensor used in the + /// implementation of your `Module`. Registering it makes it available to + /// methods such as `parameters()`, `clone()` or `to().` + /// + /// Note that registering an undefined Tensor (e.g. + /// `module.register_parameter("param", Tensor())`) is allowed, and is + /// equivalent to `module.register_parameter("param", None)` in Python API. + /// + /// \rst + /// .. code-block:: cpp + /// + /// MyModule::MyModule() { + /// weight_ = register_parameter("weight", torch::randn({A, B})); + /// } + /// \endrst + Tensor& register_parameter( + std::string name, + Tensor tensor, + bool requires_grad = true); + + /// Registers a buffer with this `Module`. + /// + /// A buffer is intended to be state in your module that does not record + /// gradients, such as running statistics. Registering it makes it available + /// to methods such as `buffers()`, `clone()` or `to(). + /// + /// \rst + /// .. 
code-block:: cpp + /// + /// MyModule::MyModule() { + /// mean_ = register_buffer("mean", torch::empty({num_features_})); + /// } + /// \endrst + Tensor& register_buffer(std::string name, Tensor tensor); + + /// Registers a submodule with this `Module`. + /// + /// Registering a module makes it available to methods such as `modules()`, + /// `clone()` or `to()`. + /// + /// \rst + /// .. code-block:: cpp + /// + /// MyModule::MyModule() { + /// submodule_ = register_module("linear", torch::nn::Linear(3, 4)); + /// } + /// \endrst + template + std::shared_ptr register_module( + std::string name, + std::shared_ptr module); + + /// Registers a submodule with this `Module`. + /// + /// This method deals with `ModuleHolder`s. + /// + /// Registering a module makes it available to methods such as `modules()`, + /// `clone()` or `to()`. + /// + /// \rst + /// .. code-block:: cpp + /// + /// MyModule::MyModule() { + /// submodule_ = register_module("linear", torch::nn::Linear(3, 4)); + /// } + /// \endrst + template + std::shared_ptr register_module( + std::string name, + ModuleHolder module_holder); + + /// Replaces a registered submodule with this `Module`. + /// + /// This takes care of the registration, if you used submodule members, you + /// should + // assign the submodule as well, i.e. use as + /// module->submodule_ = module->replace_module("linear", + /// torch::nn::Linear(3, 4)); + /// It only works when a module of the name is already registered. + /// + /// This is useful for replacing a module after initialization, e.g. + /// for finetuning. + template + std::shared_ptr replace_module( + const std::string& name, + std::shared_ptr module); + + /// Replaces a registered submodule with this `Module`. + /// This method deals with `ModuleHolder`s. + /// + /// This takes care of the registration, if you used submodule members, you + /// should + // assign the submodule as well, i.e. 
use as + /// module->submodule_ = module->replace_module("linear", linear_holder); + /// It only works when a module of the name is already registered. + /// + /// This is useful for replacing a module after initialization, e.g. + /// for finetuning. + template + std::shared_ptr replace_module( + const std::string& name, + ModuleHolder module_holder); + + /// Unregisters a submodule from this `Module`. If there is no such module + /// with `name` an exception is thrown. + void unregister_module(const std::string& name); + + protected: + /// The following three functions allow a module with default arguments in its + /// forward method to be used in a Sequential module. + /// You should NEVER override these functions manually. Instead, you should + /// use the `FORWARD_HAS_DEFAULT_ARGS` macro. + virtual bool _forward_has_default_args() { + return false; + } + + virtual unsigned int _forward_num_required_args() { + TORCH_CHECK( + false, + "torch::nn::Module subclass that has default arguments in `forward` method ", + "must override `_forward_num_required_args` method. Please use ", + "`FORWARD_HAS_DEFAULT_ARGS` macro to do so."); + } + + virtual std::vector _forward_populate_default_args( + std::vector&& arguments) { + TORCH_CHECK( + false, + "torch::nn::Module subclass that has default arguments in `forward` method ", + "must override `_forward_populate_default_args` method. Please use ", + "`FORWARD_HAS_DEFAULT_ARGS` macro to do so."); + } + + /// The registered parameters of this `Module`. + /// Inorder to access parameters_ in ParameterDict and ParameterList + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + OrderedDict parameters_; + + private: + // Friend classes. + + template + friend class Cloneable; + + template + friend struct AnyModuleHolder; + + /// Pretty prints the given `Module` into the `ostream`. 
+ TORCH_API friend std::ostream& operator<<( + std::ostream& stream, + const nn::Module& module); + + // data parallel using this method to configure gradient edges during the + // replicate step. + template + friend void replicate_grad_edges( + const std::shared_ptr& module, + const std::vector>& replicas, + const std::vector& devices); + + // Private methods. + + /// Used in the implementation of `Cloneable`. + virtual void clone_(Module& other, const optional& device); + + /// The implementation of the various `to()` methods. + template + void to_impl(Ts&&... ts); + + /// Implements pretty printing the module hierarchy. + void pretty_print_recursive( + std::ostream& stream, + const std::string& indentation) const; + + /// Applies the `function` to every submodule recursively, starting at this + /// `Module`'s children (thus not including the module itself). + void apply_to_submodules( + const NamedModulePointerApplyFunction& function, + const std::string& name_prefix = std::string()) const; + + /// Returns a shared_ptr to `this` in a safe (checked) way. + std::shared_ptr shared_from_this_checked() const; + + /// The registered buffers of this `Module`. + OrderedDict buffers_; + + /// The registered (direct) submodules of this `Module`. + OrderedDict> children_; + + /// The module's name (e.g. "LSTM"). + mutable optional name_; + + /// Whether the module is in training mode. + bool is_training_{true}; +}; + +/// Serialize a `Module` pointer into an `OutputArchive`. +TORCH_API serialize::OutputArchive& operator<<( + serialize::OutputArchive& archive, + const std::shared_ptr& module); + +/// Deserializes a `Module` from an `InputArchive`. +TORCH_API serialize::InputArchive& operator>>( + serialize::InputArchive& archive, + const std::shared_ptr& module); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ nn::Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +template +typename ModuleType::ContainedType* Module::as() noexcept { + // Use the contained type of the `ModuleHolder`, e.g. 
`LinearImpl` for + // `Linear`, since `LinearImpl` inherits `nn::Module`. + return as(); +} + +template +const typename ModuleType::ContainedType* Module::as() const noexcept { + // Use the contained type of the `ModuleHolder`, e.g. `LinearImpl` for + // `Linear`, since `LinearImpl` inherits `nn::Module`. + return as(); +} + +template +ModuleType* Module::as() noexcept { + return dynamic_cast(this); +} + +template +const ModuleType* Module::as() const noexcept { + return dynamic_cast(this); +} + +template +std::shared_ptr Module::register_module( + std::string name, + std::shared_ptr module) { + TORCH_CHECK(!name.empty(), "Submodule name must not be empty"); + TORCH_CHECK( + name.find('.') == std::string::npos, + "Submodule name must not contain a dot (got '", + name, + "')"); + auto& base_module = children_.insert(std::move(name), std::move(module)); + return std::dynamic_pointer_cast(base_module); +} + +template +std::shared_ptr Module::register_module( + std::string name, + ModuleHolder module_holder) { + return register_module(std::move(name), module_holder.ptr()); +} + +template +std::shared_ptr Module::replace_module( + const std::string& name, + std::shared_ptr module) { + auto& base_module = (children_[name] = std::move(module)); + return std::dynamic_pointer_cast(base_module); +} + +template +std::shared_ptr Module::replace_module( + const std::string& name, + ModuleHolder module_holder) { + return replace_module(name, module_holder.ptr()); +} + +template +void Module::to_impl(Ts&&... ts) { + // First call `to()` on every child module. + for (auto& child : children_) { + child.value()->to(ts...); + } + // Then move every parameter to the new dtype/device. + for (auto& parameter : named_parameters(/*recurse=*/false)) { + parameter->set_data(autograd::Variable(*parameter).to(ts...)); + } + // Then move every buffer to the new dtype/device. 
+ for (auto& buffer : named_buffers(/*recurse=*/false)) { + buffer->set_data(autograd::Variable(*buffer).to(ts...)); + } +} + +} // namespace nn +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules.h new file mode 100644 index 0000000000000000000000000000000000000000..e037d52a8535490ff5ecb17e578df5b4101ee9a3 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules.h @@ -0,0 +1,36 @@ +#pragma once + +// Common +#include + +// Containers +#include +#include +#include +#include +#include +#include +#include +#include + +// Layers +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options.h new file mode 100644 index 0000000000000000000000000000000000000000..4a5224a478e1b650e393dcb3f95adc13ab36d65f --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options.h @@ -0,0 +1,18 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/linear.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/linear.h new file mode 100644 index 0000000000000000000000000000000000000000..5952d97806b378814f8bb0c1ffa6cf783d2f8426 --- /dev/null +++ 
b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/linear.h @@ -0,0 +1,95 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace nn { + +/// Options for the `Linear` module. +/// +/// Example: +/// ``` +/// Linear model(LinearOptions(5, 2).bias(false)); +/// ``` +struct TORCH_API LinearOptions { + LinearOptions(int64_t in_features, int64_t out_features); + /// size of each input sample + TORCH_ARG(int64_t, in_features); + + /// size of each output sample + TORCH_ARG(int64_t, out_features); + + /// If set to false, the layer will not learn an additive bias. Default: true + TORCH_ARG(bool, bias) = true; +}; + +// ============================================================================ + +/// Options for the `Flatten` module. +/// +/// Example: +/// ``` +/// Flatten model(FlattenOptions().start_dim(2).end_dim(4)); +/// ``` +struct TORCH_API FlattenOptions { + /// first dim to flatten + TORCH_ARG(int64_t, start_dim) = 1; + /// last dim to flatten + TORCH_ARG(int64_t, end_dim) = -1; +}; + +// ============================================================================ + +/// Options for the `Unflatten` module. +/// +/// Note: If input tensor is named, use dimname and namedshape arguments. 
+/// +/// Example: +/// ``` +/// Unflatten unnamed_model(UnflattenOptions(0, {2, 2})); +/// Unflatten named_model(UnflattenOptions("B", {{"B1", 2}, {"B2", 2}})); +/// ``` +struct TORCH_API UnflattenOptions { + typedef std::vector> namedshape_t; + + UnflattenOptions(int64_t dim, std::vector sizes); + UnflattenOptions(const char* dimname, namedshape_t namedshape); + UnflattenOptions(std::string dimname, namedshape_t namedshape); + + /// dim to unflatten + TORCH_ARG(int64_t, dim); + /// name of dim to unflatten, for use with named tensors + TORCH_ARG(std::string, dimname); + /// new shape of unflattened dim + TORCH_ARG(std::vector, sizes); + /// new shape of unflattened dim with names, for use with named tensors + TORCH_ARG(namedshape_t, namedshape); +}; + +// ============================================================================ + +/// Options for the `Bilinear` module. +/// +/// Example: +/// ``` +/// Bilinear model(BilinearOptions(3, 2, 4).bias(false)); +/// ``` +struct TORCH_API BilinearOptions { + BilinearOptions( + int64_t in1_features, + int64_t in2_features, + int64_t out_features); + /// The number of features in input 1 (columns of the input1 matrix). + TORCH_ARG(int64_t, in1_features); + /// The number of features in input 2 (columns of the input2 matrix). + TORCH_ARG(int64_t, in2_features); + /// The number of output features to produce (columns of the output matrix). + TORCH_ARG(int64_t, out_features); + /// Whether to learn and add a bias after the bilinear transformation. 
+ TORCH_ARG(bool, bias) = true; +}; + +} // namespace nn +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/loss.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/loss.h new file mode 100644 index 0000000000000000000000000000000000000000..c9eb2b66f3e0b2122639f6354dadf539819efc48 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/loss.h @@ -0,0 +1,802 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace nn { + +/// Options for the `L1Loss` module. +/// +/// Example: +/// ``` +/// L1Loss model(L1LossOptions(torch::kNone)); +/// ``` +struct TORCH_API L1LossOptions { + typedef std::variant + reduction_t; + + TORCH_OPTIONS_CTOR_VARIANT_ARG3(L1LossOptions, reduction, kNone, kMean, kSum) + + /// Specifies the reduction to apply to the output. + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::l1_loss`. +/// +/// See the documentation for `torch::nn::L1LossOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::l1_loss(input, target, F::L1LossFuncOptions(torch::kNone)); +/// ``` +using L1LossFuncOptions = L1LossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `KLDivLoss` module. 
+/// +/// Example: +/// ``` +/// KLDivLoss +/// model(KLDivLossOptions().reduction(torch::kNone).log_target(false)); +/// ``` +struct TORCH_API KLDivLossOptions { + typedef std::variant< + enumtype::kNone, + enumtype::kBatchMean, + enumtype::kSum, + enumtype::kMean> + reduction_t; + + TORCH_OPTIONS_CTOR_VARIANT_ARG4( + KLDivLossOptions, + reduction, + kNone, + kBatchMean, + kSum, + kMean) + + /// Specifies the reduction to apply to the output. + /// ``'none'`` | ``'batchmean'`` | ``'sum'`` | ``'mean'``. Default: ``'mean'`` + TORCH_ARG(reduction_t, reduction) = torch::kMean; + + /// Specifies whether `target` is accepted in the log space. Default: False + TORCH_ARG(bool, log_target) = false; +}; + +namespace functional { +/// Options for `torch::nn::functional::kl_div`. +/// +/// See the documentation for `torch::nn::KLDivLossOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::kl_div(input, target, +/// F::KLDivFuncOptions().reduction(torch::kNone).log_target(false)); +/// ``` +using KLDivFuncOptions = KLDivLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `MSELoss` module. +/// +/// Example: +/// ``` +/// MSELoss model(MSELossOptions(torch::kNone)); +/// ``` +struct TORCH_API MSELossOptions { + typedef std::variant + reduction_t; + + TORCH_OPTIONS_CTOR_VARIANT_ARG3(MSELossOptions, reduction, kNone, kMean, kSum) + + /// Specifies the reduction to apply to the output. + /// ``'none'`` | ``'mean'`` | ``'sum'``. Default: ``'mean'`` + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::mse_loss`. +/// +/// See the documentation for `torch::nn::MSELossOptions` class to learn what +/// arguments are supported. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::mse_loss(input, target, F::MSELossFuncOptions(torch::kNone)); +/// ``` +using MSELossFuncOptions = MSELossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `BCELoss` module. +/// +/// Example: +/// ``` +/// BCELoss model(BCELossOptions().reduction(torch::kNone).weight(weight)); +/// ``` +struct TORCH_API BCELossOptions { + typedef std::variant + reduction_t; + + /// A manual rescaling weight given to the loss of each batch element. + TORCH_ARG(Tensor, weight) = {}; + /// Specifies the reduction to apply to the output. + /// ``'none'`` | ``'mean'`` | ``'sum'``. Default: ``'mean'`` + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::binary_cross_entropy`. +/// +/// See the documentation for `torch::nn::BCELossOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::binary_cross_entropy(input, target, +/// F::BinaryCrossEntropyFuncOptions().weight(weight)); +/// ``` +using BinaryCrossEntropyFuncOptions = BCELossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `HingeEmbeddingLoss` module. +/// +/// Example: +/// ``` +/// HingeEmbeddingLoss +/// model(HingeEmbeddingLossOptions().margin(4).reduction(torch::kNone)); +/// ``` +struct TORCH_API HingeEmbeddingLossOptions { + typedef std::variant + reduction_t; + + /// Specifies the threshold for which the distance of a negative sample must + /// reach in order to incur zero loss. Default: 1 + TORCH_ARG(double, margin) = 1.0; + /// Specifies the reduction to apply to the output. 
Default: Mean + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::hinge_embedding_loss`. +/// +/// See the documentation for `torch::nn::HingeEmbeddingLossOptions` class to +/// learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::hinge_embedding_loss(input, target, +/// F::HingeEmbeddingLossFuncOptions().margin(2)); +/// ``` +using HingeEmbeddingLossFuncOptions = HingeEmbeddingLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `MultiMarginLoss` module. +/// +/// Example: +/// ``` +/// MultiMarginLoss model(MultiMarginLossOptions().margin(2).weight(weight)); +/// ``` +struct TORCH_API MultiMarginLossOptions { + typedef std::variant + reduction_t; + + /// Has a default value of :math:`1`. :math:`1` and :math:`2` + /// are the only supported values. + TORCH_ARG(int64_t, p) = 1; + /// Has a default value of :math:`1`. + TORCH_ARG(double, margin) = 1.0; + /// A manual rescaling weight given to each + /// class. If given, it has to be a Tensor of size `C`. Otherwise, it is + /// treated as if having all ones. + TORCH_ARG(Tensor, weight) = Tensor(); + /// Specifies the reduction to apply to the output: + /// ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be + /// applied, + /// ``'mean'``: the sum of the output will be divided by the number of + /// elements in the output, ``'sum'``: the output will be summed. Default: + /// ``'mean'`` + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::multi_margin_loss`. +/// +/// See the documentation for `torch::nn::MultiMarginLossOptions` class to learn +/// what arguments are supported. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::multi_margin_loss(input, target, +/// F::MultiMarginLossFuncOptions().margin(2).weight(weight)); +/// ``` +using MultiMarginLossFuncOptions = MultiMarginLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `CosineEmbeddingLoss` module. +/// +/// Example: +/// ``` +/// CosineEmbeddingLoss model(CosineEmbeddingLossOptions().margin(0.5)); +/// ``` +struct TORCH_API CosineEmbeddingLossOptions { + typedef std::variant + reduction_t; + + /// Specifies the threshold for which the distance of a negative sample must + /// reach in order to incur zero loss. Should be a number from -1 to 1, 0 + /// to 0.5 is suggested. Default: 0.0 + TORCH_ARG(double, margin) = 0.0; + /// Specifies the reduction to apply to the output. Default: Mean + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::cosine_embedding_loss`. +/// +/// See the documentation for `torch::nn::CosineEmbeddingLossOptions` class to +/// learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::cosine_embedding_loss(input1, input2, target, +/// F::CosineEmbeddingLossFuncOptions().margin(0.5)); +/// ``` +using CosineEmbeddingLossFuncOptions = CosineEmbeddingLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `MultiLabelMarginLoss` module. +/// +/// Example: +/// ``` +/// MultiLabelMarginLoss model(MultiLabelMarginLossOptions(torch::kNone)); +/// ``` +struct TORCH_API MultiLabelMarginLossOptions { + typedef std::variant + reduction_t; + + TORCH_OPTIONS_CTOR_VARIANT_ARG3( + MultiLabelMarginLossOptions, + reduction, + kNone, + kMean, + kSum) + + /// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 
+ /// 'none': no reduction will be applied, 'mean': the sum of the output will + /// be divided by the number of elements in the output, 'sum': the output will + /// be summed. Default: 'mean' + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::multilabel_margin_loss`. +/// +/// See the documentation for `torch::nn::MultiLabelMarginLossOptions` class to +/// learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::multilabel_margin_loss(input, target, +/// F::MultilabelMarginLossFuncOptions(torch::kNone)); +/// ``` +using MultilabelMarginLossFuncOptions = MultiLabelMarginLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `SoftMarginLoss` module. +/// +/// Example: +/// ``` +/// SoftMarginLoss model(SoftMarginLossOptions(torch::kNone)); +/// ``` +struct TORCH_API SoftMarginLossOptions { + typedef std::variant + reduction_t; + + TORCH_OPTIONS_CTOR_VARIANT_ARG3( + SoftMarginLossOptions, + reduction, + kNone, + kMean, + kSum) + + /// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. + /// 'none': no reduction will be applied, 'mean': the sum of the output will + /// be divided by the number of elements in the output, 'sum': the output will + /// be summed. Default: 'mean' + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::soft_margin_loss`. +/// +/// See the documentation for `torch::nn::SoftMarginLossOptions` class to learn +/// what arguments are supported. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::soft_margin_loss(input, target, +/// F::SoftMarginLossFuncOptions(torch::kNone)); +/// ``` +using SoftMarginLossFuncOptions = SoftMarginLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `MultiLabelSoftMarginLoss` module. +/// +/// Example: +/// ``` +/// MultiLabelSoftMarginLoss +/// model(MultiLabelSoftMarginLossOptions().reduction(torch::kNone).weight(weight)); +/// ``` +struct TORCH_API MultiLabelSoftMarginLossOptions { + typedef std::variant + reduction_t; + + /// A manual rescaling weight given to each + /// class. If given, it has to be a Tensor of size `C`. Otherwise, it is + /// treated as if having all ones. + TORCH_ARG(Tensor, weight) = Tensor(); + + /// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. + /// 'none': no reduction will be applied, 'mean': the sum of the output will + /// be divided by the number of elements in the output, 'sum': the output will + /// be summed. Default: 'mean' + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::multilabel_soft_margin_loss`. +/// +/// See the documentation for `torch::nn::MultiLabelSoftMarginLossOptions` class +/// to learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::multilabel_soft_margin_loss(input, target, +/// F::MultilabelSoftMarginLossFuncOptions().reduction(torch::kNone).weight(weight)); +/// ``` +using MultilabelSoftMarginLossFuncOptions = MultiLabelSoftMarginLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `TripletMarginLoss` module. 
+/// +/// Example: +/// ``` +/// TripletMarginLoss +/// model(TripletMarginLossOptions().margin(3).p(2).eps(1e-06).swap(false)); +/// ``` +struct TORCH_API TripletMarginLossOptions { + typedef std::variant + reduction_t; + + /// Specifies the threshold for which the distance of a negative sample must + /// reach in order to incur zero loss. Default: 1 + TORCH_ARG(double, margin) = 1.0; + /// Specifies the norm degree for pairwise distance. Default: 2 + TORCH_ARG(double, p) = 2.0; + TORCH_ARG(double, eps) = 1e-6; + /// The distance swap is described in detail in the paper Learning shallow + /// convolutional feature descriptors with triplet losses by V. Balntas, + /// E. Riba et al. Default: False + TORCH_ARG(bool, swap) = false; + /// Specifies the reduction to apply to the output. Default: Mean + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::triplet_margin_loss`. +/// +/// See the documentation for `torch::nn::TripletMarginLossOptions` class to +/// learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::triplet_margin_loss(anchor, positive, negative, +/// F::TripletMarginLossFuncOptions().margin(1.0)); +/// ``` +using TripletMarginLossFuncOptions = TripletMarginLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `TripletMarginWithDistanceLoss` module. +/// +/// Example: +/// ``` +/// TripletMarginWithDistanceLoss +/// model(TripletMarginWithDistanceLossOptions().margin(3).swap(false)); +/// ``` +struct TORCH_API TripletMarginWithDistanceLossOptions { + typedef std::variant + reduction_t; + typedef std::function + distance_function_t; + + /// Specifies a nonnegative, real-valued function that quantifies the + /// closeness of two tensors. If not specified, `F::pairwise_distance` will + /// be used. 
Default: nullopt + TORCH_ARG(c10::optional, distance_function) = + c10::nullopt; + /// Specifies a nonnegative margin representing the minimum difference + /// between the positive and negative distances required for the loss to be 0. + /// Larger margins penalize cases where the negative examples are not distance + /// enough from the anchors, relative to the positives. Default: 1 + TORCH_ARG(double, margin) = 1.0; + /// Whether to use the distance swap described in the paper Learning shallow + /// convolutional feature descriptors with triplet losses by V. Balntas, + /// E. Riba et al. If True, and if the positive example is closer to the + /// negative example than the anchor is, swaps the positive example and the + /// anchor in the loss computation. Default: False + TORCH_ARG(bool, swap) = false; + /// Specifies the reduction to apply to the output. Default: Mean + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::triplet_margin_with_distance_loss`. +/// +/// See the documentation for `torch::nn::TripletMarginWithDistanceLossOptions` +/// class to learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::triplet_margin_with_distance_loss(anchor, positive, negative, +/// F::TripletMarginWithDistanceLossFuncOptions().margin(1.0)); +/// ``` +using TripletMarginWithDistanceLossFuncOptions = + TripletMarginWithDistanceLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `CTCLoss` module. +/// +/// Example: +/// ``` +/// CTCLoss +/// model(CTCLossOptions().blank(42).zero_infinity(false).reduction(torch::kSum)); +/// ``` +struct TORCH_API CTCLossOptions { + typedef std::variant + reduction_t; + + /// blank label. Default `0`. + TORCH_ARG(int64_t, blank) = 0; + /// Specifies the reduction to apply to the output. 
Default: Mean + TORCH_ARG(reduction_t, reduction) = torch::kMean; + /// Whether to zero infinite losses and the associated gradients. + /// Default: `false`. Infinite losses mainly occur when the inputs are + /// too short to be aligned to the targets. + TORCH_ARG(bool, zero_infinity) = false; +}; + +namespace functional { +/// Options for `torch::nn::functional::ctc_loss`. +/// +/// See the documentation for `torch::nn::CTCLossOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::ctc_loss(log_probs, targets, input_lengths, target_lengths, +/// F::CTCLossFuncOptions().reduction(torch::kNone)); +/// ``` +using CTCLossFuncOptions = CTCLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `SmoothL1Loss` module. +/// +/// Example: +/// ``` +/// SmoothL1Loss model(SmoothL1LossOptions().reduction(torch::kNone).beta(0.5)); +/// ``` +struct TORCH_API SmoothL1LossOptions { + typedef std::variant + reduction_t; + + TORCH_OPTIONS_CTOR_VARIANT_ARG3( + SmoothL1LossOptions, + reduction, + kNone, + kMean, + kSum) + + /// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. + /// 'none': no reduction will be applied, 'mean': the sum of the output will + /// be divided by the number of elements in the output, 'sum': the output will + /// be summed. Default: 'mean' + TORCH_ARG(reduction_t, reduction) = torch::kMean; + /// Specifies the threshold at which to change between L1 and L2 loss. + /// If beta is not specified, a value of 1.0 will be used. + /// Default: nullopt + TORCH_ARG(c10::optional, beta) = c10::nullopt; +}; + +namespace functional { +/// Options for `torch::nn::functional::smooth_l1_loss`. +/// +/// See the documentation for `torch::nn::SmoothL1LossOptions` class to learn +/// what arguments are supported. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::smooth_l1_loss(input, target, F::SmoothL1LossFuncOptions(torch::kNone)); +/// ``` +using SmoothL1LossFuncOptions = SmoothL1LossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `HuberLoss` module. +/// +/// Example: +/// ``` +/// HuberLoss model(HuberLossOptions().reduction(torch::kNone).delta(0.5)); +/// ``` +struct TORCH_API HuberLossOptions { + typedef std::variant + reduction_t; + + TORCH_OPTIONS_CTOR_VARIANT_ARG3( + HuberLossOptions, + reduction, + kNone, + kMean, + kSum) + + /// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. + /// 'none': no reduction will be applied, 'mean': the sum of the output will + /// be divided by the number of elements in the output, 'sum': the output will + /// be summed. Default: 'mean' + TORCH_ARG(reduction_t, reduction) = torch::kMean; + /// Specifies the threshold at which to change between L1 and L2 loss. + /// Default: 1.0 + TORCH_ARG(double, delta) = 1.0; +}; + +namespace functional { +/// Options for `torch::nn::functional::huber_loss`. +/// +/// See the documentation for `torch::nn::HuberLossOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::huber_loss(input, target, F::HuberLossFuncOptions(torch::kNone)); +/// ``` +using HuberLossFuncOptions = HuberLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `PoissonNLLLoss` module. 
+/// +/// Example: +/// ``` +/// PoissonNLLLoss +/// model(PoissonNLLLossOptions().log_input(false).full(true).eps(0.42).reduction(torch::kSum)); +/// ``` +struct TORCH_API PoissonNLLLossOptions { + typedef std::variant + reduction_t; + + /// if true the loss is computed as `exp(input) - target * input`, + /// if false the loss is `input - target * log(input + eps)`. + TORCH_ARG(bool, log_input) = true; + /// whether to compute full loss, i.e. to add the Stirling approximation term + /// target * log(target) - target + 0.5 * log(2 * pi * target). + TORCH_ARG(bool, full) = false; + /// Small value to avoid evaluation of `log(0)` when `log_input = false`. + /// Default: 1e-8 + TORCH_ARG(double, eps) = 1e-8; + /// Specifies the reduction to apply to the output. Default: Mean + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::poisson_nll_loss`. +/// +/// See the documentation for `torch::nn::PoissonNLLLossOptions` class to learn +/// what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::poisson_nll_loss(input, target, +/// F::PoissonNLLLossFuncOptions().reduction(torch::kNone)); +/// ``` +using PoissonNLLLossFuncOptions = PoissonNLLLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `MarginRankingLoss` module. +/// +/// Example: +/// ``` +/// MarginRankingLoss +/// model(MarginRankingLossOptions().margin(0.5).reduction(torch::kSum)); +/// ``` +struct TORCH_API MarginRankingLossOptions { + typedef std::variant + reduction_t; + + /// Has a default value of `0`. + TORCH_ARG(double, margin) = 0; + /// Specifies the reduction to apply to the output. Default: Mean + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::margin_ranking_loss`. 
+/// +/// See the documentation for `torch::nn::MarginRankingLossOptions` class to +/// learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::margin_ranking_loss(input1, input2, target, +/// F::MarginRankingLossFuncOptions().margin(0.5).reduction(torch::kSum)); +/// ``` +using MarginRankingLossFuncOptions = MarginRankingLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `NLLLoss` module. +/// +/// Example: +/// ``` +/// NLLLoss model(NLLLossOptions().ignore_index(-100).reduction(torch::kMean)); +/// ``` +struct TORCH_API NLLLossOptions { + typedef std::variant + reduction_t; + + /// A manual rescaling weight given to each + /// class. If given, it has to be a Tensor of size `C`. Otherwise, it is + /// treated as if having all ones. + TORCH_ARG(Tensor, weight) = {}; + /// Specifies a target value that is ignored + /// and does not contribute to the input gradient. + TORCH_ARG(int64_t, ignore_index) = -100; + /// Specifies the reduction to apply to the output. Default: Mean + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::nll_loss`. +/// +/// See the documentation for `torch::nn::NLLLossOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::nll_loss(input, target, +/// F::NLLLossFuncOptions().ignore_index(-100).reduction(torch::kMean)); +/// ``` +using NLLLossFuncOptions = NLLLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `CrossEntropyLoss` module. 
+/// +/// Example: +/// ``` +/// CrossEntropyLoss +/// model(CrossEntropyLossOptions().ignore_index(-100).reduction(torch::kMean)); +/// ``` +struct TORCH_API CrossEntropyLossOptions { + typedef std::variant + reduction_t; + + /// A manual rescaling weight given to each class. If given, has to be a + /// Tensor of size C + TORCH_ARG(Tensor, weight) = {}; + /// Specifies a target value that is ignored + /// and does not contribute to the input gradient. + TORCH_ARG(int64_t, ignore_index) = -100; + /// Specifies the reduction to apply to the output. Default: Mean + TORCH_ARG(reduction_t, reduction) = torch::kMean; + /// Specifies the amount of smoothing when computing the loss. Default: 0.0 + TORCH_ARG(double, label_smoothing) = 0.0; +}; + +namespace functional { +/// Options for `torch::nn::functional::cross_entropy`. +/// +/// See the documentation for `torch::nn::CrossEntropyLossOptions` class to +/// learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::cross_entropy(input, target, +/// F::CrossEntropyFuncOptions().ignore_index(-100).reduction(torch::kMean)); +/// ``` +using CrossEntropyFuncOptions = CrossEntropyLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `BCEWithLogitsLoss` module. +/// +/// Example: +/// ``` +/// BCEWithLogitsLoss +/// model(BCEWithLogitsLossOptions().reduction(torch::kNone).weight(weight)); +/// ``` +struct TORCH_API BCEWithLogitsLossOptions { + typedef std::variant + reduction_t; + /// A manual rescaling weight given to the loss of each batch element. + /// If given, has to be a Tensor of size `nbatch`. + TORCH_ARG(Tensor, weight) = {}; + /// Specifies the reduction to apply to the output. Default: Mean + TORCH_ARG(reduction_t, reduction) = torch::kMean; + /// A weight of positive examples. + /// Must be a vector with length equal to the number of classes. 
+ TORCH_ARG(Tensor, pos_weight) = {}; +}; + +namespace functional { +/// Options for `torch::nn::functional::binary_cross_entropy_with_logits`. +/// +/// See the documentation for `torch::nn::BCEWithLogitsLossOptions` class to +/// learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::binary_cross_entropy_with_logits(input, target, +/// F::BinaryCrossEntropyWithLogitsFuncOptions().pos_weight(pos_weight).reduction(torch::kSum)); +/// ``` +using BinaryCrossEntropyWithLogitsFuncOptions = BCEWithLogitsLossOptions; +} // namespace functional + +} // namespace nn +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformer.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformer.h new file mode 100644 index 0000000000000000000000000000000000000000..41db38fe0757a72081b0125f8747bc0b65c16c85 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformer.h @@ -0,0 +1,64 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include + +namespace torch { +namespace nn { + +/// Options for the `Transformer` module +/// +/// Example: +/// ``` +/// TransformerOptions options; +/// TransformerOptions options(16, 4); +/// auto options = TransformerOptions().d_model(4).nhead(2).dropout(0.0); +/// ``` +struct TORCH_API TransformerOptions { + // The following constructors are commonly used + // Please don't add more unless it is proved as a common usage + TransformerOptions() = default; + TransformerOptions(int64_t d_model, int64_t nhead); + TransformerOptions( + int64_t d_model, + int64_t nhead, + int64_t num_encoder_layers, + int64_t num_decoder_layers); + + /// the number of expected features in the encoder/decoder inputs + /// (default=512) + TORCH_ARG(int64_t, d_model) = 512; + + /// the number of heads in 
the multiheadattention models (default=8) + TORCH_ARG(int64_t, nhead) = 8; + + /// the number of sub-encoder-layers in the encoder (default=6) + TORCH_ARG(int64_t, num_encoder_layers) = 6; + + /// the number of sub-decoder-layers in the decoder (default=6) + TORCH_ARG(int64_t, num_decoder_layers) = 6; + + /// the dimension of the feedforward network model (default=2048) + TORCH_ARG(int64_t, dim_feedforward) = 2048; + + /// the dropout value (default=0.1) + TORCH_ARG(double, dropout) = 0.1; + + /// the activation function of encoder/decoder intermediate layer + /// (default=``torch::kReLU``) + TORCH_ARG(activation_t, activation) = torch::kReLU; + + /// custom encoder (default=None) + TORCH_ARG(AnyModule, custom_encoder); + + /// custom decoder (default=None) + TORCH_ARG(AnyModule, custom_decoder); +}; + +} // namespace nn +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/upsampling.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/upsampling.h new file mode 100644 index 0000000000000000000000000000000000000000..d03e5f2345f320624fca623109fe21f095a54983 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/upsampling.h @@ -0,0 +1,110 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include + +namespace torch { +namespace nn { + +/// Options for the `Upsample` module. +/// +/// Example: +/// ``` +/// Upsample +/// model(UpsampleOptions().scale_factor(std::vector({3})).mode(torch::kLinear).align_corners(false)); +/// ``` +struct TORCH_API UpsampleOptions { + /// output spatial sizes. + TORCH_ARG(c10::optional>, size) = c10::nullopt; + + /// multiplier for spatial size. + TORCH_ARG(c10::optional>, scale_factor) = c10::nullopt; + + /// the upsampling algorithm: one of "nearest", "linear", "bilinear", + /// "bicubic" and "trilinear". 
Default: "nearest" + typedef std::variant< + enumtype::kNearest, + enumtype::kLinear, + enumtype::kBilinear, + enumtype::kBicubic, + enumtype::kTrilinear> + mode_t; + TORCH_ARG(mode_t, mode) = torch::kNearest; + + /// if "True", the corner pixels of the input and output tensors are + /// aligned, and thus preserving the values at those pixels. This only has + /// effect when :attr:`mode` is "linear", "bilinear", "bicubic", or + /// "trilinear". Default: "False" + TORCH_ARG(c10::optional, align_corners) = c10::nullopt; +}; + +namespace functional { + +/// Options for `torch::nn::functional::interpolate`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::interpolate(input, +/// F::InterpolateFuncOptions().size(std::vector({4})).mode(torch::kNearest)); +/// ``` +struct TORCH_API InterpolateFuncOptions { + typedef std::variant< + enumtype::kNearest, + enumtype::kLinear, + enumtype::kBilinear, + enumtype::kBicubic, + enumtype::kTrilinear, + enumtype::kArea, + enumtype::kNearestExact> + mode_t; + + /// output spatial sizes. + TORCH_ARG(c10::optional>, size) = c10::nullopt; + + /// multiplier for spatial size. + TORCH_ARG(c10::optional>, scale_factor) = c10::nullopt; + + /// the upsampling algorithm: one of "nearest", "linear", "bilinear", + /// "bicubic", "trilinear", "area", "nearest-exact". Default: "nearest" + TORCH_ARG(mode_t, mode) = torch::kNearest; + + /// Geometrically, we consider the pixels of the input and output as squares + /// rather than points. If set to "True", the input and output tensors are + /// aligned by the center points of their corner pixels, preserving the values + /// at the corner pixels. If set to "False", the input and output tensors + /// are aligned by the corner points of their corner pixels, and the + /// interpolation uses edge value padding for out-of-boundary values, making + /// this operation *independent* of input size when :attr:`scale_factor` is + /// kept the same. 
This only has an effect when :attr:`mode` is "linear", + /// "bilinear", "bicubic" or "trilinear". Default: "False" + TORCH_ARG(c10::optional, align_corners) = c10::nullopt; + + /// recompute the scale_factor for use in the + /// interpolation calculation. When `scale_factor` is passed as a parameter, + /// it is used to compute the `output_size`. If `recompute_scale_factor` is + /// `true` or not specified, a new `scale_factor` will be computed based on + /// the output and input sizes for use in the interpolation computation (i.e. + /// the computation will be identical to if the computed `output_size` were + /// passed-in explicitly). Otherwise, the passed-in `scale_factor` will be + /// used in the interpolation computation. Note that when `scale_factor` is + /// floating-point, the recomputed scale_factor may differ from the one passed + /// in due to rounding and precision issues. + TORCH_ARG(c10::optional, recompute_scale_factor) = c10::nullopt; + + /// flag to apply anti-aliasing. Using anti-alias + /// option together with :attr:`align_corners` equals "False", interpolation + /// result would match Pillow result for downsampling operation. Supported + /// modes: "bilinear". Default: "False". + TORCH_ARG(bool, antialias) = false; +}; + +} // namespace functional + +} // namespace nn +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/pimpl-inl.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/pimpl-inl.h new file mode 100644 index 0000000000000000000000000000000000000000..b38e6cf2c0ff729485cf4a27a1ae49818d06c807 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/pimpl-inl.h @@ -0,0 +1,74 @@ +// This class exists only to do SFINAE on abstract types `T` that are really +// `ModuleHolder`, because there's no good way to say that `T` is a +// `ModuleHolder` over some unknown type `ModuleType`. 
With this, you can do +// `enable_if_t>`. +struct ModuleHolderIndicator {}; + +// A type trait that is true for types that are `ModuleHolder`s. +template +using is_module_holder = std::is_base_of>; + +template +using disable_if_module_holder_t = disable_if_t::value>; + +// A collection of templates that answer the question whether a type `T` is a +// `ModuleHolder`, and if so whether its contained type is of type `C`. This is +// tricky because it is hard to short circuit in template metaprogramming. A +// naive and incorrect solution to this problem would be something like +// `disable_if::value && typename T::ContainedType == C>`. +// This would disable all types that are not `ModuleHolder`s, because even +// though the `is_module_holder::value` may be `false` for such types the +// `T::ContainedType` access would be ill-formed and thus fail the whole +// expression by the rules of SFINAE. Instead we have to use template +// specialization to statically branch on the first condition +// (`is_module_holder`) and are only then allowed to query +// `T::ContainedType` in the branch for which the condition was true. + +// Base template. +template +struct is_module_holder_of_impl; + +// False branch. `T` is not a `ModuleHolder` and thus not a `ModuleHolder` with +// contained type `C`. +template +struct is_module_holder_of_impl : std::false_type {}; + +// True branch. `T` is a `ModuleHolder` and thus we can legit access its +// `ContainedType` and compare it against `C`. +template +struct is_module_holder_of_impl + : std::is_same {}; + +// Helper template. +template +struct is_module_holder_of : is_module_holder_of_impl< + is_module_holder::value, + decay_t, + decay_t> {}; + +// A collection of templates that allow deducing the return type of the +// `forward()` method, but only if a module actually has a `forward()` method, +// and otherwise deduces to the type `void`. 
+ +template +struct return_type_of_forward_impl; + +template +struct return_type_of_forward_impl { + using type = decltype(::std::declval().forward(::std::declval()...)); +}; + +template +struct return_type_of_forward_impl { + using type = void; +}; + +template +using return_type_of_forward = return_type_of_forward_impl< + torch::detail::has_forward::value, + C, + Args...>; + +template +using return_type_of_forward_t = + typename return_type_of_forward::type; diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/pimpl.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/pimpl.h new file mode 100644 index 0000000000000000000000000000000000000000..d66d83c257ebd0061b5fa59e1299dd16ff9badb8 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/pimpl.h @@ -0,0 +1,214 @@ +#pragma once + +#include +#include +#include +#include + +#include + +#include +#include +#include + +namespace torch { +namespace detail { +// Dump all the template metaprogramming in this file. +#include +} // namespace detail + +namespace nn { + +/// A `ModuleHolder` is essentially a wrapper around `std::shared_ptr` where +/// `M` is an `nn::Module` subclass, with convenient constructors defined for +/// the kind of constructions we want to allow for our modules. +template +class ModuleHolder : torch::detail::ModuleHolderIndicator { + protected: + /// The module pointer this class wraps. + /// NOTE: Must be placed at the top of the class so that we can use it with + /// trailing return types below. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::shared_ptr impl_; + + public: + using ContainedType = Contained; + + /// Default constructs the contained module if if has a default constructor, + /// else produces a static error. 
+ /// + /// NOTE: This uses the behavior of template + /// classes in C++ that constructors (or any methods) are only compiled when + /// actually used. + ModuleHolder() : impl_(default_construct()) { + static_assert( + std::is_default_constructible::value, + "You are trying to default construct a module which has " + "no default constructor. Use = nullptr to give it the empty state " + "(e.g. `Linear linear = nullptr;` instead of `Linear linear;`)."); + } + + /// Constructs the `ModuleHolder` with an empty contained value. Access to + /// the underlying module is not permitted and will throw an exception, until + /// a value is assigned. + /* implicit */ ModuleHolder(std::nullptr_t) : impl_(nullptr) {} + + /// Constructs the `ModuleHolder` with a contained module, forwarding all + /// arguments to its constructor. + template < + typename Head, + typename... Tail, + typename = typename std::enable_if< + !(torch::detail::is_module_holder_of::value && + (sizeof...(Tail) == 0))>::type> + explicit ModuleHolder(Head&& head, Tail&&... tail) + : impl_(new Contained( + std::forward(head), + std::forward(tail)...)) {} + + /// Constructs the `ModuleHolder` from a pointer to the contained type. + /// Example: `Linear(std::make_shared(...))`. + /* implicit */ ModuleHolder(std::shared_ptr module) + : impl_(std::move(module)) {} + + /// Returns true if the `ModuleHolder` contains a module, or false if it is + /// `nullptr`. + explicit operator bool() const noexcept { + return !is_empty(); + } + + /// Forwards to the contained module. + Contained* operator->() { + return get(); + } + + /// Forwards to the contained module. + const Contained* operator->() const { + return get(); + } + + /// Returns a reference to the contained module. + Contained& operator*() { + return *get(); + } + + /// Returns a const reference to the contained module. + const Contained& operator*() const { + return *get(); + } + + /// Returns a shared pointer to the underlying module. 
+ const std::shared_ptr& ptr() const { + TORCH_CHECK(!is_empty(), "Accessing empty ModuleHolder"); + return impl_; + } + + /// Returns a pointer to the underlying module. + Contained* get() { + TORCH_CHECK(!is_empty(), "Accessing empty ModuleHolder"); + return impl_.get(); + } + + /// Returns a const pointer to the underlying module. + const Contained* get() const { + TORCH_CHECK(!is_empty(), "Accessing empty ModuleHolder"); + return impl_.get(); + } + + /// Calls the `forward()` method of the contained module. + template + auto operator()(Args&&... args) + -> torch::detail::return_type_of_forward_t { + // This will not compile if the module does not have a `forward()` method + // (as expected). + // NOTE: `std::forward` is qualified to prevent VS2017 emitting + // error C2872: 'std': ambiguous symbol + return impl_->forward(::std::forward(args)...); + } + + /// Forwards to the subscript operator of the contained module. + /// NOTE: std::forward is qualified to prevent VS2017 emitting + /// error C2872: 'std': ambiguous symbol + template + decltype(auto) operator[](Arg&& arg) { + return (*impl_)[::std::forward(arg)]; + } + + /// Returns true if the `ModuleHolder` does not contain a module. + bool is_empty() const noexcept { + return impl_ == nullptr; + } + + private: + /// In C++17, the two methods below could be written as the following: + /// if constexpr (std::is_default_constructible_v) { + /// return std::make_shared(); + /// } else { + /// return nullptr; + /// } + /// In C++11, we use SFINAE instead of `if constexpr`. + + template < + typename T = Contained, + typename = torch::enable_if_t::value>> + std::shared_ptr default_construct() { + return std::make_shared(); + } + + template + torch::disable_if_t< + std::is_default_constructible::value, + std::shared_ptr> + default_construct() { + return nullptr; + } +}; + +/// Pretty prints the given `Module` into the `ostream`. 
+template +std::ostream& operator<<( + std::ostream& stream, + const nn::ModuleHolder& module) { + return stream << *module; +} + +/// Serializes a `ModuleHolder` into an `OutputArchive`. +template +serialize::OutputArchive& operator<<( + serialize::OutputArchive& archive, + const nn::ModuleHolder& module) { + return archive << module.ptr(); +} + +/// Deserializes a `ModuleHolder` from an `InputArchive`. +template +serialize::InputArchive& operator>>( + serialize::InputArchive& archive, + nn::ModuleHolder& module) { + return archive >> module.ptr(); +} + +} // namespace nn +} // namespace torch + +// Workaround for CUDA 10.2 and below not allowing attribute unused on +// using declarations. +#ifdef __CUDACC__ +#define TORCH_UNUSED_EXCEPT_CUDA +#else +#define TORCH_UNUSED_EXCEPT_CUDA C10_UNUSED +#endif + +/// Defines a class `Name` which inherits from `nn::ModuleHolder` to provide a +/// wrapper over a `std::shared_ptr`. +/// `Impl` is a type alias for `ImplType` which provides a way to call static +/// method of `ImplType`. +#define TORCH_MODULE_IMPL(Name, ImplType) \ + class Name : public torch::nn::ModuleHolder { /* NOLINT */ \ + public: \ + using torch::nn::ModuleHolder::ModuleHolder; \ + using Impl TORCH_UNUSED_EXCEPT_CUDA = ImplType; \ + } + +/// Like `TORCH_MODULE_IMPL`, but defaults the `ImplType` name to `Impl`. 
+#define TORCH_MODULE(Name) TORCH_MODULE_IMPL(Name, Name##Impl) diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/utils.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/utils.h new file mode 100644 index 0000000000000000000000000000000000000000..8dbfaf5126e4f3db94174937432ea4b017354ab7 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/utils.h @@ -0,0 +1,5 @@ +#pragma once + +#include +#include +#include diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/adamw.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/adamw.h new file mode 100644 index 0000000000000000000000000000000000000000..a63d7fc32d455425fbb6967534e72c36ac2830c8 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/adamw.h @@ -0,0 +1,92 @@ +#pragma once + +#include +#include +#include + +#include +#include + +namespace torch { +namespace serialize { +class OutputArchive; +class InputArchive; +} // namespace serialize +} // namespace torch + +namespace torch { +namespace optim { + +struct TORCH_API AdamWOptions : public OptimizerCloneableOptions { + AdamWOptions(double lr = 1e-3); + TORCH_ARG(double, lr) = 1e-3; + typedef std::tuple betas_t; + TORCH_ARG(betas_t, betas) = std::make_tuple(0.9, 0.999); + TORCH_ARG(double, eps) = 1e-8; + TORCH_ARG(double, weight_decay) = 1e-2; + TORCH_ARG(bool, amsgrad) = false; + + public: + void serialize(torch::serialize::InputArchive& archive) override; + void serialize(torch::serialize::OutputArchive& archive) const override; + TORCH_API friend bool operator==( + const AdamWOptions& lhs, + const AdamWOptions& rhs); + double get_lr() const override; + void set_lr(const double lr) override; +}; + +struct TORCH_API AdamWParamState + : public OptimizerCloneableParamState { + 
TORCH_ARG(int64_t, step) = 0; + TORCH_ARG(torch::Tensor, exp_avg); + TORCH_ARG(torch::Tensor, exp_avg_sq); + TORCH_ARG(torch::Tensor, max_exp_avg_sq) = {}; + + public: + void serialize(torch::serialize::InputArchive& archive) override; + void serialize(torch::serialize::OutputArchive& archive) const override; + TORCH_API friend bool operator==( + const AdamWParamState& lhs, + const AdamWParamState& rhs); +}; + +class TORCH_API AdamW : public Optimizer { + public: + explicit AdamW( + std::vector param_groups, + AdamWOptions defaults = {}) + : Optimizer( + std::move(param_groups), + std::make_unique(defaults)) { + TORCH_CHECK(defaults.lr() >= 0, "Invalid learning rate: ", defaults.lr()); + TORCH_CHECK(defaults.eps() >= 0, "Invalid epsilon value: ", defaults.eps()); + auto betas = defaults.betas(); + TORCH_CHECK( + 0 <= std::get<0>(betas) && std::get<0>(betas) < 1.0, + "Invalid beta parameter at index 0: ", + std::get<0>(betas)); + TORCH_CHECK( + 0 <= std::get<1>(betas) && std::get<1>(betas) < 1.0, + "Invalid beta parameter at index 1: ", + std::get<1>(betas)); + TORCH_CHECK( + defaults.weight_decay() >= 0, + "Invalid weight_decay value: ", + defaults.weight_decay()); + } + explicit AdamW(std::vector params, AdamWOptions defaults = {}) + : AdamW({OptimizerParamGroup(std::move(params))}, defaults) {} + + torch::Tensor step(LossClosure closure = nullptr) override; + void save(serialize::OutputArchive& archive) const override; + void load(serialize::InputArchive& archive) override; + + private: + template + static void serialize(Self& self, Archive& archive) { + _TORCH_OPTIM_SERIALIZE_WITH_TEMPLATE_ARG(AdamW); + } +}; +} // namespace optim +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/schedulers/lr_scheduler.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/schedulers/lr_scheduler.h new file mode 100644 index 
0000000000000000000000000000000000000000..26d324fbecce166c19e315ab41142b5e9e4cf4de --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/schedulers/lr_scheduler.h @@ -0,0 +1,39 @@ +#pragma once + +#include + +#include + +namespace torch { +namespace optim { + +class TORCH_API LRScheduler { + public: + // This class needs to take a reference of an optimizer from outside such that + // it can modify its learning rates; due to this the lifetime of said + // optimizer must be maintained + LRScheduler(torch::optim::Optimizer& optimizer); + + virtual ~LRScheduler() = default; + + void step(); + + protected: + // A vector of learning rates is calculated and returned from the specific + // subclass. A vector is returned with each element being a separate learning + // rate for each param group - although the normal use case would be to return + // a vector of identical elements. + virtual std::vector get_lrs() = 0; + + // Get current learning rates from the optimizer + std::vector get_current_lrs() const; + + unsigned step_count_{}; + + private: + void set_optimizer_lrs(const std::vector& learning_rates); + + torch::optim::Optimizer& optimizer_; +}; +} // namespace optim +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/schedulers/reduce_on_plateau_scheduler.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/schedulers/reduce_on_plateau_scheduler.h new file mode 100644 index 0000000000000000000000000000000000000000..8214070104988d35d975cf39b2816e70de5985ec --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/schedulers/reduce_on_plateau_scheduler.h @@ -0,0 +1,63 @@ +#pragma once + +#include + +#include + +#include + +#include + +#include + +namespace torch { +namespace optim { + +class TORCH_API ReduceLROnPlateauScheduler { + public: + enum 
SchedulerMode { min, max }; + enum ThresholdMode { rel, abs }; + ReduceLROnPlateauScheduler( + Optimizer& optimizer, + SchedulerMode mode = min, + float factor = 0.1, + int patience = 10, + double threshold = 1e-4, + ThresholdMode threshold_mode = rel, + int cooldown = 0, + const std::vector& min_lr = std::vector(), + double eps = 1e-8, + bool verbose = false); + + virtual ~ReduceLROnPlateauScheduler() = default; + + void step(float metric); + + private: + void reset(); + void reduce_lr(int epoch); + bool in_cooldown(); + bool is_better(float a); + void init_is_better( + SchedulerMode mode, + double threshold, + ThresholdMode threshold_mode); + + Optimizer& optimizer; + SchedulerMode mode; + float mode_worse; + float factor; + int patience; + double threshold; + ThresholdMode threshold_mode; + int cooldown; + int cooldown_counter; + std::vector min_lrs; + double eps; + float best; + bool verbose; + int last_epoch; + int num_bad_epochs; +}; +} // namespace optim +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/schedulers/step_lr.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/schedulers/step_lr.h new file mode 100644 index 0000000000000000000000000000000000000000..289bb4bd84e54e995bfc6581aa0c76724661c7ca --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/schedulers/step_lr.h @@ -0,0 +1,22 @@ +#pragma once + +#include + +namespace torch { +namespace optim { + +class TORCH_API StepLR : public LRScheduler { + public: + StepLR( + torch::optim::Optimizer& optimizer, + const unsigned step_size, + const double gamma = 0.1); + + private: + std::vector get_lrs() override; + + const unsigned step_size_; + const double gamma_; +}; +} // namespace optim +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/sgd.h 
b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/sgd.h new file mode 100644 index 0000000000000000000000000000000000000000..85e9aba7ba48f751d0ae00f8356ca8d47d7b0ad2 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/sgd.h @@ -0,0 +1,91 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include +#include +#include + +namespace torch { +namespace serialize { +class OutputArchive; +class InputArchive; +} // namespace serialize +} // namespace torch + +namespace torch { +namespace optim { + +struct TORCH_API SGDOptions : public OptimizerCloneableOptions { + SGDOptions(double lr); + TORCH_ARG(double, lr); + TORCH_ARG(double, momentum) = 0; + TORCH_ARG(double, dampening) = 0; + TORCH_ARG(double, weight_decay) = 0; + TORCH_ARG(bool, nesterov) = false; + + public: + void serialize(torch::serialize::InputArchive& archive) override; + void serialize(torch::serialize::OutputArchive& archive) const override; + TORCH_API friend bool operator==( + const SGDOptions& lhs, + const SGDOptions& rhs); + double get_lr() const override; + void set_lr(const double lr) override; +}; + +struct TORCH_API SGDParamState + : public OptimizerCloneableParamState { + TORCH_ARG(torch::Tensor, momentum_buffer); + + public: + void serialize(torch::serialize::InputArchive& archive) override; + void serialize(torch::serialize::OutputArchive& archive) const override; + TORCH_API friend bool operator==( + const SGDParamState& lhs, + const SGDParamState& rhs); +}; + +class TORCH_API SGD : public Optimizer { + public: + explicit SGD( + std::vector param_groups, + SGDOptions defaults) + : Optimizer( + std::move(param_groups), + std::make_unique(defaults)) { + TORCH_CHECK(defaults.lr() >= 0, "Invalid learning rate: ", defaults.lr()); + TORCH_CHECK( + defaults.momentum() >= 0, + "Invalid momentum value: ", + defaults.momentum()); + TORCH_CHECK( + defaults.weight_decay() >= 0, + "Invalid 
weight_decay value: ", + defaults.weight_decay()); + TORCH_CHECK( + !defaults.nesterov() || + (defaults.momentum() > 0 && defaults.dampening() == 0), + "Nesterov momentum requires a momentum and zero dampening"); + } + + explicit SGD(std::vector params, SGDOptions defaults) + : SGD({OptimizerParamGroup(std::move(params))}, defaults) {} + + torch::Tensor step(LossClosure closure = nullptr) override; + + void save(serialize::OutputArchive& archive) const override; + void load(serialize::InputArchive& archive) override; + + private: + template + static void serialize(Self& self, Archive& archive) { + _TORCH_OPTIM_SERIALIZE_WITH_TEMPLATE_ARG(SGD); + } +}; +} // namespace optim +} // namespace torch diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/device/device_adjacent_difference.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/device/device_adjacent_difference.cuh new file mode 100644 index 0000000000000000000000000000000000000000..a63ff9111e18b1a6b84a3358deb34d6313cece10 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/device/device_adjacent_difference.cuh @@ -0,0 +1,685 @@ +/****************************************************************************** + * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include + +#include + +#include + +CUB_NAMESPACE_BEGIN + +//! @rst +//! DeviceAdjacentDifference provides device-wide, parallel operations for +//! computing the differences of adjacent elements residing within +//! device-accessible memory. +//! +//! Overview +//! ++++++++++++++++++++++++++ +//! +//! - DeviceAdjacentDifference calculates the differences of adjacent elements in +//! d_input. Because the binary operation could be noncommutative, there +//! are two sets of methods. Methods named SubtractLeft subtract left element +//! 
``*(i - 1)`` of input sequence from current element ``*i``. +//! Methods named ``SubtractRight`` subtract current element ``*i`` from the +//! right one ``*(i + 1)``: +//! +//! .. code-block:: c++ +//! +//! int *d_values; // [1, 2, 3, 4] +//! //... +//! int *d_subtract_left_result <-- [ 1, 1, 1, 1 ] +//! int *d_subtract_right_result <-- [ -1, -1, -1, 4 ] +//! +//! - For SubtractLeft, if the left element is out of bounds, the iterator is +//! assigned to ``*(result + (i - first))`` without modification. +//! - For SubtractRight, if the right element is out of bounds, the iterator is +//! assigned to ``*(result + (i - first))`` without modification. +//! +//! Snippet +//! ++++++++++++++++++++++++++ +//! +//! The code snippet below illustrates how to use ``DeviceAdjacentDifference`` to +//! compute the left difference between adjacent elements. +//! +//! .. code-block:: c++ +//! +//! #include +//! // or equivalently +//! +//! // Declare, allocate, and initialize device-accessible pointers +//! int num_items; // e.g., 8 +//! int *d_values; // e.g., [1, 2, 1, 2, 1, 2, 1, 2] +//! //... +//! +//! // Determine temporary device storage requirements +//! void *d_temp_storage = nullptr; +//! size_t temp_storage_bytes = 0; +//! +//! cub::DeviceAdjacentDifference::SubtractLeft( +//! d_temp_storage, temp_storage_bytes, d_values, num_items); +//! +//! // Allocate temporary storage +//! cudaMalloc(&d_temp_storage, temp_storage_bytes); +//! +//! // Run operation +//! cub::DeviceAdjacentDifference::SubtractLeft( +//! d_temp_storage, temp_storage_bytes, d_values, num_items); +//! +//! // d_values <-- [1, 1, -1, 1, -1, 1, -1, 1] +//! +//! 
@endrst +struct DeviceAdjacentDifference +{ +private: + template + static CUB_RUNTIME_FUNCTION cudaError_t AdjacentDifference( + void* d_temp_storage, + std::size_t& temp_storage_bytes, + InputIteratorT d_input, + OutputIteratorT d_output, + NumItemsT num_items, + DifferenceOpT difference_op, + cudaStream_t stream) + { + using OffsetT = detail::choose_offset_t; + + using DispatchT = + DispatchAdjacentDifference; + + return DispatchT::Dispatch( + d_temp_storage, temp_storage_bytes, d_input, d_output, static_cast(num_items), difference_op, stream); + } + +public: + //! @rst + //! Subtracts the left element of each adjacent pair of elements residing within device-accessible memory + //! + //! Overview + //! ++++++++++++++++++++++++++ + //! + //! - Calculates the differences of adjacent elements in ``d_input``. + //! That is, ``*d_input`` is assigned to ``*d_output``, and, for each iterator ``i`` in the + //! range ``[d_input + 1, d_input + num_items)``, the result of + //! ``difference_op(*i, *(i - 1))`` is assigned to ``*(d_output + (i - d_input))``. + //! - Note that the behavior is undefined if the input and output ranges + //! overlap in any way. + //! + //! Snippet + //! ++++++++++++++++++++++++++ + //! + //! The code snippet below illustrates how to use ``DeviceAdjacentDifference`` + //! to compute the difference between adjacent elements. + //! + //! + //! .. code-block:: c++ + //! + //! #include + //! // or equivalently + //! + //! struct CustomDifference + //! { + //! template + //! __host__ DataType operator()(DataType &lhs, DataType &rhs) + //! { + //! return lhs - rhs; + //! } + //! }; + //! + //! // Declare, allocate, and initialize device-accessible pointers + //! int num_items; // e.g., 8 + //! int *d_input; // e.g., [1, 2, 1, 2, 1, 2, 1, 2] + //! int *d_output; + //! ... + //! + //! // Determine temporary device storage requirements + //! void *d_temp_storage = nullptr; + //! size_t temp_storage_bytes = 0; + //! + //! 
cub::DeviceAdjacentDifference::SubtractLeftCopy( + //! d_temp_storage, temp_storage_bytes, + //! d_input, d_output, + //! num_items, CustomDifference()); + //! + //! // Allocate temporary storage + //! cudaMalloc(&d_temp_storage, temp_storage_bytes); + //! + //! // Run operation + //! cub::DeviceAdjacentDifference::SubtractLeftCopy( + //! d_temp_storage, temp_storage_bytes, + //! d_input, d_output, + //! num_items, CustomDifference()); + //! + //! // d_input <-- [1, 2, 1, 2, 1, 2, 1, 2] + //! // d_output <-- [1, 1, -1, 1, -1, 1, -1, 1] + //! + //! @endrst + //! + //! @tparam InputIteratorT + //! @rst + //! is a model of `Input Iterator `_, + //! and ``x`` and ``y`` are objects of ``InputIteratorT``'s ``value_type``, then + //! ``x - y`` is defined, and ``InputIteratorT``'s ``value_type`` is convertible to + //! a type in ``OutputIteratorT``'s set of ``value_types``, and the return type + //! of ``x - y`` is convertible to a type in ``OutputIteratorT``'s set of + //! ``value_types``. + //! @endrst + //! + //! @tparam OutputIteratorT + //! @rst + //! is a model of `Output Iterator `_. + //! @endrst + //! + //! @tparam DifferenceOpT + //! Its `result_type` is convertible to a type in `OutputIteratorT`'s set of `value_types`. + //! + //! @tparam NumItemsT + //! **[inferred]** Type of num_items + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in] d_input + //! Pointer to the input sequence + //! + //! @param[out] d_output + //! Pointer to the output sequence + //! + //! @param[in] num_items + //! Number of items in the input sequence + //! + //! @param[in] difference_op + //! The binary function used to compute differences + //! + //! @param[in] stream + //! @rst + //! 
**[optional]** CUDA stream to launch kernels within. Default is stream\ :sub:`0` + //! @endrst + template , + typename NumItemsT = std::uint32_t> + static CUB_RUNTIME_FUNCTION cudaError_t SubtractLeftCopy( + void* d_temp_storage, + std::size_t& temp_storage_bytes, + InputIteratorT d_input, + OutputIteratorT d_output, + NumItemsT num_items, + DifferenceOpT difference_op = {}, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, "cub::DeviceAdjacentDifference::SubtractLeftCopy"); + + constexpr bool may_alias = false; + constexpr bool read_left = true; + + return AdjacentDifference( + d_temp_storage, temp_storage_bytes, d_input, d_output, num_items, difference_op, stream); + } + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + template + CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED static CUB_RUNTIME_FUNCTION cudaError_t SubtractLeftCopy( + void* d_temp_storage, + std::size_t& temp_storage_bytes, + InputIteratorT d_input, + OutputIteratorT d_output, + NumItemsT num_items, + DifferenceOpT difference_op, + cudaStream_t stream, + bool debug_synchronous) + { + CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG + + return SubtractLeftCopy(d_temp_storage, temp_storage_bytes, d_input, d_output, num_items, difference_op, stream); + } +#endif // _CCCL_DOXYGEN_INVOKED + + //! @rst + //! Subtracts the left element of each adjacent pair of elements residing within device-accessible memory. + //! + //! Overview + //! +++++++++++++++++++++++++++++++++++++++++++++ + //! + //! Calculates the differences of adjacent elements in ``d_input``. That is, for + //! each iterator ``i`` in the range ``[d_input + 1, d_input + num_items)``, the + //! result of ``difference_op(*i, *(i - 1))`` is assigned to + //! ``*(d_input + (i - d_input))``. + //! + //! Snippet + //! +++++++++++++++++++++++++++++++++++++++++++++ + //! + //! The code snippet below illustrates how to use ``DeviceAdjacentDifference`` + //! to compute the difference between adjacent elements. + //! + //! 
+ //! .. code-block:: c++ + //! + //! #include + //! // or equivalently + //! + //! struct CustomDifference + //! { + //! template + //! __host__ DataType operator()(DataType &lhs, DataType &rhs) + //! { + //! return lhs - rhs; + //! } + //! }; + //! + //! // Declare, allocate, and initialize device-accessible pointers + //! int num_items; // e.g., 8 + //! int *d_data; // e.g., [1, 2, 1, 2, 1, 2, 1, 2] + //! ... + //! + //! // Determine temporary device storage requirements + //! void *d_temp_storage = nullptr; + //! size_t temp_storage_bytes = 0; + //! cub::DeviceAdjacentDifference::SubtractLeft( + //! d_temp_storage, temp_storage_bytes, + //! d_data, num_items, CustomDifference()); + //! + //! // Allocate temporary storage + //! cudaMalloc(&d_temp_storage, temp_storage_bytes); + //! + //! // Run operation + //! cub::DeviceAdjacentDifference::SubtractLeft( + //! d_temp_storage, temp_storage_bytes, + //! d_data, num_items, CustomDifference()); + //! + //! // d_data <-- [1, 1, -1, 1, -1, 1, -1, 1] + //! + //! @endrst + //! + //! @tparam RandomAccessIteratorT + //! @rst + //! is a model of `Random Access Iterator `_, + //! ``RandomAccessIteratorT`` is mutable. If ``x`` and ``y`` are objects of + //! ``RandomAccessIteratorT``'s ``value_type``, and ``x - y`` is defined, then the + //! return type of ``x - y`` should be convertible to a type in + //! ``RandomAccessIteratorT``'s set of ``value_types``. + //! @endrst + //! + //! @tparam DifferenceOpT + //! Its `result_type` is convertible to a type in `RandomAccessIteratorT`'s + //! set of `value_types`. + //! + //! @tparam NumItemsT + //! **[inferred]** Type of `num_items` + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work + //! is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! 
@param[in,out] d_input + //! Pointer to the input sequence and the result + //! + //! @param[in] num_items + //! Number of items in the input sequence + //! + //! @param[in] difference_op + //! The binary function used to compute differences + //! + //! @param[in] stream + //! @rst + //! **[optional]** CUDA stream to launch kernels within. Default is stream\ :sub:`0`. + //! @endrst + template , + typename NumItemsT = std::uint32_t> + static CUB_RUNTIME_FUNCTION cudaError_t SubtractLeft( + void* d_temp_storage, + std::size_t& temp_storage_bytes, + RandomAccessIteratorT d_input, + NumItemsT num_items, + DifferenceOpT difference_op = {}, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, "cub::DeviceAdjacentDifference::SubtractLeft"); + + constexpr bool may_alias = true; + constexpr bool read_left = true; + + return AdjacentDifference( + d_temp_storage, temp_storage_bytes, d_input, d_input, num_items, difference_op, stream); + } + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + template + CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED static CUB_RUNTIME_FUNCTION cudaError_t SubtractLeft( + void* d_temp_storage, + std::size_t& temp_storage_bytes, + RandomAccessIteratorT d_input, + NumItemsT num_items, + DifferenceOpT difference_op, + cudaStream_t stream, + bool debug_synchronous) + { + CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG + + return SubtractLeft(d_temp_storage, temp_storage_bytes, d_input, num_items, difference_op, stream); + } +#endif // _CCCL_DOXYGEN_INVOKED + + //! @rst + //! Subtracts the right element of each adjacent pair of elements residing within device-accessible memory. + //! + //! Overview + //! ++++++++++++++++++++++++++ + //! + //! - Calculates the right differences of adjacent elements in ``d_input``. + //! That is, ``*(d_input + num_items - 1)`` is assigned to + //! ``*(d_output + num_items - 1)``, and, for each iterator ``i`` in the range + //! ``[d_input, d_input + num_items - 1)``, the result of + //! 
``difference_op(*i, *(i + 1))`` is assigned to + //! ``*(d_output + (i - d_input))``. + //! - Note that the behavior is undefined if the input and output ranges + //! overlap in any way. + //! + //! Snippet + //! ++++++++++++++++++++++++++ + //! + //! The code snippet below illustrates how to use ``DeviceAdjacentDifference`` + //! to compute the difference between adjacent elements. + //! + //! + //! .. code-block:: c++ + //! + //! #include + //! // or equivalently + //! + //! struct CustomDifference + //! { + //! template + //! __host__ DataType operator()(DataType &lhs, DataType &rhs) + //! { + //! return lhs - rhs; + //! } + //! }; + //! + //! // Declare, allocate, and initialize device-accessible pointers + //! int num_items; // e.g., 8 + //! int *d_input; // e.g., [1, 2, 1, 2, 1, 2, 1, 2] + //! int *d_output; + //! .. + //! + //! // Determine temporary device storage requirements + //! void *d_temp_storage = nullptr; + //! size_t temp_storage_bytes = 0; + //! cub::DeviceAdjacentDifference::SubtractRightCopy( + //! d_temp_storage, temp_storage_bytes, + //! d_input, d_output, num_items, CustomDifference()); + //! + //! // Allocate temporary storage + //! cudaMalloc(&d_temp_storage, temp_storage_bytes); + //! + //! // Run operation + //! cub::DeviceAdjacentDifference::SubtractRightCopy( + //! d_temp_storage, temp_storage_bytes, + //! d_input, d_output, num_items, CustomDifference()); + //! + //! // d_input <-- [1, 2, 1, 2, 1, 2, 1, 2] + //! // d_data <-- [-1, 1, -1, 1, -1, 1, -1, 2] + //! + //! @endrst + //! + //! @tparam InputIteratorT + //! @rst + //! is a model of `Input Iterator `_, + //! and ``x`` and ``y`` are objects of ``InputIteratorT``'s ``value_type``, then + //! ``x - y`` is defined, and ``InputIteratorT``'s ``value_type`` is convertible to + //! a type in ``OutputIteratorT``'s set of ``value_types``, and the return type + //! of ``x - y`` is convertible to a type in ``OutputIteratorT``'s set of + //! ``value_types``. + //! @endrst + //! + //! 
@tparam OutputIteratorT + //! @rst + //! is a model of `Output Iterator `_. + //! @endrst + //! + //! @tparam DifferenceOpT + //! Its `result_type` is convertible to a type in `RandomAccessIteratorT`'s + //! set of `value_types`. + //! + //! @tparam NumItemsT + //! **[inferred]** Type of num_items + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work + //! is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in] d_input + //! Pointer to the input sequence + //! + //! @param[out] d_output + //! Pointer to the output sequence + //! + //! @param[in] num_items + //! Number of items in the input sequence + //! + //! @param[in] difference_op + //! The binary function used to compute differences. + //! + //! @param[in] stream + //! @rst + //! **[optional]** CUDA stream to launch kernels within. Default is stream\ :sub:`0`. + //! 
@endrst + template , + typename NumItemsT = std::uint32_t> + static CUB_RUNTIME_FUNCTION cudaError_t SubtractRightCopy( + void* d_temp_storage, + std::size_t& temp_storage_bytes, + InputIteratorT d_input, + OutputIteratorT d_output, + NumItemsT num_items, + DifferenceOpT difference_op = {}, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, "cub::DeviceAdjacentDifference::SubtractRightCopy"); + + constexpr bool may_alias = false; + constexpr bool read_left = false; + + return AdjacentDifference( + d_temp_storage, temp_storage_bytes, d_input, d_output, num_items, difference_op, stream); + } + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + template + CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED static CUB_RUNTIME_FUNCTION cudaError_t SubtractRightCopy( + void* d_temp_storage, + std::size_t& temp_storage_bytes, + InputIteratorT d_input, + OutputIteratorT d_output, + NumItemsT num_items, + DifferenceOpT difference_op, + cudaStream_t stream, + bool debug_synchronous) + { + CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG + + return SubtractRightCopy(d_temp_storage, temp_storage_bytes, d_input, d_output, num_items, difference_op, stream); + } +#endif // _CCCL_DOXYGEN_INVOKED + + //! @rst + //! Subtracts the right element of each adjacent pair of elements residing within device-accessible memory. + //! + //! Overview + //! ++++++++++++++++++++++++++ + //! + //! Calculates the right differences of adjacent elements in ``d_input``. + //! That is, for each iterator ``i`` in the range + //! ``[d_input, d_input + num_items - 1)``, the result of + //! ``difference_op(*i, *(i + 1))`` is assigned to ``*(d_input + (i - d_input))``. + //! + //! Snippet + //! ++++++++++++++++++++++++++ + //! + //! The code snippet below illustrates how to use ``DeviceAdjacentDifference`` + //! to compute the difference between adjacent elements. + //! + //! + //! .. code-block:: c++ + //! + //! #include + //! // or equivalently + //! + //! 
// Declare, allocate, and initialize device-accessible pointers + //! int num_items; // e.g., 8 + //! int *d_data; // e.g., [1, 2, 1, 2, 1, 2, 1, 2] + //! ... + //! + //! // Determine temporary device storage requirements + //! void *d_temp_storage = nullptr; + //! size_t temp_storage_bytes = 0; + //! cub::DeviceAdjacentDifference::SubtractRight( + //! d_temp_storage, temp_storage_bytes, d_data, num_items); + //! + //! // Allocate temporary storage + //! cudaMalloc(&d_temp_storage, temp_storage_bytes); + //! + //! // Run operation + //! cub::DeviceAdjacentDifference::SubtractRight( + //! d_temp_storage, temp_storage_bytes, d_data, num_items); + //! + //! // d_data <-- [-1, 1, -1, 1, -1, 1, -1, 2] + //! + //! @endrst + //! + //! @tparam RandomAccessIteratorT + //! @rst + //! is a model of `Random Access Iterator `_, + //! ``RandomAccessIteratorT`` is mutable. If ``x`` and ``y`` are objects of + //! ``RandomAccessIteratorT``'s `value_type`, and ``x - y`` is defined, then the + //! return type of ``x - y`` should be convertible to a type in + //! ``RandomAccessIteratorT``'s set of ``value_types``. + //! @endrst + //! + //! @tparam DifferenceOpT + //! Its `result_type` is convertible to a type in `RandomAccessIteratorT`'s + //! set of `value_types`. + //! + //! @tparam NumItemsT + //! **[inferred]** Type of num_items + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work + //! is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in,out] d_input + //! Pointer to the input sequence + //! + //! @param[in] num_items + //! Number of items in the input sequence + //! + //! @param[in] difference_op + //! The binary function used to compute differences + //! + //! @param[in] stream + //! @rst + //! **[optional]** CUDA stream to launch kernels within. 
Default is stream\ :sub:`0`. + //! @endrst + template , + typename NumItemsT = std::uint32_t> + static CUB_RUNTIME_FUNCTION cudaError_t SubtractRight( + void* d_temp_storage, + std::size_t& temp_storage_bytes, + RandomAccessIteratorT d_input, + NumItemsT num_items, + DifferenceOpT difference_op = {}, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, "cub::DeviceAdjacentDifference::SubtractRight"); + + constexpr bool may_alias = true; + constexpr bool read_left = false; + + return AdjacentDifference( + d_temp_storage, temp_storage_bytes, d_input, d_input, num_items, difference_op, stream); + } + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + template + CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED static CUB_RUNTIME_FUNCTION cudaError_t SubtractRight( + void* d_temp_storage, + std::size_t& temp_storage_bytes, + RandomAccessIteratorT d_input, + NumItemsT num_items, + DifferenceOpT difference_op, + cudaStream_t stream, + bool debug_synchronous) + { + CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG + + return SubtractRight(d_temp_storage, temp_storage_bytes, d_input, num_items, difference_op, stream); + } +#endif // _CCCL_DOXYGEN_INVOKED +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/device/device_partition.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/device/device_partition.cuh new file mode 100644 index 0000000000000000000000000000000000000000..c68f6cf4d617355dc76eda82a5454a724e7c2956 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/device/device_partition.cuh @@ -0,0 +1,748 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +//! @file +//! cub::DevicePartition provides device-wide, parallel operations for partitioning sequences of data items residing +//! within device-accessible memory. 
+ +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include + +#include + +#include + +CUB_NAMESPACE_BEGIN + +//! @rst +//! DevicePartition provides device-wide, parallel operations for +//! partitioning sequences of data items residing within device-accessible memory. +//! +//! Overview +//! ++++++++++++++++++++++++++ +//! +//! These operations apply a selection criterion to construct a partitioned +//! output sequence from items selected/unselected from a specified input +//! sequence. +//! +//! Usage Considerations +//! ++++++++++++++++++++++++++ +//! +//! @cdp_class{DevicePartition} +//! +//! Performance +//! ++++++++++++++++++++++++++ +//! +//! @linear_performance{partition} +//! +//! @endrst +struct DevicePartition +{ + //! @rst + //! Uses the ``d_flags`` sequence to split the corresponding items from + //! ``d_in`` into a partitioned sequence ``d_out``. + //! The total number of items copied into the first partition is written to ``d_num_selected_out``. + //! + //! - The value type of ``d_flags`` must be castable to ``bool`` (e.g., ``bool``, ``char``, ``int``, etc.). + //! - Copies of the selected items are compacted into ``d_out`` and maintain + //! their original relative ordering, however copies of the unselected + //! items are compacted into the rear of ``d_out`` in reverse order. + //! - The range ``[d_out, d_out + num_items)`` shall not overlap + //! ``[d_in, d_in + num_items)`` nor ``[d_flags, d_flags + num_items)`` in any way. + //! The range ``[d_in, d_in + num_items)`` may overlap ``[d_flags, d_flags + num_items)``. + //! - @devicestorage + //! + //! Snippet + //! ++++++++++++++++++++++++++ + //! + //! 
The code snippet below illustrates the compaction of items selected from an ``int`` device vector. + //! + //! .. code-block:: c++ + //! + //! #include + //! // or equivalently + //! + //! // Declare, allocate, and initialize device-accessible pointers for + //! // input, flags, and output + //! int num_items; // e.g., 8 + //! int *d_in; // e.g., [1, 2, 3, 4, 5, 6, 7, 8] + //! char *d_flags; // e.g., [1, 0, 0, 1, 0, 1, 1, 0] + //! int *d_out; // e.g., [ , , , , , , , ] + //! int *d_num_selected_out; // e.g., [ ] + //! ... + //! + //! // Determine temporary device storage requirements + //! void *d_temp_storage = nullptr; + //! std::size_t temp_storage_bytes = 0; + //! cub::DevicePartition::Flagged( + //! d_temp_storage, temp_storage_bytes, + //! d_in, d_flags, d_out, d_num_selected_out, num_items); + //! + //! // Allocate temporary storage + //! cudaMalloc(&d_temp_storage, temp_storage_bytes); + //! + //! // Run selection + //! cub::DevicePartition::Flagged( + //! d_temp_storage, temp_storage_bytes, + //! d_in, d_flags, d_out, d_num_selected_out, num_items); + //! + //! // d_out <-- [1, 4, 6, 7, 8, 5, 3, 2] + //! // d_num_selected_out <-- [4] + //! + //! @endrst + //! + //! @tparam InputIteratorT + //! **[inferred]** Random-access input iterator type for reading input items @iterator + //! + //! @tparam FlagIterator + //! **[inferred]** Random-access input iterator type for reading selection flags @iterator + //! + //! @tparam OutputIteratorT + //! **[inferred]** Random-access output iterator type for writing output items @iterator + //! + //! @tparam NumSelectedIteratorT + //! **[inferred]** Output iterator type for recording the number of items selected @iterator + //! + //! @tparam NumItemsT + //! **[inferred]** Type of num_items + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work is done. + //! + //! 
@param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in] d_in + //! Pointer to the input sequence of data items + //! + //! @param[in] d_flags + //! Pointer to the input sequence of selection flags + //! + //! @param[out] d_out + //! Pointer to the output sequence of partitioned data items + //! + //! @param[out] d_num_selected_out + //! Pointer to the output total number of items selected (i.e., the + //! offset of the unselected partition) + //! + //! @param[in] num_items + //! Total number of items to select from + //! + //! @param[in] stream + //! @rst + //! **[optional]** CUDA stream to launch kernels within. Default is stream\ :sub:`0`. + //! @endrst + template + CUB_RUNTIME_FUNCTION _CCCL_FORCEINLINE static cudaError_t Flagged( + void* d_temp_storage, + size_t& temp_storage_bytes, + InputIteratorT d_in, + FlagIterator d_flags, + OutputIteratorT d_out, + NumSelectedIteratorT d_num_selected_out, + NumItemsT num_items, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, "cub::DevicePartition::Flagged"); + using ChooseOffsetT = detail::choose_signed_offset; + using OffsetT = typename ChooseOffsetT::type; // Signed integer type for global offsets + using SelectOp = NullType; // Selection op (not used) + using EqualityOp = NullType; // Equality operator (not used) + using DispatchSelectIfT = + DispatchSelectIf; + + // Check if the number of items exceeds the range covered by the selected signed offset type + cudaError_t error = ChooseOffsetT::is_exceeding_offset_type(num_items); + if (error) + { + return error; + } + + return DispatchSelectIfT::Dispatch( + d_temp_storage, + temp_storage_bytes, + d_in, + d_flags, + d_out, + d_num_selected_out, + SelectOp{}, + EqualityOp{}, + num_items, + stream); + } + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + template + CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION _CCCL_FORCEINLINE static cudaError_t 
Flagged( + void* d_temp_storage, + size_t& temp_storage_bytes, + InputIteratorT d_in, + FlagIterator d_flags, + OutputIteratorT d_out, + NumSelectedIteratorT d_num_selected_out, + NumItemsT num_items, + cudaStream_t stream, + bool debug_synchronous) + { + CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG + + return Flagged( + d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, stream); + } +#endif // _CCCL_DOXYGEN_INVOKED + + //! @rst + //! Uses the ``select_op`` functor to split the corresponding items from ``d_in`` into + //! a partitioned sequence ``d_out``. The total number of items copied into the first partition is written + //! to ``d_num_selected_out``. + //! + //! - Copies of the selected items are compacted into ``d_out`` and maintain + //! their original relative ordering, however copies of the unselected + //! items are compacted into the rear of ``d_out`` in reverse order. + //! - The range ``[d_out, d_out + num_items)`` shall not overlap + //! ``[d_in, d_in + num_items)`` in any way. + //! - @devicestorage + //! + //! Snippet + //! +++++++++++++++++++++++++++++++++++++++++++++ + //! + //! The code snippet below illustrates the compaction of items selected from an ``int`` device vector. + //! + //! .. code-block:: c++ + //! + //! #include + //! // or equivalently + //! + //! // Functor type for selecting values less than some criteria + //! struct LessThan + //! { + //! int compare; + //! + //! CUB_RUNTIME_FUNCTION __forceinline__ + //! explicit LessThan(int compare) : compare(compare) {} + //! + //! CUB_RUNTIME_FUNCTION __forceinline__ + //! bool operator()(const int &a) const + //! { + //! return (a < compare); + //! } + //! }; + //! + //! // Declare, allocate, and initialize device-accessible pointers for + //! // input and output + //! int num_items; // e.g., 8 + //! int *d_in; // e.g., [0, 2, 3, 9, 5, 2, 81, 8] + //! int *d_out; // e.g., [ , , , , , , , ] + //! int *d_num_selected_out; // e.g., [ ] + //! 
LessThan select_op(7); + //! ... + //! + //! // Determine temporary device storage requirements + //! void *d_temp_storage = nullptr; + //! std::size_t temp_storage_bytes = 0; + //! cub::DevicePartition::If( + //! d_temp_storage, temp_storage_bytes, + //! d_in, d_out, d_num_selected_out, num_items, select_op); + //! + //! // Allocate temporary storage + //! cudaMalloc(&d_temp_storage, temp_storage_bytes); + //! + //! // Run selection + //! cub::DevicePartition::If( + //! d_temp_storage, temp_storage_bytes, + //! d_in, d_out, d_num_selected_out, num_items, select_op); + //! + //! // d_out <-- [0, 2, 3, 5, 2, 8, 81, 9] + //! // d_num_selected_out <-- [5] + //! + //! @endrst + //! + //! @tparam InputIteratorT + //! **[inferred]** Random-access input iterator type for reading input items @iterator + //! + //! @tparam OutputIteratorT + //! **[inferred]** Random-access output iterator type for writing output items @iterator + //! + //! @tparam NumSelectedIteratorT + //! **[inferred]** Output iterator type for recording the number of items selected @iterator + //! + //! @tparam SelectOp + //! **[inferred]** Selection functor type having member `bool operator()(const T &a)` + //! + //! @tparam NumItemsT + //! **[inferred]** Type of num_items + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of ``d_temp_storage`` allocation + //! + //! @param[in] d_in + //! Pointer to the input sequence of data items + //! + //! @param[out] d_out + //! Pointer to the output sequence of partitioned data items + //! + //! @param[out] d_num_selected_out + //! Pointer to the output total number of items selected (i.e., the offset of the unselected partition) + //! + //! @param[in] num_items + //! Total number of items to select from + //! + //! 
@param[in] select_op + //! Unary selection operator + //! + //! @param[in] stream + //! @rst + //! **[optional]** CUDA stream to launch kernels within. Default is stream\ :sub:`0`. + //! @endrst + template + CUB_RUNTIME_FUNCTION _CCCL_FORCEINLINE static cudaError_t + If(void* d_temp_storage, + size_t& temp_storage_bytes, + InputIteratorT d_in, + OutputIteratorT d_out, + NumSelectedIteratorT d_num_selected_out, + NumItemsT num_items, + SelectOp select_op, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, "cub::DevicePartition::If"); + using ChooseOffsetT = detail::choose_signed_offset; + using OffsetT = typename ChooseOffsetT::type; // Signed integer type for global offsets + using FlagIterator = NullType*; // FlagT iterator type (not used) + using EqualityOp = NullType; // Equality operator (not used) + + // Check if the number of items exceeds the range covered by the selected signed offset type + cudaError_t error = ChooseOffsetT::is_exceeding_offset_type(num_items); + if (error) + { + return error; + } + + using DispatchSelectIfT = + DispatchSelectIf; + + return DispatchSelectIfT::Dispatch( + d_temp_storage, + temp_storage_bytes, + d_in, + nullptr, + d_out, + d_num_selected_out, + select_op, + EqualityOp{}, + num_items, + stream); + } + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + template + CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION _CCCL_FORCEINLINE static cudaError_t + If(void* d_temp_storage, + size_t& temp_storage_bytes, + InputIteratorT d_in, + OutputIteratorT d_out, + NumSelectedIteratorT d_num_selected_out, + NumItemsT num_items, + SelectOp select_op, + cudaStream_t stream, + bool debug_synchronous) + { + CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG + + return If( + d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op, stream); + } +#endif // _CCCL_DOXYGEN_INVOKED + +private: + template + friend class DispatchSegmentedSort; + + // Internal version without NVTX 
range + template + CUB_RUNTIME_FUNCTION _CCCL_FORCEINLINE static cudaError_t IfNoNVTX( + void* d_temp_storage, + std::size_t& temp_storage_bytes, + InputIteratorT d_in, + FirstOutputIteratorT d_first_part_out, + SecondOutputIteratorT d_second_part_out, + UnselectedOutputIteratorT d_unselected_out, + NumSelectedIteratorT d_num_selected_out, + int num_items, + SelectFirstPartOp select_first_part_op, + SelectSecondPartOp select_second_part_op, + cudaStream_t stream = 0) + { + using OffsetT = int; + using DispatchThreeWayPartitionIfT = DispatchThreeWayPartitionIf< + InputIteratorT, + FirstOutputIteratorT, + SecondOutputIteratorT, + UnselectedOutputIteratorT, + NumSelectedIteratorT, + SelectFirstPartOp, + SelectSecondPartOp, + OffsetT>; + + return DispatchThreeWayPartitionIfT::Dispatch( + d_temp_storage, + temp_storage_bytes, + d_in, + d_first_part_out, + d_second_part_out, + d_unselected_out, + d_num_selected_out, + select_first_part_op, + select_second_part_op, + num_items, + stream); + } + +public: + //! @rst + //! Uses two functors to split the corresponding items from ``d_in`` into a three partitioned sequences + //! ``d_first_part_out``, ``d_second_part_out``, and ``d_unselected_out``. + //! The total number of items copied into the first partition is written + //! to ``d_num_selected_out[0]``, while the total number of items copied into the second partition is written + //! to ``d_num_selected_out[1]``. + //! + //! - Copies of the items selected by ``select_first_part_op`` are compacted + //! into ``d_first_part_out`` and maintain their original relative ordering. + //! - Copies of the items selected by ``select_second_part_op`` are compacted + //! into ``d_second_part_out`` and maintain their original relative ordering. + //! - Copies of the unselected items are compacted into the ``d_unselected_out`` in reverse order. + //! - The ranges ``[d_out, d_out + num_items)``, + //! ``[d_first_part_out, d_first_part_out + d_num_selected_out[0])``, + //! 
``[d_second_part_out, d_second_part_out + d_num_selected_out[1])``, + //! ``[d_unselected_out, d_unselected_out + num_items - d_num_selected_out[0] - d_num_selected_out[1])``, + //! shall not overlap in any way. + //! + //! Snippet + //! +++++++++++++++++++++++++++++++++++++++++++++ + //! + //! The code snippet below illustrates how this algorithm can partition an + //! input vector into small, medium, and large items so that the relative + //! order of items remain deterministic. + //! + //! Let's consider any value that doesn't exceed six a small one. On the + //! other hand, any value that exceeds 50 will be considered a large one. + //! Since the value used to define a small part doesn't match one that + //! defines the large part, the intermediate segment is implied. + //! + //! These definitions partition a value space into three categories. We want + //! to preserve the order of items in which they appear in the input vector. + //! Since the algorithm provides stable partitioning, this is possible. + //! + //! Since the number of items in each category is unknown beforehand, we need + //! three output arrays of num_items elements each. To reduce the memory + //! requirements, we can combine the output storage for two categories. + //! + //! Since each value falls precisely in one category, it's safe to add + //! "large" values into the head of the shared output vector and the "middle" + //! values into its tail. To add items into the tail of the output array, we + //! can use ``thrust::reverse_iterator``. + //! + //! .. code-block:: c++ + //! + //! #include + //! // or equivalently + //! + //! // Functor type for selecting values less than some criteria + //! struct LessThan + //! { + //! int compare; + //! + //! __host__ __device__ __forceinline__ + //! explicit LessThan(int compare) : compare(compare) {} + //! + //! __host__ __device__ __forceinline__ + //! bool operator()(const int &a) const + //! { + //! return a < compare; + //! } + //! }; + //! + //! 
// Functor type for selecting values greater than some criteria + //! struct GreaterThan + //! { + //! int compare; + //! + //! __host__ __device__ __forceinline__ + //! explicit GreaterThan(int compare) : compare(compare) {} + //! + //! __host__ __device__ __forceinline__ + //! bool operator()(const int &a) const + //! { + //! return a > compare; + //! } + //! }; + //! + //! // Declare, allocate, and initialize device-accessible pointers for + //! // input and output + //! int num_items; // e.g., 8 + //! int *d_in; // e.g., [0, 2, 3, 9, 5, 2, 81, 8] + //! int *d_large_and_unselected_out; // e.g., [ , , , , , , , ] + //! int *d_small_out; // e.g., [ , , , , , , , ] + //! int *d_num_selected_out; // e.g., [ , ] + //! thrust::reverse_iterator unselected_out(d_large_and_unselected_out + num_items); + //! LessThan small_items_selector(7); + //! GreaterThan large_items_selector(50); + //! ... + //! + //! // Determine temporary device storage requirements + //! void *d_temp_storage = nullptr; + //! std::size_t temp_storage_bytes = 0; + //! cub::DevicePartition::If( + //! d_temp_storage, temp_storage_bytes, + //! d_in, d_large_and_medium_out, d_small_out, unselected_out, + //! d_num_selected_out, num_items, + //! large_items_selector, small_items_selector); + //! + //! // Allocate temporary storage + //! cudaMalloc(&d_temp_storage, temp_storage_bytes); + //! + //! // Run selection + //! cub::DevicePartition::If( + //! d_temp_storage, temp_storage_bytes, + //! d_in, d_large_and_medium_out, d_small_out, unselected_out, + //! d_num_selected_out, num_items, + //! large_items_selector, small_items_selector); + //! + //! // d_large_and_unselected_out <-- [ 81, , , , , , 8, 9 ] + //! // d_small_out <-- [ 0, 2, 3, 5, 2, , , ] + //! // d_num_selected_out <-- [ 1, 5 ] + //! + //! @endrst + //! + //! @tparam InputIteratorT + //! **[inferred]** Random-access input iterator type for reading input items @iterator + //! + //! @tparam FirstOutputIteratorT + //! 
**[inferred]** Random-access output iterator type for writing output + //! items selected by first operator @iterator + //! + //! @tparam SecondOutputIteratorT + //! **[inferred]** Random-access output iterator type for writing output + //! items selected by second operator @iterator + //! + //! @tparam UnselectedOutputIteratorT + //! **[inferred]** Random-access output iterator type for writing + //! unselected items @iterator + //! + //! @tparam NumSelectedIteratorT + //! **[inferred]** Output iterator type for recording the number of items + //! selected @iterator + //! + //! @tparam SelectFirstPartOp + //! **[inferred]** Selection functor type having member `bool operator()(const T &a)` + //! + //! @tparam SelectSecondPartOp + //! **[inferred]** Selection functor type having member `bool operator()(const T &a)` + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in] d_in + //! Pointer to the input sequence of data items + //! + //! @param[out] d_first_part_out + //! Pointer to the output sequence of data items selected by `select_first_part_op` + //! + //! @param[out] d_second_part_out + //! Pointer to the output sequence of data items selected by `select_second_part_op` + //! + //! @param[out] d_unselected_out + //! Pointer to the output sequence of unselected data items + //! + //! @param[out] d_num_selected_out + //! Pointer to the output array with two elements, where total number of + //! items selected by `select_first_part_op` is stored as + //! `d_num_selected_out[0]` and total number of items selected by + //! `select_second_part_op` is stored as `d_num_selected_out[1]`, + //! respectively + //! + //! @param[in] num_items + //! Total number of items to select from + //! + //! 
@param[in] select_first_part_op + //! Unary selection operator to select `d_first_part_out` + //! + //! @param[in] select_second_part_op + //! Unary selection operator to select `d_second_part_out` + //! + //! @param[in] stream + //! @rst + //! **[optional]** CUDA stream to launch kernels within. Default is stream\ :sub:`0`. + //! @endrst + template + CUB_RUNTIME_FUNCTION _CCCL_FORCEINLINE static cudaError_t + If(void* d_temp_storage, + std::size_t& temp_storage_bytes, + InputIteratorT d_in, + FirstOutputIteratorT d_first_part_out, + SecondOutputIteratorT d_second_part_out, + UnselectedOutputIteratorT d_unselected_out, + NumSelectedIteratorT d_num_selected_out, + int num_items, + SelectFirstPartOp select_first_part_op, + SelectSecondPartOp select_second_part_op, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, "cub::DevicePartition::If"); + return IfNoNVTX( + d_temp_storage, + temp_storage_bytes, + d_in, + d_first_part_out, + d_second_part_out, + d_unselected_out, + d_num_selected_out, + num_items, + select_first_part_op, + select_second_part_op, + stream); + } + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + template + CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION _CCCL_FORCEINLINE static cudaError_t + If(void* d_temp_storage, + std::size_t& temp_storage_bytes, + InputIteratorT d_in, + FirstOutputIteratorT d_first_part_out, + SecondOutputIteratorT d_second_part_out, + UnselectedOutputIteratorT d_unselected_out, + NumSelectedIteratorT d_num_selected_out, + int num_items, + SelectFirstPartOp select_first_part_op, + SelectSecondPartOp select_second_part_op, + cudaStream_t stream, + bool debug_synchronous) + { + CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG + + return If( + d_temp_storage, + temp_storage_bytes, + d_in, + d_first_part_out, + d_second_part_out, + d_unselected_out, + d_num_selected_out, + num_items, + select_first_part_op, + select_second_part_op, + stream); + } +#endif // _CCCL_DOXYGEN_INVOKED +}; + 
+CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/device/device_radix_sort.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/device/device_radix_sort.cuh new file mode 100644 index 0000000000000000000000000000000000000000..9a099ee3cb9d524cd9a207f6d39acd5540a10b37 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/device/device_radix_sort.cuh @@ -0,0 +1,3632 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +//! @file +//! cub::DeviceRadixSort provides device-wide, parallel operations for computing a radix sort across a sequence of data +//! items residing within device-accessible memory. + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include + +#include + +CUB_NAMESPACE_BEGIN + +//! @brief DeviceRadixSort provides device-wide, parallel operations for +//! computing a radix sort across a sequence of data items residing +//! within device-accessible memory. ![](sorting_logo.png) +//! +//! @par Overview +//! The [*radix sorting method*](http://en.wikipedia.org/wiki/Radix_sort) +//! arranges items into ascending (or descending) order. The algorithm relies +//! upon a positional representation for keys, i.e., each key is comprised of an +//! ordered sequence of symbols (e.g., digits, characters, etc.) specified from +//! least-significant to most-significant. For a given input sequence of keys +//! and a set of rules specifying a total ordering of the symbolic alphabet, the +//! radix sorting method produces a lexicographic ordering of those keys. +//! +//! @par Supported Types +//! 
DeviceRadixSort can sort all of the built-in C++ numeric primitive types +//! (`unsigned char`, `int`, `double`, etc.) as well as CUDA's `__half` +//! and `__nv_bfloat16` 16-bit floating-point types. User-defined types are +//! supported as long as decomposer object is provided. +//! +//! @par Floating-Point Special Cases +//! +//! - Positive and negative zeros are considered equivalent, and will be treated +//! as such in the output. +//! - No special handling is implemented for NaN values; these are sorted +//! according to their bit representations after any transformations. +//! +//! @par Transformations +//! Although the direct radix sorting method can only be applied to unsigned +//! integral types, DeviceRadixSort is able to sort signed and floating-point +//! types via simple bit-wise transformations that ensure lexicographic key +//! ordering. Additional transformations occur for descending sorts. These +//! transformations must be considered when restricting the +//! `[begin_bit, end_bit)` range, as the bitwise transformations will occur +//! before the bit-range truncation. +//! +//! Any transformations applied to the keys prior to sorting are reversed +//! while writing to the final output buffer. +//! +//! @par Type Specific Bitwise Transformations +//! To convert the input values into a radix-sortable bitwise representation, +//! the following transformations take place prior to sorting: +//! +//! - For unsigned integral values, the keys are used directly. +//! - For signed integral values, the sign bit is inverted. +//! - For positive floating point values, the sign bit is inverted. +//! - For negative floating point values, the full key is inverted. +//! +//! For floating point types, positive and negative zero are a special case and +//! will be considered equivalent during sorting. +//! +//! @par Descending Sort Bitwise Transformations +//! If descending sort is used, the keys are inverted after performing any +//! 
type-specific transformations, and the resulting keys are sorted in ascending +//! order. +//! +//! @par Stability +//! DeviceRadixSort is stable. For floating-point types, `-0.0` and `+0.0` are +//! considered equal and appear in the result in the same order as they appear in +//! the input. +//! +//! @par Usage Considerations +//! @cdp_class{DeviceRadixSort} +//! +//! @par Performance +//! @linear_performance{radix sort} The following chart illustrates +//! DeviceRadixSort::SortKeys performance across different CUDA architectures +//! for uniform-random `uint32` keys. +//! @plots_below +//! +//! @image html lsb_radix_sort_int32_keys.png +struct DeviceRadixSort +{ +private: + template + CUB_RUNTIME_FUNCTION static cudaError_t custom_radix_sort( + ::cuda::std::false_type, + void* d_temp_storage, + size_t& temp_storage_bytes, + bool is_overwrite_okay, + DoubleBuffer& d_keys, + DoubleBuffer& d_values, + NumItemsT num_items, + DecomposerT decomposer, + int begin_bit, + int end_bit, + cudaStream_t stream); + + template + CUB_RUNTIME_FUNCTION static cudaError_t custom_radix_sort( + ::cuda::std::true_type, + void* d_temp_storage, + size_t& temp_storage_bytes, + bool is_overwrite_okay, + DoubleBuffer& d_keys, + DoubleBuffer& d_values, + OffsetT num_items, + DecomposerT decomposer, + int begin_bit, + int end_bit, + cudaStream_t stream) + { + return DispatchRadixSort< + IsDescending, + KeyT, + ValueT, + OffsetT, + detail::radix::policy_hub, + DecomposerT>::Dispatch(d_temp_storage, + temp_storage_bytes, + d_keys, + d_values, + static_cast(num_items), + begin_bit, + end_bit, + is_overwrite_okay, + stream, + decomposer); + } + + template + CUB_RUNTIME_FUNCTION static cudaError_t custom_radix_sort( + ::cuda::std::false_type, + void* d_temp_storage, + size_t& temp_storage_bytes, + bool is_overwrite_okay, + DoubleBuffer& d_keys, + DoubleBuffer& d_values, + NumItemsT num_items, + DecomposerT decomposer, + cudaStream_t stream); + + template + CUB_RUNTIME_FUNCTION static cudaError_t 
custom_radix_sort( + ::cuda::std::true_type, + void* d_temp_storage, + size_t& temp_storage_bytes, + bool is_overwrite_okay, + DoubleBuffer& d_keys, + DoubleBuffer& d_values, + OffsetT num_items, + DecomposerT decomposer, + cudaStream_t stream) + { + constexpr int begin_bit = 0; + const int end_bit = detail::radix::traits_t::default_end_bit(decomposer); + + return DeviceRadixSort::custom_radix_sort( + ::cuda::std::true_type{}, + d_temp_storage, + temp_storage_bytes, + is_overwrite_okay, + d_keys, + d_values, + num_items, + decomposer, + begin_bit, + end_bit, + stream); + } + + // Name reported for NVTX ranges + _CCCL_HOST_DEVICE static constexpr auto GetName() -> const char* + { + return "cub::DeviceRadixSort"; + } + +public: + //! @name KeyT-value pairs + //@{ + + //! @brief Sorts key-value pairs into ascending order. + //! (`~2N` auxiliary storage required) + //! + //! @par + //! - The contents of the input data are not altered by the sorting operation. + //! - Pointers to contiguous memory must be used; iterators are not currently + //! supported. + //! - In-place operations are not supported. There must be no overlap between + //! any of the provided ranges: + //! - `[d_keys_in, d_keys_in + num_items)` + //! - `[d_keys_out, d_keys_out + num_items)` + //! - `[d_values_in, d_values_in + num_items)` + //! - `[d_values_out, d_values_out + num_items)` + //! - An optional bit subrange `[begin_bit, end_bit)` of differentiating key + //! bits can be specified. This can reduce overall sorting overhead and + //! yield a corresponding performance improvement. + //! - @devicestorageNP For sorting using only `O(P)` temporary storage, see + //! the sorting interface using DoubleBuffer wrappers below. + //! - @devicestorage + //! + //! @par Performance + //! The following charts illustrate saturated sorting performance across + //! different CUDA architectures for uniform-random `uint32, uint32` and + //! `uint64, uint64` pairs, respectively. + //! + //! 
@image html lsb_radix_sort_int32_pairs.png + //! @image html lsb_radix_sort_int64_pairs.png + //! + //! @par Snippet + //! The code snippet below illustrates the sorting of a device vector of `int` + //! keys with associated vector of `int` values. + //! @par + //! @code + //! #include + //! // or equivalently + //! + //! // Declare, allocate, and initialize device-accessible pointers + //! // for sorting data + //! int num_items; // e.g., 7 + //! int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9] + //! int *d_keys_out; // e.g., [ ... ] + //! int *d_values_in; // e.g., [0, 1, 2, 3, 4, 5, 6] + //! int *d_values_out; // e.g., [ ... ] + //! ... + //! + //! // Determine temporary device storage requirements + //! void *d_temp_storage = nullptr; + //! size_t temp_storage_bytes = 0; + //! cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, + //! d_keys_in, d_keys_out, d_values_in, d_values_out, num_items); + //! + //! // Allocate temporary storage + //! cudaMalloc(&d_temp_storage, temp_storage_bytes); + //! + //! // Run sorting operation + //! cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, + //! d_keys_in, d_keys_out, d_values_in, d_values_out, num_items); + //! + //! // d_keys_out <-- [0, 3, 5, 6, 7, 8, 9] + //! // d_values_out <-- [5, 4, 3, 1, 2, 0, 6] + //! @endcode + //! + //! @tparam KeyT + //! **[inferred]** KeyT type + //! + //! @tparam ValueT + //! **[inferred]** ValueT type + //! + //! @tparam NumItemsT + //! **[inferred]** Type of num_items + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work + //! is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in] d_keys_in + //! Pointer to the input data of key data to sort + //! + //! @param[out] d_keys_out + //! 
Pointer to the sorted output sequence of key data + //! + //! @param[in] d_values_in + //! Pointer to the corresponding input sequence of associated value items + //! + //! @param[out] d_values_out + //! Pointer to the correspondingly-reordered output sequence of associated + //! value items + //! + //! @param[in] num_items + //! Number of items to sort + //! + //! @param[in] begin_bit + //! **[optional]** The least-significant bit index (inclusive) needed for + //! key comparison + //! + //! @param[in] end_bit + //! **[optional]** The most-significant bit index (exclusive) needed for key + //! comparison (e.g., sizeof(unsigned int) * 8) + //! + //! @param[in] stream + //! **[optional]** CUDA stream to launch kernels within. + //! Default is stream0. + template + CUB_RUNTIME_FUNCTION static cudaError_t SortPairs( + void* d_temp_storage, + size_t& temp_storage_bytes, + const KeyT* d_keys_in, + KeyT* d_keys_out, + const ValueT* d_values_in, + ValueT* d_values_out, + NumItemsT num_items, + int begin_bit = 0, + int end_bit = sizeof(KeyT) * 8, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + // Unsigned integer type for global offsets. + using OffsetT = detail::choose_offset_t; + + // TODO API that doesn't accept decomposer should also contain a static + // assert that the key type is fundamental. + + // We cast away const-ness, but will *not* write to these arrays. + // `DispatchRadixSort::Dispatch` will allocate temporary storage and + // create a new double-buffer internally when the `is_overwrite_ok` flag + // is not set. 
+ constexpr bool is_overwrite_okay = false; + DoubleBuffer d_keys(const_cast(d_keys_in), d_keys_out); + DoubleBuffer d_values(const_cast(d_values_in), d_values_out); + + return DispatchRadixSort::Dispatch( + d_temp_storage, + temp_storage_bytes, + d_keys, + d_values, + static_cast(num_items), + begin_bit, + end_bit, + is_overwrite_okay, + stream); + } + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + template + CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t SortPairs( + void* d_temp_storage, + size_t& temp_storage_bytes, + const KeyT* d_keys_in, + KeyT* d_keys_out, + const ValueT* d_values_in, + ValueT* d_values_out, + NumItemsT num_items, + int begin_bit, + int end_bit, + cudaStream_t stream, + bool debug_synchronous) + { + CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG + + return SortPairs( + d_temp_storage, + temp_storage_bytes, + d_keys_in, + d_keys_out, + d_values_in, + d_values_out, + num_items, + begin_bit, + end_bit, + stream); + } +#endif + + //! @rst + //! Sorts key-value pairs into ascending order using :math:`\approx 2N` auxiliary storage. + //! + //! * The contents of the input data are not altered by the sorting operation. + //! * Pointers to contiguous memory must be used; iterators are not currently + //! supported. + //! * In-place operations are not supported. There must be no overlap between + //! any of the provided ranges: + //! + //! * ``[d_keys_in, d_keys_in + num_items)`` + //! * ``[d_keys_out, d_keys_out + num_items)`` + //! * ``[d_values_in, d_values_in + num_items)`` + //! * ``[d_values_out, d_values_out + num_items)`` + //! + //! * A bit subrange ``[begin_bit, end_bit)`` is provided to specify + //! differentiating key bits. This can reduce overall sorting overhead and + //! yield a corresponding performance improvement. + //! * @devicestorageNP For sorting using only :math:`O(P)` temporary storage, see + //! the sorting interface using DoubleBuffer wrappers below. + //! * @devicestorage + //! + //! 
Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin custom-type + //! :end-before: example-end custom-type + //! + //! The following snippet shows how to sort an array of ``custom_t`` objects + //! using ``cub::DeviceRadixSort::SortPairs``: + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin pairs-bits + //! :end-before: example-end pairs-bits + //! + //! @endrst + //! + //! @tparam KeyT + //! **[inferred]** KeyT type + //! + //! @tparam ValueT + //! **[inferred]** ValueT type + //! + //! @tparam NumItemsT + //! **[inferred]** Type of num_items + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! ``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! The leftmost element of the tuple is considered the most significant. + //! The call operator must not modify members of the key. + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work + //! is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in] d_keys_in + //! Pointer to the input data of key data to sort + //! + //! @param[out] d_keys_out + //! 
Pointer to the sorted output sequence of key data + //! + //! @param[in] d_values_in + //! Pointer to the corresponding input sequence of associated value items + //! + //! @param[out] d_values_out + //! Pointer to the correspondingly-reordered output sequence of associated + //! value items + //! + //! @param[in] num_items + //! Number of items to sort + //! + //! @param decomposer + //! Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. The call operator must not + //! modify members of the key. + //! + //! @param[in] begin_bit + //! **[optional]** The least-significant bit index (inclusive) needed for + //! key comparison + //! + //! @param[in] end_bit + //! **[optional]** The most-significant bit index (exclusive) needed for key + //! comparison (e.g., `(sizeof(float) + sizeof(long long int)) * 8`) + //! + //! @param[in] stream + //! **[optional]** CUDA stream to launch kernels within. + //! Default is stream0. + template + CUB_RUNTIME_FUNCTION static // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value, // + cudaError_t>::type + SortPairs(void* d_temp_storage, + size_t& temp_storage_bytes, + const KeyT* d_keys_in, + KeyT* d_keys_out, + const ValueT* d_values_in, + ValueT* d_values_out, + NumItemsT num_items, + DecomposerT decomposer, + int begin_bit, + int end_bit, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + // unsigned integer type for global offsets + using offset_t = detail::choose_offset_t; + using decomposer_check_t = detail::radix::decomposer_check_t; + + static_assert(decomposer_check_t::value, + "DecomposerT must be a callable object returning a tuple of references to " + "arithmetic types"); + + // We cast away const-ness, but will *not* write to these arrays. 
+ // `DispatchRadixSort::Dispatch` will allocate temporary storage and + // create a new double-buffer internally when the `is_overwrite_ok` flag + // is not set. + constexpr bool is_overwrite_okay = false; + constexpr bool is_descending = false; + DoubleBuffer d_keys(const_cast(d_keys_in), d_keys_out); + DoubleBuffer d_values(const_cast(d_values_in), d_values_out); + + return DeviceRadixSort::custom_radix_sort( + decomposer_check_t{}, + d_temp_storage, + temp_storage_bytes, + is_overwrite_okay, + d_keys, + d_values, + static_cast(num_items), + decomposer, + begin_bit, + end_bit, + stream); + } + + //! @rst + //! Sorts key-value pairs into ascending order using :math:`\approx 2N` auxiliary storage. + //! + //! * The contents of the input data are not altered by the sorting operation. + //! * Pointers to contiguous memory must be used; iterators are not currently + //! supported. + //! * In-place operations are not supported. There must be no overlap between + //! any of the provided ranges: + //! + //! * ``[d_keys_in, d_keys_in + num_items)`` + //! * ``[d_keys_out, d_keys_out + num_items)`` + //! * ``[d_values_in, d_values_in + num_items)`` + //! * ``[d_values_out, d_values_out + num_items)`` + //! + //! * @devicestorageNP For sorting using only :math:`O(P)` temporary storage, see + //! the sorting interface using DoubleBuffer wrappers below. + //! * @devicestorage + //! + //! Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin custom-type + //! 
:end-before: example-end custom-type + //! + //! The following snippet shows how to sort an array of ``custom_t`` objects + //! using ``cub::DeviceRadixSort::SortPairs``: + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin pairs + //! :end-before: example-end pairs + //! + //! @endrst + //! + //! @tparam KeyT + //! **[inferred]** KeyT type + //! + //! @tparam ValueT + //! **[inferred]** ValueT type + //! + //! @tparam NumItemsT + //! **[inferred]** Type of num_items + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! ``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! The leftmost element of the tuple is considered the most significant. + //! The call operator must not modify members of the key. + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work + //! is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in] d_keys_in + //! Pointer to the input data of key data to sort + //! + //! @param[out] d_keys_out + //! Pointer to the sorted output sequence of key data + //! + //! @param[in] d_values_in + //! Pointer to the corresponding input sequence of associated value items + //! + //! @param[out] d_values_out + //! Pointer to the correspondingly-reordered output sequence of associated + //! value items + //! + //! @param[in] num_items + //! Number of items to sort + //! + //! @param decomposer + //! Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. 
The call operator must not + //! modify members of the key. + //! + //! @param[in] stream + //! **[optional]** CUDA stream to launch kernels within. + //! Default is stream0. + template + CUB_RUNTIME_FUNCTION static // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value, // + cudaError_t>::type + SortPairs(void* d_temp_storage, + size_t& temp_storage_bytes, + const KeyT* d_keys_in, + KeyT* d_keys_out, + const ValueT* d_values_in, + ValueT* d_values_out, + NumItemsT num_items, + DecomposerT decomposer, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + // unsigned integer type for global offsets + using offset_t = detail::choose_offset_t; + using decomposer_check_t = detail::radix::decomposer_check_t; + + static_assert(decomposer_check_t::value, + "DecomposerT must be a callable object returning a tuple of references to " + "arithmetic types"); + + // We cast away const-ness, but will *not* write to these arrays. + // `DispatchRadixSort::Dispatch` will allocate temporary storage and + // create a new double-buffer internally when the `is_overwrite_ok` flag + // is not set. + constexpr bool is_overwrite_okay = false; + constexpr bool is_descending = false; + DoubleBuffer d_keys(const_cast(d_keys_in), d_keys_out); + DoubleBuffer d_values(const_cast(d_values_in), d_values_out); + + return DeviceRadixSort::custom_radix_sort( + decomposer_check_t{}, + d_temp_storage, + temp_storage_bytes, + is_overwrite_okay, + d_keys, + d_values, + static_cast(num_items), + decomposer, + stream); + } + + //! @brief Sorts key-value pairs into ascending order. + //! (`~N` auxiliary storage required) + //! + //! @par + //! - The sorting operation is given a pair of key buffers and a corresponding + //! pair of associated value buffers. Each pair is managed by a DoubleBuffer + //! structure that indicates which of the two buffers is "current" (and thus + //! contains the input data to be sorted). + //! 
- The contents of both buffers within each pair may be altered by the + //! sorting operation. + //! - In-place operations are not supported. There must be no overlap between + //! any of the provided ranges: + //! - `[d_keys.Current(), d_keys.Current() + num_items)` + //! - `[d_keys.Alternate(), d_keys.Alternate() + num_items)` + //! - `[d_values.Current(), d_values.Current() + num_items)` + //! - `[d_values.Alternate(), d_values.Alternate() + num_items)` + //! - Upon completion, the sorting operation will update the "current" + //! indicator within each DoubleBuffer wrapper to reference which of the two + //! buffers now contains the sorted output sequence (a function of the + //! number of key bits specified and the targeted device architecture). + //! - An optional bit subrange `[begin_bit, end_bit)` of differentiating key + //! bits can be specified. This can reduce overall sorting overhead and + //! yield a corresponding performance improvement. + //! - @devicestorageP + //! - @devicestorage + //! + //! @par Performance + //! The following charts illustrate saturated sorting performance across + //! different CUDA architectures for uniform-random `uint32, uint32` and + //! `uint64, uint64` pairs, respectively. + //! + //! @image html lsb_radix_sort_int32_pairs.png + //! @image html lsb_radix_sort_int64_pairs.png + //! + //! @par Snippet + //! The code snippet below illustrates the sorting of a device vector of `int` + //! keys with associated vector of `int` values. + //! @par + //! @code + //! #include + //! // or equivalently + //! + //! // Declare, allocate, and initialize device-accessible pointers for + //! // sorting data + //! int num_items; // e.g., 7 + //! int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9] + //! int *d_key_alt_buf; // e.g., [ ... ] + //! int *d_value_buf; // e.g., [0, 1, 2, 3, 4, 5, 6] + //! int *d_value_alt_buf; // e.g., [ ... ] + //! ... + //! + //! // Create a set of DoubleBuffers to wrap pairs of device pointers + //! 
cub::DoubleBuffer d_keys(d_key_buf, d_key_alt_buf); + //! cub::DoubleBuffer d_values(d_value_buf, d_value_alt_buf); + //! + //! // Determine temporary device storage requirements + //! void *d_temp_storage = nullptr; + //! size_t temp_storage_bytes = 0; + //! cub::DeviceRadixSort::SortPairs( + //! d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items); + //! + //! // Allocate temporary storage + //! cudaMalloc(&d_temp_storage, temp_storage_bytes); + //! + //! // Run sorting operation + //! cub::DeviceRadixSort::SortPairs( + //! d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items); + //! + //! // d_keys.Current() <-- [0, 3, 5, 6, 7, 8, 9] + //! // d_values.Current() <-- [5, 4, 3, 1, 2, 0, 6] + //! + //! @endcode + //! + //! @tparam KeyT + //! **[inferred]** KeyT type + //! + //! @tparam ValueT + //! **[inferred]** ValueT type + //! + //! @tparam NumItemsT + //! **[inferred]** Type of num_items + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in,out] d_keys + //! Reference to the double-buffer of keys whose "current" device-accessible + //! buffer contains the unsorted input keys and, upon return, is updated to + //! point to the sorted output keys + //! + //! @param[in,out] d_values + //! Double-buffer of values whose "current" device-accessible buffer + //! contains the unsorted input values and, upon return, is updated to point + //! to the sorted output values + //! + //! @param[in] num_items + //! Number of items to sort + //! + //! @param[in] begin_bit + //! **[optional]** The least-significant bit index (inclusive) needed for + //! key comparison + //! + //! @param[in] end_bit + //! **[optional]** The most-significant bit index (exclusive) needed for key + //! 
comparison (e.g., `sizeof(unsigned int) * 8`) + //! + //! @param[in] stream + //! **[optional]** CUDA stream to launch kernels within. + //! Default is stream0. + template + CUB_RUNTIME_FUNCTION static cudaError_t SortPairs( + void* d_temp_storage, + size_t& temp_storage_bytes, + DoubleBuffer& d_keys, + DoubleBuffer& d_values, + NumItemsT num_items, + int begin_bit = 0, + int end_bit = sizeof(KeyT) * 8, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + + // Unsigned integer type for global offsets. + using OffsetT = detail::choose_offset_t; + + constexpr bool is_overwrite_okay = true; + + return DispatchRadixSort::Dispatch( + d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, begin_bit, end_bit, is_overwrite_okay, stream); + } + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + template + CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t SortPairs( + void* d_temp_storage, + size_t& temp_storage_bytes, + DoubleBuffer& d_keys, + DoubleBuffer& d_values, + NumItemsT num_items, + int begin_bit, + int end_bit, + cudaStream_t stream, + bool debug_synchronous) + { + CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG + + return SortPairs( + d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, begin_bit, end_bit, stream); + } +#endif + + //! @rst + //! Sorts key-value pairs into ascending order using :math:`\approx N` auxiliary storage. + //! + //! * The sorting operation is given a pair of key buffers and a corresponding + //! pair of associated value buffers. Each pair is managed by a DoubleBuffer + //! structure that indicates which of the two buffers is "current" (and thus + //! contains the input data to be sorted). + //! * The contents of both buffers within each pair may be altered by the + //! sorting operation. + //! * In-place operations are not supported. There must be no overlap between + //! any of the provided ranges: + //! + //! 
- ``[d_keys.Current(), d_keys.Current() + num_items)`` + //! - ``[d_keys.Alternate(), d_keys.Alternate() + num_items)`` + //! - ``[d_values.Current(), d_values.Current() + num_items)`` + //! - ``[d_values.Alternate(), d_values.Alternate() + num_items)`` + //! + //! - Upon completion, the sorting operation will update the "current" + //! indicator within each DoubleBuffer wrapper to reference which of the two + //! buffers now contains the sorted output sequence (a function of the + //! number of key bits specified and the targeted device architecture). + //! - @devicestorageP + //! - @devicestorage + //! + //! Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin custom-type + //! :end-before: example-end custom-type + //! + //! The following snippet shows how to sort an array of ``custom_t`` objects + //! using ``cub::DeviceRadixSort::SortPairs``: + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin pairs-db + //! :end-before: example-end pairs-db + //! + //! @endrst + //! + //! @tparam KeyT + //! **[inferred]** KeyT type + //! + //! @tparam ValueT + //! **[inferred]** ValueT type + //! + //! @tparam NumItemsT + //! **[inferred]** Type of num_items + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! ``KeyT`` into a tuple of references to its constituent arithmetic types: + //! 
``::cuda::std::tuple operator()(KeyT &key)``. + //! The leftmost element of the tuple is considered the most significant. + //! The call operator must not modify members of the key. + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work + //! is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in,out] d_keys + //! Reference to the double-buffer of keys whose "current" device-accessible + //! buffer contains the unsorted input keys and, upon return, is updated to + //! point to the sorted output keys + //! + //! @param[in,out] d_values + //! Double-buffer of values whose "current" device-accessible buffer + //! contains the unsorted input values and, upon return, is updated to point + //! to the sorted output values + //! + //! @param[in] num_items + //! Number of items to sort + //! + //! @param decomposer + //! Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. The call operator must not + //! modify members of the key. + //! + //! @param[in] stream + //! **[optional]** CUDA stream to launch kernels within. + //! Default is stream0. 
+ template + CUB_RUNTIME_FUNCTION static // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value, // + cudaError_t>::type + SortPairs(void* d_temp_storage, + size_t& temp_storage_bytes, + DoubleBuffer& d_keys, + DoubleBuffer& d_values, + NumItemsT num_items, + DecomposerT decomposer, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + + // unsigned integer type for global offsets + using offset_t = detail::choose_offset_t; + using decomposer_check_t = detail::radix::decomposer_check_t; + + static_assert(decomposer_check_t::value, + "DecomposerT must be a callable object returning a tuple of references to " + "arithmetic types"); + + constexpr bool is_overwrite_okay = true; + constexpr bool is_descending = false; + + return DeviceRadixSort::custom_radix_sort( + decomposer_check_t{}, + d_temp_storage, + temp_storage_bytes, + is_overwrite_okay, + d_keys, + d_values, + static_cast(num_items), + decomposer, + stream); + } + + //! @rst + //! Sorts key-value pairs into ascending order using :math:`\approx N` auxiliary storage. + //! + //! * The sorting operation is given a pair of key buffers and a corresponding + //! pair of associated value buffers. Each pair is managed by a DoubleBuffer + //! structure that indicates which of the two buffers is "current" (and thus + //! contains the input data to be sorted). + //! * The contents of both buffers within each pair may be altered by the + //! sorting operation. + //! * In-place operations are not supported. There must be no overlap between + //! any of the provided ranges: + //! + //! - ``[d_keys.Current(), d_keys.Current() + num_items)`` + //! - ``[d_keys.Alternate(), d_keys.Alternate() + num_items)`` + //! - ``[d_values.Current(), d_values.Current() + num_items)`` + //! - ``[d_values.Alternate(), d_values.Alternate() + num_items)`` + //! + //! - Upon completion, the sorting operation will update the "current" + //! 
indicator within each DoubleBuffer wrapper to reference which of the two + //! buffers now contains the sorted output sequence (a function of the + //! number of key bits specified and the targeted device architecture). + //! - An optional bit subrange ``[begin_bit, end_bit)`` of differentiating key + //! bits can be specified. This can reduce overall sorting overhead and + //! yield a corresponding performance improvement. + //! - @devicestorageP + //! - @devicestorage + //! + //! Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin custom-type + //! :end-before: example-end custom-type + //! + //! The following snippet shows how to sort an array of ``custom_t`` objects + //! using ``cub::DeviceRadixSort::SortPairs``: + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin pairs-bits-db + //! :end-before: example-end pairs-bits-db + //! + //! @endrst + //! + //! @tparam KeyT + //! **[inferred]** KeyT type + //! + //! @tparam ValueT + //! **[inferred]** ValueT type + //! + //! @tparam NumItemsT + //! **[inferred]** Type of num_items + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! ``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! The leftmost element of the tuple is considered the most significant. + //! 
The call operator must not modify members of the key. + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work + //! is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in,out] d_keys + //! Reference to the double-buffer of keys whose "current" device-accessible + //! buffer contains the unsorted input keys and, upon return, is updated to + //! point to the sorted output keys + //! + //! @param[in,out] d_values + //! Double-buffer of values whose "current" device-accessible buffer + //! contains the unsorted input values and, upon return, is updated to point + //! to the sorted output values + //! + //! @param[in] num_items + //! Number of items to sort + //! + //! @param decomposer + //! Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. The call operator must not + //! modify members of the key. + //! + //! @param[in] begin_bit + //! **[optional]** The least-significant bit index (inclusive) needed for + //! key comparison + //! + //! @param[in] end_bit + //! **[optional]** The most-significant bit index (exclusive) needed for key + //! comparison (e.g., `(sizeof(float) + sizeof(long long int)) * 8`) + //! + //! @param[in] stream + //! **[optional]** CUDA stream to launch kernels within. + //! Default is stream0. 
+ template + CUB_RUNTIME_FUNCTION static // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value, // + cudaError_t>::type + SortPairs(void* d_temp_storage, + size_t& temp_storage_bytes, + DoubleBuffer& d_keys, + DoubleBuffer& d_values, + NumItemsT num_items, + DecomposerT decomposer, + int begin_bit, + int end_bit, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + + // unsigned integer type for global offsets + using offset_t = detail::choose_offset_t; + using decomposer_check_t = detail::radix::decomposer_check_t; + + static_assert(decomposer_check_t::value, + "DecomposerT must be a callable object returning a tuple of references to " + "arithmetic types"); + + constexpr bool is_overwrite_okay = true; + constexpr bool is_descending = false; + + return DeviceRadixSort::custom_radix_sort( + decomposer_check_t{}, + d_temp_storage, + temp_storage_bytes, + is_overwrite_okay, + d_keys, + d_values, + static_cast(num_items), + decomposer, + begin_bit, + end_bit, + stream); + } + + //! @brief Sorts key-value pairs into descending order. + //! (`~2N` auxiliary storage required). + //! + //! @par + //! - The contents of the input data are not altered by the sorting operation. + //! - Pointers to contiguous memory must be used; iterators are not currently + //! supported. + //! - In-place operations are not supported. There must be no overlap between + //! any of the provided ranges: + //! - `[d_keys_in, d_keys_in + num_items)` + //! - `[d_keys_out, d_keys_out + num_items)` + //! - `[d_values_in, d_values_in + num_items)` + //! - `[d_values_out, d_values_out + num_items)` + //! - An optional bit subrange `[begin_bit, end_bit)` of differentiating key + //! bits can be specified. This can reduce overall sorting overhead and + //! yield a corresponding performance improvement. + //! - @devicestorageNP For sorting using only `O(P)` temporary storage, see + //! 
the sorting interface using DoubleBuffer wrappers below. + //! - @devicestorage + //! + //! @par Performance + //! Performance is similar to DeviceRadixSort::SortPairs. + //! + //! @par Snippet + //! The code snippet below illustrates the sorting of a device vector of `int` + //! keys with associated vector of `int` values. + //! @par + //! @code + //! #include + //! // or equivalently + //! + //! // Declare, allocate, and initialize device-accessible pointers + //! // for sorting data + //! int num_items; // e.g., 7 + //! int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9] + //! int *d_keys_out; // e.g., [ ... ] + //! int *d_values_in; // e.g., [0, 1, 2, 3, 4, 5, 6] + //! int *d_values_out; // e.g., [ ... ] + //! ... + //! + //! // Determine temporary device storage requirements + //! void *d_temp_storage = nullptr; + //! size_t temp_storage_bytes = 0; + //! cub::DeviceRadixSort::SortPairsDescending( + //! d_temp_storage, temp_storage_bytes, + //! d_keys_in, d_keys_out, d_values_in, d_values_out, num_items); + //! + //! // Allocate temporary storage + //! cudaMalloc(&d_temp_storage, temp_storage_bytes); + //! + //! // Run sorting operation + //! cub::DeviceRadixSort::SortPairsDescending( + //! d_temp_storage, temp_storage_bytes, + //! d_keys_in, d_keys_out, d_values_in, d_values_out, num_items); + //! + //! // d_keys_out <-- [9, 8, 7, 6, 5, 3, 0] + //! // d_values_out <-- [6, 0, 2, 1, 3, 4, 5] + //! @endcode + //! + //! @tparam KeyT + //! **[inferred]** KeyT type + //! + //! @tparam ValueT + //! **[inferred]** ValueT type + //! + //! @tparam NumItemsT + //! **[inferred]** Type of num_items + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work + //! is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in] d_keys_in + //! 
Pointer to the input data of key data to sort + //! + //! @param[out] d_keys_out + //! Pointer to the sorted output sequence of key data + //! + //! @param[in] d_values_in + //! Pointer to the corresponding input sequence of associated value items + //! + //! @param[out] d_values_out + //! Pointer to the correspondingly-reordered output sequence of associated + //! value items + //! + //! @param[in] num_items + //! Number of items to sort + //! + //! @param[in] begin_bit + //! **[optional]** The least-significant bit index (inclusive) needed for + //! key comparison + //! + //! @param[in] end_bit + //! **[optional]** The most-significant bit index (exclusive) needed for key + //! comparison (e.g., `sizeof(unsigned int) * 8`) + //! + //! @param[in] stream + //! **[optional]** CUDA stream to launch kernels within. + //! Default is stream0. + template + CUB_RUNTIME_FUNCTION static cudaError_t SortPairsDescending( + void* d_temp_storage, + size_t& temp_storage_bytes, + const KeyT* d_keys_in, + KeyT* d_keys_out, + const ValueT* d_values_in, + ValueT* d_values_out, + NumItemsT num_items, + int begin_bit = 0, + int end_bit = sizeof(KeyT) * 8, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + + // Unsigned integer type for global offsets. + using OffsetT = detail::choose_offset_t; + + // We cast away const-ness, but will *not* write to these arrays. + // `DispatchRadixSort::Dispatch` will allocate temporary storage and + // create a new double-buffer internally when the `is_overwrite_ok` flag + // is not set. 
+ constexpr bool is_overwrite_okay = false; + DoubleBuffer d_keys(const_cast(d_keys_in), d_keys_out); + DoubleBuffer d_values(const_cast(d_values_in), d_values_out); + + return DispatchRadixSort::Dispatch( + d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, begin_bit, end_bit, is_overwrite_okay, stream); + } + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + template + CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t SortPairsDescending( + void* d_temp_storage, + size_t& temp_storage_bytes, + const KeyT* d_keys_in, + KeyT* d_keys_out, + const ValueT* d_values_in, + ValueT* d_values_out, + NumItemsT num_items, + int begin_bit, + int end_bit, + cudaStream_t stream, + bool debug_synchronous) + { + CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG + + return SortPairsDescending( + d_temp_storage, + temp_storage_bytes, + d_keys_in, + d_keys_out, + d_values_in, + d_values_out, + num_items, + begin_bit, + end_bit, + stream); + } +#endif + + //! @rst + //! Sorts key-value pairs into descending order using :math:`\approx 2N` auxiliary storage. + //! + //! * The contents of the input data are not altered by the sorting operation. + //! * Pointers to contiguous memory must be used; iterators are not currently + //! supported. + //! * In-place operations are not supported. There must be no overlap between + //! any of the provided ranges: + //! + //! * ``[d_keys_in, d_keys_in + num_items)`` + //! * ``[d_keys_out, d_keys_out + num_items)`` + //! * ``[d_values_in, d_values_in + num_items)`` + //! * ``[d_values_out, d_values_out + num_items)`` + //! + //! * A bit subrange ``[begin_bit, end_bit)`` is provided to specify + //! differentiating key bits. This can reduce overall sorting overhead and + //! yield a corresponding performance improvement. + //! * @devicestorageNP For sorting using only :math:`O(P)` temporary storage, see + //! the sorting interface using DoubleBuffer wrappers below. + //! * @devicestorage + //! + //! 
Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin custom-type + //! :end-before: example-end custom-type + //! + //! The following snippet shows how to sort an array of ``custom_t`` objects + //! using ``cub::DeviceRadixSort::SortPairsDescending``: + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin pairs-descending-bits + //! :end-before: example-end pairs-descending-bits + //! + //! @endrst + //! + //! @tparam KeyT + //! **[inferred]** KeyT type + //! + //! @tparam ValueT + //! **[inferred]** ValueT type + //! + //! @tparam NumItemsT + //! **[inferred]** Type of num_items + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! ``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! The leftmost element of the tuple is considered the most significant. + //! The call operator must not modify members of the key. + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work + //! is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in] d_keys_in + //! Pointer to the input data of key data to sort + //! + //! 
@param[out] d_keys_out + //! Pointer to the sorted output sequence of key data + //! + //! @param[in] d_values_in + //! Pointer to the corresponding input sequence of associated value items + //! + //! @param[out] d_values_out + //! Pointer to the correspondingly-reordered output sequence of associated + //! value items + //! + //! @param[in] num_items + //! Number of items to sort + //! + //! @param decomposer + //! Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. The call operator must not + //! modify members of the key. + //! + //! @param[in] begin_bit + //! **[optional]** The least-significant bit index (inclusive) needed for + //! key comparison + //! + //! @param[in] end_bit + //! **[optional]** The most-significant bit index (exclusive) needed for key + //! comparison (e.g., `(sizeof(float) + sizeof(long long int)) * 8`) + //! + //! @param[in] stream + //! **[optional]** CUDA stream to launch kernels within. + //! Default is stream0. + template + CUB_RUNTIME_FUNCTION static // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value, // + cudaError_t>::type + SortPairsDescending( + void* d_temp_storage, + size_t& temp_storage_bytes, + const KeyT* d_keys_in, + KeyT* d_keys_out, + const ValueT* d_values_in, + ValueT* d_values_out, + NumItemsT num_items, + DecomposerT decomposer, + int begin_bit, + int end_bit, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + + // unsigned integer type for global offsets + using offset_t = detail::choose_offset_t; + using decomposer_check_t = detail::radix::decomposer_check_t; + + static_assert(decomposer_check_t::value, + "DecomposerT must be a callable object returning a tuple of references to " + "arithmetic types"); + + // We cast away const-ness, but will *not* write to these arrays. 
+ // `DispatchRadixSort::Dispatch` will allocate temporary storage and + // create a new double-buffer internally when the `is_overwrite_ok` flag + // is not set. + constexpr bool is_overwrite_okay = false; + constexpr bool is_descending = true; + DoubleBuffer d_keys(const_cast(d_keys_in), d_keys_out); + DoubleBuffer d_values(const_cast(d_values_in), d_values_out); + + return DeviceRadixSort::custom_radix_sort( + decomposer_check_t{}, + d_temp_storage, + temp_storage_bytes, + is_overwrite_okay, + d_keys, + d_values, + static_cast(num_items), + decomposer, + begin_bit, + end_bit, + stream); + } + + //! @rst + //! Sorts key-value pairs into descending order using :math:`\approx 2N` auxiliary storage. + //! + //! * The contents of the input data are not altered by the sorting operation. + //! * Pointers to contiguous memory must be used; iterators are not currently + //! supported. + //! * In-place operations are not supported. There must be no overlap between + //! any of the provided ranges: + //! + //! * ``[d_keys_in, d_keys_in + num_items)`` + //! * ``[d_keys_out, d_keys_out + num_items)`` + //! * ``[d_values_in, d_values_in + num_items)`` + //! * ``[d_values_out, d_values_out + num_items)`` + //! + //! * @devicestorageNP For sorting using only :math:`O(P)` temporary storage, see + //! the sorting interface using DoubleBuffer wrappers below. + //! * @devicestorage + //! + //! Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin custom-type + //! 
:end-before: example-end custom-type + //! + //! The following snippet shows how to sort an array of ``custom_t`` objects + //! using ``cub::DeviceRadixSort::SortPairsDescending``: + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin pairs-descending + //! :end-before: example-end pairs-descending + //! + //! @endrst + //! + //! @tparam KeyT + //! **[inferred]** KeyT type + //! + //! @tparam ValueT + //! **[inferred]** ValueT type + //! + //! @tparam NumItemsT + //! **[inferred]** Type of num_items + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! ``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! The leftmost element of the tuple is considered the most significant. + //! The call operator must not modify members of the key. + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work + //! is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in] d_keys_in + //! Pointer to the input data of key data to sort + //! + //! @param[out] d_keys_out + //! Pointer to the sorted output sequence of key data + //! + //! @param[in] d_values_in + //! Pointer to the corresponding input sequence of associated value items + //! + //! @param[out] d_values_out + //! Pointer to the correspondingly-reordered output sequence of associated + //! value items + //! + //! @param[in] num_items + //! Number of items to sort + //! + //! @param decomposer + //! Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! 
the tuple is considered the most significant. The call operator must not + //! modify members of the key. + //! + //! @param[in] stream + //! **[optional]** CUDA stream to launch kernels within. + //! Default is stream0. + template + CUB_RUNTIME_FUNCTION static // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value, // + cudaError_t>::type + SortPairsDescending( + void* d_temp_storage, + size_t& temp_storage_bytes, + const KeyT* d_keys_in, + KeyT* d_keys_out, + const ValueT* d_values_in, + ValueT* d_values_out, + NumItemsT num_items, + DecomposerT decomposer, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + + // unsigned integer type for global offsets + using offset_t = detail::choose_offset_t; + using decomposer_check_t = detail::radix::decomposer_check_t; + + static_assert(decomposer_check_t::value, + "DecomposerT must be a callable object returning a tuple of references to " + "arithmetic types"); + + // We cast away const-ness, but will *not* write to these arrays. + // `DispatchRadixSort::Dispatch` will allocate temporary storage and + // create a new double-buffer internally when the `is_overwrite_ok` flag + // is not set. + constexpr bool is_overwrite_okay = false; + constexpr bool is_descending = true; + DoubleBuffer d_keys(const_cast(d_keys_in), d_keys_out); + DoubleBuffer d_values(const_cast(d_values_in), d_values_out); + + return DeviceRadixSort::custom_radix_sort( + decomposer_check_t{}, + d_temp_storage, + temp_storage_bytes, + is_overwrite_okay, + d_keys, + d_values, + static_cast(num_items), + decomposer, + stream); + } + + //! @brief Sorts key-value pairs into descending order. + //! (`~N` auxiliary storage required). + //! + //! @par + //! - The sorting operation is given a pair of key buffers and a corresponding + //! pair of associated value buffers. Each pair is managed by a DoubleBuffer + //! structure that indicates which of the two buffers is "current" (and thus + //! 
contains the input data to be sorted). + //! - The contents of both buffers within each pair may be altered by the + //! sorting operation. + //! - In-place operations are not supported. There must be no overlap between + //! any of the provided ranges: + //! - `[d_keys.Current(), d_keys.Current() + num_items)` + //! - `[d_keys.Alternate(), d_keys.Alternate() + num_items)` + //! - `[d_values.Current(), d_values.Current() + num_items)` + //! - `[d_values.Alternate(), d_values.Alternate() + num_items)` + //! - Upon completion, the sorting operation will update the "current" + //! indicator within each DoubleBuffer wrapper to reference which of the two + //! buffers now contains the sorted output sequence (a function of the number + //! of key bits specified and the targeted device architecture). + //! - An optional bit subrange `[begin_bit, end_bit)` of differentiating key + //! bits can be specified. This can reduce overall sorting overhead and + //! yield a corresponding performance improvement. + //! - @devicestorageP + //! - @devicestorage + //! + //! @par Performance + //! Performance is similar to DeviceRadixSort::SortPairs. + //! + //! @par Snippet + //! The code snippet below illustrates the sorting of a device vector of `int` + //! keys with associated vector of `int` values. + //! @par + //! @code + //! #include + //! // or equivalently + //! + //! // Declare, allocate, and initialize device-accessible pointers + //! // for sorting data + //! int num_items; // e.g., 7 + //! int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9] + //! int *d_key_alt_buf; // e.g., [ ... ] + //! int *d_value_buf; // e.g., [0, 1, 2, 3, 4, 5, 6] + //! int *d_value_alt_buf; // e.g., [ ... ] + //! ... + //! + //! // Create a set of DoubleBuffers to wrap pairs of device pointers + //! cub::DoubleBuffer d_keys(d_key_buf, d_key_alt_buf); + //! cub::DoubleBuffer d_values(d_value_buf, d_value_alt_buf); + //! + //! // Determine temporary device storage requirements + //! 
void *d_temp_storage = nullptr; + //! size_t temp_storage_bytes = 0; + //! cub::DeviceRadixSort::SortPairsDescending( + //! d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items); + //! + //! // Allocate temporary storage + //! cudaMalloc(&d_temp_storage, temp_storage_bytes); + //! + //! // Run sorting operation + //! cub::DeviceRadixSort::SortPairsDescending( + //! d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items); + //! + //! // d_keys.Current() <-- [9, 8, 7, 6, 5, 3, 0] + //! // d_values.Current() <-- [6, 0, 2, 1, 3, 4, 5] + //! @endcode + //! + //! @tparam KeyT + //! **[inferred]** KeyT type + //! + //! @tparam ValueT + //! **[inferred]** ValueT type + //! + //! @tparam NumItemsT + //! **[inferred]** Type of num_items + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work + //! is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in,out] d_keys + //! Reference to the double-buffer of keys whose "current" device-accessible + //! buffer contains the unsorted input keys and, upon return, is updated to + //! point to the sorted output keys + //! + //! @param[in,out] d_values + //! Double-buffer of values whose "current" device-accessible buffer + //! contains the unsorted input values and, upon return, is updated to point + //! to the sorted output values + //! + //! @param[in] num_items + //! Number of items to sort + //! + //! @param[in] begin_bit + //! **[optional]** The least-significant bit index (inclusive) needed for + //! key comparison + //! + //! @param[in] end_bit + //! **[optional]** The most-significant bit index (exclusive) needed for key + //! comparison (e.g., `sizeof(unsigned int) * 8`) + //! + //! @param[in] stream + //! **[optional]** CUDA stream to launch kernels within. + //! Default is stream0. 
+ template + CUB_RUNTIME_FUNCTION static cudaError_t SortPairsDescending( + void* d_temp_storage, + size_t& temp_storage_bytes, + DoubleBuffer& d_keys, + DoubleBuffer& d_values, + NumItemsT num_items, + int begin_bit = 0, + int end_bit = sizeof(KeyT) * 8, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + + // Unsigned integer type for global offsets. + using OffsetT = detail::choose_offset_t; + + constexpr bool is_overwrite_okay = true; + + return DispatchRadixSort::Dispatch( + d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, begin_bit, end_bit, is_overwrite_okay, stream); + } + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + template + CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t SortPairsDescending( + void* d_temp_storage, + size_t& temp_storage_bytes, + DoubleBuffer& d_keys, + DoubleBuffer& d_values, + NumItemsT num_items, + int begin_bit, + int end_bit, + cudaStream_t stream, + bool debug_synchronous) + { + CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG + + return SortPairsDescending( + d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, begin_bit, end_bit, stream); + } +#endif + + //! @rst + //! Sorts key-value pairs into descending order using :math:`\approx N` auxiliary storage. + //! + //! * The sorting operation is given a pair of key buffers and a corresponding + //! pair of associated value buffers. Each pair is managed by a DoubleBuffer + //! structure that indicates which of the two buffers is "current" (and thus + //! contains the input data to be sorted). + //! * The contents of both buffers within each pair may be altered by the + //! sorting operation. + //! * In-place operations are not supported. There must be no overlap between + //! any of the provided ranges: + //! + //! - ``[d_keys.Current(), d_keys.Current() + num_items)`` + //! - ``[d_keys.Alternate(), d_keys.Alternate() + num_items)`` + //! 
- ``[d_values.Current(), d_values.Current() + num_items)`` + //! - ``[d_values.Alternate(), d_values.Alternate() + num_items)`` + //! + //! - Upon completion, the sorting operation will update the "current" + //! indicator within each DoubleBuffer wrapper to reference which of the two + //! buffers now contains the sorted output sequence (a function of the + //! number of key bits specified and the targeted device architecture). + //! - @devicestorageP + //! - @devicestorage + //! + //! Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin custom-type + //! :end-before: example-end custom-type + //! + //! The following snippet shows how to sort an array of ``custom_t`` objects + //! using ``cub::DeviceRadixSort::SortPairsDescending``: + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin pairs-descending-db + //! :end-before: example-end pairs-descending-db + //! + //! @endrst + //! + //! @tparam KeyT + //! **[inferred]** KeyT type + //! + //! @tparam ValueT + //! **[inferred]** ValueT type + //! + //! @tparam NumItemsT + //! **[inferred]** Type of num_items + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! ``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! The leftmost element of the tuple is considered the most significant. + //! 
The call operator must not modify members of the key. + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work + //! is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in,out] d_keys + //! Reference to the double-buffer of keys whose "current" device-accessible + //! buffer contains the unsorted input keys and, upon return, is updated to + //! point to the sorted output keys + //! + //! @param[in,out] d_values + //! Double-buffer of values whose "current" device-accessible buffer + //! contains the unsorted input values and, upon return, is updated to point + //! to the sorted output values + //! + //! @param[in] num_items + //! Number of items to sort + //! + //! @param decomposer + //! Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. The call operator must not + //! modify members of the key. + //! + //! @param[in] stream + //! **[optional]** CUDA stream to launch kernels within. + //! Default is stream0. 
+ template + CUB_RUNTIME_FUNCTION static // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value, // + cudaError_t>::type + SortPairsDescending( + void* d_temp_storage, + size_t& temp_storage_bytes, + DoubleBuffer& d_keys, + DoubleBuffer& d_values, + NumItemsT num_items, + DecomposerT decomposer, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + + // unsigned integer type for global offsets + using offset_t = detail::choose_offset_t; + using decomposer_check_t = detail::radix::decomposer_check_t; + + static_assert(decomposer_check_t::value, + "DecomposerT must be a callable object returning a tuple of references to " + "arithmetic types"); + + constexpr bool is_overwrite_okay = true; + constexpr bool is_descending = true; + + return DeviceRadixSort::custom_radix_sort( + decomposer_check_t{}, + d_temp_storage, + temp_storage_bytes, + is_overwrite_okay, + d_keys, + d_values, + static_cast(num_items), + decomposer, + stream); + } + + //! @rst + //! Sorts key-value pairs into descending order using :math:`\approx N` auxiliary storage. + //! + //! * The sorting operation is given a pair of key buffers and a corresponding + //! pair of associated value buffers. Each pair is managed by a DoubleBuffer + //! structure that indicates which of the two buffers is "current" (and thus + //! contains the input data to be sorted). + //! * The contents of both buffers within each pair may be altered by the + //! sorting operation. + //! * In-place operations are not supported. There must be no overlap between + //! any of the provided ranges: + //! + //! - ``[d_keys.Current(), d_keys.Current() + num_items)`` + //! - ``[d_keys.Alternate(), d_keys.Alternate() + num_items)`` + //! - ``[d_values.Current(), d_values.Current() + num_items)`` + //! - ``[d_values.Alternate(), d_values.Alternate() + num_items)`` + //! + //! - Upon completion, the sorting operation will update the "current" + //! 
indicator within each DoubleBuffer wrapper to reference which of the two + //! buffers now contains the sorted output sequence (a function of the + //! number of key bits specified and the targeted device architecture). + //! - An optional bit subrange ``[begin_bit, end_bit)`` of differentiating key + //! bits can be specified. This can reduce overall sorting overhead and + //! yield a corresponding performance improvement. + //! - @devicestorageP + //! - @devicestorage + //! + //! Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin custom-type + //! :end-before: example-end custom-type + //! + //! The following snippet shows how to sort an array of ``custom_t`` objects + //! using ``cub::DeviceRadixSort::SortPairsDescending``: + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin pairs-descending-bits-db + //! :end-before: example-end pairs-descending-bits-db + //! + //! @endrst + //! + //! @tparam KeyT + //! **[inferred]** KeyT type + //! + //! @tparam ValueT + //! **[inferred]** ValueT type + //! + //! @tparam NumItemsT + //! **[inferred]** Type of num_items + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! ``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! The leftmost element of the tuple is considered the most significant. 
+ //! The call operator must not modify members of the key. + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work + //! is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in,out] d_keys + //! Reference to the double-buffer of keys whose "current" device-accessible + //! buffer contains the unsorted input keys and, upon return, is updated to + //! point to the sorted output keys + //! + //! @param[in,out] d_values + //! Double-buffer of values whose "current" device-accessible buffer + //! contains the unsorted input values and, upon return, is updated to point + //! to the sorted output values + //! + //! @param[in] num_items + //! Number of items to sort + //! + //! @param decomposer + //! Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. The call operator must not + //! modify members of the key. + //! + //! @param[in] begin_bit + //! **[optional]** The least-significant bit index (inclusive) needed for + //! key comparison + //! + //! @param[in] end_bit + //! **[optional]** The most-significant bit index (exclusive) needed for key + //! comparison (e.g., `(sizeof(float) + sizeof(long long int)) * 8`) + //! + //! @param[in] stream + //! **[optional]** CUDA stream to launch kernels within. + //! Default is stream0. 
+ template + CUB_RUNTIME_FUNCTION static // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value, // + cudaError_t>::type + SortPairsDescending( + void* d_temp_storage, + size_t& temp_storage_bytes, + DoubleBuffer& d_keys, + DoubleBuffer& d_values, + NumItemsT num_items, + DecomposerT decomposer, + int begin_bit, + int end_bit, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + + // unsigned integer type for global offsets + using offset_t = detail::choose_offset_t; + using decomposer_check_t = detail::radix::decomposer_check_t; + + static_assert(decomposer_check_t::value, + "DecomposerT must be a callable object returning a tuple of references to " + "arithmetic types"); + + constexpr bool is_overwrite_okay = true; + constexpr bool is_descending = true; + + return DeviceRadixSort::custom_radix_sort( + decomposer_check_t{}, + d_temp_storage, + temp_storage_bytes, + is_overwrite_okay, + d_keys, + d_values, + static_cast(num_items), + decomposer, + begin_bit, + end_bit, + stream); + } + + //@} end member group + /******************************************************************/ /** + * @name Keys-only + *********************************************************************/ + //@{ + + //! @brief Sorts keys into ascending order. + //! (`~2N` auxiliary storage required) + //! + //! @par + //! - The contents of the input data are not altered by the sorting operation. + //! - Pointers to contiguous memory must be used; iterators are not currently + //! supported. + //! - In-place operations are not supported. There must be no overlap between + //! any of the provided ranges: + //! - `[d_keys_in, d_keys_in + num_items)` + //! - `[d_keys_out, d_keys_out + num_items)` + //! - An optional bit subrange `[begin_bit, end_bit)` of differentiating key + //! bits can be specified. This can reduce overall sorting overhead and + //! yield a corresponding performance improvement. + //! 
- @devicestorageNP For sorting using only `O(P)` temporary storage, see + //! the sorting interface using DoubleBuffer wrappers below. + //! - @devicestorage + //! + //! @par Performance + //! The following charts illustrate saturated sorting performance across + //! different CUDA architectures for uniform-random `uint32` and `uint64` + //! keys, respectively. + //! + //! @image html lsb_radix_sort_int32_keys.png + //! @image html lsb_radix_sort_int64_keys.png + //! + //! @par Snippet + //! The code snippet below illustrates the sorting of a device vector of + //! `int` keys. + //! @par + //! @code + //! #include + //! // or equivalently + //! + //! // Declare, allocate, and initialize device-accessible pointers + //! // for sorting data + //! int num_items; // e.g., 7 + //! int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9] + //! int *d_keys_out; // e.g., [ ... ] + //! ... + //! + //! // Determine temporary device storage requirements + //! void *d_temp_storage = nullptr; + //! size_t temp_storage_bytes = 0; + //! cub::DeviceRadixSort::SortKeys( + //! d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, num_items); + //! + //! // Allocate temporary storage + //! cudaMalloc(&d_temp_storage, temp_storage_bytes); + //! + //! // Run sorting operation + //! cub::DeviceRadixSort::SortKeys( + //! d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, num_items); + //! + //! // d_keys_out <-- [0, 3, 5, 6, 7, 8, 9] + //! @endcode + //! + //! @tparam KeyT + //! **[inferred]** KeyT type + //! + //! @tparam NumItemsT + //! **[inferred]** Type of num_items + //! + //! @tparam NumItemsT + //! **[inferred]** Type of num_items + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work + //! is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! 
@param[in] d_keys_in + //! Pointer to the input data of key data to sort + //! + //! @param[out] d_keys_out + //! Pointer to the sorted output sequence of key data + //! + //! @param[in] num_items + //! Number of items to sort + //! + //! @param[in] begin_bit + //! **[optional]** The least-significant bit index (inclusive) needed for + //! key comparison + //! + //! @param[in] end_bit + //! **[optional]** The most-significant bit index (exclusive) needed for key + //! comparison (e.g., `sizeof(unsigned int) * 8`) + //! + //! @param[in] stream + //! **[optional]** CUDA stream to launch kernels within. + //! Default is stream0. + template + CUB_RUNTIME_FUNCTION static cudaError_t SortKeys( + void* d_temp_storage, + size_t& temp_storage_bytes, + const KeyT* d_keys_in, + KeyT* d_keys_out, + NumItemsT num_items, + int begin_bit = 0, + int end_bit = sizeof(KeyT) * 8, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + + // Unsigned integer type for global offsets. + using OffsetT = detail::choose_offset_t; + + // We cast away const-ness, but will *not* write to these arrays. + // `DispatchRadixSort::Dispatch` will allocate temporary storage and + // create a new double-buffer internally when the `is_overwrite_ok` flag + // is not set. + constexpr bool is_overwrite_okay = false; + DoubleBuffer d_keys(const_cast(d_keys_in), d_keys_out); + // Null value type + DoubleBuffer d_values; + + return DispatchRadixSort::Dispatch( + d_temp_storage, + temp_storage_bytes, + d_keys, + d_values, + static_cast(num_items), + begin_bit, + end_bit, + is_overwrite_okay, + stream); + } + + //! @rst + //! Sorts keys into ascending order using :math:`\approx 2N` auxiliary storage. + //! + //! * The contents of the input data are not altered by the sorting operation. + //! * Pointers to contiguous memory must be used; iterators are not currently + //! supported. + //! * In-place operations are not supported. There must be no overlap between + //! 
any of the provided ranges: + //! + //! * ``[d_keys_in, d_keys_in + num_items)`` + //! * ``[d_keys_out, d_keys_out + num_items)`` + //! + //! * A bit subrange ``[begin_bit, end_bit)`` is provided to specify + //! differentiating key bits. This can reduce overall sorting overhead and + //! yield a corresponding performance improvement. + //! * @devicestorageNP For sorting using only :math:`O(P)` temporary storage, see + //! the sorting interface using DoubleBuffer wrappers below. + //! * @devicestorage + //! + //! Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin custom-type + //! :end-before: example-end custom-type + //! + //! The following snippet shows how to sort an array of ``custom_t`` objects + //! using ``cub::DeviceRadixSort::SortKeys``: + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin keys-bits + //! :end-before: example-end keys-bits + //! + //! @endrst + //! + //! @tparam KeyT + //! **[inferred]** KeyT type + //! + //! @tparam NumItemsT + //! **[inferred]** Type of num_items + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! ``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! The leftmost element of the tuple is considered the most significant. + //! The call operator must not modify members of the key. + //! + //! 
@param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work + //! is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in] d_keys_in + //! Pointer to the input data of key data to sort + //! + //! @param[out] d_keys_out + //! Pointer to the sorted output sequence of key data + //! + //! @param[in] num_items + //! Number of items to sort + //! + //! @param decomposer + //! Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. The call operator must not + //! modify members of the key. + //! + //! @param[in] begin_bit + //! **[optional]** The least-significant bit index (inclusive) needed for + //! key comparison + //! + //! @param[in] end_bit + //! **[optional]** The most-significant bit index (exclusive) needed for key + //! comparison (e.g., `(sizeof(float) + sizeof(long long int)) * 8`) + //! + //! @param[in] stream + //! **[optional]** CUDA stream to launch kernels within. + //! Default is stream0. 
+ template + CUB_RUNTIME_FUNCTION static // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value, // + cudaError_t>::type + SortKeys(void* d_temp_storage, + size_t& temp_storage_bytes, + const KeyT* d_keys_in, + KeyT* d_keys_out, + NumItemsT num_items, + DecomposerT decomposer, + int begin_bit, + int end_bit, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + + // unsigned integer type for global offsets + using offset_t = detail::choose_offset_t; + using decomposer_check_t = detail::radix::decomposer_check_t; + + static_assert(decomposer_check_t::value, + "DecomposerT must be a callable object returning a tuple of references to " + "arithmetic types"); + + // We cast away const-ness, but will *not* write to these arrays. + // `DispatchRadixSort::Dispatch` will allocate temporary storage and + // create a new double-buffer internally when the `is_overwrite_ok` flag + // is not set. + constexpr bool is_overwrite_okay = false; + constexpr bool is_descending = false; + DoubleBuffer d_keys(const_cast(d_keys_in), d_keys_out); + DoubleBuffer d_values; + + return DeviceRadixSort::custom_radix_sort( + decomposer_check_t{}, + d_temp_storage, + temp_storage_bytes, + is_overwrite_okay, + d_keys, + d_values, + static_cast(num_items), + decomposer, + begin_bit, + end_bit, + stream); + } + + //! @rst + //! Sorts keys into ascending order using :math:`\approx 2N` auxiliary storage. + //! + //! * The contents of the input data are not altered by the sorting operation. + //! * Pointers to contiguous memory must be used; iterators are not currently + //! supported. + //! * In-place operations are not supported. There must be no overlap between + //! any of the provided ranges: + //! + //! * ``[d_keys_in, d_keys_in + num_items)`` + //! * ``[d_keys_out, d_keys_out + num_items)`` + //! + //! * An optional bit subrange ``[begin_bit, end_bit)`` of differentiating key + //! bits can be specified. 
This can reduce overall sorting overhead and + //! yield a corresponding performance improvement. + //! * @devicestorageNP For sorting using only :math:`O(P)` temporary storage, see + //! the sorting interface using DoubleBuffer wrappers below. + //! * @devicestorage + //! + //! Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin custom-type + //! :end-before: example-end custom-type + //! + //! The following snippet shows how to sort an array of ``custom_t`` objects + //! using ``cub::DeviceRadixSort::SortKeys``: + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin keys + //! :end-before: example-end keys + //! + //! @endrst + //! + //! @tparam KeyT + //! **[inferred]** KeyT type + //! + //! @tparam NumItemsT + //! **[inferred]** Type of num_items + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! ``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! The leftmost element of the tuple is considered the most significant. + //! The call operator must not modify members of the key. + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work + //! is done. + //! + //! @param[in,out] temp_storage_bytes + //! 
Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in] d_keys_in + //! Pointer to the input data of key data to sort + //! + //! @param[out] d_keys_out + //! Pointer to the sorted output sequence of key data + //! + //! @param[in] num_items + //! Number of items to sort + //! + //! @param decomposer + //! Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. The call operator must not + //! modify members of the key. + //! + //! @param[in] stream + //! **[optional]** CUDA stream to launch kernels within. + //! Default is stream0. + template + CUB_RUNTIME_FUNCTION static // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value, // + cudaError_t>::type + SortKeys(void* d_temp_storage, + size_t& temp_storage_bytes, + const KeyT* d_keys_in, + KeyT* d_keys_out, + NumItemsT num_items, + DecomposerT decomposer, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + + // unsigned integer type for global offsets + using offset_t = detail::choose_offset_t; + using decomposer_check_t = detail::radix::decomposer_check_t; + + static_assert(decomposer_check_t::value, + "DecomposerT must be a callable object returning a tuple of references to " + "arithmetic types"); + + // We cast away const-ness, but will *not* write to these arrays. + // `DispatchRadixSort::Dispatch` will allocate temporary storage and + // create a new double-buffer internally when the `is_overwrite_ok` flag + // is not set. 
+ constexpr bool is_overwrite_okay = false; + constexpr bool is_descending = false; + DoubleBuffer d_keys(const_cast(d_keys_in), d_keys_out); + DoubleBuffer d_values; + + return DeviceRadixSort::custom_radix_sort( + decomposer_check_t{}, + d_temp_storage, + temp_storage_bytes, + is_overwrite_okay, + d_keys, + d_values, + static_cast(num_items), + decomposer, + stream); + } + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + template + CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t SortKeys( + void* d_temp_storage, + size_t& temp_storage_bytes, + const KeyT* d_keys_in, + KeyT* d_keys_out, + NumItemsT num_items, + int begin_bit, + int end_bit, + cudaStream_t stream, + bool debug_synchronous) + { + CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG + + return SortKeys( + d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, num_items, begin_bit, end_bit, stream); + } +#endif + + //! @brief Sorts keys into ascending order. (`~N` auxiliary storage required). + //! + //! @par + //! - The sorting operation is given a pair of key buffers managed by a + //! DoubleBuffer structure that indicates which of the two buffers is + //! "current" (and thus contains the input data to be sorted). + //! - The contents of both buffers may be altered by the sorting operation. + //! - In-place operations are not supported. There must be no overlap between + //! any of the provided ranges: + //! - `[d_keys.Current(), d_keys.Current() + num_items)` + //! - `[d_keys.Alternate(), d_keys.Alternate() + num_items)` + //! - Upon completion, the sorting operation will update the "current" + //! indicator within the DoubleBuffer wrapper to reference which of the two + //! buffers now contains the sorted output sequence (a function of the + //! number of key bits specified and the targeted device architecture). + //! - An optional bit subrange `[begin_bit, end_bit)` of differentiating key + //! bits can be specified. 
This can reduce overall sorting overhead and + //! yield a corresponding performance improvement. + //! - @devicestorageP + //! - @devicestorage + //! + //! @par Performance + //! The following charts illustrate saturated sorting performance across + //! different CUDA architectures for uniform-random `uint32` and `uint64` + //! keys, respectively. + //! + //! @image html lsb_radix_sort_int32_keys.png + //! @image html lsb_radix_sort_int64_keys.png + //! + //! @par Snippet + //! The code snippet below illustrates the sorting of a device vector of + //! `int` keys. + //! @par + //! @code + //! #include + //! // or equivalently + //! + //! // Declare, allocate, and initialize device-accessible pointers + //! // for sorting data + //! int num_items; // e.g., 7 + //! int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9] + //! int *d_key_alt_buf; // e.g., [ ... ] + //! ... + //! + //! // Create a DoubleBuffer to wrap the pair of device pointers + //! cub::DoubleBuffer d_keys(d_key_buf, d_key_alt_buf); + //! + //! // Determine temporary device storage requirements + //! void *d_temp_storage = nullptr; + //! size_t temp_storage_bytes = 0; + //! cub::DeviceRadixSort::SortKeys( + //! d_temp_storage, temp_storage_bytes, d_keys, num_items); + //! + //! // Allocate temporary storage + //! cudaMalloc(&d_temp_storage, temp_storage_bytes); + //! + //! // Run sorting operation + //! cub::DeviceRadixSort::SortKeys( + //! d_temp_storage, temp_storage_bytes, d_keys, num_items); + //! + //! // d_keys.Current() <-- [0, 3, 5, 6, 7, 8, 9] + //! @endcode + //! + //! @tparam KeyT + //! **[inferred]** KeyT type + //! + //! @tparam NumItemsT + //! **[inferred]** Type of num_items + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work + //! is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! 
+ //! @param[in,out] d_keys + //! Reference to the double-buffer of keys whose "current" device-accessible + //! buffer contains the unsorted input keys and, upon return, is updated to + //! point to the sorted output keys + //! + //! @param[in] num_items + //! Number of items to sort + //! + //! @param[in] begin_bit + //! **[optional]** The least-significant bit index (inclusive) needed for + //! key comparison + //! + //! @param[in] end_bit + //! **[optional]** The most-significant bit index (exclusive) needed for key + //! comparison (e.g., `sizeof(unsigned int) * 8`) + //! + //! @param[in] stream + //! **[optional]** CUDA stream to launch kernels within. + //! Default is stream0. + template + CUB_RUNTIME_FUNCTION static cudaError_t SortKeys( + void* d_temp_storage, + size_t& temp_storage_bytes, + DoubleBuffer& d_keys, + NumItemsT num_items, + int begin_bit = 0, + int end_bit = sizeof(KeyT) * 8, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + + // Unsigned integer type for global offsets. + using OffsetT = detail::choose_offset_t; + + constexpr bool is_overwrite_okay = true; + + // Null value type + DoubleBuffer d_values; + + return DispatchRadixSort::Dispatch( + d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, begin_bit, end_bit, is_overwrite_okay, stream); + } + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + template + CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t SortKeys( + void* d_temp_storage, + size_t& temp_storage_bytes, + DoubleBuffer& d_keys, + NumItemsT num_items, + int begin_bit, + int end_bit, + cudaStream_t stream, + bool debug_synchronous) + { + CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG + + return SortKeys(d_temp_storage, temp_storage_bytes, d_keys, num_items, begin_bit, end_bit, stream); + } +#endif + + //! @rst + //! Sorts keys into ascending order using :math:`\approx N` auxiliary storage. + //! + //! 
* The sorting operation is given a pair of key buffers managed by a + //! DoubleBuffer structure that indicates which of the two buffers is + //! "current" (and thus contains the input data to be sorted). + //! * The contents of both buffers may be altered by the sorting operation. + //! * In-place operations are not supported. There must be no overlap between + //! any of the provided ranges: + //! + //! * ``[d_keys.Current(), d_keys.Current() + num_items)`` + //! * ``[d_keys.Alternate(), d_keys.Alternate() + num_items)`` + //! + //! * Upon completion, the sorting operation will update the "current" + //! indicator within the DoubleBuffer wrapper to reference which of the two + //! buffers now contains the sorted output sequence (a function of the + //! number of key bits specified and the targeted device architecture). + //! * @devicestorageP + //! * @devicestorage + //! + //! Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin custom-type + //! :end-before: example-end custom-type + //! + //! The following snippet shows how to sort an array of ``custom_t`` objects + //! using ``cub::DeviceRadixSort::SortKeys``: + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin keys-db + //! :end-before: example-end keys-db + //! + //! @endrst + //! + //! @tparam KeyT + //! **[inferred]** KeyT type + //! + //! @tparam NumItemsT + //! **[inferred]** Type of num_items + //! + //! 
@tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! ``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! The leftmost element of the tuple is considered the most significant. + //! The call operator must not modify members of the key. + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work + //! is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in,out] d_keys + //! Reference to the double-buffer of keys whose "current" device-accessible + //! buffer contains the unsorted input keys and, upon return, is updated to + //! point to the sorted output keys + //! + //! @param[in] num_items + //! Number of items to sort + //! + //! @param decomposer + //! Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. The call operator must not + //! modify members of the key. + //! + //! @param[in] stream + //! **[optional]** CUDA stream to launch kernels within. + //! Default is stream0. 
+ template + CUB_RUNTIME_FUNCTION static // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value, // + cudaError_t>::type + SortKeys(void* d_temp_storage, + size_t& temp_storage_bytes, + DoubleBuffer& d_keys, + NumItemsT num_items, + DecomposerT decomposer, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + + // unsigned integer type for global offsets + using offset_t = detail::choose_offset_t; + using decomposer_check_t = detail::radix::decomposer_check_t; + + static_assert(decomposer_check_t::value, + "DecomposerT must be a callable object returning a tuple of references to " + "arithmetic types"); + + constexpr bool is_overwrite_okay = true; + constexpr bool is_descending = false; + DoubleBuffer d_values; + + return DeviceRadixSort::custom_radix_sort( + decomposer_check_t{}, + d_temp_storage, + temp_storage_bytes, + is_overwrite_okay, + d_keys, + d_values, + static_cast(num_items), + decomposer, + stream); + } + + //! @rst + //! Sorts keys into ascending order using :math:`\approx N` auxiliary storage. + //! + //! * The sorting operation is given a pair of key buffers managed by a + //! DoubleBuffer structure that indicates which of the two buffers is + //! "current" (and thus contains the input data to be sorted). + //! * The contents of both buffers may be altered by the sorting operation. + //! * In-place operations are not supported. There must be no overlap between + //! any of the provided ranges: + //! + //! * ``[d_keys.Current(), d_keys.Current() + num_items)`` + //! * ``[d_keys.Alternate(), d_keys.Alternate() + num_items)`` + //! + //! * A bit subrange ``[begin_bit, end_bit)`` is provided to specify + //! differentiating key bits. This can reduce overall sorting overhead and + //! yield a corresponding performance improvement. + //! * Upon completion, the sorting operation will update the "current" + //! indicator within the DoubleBuffer wrapper to reference which of the two + //! 
buffers now contains the sorted output sequence (a function of the + //! number of key bits specified and the targeted device architecture). + //! * @devicestorageP + //! * @devicestorage + //! + //! Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin custom-type + //! :end-before: example-end custom-type + //! + //! The following snippet shows how to sort an array of ``custom_t`` objects + //! using ``cub::DeviceRadixSort::SortKeys``: + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin keys-bits-db + //! :end-before: example-end keys-bits-db + //! + //! @endrst + //! + //! @tparam KeyT + //! **[inferred]** KeyT type + //! + //! @tparam NumItemsT + //! **[inferred]** Type of num_items + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! ``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! The leftmost element of the tuple is considered the most significant. + //! The call operator must not modify members of the key. + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work + //! is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! 
+ //! @param[in,out] d_keys + //! Reference to the double-buffer of keys whose "current" device-accessible + //! buffer contains the unsorted input keys and, upon return, is updated to + //! point to the sorted output keys + //! + //! @param[in] num_items + //! Number of items to sort + //! + //! @param decomposer + //! Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. The call operator must not + //! modify members of the key. + //! + //! @param[in] begin_bit + //! **[optional]** The least-significant bit index (inclusive) needed for + //! key comparison + //! + //! @param[in] end_bit + //! **[optional]** The most-significant bit index (exclusive) needed for key + //! comparison (e.g., `(sizeof(float) + sizeof(long long int)) * 8`) + //! + //! @param[in] stream + //! **[optional]** CUDA stream to launch kernels within. + //! Default is stream0. 
+ template + CUB_RUNTIME_FUNCTION static // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value, // + cudaError_t>::type + SortKeys(void* d_temp_storage, + size_t& temp_storage_bytes, + DoubleBuffer& d_keys, + NumItemsT num_items, + DecomposerT decomposer, + int begin_bit, + int end_bit, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + + // unsigned integer type for global offsets + using offset_t = detail::choose_offset_t; + using decomposer_check_t = detail::radix::decomposer_check_t; + + static_assert(decomposer_check_t::value, + "DecomposerT must be a callable object returning a tuple of references to " + "arithmetic types"); + + constexpr bool is_overwrite_okay = true; + constexpr bool is_descending = false; + DoubleBuffer d_values; + + return DeviceRadixSort::custom_radix_sort( + decomposer_check_t{}, + d_temp_storage, + temp_storage_bytes, + is_overwrite_okay, + d_keys, + d_values, + static_cast(num_items), + decomposer, + begin_bit, + end_bit, + stream); + } + + //! @brief Sorts keys into descending order. + //! (`~2N` auxiliary storage required). + //! + //! @par + //! - The contents of the input data are not altered by the sorting operation. + //! - Pointers to contiguous memory must be used; iterators are not currently + //! supported. + //! - In-place operations are not supported. There must be no overlap between + //! any of the provided ranges: + //! - `[d_keys_in, d_keys_in + num_items)` + //! - `[d_keys_out, d_keys_out + num_items)` + //! - An optional bit subrange `[begin_bit, end_bit)` of differentiating key + //! bits can be specified. This can reduce overall sorting overhead and + //! yield a corresponding performance improvement. + //! - @devicestorageNP For sorting using only `O(P)` temporary storage, see + //! the sorting interface using DoubleBuffer wrappers below. + //! - @devicestorage + //! + //! @par Performance + //! Performance is similar to DeviceRadixSort::SortKeys. 
+ //! + //! @par Snippet + //! The code snippet below illustrates the sorting of a device vector of + //! `int` keys. + //! @par + //! @code + //! #include + //! // or equivalently + //! + //! // Declare, allocate, and initialize device-accessible pointers + //! // for sorting data + //! int num_items; // e.g., 7 + //! int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9] + //! int *d_keys_out; // e.g., [ ... ] + //! ... + //! + //! // Create a DoubleBuffer to wrap the pair of device pointers + //! cub::DoubleBuffer d_keys(d_key_buf, d_key_alt_buf); + //! + //! // Determine temporary device storage requirements + //! void *d_temp_storage = nullptr; + //! size_t temp_storage_bytes = 0; + //! cub::DeviceRadixSort::SortKeysDescending( + //! d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, num_items); + //! + //! // Allocate temporary storage + //! cudaMalloc(&d_temp_storage, temp_storage_bytes); + //! + //! // Run sorting operation + //! cub::DeviceRadixSort::SortKeysDescending( + //! d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, num_items); + //! + //! // d_keys_out <-- [9, 8, 7, 6, 5, 3, 0]s + //! + //! @endcode + //! + //! @tparam KeyT + //! **[inferred]** KeyT type + //! + //! @tparam NumItemsT + //! **[inferred]** Type of num_items + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work + //! is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in] d_keys_in + //! Pointer to the input data of key data to sort + //! + //! @param[out] d_keys_out + //! Pointer to the sorted output sequence of key data + //! + //! @param[in] num_items + //! Number of items to sort + //! + //! @param[in] begin_bit + //! **[optional]** The least-significant bit index (inclusive) needed for + //! key comparison + //! + //! @param[in] end_bit + //! 
**[optional]** The most-significant bit index (exclusive) needed for key + //! comparison (e.g., `sizeof(unsigned int) * 8`) + //! + //! @param[in] stream + //! **[optional]** CUDA stream to launch kernels within. + //! Default is stream0. + template + CUB_RUNTIME_FUNCTION static cudaError_t SortKeysDescending( + void* d_temp_storage, + size_t& temp_storage_bytes, + const KeyT* d_keys_in, + KeyT* d_keys_out, + NumItemsT num_items, + int begin_bit = 0, + int end_bit = sizeof(KeyT) * 8, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + + // Unsigned integer type for global offsets. + using OffsetT = detail::choose_offset_t; + + // We cast away const-ness, but will *not* write to these arrays. + // `DispatchRadixSort::Dispatch` will allocate temporary storage and + // create a new double-buffer internally when the `is_overwrite_ok` flag + // is not set. + constexpr bool is_overwrite_okay = false; + DoubleBuffer d_keys(const_cast(d_keys_in), d_keys_out); + DoubleBuffer d_values; + + return DispatchRadixSort::Dispatch( + d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, begin_bit, end_bit, is_overwrite_okay, stream); + } + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + template + CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t SortKeysDescending( + void* d_temp_storage, + size_t& temp_storage_bytes, + const KeyT* d_keys_in, + KeyT* d_keys_out, + NumItemsT num_items, + int begin_bit, + int end_bit, + cudaStream_t stream, + bool debug_synchronous) + { + CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG + + return SortKeysDescending( + d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, num_items, begin_bit, end_bit, stream); + } +#endif + + //! @rst + //! Sorts keys into descending order using :math:`\approx 2N` auxiliary storage. + //! + //! * The contents of the input data are not altered by the sorting operation. + //! 
* Pointers to contiguous memory must be used; iterators are not currently + //! supported. + //! * In-place operations are not supported. There must be no overlap between + //! any of the provided ranges: + //! + //! * ``[d_keys_in, d_keys_in + num_items)`` + //! * ``[d_keys_out, d_keys_out + num_items)`` + //! + //! * An optional bit subrange ``[begin_bit, end_bit)`` of differentiating key + //! bits can be specified. This can reduce overall sorting overhead and + //! yield a corresponding performance improvement. + //! * @devicestorageNP For sorting using only :math:`O(P)` temporary storage, see + //! the sorting interface using DoubleBuffer wrappers below. + //! * @devicestorage + //! + //! Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin custom-type + //! :end-before: example-end custom-type + //! + //! The following snippet shows how to sort an array of ``custom_t`` objects + //! using ``cub::DeviceRadixSort::SortKeysDescending``: + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin keys-descending-bits + //! :end-before: example-end keys-descending-bits + //! + //! @endrst + //! + //! @tparam KeyT + //! **[inferred]** KeyT type + //! + //! @tparam NumItemsT + //! **[inferred]** Type of num_items + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! 
``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! The leftmost element of the tuple is considered the most significant. + //! The call operator must not modify members of the key. + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work + //! is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in] d_keys_in + //! Pointer to the input data of key data to sort + //! + //! @param[out] d_keys_out + //! Pointer to the sorted output sequence of key data + //! + //! @param[in] num_items + //! Number of items to sort + //! + //! @param decomposer + //! Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. The call operator must not + //! modify members of the key. + //! + //! @param[in] begin_bit + //! **[optional]** The least-significant bit index (inclusive) needed for + //! key comparison + //! + //! @param[in] end_bit + //! **[optional]** The most-significant bit index (exclusive) needed for key + //! comparison (e.g., `(sizeof(float) + sizeof(long long int)) * 8`) + //! + //! @param[in] stream + //! **[optional]** CUDA stream to launch kernels within. + //! Default is stream0. 
+ template + CUB_RUNTIME_FUNCTION static // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value, // + cudaError_t>::type + SortKeysDescending( + void* d_temp_storage, + size_t& temp_storage_bytes, + const KeyT* d_keys_in, + KeyT* d_keys_out, + NumItemsT num_items, + DecomposerT decomposer, + int begin_bit, + int end_bit, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + + // unsigned integer type for global offsets + using offset_t = detail::choose_offset_t; + using decomposer_check_t = detail::radix::decomposer_check_t; + + static_assert(decomposer_check_t::value, + "DecomposerT must be a callable object returning a tuple of references to " + "arithmetic types"); + + // We cast away const-ness, but will *not* write to these arrays. + // `DispatchRadixSort::Dispatch` will allocate temporary storage and + // create a new double-buffer internally when the `is_overwrite_ok` flag + // is not set. + constexpr bool is_overwrite_okay = false; + constexpr bool is_descending = true; + DoubleBuffer d_keys(const_cast(d_keys_in), d_keys_out); + DoubleBuffer d_values; + + return DeviceRadixSort::custom_radix_sort( + decomposer_check_t{}, + d_temp_storage, + temp_storage_bytes, + is_overwrite_okay, + d_keys, + d_values, + static_cast(num_items), + decomposer, + begin_bit, + end_bit, + stream); + } + + //! @rst + //! Sorts keys into descending order using :math:`\approx 2N` auxiliary storage. + //! + //! * The contents of the input data are not altered by the sorting operation. + //! * Pointers to contiguous memory must be used; iterators are not currently + //! supported. + //! * In-place operations are not supported. There must be no overlap between + //! any of the provided ranges: + //! + //! * ``[d_keys_in, d_keys_in + num_items)`` + //! * ``[d_keys_out, d_keys_out + num_items)`` + //! + //! * @devicestorageNP For sorting using only :math:`O(P)` temporary storage, see + //! 
the sorting interface using DoubleBuffer wrappers below. + //! * @devicestorage + //! + //! Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin custom-type + //! :end-before: example-end custom-type + //! + //! The following snippet shows how to sort an array of ``custom_t`` objects + //! using ``cub::DeviceRadixSort::SortKeysDescending``: + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin keys-descending + //! :end-before: example-end keys-descending + //! + //! @endrst + //! + //! @tparam KeyT + //! **[inferred]** KeyT type + //! + //! @tparam NumItemsT + //! **[inferred]** Type of num_items + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! ``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! The leftmost element of the tuple is considered the most significant. + //! The call operator must not modify members of the key. + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work + //! is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in] d_keys_in + //! Pointer to the input data of key data to sort + //! + //! 
@param[out] d_keys_out + //! Pointer to the sorted output sequence of key data + //! + //! @param[in] num_items + //! Number of items to sort + //! + //! @param decomposer + //! Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. The call operator must not + //! modify members of the key. + //! + //! @param[in] stream + //! **[optional]** CUDA stream to launch kernels within. + //! Default is stream0. + template + CUB_RUNTIME_FUNCTION static // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value, // + cudaError_t>::type + SortKeysDescending( + void* d_temp_storage, + size_t& temp_storage_bytes, + const KeyT* d_keys_in, + KeyT* d_keys_out, + NumItemsT num_items, + DecomposerT decomposer, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + + // unsigned integer type for global offsets + using offset_t = detail::choose_offset_t; + using decomposer_check_t = detail::radix::decomposer_check_t; + + static_assert(decomposer_check_t::value, + "DecomposerT must be a callable object returning a tuple of references to " + "arithmetic types"); + + // We cast away const-ness, but will *not* write to these arrays. + // `DispatchRadixSort::Dispatch` will allocate temporary storage and + // create a new double-buffer internally when the `is_overwrite_ok` flag + // is not set. + constexpr bool is_overwrite_okay = false; + constexpr bool is_descending = true; + DoubleBuffer d_keys(const_cast(d_keys_in), d_keys_out); + DoubleBuffer d_values; + + return DeviceRadixSort::custom_radix_sort( + decomposer_check_t{}, + d_temp_storage, + temp_storage_bytes, + is_overwrite_okay, + d_keys, + d_values, + static_cast(num_items), + decomposer, + stream); + } + + //! @brief Sorts keys into descending order. + //! (`~N` auxiliary storage required). + //! + //! @par + //! 
- The sorting operation is given a pair of key buffers managed by a + //! DoubleBuffer structure that indicates which of the two buffers is + //! "current" (and thus contains the input data to be sorted). + //! - The contents of both buffers may be altered by the sorting operation. + //! - In-place operations are not supported. There must be no overlap between + //! any of the provided ranges: + //! - `[d_keys.Current(), d_keys.Current() + num_items)` + //! - `[d_keys.Alternate(), d_keys.Alternate() + num_items)` + //! - Upon completion, the sorting operation will update the "current" + //! indicator within the DoubleBuffer wrapper to reference which of the two + //! buffers now contains the sorted output sequence (a function of the + //! number of key bits specified and the targeted device architecture). + //! - An optional bit subrange `[begin_bit, end_bit)` of differentiating key + //! bits can be specified. This can reduce overall sorting overhead and + //! yield a corresponding performance improvement. + //! - @devicestorageP + //! - @devicestorage + //! + //! @par Performance + //! Performance is similar to DeviceRadixSort::SortKeys. + //! + //! @par Snippet + //! The code snippet below illustrates the sorting of a device vector of `i`nt keys. + //! @par + //! @code + //! #include + //! // or equivalently + //! + //! // Declare, allocate, and initialize device-accessible pointers + //! // for sorting data + //! int num_items; // e.g., 7 + //! int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9] + //! int *d_key_alt_buf; // e.g., [ ... ] + //! ... + //! + //! // Create a DoubleBuffer to wrap the pair of device pointers + //! cub::DoubleBuffer d_keys(d_key_buf, d_key_alt_buf); + //! + //! // Determine temporary device storage requirements + //! void *d_temp_storage = nullptr; + //! size_t temp_storage_bytes = 0; + //! cub::DeviceRadixSort::SortKeysDescending( + //! d_temp_storage, temp_storage_bytes, d_keys, num_items); + //! + //! // Allocate temporary storage + //! 
cudaMalloc(&d_temp_storage, temp_storage_bytes); + //! + //! // Run sorting operation + //! cub::DeviceRadixSort::SortKeysDescending( + //! d_temp_storage, temp_storage_bytes, d_keys, num_items); + //! + //! // d_keys.Current() <-- [9, 8, 7, 6, 5, 3, 0] + //! @endcode + //! + //! @tparam KeyT + //! **[inferred]** KeyT type + //! + //! @tparam NumItemsT + //! **[inferred]** Type of num_items + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work + //! is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in,out] d_keys + //! Reference to the double-buffer of keys whose "current" device-accessible + //! buffer contains the unsorted input keys and, upon return, is updated to + //! point to the sorted output keys + //! + //! @param[in] num_items + //! Number of items to sort + //! + //! @param[in] begin_bit + //! **[optional]** The least-significant bit index (inclusive) needed for + //! key comparison + //! + //! @param[in] end_bit + //! **[optional]** The most-significant bit index (exclusive) needed for key + //! comparison (e.g., `sizeof(unsigned int) * 8`) + //! + //! @param[in] stream + //! **[optional]** CUDA stream to launch kernels within. + //! Default is stream0. + template + CUB_RUNTIME_FUNCTION static cudaError_t SortKeysDescending( + void* d_temp_storage, + size_t& temp_storage_bytes, + DoubleBuffer& d_keys, + NumItemsT num_items, + int begin_bit = 0, + int end_bit = sizeof(KeyT) * 8, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + + // Unsigned integer type for global offsets. 
+ using OffsetT = detail::choose_offset_t; + + constexpr bool is_overwrite_okay = true; + + // Null value type + DoubleBuffer d_values; + + return DispatchRadixSort::Dispatch( + d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, begin_bit, end_bit, is_overwrite_okay, stream); + } + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + template + CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t SortKeysDescending( + void* d_temp_storage, + size_t& temp_storage_bytes, + DoubleBuffer& d_keys, + NumItemsT num_items, + int begin_bit, + int end_bit, + cudaStream_t stream, + bool debug_synchronous) + { + CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG + + return SortKeysDescending( + d_temp_storage, temp_storage_bytes, d_keys, num_items, begin_bit, end_bit, stream); + } +#endif + + //! @rst + //! Sorts keys into descending order using :math:`\approx N` auxiliary storage. + //! + //! * The sorting operation is given a pair of key buffers managed by a + //! DoubleBuffer structure that indicates which of the two buffers is + //! "current" (and thus contains the input data to be sorted). + //! * The contents of both buffers may be altered by the sorting operation. + //! * In-place operations are not supported. There must be no overlap between + //! any of the provided ranges: + //! + //! * ``[d_keys.Current(), d_keys.Current() + num_items)`` + //! * ``[d_keys.Alternate(), d_keys.Alternate() + num_items)`` + //! + //! * Upon completion, the sorting operation will update the "current" + //! indicator within the DoubleBuffer wrapper to reference which of the two + //! buffers now contains the sorted output sequence (a function of the + //! number of key bits specified and the targeted device architecture). + //! * @devicestorageP + //! * @devicestorage + //! + //! Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. 
To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin custom-type + //! :end-before: example-end custom-type + //! + //! The following snippet shows how to sort an array of ``custom_t`` objects + //! using ``cub::DeviceRadixSort::SortKeysDescending``: + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin keys-descending-db + //! :end-before: example-end keys-descending-db + //! + //! @endrst + //! + //! @tparam KeyT + //! **[inferred]** KeyT type + //! + //! @tparam NumItemsT + //! **[inferred]** Type of num_items + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! ``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! The leftmost element of the tuple is considered the most significant. + //! The call operator must not modify members of the key. + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work + //! is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in,out] d_keys + //! Reference to the double-buffer of keys whose "current" device-accessible + //! buffer contains the unsorted input keys and, upon return, is updated to + //! point to the sorted output keys + //! + //! @param[in] num_items + //! Number of items to sort + //! + //! @param decomposer + //! 
Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. The call operator must not + //! modify members of the key. + //! + //! @param[in] stream + //! **[optional]** CUDA stream to launch kernels within. + //! Default is stream0. + template + CUB_RUNTIME_FUNCTION static // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value, // + cudaError_t>::type + SortKeysDescending( + void* d_temp_storage, + size_t& temp_storage_bytes, + DoubleBuffer& d_keys, + NumItemsT num_items, + DecomposerT decomposer, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + + // unsigned integer type for global offsets + using offset_t = detail::choose_offset_t; + using decomposer_check_t = detail::radix::decomposer_check_t; + + static_assert(decomposer_check_t::value, + "DecomposerT must be a callable object returning a tuple of references to " + "arithmetic types"); + + constexpr bool is_overwrite_okay = true; + constexpr bool is_descending = true; + DoubleBuffer d_values; + + return DeviceRadixSort::custom_radix_sort( + decomposer_check_t{}, + d_temp_storage, + temp_storage_bytes, + is_overwrite_okay, + d_keys, + d_values, + static_cast(num_items), + decomposer, + stream); + } + + //! @rst + //! Sorts keys into descending order using :math:`\approx N` auxiliary storage. + //! + //! * The sorting operation is given a pair of key buffers managed by a + //! DoubleBuffer structure that indicates which of the two buffers is + //! "current" (and thus contains the input data to be sorted). + //! * The contents of both buffers may be altered by the sorting operation. + //! * In-place operations are not supported. There must be no overlap between + //! any of the provided ranges: + //! + //! * ``[d_keys.Current(), d_keys.Current() + num_items)`` + //! 
* ``[d_keys.Alternate(), d_keys.Alternate() + num_items)`` + //! + //! * A bit subrange ``[begin_bit, end_bit)`` is provided to specify + //! differentiating key bits. This can reduce overall sorting overhead and + //! yield a corresponding performance improvement. + //! * Upon completion, the sorting operation will update the "current" + //! indicator within the DoubleBuffer wrapper to reference which of the two + //! buffers now contains the sorted output sequence (a function of the + //! number of key bits specified and the targeted device architecture). + //! * @devicestorageP + //! * @devicestorage + //! + //! Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin custom-type + //! :end-before: example-end custom-type + //! + //! The following snippet shows how to sort an array of ``custom_t`` objects + //! using ``cub::DeviceRadixSort::SortKeysDescending``: + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin keys-descending-bits-db + //! :end-before: example-end keys-descending-bits-db + //! + //! @endrst + //! + //! @tparam KeyT + //! **[inferred]** KeyT type + //! + //! @tparam NumItemsT + //! **[inferred]** Type of num_items + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! ``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! 
The leftmost element of the tuple is considered the most significant. + //! The call operator must not modify members of the key. + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work + //! is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in,out] d_keys + //! Reference to the double-buffer of keys whose "current" device-accessible + //! buffer contains the unsorted input keys and, upon return, is updated to + //! point to the sorted output keys + //! + //! @param[in] num_items + //! Number of items to sort + //! + //! @param decomposer + //! Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. The call operator must not + //! modify members of the key. + //! + //! @param[in] begin_bit + //! **[optional]** The least-significant bit index (inclusive) needed for + //! key comparison + //! + //! @param[in] end_bit + //! **[optional]** The most-significant bit index (exclusive) needed for key + //! comparison (e.g., `(sizeof(float) + sizeof(long long int)) * 8`) + //! + //! @param[in] stream + //! **[optional]** CUDA stream to launch kernels within. + //! Default is stream0. 
+ template + CUB_RUNTIME_FUNCTION static // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value, // + cudaError_t>::type + SortKeysDescending( + void* d_temp_storage, + size_t& temp_storage_bytes, + DoubleBuffer& d_keys, + NumItemsT num_items, + DecomposerT decomposer, + int begin_bit, + int end_bit, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + + // unsigned integer type for global offsets + using offset_t = detail::choose_offset_t; + using decomposer_check_t = detail::radix::decomposer_check_t; + + static_assert(decomposer_check_t::value, + "DecomposerT must be a callable object returning a tuple of references to " + "arithmetic types"); + + constexpr bool is_overwrite_okay = true; + constexpr bool is_descending = true; + DoubleBuffer d_values; + + return DeviceRadixSort::custom_radix_sort( + decomposer_check_t{}, + d_temp_storage, + temp_storage_bytes, + is_overwrite_okay, + d_keys, + d_values, + static_cast(num_items), + decomposer, + begin_bit, + end_bit, + stream); + } + + //! @} end member group +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/device/device_segmented_radix_sort.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/device/device_segmented_radix_sort.cuh new file mode 100644 index 0000000000000000000000000000000000000000..6bde88ed9da2b61a2649e2f7404e9dc82504ae21 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/device/device_segmented_radix_sort.cuh @@ -0,0 +1,1712 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +//! @file +//! cub::DeviceSegmentedRadixSort provides device-wide, parallel operations for computing a batched radix sort across +//! multiple, non-overlapping sequences of data items residing within device-accessible memory. 
+ +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include + +#include + +#include + +CUB_NAMESPACE_BEGIN + +//! @rst +//! DeviceSegmentedRadixSort provides device-wide, parallel operations +//! for computing a batched radix sort across multiple, non-overlapping +//! sequences of data items residing within device-accessible memory. +//! +//! Overview +//! +++++++++++++++++++++++++++++++++++++++++++++ +//! +//! The `radix sorting method `_ +//! arranges items into ascending (or descending) order. The algorithm relies +//! upon a positional representation for keys, i.e., each key is comprised of an +//! ordered sequence of symbols (e.g., digits, characters, etc.) specified from +//! least-significant to most-significant. For a given input sequence of keys +//! and a set of rules specifying a total ordering of the symbolic alphabet, the +//! radix sorting method produces a lexicographic ordering of those keys. +//! +//! See Also +//! +++++++++++++++++++++++++++++++++++++++++++++ +//! +//! DeviceSegmentedRadixSort shares its implementation with DeviceRadixSort. See +//! that algorithm's documentation for more information. +//! +//! Segments are not required to be contiguous. Any element of input(s) or +//! output(s) outside the specified segments will not be accessed nor modified. +//! +//! Usage Considerations +//! +++++++++++++++++++++++++++++++++++++++++++++ +//! +//! @cdp_class{DeviceSegmentedRadixSort} +//! +//! @endrst +struct DeviceSegmentedRadixSort +{ +private: + // Name reported for NVTX ranges + _CCCL_HOST_DEVICE static constexpr auto GetName() -> const char* + { + return "cub::DeviceSegmentedRadixSort"; + } + +public: + //! @name Key-value pairs + //! @{ + + //! @rst + //! 
Sorts segments of key-value pairs into ascending order. (``~2N`` auxiliary storage required) + //! + //! - The contents of the input data are not altered by the sorting operation + //! - When input a contiguous sequence of segments, a single sequence + //! ``segment_offsets`` (of length ``num_segments + 1``) can be aliased + //! for both the ``d_begin_offsets`` and ``d_end_offsets`` parameters (where + //! the latter is specified as ``segment_offsets + 1``). + //! - An optional bit subrange ``[begin_bit, end_bit)`` of differentiating key + //! bits can be specified. This can reduce overall sorting overhead and + //! yield a corresponding performance improvement. + //! - Let ``in`` be one of ``{d_keys_in, d_values_in}`` and ``out`` be any of + //! ``{d_keys_out, d_values_out}``. The range ``[out, out + num_items)`` shall + //! not overlap ``[in, in + num_items)``, + //! ``[d_begin_offsets, d_begin_offsets + num_segments)`` nor + //! ``[d_end_offsets, d_end_offsets + num_segments)`` in any way. + //! - @devicestorageNP For sorting using only ``O(P)`` temporary storage, see + //! the sorting interface using DoubleBuffer wrappers below. + //! - Segments are not required to be contiguous. For all index values ``i`` + //! outside the specified segments ``d_keys_in[i]``, ``d_values_in[i]``, + //! ``d_keys_out[i]``, ``d_values_out[i]`` will not be accessed nor modified. + //! - @devicestorage + //! + //! Snippet + //! +++++++++++++++++++++++++++++++++++++++++++++ + //! + //! The code snippet below illustrates the batched sorting of three segments + //! (with one zero-length segment) of ``int`` keys with associated vector of ``int`` values. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! // Declare, allocate, and initialize device-accessible pointers for sorting data + //! int num_items; // e.g., 7 + //! int num_segments; // e.g., 3 + //! int *d_offsets; // e.g., [0, 3, 3, 7] + //! int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9] + //! 
int *d_keys_out; // e.g., [-, -, -, -, -, -, -] + //! int *d_values_in; // e.g., [0, 1, 2, 3, 4, 5, 6] + //! int *d_values_out; // e.g., [-, -, -, -, -, -, -] + //! ... + //! + //! // Determine temporary device storage requirements + //! void *d_temp_storage = nullptr; + //! size_t temp_storage_bytes = 0; + //! cub::DeviceSegmentedRadixSort::SortPairs( + //! d_temp_storage, temp_storage_bytes, + //! d_keys_in, d_keys_out, d_values_in, d_values_out, + //! num_items, num_segments, d_offsets, d_offsets + 1); + //! + //! // Allocate temporary storage + //! cudaMalloc(&d_temp_storage, temp_storage_bytes); + //! + //! // Run sorting operation + //! cub::DeviceSegmentedRadixSort::SortPairs( + //! d_temp_storage, temp_storage_bytes, + //! d_keys_in, d_keys_out, d_values_in, d_values_out, + //! num_items, num_segments, d_offsets, d_offsets + 1); + //! + //! // d_keys_out <-- [6, 7, 8, 0, 3, 5, 9] + //! // d_values_out <-- [1, 2, 0, 5, 4, 3, 6] + //! + //! @endrst + //! + //! @tparam KeyT + //! **[inferred]** Key type + //! + //! @tparam ValueT + //! **[inferred]** Value type + //! + //! @tparam BeginOffsetIteratorT + //! **[inferred]** Random-access input iterator type for reading segment beginning offsets @iterator + //! + //! @tparam EndOffsetIteratorT + //! **[inferred]** Random-access input iterator type for reading segment ending offsets @iterator + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in] d_keys_in + //! Device-accessible pointer to the input data of key data to sort + //! + //! @param[out] d_keys_out + //! Device-accessible pointer to the sorted output sequence of key data + //! + //! @param[in] d_values_in + //! 
Device-accessible pointer to the corresponding input sequence of + //! associated value items + //! + //! @param[out] d_values_out + //! Device-accessible pointer to the correspondingly-reordered output + //! sequence of associated value items + //! + //! @param[in] num_items + //! The total number of items within the segmented array, including items not + //! covered by segments. `num_items` should match the largest element within + //! the range `[d_end_offsets, d_end_offsets + num_segments)`. + //! + //! @param[in] num_segments + //! The number of segments that comprise the sorting data + //! + //! @param[in] d_begin_offsets + //! Random-access input iterator to the sequence of beginning offsets of + //! length `num_segments`, such that `d_begin_offsets[i]` is the first + //! element of the *i*th data segment in `d_keys_*` and `d_values_*` + //! + //! @param[in] d_end_offsets + //! @rst + //! Random-access input iterator to the sequence of ending offsets of length + //! ``num_segments``, such that ``d_end_offsets[i] - 1`` is the last element of + //! the *i*\ :sup:`th` data segment in ``d_keys_*`` and ``d_values_*``. If + //! ``d_end_offsets[i] - 1 <= d_begin_offsets[i]``, the *i*\ :sup:`th` is considered empty. + //! @endrst + //! + //! @param[in] begin_bit + //! **[optional]** The least-significant bit index (inclusive) needed for key comparison + //! + //! @param[in] end_bit + //! **[optional]** The most-significant bit index (exclusive) needed for key + //! comparison (e.g., `sizeof(unsigned int) * 8`) + //! + //! @param[in] stream + //! @rst + //! **[optional]** CUDA stream to launch kernels within. Default is stream\ :sub:`0`. + //! 
@endrst + template + CUB_RUNTIME_FUNCTION static cudaError_t SortPairs( + void* d_temp_storage, + size_t& temp_storage_bytes, + const KeyT* d_keys_in, + KeyT* d_keys_out, + const ValueT* d_values_in, + ValueT* d_values_out, + int num_items, + int num_segments, + BeginOffsetIteratorT d_begin_offsets, + EndOffsetIteratorT d_end_offsets, + int begin_bit = 0, + int end_bit = sizeof(KeyT) * 8, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + + // Signed integer type for global offsets + using OffsetT = int; + + DoubleBuffer d_keys(const_cast(d_keys_in), d_keys_out); + DoubleBuffer d_values(const_cast(d_values_in), d_values_out); + + return DispatchSegmentedRadixSort::Dispatch( + d_temp_storage, + temp_storage_bytes, + d_keys, + d_values, + num_items, + num_segments, + d_begin_offsets, + d_end_offsets, + begin_bit, + end_bit, + false, + stream); + } + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + template + CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t SortPairs( + void* d_temp_storage, + size_t& temp_storage_bytes, + const KeyT* d_keys_in, + KeyT* d_keys_out, + const ValueT* d_values_in, + ValueT* d_values_out, + int num_items, + int num_segments, + BeginOffsetIteratorT d_begin_offsets, + EndOffsetIteratorT d_end_offsets, + int begin_bit, + int end_bit, + cudaStream_t stream, + bool debug_synchronous) + { + CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG + + return SortPairs( + d_temp_storage, + temp_storage_bytes, + d_keys_in, + d_keys_out, + d_values_in, + d_values_out, + num_items, + num_segments, + d_begin_offsets, + d_end_offsets, + begin_bit, + end_bit, + stream); + } +#endif // _CCCL_DOXYGEN_INVOKED + + //! @rst + //! Sorts segments of key-value pairs into ascending order. (``~N`` auxiliary storage required) + //! + //! - The sorting operation is given a pair of key buffers and a corresponding + //! pair of associated value buffers. Each pair is managed by a DoubleBuffer + //! 
structure that indicates which of the two buffers is "current" (and thus + //! contains the input data to be sorted). + //! - The contents of both buffers within each pair may be altered by the sorting operation. + //! - Upon completion, the sorting operation will update the "current" + //! indicator within each DoubleBuffer wrapper to reference which of the two + //! buffers now contains the sorted output sequence (a function of the number + //! of key bits specified and the targeted device architecture). + //! - When input a contiguous sequence of segments, a single sequence + //! ``segment_offsets`` (of length ``num_segments + 1``) can be aliased for both + //! the ``d_begin_offsets`` and ``d_end_offsets`` parameters (where the latter is + //! specified as ``segment_offsets + 1``). + //! - An optional bit subrange ``[begin_bit, end_bit)`` of differentiating key + //! bits can be specified. This can reduce overall sorting overhead and yield + //! a corresponding performance improvement. + //! - Let ``cur`` be one of ``{d_keys.Current(), d_values.Current()}`` and ``alt`` + //! be any of ``{d_keys.Alternate(), d_values.Alternate()}``. The range + //! ``[cur, cur + num_items)`` shall not overlap + //! ``[alt, alt + num_items)``. Both ranges shall not overlap + //! ``[d_begin_offsets, d_begin_offsets + num_segments)`` nor + //! ``[d_end_offsets, d_end_offsets + num_segments)`` in any way. + //! - Segments are not required to be contiguous. For all index values ``i`` + //! outside the specified segments ``d_keys.Current()[i]``, + //! ``d_values.Current()[i]``, ``d_keys.Alternate()[i]``, + //! ``d_values.Alternate()[i]`` will not be accessed nor modified. + //! - @devicestorageP + //! - @devicestorage + //! + //! Snippet + //! +++++++++++++++++++++++++++++++++++++++++++++ + //! + //! The code snippet below illustrates the batched sorting of three segments + //! (with one zero-length segment) of `int` keys with associated vector of ``int`` values. + //! + //! .. 
code-block:: c++ + //! + //! #include // or equivalently + //! + //! // Declare, allocate, and initialize device-accessible pointers + //! // for sorting data + //! int num_items; // e.g., 7 + //! int num_segments; // e.g., 3 + //! int *d_offsets; // e.g., [0, 3, 3, 7] + //! int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9] + //! int *d_key_alt_buf; // e.g., [-, -, -, -, -, -, -] + //! int *d_value_buf; // e.g., [0, 1, 2, 3, 4, 5, 6] + //! int *d_value_alt_buf; // e.g., [-, -, -, -, -, -, -] + //! ... + //! + //! // Create a set of DoubleBuffers to wrap pairs of device pointers + //! cub::DoubleBuffer d_keys(d_key_buf, d_key_alt_buf); + //! cub::DoubleBuffer d_values(d_value_buf, d_value_alt_buf); + //! + //! // Determine temporary device storage requirements + //! void *d_temp_storage = nullptr; + //! size_t temp_storage_bytes = 0; + //! cub::DeviceSegmentedRadixSort::SortPairs( + //! d_temp_storage, temp_storage_bytes, d_keys, d_values, + //! num_items, num_segments, d_offsets, d_offsets + 1); + //! + //! // Allocate temporary storage + //! cudaMalloc(&d_temp_storage, temp_storage_bytes); + //! + //! // Run sorting operation + //! cub::DeviceSegmentedRadixSort::SortPairs( + //! d_temp_storage, temp_storage_bytes, d_keys, d_values, + //! num_items, num_segments, d_offsets, d_offsets + 1); + //! + //! // d_keys.Current() <-- [6, 7, 8, 0, 3, 5, 9] + //! // d_values.Current() <-- [5, 4, 3, 1, 2, 0, 6] + //! + //! @endrst + //! + //! @tparam KeyT + //! **[inferred]** Key type + //! + //! @tparam ValueT + //! **[inferred]** Value type + //! + //! @tparam BeginOffsetIteratorT + //! **[inferred]** Random-access input iterator type for reading segment beginning offsets @iterator + //! + //! @tparam EndOffsetIteratorT + //! **[inferred]** Random-access input iterator type for reading segment ending offsets @iterator + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! 
required allocation size is written to `temp_storage_bytes` and no work is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in,out] d_keys + //! Reference to the double-buffer of keys whose "current" device-accessible + //! buffer contains the unsorted input keys and, upon return, is updated to + //! point to the sorted output keys + //! + //! @param[in,out] d_values + //! Double-buffer of values whose "current" device-accessible buffer + //! contains the unsorted input values and, upon return, is updated to point + //! to the sorted output values + //! + //! @param[in] num_items + //! The total number of items within the segmented array, including items not + //! covered by segments. `num_items` should match the largest element within + //! the range `[d_end_offsets, d_end_offsets + num_segments)`. + //! + //! @param[in] num_segments + //! The number of segments that comprise the sorting data + //! + //! @param[in] d_begin_offsets + //! @rst + //! Random-access input iterator to the sequence of beginning offsets of + //! length ``num_segments``, such that ``d_begin_offsets[i]`` is the first + //! element of the *i*\ :sup:`th` data segment in ``d_keys_*`` and ``d_values_*`` + //! @endrst + //! + //! @param[in] d_end_offsets + //! @rst + //! Random-access input iterator to the sequence of ending offsets of length + //! ``num_segments``, such that ``d_end_offsets[i] - 1`` is the last element of + //! the *i*\ :sup:`th` data segment in ``d_keys_*`` and ``d_values_*``. + //! If ``d_end_offsets[i] - 1 <= d_begin_offsets[i]``, the *i*\ :sup:`th` is considered empty. + //! @endrst + //! + //! @param[in] begin_bit + //! **[optional]** The least-significant bit index (inclusive) needed for key comparison + //! + //! @param[in] end_bit + //! **[optional]** The most-significant bit index (exclusive) needed for key + //! comparison (e.g., `sizeof(unsigned int) * 8`) + //! + //! 
@param[in] stream + //! @rst + //! **[optional]** CUDA stream to launch kernels within. Default is stream\ :sub:`0`. + //! @endrst + template + CUB_RUNTIME_FUNCTION static cudaError_t SortPairs( + void* d_temp_storage, + size_t& temp_storage_bytes, + DoubleBuffer& d_keys, + DoubleBuffer& d_values, + int num_items, + int num_segments, + BeginOffsetIteratorT d_begin_offsets, + EndOffsetIteratorT d_end_offsets, + int begin_bit = 0, + int end_bit = sizeof(KeyT) * 8, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + + // Signed integer type for global offsets + using OffsetT = int; + + return DispatchSegmentedRadixSort::Dispatch( + d_temp_storage, + temp_storage_bytes, + d_keys, + d_values, + num_items, + num_segments, + d_begin_offsets, + d_end_offsets, + begin_bit, + end_bit, + true, + stream); + } + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + template + CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t SortPairs( + void* d_temp_storage, + size_t& temp_storage_bytes, + DoubleBuffer& d_keys, + DoubleBuffer& d_values, + int num_items, + int num_segments, + BeginOffsetIteratorT d_begin_offsets, + EndOffsetIteratorT d_end_offsets, + int begin_bit, + int end_bit, + cudaStream_t stream, + bool debug_synchronous) + { + CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG + + return SortPairs( + d_temp_storage, + temp_storage_bytes, + d_keys, + d_values, + num_items, + num_segments, + d_begin_offsets, + d_end_offsets, + begin_bit, + end_bit, + stream); + } +#endif // _CCCL_DOXYGEN_INVOKED + + //! @rst + //! Sorts segments of key-value pairs into descending order. (``~2N`` auxiliary storage required). + //! + //! - The contents of the input data are not altered by the sorting operation + //! - When input a contiguous sequence of segments, a single sequence + //! ``segment_offsets`` (of length ``num_segments + 1``) can be aliased for both + //! 
the ``d_begin_offsets`` and ``d_end_offsets`` parameters (where the latter is + //! specified as ``segment_offsets + 1``). + //! - An optional bit subrange ``[begin_bit, end_bit)`` of differentiating key + //! bits can be specified. This can reduce overall sorting overhead and + //! yield a corresponding performance improvement. + //! - Let ``in`` be one of ``{d_keys_in, d_values_in}`` and `out` be any of + //! ``{d_keys_out, d_values_out}``. The range ``[out, out + num_items)`` shall + //! not overlap ``[in, in + num_items)``, + //! ``[d_begin_offsets, d_begin_offsets + num_segments)`` nor + //! ``[d_end_offsets, d_end_offsets + num_segments)`` in any way. + //! - @devicestorageNP For sorting using only ``O(P)`` temporary storage, see + //! the sorting interface using DoubleBuffer wrappers below. + //! - Segments are not required to be contiguous. For all index values ``i`` + //! outside the specified segments ``d_keys_in[i]``, ``d_values_in[i]``, + //! ``d_keys_out[i]``, ``d_values_out[i]`` will not be accessed nor modified. + //! - @devicestorage + //! + //! Snippet + //! +++++++++++++++++++++++++++++++++++++++++++++ + //! + //! The code snippet below illustrates the batched sorting of three segments + //! (with one zero-length segment) of ``int`` keys with associated vector of ``int`` values. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! // Declare, allocate, and initialize device-accessible pointers + //! // for sorting data + //! int num_items; // e.g., 7 + //! int num_segments; // e.g., 3 + //! int *d_offsets; // e.g., [0, 3, 3, 7] + //! int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9] + //! int *d_keys_out; // e.g., [-, -, -, -, -, -, -] + //! int *d_values_in; // e.g., [0, 1, 2, 3, 4, 5, 6] + //! int *d_values_out; // e.g., [-, -, -, -, -, -, -] + //! ... + //! + //! // Determine temporary device storage requirements + //! void *d_temp_storage = nullptr; + //! size_t temp_storage_bytes = 0; + //! 
cub::DeviceSegmentedRadixSort::SortPairsDescending( + //! d_temp_storage, temp_storage_bytes, + //! d_keys_in, d_keys_out, d_values_in, d_values_out, + //! num_items, num_segments, d_offsets, d_offsets + 1); + //! + //! // Allocate temporary storage + //! cudaMalloc(&d_temp_storage, temp_storage_bytes); + //! + //! // Run sorting operation + //! cub::DeviceSegmentedRadixSort::SortPairsDescending( + //! d_temp_storage, temp_storage_bytes, + //! d_keys_in, d_keys_out, d_values_in, d_values_out, + //! num_items, num_segments, d_offsets, d_offsets + 1); + //! + //! // d_keys_out <-- [8, 7, 6, 9, 5, 3, 0] + //! // d_values_out <-- [0, 2, 1, 6, 3, 4, 5] + //! + //! @endrst + //! + //! @tparam KeyT + //! **[inferred]** Key type + //! + //! @tparam ValueT + //! **[inferred]** Value type + //! + //! @tparam BeginOffsetIteratorT + //! **[inferred]** Random-access input iterator type for reading segment + //! beginning offsets @iterator + //! + //! @tparam EndOffsetIteratorT + //! **[inferred]** Random-access input iterator type for reading segment + //! ending offsets @iterator + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in] d_keys_in + //! Device-accessible pointer to the input data of key data to sort + //! + //! @param[out] d_keys_out + //! Device-accessible pointer to the sorted output sequence of key data + //! + //! @param[in] d_values_in + //! Device-accessible pointer to the corresponding input sequence of + //! associated value items + //! + //! @param[out] d_values_out + //! Device-accessible pointer to the correspondingly-reordered output + //! sequence of associated value items + //! + //! @param[in] num_items + //! 
The total number of items within the segmented array, including items not + //! covered by segments. `num_items` should match the largest element within + //! the range `[d_end_offsets, d_end_offsets + num_segments)`. + //! + //! @param[in] num_segments + //! The number of segments that comprise the sorting data + //! + //! @param[in] d_begin_offsets + //! @rst + //! Random-access input iterator to the sequence of beginning offsets of + //! length ``num_segments``, such that ``d_begin_offsets[i]`` is the first + //! element of the *i*\ :sup:`th` data segment in ``d_keys_*`` and ``d_values_*`` + //! @endrst + //! + //! @param[in] d_end_offsets + //! @rst + //! Random-access input iterator to the sequence of ending offsets of length + //! ``num_segments``, such that ``d_end_offsets[i] - 1`` is the last element of + //! the *i*\ :sup:`th` data segment in ``d_keys_*`` and ``d_values_*``. + //! If ``d_end_offsets[i] - 1 <= d_begin_offsets[i]``, the *i*\ :sup:`th` is considered empty. + //! @endrst + //! + //! @param[in] begin_bit + //! **[optional]** The least-significant bit index (inclusive) needed for key comparison + //! + //! @param[in] end_bit + //! **[optional]** The most-significant bit index (exclusive) needed for key + //! comparison (e.g., `sizeof(unsigned int) * 8`) + //! + //! @param[in] stream + //! @rst + //! **[optional]** CUDA stream to launch kernels within. Default is stream\ :sub:`0`. + //! 
@endrst + template + CUB_RUNTIME_FUNCTION static cudaError_t SortPairsDescending( + void* d_temp_storage, + size_t& temp_storage_bytes, + const KeyT* d_keys_in, + KeyT* d_keys_out, + const ValueT* d_values_in, + ValueT* d_values_out, + int num_items, + int num_segments, + BeginOffsetIteratorT d_begin_offsets, + EndOffsetIteratorT d_end_offsets, + int begin_bit = 0, + int end_bit = sizeof(KeyT) * 8, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + + // Signed integer type for global offsets + using OffsetT = int; + + DoubleBuffer d_keys(const_cast(d_keys_in), d_keys_out); + DoubleBuffer d_values(const_cast(d_values_in), d_values_out); + + return DispatchSegmentedRadixSort::Dispatch( + d_temp_storage, + temp_storage_bytes, + d_keys, + d_values, + num_items, + num_segments, + d_begin_offsets, + d_end_offsets, + begin_bit, + end_bit, + false, + stream); + } + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + template + CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t SortPairsDescending( + void* d_temp_storage, + size_t& temp_storage_bytes, + const KeyT* d_keys_in, + KeyT* d_keys_out, + const ValueT* d_values_in, + ValueT* d_values_out, + int num_items, + int num_segments, + BeginOffsetIteratorT d_begin_offsets, + EndOffsetIteratorT d_end_offsets, + int begin_bit, + int end_bit, + cudaStream_t stream, + bool debug_synchronous) + { + CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG + + return SortPairsDescending( + d_temp_storage, + temp_storage_bytes, + d_keys_in, + d_keys_out, + d_values_in, + d_values_out, + num_items, + num_segments, + d_begin_offsets, + d_end_offsets, + begin_bit, + end_bit, + stream); + } +#endif // _CCCL_DOXYGEN_INVOKED + + //! @rst + //! Sorts segments of key-value pairs into descending order. (``~N`` auxiliary storage required). + //! + //! - The sorting operation is given a pair of key buffers and a corresponding + //! pair of associated value buffers. 
Each pair is managed by a DoubleBuffer + //! structure that indicates which of the two buffers is "current" (and thus + //! contains the input data to be sorted). + //! - The contents of both buffers within each pair may be altered by the + //! sorting operation. + //! - Upon completion, the sorting operation will update the "current" + //! indicator within each DoubleBuffer wrapper to reference which of the two + //! buffers now contains the sorted output sequence (a function of the number + //! of key bits specified and the targeted device architecture). + //! - When input a contiguous sequence of segments, a single sequence + //! ``segment_offsets`` (of length ``num_segments + 1``) can be aliased for both + //! the ``d_begin_offsets`` and ``d_end_offsets`` parameters (where the latter is + //! specified as ``segment_offsets + 1``). + //! - An optional bit subrange ``[begin_bit, end_bit)`` of differentiating key + //! bits can be specified. This can reduce overall sorting overhead and + //! yield a corresponding performance improvement. + //! - Let ``cur`` be one of ``{d_keys.Current(), d_values.Current()}`` and ``alt`` + //! be any of ``{d_keys.Alternate(), d_values.Alternate()}``. The range + //! ``[cur, cur + num_items)`` shall not overlap + //! ``[alt, alt + num_items)``. Both ranges shall not overlap + //! ``[d_begin_offsets, d_begin_offsets + num_segments)`` nor + //! ``[d_end_offsets, d_end_offsets + num_segments)`` in any way. + //! - Segments are not required to be contiguous. For all index values ``i`` + //! outside the specified segments ``d_keys.Current()[i]``, + //! ``d_values.Current()[i]``, ``d_keys.Alternate()[i]``, + //! ``d_values.Alternate()[i]`` will not be accessed nor modified. + //! not to be modified. + //! - @devicestorageP + //! - @devicestorage + //! + //! Snippet + //! +++++++++++++++++++++++++++++++++++++++++++++ + //! + //! The code snippet below illustrates the batched sorting of three segments + //! 
(with one zero-length segment) of ``int`` keys with associated vector of ``int`` values. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! // Declare, allocate, and initialize device-accessible pointers + //! // for sorting data + //! int num_items; // e.g., 7 + //! int num_segments; // e.g., 3 + //! int *d_offsets; // e.g., [0, 3, 3, 7] + //! int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9] + //! int *d_key_alt_buf; // e.g., [-, -, -, -, -, -, -] + //! int *d_value_buf; // e.g., [0, 1, 2, 3, 4, 5, 6] + //! int *d_value_alt_buf; // e.g., [-, -, -, -, -, -, -] + //! ... + //! + //! // Create a set of DoubleBuffers to wrap pairs of device pointers + //! cub::DoubleBuffer d_keys(d_key_buf, d_key_alt_buf); + //! cub::DoubleBuffer d_values(d_value_buf, d_value_alt_buf); + //! + //! // Determine temporary device storage requirements + //! void *d_temp_storage = nullptr; + //! size_t temp_storage_bytes = 0; + //! cub::DeviceSegmentedRadixSort::SortPairsDescending( + //! d_temp_storage, temp_storage_bytes, d_keys, d_values, + //! num_items, num_segments, d_offsets, d_offsets + 1); + //! + //! // Allocate temporary storage + //! cudaMalloc(&d_temp_storage, temp_storage_bytes); + //! + //! // Run sorting operation + //! cub::DeviceSegmentedRadixSort::SortPairsDescending( + //! d_temp_storage, temp_storage_bytes, d_keys, d_values, + //! num_items, num_segments, d_offsets, d_offsets + 1); + //! + //! // d_keys.Current() <-- [8, 7, 6, 9, 5, 3, 0] + //! // d_values.Current() <-- [0, 2, 1, 6, 3, 4, 5] + //! + //! @endrst + //! + //! @tparam KeyT + //! **[inferred]** Key type + //! + //! @tparam ValueT + //! **[inferred]** Value type + //! + //! @tparam BeginOffsetIteratorT + //! **[inferred]** Random-access input iterator type for reading segment + //! beginning offsets @iterator + //! + //! @tparam EndOffsetIteratorT + //! **[inferred]** Random-access input iterator type for reading segment + //! ending offsets @iterator + //! + //! 
@param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in,out] d_keys + //! Reference to the double-buffer of keys whose "current" device-accessible + //! buffer contains the unsorted input keys and, upon return, is updated to + //! point to the sorted output keys + //! + //! @param[in,out] d_values + //! Double-buffer of values whose "current" device-accessible buffer + //! contains the unsorted input values and, upon return, is updated to point + //! to the sorted output values + //! + //! @param[in] num_items + //! The total number of items within the segmented array, including items not + //! covered by segments. `num_items` should match the largest element within + //! the range `[d_end_offsets, d_end_offsets + num_segments)`. + //! + //! @param[in] num_segments + //! The number of segments that comprise the sorting data + //! + //! @param[in] d_begin_offsets + //! @rst + //! Random-access input iterator to the sequence of beginning offsets of + //! length ``num_segments``, such that ``d_begin_offsets[i]`` is the first + //! element of the *i*\ :sup:`th` data segment in ``d_keys_*`` and ``d_values_*`` + //! @endrst + //! + //! @param[in] d_end_offsets + //! @rst + //! Random-access input iterator to the sequence of ending offsets of length + //! ``num_segments``, such that ``d_end_offsets[i] - 1`` is the last element of + //! the *i*\ :sup:`th` data segment in ``d_keys_*`` and ``d_values_*``. + //! If ``d_end_offsets[i] - 1 <= d_begin_offsets[i]``, the *i*\ :sup:`th` is considered empty. + //! @endrst + //! + //! @param[in] begin_bit + //! **[optional]** The least-significant bit index (inclusive) needed for key comparison + //! + //! @param[in] end_bit + //! 
**[optional]** The most-significant bit index (exclusive) needed for key + //! comparison (e.g., `sizeof(unsigned int) * 8`) + //! + //! @param[in] stream + //! @rst + //! **[optional]** CUDA stream to launch kernels within. Default is stream\ :sub:`0`. + //! @endrst + template + CUB_RUNTIME_FUNCTION static cudaError_t SortPairsDescending( + void* d_temp_storage, + size_t& temp_storage_bytes, + DoubleBuffer& d_keys, + DoubleBuffer& d_values, + int num_items, + int num_segments, + BeginOffsetIteratorT d_begin_offsets, + EndOffsetIteratorT d_end_offsets, + int begin_bit = 0, + int end_bit = sizeof(KeyT) * 8, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + + // Signed integer type for global offsets + using OffsetT = int; + + return DispatchSegmentedRadixSort::Dispatch( + d_temp_storage, + temp_storage_bytes, + d_keys, + d_values, + num_items, + num_segments, + d_begin_offsets, + d_end_offsets, + begin_bit, + end_bit, + true, + stream); + } + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + template + CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t SortPairsDescending( + void* d_temp_storage, + size_t& temp_storage_bytes, + DoubleBuffer& d_keys, + DoubleBuffer& d_values, + int num_items, + int num_segments, + BeginOffsetIteratorT d_begin_offsets, + EndOffsetIteratorT d_end_offsets, + int begin_bit, + int end_bit, + cudaStream_t stream, + bool debug_synchronous) + { + CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG + + return SortPairsDescending( + d_temp_storage, + temp_storage_bytes, + d_keys, + d_values, + num_items, + num_segments, + d_begin_offsets, + d_end_offsets, + begin_bit, + end_bit, + stream); + } +#endif // _CCCL_DOXYGEN_INVOKED + + //! @} end member group + //! @name Keys-only + //! @{ + + //! @rst + //! Sorts segments of keys into ascending order. (``~2N`` auxiliary storage required) + //! + //! - The contents of the input data are not altered by the sorting operation + //! 
- An optional bit subrange ``[begin_bit, end_bit)`` of differentiating key + //! bits can be specified. This can reduce overall sorting overhead and + //! yield a corresponding performance improvement. + //! - When input a contiguous sequence of segments, a single sequence + //! ``segment_offsets`` (of length ``num_segments + 1``) can be aliased for both + //! the ``d_begin_offsets`` and ``d_end_offsets`` parameters (where the latter + //! is specified as ``segment_offsets + 1``). + //! - The range ``[d_keys_out, d_keys_out + num_items)`` shall not overlap + //! ``[d_keys_in, d_keys_in + num_items)``, + //! ``[d_begin_offsets, d_begin_offsets + num_segments)`` nor + //! ``[d_end_offsets, d_end_offsets + num_segments)`` in any way. + //! - @devicestorageNP For sorting using only ``O(P)`` temporary storage, see + //! the sorting interface using DoubleBuffer wrappers below. + //! - Segments are not required to be contiguous. For all index values ``i`` + //! outside the specified segments ``d_keys_in[i]``, ``d_keys_out[i]`` will not + //! be accessed nor modified. + //! - @devicestorage + //! + //! Snippet + //! +++++++++++++++++++++++++++++++++++++++++++++ + //! + //! The code snippet below illustrates the batched sorting of three segments + //! (with one zero-length segment) of `int` keys. + //! + //! .. code-block:: c++ + //! + //! #include + //! // or equivalently + //! + //! // Declare, allocate, and initialize device-accessible pointers + //! // for sorting data + //! int num_items; // e.g., 7 + //! int num_segments; // e.g., 3 + //! int *d_offsets; // e.g., [0, 3, 3, 7] + //! int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9] + //! int *d_keys_out; // e.g., [-, -, -, -, -, -, -] + //! ... + //! + //! // Determine temporary device storage requirements + //! void *d_temp_storage = nullptr; + //! size_t temp_storage_bytes = 0; + //! cub::DeviceSegmentedRadixSort::SortKeys( + //! d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, + //! 
num_items, num_segments, d_offsets, d_offsets + 1); + //! + //! // Allocate temporary storage + //! cudaMalloc(&d_temp_storage, temp_storage_bytes); + //! + //! // Run sorting operation + //! cub::DeviceSegmentedRadixSort::SortKeys( + //! d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, + //! num_items, num_segments, d_offsets, d_offsets + 1); + //! + //! // d_keys_out <-- [6, 7, 8, 0, 3, 5, 9] + //! + //! @endrst + //! + //! @tparam KeyT + //! **[inferred]** Key type + //! + //! @tparam BeginOffsetIteratorT + //! **[inferred]** Random-access input iterator type for reading segment + //! beginning offsets @iterator + //! + //! @tparam EndOffsetIteratorT + //! **[inferred]** Random-access input iterator type for reading segment + //! ending offsets @iterator + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. + //! When `nullptr`, the required allocation size is written to `temp_storage_bytes` and no work is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in] d_keys_in + //! Device-accessible pointer to the input data of key data to sort + //! + //! @param[out] d_keys_out + //! Device-accessible pointer to the sorted output sequence of key data + //! + //! @param[in] num_items + //! The total number of items within the segmented array, including items not + //! covered by segments. `num_items` should match the largest element within + //! the range `[d_end_offsets, d_end_offsets + num_segments)`. + //! + //! @param[in] num_segments + //! The number of segments that comprise the sorting data + //! + //! @param[in] d_begin_offsets + //! @rst + //! Random-access input iterator to the sequence of beginning offsets of + //! length ``num_segments``, such that ``d_begin_offsets[i]`` is the first + //! element of the *i*\ :sup:`th` data segment in ``d_keys_*`` and ``d_values_*`` + //! @endrst + //! + //! @param[in] d_end_offsets + //! 
@rst + //! Random-access input iterator to the sequence of ending offsets of length + //! ``num_segments``, such that ``d_end_offsets[i] - 1`` is the last element of + //! the *i*\ :sup:`th` data segment in ``d_keys_*`` and ``d_values_*``. + //! If ``d_end_offsets[i] - 1 <= d_begin_offsets[i]``, the *i*\ :sup:`th` is considered empty. + //! @endrst + //! + //! @param[in] begin_bit + //! **[optional]** The least-significant bit index (inclusive) needed for key comparison + //! + //! @param[in] end_bit + //! **[optional]** The most-significant bit index (exclusive) needed for key + //! comparison (e.g., `sizeof(unsigned int) * 8`) + //! + //! @param[in] stream + //! @rst + //! **[optional]** CUDA stream to launch kernels within. Default is stream\ :sub:`0`. + //! @endrst + template + CUB_RUNTIME_FUNCTION static cudaError_t SortKeys( + void* d_temp_storage, + size_t& temp_storage_bytes, + const KeyT* d_keys_in, + KeyT* d_keys_out, + int num_items, + int num_segments, + BeginOffsetIteratorT d_begin_offsets, + EndOffsetIteratorT d_end_offsets, + int begin_bit = 0, + int end_bit = sizeof(KeyT) * 8, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + + // Signed integer type for global offsets + using OffsetT = int; + + // Null value type + DoubleBuffer d_keys(const_cast(d_keys_in), d_keys_out); + DoubleBuffer d_values; + + return DispatchSegmentedRadixSort::Dispatch( + d_temp_storage, + temp_storage_bytes, + d_keys, + d_values, + num_items, + num_segments, + d_begin_offsets, + d_end_offsets, + begin_bit, + end_bit, + false, + stream); + } + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + template + CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t SortKeys( + void* d_temp_storage, + size_t& temp_storage_bytes, + const KeyT* d_keys_in, + KeyT* d_keys_out, + int num_items, + int num_segments, + BeginOffsetIteratorT d_begin_offsets, + EndOffsetIteratorT d_end_offsets, + int begin_bit, + int 
end_bit, + cudaStream_t stream, + bool debug_synchronous) + { + CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG + + return SortKeys( + d_temp_storage, + temp_storage_bytes, + d_keys_in, + d_keys_out, + num_items, + num_segments, + d_begin_offsets, + d_end_offsets, + begin_bit, + end_bit, + stream); + } +#endif // _CCCL_DOXYGEN_INVOKED + + //! @rst + //! Sorts segments of keys into ascending order. (``~N`` auxiliary storage required). + //! + //! - The sorting operation is given a pair of key buffers managed by a + //! DoubleBuffer structure that indicates which of the two buffers is + //! "current" (and thus contains the input data to be sorted). + //! - The contents of both buffers may be altered by the sorting operation. + //! - Upon completion, the sorting operation will update the "current" + //! indicator within the DoubleBuffer wrapper to reference which of the two + //! buffers now contains the sorted output sequence (a function of the + //! number of key bits specified and the targeted device architecture). + //! - When input a contiguous sequence of segments, a single sequence + //! ``segment_offsets`` (of length ``num_segments + 1``) can be aliased for both + //! the ``d_begin_offsets`` and ``d_end_offsets`` parameters (where the latter + //! is specified as ``segment_offsets + 1``). + //! - An optional bit subrange ``[begin_bit, end_bit)`` of differentiating key + //! bits can be specified. This can reduce overall sorting overhead and + //! yield a corresponding performance improvement. + //! - Let ``cur = d_keys.Current()`` and ``alt = d_keys.Alternate()``. + //! The range ``[cur, cur + num_items)`` shall not overlap + //! ``[alt, alt + num_items)``. Both ranges shall not overlap + //! ``[d_begin_offsets, d_begin_offsets + num_segments)`` nor + //! ``[d_end_offsets, d_end_offsets + num_segments)`` in any way. + //! - Segments are not required to be contiguous. For all index values ``i`` + //! outside the specified segments ``d_keys.Current()[i]``, + //! 
``d_keys[i].Alternate()[i]`` will not be accessed nor modified. + //! - @devicestorageP + //! - @devicestorage + //! + //! Snippet + //! +++++++++++++++++++++++++++++++++++++++++++++ + //! + //! The code snippet below illustrates the batched sorting of three segments + //! (with one zero-length segment) of ``int`` keys. + //! + //! .. code-block:: c++ + //! + //! #include + //! // or equivalently + //! + //! // Declare, allocate, and initialize device-accessible pointers for + //! // sorting data + //! int num_items; // e.g., 7 + //! int num_segments; // e.g., 3 + //! int *d_offsets; // e.g., [0, 3, 3, 7] + //! int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9] + //! int *d_key_alt_buf; // e.g., [-, -, -, -, -, -, -] + //! ... + //! + //! // Create a DoubleBuffer to wrap the pair of device pointers + //! cub::DoubleBuffer d_keys(d_key_buf, d_key_alt_buf); + //! + //! // Determine temporary device storage requirements + //! void *d_temp_storage = nullptr; + //! size_t temp_storage_bytes = 0; + //! cub::DeviceSegmentedRadixSort::SortKeys( + //! d_temp_storage, temp_storage_bytes, d_keys, + //! num_items, num_segments, d_offsets, d_offsets + 1); + //! + //! // Allocate temporary storage + //! cudaMalloc(&d_temp_storage, temp_storage_bytes); + //! + //! // Run sorting operation + //! cub::DeviceSegmentedRadixSort::SortKeys( + //! d_temp_storage, temp_storage_bytes, d_keys, + //! num_items, num_segments, d_offsets, d_offsets + 1); + //! + //! // d_keys.Current() <-- [6, 7, 8, 0, 3, 5, 9] + //! + //! @endrst + //! + //! @tparam KeyT + //! **[inferred]** Key type + //! + //! @tparam BeginOffsetIteratorT + //! **[inferred]** Random-access input iterator type for reading segment + //! beginning offsets @iterator + //! + //! @tparam EndOffsetIteratorT + //! **[inferred]** Random-access input iterator type for reading segment + //! ending offsets @iterator + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! 
required allocation size is written to `temp_storage_bytes` and no work + //! is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in,out] d_keys + //! Reference to the double-buffer of keys whose "current" device-accessible + //! buffer contains the unsorted input keys and, upon return, is updated to + //! point to the sorted output keys + //! + //! @param[in] num_items + //! The total number of items within the segmented array, including items not + //! covered by segments. `num_items` should match the largest element within + //! the range `[d_end_offsets, d_end_offsets + num_segments)`. + //! + //! @param[in] num_segments + //! The number of segments that comprise the sorting data + //! + //! @param[in] d_begin_offsets + //! @rst + //! Random-access input iterator to the sequence of beginning offsets of + //! length ``num_segments``, such that ``d_begin_offsets[i]`` is the first + //! element of the *i*\ :sup:`th` data segment in ``d_keys_*`` and ``d_values_*`` + //! @endrst + //! + //! @param[in] d_end_offsets + //! @rst + //! Random-access input iterator to the sequence of ending offsets of length + //! ``num_segments``, such that ``d_end_offsets[i] - 1`` is the last element of + //! the *i*\ :sup:`th` data segment in ``d_keys_*`` and ``d_values_*``. + //! If ``d_end_offsets[i] - 1 <= d_begin_offsets[i]``, the *i*\ :sup:`th` is considered empty. + //! @endrst + //! + //! @param[in] begin_bit + //! **[optional]** The least-significant bit index (inclusive) + //! needed for key comparison + //! + //! @param[in] end_bit + //! **[optional]** The most-significant bit index (exclusive) needed for key + //! comparison (e.g., `sizeof(unsigned int) * 8`) + //! + //! @param[in] stream + //! @rst + //! **[optional]** CUDA stream to launch kernels within. Default is stream\ :sub:`0`. + //! 
@endrst + template + CUB_RUNTIME_FUNCTION static cudaError_t SortKeys( + void* d_temp_storage, + size_t& temp_storage_bytes, + DoubleBuffer& d_keys, + int num_items, + int num_segments, + BeginOffsetIteratorT d_begin_offsets, + EndOffsetIteratorT d_end_offsets, + int begin_bit = 0, + int end_bit = sizeof(KeyT) * 8, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + + // Signed integer type for global offsets + using OffsetT = int; + + // Null value type + DoubleBuffer d_values; + + return DispatchSegmentedRadixSort::Dispatch( + d_temp_storage, + temp_storage_bytes, + d_keys, + d_values, + num_items, + num_segments, + d_begin_offsets, + d_end_offsets, + begin_bit, + end_bit, + true, + stream); + } + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + template + CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t SortKeys( + void* d_temp_storage, + size_t& temp_storage_bytes, + DoubleBuffer& d_keys, + int num_items, + int num_segments, + BeginOffsetIteratorT d_begin_offsets, + EndOffsetIteratorT d_end_offsets, + int begin_bit, + int end_bit, + cudaStream_t stream, + bool debug_synchronous) + { + CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG + + return SortKeys( + d_temp_storage, + temp_storage_bytes, + d_keys, + num_items, + num_segments, + d_begin_offsets, + d_end_offsets, + begin_bit, + end_bit, + stream); + } +#endif // _CCCL_DOXYGEN_INVOKED + + //! @rst + //! Sorts segments of keys into descending order. (``~2N`` auxiliary storage required). + //! + //! - The contents of the input data are not altered by the sorting operation + //! - When input a contiguous sequence of segments, a single sequence + //! ``segment_offsets`` (of length ``num_segments + 1``) can be aliased for both + //! the ``d_begin_offsets`` and ``d_end_offsets`` parameters (where the latter + //! is specified as ``segment_offsets + 1``). + //! - An optional bit subrange ``[begin_bit, end_bit)`` of differentiating key + //! 
bits can be specified. This can reduce overall sorting overhead and + //! yield a corresponding performance improvement. + //! - The range ``[d_keys_out, d_keys_out + num_items)`` shall not overlap + //! ``[d_keys_in, d_keys_in + num_items)``, + //! ``[d_begin_offsets, d_begin_offsets + num_segments)`` nor + //! ``[d_end_offsets, d_end_offsets + num_segments)`` in any way. + //! - @devicestorageNP For sorting using only ``O(P)`` temporary storage, see + //! the sorting interface using DoubleBuffer wrappers below. + //! - Segments are not required to be contiguous. For all index values ``i`` + //! outside the specified segments ``d_keys_in[i]``, ``d_keys_out[i]`` will not + //! be accessed nor modified. + //! - @devicestorage + //! + //! Snippet + //! +++++++++++++++++++++++++++++++++++++++++++++ + //! + //! The code snippet below illustrates the batched sorting of three segments + //! (with one zero-length segment) of ``int`` keys. + //! + //! .. code-block:: c++ + //! + //! #include + //! // or equivalently + //! + //! // Declare, allocate, and initialize device-accessible pointers + //! // for sorting data + //! int num_items; // e.g., 7 + //! int num_segments; // e.g., 3 + //! int *d_offsets; // e.g., [0, 3, 3, 7] + //! int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9] + //! int *d_keys_out; // e.g., [-, -, -, -, -, -, -] + //! ... + //! + //! // Create a DoubleBuffer to wrap the pair of device pointers + //! cub::DoubleBuffer d_keys(d_key_buf, d_key_alt_buf); + //! + //! // Determine temporary device storage requirements + //! void *d_temp_storage = nullptr; + //! size_t temp_storage_bytes = 0; + //! cub::DeviceSegmentedRadixSort::SortKeysDescending( + //! d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, + //! num_items, num_segments, d_offsets, d_offsets + 1); + //! + //! // Allocate temporary storage + //! cudaMalloc(&d_temp_storage, temp_storage_bytes); + //! + //! // Run sorting operation + //! cub::DeviceSegmentedRadixSort::SortKeysDescending( + //! 
d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, + //! num_items, num_segments, d_offsets, d_offsets + 1); + //! + //! // d_keys_out <-- [8, 7, 6, 9, 5, 3, 0] + //! + //! @endrst + //! + //! @tparam KeyT + //! **[inferred]** Key type + //! + //! @tparam BeginOffsetIteratorT + //! **[inferred]** Random-access input iterator type for reading segment beginning offsets @iterator + //! + //! @tparam EndOffsetIteratorT + //! **[inferred]** Random-access input iterator type for reading segment ending offsets @iterator + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in] d_keys_in + //! Device-accessible pointer to the input data of key data to sort + //! + //! @param[out] d_keys_out + //! Device-accessible pointer to the sorted output sequence of key data + //! + //! @param[in] num_items + //! The total number of items within the segmented array, including items not + //! covered by segments. `num_items` should match the largest element within + //! the range `[d_end_offsets, d_end_offsets + num_segments)`. + //! + //! @param[in] num_segments + //! The number of segments that comprise the sorting data + //! + //! @param[in] d_begin_offsets + //! @rst + //! Random-access input iterator to the sequence of beginning offsets of + //! length ``num_segments``, such that ``d_begin_offsets[i]`` is the first + //! element of the *i*\ :sup:`th` data segment in ``d_keys_*`` and ``d_values_*`` + //! @endrst + //! + //! @param[in] d_end_offsets + //! @rst + //! Random-access input iterator to the sequence of ending offsets of length + //! ``num_segments``, such that ``d_end_offsets[i] - 1`` is the last element of + //! the *i*\ :sup:`th` data segment in ``d_keys_*`` and ``d_values_*``. + //! 
If ``d_end_offsets[i] - 1 <= d_begin_offsets[i]``, the *i*\ :sup:`th` is considered empty. + //! @endrst + //! + //! @param[in] begin_bit + //! **[optional]** The least-significant bit index (inclusive) needed for key comparison + //! + //! @param[in] end_bit + //! **[optional]** The most-significant bit index (exclusive) needed for key + //! comparison (e.g., sizeof(unsigned int) * 8) + //! + //! @param[in] stream + //! @rst + //! **[optional]** CUDA stream to launch kernels within. Default is stream\ :sub:`0`. + //! @endrst + template + CUB_RUNTIME_FUNCTION static cudaError_t SortKeysDescending( + void* d_temp_storage, + size_t& temp_storage_bytes, + const KeyT* d_keys_in, + KeyT* d_keys_out, + int num_items, + int num_segments, + BeginOffsetIteratorT d_begin_offsets, + EndOffsetIteratorT d_end_offsets, + int begin_bit = 0, + int end_bit = sizeof(KeyT) * 8, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + + // Signed integer type for global offsets + using OffsetT = int; + + DoubleBuffer d_keys(const_cast(d_keys_in), d_keys_out); + DoubleBuffer d_values; + + return DispatchSegmentedRadixSort::Dispatch( + d_temp_storage, + temp_storage_bytes, + d_keys, + d_values, + num_items, + num_segments, + d_begin_offsets, + d_end_offsets, + begin_bit, + end_bit, + false, + stream); + } + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + template + CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t SortKeysDescending( + void* d_temp_storage, + size_t& temp_storage_bytes, + const KeyT* d_keys_in, + KeyT* d_keys_out, + int num_items, + int num_segments, + BeginOffsetIteratorT d_begin_offsets, + EndOffsetIteratorT d_end_offsets, + int begin_bit, + int end_bit, + cudaStream_t stream, + bool debug_synchronous) + { + CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG + + return SortKeysDescending( + d_temp_storage, + temp_storage_bytes, + d_keys_in, + d_keys_out, + num_items, + num_segments, + d_begin_offsets, 
+ d_end_offsets, + begin_bit, + end_bit, + stream); + } +#endif // _CCCL_DOXYGEN_INVOKED + + //! @rst + //! Sorts segments of keys into descending order. (``~N`` auxiliary storage required). + //! + //! - The sorting operation is given a pair of key buffers managed by a + //! DoubleBuffer structure that indicates which of the two buffers is + //! "current" (and thus contains the input data to be sorted). + //! - The contents of both buffers may be altered by the sorting operation. + //! - Upon completion, the sorting operation will update the "current" + //! indicator within the DoubleBuffer wrapper to reference which of the two + //! buffers now contains the sorted output sequence (a function of the + //! number of key bits specified and the targeted device architecture). + //! - When input a contiguous sequence of segments, a single sequence + //! ``segment_offsets`` (of length ``num_segments + 1``) can be aliased + //! for both the ``d_begin_offsets`` and ``d_end_offsets`` parameters (where + //! the latter is specified as ``segment_offsets + 1``). + //! - An optional bit subrange ``[begin_bit, end_bit)`` of differentiating key + //! bits can be specified. This can reduce overall sorting overhead and + //! yield a corresponding performance improvement. + //! - Let ``cur = d_keys.Current()`` and ``alt = d_keys.Alternate()``. + //! The range ``[cur, cur + num_items)`` shall not overlap + //! ``[alt, alt + num_items)``. Both ranges shall not overlap + //! ``[d_begin_offsets, d_begin_offsets + num_segments)`` nor + //! ``[d_end_offsets, d_end_offsets + num_segments)`` in any way. + //! - Segments are not required to be contiguous. For all index values ``i`` + //! outside the specified segments ``d_keys.Current()[i]``, + //! ``d_keys[i].Alternate()[i]`` will not be accessed nor modified. + //! - @devicestorageP + //! - @devicestorage + //! + //! Snippet + //! +++++++++++++++++++++++++++++++++++++++++++++ + //! + //! 
The code snippet below illustrates the batched sorting of three segments + //! (with one zero-length segment) of `int` keys. + //! + //! .. code-block:: c++ + //! + //! #include + //! // or equivalently + //! + //! // Declare, allocate, and initialize device-accessible pointers + //! // for sorting data + //! int num_items; // e.g., 7 + //! int num_segments; // e.g., 3 + //! int *d_offsets; // e.g., [0, 3, 3, 7] + //! int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9] + //! int *d_key_alt_buf; // e.g., [-, -, -, -, -, -, -] + //! ... + //! + //! // Create a DoubleBuffer to wrap the pair of device pointers + //! cub::DoubleBuffer d_keys(d_key_buf, d_key_alt_buf); + //! + //! // Determine temporary device storage requirements + //! void *d_temp_storage = nullptr; + //! size_t temp_storage_bytes = 0; + //! cub::DeviceSegmentedRadixSort::SortKeysDescending( + //! d_temp_storage, temp_storage_bytes, d_keys, + //! num_items, num_segments, d_offsets, d_offsets + 1); + //! + //! // Allocate temporary storage + //! cudaMalloc(&d_temp_storage, temp_storage_bytes); + //! + //! // Run sorting operation + //! cub::DeviceSegmentedRadixSort::SortKeysDescending( + //! d_temp_storage, temp_storage_bytes, d_keys, + //! num_items, num_segments, d_offsets, d_offsets + 1); + //! + //! // d_keys.Current() <-- [8, 7, 6, 9, 5, 3, 0] + //! + //! @endrst + //! + //! @tparam KeyT + //! **[inferred]** Key type + //! + //! @tparam BeginOffsetIteratorT + //! **[inferred]** Random-access input iterator type for reading segment + //! beginning offsets @iterator + //! + //! @tparam EndOffsetIteratorT + //! **[inferred]** Random-access input iterator type for reading segment + //! ending offsets @iterator + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work is done. + //! + //! @param[in,out] temp_storage_bytes + //! 
Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in,out] d_keys + //! Reference to the double-buffer of keys whose "current" device-accessible + //! buffer contains the unsorted input keys and, upon return, is updated to + //! point to the sorted output keys + //! + //! @param[in] num_items + //! The total number of items within the segmented array, including items not + //! covered by segments. `num_items` should match the largest element within + //! the range `[d_end_offsets, d_end_offsets + num_segments)`. + //! + //! @param[in] num_segments + //! The number of segments that comprise the sorting data + //! + //! @param[in] d_begin_offsets + //! @rst + //! Random-access input iterator to the sequence of beginning offsets of + //! length ``num_segments``, such that ``d_begin_offsets[i]`` is the first + //! element of the *i*\ :sup:`th` data segment in ``d_keys_*`` and ``d_values_*`` + //! @endrst + //! + //! @param[in] d_end_offsets + //! @rst + //! Random-access input iterator to the sequence of ending offsets of length + //! ``num_segments``, such that ``d_end_offsets[i] - 1`` is the last element of + //! the *i*\ :sup:`th` data segment in ``d_keys_*`` and ``d_values_*``. + //! If ``d_end_offsets[i] - 1 <= d_begin_offsets[i]``, the *i*\ :sup:`th` is considered empty. + //! @endrst + //! + //! @param[in] begin_bit + //! **[optional]** The least-significant bit index (inclusive) needed for key comparison + //! + //! @param[in] end_bit + //! **[optional]** The most-significant bit index (exclusive) needed for key + //! comparison (e.g., `sizeof(unsigned int) * 8`) + //! + //! @param[in] stream + //! @rst + //! **[optional]** CUDA stream to launch kernels within. Default is stream\ :sub:`0`. + //! 
@endrst + template + CUB_RUNTIME_FUNCTION static cudaError_t SortKeysDescending( + void* d_temp_storage, + size_t& temp_storage_bytes, + DoubleBuffer& d_keys, + int num_items, + int num_segments, + BeginOffsetIteratorT d_begin_offsets, + EndOffsetIteratorT d_end_offsets, + int begin_bit = 0, + int end_bit = sizeof(KeyT) * 8, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName()); + + // Signed integer type for global offsets + using OffsetT = int; + + // Null value type + DoubleBuffer d_values; + + return DispatchSegmentedRadixSort::Dispatch( + d_temp_storage, + temp_storage_bytes, + d_keys, + d_values, + num_items, + num_segments, + d_begin_offsets, + d_end_offsets, + begin_bit, + end_bit, + true, + stream); + } + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + template + CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t SortKeysDescending( + void* d_temp_storage, + size_t& temp_storage_bytes, + DoubleBuffer& d_keys, + int num_items, + int num_segments, + BeginOffsetIteratorT d_begin_offsets, + EndOffsetIteratorT d_end_offsets, + int begin_bit, + int end_bit, + cudaStream_t stream, + bool debug_synchronous) + { + CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG + + return SortKeysDescending( + d_temp_storage, + temp_storage_bytes, + d_keys, + num_items, + num_segments, + d_begin_offsets, + d_end_offsets, + begin_bit, + end_bit, + stream); + } +#endif // _CCCL_DOXYGEN_INVOKED + + //! 
@} end member group +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/device/device_segmented_reduce.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/device/device_segmented_reduce.cuh new file mode 100644 index 0000000000000000000000000000000000000000..591930ad01cb0a2dae10c013968f61af399b8e87 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/device/device_segmented_reduce.cuh @@ -0,0 +1,1073 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +//! @file +//! cub::DeviceSegmentedReduce provides device-wide, parallel operations for computing a batched reduction across +//! multiple sequences of data items residing within device-accessible memory. + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include +#include + +#include + +#include + +CUB_NAMESPACE_BEGIN + +//! @rst +//! DeviceSegmentedReduce provides device-wide, parallel operations for +//! computing a reduction across multiple sequences of data items +//! residing within device-accessible memory. +//! +//! Overview +//! +++++++++++++++++++++++++++++++++++++++++++++ +//! +//! A `reduction `_ +//! (or *fold*) uses a binary combining operator to compute a single aggregate +//! from a sequence of input elements. +//! +//! Usage Considerations +//! +++++++++++++++++++++++++++++++++++++++++++++ +//! +//! @cdp_class{DeviceSegmentedReduce} +//! +//! 
@endrst +struct DeviceSegmentedReduce +{ +private: + template + CUB_RUNTIME_FUNCTION static cudaError_t segmented_reduce( + ::cuda::std::false_type, + void* d_temp_storage, + std::size_t& temp_storage_bytes, + InputIteratorT d_in, + OutputIteratorT d_out, + int num_segments, + BeginOffsetIteratorT d_begin_offsets, + EndOffsetIteratorT d_end_offsets, + ReductionOpT reduction_op, + InitT initial_value, + cudaStream_t stream); + + template + CUB_RUNTIME_FUNCTION static cudaError_t segmented_reduce( + ::cuda::std::true_type, + void* d_temp_storage, + std::size_t& temp_storage_bytes, + InputIteratorT d_in, + OutputIteratorT d_out, + int num_segments, + BeginOffsetIteratorT d_begin_offsets, + EndOffsetIteratorT d_end_offsets, + ReductionOpT reduction_op, + InitT initial_value, + cudaStream_t stream) + { + return DispatchSegmentedReduce< + InputIteratorT, + OutputIteratorT, + BeginOffsetIteratorT, + EndOffsetIteratorT, + OffsetT, + ReductionOpT, + Ts...>::Dispatch(d_temp_storage, + temp_storage_bytes, + d_in, + d_out, + num_segments, + d_begin_offsets, + d_end_offsets, + reduction_op, + initial_value, + stream); + } + +public: + //! @rst + //! Computes a device-wide segmented reduction using the specified + //! binary ``reduction_op`` functor. + //! + //! - Does not support binary reduction operators that are non-commutative. + //! - Provides "run-to-run" determinism for pseudo-associative reduction + //! (e.g., addition of floating point types) on the same GPU device. + //! However, results for pseudo-associative reduction may be inconsistent + //! from one device to a another device of a different compute-capability + //! because CUB can employ different tile-sizing for different architectures. + //! - When input a contiguous sequence of segments, a single sequence + //! ``segment_offsets`` (of length ``num_segments + 1``) can be aliased + //! for both the ``d_begin_offsets`` and ``d_end_offsets`` parameters (where + //! 
the latter is specified as ``segment_offsets + 1``). + //! - Let ``s`` be in ``[0, num_segments)``. The range + //! ``[d_out + d_begin_offsets[s], d_out + d_end_offsets[s])`` shall not + //! overlap ``[d_in + d_begin_offsets[s], d_in + d_end_offsets[s])``, + //! ``[d_begin_offsets, d_begin_offsets + num_segments)`` nor + //! ``[d_end_offsets, d_end_offsets + num_segments)``. + //! - @devicestorage + //! + //! Snippet + //! +++++++++++++++++++++++++++++++++++++++++++++ + //! + //! The code snippet below illustrates a custom min-reduction of a device vector of ``int`` data elements. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_segmented_reduce_api.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin segmented-reduce-reduce + //! :end-before: example-end segmented-reduce-reduce + //! + //! @endrst + //! + //! @tparam InputIteratorT + //! **[inferred]** Random-access input iterator type for reading input items @iterator + //! + //! @tparam OutputIteratorT + //! **[inferred]** Output iterator type for recording the reduced aggregate @iterator + //! + //! @tparam BeginOffsetIteratorT + //! **[inferred]** Random-access input iterator type for reading segment beginning offsets @iterator + //! + //! @tparam EndOffsetIteratorT + //! **[inferred]** Random-access input iterator type for reading segment ending offsets @iterator + //! + //! @tparam ReductionOpT + //! **[inferred]** Binary reduction functor type having member `T operator()(const T &a, const T &b)` + //! + //! @tparam T + //! **[inferred]** Data element type that is convertible to the `value` type of `InputIteratorT` + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in] d_in + //! 
Pointer to the input sequence of data items + //! + //! @param[out] d_out + //! Pointer to the output aggregate + //! + //! @param[in] num_segments + //! The number of segments that comprise the sorting data + //! + //! @param[in] d_begin_offsets + //! @rst + //! Random-access input iterator to the sequence of beginning offsets of + //! length ``num_segments``, such that ``d_begin_offsets[i]`` is the first + //! element of the *i*\ :sup:`th` data segment in ``d_keys_*`` and ``d_values_*`` + //! @endrst + //! + //! @param[in] d_end_offsets + //! @rst + //! Random-access input iterator to the sequence of ending offsets of length + //! ``num_segments``, such that ``d_end_offsets[i] - 1`` is the last element of + //! the *i*\ :sup:`th` data segment in ``d_keys_*`` and ``d_values_*``. + //! If ``d_end_offsets[i] - 1 <= d_begin_offsets[i]``, the *i*\ :sup:`th` is considered empty. + //! @endrst + //! + //! @param[in] reduction_op + //! Binary reduction functor + //! + //! @param[in] initial_value + //! Initial value of the reduction for each segment + //! + //! @param[in] stream + //! @rst + //! **[optional]** CUDA stream to launch kernels within. Default is stream\ :sub:`0`. + //! 
@endrst + template + CUB_RUNTIME_FUNCTION static cudaError_t Reduce( + void* d_temp_storage, + size_t& temp_storage_bytes, + InputIteratorT d_in, + OutputIteratorT d_out, + int num_segments, + BeginOffsetIteratorT d_begin_offsets, + EndOffsetIteratorT d_end_offsets, + ReductionOpT reduction_op, + T initial_value, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, "cub::DeviceSegmentedReduce::Reduce"); + + // Integer type for global offsets + using OffsetT = detail::common_iterator_value_t; + using integral_offset_check = ::cuda::std::is_integral; + + static_assert(integral_offset_check::value, "Offset iterator value type should be integral."); + + return segmented_reduce( + integral_offset_check{}, + d_temp_storage, + temp_storage_bytes, + d_in, + d_out, + num_segments, + d_begin_offsets, + d_end_offsets, + reduction_op, + initial_value, // zero-initialize + stream); + } + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + template + CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t Reduce( + void* d_temp_storage, + size_t& temp_storage_bytes, + InputIteratorT d_in, + OutputIteratorT d_out, + int num_segments, + BeginOffsetIteratorT d_begin_offsets, + EndOffsetIteratorT d_end_offsets, + ReductionOpT reduction_op, + T initial_value, + cudaStream_t stream, + bool debug_synchronous) + { + CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG + + return Reduce( + d_temp_storage, + temp_storage_bytes, + d_in, + d_out, + num_segments, + d_begin_offsets, + d_end_offsets, + reduction_op, + initial_value, + stream); + } +#endif // _CCCL_DOXYGEN_INVOKED + + //! @rst + //! Computes a device-wide segmented sum using the addition (``+``) operator. + //! + //! - Uses ``0`` as the initial value of the reduction for each segment. + //! - When input a contiguous sequence of segments, a single sequence + //! ``segment_offsets`` (of length ``num_segments + 1``) can be aliased + //! 
for both the ``d_begin_offsets`` and ``d_end_offsets`` parameters (where + //! the latter is specified as ``segment_offsets + 1``). + //! - Does not support ``+`` operators that are non-commutative. + //! - Let ``s`` be in ``[0, num_segments)``. The range + //! ``[d_out + d_begin_offsets[s], d_out + d_end_offsets[s])`` shall not + //! overlap ``[d_in + d_begin_offsets[s], d_in + d_end_offsets[s])``, + //! ``[d_begin_offsets, d_begin_offsets + num_segments)`` nor + //! ``[d_end_offsets, d_end_offsets + num_segments)``. + //! - @devicestorage + //! + //! Snippet + //! +++++++++++++++++++++++++++++++++++++++++++++ + //! + //! The code snippet below illustrates the sum reduction of a device vector of ``int`` data elements. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_segmented_reduce_api.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin segmented-reduce-sum + //! :end-before: example-end segmented-reduce-sum + //! + //! @endrst + //! + //! @tparam InputIteratorT + //! **[inferred]** Random-access input iterator type for reading input items @iterator + //! + //! @tparam OutputIteratorT + //! **[inferred]** Output iterator type for recording the reduced aggregate @iterator + //! + //! @tparam BeginOffsetIteratorT + //! **[inferred]** Random-access input iterator type for reading segment beginning offsets @iterator + //! + //! @tparam EndOffsetIteratorT + //! **[inferred]** Random-access input iterator type for reading segment ending offsets @iterator + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in] d_in + //! Pointer to the input sequence of data items + //! + //! @param[out] d_out + //! Pointer to the output aggregate + //! + //! 
@param[in] num_segments + //! The number of segments that comprise the sorting data + //! + //! @param[in] d_begin_offsets + //! @rst + //! Random-access input iterator to the sequence of beginning offsets of + //! length ``num_segments`, such that ``d_begin_offsets[i]`` is the first + //! element of the *i*\ :sup:`th` data segment in ``d_keys_*`` and + //! ``d_values_*`` + //! @endrst + //! + //! @param[in] d_end_offsets + //! @rst + //! Random-access input iterator to the sequence of ending offsets of length + //! ``num_segments``, such that ``d_end_offsets[i] - 1`` is the last element of + //! the *i*\ :sup:`th` data segment in ``d_keys_*`` and ``d_values_*``. + //! If ``d_end_offsets[i] - 1 <= d_begin_offsets[i]``, the *i*\ :sup:`th` is considered empty. + //! @endrst + //! + //! @param[in] stream + //! @rst + //! **[optional]** CUDA stream to launch kernels within. Default is stream\ :sub:`0`. + //! @endrst + template + CUB_RUNTIME_FUNCTION static cudaError_t + Sum(void* d_temp_storage, + size_t& temp_storage_bytes, + InputIteratorT d_in, + OutputIteratorT d_out, + int num_segments, + BeginOffsetIteratorT d_begin_offsets, + EndOffsetIteratorT d_end_offsets, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, "cub::DeviceSegmentedReduce::Sum"); + + // Integer type for global offsets + using OffsetT = detail::common_iterator_value_t; + + // The output value type + using OutputT = cub::detail::non_void_value_t>; + using integral_offset_check = ::cuda::std::is_integral; + + static_assert(integral_offset_check::value, "Offset iterator value type should be integral."); + + return segmented_reduce>( + integral_offset_check{}, + d_temp_storage, + temp_storage_bytes, + d_in, + d_out, + num_segments, + d_begin_offsets, + d_end_offsets, + ::cuda::std::plus<>{}, + OutputT(), // zero-initialize + stream); + } + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + template + CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION 
static cudaError_t + Sum(void* d_temp_storage, + size_t& temp_storage_bytes, + InputIteratorT d_in, + OutputIteratorT d_out, + int num_segments, + BeginOffsetIteratorT d_begin_offsets, + EndOffsetIteratorT d_end_offsets, + cudaStream_t stream, + bool debug_synchronous) + { + CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG + + return Sum( + d_temp_storage, temp_storage_bytes, d_in, d_out, num_segments, d_begin_offsets, d_end_offsets, stream); + } +#endif // _CCCL_DOXYGEN_INVOKED + + //! @rst + //! Computes a device-wide segmented minimum using the less-than (``<``) operator. + //! + //! - Uses ``std::numeric_limits::max()`` as the initial value of the reduction for each segment. + //! - When input a contiguous sequence of segments, a single sequence + //! ``segment_offsets`` (of length ``num_segments + 1``) can be aliased for both + //! the ``d_begin_offsets`` and ``d_end_offsets`` parameters (where the latter is + //! specified as ``segment_offsets + 1``). + //! - Does not support ``<`` operators that are non-commutative. + //! - Let ``s`` be in ``[0, num_segments)``. The range + //! ``[d_out + d_begin_offsets[s], d_out + d_end_offsets[s])`` shall not + //! overlap ``[d_in + d_begin_offsets[s], d_in + d_end_offsets[s])``, + //! ``[d_begin_offsets, d_begin_offsets + num_segments)`` nor + //! ``[d_end_offsets, d_end_offsets + num_segments)``. + //! - @devicestorage + //! + //! Snippet + //! +++++++++++++++++++++++++++++++++++++++++++++ + //! + //! The code snippet below illustrates the min-reduction of a device vector of ``int`` data elements. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_segmented_reduce_api.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin segmented-reduce-custommin + //! :end-before: example-end segmented-reduce-custommin + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_segmented_reduce_api.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin segmented-reduce-min + //! 
:end-before: example-end segmented-reduce-min + //! + //! @endrst + //! + //! @tparam InputIteratorT + //! **[inferred]** Random-access input iterator type for reading input items @iterator + //! + //! @tparam OutputIteratorT + //! **[inferred]** Output iterator type for recording the reduced aggregate @iterator + //! + //! @tparam BeginOffsetIteratorT + //! **[inferred]** Random-access input iterator type for reading segment beginning offsets @iterator + //! + //! @tparam EndOffsetIteratorT + //! **[inferred]** Random-access input iterator type for reading segment ending offsets @iterator + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in] d_in + //! Pointer to the input sequence of data items + //! + //! @param[out] d_out + //! Pointer to the output aggregate + //! + //! @param[in] num_segments + //! The number of segments that comprise the sorting data + //! + //! @param[in] d_begin_offsets + //! @rst + //! Random-access input iterator to the sequence of beginning offsets of + //! length ``num_segments``, such that ``d_begin_offsets[i]`` is the first + //! element of the *i*\ :sup:`th` data segment in ``d_keys_*`` and ``d_values_*`` + //! @endrst + //! + //! @param[in] d_end_offsets + //! @rst + //! Random-access input iterator to the sequence of ending offsets of length + //! ``num_segments``, such that ``d_end_offsets[i] - 1`` is the last element of + //! the *i*\ :sup:`th` data segment in ``d_keys_*`` and ``d_values_*``. + //! If ``d_end_offsets[i] - 1 <= d_begin_offsets[i]``, the *i*\ :sup:`th` is considered empty. + //! @endrst + //! + //! @param[in] stream + //! @rst + //! **[optional]** CUDA stream to launch kernels within. Default is stream\ :sub:`0`. + //! 
@endrst + template + CUB_RUNTIME_FUNCTION static cudaError_t + Min(void* d_temp_storage, + size_t& temp_storage_bytes, + InputIteratorT d_in, + OutputIteratorT d_out, + int num_segments, + BeginOffsetIteratorT d_begin_offsets, + EndOffsetIteratorT d_end_offsets, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, "cub::DeviceSegmentedReduce::Min"); + + // Integer type for global offsets + using OffsetT = detail::common_iterator_value_t; + + // The input value type + using InputT = cub::detail::value_t; + using integral_offset_check = ::cuda::std::is_integral; + + static_assert(integral_offset_check::value, "Offset iterator value type should be integral."); + + return segmented_reduce>( + integral_offset_check{}, + d_temp_storage, + temp_storage_bytes, + d_in, + d_out, + num_segments, + d_begin_offsets, + d_end_offsets, + ::cuda::minimum<>{}, + Traits::Max(), // replace with + // std::numeric_limits::max() + // when C++11 support is + // more prevalent + stream); + } + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + template + CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t + Min(void* d_temp_storage, + size_t& temp_storage_bytes, + InputIteratorT d_in, + OutputIteratorT d_out, + int num_segments, + BeginOffsetIteratorT d_begin_offsets, + EndOffsetIteratorT d_end_offsets, + cudaStream_t stream, + bool debug_synchronous) + { + CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG + + return Min( + d_temp_storage, temp_storage_bytes, d_in, d_out, num_segments, d_begin_offsets, d_end_offsets, stream); + } +#endif // _CCCL_DOXYGEN_INVOKED + + //! @rst + //! Finds the first device-wide minimum in each segment using the + //! less-than (``<``) operator, also returning the in-segment index of that item. + //! + //! - The output value type of ``d_out`` is ``cub::KeyValuePair`` + //! (assuming the value type of ``d_in`` is ``T``) + //! + //! - The minimum of the *i*\ :sup:`th` segment is written to + //! 
``d_out[i].value`` and its offset in that segment is written to ``d_out[i].key``. + //! - The ``{1, std::numeric_limits::max()}`` tuple is produced for zero-length inputs + //! + //! - When input a contiguous sequence of segments, a single sequence + //! ``segment_offsets`` (of length ``num_segments + 1``) can be aliased for both + //! the ``d_begin_offsets`` and ``d_end_offsets`` parameters (where the latter + //! is specified as ``segment_offsets + 1``). + //! - Does not support ``<`` operators that are non-commutative. + //! - Let ``s`` be in ``[0, num_segments)``. The range + //! ``[d_out + d_begin_offsets[s], d_out + d_end_offsets[s])`` shall not + //! overlap ``[d_in + d_begin_offsets[s], d_in + d_end_offsets[s])``, + //! ``[d_begin_offsets, d_begin_offsets + num_segments)`` nor + //! ``[d_end_offsets, d_end_offsets + num_segments)``. + //! - @devicestorage + //! + //! Snippet + //! +++++++++++++++++++++++++++++++++++++++++++++ + //! + //! The code snippet below illustrates the argmin-reduction of a device vector of ``int`` data elements. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_segmented_reduce_api.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin segmented-reduce-argmin + //! :end-before: example-end segmented-reduce-argmin + //! + //! @endrst + //! + //! @tparam InputIteratorT + //! **[inferred]** Random-access input iterator type for reading input items (of some type `T`) @iterator + //! + //! @tparam OutputIteratorT + //! **[inferred]** Output iterator type for recording the reduced aggregate + //! (having value type `KeyValuePair`) @iterator + //! + //! @tparam BeginOffsetIteratorT + //! **[inferred]** Random-access input iterator type for reading segment + //! beginning offsets @iterator + //! + //! @tparam EndOffsetIteratorT + //! **[inferred]** Random-access input iterator type for reading segment + //! ending offsets @iterator + //! + //! @param[in] d_temp_storage + //! 
Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in] d_in + //! Pointer to the input sequence of data items + //! + //! @param[out] d_out + //! Pointer to the output aggregate + //! + //! @param[in] num_segments + //! The number of segments that comprise the sorting data + //! + //! @param[in] d_begin_offsets + //! @rst + //! Random-access input iterator to the sequence of beginning offsets of + //! length ``num_segments``, such that ``d_begin_offsets[i]`` is the first + //! element of the *i*\ :sup:`th` data segment in ``d_keys_*`` and ``d_values_*`` + //! @endrst + //! + //! @param[in] d_end_offsets + //! @rst + //! Random-access input iterator to the sequence of ending offsets of length + //! ``num_segments``, such that ``d_end_offsets[i] - 1`` is the last element of + //! the *i*\ :sup:`th` data segment in ``d_keys_*`` and ``d_values_*``. + //! If ``d_end_offsets[i] - 1 <= d_begin_offsets[i]``, the *i*\ :sup:`th` is considered empty. + //! @endrst + //! + //! @param[in] stream + //! @rst + //! **[optional]** CUDA stream to launch kernels within. Default is stream\ :sub:`0`. + //! 
@endrst + template + CUB_RUNTIME_FUNCTION static cudaError_t ArgMin( + void* d_temp_storage, + size_t& temp_storage_bytes, + InputIteratorT d_in, + OutputIteratorT d_out, + int num_segments, + BeginOffsetIteratorT d_begin_offsets, + EndOffsetIteratorT d_end_offsets, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, "cub::DeviceSegmentedReduce::ArgMin"); + + // Integer type for global offsets + // Using common iterator value type is a breaking change, see: + // https://github.com/NVIDIA/cccl/pull/414#discussion_r1330632615 + using OffsetT = int; // detail::common_iterator_value_t; + + // The input type + using InputValueT = cub::detail::value_t; + + // The output tuple type + using OutputTupleT = cub::detail::non_void_value_t>; + + // The output value type + using OutputValueT = typename OutputTupleT::Value; + + using AccumT = OutputTupleT; + + using InitT = detail::reduce::empty_problem_init_t; + + // Wrapped input iterator to produce index-value tuples + using ArgIndexInputIteratorT = ArgIndexInputIterator; + + ArgIndexInputIteratorT d_indexed_in(d_in); + + // Initial value + // TODO Address https://github.com/NVIDIA/cub/issues/651 + InitT initial_value{AccumT(1, Traits::Max())}; + + using integral_offset_check = ::cuda::std::is_integral; + static_assert(integral_offset_check::value, "Offset iterator value type should be integral."); + + return segmented_reduce( + integral_offset_check{}, + d_temp_storage, + temp_storage_bytes, + d_indexed_in, + d_out, + num_segments, + d_begin_offsets, + d_end_offsets, + cub::ArgMin(), + initial_value, + stream); + } + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + template + CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t ArgMin( + void* d_temp_storage, + size_t& temp_storage_bytes, + InputIteratorT d_in, + OutputIteratorT d_out, + int num_segments, + BeginOffsetIteratorT d_begin_offsets, + EndOffsetIteratorT d_end_offsets, + cudaStream_t stream, + bool 
debug_synchronous) + { + CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG + + return ArgMin( + d_temp_storage, temp_storage_bytes, d_in, d_out, num_segments, d_begin_offsets, d_end_offsets, stream); + } +#endif // _CCCL_DOXYGEN_INVOKED + + //! @rst + //! Computes a device-wide segmented maximum using the greater-than (``>``) operator. + //! + //! - Uses ``std::numeric_limits::lowest()`` as the initial value of the reduction. + //! - When input a contiguous sequence of segments, a single sequence + //! ``segment_offsets`` (of length ``num_segments + 1``) can be aliased + //! for both the ``d_begin_offsets`` and ``d_end_offsets`` parameters (where + //! the latter is specified as ``segment_offsets + 1``). + //! - Does not support ``>`` operators that are non-commutative. + //! - Let ``s`` be in ``[0, num_segments)``. The range + //! ``[d_out + d_begin_offsets[s], d_out + d_end_offsets[s])`` shall not + //! overlap ``[d_in + d_begin_offsets[s], d_in + d_end_offsets[s])``, + //! ``[d_begin_offsets, d_begin_offsets + num_segments)`` nor + //! ``[d_end_offsets, d_end_offsets + num_segments)``. + //! - @devicestorage + //! + //! Snippet + //! +++++++++++++++++++++++++++++++++++++++++++++ + //! + //! The code snippet below illustrates the max-reduction of a device vector of ``int`` data elements. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_segmented_reduce_api.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin segmented-reduce-max + //! :end-before: example-end segmented-reduce-max + //! + //! @endrst + //! + //! @tparam InputIteratorT + //! **[inferred]** Random-access input iterator type for reading input items @iterator + //! + //! @tparam OutputIteratorT + //! **[inferred]** Output iterator type for recording the reduced aggregate @iterator + //! + //! @tparam BeginOffsetIteratorT + //! **[inferred]** Random-access input iterator type for reading segment beginning offsets @iterator + //! + //! @tparam EndOffsetIteratorT + //! 
**[inferred]** Random-access input iterator type for reading segment ending offsets @iterator + //! + //! @param[in] d_temp_storage + //! Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in] d_in + //! Pointer to the input sequence of data items + //! + //! @param[out] d_out + //! Pointer to the output aggregate + //! + //! @param[in] num_segments + //! The number of segments that comprise the sorting data + //! + //! @param[in] d_begin_offsets + //! @rst + //! Random-access input iterator to the sequence of beginning offsets of + //! length ``num_segments``, such that ``d_begin_offsets[i]`` is the first + //! element of the *i*\ :sup:`th` data segment in ``d_keys_*`` and ``d_values_*`` + //! @endrst + //! + //! @param[in] d_end_offsets + //! @rst + //! Random-access input iterator to the sequence of ending offsets of length + //! ``num_segments``, such that ``d_end_offsets[i] - 1`` is the last element of + //! the *i*\ :sup:`th` data segment in ``d_keys_*`` and ``d_values_*``. + //! If ``d_end_offsets[i] - 1 <= d_begin_offsets[i]``, the *i*\ :sup:`th` is considered empty. + //! @endrst + //! + //! @param[in] stream + //! @rst + //! **[optional]** CUDA stream to launch kernels within. Default is stream\ :sub:`0`. + //! 
@endrst + template + CUB_RUNTIME_FUNCTION static cudaError_t + Max(void* d_temp_storage, + size_t& temp_storage_bytes, + InputIteratorT d_in, + OutputIteratorT d_out, + int num_segments, + BeginOffsetIteratorT d_begin_offsets, + EndOffsetIteratorT d_end_offsets, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, "cub::DeviceSegmentedReduce::Max"); + + // Integer type for global offsets + using OffsetT = detail::common_iterator_value_t; + + // The input value type + using InputT = cub::detail::value_t; + + using integral_offset_check = ::cuda::std::is_integral; + static_assert(integral_offset_check::value, "Offset iterator value type should be integral."); + + return segmented_reduce( + integral_offset_check{}, + d_temp_storage, + temp_storage_bytes, + d_in, + d_out, + num_segments, + d_begin_offsets, + d_end_offsets, + ::cuda::maximum<>{}, + Traits::Lowest(), // replace with + // std::numeric_limits::lowest() + // when C++11 support is + // more prevalent + stream); + } + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + template + CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t + Max(void* d_temp_storage, + size_t& temp_storage_bytes, + InputIteratorT d_in, + OutputIteratorT d_out, + int num_segments, + BeginOffsetIteratorT d_begin_offsets, + EndOffsetIteratorT d_end_offsets, + cudaStream_t stream, + bool debug_synchronous) + { + CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG + + return Max( + d_temp_storage, temp_storage_bytes, d_in, d_out, num_segments, d_begin_offsets, d_end_offsets, stream); + } +#endif // _CCCL_DOXYGEN_INVOKED + + //! @rst + //! Finds the first device-wide maximum in each segment using the + //! greater-than (``>``) operator, also returning the in-segment index of that item + //! + //! - The output value type of ``d_out`` is ``cub::KeyValuePair`` + //! (assuming the value type of ``d_in`` is ``T``) + //! + //! - The maximum of the *i*\ :sup:`th` segment is written to + //! 
``d_out[i].value`` and its offset in that segment is written to ``d_out[i].key``. + //! - The ``{1, std::numeric_limits::lowest()}`` tuple is produced for zero-length inputs + //! + //! - When input a contiguous sequence of segments, a single sequence + //! ``segment_offsets`` (of length ``num_segments + 1``) can be aliased + //! for both the ``d_begin_offsets`` and ``d_end_offsets`` parameters (where + //! the latter is specified as ``segment_offsets + 1``). + //! - Does not support ``>`` operators that are non-commutative. + //! - Let ``s`` be in ``[0, num_segments)``. The range + //! ``[d_out + d_begin_offsets[s], d_out + d_end_offsets[s])`` shall not + //! overlap ``[d_in + d_begin_offsets[s], d_in + d_end_offsets[s])``, + //! ``[d_begin_offsets, d_begin_offsets + num_segments)`` nor + //! ``[d_end_offsets, d_end_offsets + num_segments)``. + //! - @devicestorage + //! + //! Snippet + //! +++++++++++++++++++++++++++++++++++++++++++++ + //! + //! The code snippet below illustrates the argmax-reduction of a device vector + //! of `int` data elements. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_device_segmented_reduce_api.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin segmented-reduce-argmax + //! :end-before: example-end segmented-reduce-argmax + //! + //! @endrst + //! + //! @tparam InputIteratorT + //! **[inferred]** Random-access input iterator type for reading input items + //! (of some type `T`) @iterator + //! + //! @tparam OutputIteratorT + //! **[inferred]** Output iterator type for recording the reduced aggregate + //! (having value type `KeyValuePair`) @iterator + //! + //! @tparam BeginOffsetIteratorT + //! **[inferred]** Random-access input iterator type for reading segment + //! beginning offsets @iterator + //! + //! @tparam EndOffsetIteratorT + //! **[inferred]** Random-access input iterator type for reading segment + //! ending offsets @iterator + //! + //! @param[in] d_temp_storage + //! 
Device-accessible allocation of temporary storage. When `nullptr`, the + //! required allocation size is written to `temp_storage_bytes` and no work + //! is done. + //! + //! @param[in,out] temp_storage_bytes + //! Reference to size in bytes of `d_temp_storage` allocation + //! + //! @param[in] d_in + //! Pointer to the input sequence of data items + //! + //! @param[out] d_out + //! Pointer to the output aggregate + //! + //! @param[in] num_segments + //! The number of segments that comprise the sorting data + //! + //! @param[in] d_begin_offsets + //! @rst + //! Random-access input iterator to the sequence of beginning offsets of + //! length `num_segments`, such that ``d_begin_offsets[i]`` is the first + //! element of the *i*\ :sup:`th` data segment in ``d_keys_*`` and ``d_values_*`` + //! @endrst + //! + //! @param[in] d_end_offsets + //! @rst + //! Random-access input iterator to the sequence of ending offsets of length + //! ``num_segments``, such that ``d_end_offsets[i] - 1`` is the last element of + //! the *i*\ :sup:`th` data segment in ``d_keys_*`` and ``d_values_*``. + //! If ``d_end_offsets[i] - 1 <= d_begin_offsets[i]``, the *i*\ :sup:`th` is considered empty. + //! @endrst + //! + //! @param[in] stream + //! @rst + //! **[optional]** CUDA stream to launch kernels within. Default is stream\ :sub:`0`. + //! 
@endrst + template + CUB_RUNTIME_FUNCTION static cudaError_t ArgMax( + void* d_temp_storage, + size_t& temp_storage_bytes, + InputIteratorT d_in, + OutputIteratorT d_out, + int num_segments, + BeginOffsetIteratorT d_begin_offsets, + EndOffsetIteratorT d_end_offsets, + cudaStream_t stream = 0) + { + CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, "cub::DeviceSegmentedReduce::ArgMax"); + + // Integer type for global offsets + // Using common iterator value type is a breaking change, see: + // https://github.com/NVIDIA/cccl/pull/414#discussion_r1330632615 + using OffsetT = int; // detail::common_iterator_value_t; + + // The input type + using InputValueT = cub::detail::value_t; + + // The output tuple type + using OutputTupleT = cub::detail::non_void_value_t>; + + using AccumT = OutputTupleT; + + using InitT = detail::reduce::empty_problem_init_t; + + // The output value type + using OutputValueT = typename OutputTupleT::Value; + + // Wrapped input iterator to produce index-value tuples + using ArgIndexInputIteratorT = ArgIndexInputIterator; + + ArgIndexInputIteratorT d_indexed_in(d_in); + + // Initial value + // TODO Address https://github.com/NVIDIA/cub/issues/651 + InitT initial_value{AccumT(1, Traits::Lowest())}; + + using integral_offset_check = ::cuda::std::is_integral; + static_assert(integral_offset_check::value, "Offset iterator value type should be integral."); + + return segmented_reduce( + integral_offset_check{}, + d_temp_storage, + temp_storage_bytes, + d_indexed_in, + d_out, + num_segments, + d_begin_offsets, + d_end_offsets, + cub::ArgMax(), + initial_value, + stream); + } + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + template + CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t ArgMax( + void* d_temp_storage, + size_t& temp_storage_bytes, + InputIteratorT d_in, + OutputIteratorT d_out, + int num_segments, + BeginOffsetIteratorT d_begin_offsets, + EndOffsetIteratorT d_end_offsets, + cudaStream_t stream, + bool 
debug_synchronous) + { + CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG + + return ArgMax( + d_temp_storage, temp_storage_bytes, d_in, d_out, num_segments, d_begin_offsets, d_end_offsets, stream); + } +#endif // _CCCL_DOXYGEN_INVOKED +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/iterator/arg_index_input_iterator.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/iterator/arg_index_input_iterator.cuh new file mode 100644 index 0000000000000000000000000000000000000000..fdd3f1842035f406f906eab14593084335410e38 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/iterator/arg_index_input_iterator.cuh @@ -0,0 +1,256 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * \file + * Random-access iterator types + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include + +#include +#include +#include + +#include + +CUB_NAMESPACE_BEGIN + +/** + * @brief A random-access input wrapper for pairing dereferenced values with their corresponding + * indices (forming \p KeyValuePair tuples). + * + * @par Overview + * - ArgIndexInputIterator wraps a random access input iterator @p itr of type @p InputIteratorT. + * Dereferencing an ArgIndexInputIterator at offset @p i produces a @p KeyValuePair value whose + * @p key field is @p i and whose @p value field is itr[i]. + * - Can be used with any data type. + * - Can be constructed, manipulated, and exchanged within and between host and device + * functions. Wrapped host memory can only be dereferenced on the host, and wrapped + * device memory can only be dereferenced on the device. + * - Compatible with Thrust API v1.7 or newer. 
+ * + * @par Snippet + * The code snippet below illustrates the use of @p ArgIndexInputIterator to + * dereference an array of doubles + * @par + * @code + * #include // or equivalently + * + * // Declare, allocate, and initialize a device array + * double *d_in; // e.g., [8.0, 6.0, 7.0, 5.0, 3.0, 0.0, 9.0] + * + * // Create an iterator wrapper + * cub::ArgIndexInputIterator itr(d_in); + * + * // Within device code: + * cub::ArgIndexInputIterator::value_type tup = *itr; + * printf("%f @ %ld\n", + * tup.value, + * tup.key); // 8.0 @ 0 + * + * itr = itr + 6; + * tup = *itr; + * printf("%f @ %ld\n", + * tup.value, + * tup.key); // 9.0 @ 6 + * + * @endcode + * + * @tparam InputIteratorT + * The value type of the wrapped input iterator + * + * @tparam OffsetT + * The difference type of this iterator (Default: @p ptrdiff_t) + * + * @tparam OutputValueT + * The paired value type of the tuple (Default: value type of input iterator) + */ +template > +class ArgIndexInputIterator +{ +public: + // Required iterator traits + + /// My own type + using self_type = ArgIndexInputIterator; + + /// Type to express the result of subtracting one iterator from another + using difference_type = OffsetT; + + /// The type of the element the iterator can point to + using value_type = KeyValuePair; + + /// The type of a pointer to an element the iterator can point to + using pointer = value_type*; + + /// The type of a reference to an element the iterator can point to + using reference = value_type; + + /// The iterator category + using iterator_category = typename THRUST_NS_QUALIFIER::detail::iterator_facade_category< + THRUST_NS_QUALIFIER::any_system_tag, + THRUST_NS_QUALIFIER::random_access_traversal_tag, + value_type, + reference>::type; + +private: + InputIteratorT itr; + difference_type offset; + +public: + /** + * @param itr + * Input iterator to wrap + * + * @param offset + * OffsetT (in items) from @p itr denoting the position of the iterator + */ + _CCCL_HOST_DEVICE 
_CCCL_FORCEINLINE ArgIndexInputIterator(InputIteratorT itr, difference_type offset = 0) + : itr(itr) + , offset(offset) + {} + + /// Postfix increment + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator++(int) + { + self_type retval = *this; + offset++; + return retval; + } + + /// Prefix increment + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator++() + { + offset++; + return *this; + } + + /// Indirection + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE reference operator*() const + { + value_type retval; + retval.value = itr[offset]; + retval.key = offset; + return retval; + } + + /// Addition + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator+(Distance n) const + { + self_type retval(itr, offset + n); + return retval; + } + + /// Addition assignment + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type& operator+=(Distance n) + { + offset += n; + return *this; + } + + /// Subtraction + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator-(Distance n) const + { + self_type retval(itr, offset - n); + return retval; + } + + /// Subtraction assignment + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type& operator-=(Distance n) + { + offset -= n; + return *this; + } + + /// Distance + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE difference_type operator-(self_type other) const + { + return offset - other.offset; + } + + /// Array subscript + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE reference operator[](Distance n) const + { + self_type offset = (*this) + n; + return *offset; + } + + /// Structure dereference + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE pointer operator->() + { + return &(*(*this)); + } + + /// Equal to + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE bool operator==(const self_type& rhs) + { + return ((itr == rhs.itr) && (offset == rhs.offset)); + } + + /// Not equal to + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE bool operator!=(const self_type& rhs) + { + return ((itr != rhs.itr) || (offset != rhs.offset)); + } + + /// Normalize + 
_CCCL_HOST_DEVICE _CCCL_FORCEINLINE void normalize() + { + itr += offset; + offset = 0; + } + + /// ostream operator + friend std::ostream& operator<<(std::ostream& os, const self_type& /*itr*/) + { + return os; + } +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/iterator/cache_modified_input_iterator.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/iterator/cache_modified_input_iterator.cuh new file mode 100644 index 0000000000000000000000000000000000000000..eeb2a78f4b552b4e853de6ce93ac9db94d3737d0 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/iterator/cache_modified_input_iterator.cuh @@ -0,0 +1,239 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * @file + * Random-access iterator types + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#if _CCCL_COMPILER(NVRTC) +# include +#else // ^^^ _CCCL_COMPILER(NVRTC) ^^^ // vvv !_CCCL_COMPILER(NVRTC) vvv +# include +# include + +# include +# include +#endif // !_CCCL_COMPILER(NVRTC) + +#include +#include + +CUB_NAMESPACE_BEGIN + +/** + * @brief A random-access input wrapper for dereferencing array values using a PTX cache load + * modifier. + * + * @par Overview + * - CacheModifiedInputIterator is a random-access input iterator that wraps a native + * device pointer of type ValueType*. @p ValueType references are + * made by reading @p ValueType values through loads modified by @p MODIFIER. + * - Can be used to load any data type from memory using PTX cache load modifiers (e.g., "LOAD_LDG", + * "LOAD_CG", "LOAD_CA", "LOAD_CS", "LOAD_CV", etc.). + * - Can be constructed, manipulated, and exchanged within and between host and device + * functions, but can only be dereferenced within device functions. + * - Compatible with Thrust API v1.7 or newer. 
+ * + * @par Snippet + * The code snippet below illustrates the use of @p CacheModifiedInputIterator to + * dereference a device array of double using the "ldg" PTX load modifier + * (i.e., load values through texture cache). + * @par + * @code + * #include // or equivalently + * + * // Declare, allocate, and initialize a device array + * double *d_in; // e.g., [8.0, 6.0, 7.0, 5.0, 3.0, 0.0, 9.0] + * + * // Create an iterator wrapper + * cub::CacheModifiedInputIterator itr(d_in); + * + * // Within device code: + * printf("%f\n", itr[0]); // 8.0 + * printf("%f\n", itr[1]); // 6.0 + * printf("%f\n", itr[6]); // 9.0 + * + * @endcode + * + * @tparam CacheLoadModifier + * The cub::CacheLoadModifier to use when accessing data + * + * @tparam ValueType + * The value type of this iterator + * + * @tparam OffsetT + * The difference type of this iterator (Default: @p ptrdiff_t) + */ +template +class CacheModifiedInputIterator +{ +public: + // Required iterator traits + + /// My own type + using self_type = CacheModifiedInputIterator; + + /// Type to express the result of subtracting one iterator from another + using difference_type = OffsetT; + + /// The type of the element the iterator can point to + using value_type = ValueType; + + /// The type of a pointer to an element the iterator can point to + using pointer = ValueType*; + + /// The type of a reference to an element the iterator can point to + using reference = ValueType; + +#if _CCCL_COMPILER(NVRTC) + using iterator_category = ::cuda::std::random_access_iterator_tag; +#else // ^^^ _CCCL_COMPILER(NVRTC) ^^^ // vvv !_CCCL_COMPILER(NVRTC) vvv + using iterator_category = typename THRUST_NS_QUALIFIER::detail::iterator_facade_category< + THRUST_NS_QUALIFIER::device_system_tag, + THRUST_NS_QUALIFIER::random_access_traversal_tag, + value_type, + reference>::type; +#endif // _CCCL_COMPILER(NVRTC) + +public: + /// Wrapped native pointer + ValueType* ptr; + + /// Constructor + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE 
CacheModifiedInputIterator(QualifiedValueType* ptr) ///< Native pointer to wrap + : ptr(const_cast::type*>(ptr)) + {} + + /// Postfix increment + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator++(int) + { + self_type retval = *this; + ptr++; + return retval; + } + + /// Prefix increment + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator++() + { + ptr++; + return *this; + } + + /// Indirection + _CCCL_DEVICE _CCCL_FORCEINLINE reference operator*() const + { + return ThreadLoad(ptr); + } + + /// Addition + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator+(Distance n) const + { + self_type retval(ptr + n); + return retval; + } + + /// Addition assignment + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type& operator+=(Distance n) + { + ptr += n; + return *this; + } + + /// Subtraction + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator-(Distance n) const + { + self_type retval(ptr - n); + return retval; + } + + /// Subtraction assignment + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type& operator-=(Distance n) + { + ptr -= n; + return *this; + } + + /// Distance + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE difference_type operator-(self_type other) const + { + return ptr - other.ptr; + } + + /// Array subscript + template + _CCCL_DEVICE _CCCL_FORCEINLINE reference operator[](Distance n) const + { + return ThreadLoad(ptr + n); + } + + /// Structure dereference + _CCCL_DEVICE _CCCL_FORCEINLINE pointer operator->() + { + return &ThreadLoad(ptr); + } + + /// Equal to + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE bool operator==(const self_type& rhs) const + { + return (ptr == rhs.ptr); + } + + /// Not equal to + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE bool operator!=(const self_type& rhs) const + { + return (ptr != rhs.ptr); + } + + /// ostream operator +#if !_CCCL_COMPILER(NVRTC) + friend std::ostream& operator<<(std::ostream& os, const self_type& /*itr*/) + { + return os; + } +#endif // !_CCCL_COMPILER(NVRTC) +}; + 
+CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/iterator/cache_modified_output_iterator.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/iterator/cache_modified_output_iterator.cuh new file mode 100644 index 0000000000000000000000000000000000000000..fa4e501b80f1b556c5c98fd5ad5a14dddad33519 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/iterator/cache_modified_output_iterator.cuh @@ -0,0 +1,247 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * \file + * Random-access iterator types + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include + +#include +#include + +#include + +CUB_NAMESPACE_BEGIN + +/** + * @brief A random-access output wrapper for storing array values using a PTX cache-modifier. + * + * @par Overview + * - CacheModifiedOutputIterator is a random-access output iterator that wraps a native + * device pointer of type ValueType*. @p ValueType references are + * made by writing @p ValueType values through stores modified by @p MODIFIER. + * - Can be used to store any data type to memory using PTX cache store modifiers (e.g., "STORE_WB", + * "STORE_CG", "STORE_CS", "STORE_WT", etc.). + * - Can be constructed, manipulated, and exchanged within and between host and device + * functions, but can only be dereferenced within device functions. + * - Compatible with Thrust API v1.7 or newer. + * + * @par Snippet + * The code snippet below illustrates the use of @p CacheModifiedOutputIterator to + * dereference a device array of doubles using the "wt" PTX load modifier + * (i.e., write-through to system memory). 
+ * @par + * @code + * #include // or equivalently + * + * // Declare, allocate, and initialize a device array + * double *d_out; // e.g., [, , , , , , ] + * + * // Create an iterator wrapper + * cub::CacheModifiedOutputIterator itr(d_out); + * + * // Within device code: + * itr[0] = 8.0; + * itr[1] = 66.0; + * itr[55] = 24.0; + * + * @endcode + * + * @par Usage Considerations + * - Can only be dereferenced within device code + * + * @tparam CacheStoreModifier + * The cub::CacheStoreModifier to use when accessing data + * + * @tparam ValueType + * The value type of this iterator + * + * @tparam OffsetT + * The difference type of this iterator (Default: @p ptrdiff_t) + */ +template +class CacheModifiedOutputIterator +{ +private: + // Proxy object + struct Reference + { + ValueType* ptr; + + /// Constructor + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE Reference(ValueType* ptr) + : ptr(ptr) + {} + + /// Assignment + _CCCL_DEVICE _CCCL_FORCEINLINE ValueType operator=(ValueType val) + { + ThreadStore(ptr, val); + return val; + } + }; + +public: + // Required iterator traits + + /// My own type + using self_type = CacheModifiedOutputIterator; + + /// Type to express the result of subtracting one iterator from another + using difference_type = OffsetT; + + /// The type of the element the iterator can point to + using value_type = void; + + /// The type of a pointer to an element the iterator can point to + using pointer = void; + + /// The type of a reference to an element the iterator can point to + using reference = Reference; + + /// The iterator category + using iterator_category = typename THRUST_NS_QUALIFIER::detail::iterator_facade_category< + THRUST_NS_QUALIFIER::device_system_tag, + THRUST_NS_QUALIFIER::random_access_traversal_tag, + value_type, + reference>::type; + +private: + ValueType* ptr; + +public: + /** + * @param ptr + * Native pointer to wrap + */ + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE CacheModifiedOutputIterator(QualifiedValueType* ptr) + : 
ptr(const_cast::type*>(ptr)) + {} + + /// Postfix increment + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator++(int) + { + self_type retval = *this; + ptr++; + return retval; + } + + /// Prefix increment + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator++() + { + ptr++; + return *this; + } + + /// Indirection + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE reference operator*() const + { + return Reference(ptr); + } + + /// Addition + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator+(Distance n) const + { + self_type retval(ptr + n); + return retval; + } + + /// Addition assignment + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type& operator+=(Distance n) + { + ptr += n; + return *this; + } + + /// Subtraction + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator-(Distance n) const + { + self_type retval(ptr - n); + return retval; + } + + /// Subtraction assignment + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type& operator-=(Distance n) + { + ptr -= n; + return *this; + } + + /// Distance + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE difference_type operator-(self_type other) const + { + return ptr - other.ptr; + } + + /// Array subscript + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE reference operator[](Distance n) const + { + return Reference(ptr + n); + } + + /// Equal to + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE bool operator==(const self_type& rhs) + { + return (ptr == rhs.ptr); + } + + /// Not equal to + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE bool operator!=(const self_type& rhs) + { + return (ptr != rhs.ptr); + } + + /// ostream operator + friend std::ostream& operator<<(std::ostream& os, const self_type& itr) + { + return os; + } +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/iterator/constant_input_iterator.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/iterator/constant_input_iterator.cuh new file mode 100644 index 
0000000000000000000000000000000000000000..a9e1813ad408132ae03a62753d50aaa474c9e30d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/iterator/constant_input_iterator.cuh @@ -0,0 +1,234 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + ******************************************************************************/ + +/** + * @file + * Random-access iterator types + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include + +#include +#include + +#include + +CUB_NAMESPACE_BEGIN + +/** + * @brief A random-access input generator for dereferencing a sequence of homogeneous values + * + * @par Overview + * - Read references to a ConstantInputIterator always return the supplied constant + * of type @p ValueType. + * - Can be used with any data type. + * - Can be constructed, manipulated, dereferenced, and exchanged within and between host and device + * functions. + * - Compatible with Thrust API v1.7 or newer. + * + * @par Snippet + * The code snippet below illustrates the use of @p ConstantInputIterator to + * dereference a sequence of homogeneous doubles. 
+ * @par + * @code + * #include // or equivalently + * + * cub::ConstantInputIterator itr(5.0); + * + * printf("%f\n", itr[0]); // 5.0 + * printf("%f\n", itr[1]); // 5.0 + * printf("%f\n", itr[2]); // 5.0 + * printf("%f\n", itr[50]); // 5.0 + * + * @endcode + * + * @tparam ValueType + * The value type of this iterator + * + * @tparam OffsetT + * The difference type of this iterator (Default: @p ptrdiff_t) + */ +template +class +#ifndef __CUDA_ARCH__ + // Avoid generating a deprecation warning from length_encode.compute_xx.cpp1.ii, which is compiled by cicc for which + // we cannot suppress the warning + CCCL_DEPRECATED_BECAUSE("Use thrust::constant_iterator instead") +#endif + ConstantInputIterator +{ +public: + // Required iterator traits + + /// My own type + using self_type = ConstantInputIterator; + + /// Type to express the result of subtracting one iterator from another + using difference_type = OffsetT; + + /// The type of the element the iterator can point to + using value_type = ValueType; + + /// The type of a pointer to an element the iterator can point to + using pointer = ValueType*; + + /// The type of a reference to an element the iterator can point to + using reference = ValueType; + + /// The iterator category + using iterator_category = typename THRUST_NS_QUALIFIER::detail::iterator_facade_category< + THRUST_NS_QUALIFIER::any_system_tag, + THRUST_NS_QUALIFIER::random_access_traversal_tag, + value_type, + reference>::type; + +private: + ValueType val; + OffsetT offset; + +public: + /** + * @param val + * Starting value for the iterator instance to report + * + * @param offset + * Base offset + */ + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE ConstantInputIterator(ValueType val, OffsetT offset = 0) + : val(val) + , offset(offset) + {} + + /// Postfix increment + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator++(int) + { + self_type retval = *this; + offset++; + return retval; + } + + /// Prefix increment + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE 
self_type operator++() + { + offset++; + return *this; + } + + /// Indirection + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE reference operator*() const + { + return val; + } + + /// Addition + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator+(Distance n) const + { + self_type retval(val, offset + n); + return retval; + } + + /// Addition assignment + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type& operator+=(Distance n) + { + offset += n; + return *this; + } + + /// Subtraction + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator-(Distance n) const + { + self_type retval(val, offset - n); + return retval; + } + + /// Subtraction assignment + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type& operator-=(Distance n) + { + offset -= n; + return *this; + } + + /// Distance + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE difference_type operator-(self_type other) const + { + return offset - other.offset; + } + + /// Array subscript + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE reference operator[](Distance /*n*/) const + { + return val; + } + + /// Structure dereference + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE pointer operator->() + { + return &val; + } + + /// Equal to + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE bool operator==(const self_type& rhs) const + { + return (offset == rhs.offset) && ((val == rhs.val)); + } + + /// Not equal to + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE bool operator!=(const self_type& rhs) const + { + return (offset != rhs.offset) || (val != rhs.val); + } + + /// ostream operator + _CCCL_SUPPRESS_DEPRECATED_PUSH + friend std::ostream& operator<<(std::ostream& os, const self_type& itr) + { + os << "[" << itr.val << "," << itr.offset << "]"; + return os; + } + _CCCL_SUPPRESS_DEPRECATED_POP +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/iterator/counting_input_iterator.cuh 
b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/iterator/counting_input_iterator.cuh new file mode 100644 index 0000000000000000000000000000000000000000..f24867ebf74eb16689824c06d944b58688ca0e0b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/iterator/counting_input_iterator.cuh @@ -0,0 +1,231 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * \file + * Random-access iterator types + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include + +#include + +#if !_CCCL_COMPILER(NVRTC) +# include +# include + +# include +#endif // !_CCCL_COMPILER(NVRTC) + +CUB_NAMESPACE_BEGIN + +/** + * @brief A random-access input generator for dereferencing a sequence of incrementing integer values. + * + * @par Overview + * - After initializing a CountingInputIterator to a certain integer @p base, read references + * at @p offset will return the value @p base + @p offset. + * - Can be constructed, manipulated, dereferenced, and exchanged within and between host and device + * functions. + * - Compatible with Thrust API v1.7 or newer. + * + * @par Snippet + * The code snippet below illustrates the use of @p CountingInputIterator to + * dereference a sequence of incrementing integers. 
+ * @par + * @code + * #include // or equivalently + * + * cub::CountingInputIterator itr(5); + * + * printf("%d\n", itr[0]); // 5 + * printf("%d\n", itr[1]); // 6 + * printf("%d\n", itr[2]); // 7 + * printf("%d\n", itr[50]); // 55 + * + * @endcode + * + * @tparam ValueType + * The value type of this iterator + * + * @tparam OffsetT + * The difference type of this iterator (Default: @p ptrdiff_t) + */ +template +class CCCL_DEPRECATED_BECAUSE("Use thrust::counting_iterator instead") CountingInputIterator +{ +public: + // Required iterator traits + + /// My own type + using self_type = CountingInputIterator; + + /// Type to express the result of subtracting one iterator from another + using difference_type = OffsetT; + + /// The type of the element the iterator can point to + using value_type = ValueType; + + /// The type of a pointer to an element the iterator can point to + using pointer = ValueType*; + + /// The type of a reference to an element the iterator can point to + using reference = ValueType; + +#if _CCCL_COMPILER(NVRTC) + using iterator_category = ::cuda::std::random_access_iterator_tag; +#else // ^^^ _CCCL_COMPILER(NVRTC) ^^^ // vvv !_CCCL_COMPILER(NVRTC) vvv + using iterator_category = typename THRUST_NS_QUALIFIER::detail::iterator_facade_category< + THRUST_NS_QUALIFIER::any_system_tag, + THRUST_NS_QUALIFIER::random_access_traversal_tag, + value_type, + reference>::type; +#endif // _CCCL_COMPILER(NVRTC) + +private: + ValueType val; + +public: + /** + * @param val + * Starting value for the iterator instance to report + */ + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE CountingInputIterator(const ValueType& val) + : val(val) + {} + + /// Postfix increment + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator++(int) + { + self_type retval = *this; + val++; + return retval; + } + + /// Prefix increment + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator++() + { + val++; + return *this; + } + + /// Indirection + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE reference 
operator*() const + { + return val; + } + + /// Addition + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator+(Distance n) const + { + self_type retval(val + (ValueType) n); + return retval; + } + + /// Addition assignment + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type& operator+=(Distance n) + { + val += (ValueType) n; + return *this; + } + + /// Subtraction + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator-(Distance n) const + { + self_type retval(val - (ValueType) n); + return retval; + } + + /// Subtraction assignment + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type& operator-=(Distance n) + { + val -= n; + return *this; + } + + /// Distance + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE difference_type operator-(self_type other) const + { + return (difference_type) (val - other.val); + } + + /// Array subscript + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE reference operator[](Distance n) const + { + return val + (ValueType) n; + } + + /// Structure dereference + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE pointer operator->() + { + return &val; + } + + /// Equal to + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE bool operator==(const self_type& rhs) const + { + return (val == rhs.val); + } + + /// Not equal to + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE bool operator!=(const self_type& rhs) const + { + return (val != rhs.val); + } + + /// ostream operator +#if !_CCCL_COMPILER(NVRTC) + _CCCL_SUPPRESS_DEPRECATED_PUSH + friend std::ostream& operator<<(std::ostream& os, const self_type& itr) + { + os << "[" << itr.val << "]"; + return os; + } + _CCCL_SUPPRESS_DEPRECATED_POP +#endif // !_CCCL_COMPILER(NVRTC) +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/iterator/discard_output_iterator.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/iterator/discard_output_iterator.cuh new file mode 100644 index 
0000000000000000000000000000000000000000..0d7fe50048a0ebce2affe1d144cee7e8fca183d2 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/iterator/discard_output_iterator.cuh @@ -0,0 +1,203 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + ******************************************************************************/ + +/** + * @file + * Random-access iterator types + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include + +#include + +CUB_NAMESPACE_BEGIN + +/** + * @brief A discard iterator + */ +template +class CCCL_DEPRECATED_BECAUSE("Use thrust::discard_iterator instead") DiscardOutputIterator +{ +public: + // Required iterator traits + + /// My own type + using self_type = DiscardOutputIterator; + + /// Type to express the result of subtracting one iterator from another + using difference_type = OffsetT; + + /// The type of the element the iterator can point to + using value_type = void; + + /// The type of a pointer to an element the iterator can point to + using pointer = void; + + /// The type of a reference to an element the iterator can point to + using reference = void; + + /// The iterator category + using iterator_category = typename THRUST_NS_QUALIFIER::detail::iterator_facade_category< + THRUST_NS_QUALIFIER::any_system_tag, + THRUST_NS_QUALIFIER::random_access_traversal_tag, + value_type, + reference>::type; + +private: + OffsetT offset; + +public: + /** + * @param offset + * Base offset + */ + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE DiscardOutputIterator(OffsetT offset = 0) + : offset(offset) + {} + + /// Postfix increment + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator++(int) + { + self_type retval = *this; + offset++; + return retval; + } + + /// Prefix increment + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator++() + { + offset++; + return *this; + } + + /// Indirection + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type& operator*() + { + // return self reference, which can be assigned to anything + return 
*this; + } + + /// Addition + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator+(Distance n) const + { + self_type retval(offset + n); + return retval; + } + + /// Addition assignment + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type& operator+=(Distance n) + { + offset += n; + return *this; + } + + /// Subtraction + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator-(Distance n) const + { + self_type retval(offset - n); + return retval; + } + + /// Subtraction assignment + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type& operator-=(Distance n) + { + offset -= n; + return *this; + } + + /// Distance + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE difference_type operator-(self_type other) const + { + return offset - other.offset; + } + + /// Array subscript + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type& operator[](Distance n) + { + // return self reference, which can be assigned to anything + return *this; + } + + /// Structure dereference + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE pointer operator->() + { + return; + } + + /// Assignment to anything else (no-op) + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE void operator=(T const&) + {} + + /// Cast to void* operator + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE operator void*() const + { + return nullptr; + } + + /// Equal to + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE bool operator==(const self_type& rhs) + { + return (offset == rhs.offset); + } + + /// Not equal to + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE bool operator!=(const self_type& rhs) + { + return (offset != rhs.offset); + } + + /// ostream operator + _CCCL_SUPPRESS_DEPRECATED_PUSH + friend std::ostream& operator<<(std::ostream& os, const self_type& itr) + { + os << "[" << itr.offset << "]"; + return os; + } + _CCCL_SUPPRESS_DEPRECATED_POP +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/iterator/tex_obj_input_iterator.cuh 
b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/iterator/tex_obj_input_iterator.cuh new file mode 100644 index 0000000000000000000000000000000000000000..76ba47924e67d409f75b8f039c614a8b2da5b44e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/iterator/tex_obj_input_iterator.cuh @@ -0,0 +1,315 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * @file + * Random-access iterator types + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include + +#include +#include + +#include + +#include + +CUB_NAMESPACE_BEGIN + +/** + * @brief A random-access input wrapper for dereferencing array values through texture cache. + * Uses newer Kepler-style texture objects. + * + * @par Overview + * - TexObjInputIterator wraps a native device pointer of type ValueType*. References + * to elements are to be loaded through texture cache. + * - Can be used to load any data type from memory through texture cache. + * - Can be manipulated and exchanged within and between host and device + * functions, can only be constructed within host functions, and can only be + * dereferenced within device functions. + * - With regard to nested/dynamic parallelism, TexObjInputIterator iterators may only be + * created by the host thread, but can be used by any descendant kernel. + * - Compatible with Thrust API v1.7 or newer. 
+ * + * @par Snippet + * The code snippet below illustrates the use of @p TexObjInputIterator to + * dereference a device array of doubles through texture cache. + * @par + * @code + * #include // or equivalently + * + * // Declare, allocate, and initialize a device array + * int num_items; // e.g., 7 + * double *d_in; // e.g., [8.0, 6.0, 7.0, 5.0, 3.0, 0.0, 9.0] + * + * // Create an iterator wrapper + * cub::TexObjInputIterator itr; + * itr.BindTexture(d_in, sizeof(double) * num_items); + * ... + * + * // Within device code: + * printf("%f\n", itr[0]); // 8.0 + * printf("%f\n", itr[1]); // 6.0 + * printf("%f\n", itr[6]); // 9.0 + * + * ... + * itr.UnbindTexture(); + * + * @endcode + * + * @tparam T + * The value type of this iterator + * + * @tparam OffsetT + * The difference type of this iterator (Default: @p ptrdiff_t) + */ +template +class TexObjInputIterator +{ +public: + // Required iterator traits + + /// My own type + using self_type = TexObjInputIterator; + + /// Type to express the result of subtracting one iterator from another + using difference_type = OffsetT; + + /// The type of the element the iterator can point to + using value_type = T; + + /// The type of a pointer to an element the iterator can point to + using pointer = T*; + + /// The type of a reference to an element the iterator can point to + using reference = T; + + /// The iterator category + using iterator_category = typename THRUST_NS_QUALIFIER::detail::iterator_facade_category< + THRUST_NS_QUALIFIER::device_system_tag, + THRUST_NS_QUALIFIER::random_access_traversal_tag, + value_type, + reference>::type; + +private: + // Largest texture word we can use in device + using TextureWord = typename UnitWord::TextureWord; + + // Number of texture words per T + enum + { + TEXTURE_MULTIPLE = sizeof(T) / sizeof(TextureWord) + }; + +private: + T* ptr; + difference_type tex_offset; + cudaTextureObject_t tex_obj; + +public: + /// Constructor + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE TexObjInputIterator() 
+ : ptr(nullptr) + , tex_offset(0) + , tex_obj(0) + {} + + /** + * @brief Use this iterator to bind @p ptr with a texture reference + * + * @param ptr + * Native pointer to wrap that is aligned to cudaDeviceProp::textureAlignment + * + * @param bytes + * Number of bytes in the range + * + * @param tex_offset + * OffsetT (in items) from @p ptr denoting the position of the iterator + */ + template + cudaError_t BindTexture(QualifiedT* ptr, size_t bytes, size_t tex_offset = 0) + { + this->ptr = const_cast::type*>(ptr); + this->tex_offset = static_cast(tex_offset); + + cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc(); + cudaResourceDesc res_desc; + cudaTextureDesc tex_desc; + memset(&res_desc, 0, sizeof(cudaResourceDesc)); + memset(&tex_desc, 0, sizeof(cudaTextureDesc)); + res_desc.resType = cudaResourceTypeLinear; + res_desc.res.linear.devPtr = this->ptr; + res_desc.res.linear.desc = channel_desc; + res_desc.res.linear.sizeInBytes = bytes; + tex_desc.readMode = cudaReadModeElementType; + return CubDebug(cudaCreateTextureObject(&tex_obj, &res_desc, &tex_desc, nullptr)); + } + + /// Unbind this iterator from its texture reference + cudaError_t UnbindTexture() + { + return CubDebug(cudaDestroyTextureObject(tex_obj)); + } + + /// Postfix increment + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator++(int) + { + self_type retval = *this; + tex_offset++; + return retval; + } + + /// Prefix increment + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator++() + { + tex_offset++; + return *this; + } + + /// Indirection + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE reference operator*() const + { + NV_IF_TARGET(NV_IS_HOST, (return ptr[tex_offset];), (return this->device_deref();)); + } + + /// Addition + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator+(Distance n) const + { + self_type retval; + retval.ptr = ptr; + retval.tex_obj = tex_obj; + retval.tex_offset = tex_offset + n; + return retval; + } + + /// Addition assignment + template + 
_CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type& operator+=(Distance n) + { + tex_offset += n; + return *this; + } + + /// Subtraction + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator-(Distance n) const + { + self_type retval; + retval.ptr = ptr; + retval.tex_obj = tex_obj; + retval.tex_offset = tex_offset - n; + return retval; + } + + /// Subtraction assignment + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type& operator-=(Distance n) + { + tex_offset -= n; + return *this; + } + + /// Distance + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE difference_type operator-(self_type other) const + { + return tex_offset - other.tex_offset; + } + + /// Array subscript + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE reference operator[](Distance n) const + { + self_type offset = (*this) + n; + return *offset; + } + + /// Structure dereference + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE pointer operator->() + { + return &(*(*this)); + } + + /// Equal to + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE bool operator==(const self_type& rhs) const + { + return ((ptr == rhs.ptr) && (tex_offset == rhs.tex_offset) && (tex_obj == rhs.tex_obj)); + } + + /// Not equal to + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE bool operator!=(const self_type& rhs) const + { + return ((ptr != rhs.ptr) || (tex_offset != rhs.tex_offset) || (tex_obj != rhs.tex_obj)); + } + + /// ostream operator + friend std::ostream& operator<<(std::ostream& os, const self_type& itr) + { + os << "cub::TexObjInputIterator( ptr=" << itr.ptr << ", offset=" << itr.tex_offset << ", tex_obj=" << itr.tex_obj + << " )"; + return os; + } + +private: + // This is hoisted out of operator* because #pragma can't be used inside of + // NV_IF_TARGET + _CCCL_DEVICE _CCCL_FORCEINLINE reference device_deref() const + { + // Move array of uninitialized words, then alias and assign to return + // value + TextureWord words[TEXTURE_MULTIPLE]; + + const auto tex_idx_base = tex_offset * TEXTURE_MULTIPLE; + +#pragma unroll + for (int i = 0; i < 
TEXTURE_MULTIPLE; ++i) + { + words[i] = tex1Dfetch(tex_obj, tex_idx_base + i); + } + + // Load from words + return *reinterpret_cast(words); + } +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/iterator/transform_input_iterator.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/iterator/transform_input_iterator.cuh new file mode 100644 index 0000000000000000000000000000000000000000..150e28bc0ca310e73edf0776cc57517cf2a0cdb2 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/iterator/transform_input_iterator.cuh @@ -0,0 +1,244 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * @file + * Random-access iterator types + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include + +#include +#include + +#include + +CUB_NAMESPACE_BEGIN + +/** + * @brief A random-access input wrapper for transforming dereferenced values. + * + * @par Overview + * - TransformInputIterator wraps a unary conversion functor of type + * @p ConversionOp and a random-access input iterator of type InputIteratorT, + * using the former to produce references of type @p ValueType from the latter. + * - Can be used with any data type. + * - Can be constructed, manipulated, and exchanged within and between host and device + * functions. Wrapped host memory can only be dereferenced on the host, and wrapped + * device memory can only be dereferenced on the device. + * - Compatible with Thrust API v1.7 or newer. + * + * @par Snippet + * The code snippet below illustrates the use of @p TransformInputIterator to + * dereference an array of integers, tripling the values and converting them to doubles. 
+ * @par + * @code + * #include // or equivalently + * + * // Functor for tripling integer values and converting to doubles + * struct TripleDoubler + * { + * __host__ __device__ __forceinline__ + * double operator()(const int &a) const { + * return double(a * 3); + * } + * }; + * + * // Declare, allocate, and initialize a device array + * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] + * TripleDoubler conversion_op; + * + * // Create an iterator wrapper + * cub::TransformInputIterator itr(d_in, conversion_op); + * + * // Within device code: + * printf("%f\n", itr[0]); // 24.0 + * printf("%f\n", itr[1]); // 18.0 + * printf("%f\n", itr[6]); // 27.0 + * + * @endcode + * + * @tparam ValueType + * The value type of this iterator + * + * @tparam ConversionOp + * Unary functor type for mapping objects of type @p InputType to type @p ValueType. + * Must have member ValueType operator()(const InputType &datum). + * + * @tparam InputIteratorT + * The type of the wrapped input iterator + * + * @tparam OffsetT + * The difference type of this iterator (Default: @p ptrdiff_t) + */ +template +class CCCL_DEPRECATED_BECAUSE("Use thrust::transform_iterator instead") TransformInputIterator +{ +public: + // Required iterator traits + + /// My own type + using self_type = TransformInputIterator; + + /// Type to express the result of subtracting one iterator from another + using difference_type = OffsetT; + + /// The type of the element the iterator can point to + using value_type = ValueType; + + /// The type of a pointer to an element the iterator can point to + using pointer = ValueType*; + + /// The type of a reference to an element the iterator can point to + using reference = ValueType; + + /// The iterator category + using iterator_category = typename THRUST_NS_QUALIFIER::detail::iterator_facade_category< + THRUST_NS_QUALIFIER::any_system_tag, + THRUST_NS_QUALIFIER::random_access_traversal_tag, + value_type, + reference>::type; + +private: + ConversionOp conversion_op; + 
InputIteratorT input_itr; + +public: + /** + * @param input_itr + * Input iterator to wrap + * + * @param conversion_op + * Conversion functor to wrap + */ + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE TransformInputIterator(InputIteratorT input_itr, ConversionOp conversion_op) + : conversion_op(conversion_op) + , input_itr(input_itr) + {} + + /// Postfix increment + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator++(int) + { + self_type retval = *this; + input_itr++; + return retval; + } + + /// Prefix increment + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator++() + { + input_itr++; + return *this; + } + + /// Indirection + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE reference operator*() const + { + return conversion_op(*input_itr); + } + + /// Addition + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator+(Distance n) const + { + self_type retval(input_itr + n, conversion_op); + return retval; + } + + /// Addition assignment + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type& operator+=(Distance n) + { + input_itr += n; + return *this; + } + + /// Subtraction + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type operator-(Distance n) const + { + self_type retval(input_itr - n, conversion_op); + return retval; + } + + /// Subtraction assignment + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE self_type& operator-=(Distance n) + { + input_itr -= n; + return *this; + } + + /// Distance + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE difference_type operator-(self_type other) const + { + return input_itr - other.input_itr; + } + + /// Array subscript + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE reference operator[](Distance n) const + { + return conversion_op(input_itr[n]); + } + + /// Equal to + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE bool operator==(const self_type& rhs) const + { + return (input_itr == rhs.input_itr); + } + + /// Not equal to + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE bool operator!=(const self_type& rhs) const + { + return (input_itr 
!= rhs.input_itr); + } + + /// ostream operator + _CCCL_SUPPRESS_DEPRECATED_PUSH + friend std::ostream& operator<<(std::ostream& os, const self_type& /* itr */) + { + return os; + } + _CCCL_SUPPRESS_DEPRECATED_POP +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/specializations/warp_exchange_shfl.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/specializations/warp_exchange_shfl.cuh new file mode 100644 index 0000000000000000000000000000000000000000..f874f961caa329f281ab441a9dd137d3e03a69a8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/specializations/warp_exchange_shfl.cuh @@ -0,0 +1,329 @@ +/****************************************************************************** + * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include + +#include + +CUB_NAMESPACE_BEGIN + +namespace detail +{ + +template +class WarpExchangeShfl +{ + static_assert(PowerOfTwo::VALUE, "LOGICAL_WARP_THREADS must be a power of two"); + + static_assert(ITEMS_PER_THREAD == LOGICAL_WARP_THREADS, + "WARP_EXCHANGE_SHUFFLE currently only works when ITEMS_PER_THREAD == " + "LOGICAL_WARP_THREADS"); + + static constexpr bool IS_ARCH_WARP = LOGICAL_WARP_THREADS == CUB_WARP_THREADS(0); + + // concrete recursion class + template + class CompileTimeArray : protected CompileTimeArray + { + protected: + InputT val; + + template + _CCCL_DEVICE void Foreach(const bool xor_bit_set, const unsigned mask) + { + // The implementation here is a recursive divide-and-conquer approach + // that takes inspiration from: + // https://forums.developer.nvidia.com/t/transposing-register-held-matrices-with-warp-shuffles-need-help/38652/2 + // + // At its core, the problem can be boiled down to transposing the matrix + // + // A B + // C D + // + // by swapping the off-diagonal elements/sub-matrices B and C recursively. 
+ // + // This implementation requires power-of-two matrices. In order to avoid + // the use of local or shared memory, all index computation has to occur + // at compile-time, since registers cannot be indexed dynamically. + // Furthermore, using recursive templates reduces the mental load on the + // optimizer, since lowering for-loops into registers oftentimes requires + // finagling them with #pragma unroll, which leads to brittle code. + // + // To illustrate this algorithm, let's pretend we have warpSize = 8, + // where t0, ..., t7 denote the 8 threads, and thread i has an array of + // size 8 with data = [Ai, Bi, ..., Hi] (the columns in the schematics). + // + // In the first round, we exchange the largest 4x4 off-diagonal + // submatrix. Boxes illustrate the submatrices to be exchanged. + // + // ROUND 1 + // ======= + // t0 t1 t2 t3 t4 t5 t6 t7 + // ┌──────────────┐ + // A0 A1 A2 A3 │A4 A5 A6 A7│ NUM_ENTRIES == 4 tells us how many + // │ │ entries we have in a submatrix, + // │ │ in this case 4 and the size of + // B0 B1 B2 B3 │B4 B5 B6 B7│ the jumps between submatrices. + // │ │ + // │ │ 1. t[0,1,2,3] data[4] swap with t[4,5,6,7]'s data[0] + // C0 C1 C2 C3 │C4 C5 C6 C7│ 2. t[0,1,2,3] data[5] swap with t[4,5,6,7]'s data[1] + // │ │ 3. t[0,1,2,3] data[6] swap with t[4,5,6,7]'s data[2] + // │ │ 4. t[0,1,2,3] data[7] swap with t[4,5,6,7]'s data[3] + // D0 D1 D2 D3 │D4 D5 D6 D7│ + // └──────────────┘ + // ┌──────────────┐ + // │E0 E1 E2 E3│ E4 E5 E6 E7 + // │ │ + // │ │ + // │F0 F1 F2 F3│ F4 F5 F6 F7 + // │ │ + // │ │ + // │G0 G1 G2 G3│ G4 G5 G6 G7 + // │ │ + // │ │ + // │H0 H1 H2 H3│ H4 H5 H6 H7 + // └──────────────┘ + // + // ROUND 2 + // ======= + // t0 t1 t2 t3 t4 t5 t6 t7 + // ┌──────┐ ┌──────┐ + // A0 A1 │A2 A3│ E0 E1 │E2 E3│ NUM_ENTRIES == 2 so we have 2 + // │ │ │ │ submatrices per thread and there + // │ │ │ │ are 2 elements between these + // B0 B1 │B2 B3│ F0 F1 │F2 F3│ submatrices. + // └──────┘ └──────┘ + // ┌──────┐ ┌──────┐ 1. 
t[0,1,4,5] data[2] swap with t[2,3,6,7]'s data[0] + // │C0 C1│ C2 C3 │G0 G1│ G2 G3 2. t[0,1,4,5] data[3] swap with t[2,3,6,7]'s data[1] + // │ │ │ │ 3. t[0,1,4,5] data[6] swap with t[2,3,6,7]'s data[4] + // │ │ │ │ 4. t[0,1,4,5] data[7] swap with t[2,3,6,7]'s data[5] + // │D0 D1│ D2 D3 │H0 H1│ H2 H3 + // └──────┘ └──────┘ + // ┌──────┐ ┌──────┐ + // A4 A5 │A6 A7│ E4 E5 │E6 E7│ + // │ │ │ │ + // │ │ │ │ + // B4 B5 │B6 B7│ F4 F5 │F6 F7│ + // └──────┘ └──────┘ + // ┌──────┐ ┌──────┐ + // │C4 C5│ C6 C7 │G4 G5│ G6 G7 + // │ │ │ │ + // │ │ │ │ + // │D4 D5│ D6 D7 │H4 H5│ H6 H7 + // └──────┘ └──────┘ + // + // ROUND 3 + // ======= + // t0 t1 t2 t3 t4 t5 t6 t7 + // ┌──┐ ┌──┐ ┌──┐ ┌──┐ + // A0 │A1│ C0 │C1│ E0 │E1│ G0 │G1│ NUM_ENTRIES == 1 so we have 4 + // └──┘ └──┘ └──┘ └──┘ submatrices per thread and there + // ┌──┐ ┌──┐ ┌──┐ ┌──┐ is 1 element between these + // │B0│ B1 │D0│ D1 │F0│ F1 │H0│ H1 submatrices. + // └──┘ └──┘ └──┘ └──┘ + // ┌──┐ ┌──┐ ┌──┐ ┌──┐ 1. t[0,2,4,6] data[1] swap with t[1,3,5,7]'s data[0] + // A2 │A3│ C2 │C3│ E2 │E3│ G2 │G3│ 2. t[0,2,4,6] data[3] swap with t[1,3,5,7]'s data[2] + // └──┘ └──┘ └──┘ └──┘ 3. t[0,2,4,6] data[5] swap with t[1,3,5,7]'s data[4] + // ┌──┐ ┌──┐ ┌──┐ ┌──┐ 4. 
t[0,2,4,6] data[7] swap with t[1,3,5,7]'s data[6] + // │B2│ B3 │D2│ D3 │F2│ F3 │H2│ H3 + // └──┘ └──┘ └──┘ └──┘ + // ┌──┐ ┌──┐ ┌──┐ ┌──┐ + // A4 │A5│ C4 │C5│ E4 │E5│ G4 │G5│ + // └──┘ └──┘ └──┘ └──┘ + // ┌──┐ ┌──┐ ┌──┐ ┌──┐ + // │B4│ B5 │D4│ D5 │F4│ F5 │H4│ H5 + // └──┘ └──┘ └──┘ └──┘ + // ┌──┐ ┌──┐ ┌──┐ ┌──┐ + // A6 │A7│ C6 │C7│ E6 │E7│ G6 │G7│ + // └──┘ └──┘ └──┘ └──┘ + // ┌──┐ ┌──┐ ┌──┐ ┌──┐ + // │B6│ B7 │D6│ D7 │F6│ F7 │H6│ H7 + // └──┘ └──┘ └──┘ └──┘ + // + // RESULT + // ====== + // t0 t1 t2 t3 t4 t5 t6 t7 + // + // A0 B0 C0 D0 E0 F0 G0 H0 + // + // + // A1 B1 C1 D1 E1 F1 G1 H1 + // + // + // A2 B2 C2 D2 E2 F2 G2 H2 + // + // + // A3 B3 C3 D3 E3 F3 G3 H3 + // + // + // A4 B4 C4 D4 E4 F4 G4 H4 + // + // + // A5 B5 C5 D5 E5 F5 G5 H5 + // + // + // A6 B6 C6 D6 E6 F6 G6 H6 + // + // + // A7 B7 C7 D7 E7 F7 G7 H7 + // + + // NOTE: Do *NOT* try to refactor this code to use a reference, since nvcc + // tends to choke on it and then drop everything into local memory. + const InputT send_val = (xor_bit_set ? CompileTimeArray::val + : CompileTimeArray::val); + const InputT recv_val = __shfl_xor_sync(mask, send_val, NUM_ENTRIES, LOGICAL_WARP_THREADS); + (xor_bit_set ? 
CompileTimeArray::val + : CompileTimeArray::val) = recv_val; + + constexpr int next_idx = IDX + 1 + ((IDX + 1) % NUM_ENTRIES == 0) * NUM_ENTRIES; + CompileTimeArray::template Foreach(xor_bit_set, mask); + } + + // terminate recursion + _CCCL_DEVICE void TransposeImpl(unsigned int, unsigned int, Int2Type<0>) {} + + template + _CCCL_DEVICE void TransposeImpl(const unsigned int lane_id, const unsigned int mask, Int2Type) + { + const bool xor_bit_set = lane_id & NUM_ENTRIES; + Foreach(xor_bit_set, mask); + + TransposeImpl(lane_id, mask, Int2Type()); + } + + public: + _CCCL_DEVICE + CompileTimeArray(const InputT (&input_items)[ITEMS_PER_THREAD], OutputT (&output_items)[ITEMS_PER_THREAD]) + : CompileTimeArray{input_items, output_items} + , val{input_items[IDX]} + {} + + _CCCL_DEVICE ~CompileTimeArray() + { + this->output_items[IDX] = val; + } + + _CCCL_DEVICE void Transpose(const unsigned int lane_id, const unsigned int mask) + { + TransposeImpl(lane_id, mask, Int2Type()); + } + }; + + // terminating partial specialization + template + class CompileTimeArray + { + protected: + // used for dumping back the individual values after transposing + InputT (&output_items)[ITEMS_PER_THREAD]; + + template + _CCCL_DEVICE void Foreach(bool, unsigned) + {} + + public: + _CCCL_DEVICE CompileTimeArray(const InputT (&)[ITEMS_PER_THREAD], OutputT (&output_items)[ITEMS_PER_THREAD]) + : output_items{output_items} + {} + }; + + const unsigned int lane_id; + const unsigned int warp_id; + const unsigned int member_mask; + +public: + using TempStorage = NullType; + + WarpExchangeShfl() = delete; + + explicit _CCCL_DEVICE _CCCL_FORCEINLINE WarpExchangeShfl(TempStorage&) + : lane_id(IS_ARCH_WARP ? ::cuda::ptx::get_sreg_laneid() : (::cuda::ptx::get_sreg_laneid() % LOGICAL_WARP_THREADS)) + , warp_id(IS_ARCH_WARP ? 
0 : (::cuda::ptx::get_sreg_laneid() / LOGICAL_WARP_THREADS)) + , member_mask(WarpMask(warp_id)) + {} + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + BlockedToStriped(const InputT (&input_items)[ITEMS_PER_THREAD], OutputT (&output_items)[ITEMS_PER_THREAD]) + { + CompileTimeArray arr{input_items, output_items}; + arr.Transpose(lane_id, member_mask); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + StripedToBlocked(const InputT (&input_items)[ITEMS_PER_THREAD], OutputT (&output_items)[ITEMS_PER_THREAD]) + { + BlockedToStriped(input_items, output_items); + } + + // Trick to keep the compiler from inferring that the + // condition in the static_assert is always false. + template + struct dependent_false + { + static constexpr bool value = false; + }; + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ScatterToStriped(InputT (&)[ITEMS_PER_THREAD], OffsetT (&)[ITEMS_PER_THREAD]) + { + static_assert(dependent_false::value, + "Shuffle specialization of warp exchange does not support\n" + "ScatterToStriped(InputT (&items)[ITEMS_PER_THREAD],\n" + " OffsetT (&ranks)[ITEMS_PER_THREAD])"); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + ScatterToStriped(const InputT (&)[ITEMS_PER_THREAD], OutputT (&)[ITEMS_PER_THREAD], OffsetT (&)[ITEMS_PER_THREAD]) + { + static_assert(dependent_false::value, + "Shuffle specialization of warp exchange does not support\n" + "ScatterToStriped(const InputT (&input_items)[ITEMS_PER_THREAD],\n" + " OutputT (&output_items)[ITEMS_PER_THREAD],\n" + " OffsetT (&ranks)[ITEMS_PER_THREAD])"); + } +}; + +} // namespace detail + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/specializations/warp_exchange_smem.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/specializations/warp_exchange_smem.cuh new file mode 100644 index 0000000000000000000000000000000000000000..35b688f813c96aaef9ac5ba4aad1df9a7fa61de4 --- /dev/null +++ 
b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/specializations/warp_exchange_smem.cuh @@ -0,0 +1,177 @@ +/****************************************************************************** + * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + ******************************************************************************/ + +/** + * @file + * The cub::WarpExchangeSmem class provides [collective](index.html#sec0) + * methods for rearranging data partitioned across a CUDA warp. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include + +#include + +CUB_NAMESPACE_BEGIN + +namespace detail +{ + +template +class WarpExchangeSmem +{ + static_assert(PowerOfTwo::VALUE, "LOGICAL_WARP_THREADS must be a power of two"); + + static constexpr int ITEMS_PER_TILE = ITEMS_PER_THREAD * LOGICAL_WARP_THREADS + 1; + + static constexpr bool IS_ARCH_WARP = LOGICAL_WARP_THREADS == CUB_WARP_THREADS(0); + + static constexpr int LOG_SMEM_BANKS = CUB_LOG_SMEM_BANKS(0); + + // Insert padding if the number of items per thread is a power of two + // and > 4 (otherwise we can typically use 128b loads) + static constexpr bool INSERT_PADDING = (ITEMS_PER_THREAD > 4) && (PowerOfTwo::VALUE); + + static constexpr int PADDING_ITEMS = INSERT_PADDING ? (ITEMS_PER_TILE >> LOG_SMEM_BANKS) : 0; + + union _TempStorage + { + InputT items_shared[ITEMS_PER_TILE + PADDING_ITEMS]; + }; // union TempStorage + + /// Shared storage reference + _TempStorage& temp_storage; + + const unsigned int lane_id; + const unsigned int warp_id; + const unsigned int member_mask; + +public: + struct TempStorage : Uninitialized<_TempStorage> + {}; + + WarpExchangeSmem() = delete; + + explicit _CCCL_DEVICE _CCCL_FORCEINLINE WarpExchangeSmem(TempStorage& temp_storage) + : temp_storage(temp_storage.Alias()) + , lane_id(IS_ARCH_WARP ? ::cuda::ptx::get_sreg_laneid() : (::cuda::ptx::get_sreg_laneid() % LOGICAL_WARP_THREADS)) + , warp_id(IS_ARCH_WARP ? 
0 : (::cuda::ptx::get_sreg_laneid() / LOGICAL_WARP_THREADS)) + , member_mask(WarpMask(warp_id)) + {} + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + BlockedToStriped(const InputT (&input_items)[ITEMS_PER_THREAD], OutputT (&output_items)[ITEMS_PER_THREAD]) + { + for (int item = 0; item < ITEMS_PER_THREAD; item++) + { + const int idx = ITEMS_PER_THREAD * lane_id + item; + temp_storage.items_shared[idx] = input_items[item]; + } + __syncwarp(member_mask); + + for (int item = 0; item < ITEMS_PER_THREAD; item++) + { + const int idx = LOGICAL_WARP_THREADS * item + lane_id; + output_items[item] = temp_storage.items_shared[idx]; + } + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + StripedToBlocked(const InputT (&input_items)[ITEMS_PER_THREAD], OutputT (&output_items)[ITEMS_PER_THREAD]) + { + for (int item = 0; item < ITEMS_PER_THREAD; item++) + { + const int idx = LOGICAL_WARP_THREADS * item + lane_id; + temp_storage.items_shared[idx] = input_items[item]; + } + __syncwarp(member_mask); + + for (int item = 0; item < ITEMS_PER_THREAD; item++) + { + const int idx = ITEMS_PER_THREAD * lane_id + item; + output_items[item] = temp_storage.items_shared[idx]; + } + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + ScatterToStriped(InputT (&items)[ITEMS_PER_THREAD], OffsetT (&ranks)[ITEMS_PER_THREAD]) + { + ScatterToStriped(items, items, ranks); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ScatterToStriped( + const InputT (&input_items)[ITEMS_PER_THREAD], + OutputT (&output_items)[ITEMS_PER_THREAD], + OffsetT (&ranks)[ITEMS_PER_THREAD]) + { +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) + { + if (INSERT_PADDING) + { + ranks[ITEM] = (ranks[ITEM] >> LOG_SMEM_BANKS) + ranks[ITEM]; + } + + temp_storage.items_shared[ranks[ITEM]] = input_items[ITEM]; + } + + __syncwarp(member_mask); + +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) + { + int item_offset = (ITEM * LOGICAL_WARP_THREADS) + lane_id; + + if 
(INSERT_PADDING) + { + item_offset = (item_offset >> LOG_SMEM_BANKS) + item_offset; + } + + output_items[ITEM] = temp_storage.items_shared[item_offset]; + } + } +}; + +} // namespace detail + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/specializations/warp_reduce_shfl.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/specializations/warp_reduce_shfl.cuh new file mode 100644 index 0000000000000000000000000000000000000000..8c4ad78d1ade9445a495553b85e9dd82e5beb3b9 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/specializations/warp_reduce_shfl.cuh @@ -0,0 +1,747 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * @file + * cub::WarpReduceShfl provides SHFL-based variants of parallel reduction of items partitioned across a CUDA thread + * warp. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include + +#include +#include +#include + +#include + +CUB_NAMESPACE_BEGIN + +namespace detail +{ + +template +struct reduce_add_exists : ::cuda::std::false_type +{}; + +template +struct reduce_add_exists : ::cuda::std::true_type +{}; + +template +struct reduce_min_exists : ::cuda::std::false_type +{}; + +template +struct reduce_min_exists : ::cuda::std::true_type +{}; + +template +struct reduce_max_exists : ::cuda::std::false_type +{}; + +template +struct reduce_max_exists : ::cuda::std::true_type +{}; + +/** + * @brief WarpReduceShfl provides SHFL-based variants of parallel reduction of items partitioned + * across a CUDA thread warp. 
+ * + * @tparam T + * Data type being reduced + * + * @tparam LOGICAL_WARP_THREADS + * Number of threads per logical warp (must be a power-of-two) + * + * @tparam LEGACY_PTX_ARCH + * The PTX compute capability for which to to specialize this collective + */ +template +struct WarpReduceShfl +{ + static_assert(PowerOfTwo::VALUE, "LOGICAL_WARP_THREADS must be a power of two"); + + //--------------------------------------------------------------------- + // Constants and type definitions + //--------------------------------------------------------------------- + + enum + { + /// Whether the logical warp size and the PTX warp size coincide + IS_ARCH_WARP = (LOGICAL_WARP_THREADS == CUB_WARP_THREADS(0)), + + /// The number of warp reduction steps + STEPS = Log2::VALUE, + + /// Number of logical warps in a PTX warp + LOGICAL_WARPS = CUB_WARP_THREADS(0) / LOGICAL_WARP_THREADS, + + /// The 5-bit SHFL mask for logically splitting warps into sub-segments starts 8-bits up + SHFL_C = (CUB_WARP_THREADS(0) - LOGICAL_WARP_THREADS) << 8 + + }; + + template + struct IsInteger + { + enum + { + /// Whether the data type is a small (32b or less) integer for which we can use a single SHFL instruction per + /// exchange + IS_SMALL_UNSIGNED = (Traits::CATEGORY == UNSIGNED_INTEGER) && (sizeof(S) <= sizeof(unsigned int)) + }; + }; + + /// Shared memory storage layout type + using TempStorage = NullType; + + //--------------------------------------------------------------------- + // Thread fields + //--------------------------------------------------------------------- + + /// Lane index in logical warp + int lane_id; + + /// Logical warp index in 32-thread physical warp + int warp_id; + + /// 32-thread physical warp member mask of logical warp + ::cuda::std::uint32_t member_mask; + + //--------------------------------------------------------------------- + // Construction + //--------------------------------------------------------------------- + + /// Constructor + _CCCL_DEVICE 
_CCCL_FORCEINLINE WarpReduceShfl(TempStorage& /*temp_storage*/) + : lane_id(static_cast(::cuda::ptx::get_sreg_laneid())) + , warp_id(IS_ARCH_WARP ? 0 : (lane_id / LOGICAL_WARP_THREADS)) + , member_mask(WarpMask(warp_id)) + { + if (!IS_ARCH_WARP) + { + lane_id = lane_id % LOGICAL_WARP_THREADS; + } + } + + //--------------------------------------------------------------------- + // Reduction steps + //--------------------------------------------------------------------- + + /** + * @brief Reduction (specialized for summation across uint32 types) + * + * @param[in] input + * Calling thread's input item. + * + * @param[in] reduction_op + * Binary reduction operator + * + * @param[in] last_lane + * Index of last lane in segment + * + * @param[in] offset + * Up-offset to pull from + */ + _CCCL_DEVICE _CCCL_FORCEINLINE unsigned int + ReduceStep(unsigned int input, ::cuda::std::plus<> /*reduction_op*/, int last_lane, int offset) + { + unsigned int output; + int shfl_c = last_lane | SHFL_C; // Shuffle control (mask and last_lane) + + // Use predicate set from SHFL to guard against invalid peers + asm volatile( + "{" + " .reg .u32 r0;" + " .reg .pred p;" + " shfl.sync.down.b32 r0|p, %1, %2, %3, %5;" + " @p add.u32 r0, r0, %4;" + " mov.u32 %0, r0;" + "}" + : "=r"(output) + : "r"(input), "r"(offset), "r"(shfl_c), "r"(input), "r"(member_mask)); + + return output; + } + + /** + * @brief Reduction (specialized for summation across fp32 types) + * + * @param[in] input + * Calling thread's input item. 
+ * + * @param[in] reduction_op + * Binary reduction operator + * + * @param[in] last_lane + * Index of last lane in segment + * + * @param[in] offset + * Up-offset to pull from + */ + _CCCL_DEVICE _CCCL_FORCEINLINE float + ReduceStep(float input, ::cuda::std::plus<> /*reduction_op*/, int last_lane, int offset) + { + float output; + int shfl_c = last_lane | SHFL_C; // Shuffle control (mask and last_lane) + + // Use predicate set from SHFL to guard against invalid peers + asm volatile( + "{" + " .reg .f32 r0;" + " .reg .pred p;" + " shfl.sync.down.b32 r0|p, %1, %2, %3, %5;" + " @p add.f32 r0, r0, %4;" + " mov.f32 %0, r0;" + "}" + : "=f"(output) + : "f"(input), "r"(offset), "r"(shfl_c), "f"(input), "r"(member_mask)); + + return output; + } + + /** + * @brief Reduction (specialized for summation across unsigned long long types) + * + * @param[in] input + * Calling thread's input item + * + * @param[in] reduction_op + * Binary reduction operator + * + * @param[in] last_lane + * Index of last lane in segment + * + * @param[in] offset + * Up-offset to pull from + */ + _CCCL_DEVICE _CCCL_FORCEINLINE unsigned long long + ReduceStep(unsigned long long input, ::cuda::std::plus<> /*reduction_op*/, int last_lane, int offset) + { + unsigned long long output; + int shfl_c = last_lane | SHFL_C; // Shuffle control (mask and last_lane) + + asm volatile( + "{" + " .reg .u32 lo;" + " .reg .u32 hi;" + " .reg .pred p;" + " mov.b64 {lo, hi}, %1;" + " shfl.sync.down.b32 lo|p, lo, %2, %3, %4;" + " shfl.sync.down.b32 hi|p, hi, %2, %3, %4;" + " mov.b64 %0, {lo, hi};" + " @p add.u64 %0, %0, %1;" + "}" + : "=l"(output) + : "l"(input), "r"(offset), "r"(shfl_c), "r"(member_mask)); + + return output; + } + + /** + * @brief Reduction (specialized for summation across long long types) + * + * @param[in] input + * Calling thread's input item + * + * @param[in] reduction_op + * Binary reduction operator + * + * @param[in] last_lane + * Index of last lane in segment + * + * @param[in] offset + * 
Up-offset to pull from + */ + _CCCL_DEVICE _CCCL_FORCEINLINE long long + ReduceStep(long long input, ::cuda::std::plus<> /*reduction_op*/, int last_lane, int offset) + { + long long output; + int shfl_c = last_lane | SHFL_C; // Shuffle control (mask and last_lane) + + // Use predicate set from SHFL to guard against invalid peers + asm volatile( + "{" + " .reg .u32 lo;" + " .reg .u32 hi;" + " .reg .pred p;" + " mov.b64 {lo, hi}, %1;" + " shfl.sync.down.b32 lo|p, lo, %2, %3, %4;" + " shfl.sync.down.b32 hi|p, hi, %2, %3, %4;" + " mov.b64 %0, {lo, hi};" + " @p add.s64 %0, %0, %1;" + "}" + : "=l"(output) + : "l"(input), "r"(offset), "r"(shfl_c), "r"(member_mask)); + + return output; + } + + /** + * @brief Reduction (specialized for summation across double types) + * + * @param[in] input + * Calling thread's input item. + * + * @param[in] reduction_op + * Binary reduction operator + * + * @param[in] last_lane + * Index of last lane in segment + * + * @param[in] offset + * Up-offset to pull from + */ + _CCCL_DEVICE _CCCL_FORCEINLINE double + ReduceStep(double input, ::cuda::std::plus<> /*reduction_op*/, int last_lane, int offset) + { + double output; + int shfl_c = last_lane | SHFL_C; // Shuffle control (mask and last_lane) + + // Use predicate set from SHFL to guard against invalid peers + asm volatile( + "{" + " .reg .u32 lo;" + " .reg .u32 hi;" + " .reg .pred p;" + " .reg .f64 r0;" + " mov.b64 %0, %1;" + " mov.b64 {lo, hi}, %1;" + " shfl.sync.down.b32 lo|p, lo, %2, %3, %4;" + " shfl.sync.down.b32 hi|p, hi, %2, %3, %4;" + " mov.b64 r0, {lo, hi};" + " @p add.f64 %0, %0, r0;" + "}" + : "=d"(output) + : "d"(input), "r"(offset), "r"(shfl_c), "r"(member_mask)); + + return output; + } + + /** + * @brief Reduction (specialized for swizzled ReduceByKeyOp<::cuda::std::plus<>> across + * KeyValuePair types) + * + * @param[in] input + * Calling thread's input item + * + * @param[in] reduction_op + * Binary reduction operator + * + * @param[in] last_lane + * Index of last lane in 
segment + * + * @param[in] offset + * Up-offset to pull from + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE KeyValuePair ReduceStep( + KeyValuePair input, + SwizzleScanOp>> /*reduction_op*/, + int last_lane, + int offset) + { + KeyValuePair output; + + KeyT other_key = ShuffleDown(input.key, offset, last_lane, member_mask); + + output.key = input.key; + output.value = ReduceStep( + input.value, ::cuda::std::plus<>{}, last_lane, offset, Int2Type::IS_SMALL_UNSIGNED>()); + + if (input.key != other_key) + { + output.value = input.value; + } + + return output; + } + + /** + * @brief Reduction (specialized for swizzled ReduceBySegmentOp> across + * KeyValuePair types) + * + * @param[in] input + * Calling thread's input item. + * + * @param[in] reduction_op + * Binary reduction operator + * + * @param[in] last_lane + * Index of last lane in segment + * + * @param[in] offset + * Up-offset to pull from + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE KeyValuePair ReduceStep( + KeyValuePair input, + SwizzleScanOp>> /*reduction_op*/, + int last_lane, + int offset) + { + KeyValuePair output; + + output.value = ReduceStep( + input.value, ::cuda::std::plus<>{}, last_lane, offset, Int2Type::IS_SMALL_UNSIGNED>()); + output.key = ReduceStep( + input.key, ::cuda::std::plus<>{}, last_lane, offset, Int2Type::IS_SMALL_UNSIGNED>()); + + if (input.key > 0) + { + output.value = input.value; + } + + return output; + } + + /** + * @brief Reduction step (generic) + * + * @param[in] input + * Calling thread's input item + * + * @param[in] reduction_op + * Binary reduction operator + * + * @param[in] last_lane + * Index of last lane in segment + * + * @param[in] offset + * Up-offset to pull from + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE _T ReduceStep(_T input, ReductionOp reduction_op, int last_lane, int offset) + { + _T output = input; + + _T temp = ShuffleDown(output, offset, last_lane, member_mask); + + // Perform reduction op if valid + if (offset + lane_id <= last_lane) + { + 
output = reduction_op(input, temp); + } + + return output; + } + + /** + * @brief Reduction step (specialized for small unsigned integers size 32b or less) + * + * @param[in] input + * Calling thread's input item. + * + * @param[in] reduction_op + * Binary reduction operator + * + * @param[in] last_lane + * Index of last lane in segment + * + * @param[in] offset + * Up-offset to pull from + * + * @param[in] is_small_unsigned + * Marker type indicating whether T is a small unsigned integer + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE _T + ReduceStep(_T input, ReductionOp reduction_op, int last_lane, int offset, Int2Type /*is_small_unsigned*/) + { + return ReduceStep(input, reduction_op, last_lane, offset); + } + + /** + * @brief Reduction step (specialized for types other than small unsigned integers size + * 32b or less) + * + * @param[in] input + * Calling thread's input item. + * + * @param[in] reduction_op + * Binary reduction operator + * + * @param[in] last_lane + * Index of last lane in segment + * + * @param[in] offset + * Up-offset to pull from + * + * @param[in] is_small_unsigned + * Marker type indicating whether T is a small unsigned integer + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE _T + ReduceStep(_T input, ReductionOp reduction_op, int last_lane, int offset, Int2Type /*is_small_unsigned*/) + { + return ReduceStep(input, reduction_op, last_lane, offset); + } + + //--------------------------------------------------------------------- + // Templated reduction iteration + //--------------------------------------------------------------------- + + /** + * @param[in] input + * Calling thread's input item. 
+ * + * @param[in] reduction_op + * Binary reduction operator + * + * @param[in] last_lane + * Index of last lane in segment + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + ReduceStep(T& input, ReductionOp reduction_op, int last_lane, Int2Type /*step*/) + { + input = ReduceStep(input, reduction_op, last_lane, 1 << STEP, Int2Type::IS_SMALL_UNSIGNED>()); + + ReduceStep(input, reduction_op, last_lane, Int2Type()); + } + + /** + * @param[in] input + * Calling thread's input item. + * + * @param[in] reduction_op + * Binary reduction operator + * + * @param[in] last_lane + * Index of last lane in segment + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + ReduceStep(T& /*input*/, ReductionOp /*reduction_op*/, int /*last_lane*/, Int2Type /*step*/) + {} + + //--------------------------------------------------------------------- + // Reduction operations + //--------------------------------------------------------------------- + + /** + * @param[in] input + * Calling thread's input + * + * @param[in] valid_items + * Total number of valid items across the logical warp + * + * @param[in] reduction_op + * Binary reduction operator + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE T + ReduceImpl(Int2Type<0> /* all_lanes_valid */, T input, int valid_items, ReductionOp reduction_op) + { + int last_lane = valid_items - 1; + + T output = input; + + // Template-iterate reduction steps + ReduceStep(output, reduction_op, last_lane, Int2Type<0>()); + + return output; + } + + /** + * @param[in] input + * Calling thread's input + * + * @param[in] valid_items + * Total number of valid items across the logical warp + * + * @param[in] reduction_op + * Binary reduction operator + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE T + ReduceImpl(Int2Type<1> /* all_lanes_valid */, T input, int /* valid_items */, ReductionOp reduction_op) + { + int last_lane = LOGICAL_WARP_THREADS - 1; + + T output = input; + + // Template-iterate reduction steps + ReduceStep(output, reduction_op, 
last_lane, Int2Type<0>()); + + return output; + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE + typename ::cuda::std::enable_if<(::cuda::std::is_same::value || ::cuda::std::is_same::value) + && detail::reduce_add_exists<>::value, + T>::type + ReduceImpl(Int2Type<1> /* all_lanes_valid */, T input, int /* valid_items */, ::cuda::std::plus<> /* reduction_op */) + { + T output = input; + + NV_IF_TARGET( + NV_PROVIDES_SM_80, + (output = __reduce_add_sync(member_mask, input);), + (output = ReduceImpl<::cuda::std::plus<>>(Int2Type<1>{}, input, LOGICAL_WARP_THREADS, ::cuda::std::plus<>{});)); + + return output; + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE + typename ::cuda::std::enable_if<(::cuda::std::is_same::value || ::cuda::std::is_same::value) + && detail::reduce_min_exists<>::value, + T>::type + ReduceImpl(Int2Type<1> /* all_lanes_valid */, T input, int /* valid_items */, ::cuda::minimum<> /* reduction_op */) + { + T output = input; + + NV_IF_TARGET( + NV_PROVIDES_SM_80, + (output = __reduce_min_sync(member_mask, input);), + (output = ReduceImpl<::cuda::minimum<>>(Int2Type<1>{}, input, LOGICAL_WARP_THREADS, ::cuda::minimum<>{});)); + + return output; + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE + typename ::cuda::std::enable_if<(::cuda::std::is_same::value || ::cuda::std::is_same::value) + && detail::reduce_max_exists<>::value, + T>::type + ReduceImpl(Int2Type<1> /* all_lanes_valid */, T input, int /* valid_items */, ::cuda::maximum<> /* reduction_op */) + { + T output = input; + + NV_IF_TARGET( + NV_PROVIDES_SM_80, + (output = __reduce_max_sync(member_mask, input);), + (output = ReduceImpl<::cuda::maximum<>>(Int2Type<1>{}, input, LOGICAL_WARP_THREADS, ::cuda::maximum<>{});)); + + return output; + } + + /** + * @brief Reduction + * + * @tparam ALL_LANES_VALID + * Whether all lanes in each warp are contributing a valid fold of items + * + * @param[in] input + * Calling thread's input + * + * @param[in] valid_items + * Total number of valid items across the 
logical warp + * + * @param[in] reduction_op + * Binary reduction operator + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE T Reduce(T input, int valid_items, ReductionOp reduction_op) + { + return ReduceImpl(Int2Type{}, input, valid_items, reduction_op); + } + + /** + * @brief Segmented reduction + * + * @tparam HEAD_SEGMENTED + * Whether flags indicate a segment-head or a segment-tail + * + * @param[in] input + * Calling thread's input + * + * @param[in] flag + * Whether or not the current lane is a segment head/tail + * + * @param[in] reduction_op + * Binary reduction operator + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE T SegmentedReduce(T input, FlagT flag, ReductionOp reduction_op) + { + // Get the start flags for each thread in the warp. + int warp_flags = __ballot_sync(member_mask, flag); + + // Convert to tail-segmented + if (HEAD_SEGMENTED) + { + warp_flags >>= 1; + } + + // Mask out the bits below the current thread + warp_flags &= ::cuda::ptx::get_sreg_lanemask_ge(); + + // Mask of physical lanes outside the logical warp and convert to logical lanemask + if (!IS_ARCH_WARP) + { + warp_flags = (warp_flags & member_mask) >> (warp_id * LOGICAL_WARP_THREADS); + } + + // Mask in the last lane of logical warp + warp_flags |= 1u << (LOGICAL_WARP_THREADS - 1); + + // Find the next set flag + int last_lane = __clz(__brev(warp_flags)); + + T output = input; + + // // Iterate reduction steps + // #pragma unroll + // for (int STEP = 0; STEP < STEPS; STEP++) + // { + // output = ReduceStep(output, reduction_op, last_lane, 1 << STEP, + // Int2Type::IS_SMALL_UNSIGNED>()); + // } + + // Template-iterate reduction steps + ReduceStep(output, reduction_op, last_lane, Int2Type<0>()); + + return output; + } +}; +} // namespace detail + +template +using WarpReduceShfl CCCL_DEPRECATED_BECAUSE( + "This class is considered an implementation detail and the public interface will be " + "removed.") = detail::WarpReduceShfl; + +CUB_NAMESPACE_END diff --git 
a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/specializations/warp_reduce_smem.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/specializations/warp_reduce_smem.cuh new file mode 100644 index 0000000000000000000000000000000000000000..ade195ee6cb89a3b85a96db59f92ca57cc10a453 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/specializations/warp_reduce_smem.cuh @@ -0,0 +1,421 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * @file + * cub::WarpReduceSmem provides smem-based variants of parallel reduction of items partitioned + * across a CUDA thread warp. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include + +#include + +CUB_NAMESPACE_BEGIN +namespace detail +{ +/** + * @brief WarpReduceSmem provides smem-based variants of parallel reduction of items partitioned + * across a CUDA thread warp. 
+ * + * @tparam T + * Data type being reduced + * + * @tparam LOGICAL_WARP_THREADS + * Number of threads per logical warp + * + * @tparam LEGACY_PTX_ARCH + * The PTX compute capability for which to to specialize this collective + */ +template +struct WarpReduceSmem +{ + /****************************************************************************** + * Constants and type definitions + ******************************************************************************/ + + enum + { + /// Whether the logical warp size and the PTX warp size coincide + IS_ARCH_WARP = (LOGICAL_WARP_THREADS == CUB_WARP_THREADS(0)), + + /// Whether the logical warp size is a power-of-two + IS_POW_OF_TWO = PowerOfTwo::VALUE, + + /// The number of warp reduction steps + STEPS = Log2::VALUE, + + /// The number of threads in half a warp + HALF_WARP_THREADS = 1 << (STEPS - 1), + + /// The number of shared memory elements per warp + WARP_SMEM_ELEMENTS = LOGICAL_WARP_THREADS + HALF_WARP_THREADS, + + /// FlagT status (when not using ballot) + UNSET = 0x0, // Is initially unset + SET = 0x1, // Is initially set + SEEN = 0x2, // Has seen another head flag from a successor peer + }; + + /// Shared memory flag type + using SmemFlag = unsigned char; + + /// Shared memory storage layout type (1.5 warps-worth of elements for each warp) + struct _TempStorage + { + T reduce[WARP_SMEM_ELEMENTS]; + SmemFlag flags[WARP_SMEM_ELEMENTS]; + }; + + // Alias wrapper allowing storage to be unioned + struct TempStorage : Uninitialized<_TempStorage> + {}; + + /****************************************************************************** + * Thread fields + ******************************************************************************/ + + _TempStorage& temp_storage; + unsigned int lane_id; + unsigned int member_mask; + + /****************************************************************************** + * Construction + ******************************************************************************/ + + /// Constructor + 
explicit _CCCL_DEVICE _CCCL_FORCEINLINE WarpReduceSmem(TempStorage& temp_storage) + : temp_storage(temp_storage.Alias()) + , lane_id(IS_ARCH_WARP ? ::cuda::ptx::get_sreg_laneid() : ::cuda::ptx::get_sreg_laneid() % LOGICAL_WARP_THREADS) + , member_mask(WarpMask(::cuda::ptx::get_sreg_laneid() / LOGICAL_WARP_THREADS)) + {} + + /****************************************************************************** + * Utility methods + ******************************************************************************/ + + //--------------------------------------------------------------------- + // Regular reduction + //--------------------------------------------------------------------- + + /** + * @brief Reduction step + * + * @tparam ALL_LANES_VALID + * Whether all lanes in each warp are contributing a valid fold of items + * + * @param[in] input + * Calling thread's input + * + * @param[in] valid_items + * Total number of valid items across the logical warp + * + * @param[in] reduction_op + * Reduction operator + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE T + ReduceStep(T input, int valid_items, ReductionOp reduction_op, Int2Type /*step*/) + { + constexpr int OFFSET = 1 << STEP; + + // Share input through buffer + ThreadStore(&temp_storage.reduce[lane_id], input); + + __syncwarp(member_mask); + + // Update input if peer_addend is in range + if ((ALL_LANES_VALID && IS_POW_OF_TWO) || ((lane_id + OFFSET) < valid_items)) + { + T peer_addend = ThreadLoad(&temp_storage.reduce[lane_id + OFFSET]); + input = reduction_op(input, peer_addend); + } + + __syncwarp(member_mask); + + return ReduceStep(input, valid_items, reduction_op, Int2Type()); + } + + /** + * @brief Reduction step (terminate) + * + * @tparam ALL_LANES_VALID + * Whether all lanes in each warp are contributing a valid fold of items + * + * @param[in] input + * Calling thread's input + * + * @param[in] valid_items + * Total number of valid items across the logical warp + * + * @param[in] reduction_op + * Reduction 
operator + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE T + ReduceStep(T input, int valid_items, ReductionOp /*reduction_op*/, Int2Type /*step*/) + { + return input; + } + + //--------------------------------------------------------------------- + // Segmented reduction + //--------------------------------------------------------------------- + + /** + * @brief Ballot-based segmented reduce + * + * @tparam HEAD_SEGMENTED + * Whether flags indicate a segment-head or a segment-tail + * + * @param[in] input + * Calling thread's input + * + * @param[in] flag + * Whether or not the current lane is a segment head/tail + * + * @param[in] reduction_op + * Reduction operator + * + * @param[in] has_ballot + * Marker type for whether the target arch has ballot functionality + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE T + SegmentedReduce(T input, FlagT flag, ReductionOp reduction_op, Int2Type /*has_ballot*/) + { + // Get the start flags for each thread in the warp. + int warp_flags = __ballot_sync(member_mask, flag); + + if (!HEAD_SEGMENTED) + { + warp_flags <<= 1; + } + + // Keep bits above the current thread. 
+ warp_flags &= ::cuda::ptx::get_sreg_lanemask_gt(); + + // Accommodate packing of multiple logical warps in a single physical warp + if (!IS_ARCH_WARP) + { + warp_flags >>= (::cuda::ptx::get_sreg_laneid() / LOGICAL_WARP_THREADS) * LOGICAL_WARP_THREADS; + } + + // Find next flag + int next_flag = __clz(__brev(warp_flags)); + + // Clip the next segment at the warp boundary if necessary + if (LOGICAL_WARP_THREADS != 32) + { + next_flag = CUB_MIN(next_flag, LOGICAL_WARP_THREADS); + } + +#pragma unroll + for (int STEP = 0; STEP < STEPS; STEP++) + { + const int OFFSET = 1 << STEP; + + // Share input into buffer + ThreadStore(&temp_storage.reduce[lane_id], input); + + __syncwarp(member_mask); + + // Update input if peer_addend is in range + if (OFFSET + lane_id < next_flag) + { + T peer_addend = ThreadLoad(&temp_storage.reduce[lane_id + OFFSET]); + input = reduction_op(input, peer_addend); + } + + __syncwarp(member_mask); + } + + return input; + } + + /** + * @brief Smem-based segmented reduce + * + * @tparam HEAD_SEGMENTED + * Whether flags indicate a segment-head or a segment-tail + * + * @param[in] input + * Calling thread's input + * + * @param[in] flag + * Whether or not the current lane is a segment head/tail + * + * @param[in] reduction_op + * Reduction operator + * + * @param[in] has_ballot + * Marker type for whether the target arch has ballot functionality + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE T + SegmentedReduce(T input, FlagT flag, ReductionOp reduction_op, Int2Type /*has_ballot*/) + { + enum + { + UNSET = 0x0, // Is initially unset + SET = 0x1, // Is initially set + SEEN = 0x2, // Has seen another head flag from a successor peer + }; + + // Alias flags onto shared data storage + volatile SmemFlag* flag_storage = temp_storage.flags; + + SmemFlag flag_status = (flag) ? 
SET : UNSET; + + for (int STEP = 0; STEP < STEPS; STEP++) + { + const int OFFSET = 1 << STEP; + + // Share input through buffer + ThreadStore(&temp_storage.reduce[lane_id], input); + + __syncwarp(member_mask); + + // Get peer from buffer + T peer_addend = ThreadLoad(&temp_storage.reduce[lane_id + OFFSET]); + + __syncwarp(member_mask); + + // Share flag through buffer + flag_storage[lane_id] = flag_status; + + // Get peer flag from buffer + SmemFlag peer_flag_status = flag_storage[lane_id + OFFSET]; + + // Update input if peer was in range + if (lane_id < LOGICAL_WARP_THREADS - OFFSET) + { + if (HEAD_SEGMENTED) + { + // Head-segmented + if ((flag_status & SEEN) == 0) + { + // Has not seen a more distant head flag + if (peer_flag_status & SET) + { + // Has now seen a head flag + flag_status |= SEEN; + } + else + { + // Peer is not a head flag: grab its count + input = reduction_op(input, peer_addend); + } + + // Update seen status to include that of peer + flag_status |= (peer_flag_status & SEEN); + } + } + else + { + // Tail-segmented. 
Simply propagate flag status + if (!flag_status) + { + input = reduction_op(input, peer_addend); + flag_status |= peer_flag_status; + } + } + } + } + + return input; + } + + /****************************************************************************** + * Interface + ******************************************************************************/ + + /** + * @brief Reduction + * + * @tparam ALL_LANES_VALID + * Whether all lanes in each warp are contributing a valid fold of items + * + * @param[in] input + * Calling thread's input + * + * @param[in] valid_items + * Total number of valid items across the logical warp + * + * @param[in] reduction_op + * Reduction operator + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE T Reduce(T input, int valid_items, ReductionOp reduction_op) + { + return ReduceStep(input, valid_items, reduction_op, Int2Type<0>()); + } + + /** + * @brief Segmented reduction + * + * @tparam HEAD_SEGMENTED + * Whether flags indicate a segment-head or a segment-tail + * + * @param[in] input + * Calling thread's input + * + * @param[in] flag + * Whether or not the current lane is a segment head/tail + * + * @param[in] reduction_op + * Reduction operator + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE T SegmentedReduce(T input, FlagT flag, ReductionOp reduction_op) + { + return SegmentedReduce(input, flag, reduction_op, Int2Type()); + } +}; +} // namespace detail + +template +using WarpReduceSmem CCCL_DEPRECATED_BECAUSE( + "This class is considered an implementation detail and the public interface will be " + "removed.") = detail::WarpReduceSmem; +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/specializations/warp_scan_shfl.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/specializations/warp_scan_shfl.cuh new file mode 100644 index 0000000000000000000000000000000000000000..402b476c4e461f84ce55720926abdc42a8d40acd --- /dev/null +++ 
b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/specializations/warp_scan_shfl.cuh @@ -0,0 +1,685 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + ******************************************************************************/ + +/** + * @file + * cub::WarpScanShfl provides SHFL-based variants of parallel prefix scan of items partitioned + * across a CUDA thread warp. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include + +#include + +CUB_NAMESPACE_BEGIN +namespace detail +{ +/** + * @brief WarpScanShfl provides SHFL-based variants of parallel prefix scan of items partitioned + * across a CUDA thread warp. + * + * @tparam T + * Data type being scanned + * + * @tparam LOGICAL_WARP_THREADS + * Number of threads per logical warp (must be a power-of-two) + * + * @tparam LEGACY_PTX_ARCH + * The PTX compute capability for which to to specialize this collective + */ +template +struct WarpScanShfl +{ + //--------------------------------------------------------------------- + // Constants and type definitions + //--------------------------------------------------------------------- + + enum + { + /// Whether the logical warp size and the PTX warp size coincide + IS_ARCH_WARP = (LOGICAL_WARP_THREADS == CUB_WARP_THREADS(0)), + + /// The number of warp scan steps + STEPS = Log2::VALUE, + + /// The 5-bit SHFL mask for logically splitting warps into sub-segments starts 8-bits up + SHFL_C = (CUB_WARP_THREADS(0) - LOGICAL_WARP_THREADS) << 8 + }; + + template + struct IntegerTraits + { + enum + { + /// Whether the data type is a small (32b or less) integer for which we can use a single SFHL instruction per + /// exchange + IS_SMALL_UNSIGNED = (Traits::CATEGORY == UNSIGNED_INTEGER) && (sizeof(S) <= sizeof(unsigned int)) + }; + }; + + /// Shared memory storage layout type + struct TempStorage + {}; + + 
//--------------------------------------------------------------------- + // Thread fields + //--------------------------------------------------------------------- + + /// Lane index in logical warp + unsigned int lane_id; + + /// Logical warp index in 32-thread physical warp + unsigned int warp_id; + + /// 32-thread physical warp member mask of logical warp + unsigned int member_mask; + + //--------------------------------------------------------------------- + // Construction + //--------------------------------------------------------------------- + + /// Constructor + explicit _CCCL_DEVICE _CCCL_FORCEINLINE WarpScanShfl(TempStorage& /*temp_storage*/) + : lane_id(::cuda::ptx::get_sreg_laneid()) + , warp_id(IS_ARCH_WARP ? 0 : (lane_id / LOGICAL_WARP_THREADS)) + , member_mask(WarpMask(warp_id)) + { + if (!IS_ARCH_WARP) + { + lane_id = lane_id % LOGICAL_WARP_THREADS; + } + } + + //--------------------------------------------------------------------- + // Inclusive scan steps + //--------------------------------------------------------------------- + + /** + * @brief Inclusive prefix scan step (specialized for summation across int32 types) + * + * @param[in] input + * Calling thread's input item. 
+ * + * @param[in] scan_op + * Binary scan operator + * + * @param[in] first_lane + * Index of first lane in segment + * + * @param[in] offset + * Up-offset to pull from + */ + _CCCL_DEVICE _CCCL_FORCEINLINE int + InclusiveScanStep(int input, ::cuda::std::plus<> /*scan_op*/, int first_lane, int offset) + { + int output; + int shfl_c = first_lane | SHFL_C; // Shuffle control (mask and first-lane) + + // Use predicate set from SHFL to guard against invalid peers + asm volatile( + "{" + " .reg .s32 r0;" + " .reg .pred p;" + " shfl.sync.up.b32 r0|p, %1, %2, %3, %5;" + " @p add.s32 r0, r0, %4;" + " mov.s32 %0, r0;" + "}" + : "=r"(output) + : "r"(input), "r"(offset), "r"(shfl_c), "r"(input), "r"(member_mask)); + + return output; + } + + /** + * @brief Inclusive prefix scan step (specialized for summation across uint32 types) + * + * @param[in] input + * Calling thread's input item + * + * @param[in] scan_op + * Binary scan operator + * + * @param[in] first_lane + * Index of first lane in segment + * + * @param[in] offset + * Up-offset to pull from + */ + _CCCL_DEVICE _CCCL_FORCEINLINE unsigned int + InclusiveScanStep(unsigned int input, ::cuda::std::plus<> /*scan_op*/, int first_lane, int offset) + { + unsigned int output; + int shfl_c = first_lane | SHFL_C; // Shuffle control (mask and first-lane) + + // Use predicate set from SHFL to guard against invalid peers + asm volatile( + "{" + " .reg .u32 r0;" + " .reg .pred p;" + " shfl.sync.up.b32 r0|p, %1, %2, %3, %5;" + " @p add.u32 r0, r0, %4;" + " mov.u32 %0, r0;" + "}" + : "=r"(output) + : "r"(input), "r"(offset), "r"(shfl_c), "r"(input), "r"(member_mask)); + + return output; + } + + /** + * @brief Inclusive prefix scan step (specialized for summation across fp32 types) + * + * @param[in] input + * Calling thread's input item + * + * @param[in] scan_op + * Binary scan operator + * + * @param[in] first_lane + * Index of first lane in segment + * + * @param[in] offset + * Up-offset to pull from + */ + _CCCL_DEVICE 
_CCCL_FORCEINLINE float + InclusiveScanStep(float input, ::cuda::std::plus<> /*scan_op*/, int first_lane, int offset) + { + float output; + int shfl_c = first_lane | SHFL_C; // Shuffle control (mask and first-lane) + + // Use predicate set from SHFL to guard against invalid peers + asm volatile( + "{" + " .reg .f32 r0;" + " .reg .pred p;" + " shfl.sync.up.b32 r0|p, %1, %2, %3, %5;" + " @p add.f32 r0, r0, %4;" + " mov.f32 %0, r0;" + "}" + : "=f"(output) + : "f"(input), "r"(offset), "r"(shfl_c), "f"(input), "r"(member_mask)); + + return output; + } + + /** + * @brief Inclusive prefix scan step (specialized for summation across unsigned long long types) + * + * @param[in] input + * Calling thread's input item + * + * @param[in] scan_op + * Binary scan operator + * + * @param[in] first_lane + * Index of first lane in segment + * + * @param[in] offset + * Up-offset to pull from + */ + _CCCL_DEVICE _CCCL_FORCEINLINE unsigned long long + InclusiveScanStep(unsigned long long input, ::cuda::std::plus<> /*scan_op*/, int first_lane, int offset) + { + unsigned long long output; + int shfl_c = first_lane | SHFL_C; // Shuffle control (mask and first-lane) + + // Use predicate set from SHFL to guard against invalid peers + asm volatile( + "{" + " .reg .u64 r0;" + " .reg .u32 lo;" + " .reg .u32 hi;" + " .reg .pred p;" + " mov.b64 {lo, hi}, %1;" + " shfl.sync.up.b32 lo|p, lo, %2, %3, %5;" + " shfl.sync.up.b32 hi|p, hi, %2, %3, %5;" + " mov.b64 r0, {lo, hi};" + " @p add.u64 r0, r0, %4;" + " mov.u64 %0, r0;" + "}" + : "=l"(output) + : "l"(input), "r"(offset), "r"(shfl_c), "l"(input), "r"(member_mask)); + + return output; + } + + /** + * @brief Inclusive prefix scan step (specialized for summation across long long types) + * + * @param[in] input + * Calling thread's input item. 
+ * + * @param[in] scan_op + * Binary scan operator + * + * @param[in] first_lane + * Index of first lane in segment + * + * @param[in] offset + * Up-offset to pull from + */ + _CCCL_DEVICE _CCCL_FORCEINLINE long long + InclusiveScanStep(long long input, ::cuda::std::plus<> /*scan_op*/, int first_lane, int offset) + { + long long output; + int shfl_c = first_lane | SHFL_C; // Shuffle control (mask and first-lane) + + // Use predicate set from SHFL to guard against invalid peers + asm volatile( + "{" + " .reg .s64 r0;" + " .reg .u32 lo;" + " .reg .u32 hi;" + " .reg .pred p;" + " mov.b64 {lo, hi}, %1;" + " shfl.sync.up.b32 lo|p, lo, %2, %3, %5;" + " shfl.sync.up.b32 hi|p, hi, %2, %3, %5;" + " mov.b64 r0, {lo, hi};" + " @p add.s64 r0, r0, %4;" + " mov.s64 %0, r0;" + "}" + : "=l"(output) + : "l"(input), "r"(offset), "r"(shfl_c), "l"(input), "r"(member_mask)); + + return output; + } + + /** + * @brief Inclusive prefix scan step (specialized for summation across fp64 types) + * + * @param[in] input + * Calling thread's input item. 
+ * + * @param[in] scan_op + * Binary scan operator + * + * @param[in] first_lane + * Index of first lane in segment + * + * @param[in] offset + * Up-offset to pull from + */ + _CCCL_DEVICE _CCCL_FORCEINLINE double + InclusiveScanStep(double input, ::cuda::std::plus<> /*scan_op*/, int first_lane, int offset) + { + double output; + int shfl_c = first_lane | SHFL_C; // Shuffle control (mask and first-lane) + + // Use predicate set from SHFL to guard against invalid peers + asm volatile( + "{" + " .reg .u32 lo;" + " .reg .u32 hi;" + " .reg .pred p;" + " .reg .f64 r0;" + " mov.b64 %0, %1;" + " mov.b64 {lo, hi}, %1;" + " shfl.sync.up.b32 lo|p, lo, %2, %3, %4;" + " shfl.sync.up.b32 hi|p, hi, %2, %3, %4;" + " mov.b64 r0, {lo, hi};" + " @p add.f64 %0, %0, r0;" + "}" + : "=d"(output) + : "d"(input), "r"(offset), "r"(shfl_c), "r"(member_mask)); + + return output; + } + + /* + /// Inclusive prefix scan (specialized for ReduceBySegmentOp<::cuda::std::plus<>> across KeyValuePair + /// types) + template + _CCCL_DEVICE _CCCL_FORCEINLINE KeyValuePair InclusiveScanStep( + KeyValuePair input, ///< [in] Calling thread's input item. + ReduceBySegmentOp<::cuda::std::plus<>> scan_op, ///< [in] Binary scan operator + int first_lane, ///< [in] Index of first lane in segment + int offset) ///< [in] Up-offset to pull from + { + KeyValuePair output; + output.value = InclusiveScanStep( + input.value, ::cuda::std::plus<>{}, first_lane, offset, Int2Type::IS_SMALL_UNSIGNED>()); + output.key = InclusiveScanStep( + input.key, ::cuda::std::plus<>{}, first_lane, offset, Int2Type::IS_SMALL_UNSIGNED>()); + + if (input.key > 0) + output.value = input.value; + + return output; + } + */ + + /** + * @brief Inclusive prefix scan step (generic) + * + * @param[in] input + * Calling thread's input item. 
+ * + * @param[in] scan_op + * Binary scan operator + * + * @param[in] first_lane + * Index of first lane in segment + * + * @param[in] offset + * Up-offset to pull from + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE _T InclusiveScanStep(_T input, ScanOpT scan_op, int first_lane, int offset) + { + _T temp = ShuffleUp(input, offset, first_lane, member_mask); + + // Perform scan op if from a valid peer + _T output = scan_op(temp, input); + if (static_cast(lane_id) < first_lane + offset) + { + output = input; + } + + return output; + } + + /** + * @brief Inclusive prefix scan step (specialized for small integers size 32b or less) + * + * @param[in] input + * Calling thread's input item + * + * @param[in] scan_op + * Binary scan operator + * + * @param[in] first_lane + * Index of first lane in segment + * + * @param[in] offset + * Up-offset to pull from + * + * @param[in] is_small_unsigned + * Marker type indicating whether T is a small integer + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE _T + InclusiveScanStep(_T input, ScanOpT scan_op, int first_lane, int offset, Int2Type /*is_small_unsigned*/) + { + return InclusiveScanStep(input, scan_op, first_lane, offset); + } + + /** + * @brief Inclusive prefix scan step (specialized for types other than small integers size + * 32b or less) + * + * @param[in] input + * Calling thread's input item. 
+ * + * @param[in] scan_op + * Binary scan operator + * + * @param[in] first_lane + * Index of first lane in segment + * + * @param[in] offset + * Up-offset to pull from + * + * @param[in] is_small_unsigned + * Marker type indicating whether T is a small integer + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE _T + InclusiveScanStep(_T input, ScanOpT scan_op, int first_lane, int offset, Int2Type /*is_small_unsigned*/) + { + return InclusiveScanStep(input, scan_op, first_lane, offset); + } + + /****************************************************************************** + * Interface + ******************************************************************************/ + + //--------------------------------------------------------------------- + // Broadcast + //--------------------------------------------------------------------- + + /** + * @brief Broadcast + * + * @param[in] input + * The value to broadcast + * + * @param[in] src_lane + * Which warp lane is to do the broadcasting + */ + _CCCL_DEVICE _CCCL_FORCEINLINE T Broadcast(T input, int src_lane) + { + return ShuffleIndex(input, src_lane, member_mask); + } + + //--------------------------------------------------------------------- + // Inclusive operations + //--------------------------------------------------------------------- + + /** + * @brief Inclusive scan + * + * @param[in] input + * Calling thread's input item + * + * @param[out] inclusive_output + * Calling thread's output item. 
May be aliased with @p input + * + * @param[in] scan_op + * Binary scan operator + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void InclusiveScan(_T input, _T& inclusive_output, ScanOpT scan_op) + { + inclusive_output = input; + + // Iterate scan steps + int segment_first_lane = 0; + + // Iterate scan steps +#pragma unroll + for (int STEP = 0; STEP < STEPS; STEP++) + { + inclusive_output = InclusiveScanStep( + inclusive_output, scan_op, segment_first_lane, (1 << STEP), Int2Type::IS_SMALL_UNSIGNED>()); + } + } + + /** + * @brief Inclusive scan, specialized for reduce-value-by-key + * + * @param[in] input + * Calling thread's input item + * + * @param[out] inclusive_output + * Calling thread's output item. May be aliased with @p input + * + * @param[in] scan_op + * Binary scan operator + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void InclusiveScan( + KeyValuePair input, KeyValuePair& inclusive_output, ReduceByKeyOp scan_op) + { + inclusive_output = input; + + KeyT pred_key = ShuffleUp(inclusive_output.key, 1, 0, member_mask); + + unsigned int ballot = __ballot_sync(member_mask, (pred_key != inclusive_output.key)); + + // Mask away all lanes greater than ours + ballot = ballot & ::cuda::ptx::get_sreg_lanemask_le(); + + // Find index of first set bit + int segment_first_lane = CUB_MAX(0, 31 - __clz(ballot)); + + // Iterate scan steps +#pragma unroll + for (int STEP = 0; STEP < STEPS; STEP++) + { + inclusive_output.value = InclusiveScanStep( + inclusive_output.value, + scan_op.op, + segment_first_lane, + (1 << STEP), + Int2Type::IS_SMALL_UNSIGNED>()); + } + } + + /** + * @brief Inclusive scan with aggregate + * + * @param[in] input + * Calling thread's input item + * + * @param[out] inclusive_output + * Calling thread's output item. 
May be aliased with @p input + * + * @param[in] scan_op + * Binary scan operator + * + * @param[out] warp_aggregate + * Warp-wide aggregate reduction of input items + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void InclusiveScan(T input, T& inclusive_output, ScanOpT scan_op, T& warp_aggregate) + { + InclusiveScan(input, inclusive_output, scan_op); + + // Grab aggregate from last warp lane + warp_aggregate = ShuffleIndex(inclusive_output, LOGICAL_WARP_THREADS - 1, member_mask); + } + + //--------------------------------------------------------------------- + // Get exclusive from inclusive + //--------------------------------------------------------------------- + + /** + * @brief Update inclusive and exclusive using input and inclusive + * + * @param[in] input + * + * @param[out] inclusive + * + * @param[out] exclusive + * + * @param[in] scan_op + * + * @param[in] is_integer + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Update(T /*input*/, T& inclusive, T& exclusive, ScanOpT /*scan_op*/, IsIntegerT /*is_integer*/) + { + // initial value unknown + exclusive = ShuffleUp(inclusive, 1, 0, member_mask); + } + + /** + * @brief Update inclusive and exclusive using input and inclusive (specialized for summation of + * integer types) + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void + Update(T input, T& inclusive, T& exclusive, ::cuda::std::plus<> /*scan_op*/, Int2Type /*is_integer*/) + { + // initial value presumed 0 + exclusive = inclusive - input; + } + + /** + * @brief Update inclusive and exclusive using initial value using input, inclusive, and initial + * value + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Update(T /*input*/, T& inclusive, T& exclusive, ScanOpT scan_op, T initial_value, IsIntegerT /*is_integer*/) + { + inclusive = scan_op(initial_value, inclusive); + exclusive = ShuffleUp(inclusive, 1, 0, member_mask); + + if (lane_id == 0) + { + exclusive = initial_value; + } + } + + /** + * @brief Update inclusive and exclusive using initial value 
using input and inclusive + * (specialized for summation of integer types) + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void Update( + T input, T& inclusive, T& exclusive, ::cuda::std::plus<> scan_op, T initial_value, Int2Type /*is_integer*/) + { + inclusive = scan_op(initial_value, inclusive); + exclusive = inclusive - input; + } + + /** + * @brief Update inclusive, exclusive, and warp aggregate using input and inclusive + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Update(T input, T& inclusive, T& exclusive, T& warp_aggregate, ScanOpT scan_op, IsIntegerT is_integer) + { + warp_aggregate = ShuffleIndex(inclusive, LOGICAL_WARP_THREADS - 1, member_mask); + Update(input, inclusive, exclusive, scan_op, is_integer); + } + + /** + * @brief Update inclusive, exclusive, and warp aggregate using input, inclusive, and initial + * value + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Update( + T input, T& inclusive, T& exclusive, T& warp_aggregate, ScanOpT scan_op, T initial_value, IsIntegerT is_integer) + { + warp_aggregate = ShuffleIndex(inclusive, LOGICAL_WARP_THREADS - 1, member_mask); + Update(input, inclusive, exclusive, scan_op, initial_value, is_integer); + } +}; +} // namespace detail + +template +using WarpScanShfl CCCL_DEPRECATED_BECAUSE( + "This class is considered an implementation detail and the public interface will be " + "removed.") = detail::WarpScanShfl; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/specializations/warp_scan_smem.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/specializations/warp_scan_smem.cuh new file mode 100644 index 0000000000000000000000000000000000000000..090f0f96cb5742e47bc420b34379d79fbff56e11 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/specializations/warp_scan_smem.cuh @@ -0,0 +1,443 @@ +/****************************************************************************** + * 
Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * @file + * cub::WarpScanSmem provides smem-based variants of parallel prefix scan of items partitioned + * across a CUDA thread warp. 
+ */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include + +#include + +CUB_NAMESPACE_BEGIN +namespace detail +{ +/** + * @brief WarpScanSmem provides smem-based variants of parallel prefix scan of items partitioned + * across a CUDA thread warp. + * + * @tparam T + * Data type being scanned + * + * @tparam LOGICAL_WARP_THREADS + * Number of threads per logical warp + * + * @tparam LEGACY_PTX_ARCH + * The PTX compute capability for which to to specialize this collective + */ +template +struct WarpScanSmem +{ + /****************************************************************************** + * Constants and type definitions + ******************************************************************************/ + + enum + { + /// Whether the logical warp size and the PTX warp size coincide + IS_ARCH_WARP = (LOGICAL_WARP_THREADS == CUB_WARP_THREADS(0)), + + /// The number of warp scan steps + STEPS = Log2::VALUE, + + /// The number of threads in half a warp + HALF_WARP_THREADS = 1 << (STEPS - 1), + + /// The number of shared memory elements per warp + WARP_SMEM_ELEMENTS = LOGICAL_WARP_THREADS + HALF_WARP_THREADS, + }; + + /// Storage cell type (workaround for SM1x compiler bugs with custom-ops like Max() on signed chars) + using CellT = T; + + /// Shared memory storage layout type (1.5 warps-worth of elements for each warp) + using _TempStorage = CellT[WARP_SMEM_ELEMENTS]; + + // Alias wrapper allowing storage to be unioned + struct TempStorage : Uninitialized<_TempStorage> + {}; + + /****************************************************************************** + * Thread fields + ******************************************************************************/ + + _TempStorage& temp_storage; + 
unsigned int lane_id; + unsigned int member_mask; + + /****************************************************************************** + * Construction + ******************************************************************************/ + + /// Constructor + explicit _CCCL_DEVICE _CCCL_FORCEINLINE WarpScanSmem(TempStorage& temp_storage) + : temp_storage(temp_storage.Alias()) + , + + lane_id(IS_ARCH_WARP ? ::cuda::ptx::get_sreg_laneid() : ::cuda::ptx::get_sreg_laneid() % LOGICAL_WARP_THREADS) + , + + member_mask(WarpMask(::cuda::ptx::get_sreg_laneid() / LOGICAL_WARP_THREADS)) + {} + + /****************************************************************************** + * Utility methods + ******************************************************************************/ + + /// Basic inclusive scan iteration (template unrolled, inductive-case specialization) + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ScanStep(T& partial, ScanOp scan_op, Int2Type /*step*/) + { + constexpr int OFFSET = 1 << STEP; + + // Share partial into buffer + ThreadStore(&temp_storage[HALF_WARP_THREADS + lane_id], (CellT) partial); + + __syncwarp(member_mask); + + // Update partial if addend is in range + if (HAS_IDENTITY || (lane_id >= OFFSET)) + { + T addend = (T) ThreadLoad(&temp_storage[HALF_WARP_THREADS + lane_id - OFFSET]); + partial = scan_op(addend, partial); + } + __syncwarp(member_mask); + + ScanStep(partial, scan_op, Int2Type()); + } + + /// Basic inclusive scan iteration(template unrolled, base-case specialization) + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ScanStep(T& /*partial*/, ScanOp /*scan_op*/, Int2Type /*step*/) + {} + + /** + * @brief Inclusive prefix scan (specialized for summation across primitive types) + * + * @param[in] input + * Calling thread's input item + * + * @param[out] output + * Calling thread's output item. 
May be aliased with @p input + * + * @param[in] scan_op + * Binary scan operator + * + * @param[in] + * Marker type indicating whether T is primitive type + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void + InclusiveScan(T input, T& output, ::cuda::std::plus<> scan_op, Int2Type /*is_primitive*/) + { + T identity = 0; + ThreadStore(&temp_storage[lane_id], (CellT) identity); + + __syncwarp(member_mask); + + // Iterate scan steps + output = input; + ScanStep(output, scan_op, Int2Type<0>()); + } + + /** + * @brief Inclusive prefix scan + * + * @param[in] input + * Calling thread's input item + * + * @param[out] output + * Calling thread's output item. May be aliased with @p input + * + * @param[in] scan_op + * Binary scan operator + * + * @param[in] is_primitive + * Marker type indicating whether T is primitive type + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + InclusiveScan(T input, T& output, ScanOp scan_op, Int2Type /*is_primitive*/) + { + // Iterate scan steps + output = input; + ScanStep(output, scan_op, Int2Type<0>()); + } + + /****************************************************************************** + * Interface + ******************************************************************************/ + + //--------------------------------------------------------------------- + // Broadcast + //--------------------------------------------------------------------- + + /** + * @brief Broadcast + * + * @param[in] input + * The value to broadcast + * + * @param[in] src_lane + * Which warp lane is to do the broadcasting + */ + _CCCL_DEVICE _CCCL_FORCEINLINE T Broadcast(T input, unsigned int src_lane) + { + if (lane_id == src_lane) + { + ThreadStore(temp_storage, (CellT) input); + } + + __syncwarp(member_mask); + + return (T) ThreadLoad(temp_storage); + } + + //--------------------------------------------------------------------- + // Inclusive operations + //--------------------------------------------------------------------- + + /** + * @brief Inclusive scan + * + 
* @param[in] input + * Calling thread's input item. + * + * @param[out] inclusive_output + * Calling thread's output item. May be aliased with @p input + * + * @param[in] scan_op + * Binary scan operator + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void InclusiveScan(T input, T& inclusive_output, ScanOp scan_op) + { + InclusiveScan(input, inclusive_output, scan_op, Int2Type::PRIMITIVE>()); + } + + /** + * @brief Inclusive scan with aggregate + * + * @param[in] input + * Calling thread's input item + * + * @param[out] inclusive_output + * Calling thread's output item. May be aliased with @p input + * + * @param[in] scan_op + * Binary scan operator + * + * @param[out] warp_aggregate + * Warp-wide aggregate reduction of input items. + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void InclusiveScan(T input, T& inclusive_output, ScanOp scan_op, T& warp_aggregate) + { + InclusiveScan(input, inclusive_output, scan_op); + + // Retrieve aggregate + ThreadStore(&temp_storage[HALF_WARP_THREADS + lane_id], (CellT) inclusive_output); + + __syncwarp(member_mask); + + warp_aggregate = (T) ThreadLoad(&temp_storage[WARP_SMEM_ELEMENTS - 1]); + + __syncwarp(member_mask); + } + + //--------------------------------------------------------------------- + // Get exclusive from inclusive + //--------------------------------------------------------------------- + + /** + * @brief Update inclusive and exclusive using input and inclusive + * + * @param[in] input + * + * @param[in, out] inclusive + * + * @param[out] exclusive + * + * @param[in] scan_op + * + * @param[in] is_integer + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Update(T /*input*/, T& inclusive, T& exclusive, ScanOpT /*scan_op*/, IsIntegerT /*is_integer*/) + { + // initial value unknown + ThreadStore(&temp_storage[HALF_WARP_THREADS + lane_id], (CellT) inclusive); + + __syncwarp(member_mask); + + exclusive = (T) ThreadLoad(&temp_storage[HALF_WARP_THREADS + lane_id - 1]); + } + + /** + * @brief Update inclusive 
and exclusive using input and inclusive (specialized for summation of + * integer types) + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void + Update(T input, T& inclusive, T& exclusive, ::cuda::std::plus<> /*scan_op*/, Int2Type /*is_integer*/) + { + // initial value presumed 0 + exclusive = inclusive - input; + } + + /** + * @brief Update inclusive and exclusive using initial value using input, inclusive, and initial + * value + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Update(T /*input*/, T& inclusive, T& exclusive, ScanOpT scan_op, T initial_value, IsIntegerT /*is_integer*/) + { + inclusive = scan_op(initial_value, inclusive); + ThreadStore(&temp_storage[HALF_WARP_THREADS + lane_id], (CellT) inclusive); + + __syncwarp(member_mask); + + exclusive = (T) ThreadLoad(&temp_storage[HALF_WARP_THREADS + lane_id - 1]); + if (lane_id == 0) + { + exclusive = initial_value; + } + } + + /** + * @brief Update inclusive and exclusive using initial value using input and inclusive + * (specialized for summation of integer types) + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void Update( + T input, T& inclusive, T& exclusive, ::cuda::std::plus<> scan_op, T initial_value, Int2Type /*is_integer*/) + { + inclusive = scan_op(initial_value, inclusive); + exclusive = inclusive - input; + } + + /** + * @brief Update inclusive, exclusive, and warp aggregate using input and inclusive + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Update(T /*input*/, T& inclusive, T& exclusive, T& warp_aggregate, ScanOpT /*scan_op*/, IsIntegerT /*is_integer*/) + { + // Initial value presumed to be unknown or identity (either way our padding is correct) + ThreadStore(&temp_storage[HALF_WARP_THREADS + lane_id], (CellT) inclusive); + + __syncwarp(member_mask); + + exclusive = (T) ThreadLoad(&temp_storage[HALF_WARP_THREADS + lane_id - 1]); + warp_aggregate = (T) ThreadLoad(&temp_storage[WARP_SMEM_ELEMENTS - 1]); + } + + /** + * @brief Update inclusive, exclusive, and warp aggregate using input and 
inclusive (specialized + * for summation of integer types) + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void Update( + T input, + T& inclusive, + T& exclusive, + T& warp_aggregate, + ::cuda::std::plus<> /*scan_o*/, + Int2Type /*is_integer*/) + { + // Initial value presumed to be unknown or identity (either way our padding is correct) + ThreadStore(&temp_storage[HALF_WARP_THREADS + lane_id], (CellT) inclusive); + + __syncwarp(member_mask); + + warp_aggregate = (T) ThreadLoad(&temp_storage[WARP_SMEM_ELEMENTS - 1]); + exclusive = inclusive - input; + } + + /** + * @brief Update inclusive, exclusive, and warp aggregate using input, inclusive, and initial + * value + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Update( + T /*input*/, + T& inclusive, + T& exclusive, + T& warp_aggregate, + ScanOpT scan_op, + T initial_value, + IsIntegerT /*is_integer*/) + { + // Broadcast warp aggregate + ThreadStore(&temp_storage[HALF_WARP_THREADS + lane_id], (CellT) inclusive); + + __syncwarp(member_mask); + + warp_aggregate = (T) ThreadLoad(&temp_storage[WARP_SMEM_ELEMENTS - 1]); + + __syncwarp(member_mask); + + // Update inclusive with initial value + inclusive = scan_op(initial_value, inclusive); + + // Get exclusive from exclusive + ThreadStore(&temp_storage[HALF_WARP_THREADS + lane_id - 1], (CellT) inclusive); + + __syncwarp(member_mask); + + exclusive = (T) ThreadLoad(&temp_storage[HALF_WARP_THREADS + lane_id - 2]); + + if (lane_id == 0) + { + exclusive = initial_value; + } + } +}; +} // namespace detail + +template +using WarpScanSmem CCCL_DEPRECATED_BECAUSE( + "This class is considered an implementation detail and the public interface will be " + "removed.") = detail::WarpScanSmem; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/warp_merge_sort.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/warp_merge_sort.cuh new file mode 100644 index 
0000000000000000000000000000000000000000..de3d311ae59b148ea37e71acb5f6b646e85d3fb5 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/warp_merge_sort.cuh @@ -0,0 +1,176 @@ +/****************************************************************************** + * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + ******************************************************************************/ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include + +#include +#include + +CUB_NAMESPACE_BEGIN + +//! @rst +//! The WarpMergeSort class provides methods for sorting items partitioned across a CUDA warp +//! using a merge sorting method. +//! +//! Overview +//! ++++++++++++++++ +//! +//! WarpMergeSort arranges items into ascending order using a comparison +//! functor with less-than semantics. Merge sort can handle arbitrary types +//! and comparison functors. +//! +//! A Simple Example +//! ++++++++++++++++ +//! +//! The code snippet below illustrates a sort of 64 integer keys that are +//! partitioned across 16 threads where each thread owns 4 consecutive items. +//! +//! .. code-block:: c++ +//! +//! #include // or equivalently +//! +//! struct CustomLess +//! { +//! template +//! __device__ bool operator()(const DataType &lhs, const DataType &rhs) +//! { +//! return lhs < rhs; +//! } +//! }; +//! +//! __global__ void ExampleKernel(...) +//! { +//! constexpr int warp_threads = 16; +//! constexpr int block_threads = 256; +//! constexpr int items_per_thread = 4; +//! constexpr int warps_per_block = block_threads / warp_threads; +//! const int warp_id = static_cast(threadIdx.x) / warp_threads; +//! +//! // Specialize WarpMergeSort for a virtual warp of 16 threads +//! // owning 4 integer items each +//! using WarpMergeSortT = +//! cub::WarpMergeSort; +//! +//! // Allocate shared memory for WarpMergeSort +//! __shared__ typename WarpMergeSortT::TempStorage temp_storage[warps_per_block]; +//! +//! // Obtain a segment of consecutive items that are blocked across threads +//! int thread_keys[items_per_thread]; +//! 
// ... +//! +//! WarpMergeSortT(temp_storage[warp_id]).Sort(thread_keys, CustomLess()); +//! // ... +//! } +//! +//! Suppose the set of input ``thread_keys`` across a warp of threads is +//! ``{ [0,64,1,63], [2,62,3,61], [4,60,5,59], ..., [31,34,32,33] }``. +//! The corresponding output ``thread_keys`` in those threads will be +//! ``{ [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [31,32,33,34] }``. +//! @endrst +//! +//! @tparam KeyT +//! Key type +//! +//! @tparam ITEMS_PER_THREAD +//! The number of items per thread +//! +//! @tparam LOGICAL_WARP_THREADS +//! [optional] The number of threads per "logical" warp (may be less +//! than the number of hardware warp threads). Default is the warp size of the +//! targeted CUDA compute-capability (e.g., 32 threads for SM86). Must be a +//! power of two. +//! +//! @tparam ValueT +//! [optional] Value type (default: cub::NullType, which indicates a +//! keys-only sort) +//! +//! @tparam LEGACY_PTX_ARCH +//! Unused. +//! +template +class WarpMergeSort + : public BlockMergeSortStrategy> +{ +private: + static constexpr bool IS_ARCH_WARP = LOGICAL_WARP_THREADS == CUB_WARP_THREADS(0); + static constexpr bool KEYS_ONLY = ::cuda::std::is_same::value; + static constexpr int TILE_SIZE = ITEMS_PER_THREAD * LOGICAL_WARP_THREADS; + + using BlockMergeSortStrategyT = + BlockMergeSortStrategy; + + const unsigned int warp_id; + const unsigned int member_mask; + +public: + WarpMergeSort() = delete; + + _CCCL_DEVICE _CCCL_FORCEINLINE WarpMergeSort(typename BlockMergeSortStrategyT::TempStorage& temp_storage) + : BlockMergeSortStrategyT( + temp_storage, + IS_ARCH_WARP ? ::cuda::ptx::get_sreg_laneid() : (::cuda::ptx::get_sreg_laneid() % LOGICAL_WARP_THREADS)) + , warp_id(IS_ARCH_WARP ? 
0 : (::cuda::ptx::get_sreg_laneid() / LOGICAL_WARP_THREADS)) + , member_mask(WarpMask(warp_id)) + {} + + _CCCL_DEVICE _CCCL_FORCEINLINE unsigned int get_member_mask() const + { + return member_mask; + } + +private: + _CCCL_DEVICE _CCCL_FORCEINLINE void SyncImplementation() const + { + __syncwarp(member_mask); + } + + friend BlockMergeSortStrategyT; +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/warp_reduce.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/warp_reduce.cuh new file mode 100644 index 0000000000000000000000000000000000000000..4b2c61e343aed346f6e3e8146380776e72697332 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/warp_reduce.cuh @@ -0,0 +1,745 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +//! @file +//! @rst +//! The ``cub::WarpReduce`` class provides :ref:`collective ` methods for +//! computing a parallel reduction of items partitioned across a CUDA thread warp. +//! @endrst + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include + +#include + +CUB_NAMESPACE_BEGIN + +//! @rst +//! The ``WarpReduce`` class provides :ref:`collective ` methods for +//! computing a parallel reduction of items partitioned across a CUDA thread warp. +//! +//! .. image:: ../../img/warp_reduce_logo.png +//! :align: center +//! +//! Overview +//! ++++++++++++++++++++++++++ +//! +//! - A `reduction `__ (or *fold*) +//! uses a binary combining operator to compute a single aggregate from a list of input elements. +//! - Supports "logical" warps smaller than the physical warp size (e.g., logical warps of 8 +//! threads) +//! 
- The number of entrant threads must be an multiple of ``LOGICAL_WARP_THREADS`` +//! +//! Performance Considerations +//! ++++++++++++++++++++++++++ +//! +//! - Uses special instructions when applicable (e.g., warp ``SHFL`` instructions) +//! - Uses synchronization-free communication between warp lanes when applicable +//! - Incurs zero bank conflicts for most types +//! - Computation is slightly more efficient (i.e., having lower instruction overhead) for: +//! +//! - Summation (**vs.** generic reduction) +//! - The architecture's warp size is a whole multiple of ``LOGICAL_WARP_THREADS`` +//! +//! Simple Examples +//! ++++++++++++++++++++++++++ +//! +//! @warpcollective{WarpReduce} +//! +//! The code snippet below illustrates four concurrent warp sum reductions within a block of +//! 128 threads (one per each of the 32-thread warps). +//! +//! .. code-block:: c++ +//! +//! #include +//! +//! __global__ void ExampleKernel(...) +//! { +//! // Specialize WarpReduce for type int +//! using WarpReduce = cub::WarpReduce; +//! +//! // Allocate WarpReduce shared memory for 4 warps +//! __shared__ typename WarpReduce::TempStorage temp_storage[4]; +//! +//! // Obtain one input item per thread +//! int thread_data = ... +//! +//! // Return the warp-wide sums to each lane0 (threads 0, 32, 64, and 96) +//! int warp_id = threadIdx.x / 32; +//! int aggregate = WarpReduce(temp_storage[warp_id]).Sum(thread_data); +//! +//! Suppose the set of input ``thread_data`` across the block of threads is +//! ``{0, 1, 2, 3, ..., 127}``. The corresponding output ``aggregate`` in threads 0, 32, 64, and 96 +//! will be ``496``, ``1520``, ``2544``, and ``3568``, respectively +//! (and is undefined in other threads). +//! +//! The code snippet below illustrates a single warp sum reduction within a block of +//! 128 threads. +//! +//! .. code-block:: c++ +//! +//! #include +//! +//! __global__ void ExampleKernel(...) +//! { +//! // Specialize WarpReduce for type int +//! 
using WarpReduce = cub::WarpReduce; +//! +//! // Allocate WarpReduce shared memory for one warp +//! __shared__ typename WarpReduce::TempStorage temp_storage; +//! ... +//! +//! // Only the first warp performs a reduction +//! if (threadIdx.x < 32) +//! { +//! // Obtain one input item per thread +//! int thread_data = ... +//! +//! // Return the warp-wide sum to lane0 +//! int aggregate = WarpReduce(temp_storage).Sum(thread_data); +//! +//! Suppose the set of input ``thread_data`` across the warp of threads is +//! ``{0, 1, 2, 3, ..., 31}``. The corresponding output ``aggregate`` in thread0 will be ``496`` +//! (and is undefined in other threads). +//! @endrst +//! +//! @tparam T +//! The reduction input/output element type +//! +//! @tparam LOGICAL_WARP_THREADS +//! [optional] The number of threads per "logical" warp (may be less than the number of +//! hardware warp threads). Default is the warp size of the targeted CUDA compute-capability +//! (e.g., 32 threads for SM20). +//! +//! @tparam LEGACY_PTX_ARCH +//! [optional] Unused. +template +class WarpReduce +{ +private: + /****************************************************************************** + * Constants and type definitions + ******************************************************************************/ + + enum + { + /// Whether the logical warp size and the PTX warp size coincide + IS_ARCH_WARP = (LOGICAL_WARP_THREADS == CUB_WARP_THREADS(0)), + + /// Whether the logical warp size is a power-of-two + IS_POW_OF_TWO = PowerOfTwo::VALUE, + }; + +public: +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + + /// Internal specialization. 
+ /// Use SHFL-based reduction if LOGICAL_WARP_THREADS is a power-of-two + using InternalWarpReduce = ::cuda::std:: + _If, detail::WarpReduceSmem>; + +#endif // _CCCL_DOXYGEN_INVOKED + +private: + /// Shared memory storage layout type for WarpReduce + using _TempStorage = typename InternalWarpReduce::TempStorage; + + /****************************************************************************** + * Thread fields + ******************************************************************************/ + + /// Shared storage reference + _TempStorage& temp_storage; + + /****************************************************************************** + * Utility methods + ******************************************************************************/ + +public: + /// \smemstorage{WarpReduce} + struct TempStorage : Uninitialized<_TempStorage> + {}; + + //! @name Collective constructors + //! @{ + + //! @rst + //! Collective constructor using the specified memory allocation as temporary storage. + //! Logical warp and lane identifiers are constructed from ``threadIdx.x``. + //! @endrst + //! + //! @param[in] temp_storage Reference to memory allocation having layout type TempStorage + _CCCL_DEVICE _CCCL_FORCEINLINE WarpReduce(TempStorage& temp_storage) + : temp_storage(temp_storage.Alias()) + {} + + //! @} end member group + //! @name Summation reductions + //! @{ + + //! @rst + //! Computes a warp-wide sum in the calling warp. + //! The output is valid in warp *lane*\ :sub:`0`. + //! + //! @smemwarpreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates four concurrent warp sum reductions within a block of + //! 128 threads (one per each of the 32-thread warps). + //! + //! .. code-block:: c++ + //! + //! #include + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize WarpReduce for type int + //! using WarpReduce = cub::WarpReduce; + //! + //! // Allocate WarpReduce shared memory for 4 warps + //! 
__shared__ typename WarpReduce::TempStorage temp_storage[4]; + //! + //! // Obtain one input item per thread + //! int thread_data = ... + //! + //! // Return the warp-wide sums to each lane0 + //! int warp_id = threadIdx.x / 32; + //! int aggregate = WarpReduce(temp_storage[warp_id]).Sum(thread_data); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{0, 1, 2, 3, ..., 127}``. + //! The corresponding output ``aggregate`` in threads 0, 32, 64, and 96 will ``496``, ``1520``, + //! ``2544``, and ``3568``, respectively (and is undefined in other threads). + //! @endrst + //! + //! @param[in] input Calling thread's input + _CCCL_DEVICE _CCCL_FORCEINLINE T Sum(T input) + { + return InternalWarpReduce(temp_storage).template Reduce(input, LOGICAL_WARP_THREADS, ::cuda::std::plus<>{}); + } + + //! @rst + //! Computes a partially-full warp-wide sum in the calling warp. + //! The output is valid in warp *lane*\ :sub:`0`. + //! + //! All threads across the calling warp must agree on the same value for ``valid_items``. + //! Otherwise the result is undefined. + //! + //! @smemwarpreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates a sum reduction within a single, partially-full + //! block of 32 threads (one warp). + //! + //! .. code-block:: c++ + //! + //! #include + //! + //! __global__ void ExampleKernel(int *d_data, int valid_items) + //! { + //! // Specialize WarpReduce for type int + //! using WarpReduce = cub::WarpReduce; + //! + //! // Allocate WarpReduce shared memory for one warp + //! __shared__ typename WarpReduce::TempStorage temp_storage; + //! + //! // Obtain one input item per thread if in range + //! int thread_data; + //! if (threadIdx.x < valid_items) + //! thread_data = d_data[threadIdx.x]; + //! + //! // Return the warp-wide sums to each lane0 + //! int aggregate = WarpReduce(temp_storage).Sum( + //! thread_data, valid_items); + //! + //! 
Suppose the input ``d_data`` is ``{0, 1, 2, 3, 4, ...`` and ``valid_items`` is ``4``. + //! The corresponding output ``aggregate`` in *lane*\ :sub:`0` is ``6`` + //! (and is undefined in other threads). + //! @endrst + //! + //! @param[in] input + //! Calling thread's input + //! + //! @param[in] valid_items + //! Total number of valid items in the calling thread's logical warp + //! (may be less than ``LOGICAL_WARP_THREADS``) + _CCCL_DEVICE _CCCL_FORCEINLINE T Sum(T input, int valid_items) + { + // Determine if we don't need bounds checking + return InternalWarpReduce(temp_storage).template Reduce(input, valid_items, ::cuda::std::plus<>{}); + } + + //! @rst + //! Computes a segmented sum in the calling warp where segments are defined by head-flags. + //! The sum of each segment is returned to the first lane in that segment + //! (which always includes *lane*\ :sub:`0`). + //! + //! @smemwarpreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates a head-segmented warp sum + //! reduction within a block of 32 threads (one warp). + //! + //! .. code-block:: c++ + //! + //! #include + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize WarpReduce for type int + //! using WarpReduce = cub::WarpReduce; + //! + //! // Allocate WarpReduce shared memory for one warp + //! __shared__ typename WarpReduce::TempStorage temp_storage; + //! + //! // Obtain one input item and flag per thread + //! int thread_data = ... + //! int head_flag = ... + //! + //! // Return the warp-wide sums to each lane0 + //! int aggregate = WarpReduce(temp_storage).HeadSegmentedSum( + //! thread_data, head_flag); + //! + //! Suppose the set of input ``thread_data`` and ``head_flag`` across the block of threads + //! is ``{0, 1, 2, 3, ..., 31`` and is ``{1, 0, 0, 0, 1, 0, 0, 0, ..., 1, 0, 0, 0``, + //! respectively. The corresponding output ``aggregate`` in threads 0, 4, 8, etc. will be + //! ``6``, ``22``, ``38``, etc. 
(and is undefined in other threads). + //! @endrst + //! + //! @tparam ReductionOp + //! **[inferred]** Binary reduction operator type having member + //! `T operator()(const T &a, const T &b)` + //! + //! @param[in] input + //! Calling thread's input + //! + //! @param[in] head_flag + //! Head flag denoting whether or not `input` is the start of a new segment + template + _CCCL_DEVICE _CCCL_FORCEINLINE T HeadSegmentedSum(T input, FlagT head_flag) + { + return HeadSegmentedReduce(input, head_flag, ::cuda::std::plus<>{}); + } + + //! @rst + //! Computes a segmented sum in the calling warp where segments are defined by tail-flags. + //! The sum of each segment is returned to the first lane in that segment + //! (which always includes *lane*\ :sub:`0`). + //! + //! @smemwarpreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates a tail-segmented warp sum reduction within a block of 32 + //! threads (one warp). + //! + //! .. code-block:: c++ + //! + //! #include + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize WarpReduce for type int + //! using WarpReduce = cub::WarpReduce; + //! + //! // Allocate WarpReduce shared memory for one warp + //! __shared__ typename WarpReduce::TempStorage temp_storage; + //! + //! // Obtain one input item and flag per thread + //! int thread_data = ... + //! int tail_flag = ... + //! + //! // Return the warp-wide sums to each lane0 + //! int aggregate = WarpReduce(temp_storage).TailSegmentedSum( + //! thread_data, tail_flag); + //! + //! Suppose the set of input ``thread_data`` and ``tail_flag`` across the block of threads + //! is ``{0, 1, 2, 3, ..., 31}`` and is ``{0, 0, 0, 1, 0, 0, 0, 1, ..., 0, 0, 0, 1}``, + //! respectively. The corresponding output ``aggregate`` in threads 0, 4, 8, etc. will be + //! ``6``, ``22``, ``38``, etc. (and is undefined in other threads). + //! @endrst + //! + //! @tparam ReductionOp + //! 
**[inferred]** Binary reduction operator type having member + //! `T operator()(const T &a, const T &b)` + //! + //! @param[in] input + //! Calling thread's input + //! + //! @param[in] tail_flag + //! Head flag denoting whether or not `input` is the start of a new segment + template + _CCCL_DEVICE _CCCL_FORCEINLINE T TailSegmentedSum(T input, FlagT tail_flag) + { + return TailSegmentedReduce(input, tail_flag, ::cuda::std::plus<>{}); + } + + //! @} end member group + //! @name Generic reductions + //! @{ + + //! @rst + //! Computes a warp-wide reduction in the calling warp using the specified binary reduction + //! functor. The output is valid in warp *lane*\ :sub:`0`. + //! + //! Supports non-commutative reduction operators + //! + //! @smemwarpreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates four concurrent warp max reductions within a block of + //! 128 threads (one per each of the 32-thread warps). + //! + //! .. code-block:: c++ + //! + //! #include + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize WarpReduce for type int + //! using WarpReduce = cub::WarpReduce; + //! + //! // Allocate WarpReduce shared memory for 4 warps + //! __shared__ typename WarpReduce::TempStorage temp_storage[4]; + //! + //! // Obtain one input item per thread + //! int thread_data = ... + //! + //! // Return the warp-wide reductions to each lane0 + //! int warp_id = threadIdx.x / 32; + //! int aggregate = WarpReduce(temp_storage[warp_id]).Reduce( + //! thread_data, cuda::maximum<>{}); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{0, 1, 2, 3, ..., 127}``. The corresponding output ``aggregate`` in threads 0, 32, 64, and + //! 96 will be ``31``, ``63``, ``95``, and ``127``, respectively + //! (and is undefined in other threads). + //! @endrst + //! + //! @tparam ReductionOp + //! **[inferred]** Binary reduction operator type having member + //! 
`T operator()(const T &a, const T &b)` + //! + //! @param[in] input + //! Calling thread's input + //! + //! @param[in] reduction_op + //! Binary reduction operator + template + _CCCL_DEVICE _CCCL_FORCEINLINE T Reduce(T input, ReductionOp reduction_op) + { + return InternalWarpReduce(temp_storage).template Reduce(input, LOGICAL_WARP_THREADS, reduction_op); + } + + //! @rst + //! Computes a partially-full warp-wide reduction in the calling warp using the specified binary + //! reduction functor. The output is valid in warp *lane*\ :sub:`0`. + //! + //! All threads across the calling warp must agree on the same value for ``valid_items``. + //! Otherwise the result is undefined. + //! + //! Supports non-commutative reduction operators + //! + //! @smemwarpreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates a max reduction within a single, partially-full + //! block of 32 threads (one warp). + //! + //! .. code-block:: c++ + //! + //! #include + //! + //! __global__ void ExampleKernel(int *d_data, int valid_items) + //! { + //! // Specialize WarpReduce for type int + //! using WarpReduce = cub::WarpReduce; + //! + //! // Allocate WarpReduce shared memory for one warp + //! __shared__ typename WarpReduce::TempStorage temp_storage; + //! + //! // Obtain one input item per thread if in range + //! int thread_data; + //! if (threadIdx.x < valid_items) + //! thread_data = d_data[threadIdx.x]; + //! + //! // Return the warp-wide reductions to each lane0 + //! int aggregate = WarpReduce(temp_storage).Reduce( + //! thread_data, cuda::maximum<>{}, valid_items); + //! + //! Suppose the input ``d_data`` is ``{0, 1, 2, 3, 4, ... }`` and ``valid_items`` + //! is ``4``. The corresponding output ``aggregate`` in thread0 is ``3`` (and is + //! undefined in other threads). + //! @endrst + //! + //! @tparam ReductionOp + //! **[inferred]** Binary reduction operator type having member + //! `T operator()(const T &a, const T &b)` + //! + //! 
@param[in] input + //! Calling thread's input + //! + //! @param[in] reduction_op + //! Binary reduction operator + //! + //! @param[in] valid_items + //! Total number of valid items in the calling thread's logical warp + //! (may be less than ``LOGICAL_WARP_THREADS``) + template + _CCCL_DEVICE _CCCL_FORCEINLINE T Reduce(T input, ReductionOp reduction_op, int valid_items) + { + return InternalWarpReduce(temp_storage).template Reduce(input, valid_items, reduction_op); + } + + //! @rst + //! Computes a segmented reduction in the calling warp where segments are defined by head-flags. + //! The reduction of each segment is returned to the first lane in that segment + //! (which always includes *lane*\ :sub:`0`). + //! + //! Supports non-commutative reduction operators + //! + //! @smemwarpreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates a head-segmented warp max + //! reduction within a block of 32 threads (one warp). + //! + //! .. code-block:: c++ + //! + //! #include + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize WarpReduce for type int + //! using WarpReduce = cub::WarpReduce; + //! + //! // Allocate WarpReduce shared memory for one warp + //! __shared__ typename WarpReduce::TempStorage temp_storage; + //! + //! // Obtain one input item and flag per thread + //! int thread_data = ... + //! int head_flag = ... + //! + //! // Return the warp-wide reductions to each lane0 + //! int aggregate = WarpReduce(temp_storage).HeadSegmentedReduce( + //! thread_data, head_flag, cuda::maximum<>{}); + //! + //! Suppose the set of input ``thread_data`` and ``head_flag`` across the block of threads + //! is ``{0, 1, 2, 3, ..., 31}`` and is ``{1, 0, 0, 0, 1, 0, 0, 0, ..., 1, 0, 0, 0}``, + //! respectively. The corresponding output ``aggregate`` in threads 0, 4, 8, etc. will be + //! ``3``, ``7``, ``11``, etc. (and is undefined in other threads). + //! @endrst + //! + //! @tparam ReductionOp + //! 
**[inferred]** Binary reduction operator type having member + //! `T operator()(const T &a, const T &b)` + //! + //! @param[in] input + //! Calling thread's input + //! + //! @param[in] head_flag + //! Head flag denoting whether or not `input` is the start of a new segment + //! + //! @param[in] reduction_op + //! Reduction operator + template + _CCCL_DEVICE _CCCL_FORCEINLINE T HeadSegmentedReduce(T input, FlagT head_flag, ReductionOp reduction_op) + { + return InternalWarpReduce(temp_storage).template SegmentedReduce(input, head_flag, reduction_op); + } + + //! @rst + //! Computes a segmented reduction in the calling warp where segments are defined by tail-flags. + //! The reduction of each segment is returned to the first lane in that segment + //! (which always includes *lane*\ :sub:`0`). + //! + //! Supports non-commutative reduction operators + //! + //! @smemwarpreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates a tail-segmented warp max + //! reduction within a block of 32 threads (one warp). + //! + //! .. code-block:: c++ + //! + //! #include + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize WarpReduce for type int + //! using WarpReduce = cub::WarpReduce; + //! + //! // Allocate WarpReduce shared memory for one warp + //! __shared__ typename WarpReduce::TempStorage temp_storage; + //! + //! // Obtain one input item and flag per thread + //! int thread_data = ... + //! int tail_flag = ... + //! + //! // Return the warp-wide reductions to each lane0 + //! int aggregate = WarpReduce(temp_storage).TailSegmentedReduce( + //! thread_data, tail_flag, cuda::maximum<>{}); + //! + //! Suppose the set of input ``thread_data`` and ``tail_flag`` across the block of threads + //! is ``{0, 1, 2, 3, ..., 31}`` and is ``{0, 0, 0, 1, 0, 0, 0, 1, ..., 0, 0, 0, 1}``, + //! respectively. The corresponding output ``aggregate`` in threads 0, 4, 8, etc. will be + //! ``3``, ``7``, ``11``, etc. 
(and is undefined in other threads). + //! @endrst + //! + //! @tparam ReductionOp + //! **[inferred]** Binary reduction operator type having member + //! `T operator()(const T &a, const T &b)` + //! + //! @param[in] input + //! Calling thread's input + //! + //! @param[in] tail_flag + //! Tail flag denoting whether or not \p input is the end of the current segment + //! + //! @param[in] reduction_op + //! Reduction operator + template + _CCCL_DEVICE _CCCL_FORCEINLINE T TailSegmentedReduce(T input, FlagT tail_flag, ReductionOp reduction_op) + { + return InternalWarpReduce(temp_storage).template SegmentedReduce(input, tail_flag, reduction_op); + } + + //! @} end member group +}; + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document +template +class WarpReduce +{ +private: + using _TempStorage = cub::NullType; + +public: + struct InternalWarpReduce + { + struct TempStorage : Uninitialized<_TempStorage> + {}; + + _CCCL_DEVICE _CCCL_FORCEINLINE InternalWarpReduce(TempStorage& /*temp_storage */) {} + + template + _CCCL_DEVICE _CCCL_FORCEINLINE T Reduce(T input, int /* valid_items */, ReductionOp /* reduction_op */) + { + return input; + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE T SegmentedReduce(T input, FlagT /* flag */, ReductionOp /* reduction_op */) + { + return input; + } + }; + + using TempStorage = typename InternalWarpReduce::TempStorage; + + _CCCL_DEVICE _CCCL_FORCEINLINE WarpReduce(TempStorage& /*temp_storage */) {} + + _CCCL_DEVICE _CCCL_FORCEINLINE T Sum(T input) + { + return input; + } + + _CCCL_DEVICE _CCCL_FORCEINLINE T Sum(T input, int /* valid_items */) + { + return input; + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE T HeadSegmentedSum(T input, FlagT /* head_flag */) + { + return input; + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE T TailSegmentedSum(T input, FlagT /* tail_flag */) + { + return input; + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE T Reduce(T input, ReductionOp /* reduction_op */) + { + return input; + } + + template + 
_CCCL_DEVICE _CCCL_FORCEINLINE T Reduce(T input, ReductionOp /* reduction_op */, int /* valid_items */) + { + return input; + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE T HeadSegmentedReduce(T input, FlagT /* head_flag */, ReductionOp /* reduction_op */) + { + return input; + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE T TailSegmentedReduce(T input, FlagT /* tail_flag */, ReductionOp /* reduction_op */) + { + return input; + } +}; +#endif // _CCCL_DOXYGEN_INVOKED + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/warp_scan.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/warp_scan.cuh new file mode 100644 index 0000000000000000000000000000000000000000..6eb6a35562b1cc73e016d8859eef01d5d0e301a2 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/warp_scan.cuh @@ -0,0 +1,1142 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +//! @file +//! @rst +//! The ``cub::WarpScan`` class provides :ref:`collective ` methods for +//! computing a parallel prefix scan of items partitioned across a CUDA thread warp. +//! @endrst + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include + +#include +#include + +CUB_NAMESPACE_BEGIN + +//! @rst +//! The WarpScan class provides :ref:`collective ` methods for computing a +//! parallel prefix scan of items partitioned across a CUDA thread warp. +//! +//! .. image:: ../../img/warp_scan_logo.png +//! :align: center +//! +//! Overview +//! ++++++++++++++++++++++++++ +//! +//! * Given a list of input elements and a binary reduction operator, a +//! `prefix scan `__ produces an output list where each +//! element is computed to be the reduction of the elements occurring earlier in the input list. +//! 
*Prefix sum* connotes a prefix scan with the addition operator. The term *inclusive* +//! indicates that the *i*\ :sup:`th` output reduction incorporates the *i*\ :sup:`th` input. +//! The term *exclusive* indicates the *i*\ :sup:`th` input is not incorporated into +//! the *i*\ :sup:`th` output reduction. +//! * Supports non-commutative scan operators +//! * Supports "logical" warps smaller than the physical warp size +//! (e.g., a logical warp of 8 threads) +//! * The number of entrant threads must be an multiple of ``LOGICAL_WARP_THREADS`` +//! +//! Performance Considerations +//! ++++++++++++++++++++++++++ +//! +//! * Uses special instructions when applicable (e.g., warp ``SHFL``) +//! * Uses synchronization-free communication between warp lanes when applicable +//! * Incurs zero bank conflicts for most types +//! * Computation is slightly more efficient (i.e., having lower instruction overhead) for: +//! +//! * Summation (**vs.** generic scan) +//! * The architecture's warp size is a whole multiple of ``LOGICAL_WARP_THREADS`` +//! +//! Simple Examples +//! ++++++++++++++++++++++++++ +//! +//! @warpcollective{WarpScan} +//! +//! The code snippet below illustrates four concurrent warp prefix sums within a block of +//! 128 threads (one per each of the 32-thread warps). +//! +//! .. code-block:: c++ +//! +//! #include +//! +//! __global__ void ExampleKernel(...) +//! { +//! // Specialize WarpScan for type int +//! using WarpScan = cub::WarpScan; +//! +//! // Allocate WarpScan shared memory for 4 warps +//! __shared__ typename WarpScan::TempStorage temp_storage[4]; +//! +//! // Obtain one input item per thread +//! int thread_data = ... +//! +//! // Compute warp-wide prefix sums +//! int warp_id = threadIdx.x / 32; +//! WarpScan(temp_storage[warp_id]).ExclusiveSum(thread_data, thread_data); +//! +//! Suppose the set of input ``thread_data`` across the block of threads is +//! ``{1, 1, 1, 1, ...}``. 
The corresponding output ``thread_data`` in each of the four warps of +//! threads will be ``0, 1, 2, 3, ..., 31}``. +//! +//! The code snippet below illustrates a single warp prefix sum within a block of +//! 128 threads. +//! +//! .. code-block:: c++ +//! +//! #include +//! +//! __global__ void ExampleKernel(...) +//! { +//! // Specialize WarpScan for type int +//! using WarpScan = cub::WarpScan; +//! +//! // Allocate WarpScan shared memory for one warp +//! __shared__ typename WarpScan::TempStorage temp_storage; +//! ... +//! +//! // Only the first warp performs a prefix sum +//! if (threadIdx.x < 32) +//! { +//! // Obtain one input item per thread +//! int thread_data = ... +//! +//! // Compute warp-wide prefix sums +//! WarpScan(temp_storage).ExclusiveSum(thread_data, thread_data); +//! +//! Suppose the set of input ``thread_data`` across the warp of threads is +//! ``{1, 1, 1, 1, ...}``. The corresponding output ``thread_data`` will be +//! ``{0, 1, 2, 3, ..., 31}``. +//! @endrst +//! +//! @tparam T +//! The scan input/output element type +//! +//! @tparam LOGICAL_WARP_THREADS +//! **[optional]** The number of threads per "logical" warp (may be less than the number of +//! hardware warp threads). Default is the warp size associated with the CUDA Compute Capability +//! targeted by the compiler (e.g., 32 threads for SM20). +//! +//! @tparam LEGACY_PTX_ARCH +//! **[optional]** Unused. 
+template +class WarpScan +{ +private: + /****************************************************************************** + * Constants and type definitions + ******************************************************************************/ + + enum + { + /// Whether the logical warp size and the PTX warp size coincide + IS_ARCH_WARP = (LOGICAL_WARP_THREADS == CUB_WARP_THREADS(0)), + + /// Whether the logical warp size is a power-of-two + IS_POW_OF_TWO = ((LOGICAL_WARP_THREADS & (LOGICAL_WARP_THREADS - 1)) == 0), + + /// Whether the data type is an integer (which has fully-associative addition) + IS_INTEGER = ((Traits::CATEGORY == SIGNED_INTEGER) || (Traits::CATEGORY == UNSIGNED_INTEGER)) + }; + + /// Internal specialization. + /// Use SHFL-based scan if LOGICAL_WARP_THREADS is a power-of-two + using InternalWarpScan = ::cuda::std:: + _If, detail::WarpScanSmem>; + + /// Shared memory storage layout type for WarpScan + using _TempStorage = typename InternalWarpScan::TempStorage; + + /****************************************************************************** + * Thread fields + ******************************************************************************/ + + /// Shared storage reference + _TempStorage& temp_storage; + unsigned int lane_id; + + /****************************************************************************** + * Public types + ******************************************************************************/ + +public: + /// @smemstorage{WarpScan} + struct TempStorage : Uninitialized<_TempStorage> + {}; + + //! @name Collective constructors + //! @{ + + //! @brief Collective constructor using the specified memory allocation as temporary storage. + //! Logical warp and lane identifiers are constructed from `threadIdx.x`. + //! + //! @param[in] temp_storage + //! 
Reference to memory allocation having layout type TempStorage + _CCCL_DEVICE _CCCL_FORCEINLINE WarpScan(TempStorage& temp_storage) + : temp_storage(temp_storage.Alias()) + , lane_id(IS_ARCH_WARP ? ::cuda::ptx::get_sreg_laneid() : ::cuda::ptx::get_sreg_laneid() % LOGICAL_WARP_THREADS) + {} + + //! @} end member group + //! @name Inclusive prefix sums + //! @{ + + //! @rst + //! Computes an inclusive prefix sum across the calling warp. + //! + //! * @smemwarpreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates four concurrent warp-wide inclusive prefix sums within a + //! block of 128 threads (one per each of the 32-thread warps). + //! + //! .. code-block:: c++ + //! + //! #include + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize WarpScan for type int + //! using WarpScan = cub::WarpScan; + //! + //! // Allocate WarpScan shared memory for 4 warps + //! __shared__ typename WarpScan::TempStorage temp_storage[4]; + //! + //! // Obtain one input item per thread + //! int thread_data = ... + //! + //! // Compute inclusive warp-wide prefix sums + //! int warp_id = threadIdx.x / 32; + //! WarpScan(temp_storage[warp_id]).InclusiveSum(thread_data, thread_data); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{1, 1, 1, 1, ...}``. The corresponding output ``thread_data`` in each of the four warps + //! of threads will be ``1, 2, 3, ..., 32}``. + //! @endrst + //! + //! @param[in] input + //! Calling thread's input item. + //! + //! @param[out] inclusive_output + //! Calling thread's output item. May be aliased with `input`. + _CCCL_DEVICE _CCCL_FORCEINLINE void InclusiveSum(T input, T& inclusive_output) + { + InclusiveScan(input, inclusive_output, ::cuda::std::plus<>{}); + } + + //! @rst + //! Computes an inclusive prefix sum across the calling warp. + //! Also provides every thread with the warp-wide ``warp_aggregate`` of all inputs. + //! + //! * @smemwarpreuse + //! + //! 
Snippet + //! +++++++ + //! + //! The code snippet below illustrates four concurrent warp-wide inclusive prefix sums within a + //! block of 128 threads (one per each of the 32-thread warps). + //! + //! .. code-block:: c++ + //! + //! #include + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize WarpScan for type int + //! using WarpScan = cub::WarpScan; + //! + //! // Allocate WarpScan shared memory for 4 warps + //! __shared__ typename WarpScan::TempStorage temp_storage[4]; + //! + //! // Obtain one input item per thread + //! int thread_data = ... + //! + //! // Compute inclusive warp-wide prefix sums + //! int warp_aggregate; + //! int warp_id = threadIdx.x / 32; + //! WarpScan(temp_storage[warp_id]).InclusiveSum(thread_data, + //! thread_data, + //! warp_aggregate); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{1, 1, 1, 1, ...}``. The corresponding output ``thread_data`` in each of the four warps + //! of threads will be ``1, 2, 3, ..., 32}``. Furthermore, ``warp_aggregate`` for all threads + //! in all warps will be ``32``. + //! @endrst + //! + //! @param[in] input + //! Calling thread's input item + //! + //! @param[out] inclusive_output + //! Calling thread's output item. May be aliased with `input` + //! + //! @param[out] warp_aggregate + //! Warp-wide aggregate reduction of input items + _CCCL_DEVICE _CCCL_FORCEINLINE void InclusiveSum(T input, T& inclusive_output, T& warp_aggregate) + { + InclusiveScan(input, inclusive_output, ::cuda::std::plus<>{}, warp_aggregate); + } + + //! @} end member group + //! @name Exclusive prefix sums + //! @{ + + //! @rst + //! Computes an exclusive prefix sum across the calling warp. The value of 0 is applied as the + //! initial value, and is assigned to ``exclusive_output`` in *lane*\ :sub:`0`. + //! + //! * @identityzero + //! * @smemwarpreuse + //! + //! Snippet + //! +++++++ + //! + //! 
The code snippet below illustrates four concurrent warp-wide exclusive prefix sums within a + //! block of 128 threads (one per each of the 32-thread warps). + //! + //! .. code-block:: c++ + //! + //! #include + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize WarpScan for type int + //! using WarpScan = cub::WarpScan; + //! + //! // Allocate WarpScan shared memory for 4 warps + //! __shared__ typename WarpScan::TempStorage temp_storage[4]; + //! + //! // Obtain one input item per thread + //! int thread_data = ... + //! + //! // Compute exclusive warp-wide prefix sums + //! int warp_id = threadIdx.x / 32; + //! WarpScan(temp_storage[warp_id]).ExclusiveSum(thread_data, thread_data); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{1, 1, 1, 1, ...}``. The corresponding output ``thread_data`` in each of the four warps + //! of threads will be ``0, 1, 2, ..., 31}``. + //! @endrst + //! + //! @param[in] input + //! Calling thread's input item. + //! + //! @param[out] exclusive_output + //! Calling thread's output item. May be aliased with `input`. + _CCCL_DEVICE _CCCL_FORCEINLINE void ExclusiveSum(T input, T& exclusive_output) + { + T initial_value{}; + ExclusiveScan(input, exclusive_output, initial_value, ::cuda::std::plus<>{}); + } + + //! @rst + //! Computes an exclusive prefix sum across the calling warp. The value of 0 is applied as the + //! initial value, and is assigned to ``exclusive_output`` in *lane*\ :sub:`0`. + //! Also provides every thread with the warp-wide ``warp_aggregate`` of all inputs. + //! + //! * @identityzero + //! * @smemwarpreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates four concurrent warp-wide exclusive prefix sums within a + //! block of 128 threads (one per each of the 32-thread warps). + //! + //! .. code-block:: c++ + //! + //! #include + //! + //! __global__ void ExampleKernel(...) + //! { + //! 
// Specialize WarpScan for type int + //! using WarpScan = cub::WarpScan; + //! + //! // Allocate WarpScan shared memory for 4 warps + //! __shared__ typename WarpScan::TempStorage temp_storage[4]; + //! + //! // Obtain one input item per thread + //! int thread_data = ... + //! + //! // Compute exclusive warp-wide prefix sums + //! int warp_aggregate; + //! int warp_id = threadIdx.x / 32; + //! WarpScan(temp_storage[warp_id]).ExclusiveSum(thread_data, + //! thread_data, + //! warp_aggregate); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{1, 1, 1, 1, ...}``. The corresponding output ``thread_data`` in each of the four warps + //! of threads will be ``0, 1, 2, ..., 31}``. Furthermore, ``warp_aggregate`` for all threads + //! in all warps will be ``32``. + //! @endrst + //! + //! + //! @param[in] input + //! Calling thread's input item + //! + //! @param[out] exclusive_output + //! Calling thread's output item. May be aliased with `input` + //! + //! @param[out] warp_aggregate + //! Warp-wide aggregate reduction of input items + _CCCL_DEVICE _CCCL_FORCEINLINE void ExclusiveSum(T input, T& exclusive_output, T& warp_aggregate) + { + T initial_value{}; + ExclusiveScan(input, exclusive_output, initial_value, ::cuda::std::plus<>{}, warp_aggregate); + } + + //! @} end member group + //! @name Inclusive prefix scans + //! @{ + + //! @rst + //! Computes an inclusive prefix scan using the specified binary scan functor across the + //! calling warp. + //! + //! * @smemwarpreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates four concurrent warp-wide inclusive prefix max scans + //! within a block of 128 threads (one per each of the 32-thread warps). + //! + //! .. code-block:: c++ + //! + //! #include + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize WarpScan for type int + //! using WarpScan = cub::WarpScan; + //! + //! // Allocate WarpScan shared memory for 4 warps + //! 
__shared__ typename WarpScan::TempStorage temp_storage[4]; + //! + //! // Obtain one input item per thread + //! int thread_data = ... + //! + //! // Compute inclusive warp-wide prefix max scans + //! int warp_id = threadIdx.x / 32; + //! WarpScan(temp_storage[warp_id]).InclusiveScan(thread_data, thread_data, cuda::maximum<>{}); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{0, -1, 2, -3, ..., 126, -127}``. The corresponding output ``thread_data`` in the first + //! warp would be ``0, 0, 2, 2, ..., 30, 30``, the output for the second warp would be + //! ``32, 32, 34, 34, ..., 62, 62``, etc. + //! @endrst + //! + //! @tparam ScanOp + //! **[inferred]** Binary scan operator type having member + //! `T operator()(const T &a, const T &b)` + //! + //! @param[in] input + //! Calling thread's input item + //! + //! @param[out] inclusive_output + //! Calling thread's output item. May be aliased with `input` + //! + //! @param[in] scan_op + //! Binary scan operator + template + _CCCL_DEVICE _CCCL_FORCEINLINE void InclusiveScan(T input, T& inclusive_output, ScanOp scan_op) + { + InternalWarpScan(temp_storage).InclusiveScan(input, inclusive_output, scan_op); + } + + //! @rst + //! Computes an inclusive prefix scan using the specified binary scan functor across the + //! calling warp. + //! + //! * @smemwarpreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates four concurrent warp-wide inclusive prefix sum scans + //! within a block of 128 threads (one per each of the 32-thread warps). + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_warp_scan_api.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin inclusive-warp-scan-init-value + //! :end-before: example-end inclusive-warp-scan-init-value + //! + //! Suppose the set of input ``thread_data`` in the first warp is + //! ``{0, 1, 2, 3, ..., 31}``, in the second warp is ``{1, 2, 3, 4, ..., 32}`` etc. + //! 
The corresponding output ``thread_data`` for a max operation in the first + //! warp would be ``{3, 3, 3, 3, ..., 31}``, the output for the second warp would be + //! ``{3, 3, 3, 4, ..., 32}``, etc. + //! @endrst + //! + //! @tparam ScanOp + //! **[inferred]** Binary scan operator type having member + //! `T operator()(const T &a, const T &b)` + //! + //! @param[in] input + //! Calling thread's input item + //! + //! @param[out] inclusive_output + //! Calling thread's output item. May be aliased with `input` + //! + //! @param[in] initial_value + //! Initial value to seed the inclusive scan (uniform across warp) + //! + //! @param[in] scan_op + //! Binary scan operator + template + _CCCL_DEVICE _CCCL_FORCEINLINE void InclusiveScan(T input, T& inclusive_output, T initial_value, ScanOp scan_op) + { + InternalWarpScan internal(temp_storage); + + T exclusive_output; + internal.InclusiveScan(input, inclusive_output, scan_op); + + internal.Update(input, inclusive_output, exclusive_output, scan_op, initial_value, Int2Type()); + } + + //! @rst + //! Computes an inclusive prefix scan using the specified binary scan functor across the + //! calling warp. Also provides every thread with the warp-wide ``warp_aggregate`` of + //! all inputs. + //! + //! * @smemwarpreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates four concurrent warp-wide inclusive prefix max scans + //! within a block of 128 threads (one per each of the 32-thread warps). + //! + //! .. code-block:: c++ + //! + //! #include + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize WarpScan for type int + //! using WarpScan = cub::WarpScan; + //! + //! // Allocate WarpScan shared memory for 4 warps + //! __shared__ typename WarpScan::TempStorage temp_storage[4]; + //! + //! // Obtain one input item per thread + //! int thread_data = ... + //! + //! // Compute inclusive warp-wide prefix max scans + //! int warp_aggregate; + //! 
int warp_id = threadIdx.x / 32; + //! WarpScan(temp_storage[warp_id]).InclusiveScan( + //! thread_data, thread_data, cuda::maximum<>{}, warp_aggregate); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{0, -1, 2, -3, ..., 126, -127}``. The corresponding output ``thread_data`` in the first + //! warp would be ``0, 0, 2, 2, ..., 30, 30``, the output for the second warp would be + //! ``32, 32, 34, 34, ..., 62, 62``, etc. Furthermore, ``warp_aggregate`` would be assigned + //! ``30`` for threads in the first warp, ``62`` for threads in the second warp, etc. + //! @endrst + //! + //! @tparam ScanOp + //! **[inferred]** Binary scan operator type having member + //! `T operator()(const T &a, const T &b)` + //! + //! @param[in] input + //! Calling thread's input item + //! + //! @param[out] inclusive_output + //! Calling thread's output item. May be aliased with ``input`` + //! + //! @param[in] scan_op + //! Binary scan operator + //! + //! @param[out] warp_aggregate + //! Warp-wide aggregate reduction of input items. + template + _CCCL_DEVICE _CCCL_FORCEINLINE void InclusiveScan(T input, T& inclusive_output, ScanOp scan_op, T& warp_aggregate) + { + InternalWarpScan(temp_storage).InclusiveScan(input, inclusive_output, scan_op, warp_aggregate); + } + + //! @rst + //! Computes an inclusive prefix scan using the specified binary scan functor across the + //! calling warp. Also provides every thread with the warp-wide ``warp_aggregate`` of + //! all inputs. + //! + //! * @smemwarpreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates four concurrent warp-wide inclusive prefix max scans + //! within a block of 128 threads (one scan per warp). + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_warp_scan_api.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin inclusive-warp-scan-init-value-aggregate + //! 
:end-before: example-end inclusive-warp-scan-init-value-aggregate + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{1, 1, 1, 1, ..., 1}``. For initial value equal to 3, the corresponding output + //! ``thread_data`` for a sum operation in the first warp would be + //! ``{4, 5, 6, 7, ..., 35}``, the output for the second warp would be + //! ``{4, 5, 6, 7, ..., 35}``, etc. Furthermore, ``warp_aggregate`` would be assigned + //! ``32`` for threads in each warp. + //! @endrst + //! + //! @tparam ScanOp + //! **[inferred]** Binary scan operator type having member + //! `T operator()(const T &a, const T &b)` + //! + //! @param[in] input + //! Calling thread's input item + //! + //! @param[out] inclusive_output + //! Calling thread's output item. May be aliased with ``input`` + //! + //! @param[in] initial_value + //! Initial value to seed the inclusive scan (uniform across warp). It is not taken + //! into account for warp_aggregate. + //! + //! @param[in] scan_op + //! Binary scan operator + //! + //! @param[out] warp_aggregate + //! Warp-wide aggregate reduction of input items. + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + InclusiveScan(T input, T& inclusive_output, T initial_value, ScanOp scan_op, T& warp_aggregate) + { + InternalWarpScan internal(temp_storage); + + // Perform the inclusive scan operation + internal.InclusiveScan(input, inclusive_output, scan_op); + + // Update the inclusive_output and warp_aggregate using the Update function + T exclusive_output; + internal.Update( + input, inclusive_output, exclusive_output, warp_aggregate, scan_op, initial_value, Int2Type()); + } + + //! @} end member group + //! @name Exclusive prefix scans + //! @{ + + //! @rst + //! Computes an exclusive prefix scan using the specified binary scan functor across the + //! calling warp. Because no initial value is supplied, the ``output`` computed for + //! *lane*\ :sub:`0` is undefined. + //! + //! * @smemwarpreuse + //! + //! 
Snippet + //! +++++++ + //! + //! The code snippet below illustrates four concurrent warp-wide exclusive prefix max scans + //! within a block of 128 threads (one per each of the 32-thread warps). + //! + //! .. code-block:: c++ + //! + //! #include + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize WarpScan for type int + //! using WarpScan = cub::WarpScan; + //! + //! // Allocate WarpScan shared memory for 4 warps + //! __shared__ typename WarpScan::TempStorage temp_storage[4]; + //! + //! // Obtain one input item per thread + //! int thread_data = ... + //! + //! // Compute exclusive warp-wide prefix max scans + //! int warp_id = threadIdx.x / 32; + //! WarpScan(temp_storage[warp_id]).ExclusiveScan(thread_data, thread_data, cuda::maximum<>{}); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{0, -1, 2, -3, ..., 126, -127}``. The corresponding output ``thread_data`` in the first + //! warp would be ``?, 0, 0, 2, ..., 28, 30``, the output for the second warp would be + //! ``?, 32, 32, 34, ..., 60, 62``, etc. + //! (The output ``thread_data`` in warp *lane*\ :sub:`0` is undefined.) + //! @endrst + //! + //! @tparam ScanOp + //! **[inferred]** Binary scan operator type having member + //! `T operator()(const T &a, const T &b)` + //! + //! @param[in] input + //! Calling thread's input item + //! + //! @param[out] exclusive_output + //! Calling thread's output item. May be aliased with `input` + //! + //! @param[in] scan_op + //! Binary scan operator + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ExclusiveScan(T input, T& exclusive_output, ScanOp scan_op) + { + InternalWarpScan internal(temp_storage); + + T inclusive_output; + internal.InclusiveScan(input, inclusive_output, scan_op); + + internal.Update(input, inclusive_output, exclusive_output, scan_op, Int2Type()); + } + + //! @rst + //! Computes an exclusive prefix scan using the specified binary scan functor across the + //! calling warp. + //! 
+ //! * @smemwarpreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates four concurrent warp-wide exclusive prefix max scans + //! within a block of 128 threads (one per each of the 32-thread warps). + //! + //! .. code-block:: c++ + //! + //! #include + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize WarpScan for type int + //! using WarpScan = cub::WarpScan; + //! + //! // Allocate WarpScan shared memory for 4 warps + //! __shared__ typename WarpScan::TempStorage temp_storage[4]; + //! + //! // Obtain one input item per thread + //! int thread_data = ... + //! + //! // Compute exclusive warp-wide prefix max scans + //! int warp_id = threadIdx.x / 32; + //! WarpScan(temp_storage[warp_id]).ExclusiveScan(thread_data, + //! thread_data, + //! INT_MIN, + //! cuda::maximum<>{}); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{0, -1, 2, -3, ..., 126, -127}``. The corresponding output ``thread_data`` in the first + //! warp would be ``INT_MIN, 0, 0, 2, ..., 28, 30``, the output for the second warp would be + //! ``30, 32, 32, 34, ..., 60, 62``, etc. + //! @endrst + //! + //! @tparam ScanOp + //! **[inferred]** Binary scan operator type having member + //! `T operator()(const T &a, const T &b)` + //! + //! @param[in] input + //! Calling thread's input item + //! + //! @param[out] exclusive_output + //! Calling thread's output item. May be aliased with `input` + //! + //! @param[in] initial_value + //! Initial value to seed the exclusive scan + //! + //! @param[in] scan_op + //! Binary scan operator + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ExclusiveScan(T input, T& exclusive_output, T initial_value, ScanOp scan_op) + { + InternalWarpScan internal(temp_storage); + + T inclusive_output; + internal.InclusiveScan(input, inclusive_output, scan_op); + + internal.Update(input, inclusive_output, exclusive_output, scan_op, initial_value, Int2Type()); + } + + //! @rst + //! 
Computes an exclusive prefix scan using the specified binary scan functor across the + //! calling warp. Because no initial value is supplied, the ``output`` computed for + //! *lane*\ :sub:`0` is undefined. Also provides every thread with the warp-wide + //! ``warp_aggregate`` of all inputs. + //! + //! * @smemwarpreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates four concurrent warp-wide exclusive prefix max scans + //! within a block of 128 threads (one per each of the 32-thread warps). + //! + //! .. code-block:: c++ + //! + //! #include + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize WarpScan for type int + //! using WarpScan = cub::WarpScan; + //! + //! // Allocate WarpScan shared memory for 4 warps + //! __shared__ typename WarpScan::TempStorage temp_storage[4]; + //! + //! // Obtain one input item per thread + //! int thread_data = ... + //! + //! // Compute exclusive warp-wide prefix max scans + //! int warp_aggregate; + //! int warp_id = threadIdx.x / 32; + //! WarpScan(temp_storage[warp_id]).ExclusiveScan(thread_data, + //! thread_data, + //! cuda::maximum<>{}, + //! warp_aggregate); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{0, -1, 2, -3, ..., 126, -127}``. The corresponding output ``thread_data`` in the first + //! warp would be ``?, 0, 0, 2, ..., 28, 30``, the output for the second warp would be + //! ``?, 32, 32, 34, ..., 60, 62``, etc. (The output ``thread_data`` in warp *lane*\ :sub:`0` + //! is undefined). Furthermore, ``warp_aggregate`` would be assigned ``30`` for threads in the + //! first warp, \p 62 for threads in the second warp, etc. + //! @endrst + //! + //! @tparam ScanOp + //! **[inferred]** Binary scan operator type having member + //! `T operator()(const T &a, const T &b)` + //! + //! @param[in] input + //! Calling thread's input item + //! + //! @param[out] exclusive_output + //! Calling thread's output item. 
May be aliased with `input` + //! + //! @param[in] scan_op + //! Binary scan operator + //! + //! @param[out] warp_aggregate + //! Warp-wide aggregate reduction of input items + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ExclusiveScan(T input, T& exclusive_output, ScanOp scan_op, T& warp_aggregate) + { + InternalWarpScan internal(temp_storage); + + T inclusive_output; + internal.InclusiveScan(input, inclusive_output, scan_op); + + internal.Update(input, inclusive_output, exclusive_output, warp_aggregate, scan_op, Int2Type()); + } + + //! @rst + //! Computes an exclusive prefix scan using the specified binary scan functor across the + //! calling warp. Also provides every thread with the warp-wide ``warp_aggregate`` of + //! all inputs. + //! + //! * @smemwarpreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates four concurrent warp-wide exclusive prefix max scans + //! within a block of 128 threads (one per each of the 32-thread warps). + //! + //! .. code-block:: c++ + //! + //! #include + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize WarpScan for type int + //! using WarpScan = cub::WarpScan; + //! + //! // Allocate WarpScan shared memory for 4 warps + //! __shared__ typename WarpScan::TempStorage temp_storage[4]; + //! + //! // Obtain one input item per thread + //! int thread_data = ... + //! + //! // Compute exclusive warp-wide prefix max scans + //! int warp_aggregate; + //! int warp_id = threadIdx.x / 32; + //! WarpScan(temp_storage[warp_id]).ExclusiveScan(thread_data, + //! thread_data, + //! INT_MIN, + //! cuda::maximum<>{}, + //! warp_aggregate); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{0, -1, 2, -3, ..., 126, -127}``. The corresponding output ``thread_data`` in the first + //! warp would be ``INT_MIN, 0, 0, 2, ..., 28, 30``, the output for the second warp would be + //! ``30, 32, 32, 34, ..., 60, 62``, etc. 
Furthermore, ``warp_aggregate`` would be assigned + //! ``30`` for threads in the first warp, ``62`` for threads in the second warp, etc. + //! @endrst + //! + //! @tparam ScanOp + //! **[inferred]** Binary scan operator type having member + //! `T operator()(const T &a, const T &b)` + //! + //! @param[in] input + //! Calling thread's input item + //! + //! @param[out] exclusive_output + //! Calling thread's output item. May be aliased with `input` + //! + //! @param[in] initial_value + //! Initial value to seed the exclusive scan + //! + //! @param[in] scan_op + //! Binary scan operator + //! + //! @param[out] warp_aggregate + //! Warp-wide aggregate reduction of input items + //! + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + ExclusiveScan(T input, T& exclusive_output, T initial_value, ScanOp scan_op, T& warp_aggregate) + { + InternalWarpScan internal(temp_storage); + + T inclusive_output; + internal.InclusiveScan(input, inclusive_output, scan_op); + + internal.Update( + input, inclusive_output, exclusive_output, warp_aggregate, scan_op, initial_value, Int2Type()); + } + + //! @} end member group + //! @name Combination (inclusive & exclusive) prefix scans + //! @{ + + //! @rst + //! Computes both inclusive and exclusive prefix scans using the specified binary scan functor + //! across the calling warp. Because no initial value is supplied, the ``exclusive_output`` + //! computed for *lane*\ :sub:`0` is undefined. + //! + //! * @smemwarpreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates four concurrent warp-wide exclusive prefix max scans + //! within a block of 128 threads (one per each of the 32-thread warps). + //! + //! .. code-block:: c++ + //! + //! #include + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize WarpScan for type int + //! using WarpScan = cub::WarpScan; + //! + //! // Allocate WarpScan shared memory for 4 warps + //! __shared__ typename WarpScan::TempStorage temp_storage[4]; + //! 
+ //! // Obtain one input item per thread + //! int thread_data = ... + //! + //! // Compute exclusive warp-wide prefix max scans + //! int inclusive_partial, exclusive_partial; + //! WarpScan(temp_storage[warp_id]).Scan(thread_data, + //! inclusive_partial, + //! exclusive_partial, + //! cuda::maximum<>{}); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{0, -1, 2, -3, ..., 126, -127}``. The corresponding output ``inclusive_partial`` in the + //! first warp would be ``0, 0, 2, 2, ..., 30, 30``, the output for the second warp would be + //! ``32, 32, 34, 34, ..., 62, 62``, etc. The corresponding output ``exclusive_partial`` in the + //! first warp would be ``?, 0, 0, 2, ..., 28, 30``, the output for the second warp would be + //! ``?, 32, 32, 34, ..., 60, 62``, etc. + //! (The output ``thread_data`` in warp *lane*\ :sub:`0` is undefined.) + //! @endrst + //! + //! @tparam ScanOp + //! **[inferred]** Binary scan operator type having member + //! `T operator()(const T &a, const T &b)` + //! + //! @param[in] input + //! Calling thread's input item + //! + //! @param[out] inclusive_output + //! Calling thread's inclusive-scan output item + //! + //! @param[out] exclusive_output + //! Calling thread's exclusive-scan output item + //! + //! @param[in] scan_op + //! Binary scan operator + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Scan(T input, T& inclusive_output, T& exclusive_output, ScanOp scan_op) + { + InternalWarpScan internal(temp_storage); + + internal.InclusiveScan(input, inclusive_output, scan_op); + + internal.Update(input, inclusive_output, exclusive_output, scan_op, Int2Type()); + } + + //! @rst + //! Computes both inclusive and exclusive prefix scans using the specified binary scan functor + //! across the calling warp. + //! + //! * @smemwarpreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates four concurrent warp-wide prefix max scans within a + //! 
block of 128 threads (one per each of the 32-thread warps). + //! + //! .. code-block:: c++ + //! + //! #include + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize WarpScan for type int + //! using WarpScan = cub::WarpScan; + //! + //! // Allocate WarpScan shared memory for 4 warps + //! __shared__ typename WarpScan::TempStorage temp_storage[4]; + //! + //! // Obtain one input item per thread + //! int thread_data = ... + //! + //! // Compute inclusive warp-wide prefix max scans + //! int warp_id = threadIdx.x / 32; + //! int inclusive_partial, exclusive_partial; + //! WarpScan(temp_storage[warp_id]).Scan(thread_data, + //! inclusive_partial, + //! exclusive_partial, + //! INT_MIN, + //! cuda::maximum<>{}); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{0, -1, 2, -3, ..., 126, -127}``. The corresponding output ``inclusive_partial`` in the + //! first warp would be ``0, 0, 2, 2, ..., 30, 30``, the output for the second warp would be + //! ``32, 32, 34, 34, ..., 62, 62``, etc. The corresponding output ``exclusive_partial`` in the + //! first warp would be ``INT_MIN, 0, 0, 2, ..., 28, 30``, the output for the second warp would + //! be ``30, 32, 32, 34, ..., 60, 62``, etc. + //! @endrst + //! + //! @tparam ScanOp + //! **[inferred]** Binary scan operator type having member + //! `T operator()(const T &a, const T &b)` + //! + //! @param[in] input + //! Calling thread's input item + //! + //! @param[out] inclusive_output + //! Calling thread's inclusive-scan output item + //! + //! @param[out] exclusive_output + //! Calling thread's exclusive-scan output item + //! + //! @param[in] initial_value + //! Initial value to seed the exclusive scan + //! + //! @param[in] scan_op + //! 
Binary scan operator + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Scan(T input, T& inclusive_output, T& exclusive_output, T initial_value, ScanOp scan_op) + { + InternalWarpScan internal(temp_storage); + + internal.InclusiveScan(input, inclusive_output, scan_op); + + internal.Update(input, inclusive_output, exclusive_output, scan_op, initial_value, Int2Type()); + } + + //! @} end member group + //! @name Data exchange + //! @{ + + //! @rst + //! Broadcast the value ``input`` from *lane*\ :sub:`src_lane` to all lanes in the warp + //! + //! * @smemwarpreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates the warp-wide broadcasts of values from *lane*\ :sub:`0` + //! in each of four warps to all other threads in those warps. + //! + //! .. code-block:: c++ + //! + //! #include + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize WarpScan for type int + //! using WarpScan = cub::WarpScan; + //! + //! // Allocate WarpScan shared memory for 4 warps + //! __shared__ typename WarpScan::TempStorage temp_storage[4]; + //! + //! // Obtain one input item per thread + //! int thread_data = ... + //! + //! // Broadcast from lane0 in each warp to all other threads in the warp + //! int warp_id = threadIdx.x / 32; + //! thread_data = WarpScan(temp_storage[warp_id]).Broadcast(thread_data, 0); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{0, 1, 2, 3, ..., 127}``. The corresponding output ``thread_data`` will be + //! ``{0, 0, ..., 0}`` in warp\ :sub:`0`, + //! ``{32, 32, ..., 32}`` in warp\ :sub:`1`, + //! ``{64, 64, ..., 64}`` in warp\ :sub:`2`, etc. + //! @endrst + //! + //! @param[in] input + //! The value to broadcast + //! + //! @param[in] src_lane + //! 
Which warp lane is to do the broadcasting + _CCCL_DEVICE _CCCL_FORCEINLINE T Broadcast(T input, unsigned int src_lane) + { + return InternalWarpScan(temp_storage).Broadcast(input, src_lane); + } + + //@} end member group +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/warp_store.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/warp_store.cuh new file mode 100644 index 0000000000000000000000000000000000000000..f0a9929e24fef80e70622b3b8ab54fb70e2e5ca4 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/warp_store.cuh @@ -0,0 +1,523 @@ +/****************************************************************************** + * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +//! @file +//! Operations for writing linear segments of data from the CUDA warp + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include + +#include + +CUB_NAMESPACE_BEGIN + +//! @rst +//! ``cub::WarpStoreAlgorithm`` enumerates alternative algorithms for :cpp:struct:`cub::WarpStore` +//! to write a blocked arrangement of items across a CUDA warp to a linear segment of memory. +//! @endrst +enum WarpStoreAlgorithm +{ + //! @rst + //! Overview + //! ++++++++++++++++++++++++++ + //! + //! A :ref:`blocked arrangement ` of data is written directly + //! to memory. + //! + //! Performance Considerations + //! ++++++++++++++++++++++++++ + //! + //! The utilization of memory transactions (coalescing) decreases as the + //! access stride between threads increases (i.e., the number items per thread). + //! @endrst + WARP_STORE_DIRECT, + + //! @rst + //! Overview + //! ++++++++++++++++++++++++++ + //! + //! A :ref:`striped arrangement ` of data is written + //! directly to memory. + //! + //! Performance Considerations + //! ++++++++++++++++++++++++++ + //! + //! 
The utilization of memory transactions (coalescing) remains high regardless + //! of items written per thread. + //! @endrst + WARP_STORE_STRIPED, + + //! @rst + //! Overview + //! ++++++++++++++++++++++++++ + //! + //! A :ref:`blocked arrangement ` of data is written + //! directly to memory using CUDA's built-in vectorized stores as a coalescing + //! optimization. For example, ``st.global.v4.s32`` instructions will be + //! generated when ``T = int`` and ``ITEMS_PER_THREAD % 4 == 0``. + //! + //! Performance Considerations + //! ++++++++++++++++++++++++++ + //! + //! * The utilization of memory transactions (coalescing) remains high until + //! the the access stride between threads (i.e., the number items per thread) + //! exceeds the maximum vector store width (typically 4 items or 64B, + //! whichever is lower). + //! * The following conditions will prevent vectorization and writing will fall + //! back to ``cub::WARP_STORE_DIRECT``: + //! + //! * ``ITEMS_PER_THREAD`` is odd + //! * The ``OutputIteratorT`` is not a simple pointer type + //! * The block output offset is not quadword-aligned + //! * The data type ``T`` is not a built-in primitive or CUDA vector type + //! (e.g., ``short``, ``int2``, ``double``, ``float2``, etc.) + //! + //! @endrst + WARP_STORE_VECTORIZE, + + //! @rst + //! Overview + //! ++++++++++++++++++++++++++ + //! + //! A :ref:`blocked arrangement ` is locally + //! transposed and then efficiently written to memory as a + //! :ref:`striped arrangement `. + //! + //! Performance Considerations + //! ++++++++++++++++++++++++++ + //! + //! * The utilization of memory transactions (coalescing) remains high + //! regardless of items written per thread. + //! * The local reordering incurs slightly longer latencies and throughput than the + //! direct ``cub::WARP_STORE_DIRECT`` and ``cub::WARP_STORE_VECTORIZE`` alternatives. + //! + //! @endrst + WARP_STORE_TRANSPOSE +}; + +//! @rst +//! The WarpStore class provides :ref:`collective ` +//! 
data movement methods for writing a :ref:`blocked arrangement ` +//! of items partitioned across a CUDA warp to a linear segment of memory. +//! +//! Overview +//! ++++++++++++++++ +//! +//! * The WarpStore class provides a single data movement abstraction that can be +//! specialized to implement different cub::WarpStoreAlgorithm strategies. This +//! facilitates different performance policies for different architectures, +//! data types, granularity sizes, etc. +//! * WarpStore can be optionally specialized by different data movement strategies: +//! +//! #. :cpp:enumerator:`cub::WARP_STORE_DIRECT`: +//! a :ref:`blocked arrangement ` of data is written directly to +//! memory. +//! #. :cpp:enumerator:`cub::WARP_STORE_STRIPED`: +//! a :ref:`striped arrangement ` of data is written directly to +//! memory. +//! #. :cpp:enumerator:`cub::WARP_STORE_VECTORIZE`: +//! a :ref:`blocked arrangement ` of data is written directly to +//! memory using CUDA's built-in vectorized stores as a coalescing optimization. +//! #. :cpp:enumerator:`cub::WARP_STORE_TRANSPOSE`: +//! a :ref:`blocked arrangement ` is locally transposed into a +//! :ref:`striped arrangement ` which is then written to memory. +//! +//! * @rowmajor +//! +//! A Simple Example +//! ++++++++++++++++ +//! +//! The code snippet below illustrates the storing of a "blocked" arrangement +//! of 64 integers across 16 threads (where each thread owns 4 consecutive items) +//! into a linear segment of memory. The store is specialized for +//! ``WARP_STORE_TRANSPOSE``, meaning items are locally reordered among threads so +//! that memory references will be efficiently coalesced using a warp-striped +//! access pattern. +//! +//! .. code-block:: c++ +//! +//! #include // or equivalently +//! +//! __global__ void ExampleKernel(int *d_data, ...) +//! { +//! constexpr int warp_threads = 16; +//! constexpr int block_threads = 256; +//! constexpr int items_per_thread = 4; +//! +//! 
// Specialize WarpStore for a virtual warp of 16 threads owning 4 integer items each +//! using WarpStoreT = WarpStore; +//! +//! constexpr int warps_in_block = block_threads / warp_threads; +//! constexpr int tile_size = items_per_thread * warp_threads; +//! const int warp_id = static_cast(threadIdx.x) / warp_threads; +//! +//! // Allocate shared memory for WarpStore +//! __shared__ typename WarpStoreT::TempStorage temp_storage[warps_in_block]; +//! +//! // Obtain a segment of consecutive items that are blocked across threads +//! int thread_data[4]; +//! ... +//! +//! // Store items to linear memory +//! WarpStoreT(temp_storage[warp_id]).Store(d_data + warp_id * tile_size, thread_data); +//! +//! Suppose the set of ``thread_data`` across the warp threads is +//! ``{ [0,1,2,3], [4,5,6,7], ..., [60,61,62,63] }``. +//! The output ``d_data`` will be ``0, 1, 2, 3, 4, 5, ...``. +//! @endrst +//! +//! @tparam T +//! The type of data to be written. +//! +//! @tparam ITEMS_PER_THREAD +//! The number of consecutive items partitioned onto each thread. +//! +//! @tparam ALGORITHM +//! [optional] cub::WarpStoreAlgorithm tuning policy enumeration. +//! default: cub::WARP_STORE_DIRECT. +//! +//! @tparam LOGICAL_WARP_THREADS +//! [optional] The number of threads per "logical" warp (may be less +//! than the number of hardware warp threads). Default is the warp size of the +//! targeted CUDA compute-capability (e.g., 32 threads for SM86). Must be a +//! power of two. +//! +//! @tparam LEGACY_PTX_ARCH +//! Unused. 
//! @brief WarpStore provides collective data-movement operations for writing a
//! "blocked" arrangement of items, partitioned across a (logical) warp, out to
//! a linear segment of memory, using the store algorithm selected by
//! @p ALGORITHM.
//!
//! @tparam T                    The type of data to be written
//! @tparam ITEMS_PER_THREAD     Number of consecutive items partitioned onto each thread
//! @tparam ALGORITHM            cub::WarpStoreAlgorithm tuning policy (default: WARP_STORE_DIRECT)
//! @tparam LOGICAL_WARP_THREADS Number of threads per logical warp; must be a
//!                              power of two (default: the architectural warp size)
template <typename T,
          int ITEMS_PER_THREAD,
          WarpStoreAlgorithm ALGORITHM = WARP_STORE_DIRECT,
          int LOGICAL_WARP_THREADS     = CUB_PTX_WARP_THREADS>
class WarpStore
{
  static_assert(PowerOfTwo<LOGICAL_WARP_THREADS>::VALUE, "LOGICAL_WARP_THREADS must be a power of two");

  // True when the logical warp spans the full architectural warp, in which case
  // the lane id can be used directly without a modulo reduction.
  static constexpr bool IS_ARCH_WARP = LOGICAL_WARP_THREADS == CUB_WARP_THREADS(0);

private:
  /// Store helper, specialized per WarpStoreAlgorithm.  DUMMY exists only to
  /// keep the specializations partial (full specialization is not allowed at
  /// class scope).
  template <WarpStoreAlgorithm _POLICY, int DUMMY>
  struct StoreInternal;

  /// WARP_STORE_DIRECT: each thread writes its blocked segment directly.
  template <int DUMMY>
  struct StoreInternal<WARP_STORE_DIRECT, DUMMY>
  {
    using TempStorage = NullType; // no shared memory needed

    int linear_tid;

    _CCCL_DEVICE _CCCL_FORCEINLINE StoreInternal(TempStorage& /*temp_storage*/, int linear_tid)
        : linear_tid(linear_tid)
    {}

    template <typename OutputIteratorT>
    _CCCL_DEVICE _CCCL_FORCEINLINE void Store(OutputIteratorT block_itr, T (&items)[ITEMS_PER_THREAD])
    {
      StoreDirectBlocked(linear_tid, block_itr, items);
    }

    template <typename OutputIteratorT>
    _CCCL_DEVICE _CCCL_FORCEINLINE void Store(OutputIteratorT block_itr, T (&items)[ITEMS_PER_THREAD], int valid_items)
    {
      StoreDirectBlocked(linear_tid, block_itr, items, valid_items);
    }
  };

  /// WARP_STORE_STRIPED: threads write a warp-striped arrangement for coalescing.
  template <int DUMMY>
  struct StoreInternal<WARP_STORE_STRIPED, DUMMY>
  {
    using TempStorage = NullType; // no shared memory needed

    int linear_tid;

    _CCCL_DEVICE _CCCL_FORCEINLINE StoreInternal(TempStorage& /*temp_storage*/, int linear_tid)
        : linear_tid(linear_tid)
    {}

    template <typename OutputIteratorT>
    _CCCL_DEVICE _CCCL_FORCEINLINE void Store(OutputIteratorT block_itr, T (&items)[ITEMS_PER_THREAD])
    {
      StoreDirectStriped<LOGICAL_WARP_THREADS>(linear_tid, block_itr, items);
    }

    template <typename OutputIteratorT>
    _CCCL_DEVICE _CCCL_FORCEINLINE void Store(OutputIteratorT block_itr, T (&items)[ITEMS_PER_THREAD], int valid_items)
    {
      StoreDirectStriped<LOGICAL_WARP_THREADS>(linear_tid, block_itr, items, valid_items);
    }
  };

  /// WARP_STORE_VECTORIZE: attempt vectorized stores for raw pointers; fall
  /// back to direct blocked stores for arbitrary iterators or guarded ranges.
  template <int DUMMY>
  struct StoreInternal<WARP_STORE_VECTORIZE, DUMMY>
  {
    using TempStorage = NullType; // no shared memory needed

    int linear_tid;

    _CCCL_DEVICE _CCCL_FORCEINLINE StoreInternal(TempStorage& /*temp_storage*/, int linear_tid)
        : linear_tid(linear_tid)
    {}

    // Vectorized path: only valid for a raw, suitably-aligned pointer.
    _CCCL_DEVICE _CCCL_FORCEINLINE void Store(T* block_ptr, T (&items)[ITEMS_PER_THREAD])
    {
      StoreDirectBlockedVectorized(linear_tid, block_ptr, items);
    }

    // Generic iterators cannot be vectorized; use the direct blocked store.
    template <typename OutputIteratorT>
    _CCCL_DEVICE _CCCL_FORCEINLINE void Store(OutputIteratorT block_itr, T (&items)[ITEMS_PER_THREAD])
    {
      StoreDirectBlocked(linear_tid, block_itr, items);
    }

    // Guarded stores cannot be vectorized either.
    template <typename OutputIteratorT>
    _CCCL_DEVICE _CCCL_FORCEINLINE void Store(OutputIteratorT block_itr, T (&items)[ITEMS_PER_THREAD], int valid_items)
    {
      StoreDirectBlocked(linear_tid, block_itr, items, valid_items);
    }
  };

  /// WARP_STORE_TRANSPOSE: exchange blocked -> striped in shared memory, then
  /// write the striped arrangement so memory references coalesce.
  template <int DUMMY>
  struct StoreInternal<WARP_STORE_TRANSPOSE, DUMMY>
  {
    using WarpExchangeT = WarpExchange<T, ITEMS_PER_THREAD, LOGICAL_WARP_THREADS>;

    struct _TempStorage : WarpExchangeT::TempStorage
    {};

    struct TempStorage : Uninitialized<_TempStorage>
    {};

    _TempStorage& temp_storage;

    int linear_tid;

    _CCCL_DEVICE _CCCL_FORCEINLINE StoreInternal(TempStorage& temp_storage, int linear_tid)
        : temp_storage(temp_storage.Alias())
        , linear_tid(linear_tid)
    {}

    template <typename OutputIteratorT>
    _CCCL_DEVICE _CCCL_FORCEINLINE void Store(OutputIteratorT block_itr, T (&items)[ITEMS_PER_THREAD])
    {
      // Reorder in shared memory first so the subsequent global stores are
      // warp-striped (coalesced).
      WarpExchangeT(temp_storage).BlockedToStriped(items, items);
      StoreDirectStriped<LOGICAL_WARP_THREADS>(linear_tid, block_itr, items);
    }

    template <typename OutputIteratorT>
    _CCCL_DEVICE _CCCL_FORCEINLINE void Store(OutputIteratorT block_itr, T (&items)[ITEMS_PER_THREAD], int valid_items)
    {
      WarpExchangeT(temp_storage).BlockedToStriped(items, items);
      StoreDirectStriped<LOGICAL_WARP_THREADS>(linear_tid, block_itr, items, valid_items);
    }
  };

  /// Internal store implementation selected by ALGORITHM
  using InternalStore = StoreInternal<ALGORITHM, 0>;

  /// Shared memory storage layout type
  using _TempStorage = typename InternalStore::TempStorage;

  // Fallback shared-memory allocation used by the default constructor.
  _CCCL_DEVICE _CCCL_FORCEINLINE _TempStorage& PrivateStorage()
  {
    __shared__ _TempStorage private_storage;
    return private_storage;
  }

  _TempStorage& temp_storage;

  int linear_tid;

public:
  /// @smemstorage{WarpStore}
  struct TempStorage : Uninitialized<_TempStorage>
  {};

  //! @name Collective constructors
  //! @{

  //! @brief Collective constructor using a private static allocation of shared
  //! memory as temporary storage.
  _CCCL_DEVICE _CCCL_FORCEINLINE WarpStore()
      : temp_storage(PrivateStorage())
      , linear_tid(
          IS_ARCH_WARP ? ::cuda::ptx::get_sreg_laneid() : (::cuda::ptx::get_sreg_laneid() % LOGICAL_WARP_THREADS))
  {}

  //! @brief Collective constructor using the specified memory allocation as
  //! temporary storage.
  _CCCL_DEVICE _CCCL_FORCEINLINE WarpStore(TempStorage& temp_storage)
      : temp_storage(temp_storage.Alias())
      , linear_tid(
          IS_ARCH_WARP ? ::cuda::ptx::get_sreg_laneid() : (::cuda::ptx::get_sreg_laneid() % LOGICAL_WARP_THREADS))
  {}

  //! @} end member group
  //! @name Data movement
  //! @{

  //! @rst
  //! Store items into a linear segment of memory.
  //!
  //! @smemwarpreuse
  //!
  //! Snippet
  //! +++++++
  //!
  //! The code snippet below illustrates the storing of a "blocked" arrangement
  //! of 64 integers across 16 threads (where each thread owns 4 consecutive items)
  //! into a linear segment of memory. The store is specialized for
  //! ``WARP_STORE_TRANSPOSE``, meaning items are locally reordered among threads so
  //! that memory references will be efficiently coalesced using a warp-striped
  //! access pattern.
  //!
  //! .. code-block:: c++
  //!
  //!    #include <cub/cub.cuh> // or equivalently <cub/warp/warp_store.cuh>
  //!
  //!    __global__ void ExampleKernel(int *d_data, ...)
  //!    {
  //!        constexpr int warp_threads = 16;
  //!        constexpr int block_threads = 256;
  //!        constexpr int items_per_thread = 4;
  //!
  //!        // Specialize WarpStore for a virtual warp of 16 threads owning 4 integer items each
  //!        using WarpStoreT = WarpStore<int,
  //!                                     items_per_thread,
  //!                                     cub::WARP_STORE_TRANSPOSE,
  //!                                     warp_threads>;
  //!
  //!        constexpr int warps_in_block = block_threads / warp_threads;
  //!        constexpr int tile_size = items_per_thread * warp_threads;
  //!        const int warp_id = static_cast<int>(threadIdx.x) / warp_threads;
  //!
  //!        // Allocate shared memory for WarpStore
  //!        __shared__ typename WarpStoreT::TempStorage temp_storage[warps_in_block];
  //!
  //!        // Obtain a segment of consecutive items that are blocked across threads
  //!        int thread_data[4];
  //!        ...
  //!
  //!        // Store items to linear memory
  //!        WarpStoreT(temp_storage[warp_id]).Store(d_data + warp_id * tile_size, thread_data);
  //!
  //! Suppose the set of ``thread_data`` across the warp threads is
  //! ``{ [0,1,2,3], [4,5,6,7], ..., [60,61,62,63] }``.
  //! The output ``d_data`` will be ``0, 1, 2, 3, 4, 5, ...``.
  //! @endrst
  //!
  //! @param[out] block_itr The thread block's base output iterator for storing to
  //! @param[in] items Data to store
  template <typename OutputIteratorT>
  _CCCL_DEVICE _CCCL_FORCEINLINE void Store(OutputIteratorT block_itr, T (&items)[ITEMS_PER_THREAD])
  {
    InternalStore(temp_storage, linear_tid).Store(block_itr, items);
  }

  //! @rst
  //! Store items into a linear segment of memory, guarded by range.
  //!
  //! @smemwarpreuse
  //!
  //! Snippet
  //! +++++++
  //!
  //! The code snippet below illustrates the storing of a "blocked" arrangement
  //! of 64 integers across 16 threads (where each thread owns 4 consecutive items)
  //! into a linear segment of memory. The store is specialized for
  //! ``WARP_STORE_TRANSPOSE``, meaning items are locally reordered among threads so
  //! that memory references will be efficiently coalesced using a warp-striped
  //! access pattern.
  //!
  //! .. code-block:: c++
  //!
  //!    #include <cub/cub.cuh> // or equivalently <cub/warp/warp_store.cuh>
  //!
  //!    __global__ void ExampleKernel(int *d_data, int valid_items ...)
  //!    {
  //!        constexpr int warp_threads = 16;
  //!        constexpr int block_threads = 256;
  //!        constexpr int items_per_thread = 4;
  //!
  //!        // Specialize WarpStore for a virtual warp of 16 threads owning 4 integer items each
  //!        using WarpStoreT = WarpStore<int,
  //!                                     items_per_thread,
  //!                                     cub::WARP_STORE_TRANSPOSE,
  //!                                     warp_threads>;
  //!
  //!        constexpr int warps_in_block = block_threads / warp_threads;
  //!        constexpr int tile_size = items_per_thread * warp_threads;
  //!        const int warp_id = static_cast<int>(threadIdx.x) / warp_threads;
  //!
  //!        // Allocate shared memory for WarpStore
  //!        __shared__ typename WarpStoreT::TempStorage temp_storage[warps_in_block];
  //!
  //!        // Obtain a segment of consecutive items that are blocked across threads
  //!        int thread_data[4];
  //!        ...
  //!
  //!        // Store items to linear memory
  //!        WarpStoreT(temp_storage[warp_id]).Store(
  //!          d_data + warp_id * tile_size, thread_data, valid_items);
  //!
  //! Suppose the set of ``thread_data`` across the warp threads is
  //! ``{ [0,1,2,3], [4,5,6,7], ..., [60,61,62,63] }`` and ``valid_items``
  //! is ``5``. The output ``d_data`` will be ``0, 1, 2, 3, 4, ?, ?, ...``,
  //! with only the first two threads being unmasked to store portions of valid
  //! data.
  //! @endrst
  //!
  //! @param[out] block_itr The thread block's base output iterator for storing to
  //! @param[in] items Data to store
  //! @param[in] valid_items Number of valid items to write
  //!
  template <typename OutputIteratorT>
  _CCCL_DEVICE _CCCL_FORCEINLINE void Store(OutputIteratorT block_itr, T (&items)[ITEMS_PER_THREAD], int valid_items)
  {
    InternalStore(temp_storage, linear_tid).Store(block_itr, items, valid_items);
  }

  //! @} end member group
};

CUB_NAMESPACE_END
diff --git a/vllm/lib/python3.10/site-packages/cupy/lib/_polynomial.cpython-310-x86_64-linux-gnu.so b/vllm/lib/python3.10/site-packages/cupy/lib/_polynomial.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..2de8a8f38ecf41d35dfe526a58cc4cd736f9b114 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/lib/_polynomial.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4f86740176f582eab90a9c67d331933836b3baf38a0df8618f211bf0c44a4d4 +size 697720