content
stringlengths
1
103k
path
stringlengths
8
216
filename
stringlengths
2
179
language
stringclasses
15 values
size_bytes
int64
2
189k
quality_score
float64
0.5
0.95
complexity
float64
0
1
documentation_ratio
float64
0
1
repository
stringclasses
5 values
stars
int64
0
1k
created_date
stringdate
2023-07-10 19:21:08
2025-07-09 19:11:45
license
stringclasses
4 values
is_test
bool
2 classes
file_hash
stringlengths
32
32
from __future__ import annotations\n\nfrom functools import partial\nfrom typing import TYPE_CHECKING, Any, Protocol, overload\n\nfrom narwhals._compliant.typing import (\n CompliantExprT,\n CompliantFrameT,\n CompliantLazyFrameT,\n DepthTrackingExprT,\n EagerDataFrameT,\n EagerExprT,\n EagerSeriesT,\n LazyExprT,\n NativeFrameT,\n NativeFrameT_co,\n NativeSeriesT,\n)\nfrom narwhals._utils import (\n exclude_column_names,\n get_column_names,\n passthrough_column_names,\n)\nfrom narwhals.dependencies import is_numpy_array_2d\n\nif TYPE_CHECKING:\n from collections.abc import Container, Iterable, Mapping, Sequence\n\n from typing_extensions import TypeAlias\n\n from narwhals._compliant.selectors import CompliantSelectorNamespace\n from narwhals._compliant.when_then import CompliantWhen, EagerWhen\n from narwhals._utils import Implementation, Version\n from narwhals.dtypes import DType\n from narwhals.schema import Schema\n from narwhals.typing import (\n ConcatMethod,\n Into1DArray,\n IntoDType,\n NonNestedLiteral,\n _2DArray,\n )\n\n Incomplete: TypeAlias = Any\n\n__all__ = ["CompliantNamespace", "EagerNamespace"]\n\n\nclass CompliantNamespace(Protocol[CompliantFrameT, CompliantExprT]):\n _implementation: Implementation\n _backend_version: tuple[int, ...]\n _version: Version\n\n def all(self) -> CompliantExprT:\n return self._expr.from_column_names(get_column_names, context=self)\n\n def col(self, *column_names: str) -> CompliantExprT:\n return self._expr.from_column_names(\n passthrough_column_names(column_names), context=self\n )\n\n def exclude(self, excluded_names: Container[str]) -> CompliantExprT:\n return self._expr.from_column_names(\n partial(exclude_column_names, names=excluded_names), context=self\n )\n\n def nth(self, *column_indices: int) -> CompliantExprT:\n return self._expr.from_column_indices(*column_indices, context=self)\n\n def len(self) -> CompliantExprT: ...\n def lit(self, value: NonNestedLiteral, dtype: IntoDType | None) -> CompliantExprT: 
...\n def all_horizontal(\n self, *exprs: CompliantExprT, ignore_nulls: bool\n ) -> CompliantExprT: ...\n def any_horizontal(\n self, *exprs: CompliantExprT, ignore_nulls: bool\n ) -> CompliantExprT: ...\n def sum_horizontal(self, *exprs: CompliantExprT) -> CompliantExprT: ...\n def mean_horizontal(self, *exprs: CompliantExprT) -> CompliantExprT: ...\n def min_horizontal(self, *exprs: CompliantExprT) -> CompliantExprT: ...\n def max_horizontal(self, *exprs: CompliantExprT) -> CompliantExprT: ...\n def concat(\n self, items: Iterable[CompliantFrameT], *, how: ConcatMethod\n ) -> CompliantFrameT: ...\n def when(\n self, predicate: CompliantExprT\n ) -> CompliantWhen[CompliantFrameT, Incomplete, CompliantExprT]: ...\n def concat_str(\n self, *exprs: CompliantExprT, separator: str, ignore_nulls: bool\n ) -> CompliantExprT: ...\n @property\n def selectors(self) -> CompliantSelectorNamespace[Any, Any]: ...\n @property\n def _expr(self) -> type[CompliantExprT]: ...\n\n\nclass DepthTrackingNamespace(\n CompliantNamespace[CompliantFrameT, DepthTrackingExprT],\n Protocol[CompliantFrameT, DepthTrackingExprT],\n):\n def all(self) -> DepthTrackingExprT:\n return self._expr.from_column_names(\n get_column_names, function_name="all", context=self\n )\n\n def col(self, *column_names: str) -> DepthTrackingExprT:\n return self._expr.from_column_names(\n passthrough_column_names(column_names), function_name="col", context=self\n )\n\n def exclude(self, excluded_names: Container[str]) -> DepthTrackingExprT:\n return self._expr.from_column_names(\n partial(exclude_column_names, names=excluded_names),\n function_name="exclude",\n context=self,\n )\n\n\nclass LazyNamespace(\n CompliantNamespace[CompliantLazyFrameT, LazyExprT],\n Protocol[CompliantLazyFrameT, LazyExprT, NativeFrameT_co],\n):\n @property\n def _lazyframe(self) -> type[CompliantLazyFrameT]: ...\n\n def from_native(self, data: NativeFrameT_co | Any, /) -> CompliantLazyFrameT:\n if self._lazyframe._is_native(data):\n return 
self._lazyframe.from_native(data, context=self)\n else: # pragma: no cover\n msg = f"Unsupported type: {type(data).__name__!r}"\n raise TypeError(msg)\n\n\nclass EagerNamespace(\n DepthTrackingNamespace[EagerDataFrameT, EagerExprT],\n Protocol[EagerDataFrameT, EagerSeriesT, EagerExprT, NativeFrameT, NativeSeriesT],\n):\n @property\n def _dataframe(self) -> type[EagerDataFrameT]: ...\n @property\n def _series(self) -> type[EagerSeriesT]: ...\n def when(\n self, predicate: EagerExprT\n ) -> EagerWhen[EagerDataFrameT, EagerSeriesT, EagerExprT, NativeSeriesT]: ...\n\n @overload\n def from_native(self, data: NativeFrameT, /) -> EagerDataFrameT: ...\n @overload\n def from_native(self, data: NativeSeriesT, /) -> EagerSeriesT: ...\n def from_native(\n self, data: NativeFrameT | NativeSeriesT | Any, /\n ) -> EagerDataFrameT | EagerSeriesT:\n if self._dataframe._is_native(data):\n return self._dataframe.from_native(data, context=self)\n elif self._series._is_native(data):\n return self._series.from_native(data, context=self)\n msg = f"Unsupported type: {type(data).__name__!r}"\n raise TypeError(msg)\n\n @overload\n def from_numpy(self, data: Into1DArray, /, schema: None = ...) 
-> EagerSeriesT: ...\n\n @overload\n def from_numpy(\n self,\n data: _2DArray,\n /,\n schema: Mapping[str, DType] | Schema | Sequence[str] | None,\n ) -> EagerDataFrameT: ...\n\n def from_numpy(\n self,\n data: Into1DArray | _2DArray,\n /,\n schema: Mapping[str, DType] | Schema | Sequence[str] | None = None,\n ) -> EagerDataFrameT | EagerSeriesT:\n if is_numpy_array_2d(data):\n return self._dataframe.from_numpy(data, schema=schema, context=self)\n return self._series.from_numpy(data, context=self)\n\n def _concat_diagonal(self, dfs: Sequence[NativeFrameT], /) -> NativeFrameT: ...\n def _concat_horizontal(\n self, dfs: Sequence[NativeFrameT | Any], /\n ) -> NativeFrameT: ...\n def _concat_vertical(self, dfs: Sequence[NativeFrameT], /) -> NativeFrameT: ...\n def concat(\n self, items: Iterable[EagerDataFrameT], *, how: ConcatMethod\n ) -> EagerDataFrameT:\n dfs = [item.native for item in items]\n if how == "horizontal":\n native = self._concat_horizontal(dfs)\n elif how == "vertical":\n native = self._concat_vertical(dfs)\n elif how == "diagonal":\n native = self._concat_diagonal(dfs)\n else: # pragma: no cover\n raise NotImplementedError\n return self._dataframe.from_native(native, context=self)\n
.venv\Lib\site-packages\narwhals\_compliant\namespace.py
namespace.py
Python
6,986
0.95
0.227273
0
vue-tools
196
2024-10-30T20:06:02.690350
MIT
false
5e1d1f6992e91b1c54aac7199aa0ee21
"""Almost entirely complete, generic `selectors` implementation."""\n\nfrom __future__ import annotations\n\nimport re\nfrom functools import partial\nfrom typing import TYPE_CHECKING, Protocol, TypeVar, overload\n\nfrom narwhals._compliant.expr import CompliantExpr\nfrom narwhals._typing_compat import Protocol38\nfrom narwhals._utils import (\n _parse_time_unit_and_time_zone,\n dtype_matches_time_unit_and_time_zone,\n get_column_names,\n is_compliant_dataframe,\n)\n\nif TYPE_CHECKING:\n from collections.abc import Collection, Iterable, Iterator, Sequence\n from datetime import timezone\n\n from typing_extensions import Self, TypeAlias, TypeIs\n\n from narwhals._compliant.expr import NativeExpr\n from narwhals._compliant.typing import (\n CompliantDataFrameAny,\n CompliantExprAny,\n CompliantFrameAny,\n CompliantLazyFrameAny,\n CompliantSeriesAny,\n CompliantSeriesOrNativeExprAny,\n EvalNames,\n EvalSeries,\n ScalarKwargs,\n )\n from narwhals._utils import Implementation, Version, _FullContext\n from narwhals.dtypes import DType\n from narwhals.typing import TimeUnit\n\n__all__ = [\n "CompliantSelector",\n "CompliantSelectorNamespace",\n "EagerSelectorNamespace",\n "LazySelectorNamespace",\n]\n\n\nSeriesOrExprT = TypeVar("SeriesOrExprT", bound="CompliantSeriesOrNativeExprAny")\nSeriesT = TypeVar("SeriesT", bound="CompliantSeriesAny")\nExprT = TypeVar("ExprT", bound="NativeExpr")\nFrameT = TypeVar("FrameT", bound="CompliantFrameAny")\nDataFrameT = TypeVar("DataFrameT", bound="CompliantDataFrameAny")\nLazyFrameT = TypeVar("LazyFrameT", bound="CompliantLazyFrameAny")\nSelectorOrExpr: TypeAlias = (\n "CompliantSelector[FrameT, SeriesOrExprT] | CompliantExpr[FrameT, SeriesOrExprT]"\n)\n\n\nclass CompliantSelectorNamespace(Protocol[FrameT, SeriesOrExprT]):\n _implementation: Implementation\n _backend_version: tuple[int, ...]\n _version: Version\n\n @classmethod\n def from_namespace(cls, context: _FullContext, /) -> Self:\n obj = cls.__new__(cls)\n obj._implementation = 
context._implementation\n obj._backend_version = context._backend_version\n obj._version = context._version\n return obj\n\n @property\n def _selector(self) -> type[CompliantSelector[FrameT, SeriesOrExprT]]: ...\n\n def _iter_columns(self, df: FrameT, /) -> Iterator[SeriesOrExprT]: ...\n\n def _iter_schema(self, df: FrameT, /) -> Iterator[tuple[str, DType]]: ...\n\n def _iter_columns_dtypes(\n self, df: FrameT, /\n ) -> Iterator[tuple[SeriesOrExprT, DType]]: ...\n\n def _iter_columns_names(self, df: FrameT, /) -> Iterator[tuple[SeriesOrExprT, str]]:\n yield from zip(self._iter_columns(df), df.columns)\n\n def _is_dtype(\n self: CompliantSelectorNamespace[FrameT, SeriesOrExprT], dtype: type[DType], /\n ) -> CompliantSelector[FrameT, SeriesOrExprT]:\n def series(df: FrameT) -> Sequence[SeriesOrExprT]:\n return [\n ser for ser, tp in self._iter_columns_dtypes(df) if isinstance(tp, dtype)\n ]\n\n def names(df: FrameT) -> Sequence[str]:\n return [name for name, tp in self._iter_schema(df) if isinstance(tp, dtype)]\n\n return self._selector.from_callables(series, names, context=self)\n\n def by_dtype(\n self, dtypes: Collection[DType | type[DType]]\n ) -> CompliantSelector[FrameT, SeriesOrExprT]:\n def series(df: FrameT) -> Sequence[SeriesOrExprT]:\n return [ser for ser, tp in self._iter_columns_dtypes(df) if tp in dtypes]\n\n def names(df: FrameT) -> Sequence[str]:\n return [name for name, tp in self._iter_schema(df) if tp in dtypes]\n\n return self._selector.from_callables(series, names, context=self)\n\n def matches(self, pattern: str) -> CompliantSelector[FrameT, SeriesOrExprT]:\n p = re.compile(pattern)\n\n def series(df: FrameT) -> Sequence[SeriesOrExprT]:\n if (\n is_compliant_dataframe(df)\n and not self._implementation.is_duckdb()\n and not self._implementation.is_ibis()\n ):\n return [df.get_column(col) for col in df.columns if p.search(col)]\n\n return [ser for ser, name in self._iter_columns_names(df) if p.search(name)]\n\n def names(df: FrameT) -> 
Sequence[str]:\n return [col for col in df.columns if p.search(col)]\n\n return self._selector.from_callables(series, names, context=self)\n\n def numeric(self) -> CompliantSelector[FrameT, SeriesOrExprT]:\n def series(df: FrameT) -> Sequence[SeriesOrExprT]:\n return [ser for ser, tp in self._iter_columns_dtypes(df) if tp.is_numeric()]\n\n def names(df: FrameT) -> Sequence[str]:\n return [name for name, tp in self._iter_schema(df) if tp.is_numeric()]\n\n return self._selector.from_callables(series, names, context=self)\n\n def categorical(self) -> CompliantSelector[FrameT, SeriesOrExprT]:\n return self._is_dtype(self._version.dtypes.Categorical)\n\n def string(self) -> CompliantSelector[FrameT, SeriesOrExprT]:\n return self._is_dtype(self._version.dtypes.String)\n\n def boolean(self) -> CompliantSelector[FrameT, SeriesOrExprT]:\n return self._is_dtype(self._version.dtypes.Boolean)\n\n def all(self) -> CompliantSelector[FrameT, SeriesOrExprT]:\n def series(df: FrameT) -> Sequence[SeriesOrExprT]:\n return list(self._iter_columns(df))\n\n return self._selector.from_callables(series, get_column_names, context=self)\n\n def datetime(\n self,\n time_unit: TimeUnit | Iterable[TimeUnit] | None,\n time_zone: str | timezone | Iterable[str | timezone | None] | None,\n ) -> CompliantSelector[FrameT, SeriesOrExprT]:\n time_units, time_zones = _parse_time_unit_and_time_zone(time_unit, time_zone)\n matches = partial(\n dtype_matches_time_unit_and_time_zone,\n dtypes=self._version.dtypes,\n time_units=time_units,\n time_zones=time_zones,\n )\n\n def series(df: FrameT) -> Sequence[SeriesOrExprT]:\n return [ser for ser, tp in self._iter_columns_dtypes(df) if matches(tp)]\n\n def names(df: FrameT) -> Sequence[str]:\n return [name for name, tp in self._iter_schema(df) if matches(tp)]\n\n return self._selector.from_callables(series, names, context=self)\n\n\nclass EagerSelectorNamespace(\n CompliantSelectorNamespace[DataFrameT, SeriesT], Protocol[DataFrameT, SeriesT]\n):\n def 
_iter_schema(self, df: DataFrameT, /) -> Iterator[tuple[str, DType]]:\n for ser in self._iter_columns(df):\n yield ser.name, ser.dtype\n\n def _iter_columns(self, df: DataFrameT, /) -> Iterator[SeriesT]:\n yield from df.iter_columns()\n\n def _iter_columns_dtypes(self, df: DataFrameT, /) -> Iterator[tuple[SeriesT, DType]]:\n for ser in self._iter_columns(df):\n yield ser, ser.dtype\n\n\nclass LazySelectorNamespace(\n CompliantSelectorNamespace[LazyFrameT, ExprT], Protocol[LazyFrameT, ExprT]\n):\n def _iter_schema(self, df: LazyFrameT) -> Iterator[tuple[str, DType]]:\n yield from df.schema.items()\n\n def _iter_columns(self, df: LazyFrameT) -> Iterator[ExprT]:\n yield from df._iter_columns()\n\n def _iter_columns_dtypes(self, df: LazyFrameT, /) -> Iterator[tuple[ExprT, DType]]:\n yield from zip(self._iter_columns(df), df.schema.values())\n\n\nclass CompliantSelector(\n CompliantExpr[FrameT, SeriesOrExprT], Protocol38[FrameT, SeriesOrExprT]\n):\n _call: EvalSeries[FrameT, SeriesOrExprT]\n _window_function: None\n _function_name: str\n _depth: int\n _implementation: Implementation\n _backend_version: tuple[int, ...]\n _version: Version\n _scalar_kwargs: ScalarKwargs\n\n @classmethod\n def from_callables(\n cls,\n call: EvalSeries[FrameT, SeriesOrExprT],\n evaluate_output_names: EvalNames[FrameT],\n *,\n context: _FullContext,\n ) -> Self:\n obj = cls.__new__(cls)\n obj._call = call\n obj._window_function = None\n obj._depth = 0\n obj._function_name = "selector"\n obj._evaluate_output_names = evaluate_output_names\n obj._alias_output_names = None\n obj._implementation = context._implementation\n obj._backend_version = context._backend_version\n obj._version = context._version\n obj._scalar_kwargs = {}\n return obj\n\n @property\n def selectors(self) -> CompliantSelectorNamespace[FrameT, SeriesOrExprT]:\n return self.__narwhals_namespace__().selectors\n\n def _to_expr(self) -> CompliantExpr[FrameT, SeriesOrExprT]: ...\n\n def _is_selector(\n self, other: Self | 
CompliantExpr[FrameT, SeriesOrExprT]\n ) -> TypeIs[CompliantSelector[FrameT, SeriesOrExprT]]:\n return isinstance(other, type(self))\n\n @overload\n def __sub__(self, other: Self) -> Self: ...\n @overload\n def __sub__(\n self, other: CompliantExpr[FrameT, SeriesOrExprT]\n ) -> CompliantExpr[FrameT, SeriesOrExprT]: ...\n def __sub__(\n self, other: SelectorOrExpr[FrameT, SeriesOrExprT]\n ) -> SelectorOrExpr[FrameT, SeriesOrExprT]:\n if self._is_selector(other):\n\n def series(df: FrameT) -> Sequence[SeriesOrExprT]:\n lhs_names, rhs_names = _eval_lhs_rhs(df, self, other)\n return [\n x for x, name in zip(self(df), lhs_names) if name not in rhs_names\n ]\n\n def names(df: FrameT) -> Sequence[str]:\n lhs_names, rhs_names = _eval_lhs_rhs(df, self, other)\n return [x for x in lhs_names if x not in rhs_names]\n\n return self.selectors._selector.from_callables(series, names, context=self)\n return self._to_expr() - other\n\n @overload\n def __or__(self, other: Self) -> Self: ...\n @overload\n def __or__(\n self, other: CompliantExpr[FrameT, SeriesOrExprT]\n ) -> CompliantExpr[FrameT, SeriesOrExprT]: ...\n def __or__(\n self, other: SelectorOrExpr[FrameT, SeriesOrExprT]\n ) -> SelectorOrExpr[FrameT, SeriesOrExprT]:\n if self._is_selector(other):\n\n def series(df: FrameT) -> Sequence[SeriesOrExprT]:\n lhs_names, rhs_names = _eval_lhs_rhs(df, self, other)\n return [\n *(x for x, name in zip(self(df), lhs_names) if name not in rhs_names),\n *other(df),\n ]\n\n def names(df: FrameT) -> Sequence[str]:\n lhs_names, rhs_names = _eval_lhs_rhs(df, self, other)\n return [*(x for x in lhs_names if x not in rhs_names), *rhs_names]\n\n return self.selectors._selector.from_callables(series, names, context=self)\n return self._to_expr() | other\n\n @overload\n def __and__(self, other: Self) -> Self: ...\n @overload\n def __and__(\n self, other: CompliantExpr[FrameT, SeriesOrExprT]\n ) -> CompliantExpr[FrameT, SeriesOrExprT]: ...\n def __and__(\n self, other: SelectorOrExpr[FrameT, 
SeriesOrExprT]\n ) -> SelectorOrExpr[FrameT, SeriesOrExprT]:\n if self._is_selector(other):\n\n def series(df: FrameT) -> Sequence[SeriesOrExprT]:\n lhs_names, rhs_names = _eval_lhs_rhs(df, self, other)\n return [x for x, name in zip(self(df), lhs_names) if name in rhs_names]\n\n def names(df: FrameT) -> Sequence[str]:\n lhs_names, rhs_names = _eval_lhs_rhs(df, self, other)\n return [x for x in lhs_names if x in rhs_names]\n\n return self.selectors._selector.from_callables(series, names, context=self)\n return self._to_expr() & other\n\n def __invert__(self) -> CompliantSelector[FrameT, SeriesOrExprT]:\n return self.selectors.all() - self\n\n\ndef _eval_lhs_rhs(\n df: CompliantFrameAny, lhs: CompliantExprAny, rhs: CompliantExprAny\n) -> tuple[Sequence[str], Sequence[str]]:\n return lhs._evaluate_output_names(df), rhs._evaluate_output_names(df)\n
.venv\Lib\site-packages\narwhals\_compliant\selectors.py
selectors.py
Python
12,018
0.85
0.302469
0.011765
awesome-app
686
2023-08-13T20:27:35.228782
GPL-3.0
false
4a2406aaacbaa09c6f0fd771b804c587
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, Generic, Protocol\n\nfrom narwhals._compliant.any_namespace import (\n CatNamespace,\n DateTimeNamespace,\n ListNamespace,\n StringNamespace,\n StructNamespace,\n)\nfrom narwhals._compliant.typing import (\n CompliantSeriesT_co,\n EagerSeriesT_co,\n NativeSeriesT,\n NativeSeriesT_co,\n)\nfrom narwhals._translate import FromIterable, FromNative, NumpyConvertible, ToNarwhals\nfrom narwhals._typing_compat import assert_never\nfrom narwhals._utils import (\n _StoresCompliant,\n _StoresNative,\n is_compliant_series,\n is_sized_multi_index_selector,\n unstable,\n)\n\nif TYPE_CHECKING:\n from collections.abc import Iterable, Iterator, Mapping, Sequence\n from types import ModuleType\n\n import pandas as pd\n import polars as pl\n import pyarrow as pa\n from typing_extensions import Self\n\n from narwhals._compliant.dataframe import CompliantDataFrame\n from narwhals._compliant.expr import CompliantExpr, EagerExpr\n from narwhals._compliant.namespace import CompliantNamespace, EagerNamespace\n from narwhals._utils import Implementation, Version, _FullContext\n from narwhals.dtypes import DType\n from narwhals.series import Series\n from narwhals.typing import (\n ClosedInterval,\n FillNullStrategy,\n Into1DArray,\n IntoDType,\n MultiIndexSelector,\n NonNestedLiteral,\n NumericLiteral,\n RankMethod,\n RollingInterpolationMethod,\n SizedMultiIndexSelector,\n TemporalLiteral,\n _1DArray,\n _SliceIndex,\n )\n\n__all__ = ["CompliantSeries", "EagerSeries"]\n\n\nclass CompliantSeries(\n NumpyConvertible["_1DArray", "Into1DArray"],\n FromIterable,\n FromNative[NativeSeriesT],\n ToNarwhals["Series[NativeSeriesT]"],\n Protocol[NativeSeriesT],\n):\n _implementation: Implementation\n _backend_version: tuple[int, ...]\n _version: Version\n\n @property\n def dtype(self) -> DType: ...\n @property\n def name(self) -> str: ...\n @property\n def native(self) -> NativeSeriesT: ...\n def __narwhals_series__(self) -> 
Self:\n return self\n\n def __narwhals_namespace__(self) -> CompliantNamespace[Any, Any]: ...\n def __native_namespace__(self) -> ModuleType: ...\n def __array__(self, dtype: Any, *, copy: bool | None) -> _1DArray: ...\n def __contains__(self, other: Any) -> bool: ...\n def __getitem__(self, item: MultiIndexSelector[Self]) -> Any: ...\n def __iter__(self) -> Iterator[Any]: ...\n def __len__(self) -> int:\n return len(self.native)\n\n def _with_native(self, series: Any) -> Self: ...\n def _with_version(self, version: Version) -> Self: ...\n def _to_expr(self) -> CompliantExpr[Any, Self]: ...\n @classmethod\n def from_native(cls, data: NativeSeriesT, /, *, context: _FullContext) -> Self: ...\n @classmethod\n def from_numpy(cls, data: Into1DArray, /, *, context: _FullContext) -> Self: ...\n @classmethod\n def from_iterable(\n cls,\n data: Iterable[Any],\n /,\n *,\n context: _FullContext,\n name: str = "",\n dtype: IntoDType | None = None,\n ) -> Self: ...\n def to_narwhals(self) -> Series[NativeSeriesT]:\n return self._version.series(self, level="full")\n\n # Operators\n def __add__(self, other: Any) -> Self: ...\n def __and__(self, other: Any) -> Self: ...\n def __eq__(self, other: object) -> Self: ... # type: ignore[override]\n def __floordiv__(self, other: Any) -> Self: ...\n def __ge__(self, other: Any) -> Self: ...\n def __gt__(self, other: Any) -> Self: ...\n def __invert__(self) -> Self: ...\n def __le__(self, other: Any) -> Self: ...\n def __lt__(self, other: Any) -> Self: ...\n def __mod__(self, other: Any) -> Self: ...\n def __mul__(self, other: Any) -> Self: ...\n def __ne__(self, other: object) -> Self: ... 
# type: ignore[override]\n def __or__(self, other: Any) -> Self: ...\n def __pow__(self, other: Any) -> Self: ...\n def __radd__(self, other: Any) -> Self: ...\n def __rand__(self, other: Any) -> Self: ...\n def __rfloordiv__(self, other: Any) -> Self: ...\n def __rmod__(self, other: Any) -> Self: ...\n def __rmul__(self, other: Any) -> Self: ...\n def __ror__(self, other: Any) -> Self: ...\n def __rpow__(self, other: Any) -> Self: ...\n def __rsub__(self, other: Any) -> Self: ...\n def __rtruediv__(self, other: Any) -> Self: ...\n def __sub__(self, other: Any) -> Self: ...\n def __truediv__(self, other: Any) -> Self: ...\n\n def abs(self) -> Self: ...\n def alias(self, name: str) -> Self: ...\n def all(self) -> bool: ...\n def any(self) -> bool: ...\n def arg_max(self) -> int: ...\n def arg_min(self) -> int: ...\n def arg_true(self) -> Self: ...\n def cast(self, dtype: IntoDType) -> Self: ...\n def clip(\n self,\n lower_bound: Self | NumericLiteral | TemporalLiteral | None,\n upper_bound: Self | NumericLiteral | TemporalLiteral | None,\n ) -> Self: ...\n def count(self) -> int: ...\n def cum_count(self, *, reverse: bool) -> Self: ...\n def cum_max(self, *, reverse: bool) -> Self: ...\n def cum_min(self, *, reverse: bool) -> Self: ...\n def cum_prod(self, *, reverse: bool) -> Self: ...\n def cum_sum(self, *, reverse: bool) -> Self: ...\n def diff(self) -> Self: ...\n def drop_nulls(self) -> Self: ...\n def ewm_mean(\n self,\n *,\n com: float | None,\n span: float | None,\n half_life: float | None,\n alpha: float | None,\n adjust: bool,\n min_samples: int,\n ignore_nulls: bool,\n ) -> Self: ...\n def exp(self) -> Self: ...\n def sqrt(self) -> Self: ...\n def fill_null(\n self,\n value: Self | NonNestedLiteral,\n strategy: FillNullStrategy | None,\n limit: int | None,\n ) -> Self: ...\n def filter(self, predicate: Any) -> Self: ...\n def gather_every(self, n: int, offset: int) -> Self: ...\n @unstable\n def hist(\n self,\n bins: list[float | int] | None,\n *,\n 
bin_count: int | None,\n include_breakpoint: bool,\n ) -> CompliantDataFrame[Self, Any, Any, Any]: ...\n def head(self, n: int) -> Self: ...\n def is_between(\n self, lower_bound: Any, upper_bound: Any, closed: ClosedInterval\n ) -> Self: ...\n def is_finite(self) -> Self: ...\n def is_first_distinct(self) -> Self: ...\n def is_in(self, other: Any) -> Self: ...\n def is_last_distinct(self) -> Self: ...\n def is_nan(self) -> Self: ...\n def is_null(self) -> Self: ...\n def is_sorted(self, *, descending: bool) -> bool: ...\n def is_unique(self) -> Self: ...\n def item(self, index: int | None) -> Any: ...\n def kurtosis(self) -> float | None: ...\n def len(self) -> int: ...\n def log(self, base: float) -> Self: ...\n def max(self) -> Any: ...\n def mean(self) -> float: ...\n def median(self) -> float: ...\n def min(self) -> Any: ...\n def mode(self) -> Self: ...\n def n_unique(self) -> int: ...\n def null_count(self) -> int: ...\n def quantile(\n self, quantile: float, interpolation: RollingInterpolationMethod\n ) -> float: ...\n def rank(self, method: RankMethod, *, descending: bool) -> Self: ...\n def replace_strict(\n self,\n old: Sequence[Any] | Mapping[Any, Any],\n new: Sequence[Any],\n *,\n return_dtype: IntoDType | None,\n ) -> Self: ...\n def rolling_mean(\n self, window_size: int, *, min_samples: int, center: bool\n ) -> Self: ...\n def rolling_std(\n self, window_size: int, *, min_samples: int, center: bool, ddof: int\n ) -> Self: ...\n def rolling_sum(\n self, window_size: int, *, min_samples: int, center: bool\n ) -> Self: ...\n def rolling_var(\n self, window_size: int, *, min_samples: int, center: bool, ddof: int\n ) -> Self: ...\n def round(self, decimals: int) -> Self: ...\n def sample(\n self,\n n: int | None,\n *,\n fraction: float | None,\n with_replacement: bool,\n seed: int | None,\n ) -> Self: ...\n def scatter(self, indices: int | Sequence[int], values: Any) -> Self: ...\n def shift(self, n: int) -> Self: ...\n def skew(self) -> float | None: 
...\n def sort(self, *, descending: bool, nulls_last: bool) -> Self: ...\n def std(self, *, ddof: int) -> float: ...\n def sum(self) -> float: ...\n def tail(self, n: int) -> Self: ...\n def to_arrow(self) -> pa.Array[Any]: ...\n def to_dummies(\n self, *, separator: str, drop_first: bool\n ) -> CompliantDataFrame[Self, Any, Any, Any]: ...\n def to_frame(self) -> CompliantDataFrame[Self, Any, Any, Any]: ...\n def to_list(self) -> list[Any]: ...\n def to_pandas(self) -> pd.Series[Any]: ...\n def to_polars(self) -> pl.Series: ...\n def unique(self, *, maintain_order: bool) -> Self: ...\n def value_counts(\n self, *, sort: bool, parallel: bool, name: str | None, normalize: bool\n ) -> CompliantDataFrame[Self, Any, Any, Any]: ...\n def var(self, *, ddof: int) -> float: ...\n def zip_with(self, mask: Any, other: Any) -> Self: ...\n\n @property\n def str(self) -> Any: ...\n @property\n def dt(self) -> Any: ...\n @property\n def cat(self) -> Any: ...\n @property\n def list(self) -> Any: ...\n @property\n def struct(self) -> Any: ...\n\n\nclass EagerSeries(CompliantSeries[NativeSeriesT], Protocol[NativeSeriesT]):\n _native_series: Any\n _implementation: Implementation\n _backend_version: tuple[int, ...]\n _version: Version\n _broadcast: bool\n\n @classmethod\n def _align_full_broadcast(cls, *series: Self) -> Sequence[Self]:\n """Ensure all of `series` have the same length (and index if `pandas`).\n\n Scalars get broadcasted to the full length of the longest Series.\n\n This is useful when you need to construct a full Series anyway, such as:\n\n DataFrame.select(...)\n\n It should not be used in binary operations, such as:\n\n nw.col("a") - nw.col("a").mean()\n\n because then it's more efficient to extract the right-hand-side's single element as a scalar.\n """\n ...\n\n def _from_scalar(self, value: Any) -> Self:\n return self.from_iterable([value], name=self.name, context=self)\n\n def _with_native(\n self, series: NativeSeriesT, *, preserve_broadcast: bool = False\n ) -> 
Self:\n """Return a new `CompliantSeries`, wrapping the native `series`.\n\n In cases when operations are known to not affect whether a result should\n be broadcast, we can pass `preserve_broadcast=True`.\n Set this with care - it should only be set for unary expressions which don't\n change length or order, such as `.alias` or `.fill_null`. If in doubt, don't\n set it, you probably don't need it.\n """\n ...\n\n def __narwhals_namespace__(\n self,\n ) -> EagerNamespace[Any, Self, Any, Any, NativeSeriesT]: ...\n\n def _to_expr(self) -> EagerExpr[Any, Any]:\n return self.__narwhals_namespace__()._expr._from_series(self) # type: ignore[no-any-return]\n\n def _gather(self, rows: SizedMultiIndexSelector[NativeSeriesT]) -> Self: ...\n def _gather_slice(self, rows: _SliceIndex | range) -> Self: ...\n def __getitem__(self, item: MultiIndexSelector[Self]) -> Self:\n if isinstance(item, (slice, range)):\n return self._gather_slice(item)\n elif is_compliant_series(item):\n return self._gather(item.native)\n elif is_sized_multi_index_selector(item):\n return self._gather(item)\n else:\n assert_never(item)\n\n @property\n def str(self) -> EagerSeriesStringNamespace[Self, NativeSeriesT]: ...\n @property\n def dt(self) -> EagerSeriesDateTimeNamespace[Self, NativeSeriesT]: ...\n @property\n def cat(self) -> EagerSeriesCatNamespace[Self, NativeSeriesT]: ...\n @property\n def list(self) -> EagerSeriesListNamespace[Self, NativeSeriesT]: ...\n @property\n def struct(self) -> EagerSeriesStructNamespace[Self, NativeSeriesT]: ...\n\n\nclass _SeriesNamespace( # type: ignore[misc]\n _StoresCompliant[CompliantSeriesT_co],\n _StoresNative[NativeSeriesT_co],\n Protocol[CompliantSeriesT_co, NativeSeriesT_co],\n):\n _compliant_series: CompliantSeriesT_co\n\n @property\n def compliant(self) -> CompliantSeriesT_co:\n return self._compliant_series\n\n @property\n def native(self) -> NativeSeriesT_co:\n return self._compliant_series.native # type: ignore[no-any-return]\n\n def with_native(self, 
series: Any, /) -> CompliantSeriesT_co:\n return self.compliant._with_native(series)\n\n\nclass EagerSeriesNamespace(\n _SeriesNamespace[EagerSeriesT_co, NativeSeriesT_co],\n Generic[EagerSeriesT_co, NativeSeriesT_co],\n):\n _compliant_series: EagerSeriesT_co\n\n def __init__(self, series: EagerSeriesT_co, /) -> None:\n self._compliant_series = series\n\n\nclass EagerSeriesCatNamespace( # type: ignore[misc]\n _SeriesNamespace[EagerSeriesT_co, NativeSeriesT_co],\n CatNamespace[EagerSeriesT_co],\n Protocol[EagerSeriesT_co, NativeSeriesT_co],\n): ...\n\n\nclass EagerSeriesDateTimeNamespace( # type: ignore[misc]\n _SeriesNamespace[EagerSeriesT_co, NativeSeriesT_co],\n DateTimeNamespace[EagerSeriesT_co],\n Protocol[EagerSeriesT_co, NativeSeriesT_co],\n): ...\n\n\nclass EagerSeriesListNamespace( # type: ignore[misc]\n _SeriesNamespace[EagerSeriesT_co, NativeSeriesT_co],\n ListNamespace[EagerSeriesT_co],\n Protocol[EagerSeriesT_co, NativeSeriesT_co],\n): ...\n\n\nclass EagerSeriesStringNamespace( # type: ignore[misc]\n _SeriesNamespace[EagerSeriesT_co, NativeSeriesT_co],\n StringNamespace[EagerSeriesT_co],\n Protocol[EagerSeriesT_co, NativeSeriesT_co],\n): ...\n\n\nclass EagerSeriesStructNamespace( # type: ignore[misc]\n _SeriesNamespace[EagerSeriesT_co, NativeSeriesT_co],\n StructNamespace[EagerSeriesT_co],\n Protocol[EagerSeriesT_co, NativeSeriesT_co],\n): ...\n
.venv\Lib\site-packages\narwhals\_compliant\series.py
series.py
Python
14,017
0.95
0.363415
0.016575
awesome-app
649
2023-11-13T00:34:42.839063
Apache-2.0
false
3f0bd6314ccac1738ce33058d32a9692
from __future__ import annotations\n\nfrom collections.abc import Sequence\nfrom typing import TYPE_CHECKING, Any, Callable, TypedDict, TypeVar\n\nif TYPE_CHECKING:\n from typing_extensions import TypeAlias\n\n from narwhals._compliant.dataframe import (\n CompliantDataFrame,\n CompliantLazyFrame,\n EagerDataFrame,\n )\n from narwhals._compliant.expr import (\n CompliantExpr,\n DepthTrackingExpr,\n EagerExpr,\n LazyExpr,\n NativeExpr,\n )\n from narwhals._compliant.namespace import CompliantNamespace, EagerNamespace\n from narwhals._compliant.series import CompliantSeries, EagerSeries\n from narwhals._compliant.window import WindowInputs\n from narwhals.typing import FillNullStrategy, NativeFrame, NativeSeries, RankMethod\n\n class ScalarKwargs(TypedDict, total=False):\n """Non-expressifiable args which we may need to reuse in `agg` or `over`."""\n\n center: int\n ddof: int\n descending: bool\n limit: int | None\n method: RankMethod\n min_samples: int\n n: int\n reverse: bool\n strategy: FillNullStrategy | None\n window_size: int\n\n\n__all__ = [\n "AliasName",\n "AliasNames",\n "CompliantDataFrameT",\n "CompliantFrameT",\n "CompliantLazyFrameT",\n "CompliantSeriesT",\n "EvalNames",\n "EvalSeries",\n "IntoCompliantExpr",\n "NativeFrameT_co",\n "NativeSeriesT_co",\n]\nCompliantExprAny: TypeAlias = "CompliantExpr[Any, Any]"\nCompliantSeriesAny: TypeAlias = "CompliantSeries[Any]"\nCompliantSeriesOrNativeExprAny: TypeAlias = "CompliantSeriesAny | NativeExpr"\nCompliantDataFrameAny: TypeAlias = "CompliantDataFrame[Any, Any, Any, Any]"\nCompliantLazyFrameAny: TypeAlias = "CompliantLazyFrame[Any, Any, Any]"\nCompliantFrameAny: TypeAlias = "CompliantDataFrameAny | CompliantLazyFrameAny"\nCompliantNamespaceAny: TypeAlias = "CompliantNamespace[Any, Any]"\n\nDepthTrackingExprAny: TypeAlias = "DepthTrackingExpr[Any, Any]"\n\nEagerDataFrameAny: TypeAlias = "EagerDataFrame[Any, Any, Any, Any]"\nEagerSeriesAny: TypeAlias = "EagerSeries[Any]"\nEagerExprAny: TypeAlias = 
"EagerExpr[Any, Any]"\nEagerNamespaceAny: TypeAlias = "EagerNamespace[EagerDataFrameAny, EagerSeriesAny, EagerExprAny, NativeFrame, NativeSeries]"\n\nLazyExprAny: TypeAlias = "LazyExpr[Any, Any]"\n\nNativeExprT = TypeVar("NativeExprT", bound="NativeExpr")\nNativeExprT_co = TypeVar("NativeExprT_co", bound="NativeExpr", covariant=True)\nNativeSeriesT = TypeVar("NativeSeriesT", bound="NativeSeries")\nNativeSeriesT_co = TypeVar("NativeSeriesT_co", bound="NativeSeries", covariant=True)\nNativeSeriesT_contra = TypeVar(\n "NativeSeriesT_contra", bound="NativeSeries", contravariant=True\n)\nNativeFrameT = TypeVar("NativeFrameT", bound="NativeFrame")\nNativeFrameT_co = TypeVar("NativeFrameT_co", bound="NativeFrame", covariant=True)\nNativeFrameT_contra = TypeVar(\n "NativeFrameT_contra", bound="NativeFrame", contravariant=True\n)\n\nCompliantExprT = TypeVar("CompliantExprT", bound=CompliantExprAny)\nCompliantExprT_co = TypeVar("CompliantExprT_co", bound=CompliantExprAny, covariant=True)\nCompliantExprT_contra = TypeVar(\n "CompliantExprT_contra", bound=CompliantExprAny, contravariant=True\n)\nCompliantSeriesT = TypeVar("CompliantSeriesT", bound=CompliantSeriesAny)\nCompliantSeriesT_co = TypeVar(\n "CompliantSeriesT_co", bound=CompliantSeriesAny, covariant=True\n)\nCompliantSeriesOrNativeExprT = TypeVar(\n "CompliantSeriesOrNativeExprT", bound=CompliantSeriesOrNativeExprAny\n)\nCompliantSeriesOrNativeExprT_co = TypeVar(\n "CompliantSeriesOrNativeExprT_co",\n bound=CompliantSeriesOrNativeExprAny,\n covariant=True,\n)\nCompliantFrameT = TypeVar("CompliantFrameT", bound=CompliantFrameAny)\nCompliantFrameT_co = TypeVar(\n "CompliantFrameT_co", bound=CompliantFrameAny, covariant=True\n)\nCompliantDataFrameT = TypeVar("CompliantDataFrameT", bound=CompliantDataFrameAny)\nCompliantDataFrameT_co = TypeVar(\n "CompliantDataFrameT_co", bound=CompliantDataFrameAny, covariant=True\n)\nCompliantLazyFrameT = TypeVar("CompliantLazyFrameT", 
bound=CompliantLazyFrameAny)\nCompliantLazyFrameT_co = TypeVar(\n "CompliantLazyFrameT_co", bound=CompliantLazyFrameAny, covariant=True\n)\nCompliantNamespaceT = TypeVar("CompliantNamespaceT", bound=CompliantNamespaceAny)\nCompliantNamespaceT_co = TypeVar(\n "CompliantNamespaceT_co", bound=CompliantNamespaceAny, covariant=True\n)\n\nIntoCompliantExpr: TypeAlias = "CompliantExpr[CompliantFrameT, CompliantSeriesOrNativeExprT_co] | CompliantSeriesOrNativeExprT_co"\n\nDepthTrackingExprT = TypeVar("DepthTrackingExprT", bound=DepthTrackingExprAny)\nDepthTrackingExprT_contra = TypeVar(\n "DepthTrackingExprT_contra", bound=DepthTrackingExprAny, contravariant=True\n)\n\nEagerExprT = TypeVar("EagerExprT", bound=EagerExprAny)\nEagerExprT_contra = TypeVar("EagerExprT_contra", bound=EagerExprAny, contravariant=True)\nEagerSeriesT = TypeVar("EagerSeriesT", bound=EagerSeriesAny)\nEagerSeriesT_co = TypeVar("EagerSeriesT_co", bound=EagerSeriesAny, covariant=True)\n\n# NOTE: `pyright` gives false (8) positives if this uses `EagerDataFrameAny`?\nEagerDataFrameT = TypeVar("EagerDataFrameT", bound="EagerDataFrame[Any, Any, Any, Any]")\n\nLazyExprT = TypeVar("LazyExprT", bound=LazyExprAny)\nLazyExprT_contra = TypeVar("LazyExprT_contra", bound=LazyExprAny, contravariant=True)\n\nAliasNames: TypeAlias = Callable[[Sequence[str]], Sequence[str]]\n"""A function aliasing a *sequence* of column names."""\n\nAliasName: TypeAlias = Callable[[str], str]\n"""A function aliasing a *single* column name."""\n\nEvalSeries: TypeAlias = Callable[\n [CompliantFrameT], Sequence[CompliantSeriesOrNativeExprT]\n]\n"""A function from a `Frame` to a sequence of `Series`*.\n\nSee [underwater unicorn magic](https://narwhals-dev.github.io/narwhals/how_it_works/).\n"""\n\nEvalNames: TypeAlias = Callable[[CompliantFrameT], Sequence[str]]\n"""A function from a `Frame` to a sequence of columns names *before* any aliasing takes place."""\n\nWindowFunction: TypeAlias = (\n "Callable[[CompliantFrameT, 
WindowInputs[NativeExprT]], Sequence[NativeExprT]]"\n)\n"""A function evaluated with `over(partition_by=..., order_by=...)`."""\n
.venv\Lib\site-packages\narwhals\_compliant\typing.py
typing.py
Python
6,131
0.95
0.051282
0.007519
node-utils
647
2024-04-01T17:11:54.605834
MIT
false
4427db78888bc6118706eac340291f12
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, Callable, TypeVar, cast\n\nfrom narwhals._compliant.expr import CompliantExpr\nfrom narwhals._compliant.typing import (\n CompliantExprAny,\n CompliantFrameAny,\n CompliantLazyFrameT,\n CompliantSeriesOrNativeExprAny,\n EagerDataFrameT,\n EagerExprT,\n EagerSeriesT,\n LazyExprAny,\n NativeExprT,\n NativeSeriesT,\n WindowFunction,\n)\nfrom narwhals._typing_compat import Protocol38\n\nif TYPE_CHECKING:\n from collections.abc import Sequence\n\n from typing_extensions import Self, TypeAlias\n\n from narwhals._compliant.typing import EvalSeries, ScalarKwargs\n from narwhals._compliant.window import WindowInputs\n from narwhals._utils import Implementation, Version, _FullContext\n from narwhals.typing import NonNestedLiteral\n\n\n__all__ = ["CompliantThen", "CompliantWhen", "EagerWhen", "LazyThen", "LazyWhen"]\n\nExprT = TypeVar("ExprT", bound=CompliantExprAny)\nLazyExprT = TypeVar("LazyExprT", bound=LazyExprAny)\nSeriesT = TypeVar("SeriesT", bound=CompliantSeriesOrNativeExprAny)\nFrameT = TypeVar("FrameT", bound=CompliantFrameAny)\n\nScalar: TypeAlias = Any\n"""A native literal value."""\n\nIntoExpr: TypeAlias = "SeriesT | ExprT | NonNestedLiteral | Scalar"\n"""Anything that is convertible into a `CompliantExpr`."""\n\n\nclass CompliantWhen(Protocol38[FrameT, SeriesT, ExprT]):\n _condition: ExprT\n _then_value: IntoExpr[SeriesT, ExprT]\n _otherwise_value: IntoExpr[SeriesT, ExprT] | None\n _implementation: Implementation\n _backend_version: tuple[int, ...]\n _version: Version\n\n @property\n def _then(self) -> type[CompliantThen[FrameT, SeriesT, ExprT]]: ...\n def __call__(self, compliant_frame: FrameT, /) -> Sequence[SeriesT]: ...\n def _window_function(\n self, compliant_frame: FrameT, window_inputs: WindowInputs[Any]\n ) -> Sequence[SeriesT]: ...\n\n def then(\n self, value: IntoExpr[SeriesT, ExprT], /\n ) -> CompliantThen[FrameT, SeriesT, ExprT]:\n return self._then.from_when(self, 
value)\n\n @classmethod\n def from_expr(cls, condition: ExprT, /, *, context: _FullContext) -> Self:\n obj = cls.__new__(cls)\n obj._condition = condition\n obj._then_value = None\n obj._otherwise_value = None\n obj._implementation = context._implementation\n obj._backend_version = context._backend_version\n obj._version = context._version\n return obj\n\n\nclass CompliantThen(CompliantExpr[FrameT, SeriesT], Protocol38[FrameT, SeriesT, ExprT]):\n _call: EvalSeries[FrameT, SeriesT]\n _when_value: CompliantWhen[FrameT, SeriesT, ExprT]\n _function_name: str\n _depth: int\n _implementation: Implementation\n _backend_version: tuple[int, ...]\n _version: Version\n _scalar_kwargs: ScalarKwargs\n\n @classmethod\n def from_when(\n cls,\n when: CompliantWhen[FrameT, SeriesT, ExprT],\n then: IntoExpr[SeriesT, ExprT],\n /,\n ) -> Self:\n when._then_value = then\n obj = cls.__new__(cls)\n obj._call = when\n obj._when_value = when\n obj._depth = 0\n obj._function_name = "whenthen"\n obj._evaluate_output_names = getattr(\n then, "_evaluate_output_names", lambda _df: ["literal"]\n )\n obj._alias_output_names = getattr(then, "_alias_output_names", None)\n obj._implementation = when._implementation\n obj._backend_version = when._backend_version\n obj._version = when._version\n obj._scalar_kwargs = {}\n return obj\n\n def otherwise(self, otherwise: IntoExpr[SeriesT, ExprT], /) -> ExprT:\n self._when_value._otherwise_value = otherwise\n self._function_name = "whenotherwise"\n return cast("ExprT", self)\n\n\nclass LazyThen(\n CompliantThen[CompliantLazyFrameT, NativeExprT, LazyExprT],\n Protocol38[CompliantLazyFrameT, NativeExprT, LazyExprT],\n):\n _window_function: WindowFunction[CompliantLazyFrameT, NativeExprT] | None\n\n @classmethod\n def from_when(\n cls,\n when: CompliantWhen[CompliantLazyFrameT, NativeExprT, LazyExprT],\n then: IntoExpr[NativeExprT, LazyExprT],\n /,\n ) -> Self:\n when._then_value = then\n obj = cls.__new__(cls)\n obj._call = when\n\n obj._window_function = 
when._window_function\n\n obj._when_value = when\n obj._depth = 0\n obj._function_name = "whenthen"\n obj._evaluate_output_names = getattr(\n then, "_evaluate_output_names", lambda _df: ["literal"]\n )\n obj._alias_output_names = getattr(then, "_alias_output_names", None)\n obj._implementation = when._implementation\n obj._backend_version = when._backend_version\n obj._version = when._version\n obj._scalar_kwargs = {}\n return obj\n\n\nclass EagerWhen(\n CompliantWhen[EagerDataFrameT, EagerSeriesT, EagerExprT],\n Protocol38[EagerDataFrameT, EagerSeriesT, EagerExprT, NativeSeriesT],\n):\n def _if_then_else(\n self,\n when: NativeSeriesT,\n then: NativeSeriesT,\n otherwise: NativeSeriesT | NonNestedLiteral | Scalar,\n /,\n ) -> NativeSeriesT: ...\n\n def __call__(self, df: EagerDataFrameT, /) -> Sequence[EagerSeriesT]:\n is_expr = self._condition._is_expr\n when: EagerSeriesT = self._condition(df)[0]\n then: EagerSeriesT\n align = when._align_full_broadcast\n\n if is_expr(self._then_value):\n then = self._then_value(df)[0]\n else:\n then = when.alias("literal")._from_scalar(self._then_value)\n then._broadcast = True\n\n if is_expr(self._otherwise_value):\n otherwise = self._otherwise_value(df)[0]\n when, then, otherwise = align(when, then, otherwise)\n result = self._if_then_else(when.native, then.native, otherwise.native)\n else:\n when, then = align(when, then)\n result = self._if_then_else(when.native, then.native, self._otherwise_value)\n return [then._with_native(result)]\n\n\nclass LazyWhen(\n CompliantWhen[CompliantLazyFrameT, NativeExprT, LazyExprT],\n Protocol38[CompliantLazyFrameT, NativeExprT, LazyExprT],\n):\n when: Callable[..., NativeExprT]\n lit: Callable[..., NativeExprT]\n\n def __call__(self, df: CompliantLazyFrameT) -> Sequence[NativeExprT]:\n is_expr = self._condition._is_expr\n when = self.when\n lit = self.lit\n condition = df._evaluate_expr(self._condition)\n then_ = self._then_value\n then = df._evaluate_expr(then_) if is_expr(then_) else 
lit(then_)\n other_ = self._otherwise_value\n if other_ is None:\n result = when(condition, then)\n else:\n otherwise = df._evaluate_expr(other_) if is_expr(other_) else lit(other_)\n result = when(condition, then).otherwise(otherwise) # type: ignore # noqa: PGH003\n return [result]\n\n @classmethod\n def from_expr(cls, condition: LazyExprT, /, *, context: _FullContext) -> Self:\n obj = cls.__new__(cls)\n obj._condition = condition\n\n obj._then_value = None\n obj._otherwise_value = None\n obj._implementation = context._implementation\n obj._backend_version = context._backend_version\n obj._version = context._version\n return obj\n\n def _window_function(\n self, df: CompliantLazyFrameT, window_inputs: WindowInputs[NativeExprT]\n ) -> Sequence[NativeExprT]:\n is_expr = self._condition._is_expr\n condition = self._condition.window_function(df, window_inputs)[0]\n then_ = self._then_value\n then = (\n then_.window_function(df, window_inputs)[0]\n if is_expr(then_)\n else self.lit(then_)\n )\n\n other_ = self._otherwise_value\n if other_ is None:\n result = self.when(condition, then)\n else:\n other = (\n other_.window_function(df, window_inputs)[0]\n if is_expr(other_)\n else self.lit(other_)\n )\n result = self.when(condition, then).otherwise(other) # type: ignore # noqa: PGH003\n return [result]\n
.venv\Lib\site-packages\narwhals\_compliant\when_then.py
when_then.py
Python
8,172
0.95
0.1125
0
vue-tools
86
2025-04-09T20:20:09.762698
BSD-3-Clause
false
6486e3f6ade0446852e6871a3c612869
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Generic\n\nfrom narwhals._compliant.typing import NativeExprT_co\n\nif TYPE_CHECKING:\n from collections.abc import Sequence\n\n\nclass WindowInputs(Generic[NativeExprT_co]):\n __slots__ = ("order_by", "partition_by")\n\n def __init__(\n self, partition_by: Sequence[str | NativeExprT_co], order_by: Sequence[str]\n ) -> None:\n self.partition_by = partition_by\n self.order_by = order_by\n
.venv\Lib\site-packages\narwhals\_compliant\window.py
window.py
Python
477
0.85
0.166667
0
node-utils
578
2023-08-27T09:36:53.388722
BSD-3-Clause
false
14b364f0a0ff41a572759e2cd4461edc
from __future__ import annotations\n\nfrom narwhals._compliant.dataframe import (\n CompliantDataFrame,\n CompliantLazyFrame,\n EagerDataFrame,\n)\nfrom narwhals._compliant.expr import CompliantExpr, EagerExpr, LazyExpr\nfrom narwhals._compliant.group_by import (\n CompliantGroupBy,\n DepthTrackingGroupBy,\n EagerGroupBy,\n LazyGroupBy,\n)\nfrom narwhals._compliant.namespace import (\n CompliantNamespace,\n EagerNamespace,\n LazyNamespace,\n)\nfrom narwhals._compliant.selectors import (\n CompliantSelector,\n CompliantSelectorNamespace,\n EagerSelectorNamespace,\n LazySelectorNamespace,\n)\nfrom narwhals._compliant.series import CompliantSeries, EagerSeries\nfrom narwhals._compliant.typing import (\n CompliantExprT,\n CompliantFrameT,\n CompliantSeriesOrNativeExprT_co,\n CompliantSeriesT,\n EagerDataFrameT,\n EagerSeriesT,\n EvalNames,\n EvalSeries,\n IntoCompliantExpr,\n NativeFrameT_co,\n NativeSeriesT_co,\n)\nfrom narwhals._compliant.when_then import (\n CompliantThen,\n CompliantWhen,\n EagerWhen,\n LazyThen,\n LazyWhen,\n)\n\n__all__ = [\n "CompliantDataFrame",\n "CompliantExpr",\n "CompliantExprT",\n "CompliantFrameT",\n "CompliantGroupBy",\n "CompliantLazyFrame",\n "CompliantNamespace",\n "CompliantSelector",\n "CompliantSelectorNamespace",\n "CompliantSeries",\n "CompliantSeriesOrNativeExprT_co",\n "CompliantSeriesT",\n "CompliantThen",\n "CompliantWhen",\n "DepthTrackingGroupBy",\n "EagerDataFrame",\n "EagerDataFrameT",\n "EagerExpr",\n "EagerGroupBy",\n "EagerNamespace",\n "EagerSelectorNamespace",\n "EagerSeries",\n "EagerSeriesT",\n "EagerWhen",\n "EvalNames",\n "EvalSeries",\n "IntoCompliantExpr",\n "LazyExpr",\n "LazyGroupBy",\n "LazyNamespace",\n "LazySelectorNamespace",\n "LazyThen",\n "LazyWhen",\n "NativeFrameT_co",\n "NativeSeriesT_co",\n]\n
.venv\Lib\site-packages\narwhals\_compliant\__init__.py
__init__.py
Python
1,901
0.85
0
0
node-utils
41
2025-07-06T04:11:59.496910
GPL-3.0
false
8c68a01129f0d0c2a0d251f5c3fafeb9
\n\n
.venv\Lib\site-packages\narwhals\_compliant\__pycache__\any_namespace.cpython-313.pyc
any_namespace.cpython-313.pyc
Other
8,591
0.95
0
0
vue-tools
640
2024-06-25T22:14:34.387760
Apache-2.0
false
1e2b4af5adf24345c2341849d0091a34
\n\n
.venv\Lib\site-packages\narwhals\_compliant\__pycache__\dataframe.cpython-313.pyc
dataframe.cpython-313.pyc
Other
29,028
0.95
0.010811
0
awesome-app
619
2025-01-16T23:50:04.206089
MIT
false
90053b962d285cad060d20bb5cd83923
\n\n
.venv\Lib\site-packages\narwhals\_compliant\__pycache__\expr.cpython-313.pyc
expr.cpython-313.pyc
Other
70,371
0.75
0.022222
0.002212
vue-tools
925
2024-11-23T02:14:13.849058
GPL-3.0
false
b090355d8ca3d0743117e69924ec43c3
\n\n
.venv\Lib\site-packages\narwhals\_compliant\__pycache__\group_by.cpython-313.pyc
group_by.cpython-313.pyc
Other
10,868
0.95
0.018868
0
awesome-app
611
2024-11-23T17:57:46.866036
BSD-3-Clause
false
29c63ed4eff746821e3c0a31d1295814
\n\n
.venv\Lib\site-packages\narwhals\_compliant\__pycache__\namespace.cpython-313.pyc
namespace.cpython-313.pyc
Other
11,634
0.95
0
0.023256
node-utils
464
2023-07-16T17:13:05.284644
Apache-2.0
false
079bc3cbb84458ba6103d9b29fb2075b
\n\n
.venv\Lib\site-packages\narwhals\_compliant\__pycache__\selectors.cpython-313.pyc
selectors.cpython-313.pyc
Other
21,302
0.95
0
0
node-utils
516
2025-02-08T09:26:11.193039
GPL-3.0
false
f8cb946ad332289d1a49ff52ee7eae77
\n\n
.venv\Lib\site-packages\narwhals\_compliant\__pycache__\series.cpython-313.pyc
series.cpython-313.pyc
Other
28,480
0.8
0.01227
0.006452
python-kit
281
2023-10-30T04:37:51.463159
BSD-3-Clause
false
e44b8be62fa092d1d7f58c378f732e37
\n\n
.venv\Lib\site-packages\narwhals\_compliant\__pycache__\typing.cpython-313.pyc
typing.cpython-313.pyc
Other
5,479
0.8
0
0
awesome-app
898
2024-10-31T14:08:08.252284
BSD-3-Clause
false
5c64929ef261bf966b4d93f6dc595646
\n\n
.venv\Lib\site-packages\narwhals\_compliant\__pycache__\when_then.cpython-313.pyc
when_then.cpython-313.pyc
Other
11,152
0.95
0
0
vue-tools
211
2023-12-17T02:18:50.960097
Apache-2.0
false
a8ef1762494b645c296ffc434b190ed7
\n\n
.venv\Lib\site-packages\narwhals\_compliant\__pycache__\window.cpython-313.pyc
window.cpython-313.pyc
Other
1,015
0.7
0
0
python-kit
963
2024-09-03T06:43:37.343320
GPL-3.0
false
7dea93eab3edca0338ea1bc329f76828
\n\n
.venv\Lib\site-packages\narwhals\_compliant\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
1,747
0.8
0
0
node-utils
784
2025-03-22T16:35:14.494802
BSD-3-Clause
false
5004a805a9c5323e31470ce41202299b
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any\n\nimport dask.dataframe as dd\nimport pandas as pd\n\nfrom narwhals._dask.utils import add_row_index, evaluate_exprs\nfrom narwhals._expression_parsing import ExprKind\nfrom narwhals._pandas_like.utils import native_to_narwhals_dtype, select_columns_by_name\nfrom narwhals._typing_compat import assert_never\nfrom narwhals._utils import (\n Implementation,\n _remap_full_join_keys,\n check_column_names_are_unique,\n generate_temporary_column_name,\n not_implemented,\n parse_columns_to_drop,\n parse_version,\n validate_backend_version,\n)\nfrom narwhals.typing import CompliantLazyFrame\n\nif TYPE_CHECKING:\n from collections.abc import Iterator, Mapping, Sequence\n from types import ModuleType\n\n import dask.dataframe.dask_expr as dx\n from typing_extensions import Self, TypeAlias, TypeIs\n\n from narwhals._compliant.typing import CompliantDataFrameAny\n from narwhals._dask.expr import DaskExpr\n from narwhals._dask.group_by import DaskLazyGroupBy\n from narwhals._dask.namespace import DaskNamespace\n from narwhals._utils import Version, _FullContext\n from narwhals.dataframe import LazyFrame\n from narwhals.dtypes import DType\n from narwhals.typing import AsofJoinStrategy, JoinStrategy, LazyUniqueKeepStrategy\n\nIncomplete: TypeAlias = "Any"\n"""Using `_pandas_like` utils with `_dask`.\n\nTyping this correctly will complicate the `_pandas_like`-side.\nVery low priority until `dask` adds typing.\n"""\n\n\nclass DaskLazyFrame(\n CompliantLazyFrame["DaskExpr", "dd.DataFrame", "LazyFrame[dd.DataFrame]"]\n):\n def __init__(\n self,\n native_dataframe: dd.DataFrame,\n *,\n backend_version: tuple[int, ...],\n version: Version,\n ) -> None:\n self._native_frame: dd.DataFrame = native_dataframe\n self._backend_version = backend_version\n self._implementation = Implementation.DASK\n self._version = version\n self._cached_schema: dict[str, DType] | None = None\n self._cached_columns: list[str] | None 
= None\n validate_backend_version(self._implementation, self._backend_version)\n\n @staticmethod\n def _is_native(obj: dd.DataFrame | Any) -> TypeIs[dd.DataFrame]:\n return isinstance(obj, dd.DataFrame)\n\n @classmethod\n def from_native(cls, data: dd.DataFrame, /, *, context: _FullContext) -> Self:\n return cls(\n data, backend_version=context._backend_version, version=context._version\n )\n\n def to_narwhals(self) -> LazyFrame[dd.DataFrame]:\n return self._version.lazyframe(self, level="lazy")\n\n def __native_namespace__(self) -> ModuleType:\n if self._implementation is Implementation.DASK:\n return self._implementation.to_native_namespace()\n\n msg = f"Expected dask, got: {type(self._implementation)}" # pragma: no cover\n raise AssertionError(msg)\n\n def __narwhals_namespace__(self) -> DaskNamespace:\n from narwhals._dask.namespace import DaskNamespace\n\n return DaskNamespace(backend_version=self._backend_version, version=self._version)\n\n def __narwhals_lazyframe__(self) -> Self:\n return self\n\n def _with_version(self, version: Version) -> Self:\n return self.__class__(\n self.native, backend_version=self._backend_version, version=version\n )\n\n def _with_native(self, df: Any) -> Self:\n return self.__class__(\n df, backend_version=self._backend_version, version=self._version\n )\n\n def _iter_columns(self) -> Iterator[dx.Series]:\n for _col, ser in self.native.items(): # noqa: PERF102\n yield ser\n\n def with_columns(self, *exprs: DaskExpr) -> Self:\n new_series = evaluate_exprs(self, *exprs)\n return self._with_native(self.native.assign(**dict(new_series)))\n\n def collect(\n self, backend: Implementation | None, **kwargs: Any\n ) -> CompliantDataFrameAny:\n result = self.native.compute(**kwargs)\n\n if backend is None or backend is Implementation.PANDAS:\n from narwhals._pandas_like.dataframe import PandasLikeDataFrame\n\n return PandasLikeDataFrame(\n result,\n implementation=Implementation.PANDAS,\n backend_version=parse_version(pd),\n 
version=self._version,\n validate_column_names=True,\n )\n\n if backend is Implementation.POLARS:\n import polars as pl # ignore-banned-import\n\n from narwhals._polars.dataframe import PolarsDataFrame\n\n return PolarsDataFrame(\n pl.from_pandas(result),\n backend_version=parse_version(pl),\n version=self._version,\n )\n\n if backend is Implementation.PYARROW:\n import pyarrow as pa # ignore-banned-import\n\n from narwhals._arrow.dataframe import ArrowDataFrame\n\n return ArrowDataFrame(\n pa.Table.from_pandas(result),\n backend_version=parse_version(pa),\n version=self._version,\n validate_column_names=True,\n )\n\n msg = f"Unsupported `backend` value: {backend}" # pragma: no cover\n raise ValueError(msg) # pragma: no cover\n\n @property\n def columns(self) -> list[str]:\n if self._cached_columns is None:\n self._cached_columns = (\n list(self.schema)\n if self._cached_schema is not None\n else self.native.columns.tolist()\n )\n return self._cached_columns\n\n def filter(self, predicate: DaskExpr) -> Self:\n # `[0]` is safe as the predicate's expression only returns a single column\n mask = predicate(self)[0]\n return self._with_native(self.native.loc[mask])\n\n def simple_select(self, *column_names: str) -> Self:\n df: Incomplete = self.native\n native = select_columns_by_name(\n df, list(column_names), self._backend_version, self._implementation\n )\n return self._with_native(native)\n\n def aggregate(self, *exprs: DaskExpr) -> Self:\n new_series = evaluate_exprs(self, *exprs)\n df = dd.concat([val.rename(name) for name, val in new_series], axis=1)\n return self._with_native(df)\n\n def select(self, *exprs: DaskExpr) -> Self:\n new_series = evaluate_exprs(self, *exprs)\n df: Incomplete = self.native\n df = select_columns_by_name(\n df.assign(**dict(new_series)),\n [s[0] for s in new_series],\n self._backend_version,\n self._implementation,\n )\n return self._with_native(df)\n\n def drop_nulls(self, subset: Sequence[str] | None) -> Self:\n if subset is None:\n 
return self._with_native(self.native.dropna())\n plx = self.__narwhals_namespace__()\n mask = ~plx.any_horizontal(plx.col(*subset).is_null(), ignore_nulls=True)\n return self.filter(mask)\n\n @property\n def schema(self) -> dict[str, DType]:\n if self._cached_schema is None:\n native_dtypes = self.native.dtypes\n self._cached_schema = {\n col: native_to_narwhals_dtype(\n native_dtypes[col], self._version, self._implementation\n )\n for col in self.native.columns\n }\n return self._cached_schema\n\n def collect_schema(self) -> dict[str, DType]:\n return self.schema\n\n def drop(self, columns: Sequence[str], *, strict: bool) -> Self:\n to_drop = parse_columns_to_drop(self, columns, strict=strict)\n\n return self._with_native(self.native.drop(columns=to_drop))\n\n def with_row_index(self, name: str, order_by: Sequence[str] | None) -> Self:\n # Implementation is based on the following StackOverflow reply:\n # https://stackoverflow.com/questions/60831518/in-dask-how-does-one-add-a-range-of-integersauto-increment-to-a-new-column/60852409#60852409\n if order_by is None:\n return self._with_native(\n add_row_index(\n self.native, name, self._backend_version, self._implementation\n )\n )\n else:\n plx = self.__narwhals_namespace__()\n columns = self.columns\n const_expr = (\n plx.lit(value=1, dtype=None).alias(name).broadcast(ExprKind.LITERAL)\n )\n row_index_expr = (\n plx.col(name)\n .cum_sum(reverse=False)\n .over(partition_by=[], order_by=order_by)\n - 1\n )\n return self.with_columns(const_expr).select(row_index_expr, plx.col(*columns))\n\n def rename(self, mapping: Mapping[str, str]) -> Self:\n return self._with_native(self.native.rename(columns=mapping))\n\n def head(self, n: int) -> Self:\n return self._with_native(self.native.head(n=n, compute=False, npartitions=-1))\n\n def unique(\n self, subset: Sequence[str] | None, *, keep: LazyUniqueKeepStrategy\n ) -> Self:\n if subset and (error := self._check_columns_exist(subset)):\n raise error\n if keep == "none":\n 
subset = subset or self.columns\n token = generate_temporary_column_name(n_bytes=8, columns=subset)\n ser = self.native.groupby(subset).size().rename(token)\n ser = ser[ser == 1]\n unique = ser.reset_index().drop(columns=token)\n result = self.native.merge(unique, on=subset, how="inner")\n else:\n mapped_keep = {"any": "first"}.get(keep, keep)\n result = self.native.drop_duplicates(subset=subset, keep=mapped_keep)\n return self._with_native(result)\n\n def sort(self, *by: str, descending: bool | Sequence[bool], nulls_last: bool) -> Self:\n if isinstance(descending, bool):\n ascending: bool | list[bool] = not descending\n else:\n ascending = [not d for d in descending]\n position = "last" if nulls_last else "first"\n return self._with_native(\n self.native.sort_values(list(by), ascending=ascending, na_position=position)\n )\n\n def _join_inner(\n self, other: Self, *, left_on: Sequence[str], right_on: Sequence[str], suffix: str\n ) -> dd.DataFrame:\n return self.native.merge(\n other.native,\n left_on=left_on,\n right_on=right_on,\n how="inner",\n suffixes=("", suffix),\n )\n\n def _join_left(\n self, other: Self, *, left_on: Sequence[str], right_on: Sequence[str], suffix: str\n ) -> dd.DataFrame:\n result_native = self.native.merge(\n other.native,\n how="left",\n left_on=left_on,\n right_on=right_on,\n suffixes=("", suffix),\n )\n extra = [\n right_key if right_key not in self.columns else f"{right_key}{suffix}"\n for left_key, right_key in zip(left_on, right_on)\n if right_key != left_key\n ]\n return result_native.drop(columns=extra)\n\n def _join_full(\n self, other: Self, *, left_on: Sequence[str], right_on: Sequence[str], suffix: str\n ) -> dd.DataFrame:\n # dask does not retain keys post-join\n # we must append the suffix to each key before-hand\n\n right_on_mapper = _remap_full_join_keys(left_on, right_on, suffix)\n other_native = other.native.rename(columns=right_on_mapper)\n check_column_names_are_unique(other_native.columns)\n right_suffixed = 
list(right_on_mapper.values())\n return self.native.merge(\n other_native,\n left_on=left_on,\n right_on=right_suffixed,\n how="outer",\n suffixes=("", suffix),\n )\n\n def _join_cross(self, other: Self, *, suffix: str) -> dd.DataFrame:\n key_token = generate_temporary_column_name(\n n_bytes=8, columns=(*self.columns, *other.columns)\n )\n return (\n self.native.assign(**{key_token: 0})\n .merge(\n other.native.assign(**{key_token: 0}),\n how="inner",\n left_on=key_token,\n right_on=key_token,\n suffixes=("", suffix),\n )\n .drop(columns=key_token)\n )\n\n def _join_semi(\n self, other: Self, *, left_on: Sequence[str], right_on: Sequence[str]\n ) -> dd.DataFrame:\n other_native = self._join_filter_rename(\n other=other,\n columns_to_select=list(right_on),\n columns_mapping=dict(zip(right_on, left_on)),\n )\n return self.native.merge(\n other_native, how="inner", left_on=left_on, right_on=left_on\n )\n\n def _join_anti(\n self, other: Self, *, left_on: Sequence[str], right_on: Sequence[str]\n ) -> dd.DataFrame:\n indicator_token = generate_temporary_column_name(\n n_bytes=8, columns=(*self.columns, *other.columns)\n )\n other_native = self._join_filter_rename(\n other=other,\n columns_to_select=list(right_on),\n columns_mapping=dict(zip(right_on, left_on)),\n )\n df = self.native.merge(\n other_native,\n how="left",\n indicator=indicator_token, # pyright: ignore[reportArgumentType]\n left_on=left_on,\n right_on=left_on,\n )\n return df[df[indicator_token] == "left_only"].drop(columns=[indicator_token])\n\n def _join_filter_rename(\n self, other: Self, columns_to_select: list[str], columns_mapping: dict[str, str]\n ) -> dd.DataFrame:\n """Helper function to avoid creating extra columns and row duplication.\n\n Used in `"anti"` and `"semi`" join's.\n\n Notice that a native object is returned.\n """\n other_native: Incomplete = other.native\n return (\n select_columns_by_name(\n other_native,\n column_names=columns_to_select,\n backend_version=self._backend_version,\n 
implementation=self._implementation,\n )\n # rename to avoid creating extra columns in join\n .rename(columns=columns_mapping)\n .drop_duplicates()\n )\n\n def join(\n self,\n other: Self,\n *,\n how: JoinStrategy,\n left_on: Sequence[str] | None,\n right_on: Sequence[str] | None,\n suffix: str,\n ) -> Self:\n if how == "cross":\n result = self._join_cross(other=other, suffix=suffix)\n\n elif left_on is None or right_on is None: # pragma: no cover\n raise ValueError(left_on, right_on)\n\n elif how == "inner":\n result = self._join_inner(\n other=other, left_on=left_on, right_on=right_on, suffix=suffix\n )\n elif how == "anti":\n result = self._join_anti(other=other, left_on=left_on, right_on=right_on)\n elif how == "semi":\n result = self._join_semi(other=other, left_on=left_on, right_on=right_on)\n elif how == "left":\n result = self._join_left(\n other=other, left_on=left_on, right_on=right_on, suffix=suffix\n )\n elif how == "full":\n result = self._join_full(\n other=other, left_on=left_on, right_on=right_on, suffix=suffix\n )\n else:\n assert_never(how)\n return self._with_native(result)\n\n def join_asof(\n self,\n other: Self,\n *,\n left_on: str,\n right_on: str,\n by_left: Sequence[str] | None,\n by_right: Sequence[str] | None,\n strategy: AsofJoinStrategy,\n suffix: str,\n ) -> Self:\n plx = self.__native_namespace__()\n return self._with_native(\n plx.merge_asof(\n self.native,\n other.native,\n left_on=left_on,\n right_on=right_on,\n left_by=by_left,\n right_by=by_right,\n direction=strategy,\n suffixes=("", suffix),\n )\n )\n\n def group_by(\n self, keys: Sequence[str] | Sequence[DaskExpr], *, drop_null_keys: bool\n ) -> DaskLazyGroupBy:\n from narwhals._dask.group_by import DaskLazyGroupBy\n\n return DaskLazyGroupBy(self, keys, drop_null_keys=drop_null_keys)\n\n def tail(self, n: int) -> Self: # pragma: no cover\n native_frame = self.native\n n_partitions = native_frame.npartitions\n\n if n_partitions == 1:\n return 
self._with_native(self.native.tail(n=n, compute=False))\n else:\n msg = "`LazyFrame.tail` is not supported for Dask backend with multiple partitions."\n raise NotImplementedError(msg)\n\n def gather_every(self, n: int, offset: int) -> Self:\n row_index_token = generate_temporary_column_name(n_bytes=8, columns=self.columns)\n plx = self.__narwhals_namespace__()\n return (\n self.with_row_index(row_index_token, order_by=None)\n .filter(\n (plx.col(row_index_token) >= offset)\n & ((plx.col(row_index_token) - offset) % n == 0)\n )\n .drop([row_index_token], strict=False)\n )\n\n def unpivot(\n self,\n on: Sequence[str] | None,\n index: Sequence[str] | None,\n variable_name: str,\n value_name: str,\n ) -> Self:\n return self._with_native(\n self.native.melt(\n id_vars=index,\n value_vars=on,\n var_name=variable_name,\n value_name=value_name,\n )\n )\n\n explode = not_implemented()\n
.venv\Lib\site-packages\narwhals\_dask\dataframe.py
dataframe.py
Python
17,761
0.95
0.133065
0.021028
python-kit
687
2024-10-12T06:17:56.904369
BSD-3-Clause
false
5fd00e2dcd514737d5c3d945d79af999
from __future__ import annotations\n\nimport warnings\nfrom typing import TYPE_CHECKING, Any, Callable, Literal\n\nfrom narwhals._compliant import LazyExpr\nfrom narwhals._compliant.expr import DepthTrackingExpr\nfrom narwhals._dask.expr_dt import DaskExprDateTimeNamespace\nfrom narwhals._dask.expr_str import DaskExprStringNamespace\nfrom narwhals._dask.utils import (\n add_row_index,\n maybe_evaluate_expr,\n narwhals_to_native_dtype,\n)\nfrom narwhals._expression_parsing import ExprKind, evaluate_output_names_and_aliases\nfrom narwhals._pandas_like.utils import native_to_narwhals_dtype\nfrom narwhals._utils import (\n Implementation,\n generate_temporary_column_name,\n not_implemented,\n)\nfrom narwhals.exceptions import InvalidOperationError\n\nif TYPE_CHECKING:\n from collections.abc import Sequence\n\n import dask.dataframe.dask_expr as dx\n from typing_extensions import Self\n\n from narwhals._compliant.typing import AliasNames, EvalNames, EvalSeries, ScalarKwargs\n from narwhals._dask.dataframe import DaskLazyFrame\n from narwhals._dask.namespace import DaskNamespace\n from narwhals._expression_parsing import ExprKind, ExprMetadata\n from narwhals._utils import Version, _FullContext\n from narwhals.typing import (\n FillNullStrategy,\n IntoDType,\n NonNestedLiteral,\n NumericLiteral,\n RollingInterpolationMethod,\n TemporalLiteral,\n )\n\n\nclass DaskExpr(\n LazyExpr["DaskLazyFrame", "dx.Series"],\n DepthTrackingExpr["DaskLazyFrame", "dx.Series"],\n):\n _implementation: Implementation = Implementation.DASK\n\n def __init__(\n self,\n call: EvalSeries[DaskLazyFrame, dx.Series],\n *,\n depth: int,\n function_name: str,\n evaluate_output_names: EvalNames[DaskLazyFrame],\n alias_output_names: AliasNames | None,\n backend_version: tuple[int, ...],\n version: Version,\n scalar_kwargs: ScalarKwargs | None = None,\n ) -> None:\n self._call = call\n self._depth = depth\n self._function_name = function_name\n self._evaluate_output_names = evaluate_output_names\n 
self._alias_output_names = alias_output_names\n self._backend_version = backend_version\n self._version = version\n self._scalar_kwargs = scalar_kwargs or {}\n self._metadata: ExprMetadata | None = None\n\n def __call__(self, df: DaskLazyFrame) -> Sequence[dx.Series]:\n return self._call(df)\n\n def __narwhals_expr__(self) -> None: ...\n\n def __narwhals_namespace__(self) -> DaskNamespace: # pragma: no cover\n # Unused, just for compatibility with PandasLikeExpr\n from narwhals._dask.namespace import DaskNamespace\n\n return DaskNamespace(backend_version=self._backend_version, version=self._version)\n\n def broadcast(self, kind: Literal[ExprKind.AGGREGATION, ExprKind.LITERAL]) -> Self:\n def func(df: DaskLazyFrame) -> list[dx.Series]:\n # result.loc[0][0] is a workaround for dask~<=2024.10.0/dask_expr~<=1.1.16\n # that raised a KeyErrror for result[0] during collection.\n return [result.loc[0][0] for result in self(df)]\n\n return self.__class__(\n func,\n depth=self._depth,\n function_name=self._function_name,\n evaluate_output_names=self._evaluate_output_names,\n alias_output_names=self._alias_output_names,\n backend_version=self._backend_version,\n version=self._version,\n scalar_kwargs=self._scalar_kwargs,\n )\n\n @classmethod\n def from_column_names(\n cls: type[Self],\n evaluate_column_names: EvalNames[DaskLazyFrame],\n /,\n *,\n context: _FullContext,\n function_name: str = "",\n ) -> Self:\n def func(df: DaskLazyFrame) -> list[dx.Series]:\n try:\n return [\n df._native_frame[column_name]\n for column_name in evaluate_column_names(df)\n ]\n except KeyError as e:\n if error := df._check_columns_exist(evaluate_column_names(df)):\n raise error from e\n raise\n\n return cls(\n func,\n depth=0,\n function_name=function_name,\n evaluate_output_names=evaluate_column_names,\n alias_output_names=None,\n backend_version=context._backend_version,\n version=context._version,\n )\n\n @classmethod\n def from_column_indices(cls, *column_indices: int, context: _FullContext) 
-> Self:\n def func(df: DaskLazyFrame) -> list[dx.Series]:\n return [df.native.iloc[:, i] for i in column_indices]\n\n return cls(\n func,\n depth=0,\n function_name="nth",\n evaluate_output_names=cls._eval_names_indices(column_indices),\n alias_output_names=None,\n backend_version=context._backend_version,\n version=context._version,\n )\n\n def _with_callable(\n self,\n # First argument to `call` should be `dx.Series`\n call: Callable[..., dx.Series],\n /,\n expr_name: str = "",\n scalar_kwargs: ScalarKwargs | None = None,\n **expressifiable_args: Self | Any,\n ) -> Self:\n def func(df: DaskLazyFrame) -> list[dx.Series]:\n native_results: list[dx.Series] = []\n native_series_list = self._call(df)\n other_native_series = {\n key: maybe_evaluate_expr(df, value)\n for key, value in expressifiable_args.items()\n }\n for native_series in native_series_list:\n result_native = call(native_series, **other_native_series)\n native_results.append(result_native)\n return native_results\n\n return self.__class__(\n func,\n depth=self._depth + 1,\n function_name=f"{self._function_name}->{expr_name}",\n evaluate_output_names=self._evaluate_output_names,\n alias_output_names=self._alias_output_names,\n backend_version=self._backend_version,\n version=self._version,\n scalar_kwargs=scalar_kwargs,\n )\n\n def _with_alias_output_names(self, func: AliasNames | None, /) -> Self:\n return type(self)(\n call=self._call,\n depth=self._depth,\n function_name=self._function_name,\n evaluate_output_names=self._evaluate_output_names,\n alias_output_names=func,\n backend_version=self._backend_version,\n version=self._version,\n scalar_kwargs=self._scalar_kwargs,\n )\n\n def __add__(self, other: Any) -> Self:\n return self._with_callable(\n lambda expr, other: expr.__add__(other), "__add__", other=other\n )\n\n def __sub__(self, other: Any) -> Self:\n return self._with_callable(\n lambda expr, other: expr.__sub__(other), "__sub__", other=other\n )\n\n def __rsub__(self, other: Any) -> Self:\n 
return self._with_callable(\n lambda expr, other: other - expr, "__rsub__", other=other\n ).alias("literal")\n\n def __mul__(self, other: Any) -> Self:\n return self._with_callable(\n lambda expr, other: expr.__mul__(other), "__mul__", other=other\n )\n\n def __truediv__(self, other: Any) -> Self:\n return self._with_callable(\n lambda expr, other: expr.__truediv__(other), "__truediv__", other=other\n )\n\n def __rtruediv__(self, other: Any) -> Self:\n return self._with_callable(\n lambda expr, other: other / expr, "__rtruediv__", other=other\n ).alias("literal")\n\n def __floordiv__(self, other: Any) -> Self:\n return self._with_callable(\n lambda expr, other: expr.__floordiv__(other), "__floordiv__", other=other\n )\n\n def __rfloordiv__(self, other: Any) -> Self:\n return self._with_callable(\n lambda expr, other: other // expr, "__rfloordiv__", other=other\n ).alias("literal")\n\n def __pow__(self, other: Any) -> Self:\n return self._with_callable(\n lambda expr, other: expr.__pow__(other), "__pow__", other=other\n )\n\n def __rpow__(self, other: Any) -> Self:\n return self._with_callable(\n lambda expr, other: other**expr, "__rpow__", other=other\n ).alias("literal")\n\n def __mod__(self, other: Any) -> Self:\n return self._with_callable(\n lambda expr, other: expr.__mod__(other), "__mod__", other=other\n )\n\n def __rmod__(self, other: Any) -> Self:\n return self._with_callable(\n lambda expr, other: other % expr, "__rmod__", other=other\n ).alias("literal")\n\n def __eq__(self, other: DaskExpr) -> Self: # type: ignore[override]\n return self._with_callable(\n lambda expr, other: expr.__eq__(other), "__eq__", other=other\n )\n\n def __ne__(self, other: DaskExpr) -> Self: # type: ignore[override]\n return self._with_callable(\n lambda expr, other: expr.__ne__(other), "__ne__", other=other\n )\n\n def __ge__(self, other: DaskExpr | Any) -> Self:\n return self._with_callable(\n lambda expr, other: expr.__ge__(other), "__ge__", other=other\n )\n\n def 
__gt__(self, other: DaskExpr) -> Self:\n return self._with_callable(\n lambda expr, other: expr.__gt__(other), "__gt__", other=other\n )\n\n def __le__(self, other: DaskExpr) -> Self:\n return self._with_callable(\n lambda expr, other: expr.__le__(other), "__le__", other=other\n )\n\n def __lt__(self, other: DaskExpr) -> Self:\n return self._with_callable(\n lambda expr, other: expr.__lt__(other), "__lt__", other=other\n )\n\n def __and__(self, other: DaskExpr | Any) -> Self:\n return self._with_callable(\n lambda expr, other: expr.__and__(other), "__and__", other=other\n )\n\n def __or__(self, other: DaskExpr) -> Self:\n return self._with_callable(\n lambda expr, other: expr.__or__(other), "__or__", other=other\n )\n\n def __invert__(self) -> Self:\n return self._with_callable(lambda expr: expr.__invert__(), "__invert__")\n\n def mean(self) -> Self:\n return self._with_callable(lambda expr: expr.mean().to_series(), "mean")\n\n def median(self) -> Self:\n from narwhals.exceptions import InvalidOperationError\n\n def func(s: dx.Series) -> dx.Series:\n dtype = native_to_narwhals_dtype(s.dtype, self._version, Implementation.DASK)\n if not dtype.is_numeric():\n msg = "`median` operation not supported for non-numeric input type."\n raise InvalidOperationError(msg)\n return s.median_approximate().to_series()\n\n return self._with_callable(func, "median")\n\n def min(self) -> Self:\n return self._with_callable(lambda expr: expr.min().to_series(), "min")\n\n def max(self) -> Self:\n return self._with_callable(lambda expr: expr.max().to_series(), "max")\n\n def std(self, ddof: int) -> Self:\n return self._with_callable(\n lambda expr: expr.std(ddof=ddof).to_series(),\n "std",\n scalar_kwargs={"ddof": ddof},\n )\n\n def var(self, ddof: int) -> Self:\n return self._with_callable(\n lambda expr: expr.var(ddof=ddof).to_series(),\n "var",\n scalar_kwargs={"ddof": ddof},\n )\n\n def skew(self) -> Self:\n return self._with_callable(lambda expr: expr.skew().to_series(), "skew")\n\n 
def kurtosis(self) -> Self:\n return self._with_callable(lambda expr: expr.kurtosis().to_series(), "kurtosis")\n\n def shift(self, n: int) -> Self:\n return self._with_callable(lambda expr: expr.shift(n), "shift")\n\n def cum_sum(self, *, reverse: bool) -> Self:\n if reverse: # pragma: no cover\n # https://github.com/dask/dask/issues/11802\n msg = "`cum_sum(reverse=True)` is not supported with Dask backend"\n raise NotImplementedError(msg)\n\n return self._with_callable(lambda expr: expr.cumsum(), "cum_sum")\n\n def cum_count(self, *, reverse: bool) -> Self:\n if reverse: # pragma: no cover\n msg = "`cum_count(reverse=True)` is not supported with Dask backend"\n raise NotImplementedError(msg)\n\n return self._with_callable(\n lambda expr: (~expr.isna()).astype(int).cumsum(), "cum_count"\n )\n\n def cum_min(self, *, reverse: bool) -> Self:\n if reverse: # pragma: no cover\n msg = "`cum_min(reverse=True)` is not supported with Dask backend"\n raise NotImplementedError(msg)\n\n return self._with_callable(lambda expr: expr.cummin(), "cum_min")\n\n def cum_max(self, *, reverse: bool) -> Self:\n if reverse: # pragma: no cover\n msg = "`cum_max(reverse=True)` is not supported with Dask backend"\n raise NotImplementedError(msg)\n\n return self._with_callable(lambda expr: expr.cummax(), "cum_max")\n\n def cum_prod(self, *, reverse: bool) -> Self:\n if reverse: # pragma: no cover\n msg = "`cum_prod(reverse=True)` is not supported with Dask backend"\n raise NotImplementedError(msg)\n\n return self._with_callable(lambda expr: expr.cumprod(), "cum_prod")\n\n def rolling_sum(self, window_size: int, *, min_samples: int, center: bool) -> Self:\n return self._with_callable(\n lambda expr: expr.rolling(\n window=window_size, min_periods=min_samples, center=center\n ).sum(),\n "rolling_sum",\n )\n\n def rolling_mean(self, window_size: int, *, min_samples: int, center: bool) -> Self:\n return self._with_callable(\n lambda expr: expr.rolling(\n window=window_size, 
min_periods=min_samples, center=center\n ).mean(),\n "rolling_mean",\n )\n\n def rolling_var(\n self, window_size: int, *, min_samples: int, center: bool, ddof: int\n ) -> Self:\n if ddof == 1:\n return self._with_callable(\n lambda expr: expr.rolling(\n window=window_size, min_periods=min_samples, center=center\n ).var(),\n "rolling_var",\n )\n else:\n msg = "Dask backend only supports `ddof=1` for `rolling_var`"\n raise NotImplementedError(msg)\n\n def rolling_std(\n self, window_size: int, *, min_samples: int, center: bool, ddof: int\n ) -> Self:\n if ddof == 1:\n return self._with_callable(\n lambda expr: expr.rolling(\n window=window_size, min_periods=min_samples, center=center\n ).std(),\n "rolling_std",\n )\n else:\n msg = "Dask backend only supports `ddof=1` for `rolling_std`"\n raise NotImplementedError(msg)\n\n def sum(self) -> Self:\n return self._with_callable(lambda expr: expr.sum().to_series(), "sum")\n\n def count(self) -> Self:\n return self._with_callable(lambda expr: expr.count().to_series(), "count")\n\n def round(self, decimals: int) -> Self:\n return self._with_callable(lambda expr: expr.round(decimals), "round")\n\n def unique(self) -> Self:\n return self._with_callable(lambda expr: expr.unique(), "unique")\n\n def drop_nulls(self) -> Self:\n return self._with_callable(lambda expr: expr.dropna(), "drop_nulls")\n\n def abs(self) -> Self:\n return self._with_callable(lambda expr: expr.abs(), "abs")\n\n def all(self) -> Self:\n return self._with_callable(\n lambda expr: expr.all(\n axis=None, skipna=True, split_every=False, out=None\n ).to_series(),\n "all",\n )\n\n def any(self) -> Self:\n return self._with_callable(\n lambda expr: expr.any(axis=0, skipna=True, split_every=False).to_series(),\n "any",\n )\n\n def fill_null(\n self,\n value: Self | NonNestedLiteral,\n strategy: FillNullStrategy | None,\n limit: int | None,\n ) -> Self:\n def func(expr: dx.Series) -> dx.Series:\n if value is not None:\n res_ser = expr.fillna(value)\n else:\n 
res_ser = (\n expr.ffill(limit=limit)\n if strategy == "forward"\n else expr.bfill(limit=limit)\n )\n return res_ser\n\n return self._with_callable(func, "fillna")\n\n def clip(\n self,\n lower_bound: Self | NumericLiteral | TemporalLiteral | None,\n upper_bound: Self | NumericLiteral | TemporalLiteral | None,\n ) -> Self:\n return self._with_callable(\n lambda expr, lower_bound, upper_bound: expr.clip(\n lower=lower_bound, upper=upper_bound\n ),\n "clip",\n lower_bound=lower_bound,\n upper_bound=upper_bound,\n )\n\n def diff(self) -> Self:\n return self._with_callable(lambda expr: expr.diff(), "diff")\n\n def n_unique(self) -> Self:\n return self._with_callable(\n lambda expr: expr.nunique(dropna=False).to_series(), "n_unique"\n )\n\n def is_null(self) -> Self:\n return self._with_callable(lambda expr: expr.isna(), "is_null")\n\n def is_nan(self) -> Self:\n def func(expr: dx.Series) -> dx.Series:\n dtype = native_to_narwhals_dtype(\n expr.dtype, self._version, self._implementation\n )\n if dtype.is_numeric():\n return expr != expr # pyright: ignore[reportReturnType] # noqa: PLR0124\n msg = f"`.is_nan` only supported for numeric dtypes and not {dtype}, did you mean `.is_null`?"\n raise InvalidOperationError(msg)\n\n return self._with_callable(func, "is_null")\n\n def len(self) -> Self:\n return self._with_callable(lambda expr: expr.size.to_series(), "len")\n\n def quantile(\n self, quantile: float, interpolation: RollingInterpolationMethod\n ) -> Self:\n if interpolation == "linear":\n\n def func(expr: dx.Series, quantile: float) -> dx.Series:\n if expr.npartitions > 1:\n msg = "`Expr.quantile` is not supported for Dask backend with multiple partitions."\n raise NotImplementedError(msg)\n return expr.quantile(\n q=quantile, method="dask"\n ).to_series() # pragma: no cover\n\n return self._with_callable(func, "quantile", quantile=quantile)\n else:\n msg = "`higher`, `lower`, `midpoint`, `nearest` - interpolation methods are not supported by Dask. 
Please use `linear` instead."\n raise NotImplementedError(msg)\n\n def is_first_distinct(self) -> Self:\n def func(expr: dx.Series) -> dx.Series:\n _name = expr.name\n col_token = generate_temporary_column_name(n_bytes=8, columns=[_name])\n frame = add_row_index(\n expr.to_frame(), col_token, self._backend_version, self._implementation\n )\n first_distinct_index = frame.groupby(_name).agg({col_token: "min"})[col_token]\n return frame[col_token].isin(first_distinct_index)\n\n return self._with_callable(func, "is_first_distinct")\n\n def is_last_distinct(self) -> Self:\n def func(expr: dx.Series) -> dx.Series:\n _name = expr.name\n col_token = generate_temporary_column_name(n_bytes=8, columns=[_name])\n frame = add_row_index(\n expr.to_frame(), col_token, self._backend_version, self._implementation\n )\n last_distinct_index = frame.groupby(_name).agg({col_token: "max"})[col_token]\n return frame[col_token].isin(last_distinct_index)\n\n return self._with_callable(func, "is_last_distinct")\n\n def is_unique(self) -> Self:\n def func(expr: dx.Series) -> dx.Series:\n _name = expr.name\n return (\n expr.to_frame()\n .groupby(_name, dropna=False)\n .transform("size", meta=(_name, int))\n == 1\n )\n\n return self._with_callable(func, "is_unique")\n\n def is_in(self, other: Any) -> Self:\n return self._with_callable(lambda expr: expr.isin(other), "is_in")\n\n def null_count(self) -> Self:\n return self._with_callable(\n lambda expr: expr.isna().sum().to_series(), "null_count"\n )\n\n def over(self, partition_by: Sequence[str], order_by: Sequence[str]) -> Self:\n # pandas is a required dependency of dask so it's safe to import this\n from narwhals._pandas_like.group_by import PandasLikeGroupBy\n\n if not partition_by:\n assert order_by # noqa: S101\n\n # This is something like `nw.col('a').cum_sum().order_by(key)`\n # which we can always easily support, as it doesn't require grouping.\n def func(df: DaskLazyFrame) -> Sequence[dx.Series]:\n return self(df.sort(*order_by, 
descending=False, nulls_last=False))\n elif not self._is_elementary(): # pragma: no cover\n msg = (\n "Only elementary expressions are supported for `.over` in dask.\n\n"\n "Please see: "\n "https://narwhals-dev.github.io/narwhals/concepts/improve_group_by_operation/"\n )\n raise NotImplementedError(msg)\n elif order_by:\n # Wrong results https://github.com/dask/dask/issues/11806.\n msg = "`over` with `order_by` is not yet supported in Dask."\n raise NotImplementedError(msg)\n else:\n function_name = PandasLikeGroupBy._leaf_name(self)\n try:\n dask_function_name = PandasLikeGroupBy._REMAP_AGGS[function_name]\n except KeyError:\n # window functions are unsupported: https://github.com/dask/dask/issues/11806\n msg = (\n f"Unsupported function: {function_name} in `over` context.\n\n"\n f"Supported functions are {', '.join(PandasLikeGroupBy._REMAP_AGGS)}\n"\n )\n raise NotImplementedError(msg) from None\n\n def func(df: DaskLazyFrame) -> Sequence[dx.Series]:\n output_names, aliases = evaluate_output_names_and_aliases(self, df, [])\n\n with warnings.catch_warnings():\n # https://github.com/dask/dask/issues/11804\n warnings.filterwarnings(\n "ignore",\n message=".*`meta` is not specified",\n category=UserWarning,\n )\n grouped = df.native.groupby(partition_by)\n if dask_function_name == "size":\n if len(output_names) != 1: # pragma: no cover\n msg = "Safety check failed, please report a bug."\n raise AssertionError(msg)\n res_native = grouped.transform(\n dask_function_name, **self._scalar_kwargs\n ).to_frame(output_names[0])\n else:\n res_native = grouped[list(output_names)].transform(\n dask_function_name, **self._scalar_kwargs\n )\n result_frame = df._with_native(\n res_native.rename(columns=dict(zip(output_names, aliases)))\n ).native\n return [result_frame[name] for name in aliases]\n\n return self.__class__(\n func,\n depth=self._depth + 1,\n function_name=self._function_name + "->over",\n evaluate_output_names=self._evaluate_output_names,\n 
alias_output_names=self._alias_output_names,\n backend_version=self._backend_version,\n version=self._version,\n )\n\n def cast(self, dtype: IntoDType) -> Self:\n def func(expr: dx.Series) -> dx.Series:\n native_dtype = narwhals_to_native_dtype(dtype, self._version)\n return expr.astype(native_dtype)\n\n return self._with_callable(func, "cast")\n\n def is_finite(self) -> Self:\n import dask.array as da\n\n return self._with_callable(da.isfinite, "is_finite")\n\n def log(self, base: float) -> Self:\n import dask.array as da\n\n def _log(expr: dx.Series) -> dx.Series:\n return da.log(expr) / da.log(base)\n\n return self._with_callable(_log, "log")\n\n def exp(self) -> Self:\n import dask.array as da\n\n return self._with_callable(da.exp, "exp")\n\n def sqrt(self) -> Self:\n import dask.array as da\n\n return self._with_callable(da.sqrt, "sqrt")\n\n @property\n def str(self) -> DaskExprStringNamespace:\n return DaskExprStringNamespace(self)\n\n @property\n def dt(self) -> DaskExprDateTimeNamespace:\n return DaskExprDateTimeNamespace(self)\n\n list = not_implemented() # type: ignore[assignment]\n struct = not_implemented() # type: ignore[assignment]\n rank = not_implemented() # pyright: ignore[reportAssignmentType]\n _alias_native = not_implemented()\n window_function = not_implemented() # pyright: ignore[reportAssignmentType]\n _from_elementwise_horizontal_op = not_implemented()\n _with_binary = not_implemented()\n
.venv\Lib\site-packages\narwhals\_dask\expr.py
expr.py
Python
25,598
0.95
0.187773
0.024476
python-kit
872
2025-05-19T19:46:07.224009
GPL-3.0
false
e3a92da9a36d83332949b0013a050d85
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom narwhals._compliant.any_namespace import DateTimeNamespace\nfrom narwhals._compliant.expr import LazyExprNamespace\nfrom narwhals._constants import MS_PER_SECOND, NS_PER_SECOND, US_PER_SECOND\nfrom narwhals._duration import parse_interval_string\nfrom narwhals._pandas_like.utils import (\n UNIT_DICT,\n calculate_timestamp_date,\n calculate_timestamp_datetime,\n native_to_narwhals_dtype,\n)\nfrom narwhals._utils import Implementation\n\nif TYPE_CHECKING:\n import dask.dataframe.dask_expr as dx\n\n from narwhals._dask.expr import DaskExpr\n from narwhals.typing import TimeUnit\n\n\nclass DaskExprDateTimeNamespace(\n LazyExprNamespace["DaskExpr"], DateTimeNamespace["DaskExpr"]\n):\n def date(self) -> DaskExpr:\n return self.compliant._with_callable(lambda expr: expr.dt.date, "date")\n\n def year(self) -> DaskExpr:\n return self.compliant._with_callable(lambda expr: expr.dt.year, "year")\n\n def month(self) -> DaskExpr:\n return self.compliant._with_callable(lambda expr: expr.dt.month, "month")\n\n def day(self) -> DaskExpr:\n return self.compliant._with_callable(lambda expr: expr.dt.day, "day")\n\n def hour(self) -> DaskExpr:\n return self.compliant._with_callable(lambda expr: expr.dt.hour, "hour")\n\n def minute(self) -> DaskExpr:\n return self.compliant._with_callable(lambda expr: expr.dt.minute, "minute")\n\n def second(self) -> DaskExpr:\n return self.compliant._with_callable(lambda expr: expr.dt.second, "second")\n\n def millisecond(self) -> DaskExpr:\n return self.compliant._with_callable(\n lambda expr: expr.dt.microsecond // 1000, "millisecond"\n )\n\n def microsecond(self) -> DaskExpr:\n return self.compliant._with_callable(\n lambda expr: expr.dt.microsecond, "microsecond"\n )\n\n def nanosecond(self) -> DaskExpr:\n return self.compliant._with_callable(\n lambda expr: expr.dt.microsecond * 1000 + expr.dt.nanosecond, "nanosecond"\n )\n\n def ordinal_day(self) -> DaskExpr:\n return 
self.compliant._with_callable(\n lambda expr: expr.dt.dayofyear, "ordinal_day"\n )\n\n def weekday(self) -> DaskExpr:\n return self.compliant._with_callable(\n lambda expr: expr.dt.weekday + 1, # Dask is 0-6\n "weekday",\n )\n\n def to_string(self, format: str) -> DaskExpr:\n return self.compliant._with_callable(\n lambda expr, format: expr.dt.strftime(format.replace("%.f", ".%f")),\n "strftime",\n format=format,\n )\n\n def replace_time_zone(self, time_zone: str | None) -> DaskExpr:\n return self.compliant._with_callable(\n lambda expr, time_zone: expr.dt.tz_localize(None).dt.tz_localize(time_zone)\n if time_zone is not None\n else expr.dt.tz_localize(None),\n "tz_localize",\n time_zone=time_zone,\n )\n\n def convert_time_zone(self, time_zone: str) -> DaskExpr:\n def func(s: dx.Series, time_zone: str) -> dx.Series:\n dtype = native_to_narwhals_dtype(\n s.dtype, self.compliant._version, Implementation.DASK\n )\n if dtype.time_zone is None: # type: ignore[attr-defined]\n return s.dt.tz_localize("UTC").dt.tz_convert(time_zone) # pyright: ignore[reportAttributeAccessIssue]\n else:\n return s.dt.tz_convert(time_zone) # pyright: ignore[reportAttributeAccessIssue]\n\n return self.compliant._with_callable(func, "tz_convert", time_zone=time_zone)\n\n def timestamp(self, time_unit: TimeUnit) -> DaskExpr:\n def func(s: dx.Series, time_unit: TimeUnit) -> dx.Series:\n dtype = native_to_narwhals_dtype(\n s.dtype, self.compliant._version, Implementation.DASK\n )\n is_pyarrow_dtype = "pyarrow" in str(dtype)\n mask_na = s.isna()\n dtypes = self.compliant._version.dtypes\n if dtype == dtypes.Date:\n # Date is only supported in pandas dtypes if pyarrow-backed\n s_cast = s.astype("Int32[pyarrow]")\n result = calculate_timestamp_date(s_cast, time_unit)\n elif isinstance(dtype, dtypes.Datetime):\n original_time_unit = dtype.time_unit\n s_cast = (\n s.astype("Int64[pyarrow]") if is_pyarrow_dtype else s.astype("int64")\n )\n result = calculate_timestamp_datetime(\n s_cast, 
original_time_unit, time_unit\n )\n else:\n msg = "Input should be either of Date or Datetime type"\n raise TypeError(msg)\n return result.where(~mask_na) # pyright: ignore[reportReturnType]\n\n return self.compliant._with_callable(func, "datetime", time_unit=time_unit)\n\n def total_minutes(self) -> DaskExpr:\n return self.compliant._with_callable(\n lambda expr: expr.dt.total_seconds() // 60, "total_minutes"\n )\n\n def total_seconds(self) -> DaskExpr:\n return self.compliant._with_callable(\n lambda expr: expr.dt.total_seconds() // 1, "total_seconds"\n )\n\n def total_milliseconds(self) -> DaskExpr:\n return self.compliant._with_callable(\n lambda expr: expr.dt.total_seconds() * MS_PER_SECOND // 1,\n "total_milliseconds",\n )\n\n def total_microseconds(self) -> DaskExpr:\n return self.compliant._with_callable(\n lambda expr: expr.dt.total_seconds() * US_PER_SECOND // 1,\n "total_microseconds",\n )\n\n def total_nanoseconds(self) -> DaskExpr:\n return self.compliant._with_callable(\n lambda expr: expr.dt.total_seconds() * NS_PER_SECOND // 1, "total_nanoseconds"\n )\n\n def truncate(self, every: str) -> DaskExpr:\n multiple, unit = parse_interval_string(every)\n if unit in {"mo", "q", "y"}:\n msg = f"Truncating to {unit} is not supported yet for dask."\n raise NotImplementedError(msg)\n freq = f"{multiple}{UNIT_DICT.get(unit, unit)}"\n return self.compliant._with_callable(lambda expr: expr.dt.floor(freq), "truncate")\n
.venv\Lib\site-packages\narwhals\_dask\expr_dt.py
expr_dt.py
Python
6,277
0.95
0.203704
0.007519
awesome-app
450
2023-09-28T21:06:03.864218
GPL-3.0
false
bcf8b55b0298b535c4a6e0d99a864cb1
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport dask.dataframe as dd\n\nfrom narwhals._compliant.any_namespace import StringNamespace\nfrom narwhals._compliant.expr import LazyExprNamespace\nfrom narwhals._utils import not_implemented\n\nif TYPE_CHECKING:\n from narwhals._dask.expr import DaskExpr\n\n\nclass DaskExprStringNamespace(LazyExprNamespace["DaskExpr"], StringNamespace["DaskExpr"]):\n def len_chars(self) -> DaskExpr:\n return self.compliant._with_callable(lambda expr: expr.str.len(), "len")\n\n def replace(self, pattern: str, value: str, *, literal: bool, n: int) -> DaskExpr:\n return self.compliant._with_callable(\n lambda expr, pattern, value, literal, n: expr.str.replace(\n pattern, value, regex=not literal, n=n\n ),\n "replace",\n pattern=pattern,\n value=value,\n literal=literal,\n n=n,\n )\n\n def replace_all(self, pattern: str, value: str, *, literal: bool) -> DaskExpr:\n return self.compliant._with_callable(\n lambda expr, pattern, value, literal: expr.str.replace(\n pattern, value, n=-1, regex=not literal\n ),\n "replace",\n pattern=pattern,\n value=value,\n literal=literal,\n )\n\n def strip_chars(self, characters: str | None) -> DaskExpr:\n return self.compliant._with_callable(\n lambda expr, characters: expr.str.strip(characters),\n "strip",\n characters=characters,\n )\n\n def starts_with(self, prefix: str) -> DaskExpr:\n return self.compliant._with_callable(\n lambda expr, prefix: expr.str.startswith(prefix), "starts_with", prefix=prefix\n )\n\n def ends_with(self, suffix: str) -> DaskExpr:\n return self.compliant._with_callable(\n lambda expr, suffix: expr.str.endswith(suffix), "ends_with", suffix=suffix\n )\n\n def contains(self, pattern: str, *, literal: bool) -> DaskExpr:\n return self.compliant._with_callable(\n lambda expr, pattern, literal: expr.str.contains(\n pat=pattern, regex=not literal\n ),\n "contains",\n pattern=pattern,\n literal=literal,\n )\n\n def slice(self, offset: int, length: int | None) 
-> DaskExpr:\n return self.compliant._with_callable(\n lambda expr, offset, length: expr.str.slice(\n start=offset, stop=offset + length if length else None\n ),\n "slice",\n offset=offset,\n length=length,\n )\n\n def split(self, by: str) -> DaskExpr:\n return self.compliant._with_callable(\n lambda expr, by: expr.str.split(pat=by), "split", by=by\n )\n\n def to_datetime(self, format: str | None) -> DaskExpr:\n return self.compliant._with_callable(\n lambda expr, format: dd.to_datetime(expr, format=format),\n "to_datetime",\n format=format,\n )\n\n def to_uppercase(self) -> DaskExpr:\n return self.compliant._with_callable(\n lambda expr: expr.str.upper(), "to_uppercase"\n )\n\n def to_lowercase(self) -> DaskExpr:\n return self.compliant._with_callable(\n lambda expr: expr.str.lower(), "to_lowercase"\n )\n\n def zfill(self, width: int) -> DaskExpr:\n return self.compliant._with_callable(\n lambda expr, width: expr.str.zfill(width), "zfill", width=width\n )\n\n to_date = not_implemented()\n
.venv\Lib\site-packages\narwhals\_dask\expr_str.py
expr_str.py
Python
3,563
0.85
0.150943
0
python-kit
141
2024-05-16T17:09:14.355332
GPL-3.0
false
3fd233bd86d89a02f6cd12e459d3bbbc
from __future__ import annotations\n\nfrom functools import partial\nfrom typing import TYPE_CHECKING, Any, Callable, ClassVar\n\nimport dask.dataframe as dd\n\nfrom narwhals._compliant import DepthTrackingGroupBy\nfrom narwhals._expression_parsing import evaluate_output_names_and_aliases\n\nif TYPE_CHECKING:\n from collections.abc import Mapping, Sequence\n\n import pandas as pd\n from dask.dataframe.api import GroupBy as _DaskGroupBy\n from pandas.core.groupby import SeriesGroupBy as _PandasSeriesGroupBy\n from typing_extensions import TypeAlias\n\n from narwhals._compliant.group_by import NarwhalsAggregation\n from narwhals._dask.dataframe import DaskLazyFrame\n from narwhals._dask.expr import DaskExpr\n\n PandasSeriesGroupBy: TypeAlias = _PandasSeriesGroupBy[Any, Any]\n _AggFn: TypeAlias = Callable[..., Any]\n\nelse:\n try:\n import dask.dataframe.dask_expr as dx\n except ModuleNotFoundError: # pragma: no cover\n import dask_expr as dx\n _DaskGroupBy = dx._groupby.GroupBy\n\nAggregation: TypeAlias = "str | _AggFn"\n"""The name of an aggregation function, or the function itself."""\n\n\ndef n_unique() -> dd.Aggregation:\n def chunk(s: PandasSeriesGroupBy) -> pd.Series[Any]:\n return s.nunique(dropna=False)\n\n def agg(s0: PandasSeriesGroupBy) -> pd.Series[Any]:\n return s0.sum()\n\n return dd.Aggregation(name="nunique", chunk=chunk, agg=agg)\n\n\ndef var(ddof: int) -> _AggFn:\n return partial(_DaskGroupBy.var, ddof=ddof)\n\n\ndef std(ddof: int) -> _AggFn:\n return partial(_DaskGroupBy.std, ddof=ddof)\n\n\nclass DaskLazyGroupBy(DepthTrackingGroupBy["DaskLazyFrame", "DaskExpr", Aggregation]):\n _REMAP_AGGS: ClassVar[Mapping[NarwhalsAggregation, Aggregation]] = {\n "sum": "sum",\n "mean": "mean",\n "median": "median",\n "max": "max",\n "min": "min",\n "std": std,\n "var": var,\n "len": "size",\n "n_unique": n_unique,\n "count": "count",\n }\n\n def __init__(\n self,\n df: DaskLazyFrame,\n keys: Sequence[DaskExpr] | Sequence[str],\n /,\n *,\n drop_null_keys: bool,\n 
) -> None:\n self._compliant_frame, self._keys, self._output_key_names = self._parse_keys(\n df, keys=keys\n )\n self._grouped = self.compliant.native.groupby(\n self._keys, dropna=drop_null_keys, observed=True\n )\n\n def agg(self, *exprs: DaskExpr) -> DaskLazyFrame:\n from narwhals._dask.dataframe import DaskLazyFrame\n\n if not exprs:\n # No aggregation provided\n return (\n self.compliant.simple_select(*self._keys)\n .unique(self._keys, keep="any")\n .rename(dict(zip(self._keys, self._output_key_names)))\n )\n\n self._ensure_all_simple(exprs)\n # This should be the fastpath, but cuDF is too far behind to use it.\n # - https://github.com/rapidsai/cudf/issues/15118\n # - https://github.com/rapidsai/cudf/issues/15084\n simple_aggregations: dict[str, tuple[str, Aggregation]] = {}\n exclude = (*self._keys, *self._output_key_names)\n for expr in exprs:\n output_names, aliases = evaluate_output_names_and_aliases(\n expr, self.compliant, exclude\n )\n if expr._depth == 0:\n # e.g. `agg(nw.len())`\n column = self._keys[0]\n agg_fn = self._remap_expr_name(expr._function_name)\n simple_aggregations.update(dict.fromkeys(aliases, (column, agg_fn)))\n continue\n\n # e.g. `agg(nw.mean('a'))`\n agg_fn = self._remap_expr_name(self._leaf_name(expr))\n # deal with n_unique case in a "lazy" mode to not depend on dask globally\n agg_fn = agg_fn(**expr._scalar_kwargs) if callable(agg_fn) else agg_fn\n simple_aggregations.update(\n (alias, (output_name, agg_fn))\n for alias, output_name in zip(aliases, output_names)\n )\n return DaskLazyFrame(\n self._grouped.agg(**simple_aggregations).reset_index(),\n backend_version=self.compliant._backend_version,\n version=self.compliant._version,\n ).rename(dict(zip(self._keys, self._output_key_names)))\n
.venv\Lib\site-packages\narwhals\_dask\group_by.py
group_by.py
Python
4,297
0.95
0.137097
0.08
vue-tools
749
2024-08-02T10:15:07.529146
MIT
false
f221b8e863271eacfa57f215be0169fc
from __future__ import annotations\n\nimport operator\nfrom functools import reduce\nfrom typing import TYPE_CHECKING, cast\n\nimport dask.dataframe as dd\nimport pandas as pd\n\nfrom narwhals._compliant import CompliantThen, CompliantWhen, LazyNamespace\nfrom narwhals._compliant.namespace import DepthTrackingNamespace\nfrom narwhals._dask.dataframe import DaskLazyFrame\nfrom narwhals._dask.expr import DaskExpr\nfrom narwhals._dask.selectors import DaskSelectorNamespace\nfrom narwhals._dask.utils import (\n align_series_full_broadcast,\n narwhals_to_native_dtype,\n validate_comparand,\n)\nfrom narwhals._expression_parsing import (\n ExprKind,\n combine_alias_output_names,\n combine_evaluate_output_names,\n)\nfrom narwhals._utils import Implementation\n\nif TYPE_CHECKING:\n from collections.abc import Iterable, Sequence\n\n import dask.dataframe.dask_expr as dx\n\n from narwhals._utils import Version\n from narwhals.typing import ConcatMethod, IntoDType, NonNestedLiteral\n\n\nclass DaskNamespace(\n LazyNamespace[DaskLazyFrame, DaskExpr, dd.DataFrame],\n DepthTrackingNamespace[DaskLazyFrame, DaskExpr],\n):\n _implementation: Implementation = Implementation.DASK\n\n @property\n def selectors(self) -> DaskSelectorNamespace:\n return DaskSelectorNamespace.from_namespace(self)\n\n @property\n def _expr(self) -> type[DaskExpr]:\n return DaskExpr\n\n @property\n def _lazyframe(self) -> type[DaskLazyFrame]:\n return DaskLazyFrame\n\n def __init__(self, *, backend_version: tuple[int, ...], version: Version) -> None:\n self._backend_version = backend_version\n self._version = version\n\n def lit(self, value: NonNestedLiteral, dtype: IntoDType | None) -> DaskExpr:\n def func(df: DaskLazyFrame) -> list[dx.Series]:\n if dtype is not None:\n native_dtype = narwhals_to_native_dtype(dtype, self._version)\n native_pd_series = pd.Series([value], dtype=native_dtype, name="literal")\n else:\n native_pd_series = pd.Series([value], name="literal")\n npartitions = 
df._native_frame.npartitions\n dask_series = dd.from_pandas(native_pd_series, npartitions=npartitions)\n return [dask_series[0].to_series()]\n\n return self._expr(\n func,\n depth=0,\n function_name="lit",\n evaluate_output_names=lambda _df: ["literal"],\n alias_output_names=None,\n backend_version=self._backend_version,\n version=self._version,\n )\n\n def len(self) -> DaskExpr:\n def func(df: DaskLazyFrame) -> list[dx.Series]:\n # We don't allow dataframes with 0 columns, so `[0]` is safe.\n return [df._native_frame[df.columns[0]].size.to_series()]\n\n return self._expr(\n func,\n depth=0,\n function_name="len",\n evaluate_output_names=lambda _df: ["len"],\n alias_output_names=None,\n backend_version=self._backend_version,\n version=self._version,\n )\n\n def all_horizontal(self, *exprs: DaskExpr, ignore_nulls: bool) -> DaskExpr:\n def func(df: DaskLazyFrame) -> list[dx.Series]:\n series = (s for _expr in exprs for s in _expr(df))\n # Note on `ignore_nulls`: Dask doesn't support storing arbitrary Python\n # objects in `object` dtype, so we don't need the same check we have for pandas-like.\n it = (\n (\n # NumPy-backed 'bool' dtype can't contain nulls so doesn't need filling.\n s if s.dtype == "bool" else s.fillna(True) # noqa: FBT003\n for s in series\n )\n if ignore_nulls\n else series\n )\n return [reduce(operator.and_, align_series_full_broadcast(df, *it))]\n\n return self._expr(\n call=func,\n depth=max(x._depth for x in exprs) + 1,\n function_name="all_horizontal",\n evaluate_output_names=combine_evaluate_output_names(*exprs),\n alias_output_names=combine_alias_output_names(*exprs),\n backend_version=self._backend_version,\n version=self._version,\n )\n\n def any_horizontal(self, *exprs: DaskExpr, ignore_nulls: bool) -> DaskExpr:\n def func(df: DaskLazyFrame) -> list[dx.Series]:\n series = (s for _expr in exprs for s in _expr(df))\n # Note on `ignore_nulls`: Dask doesn't support storing arbitrary Python\n # objects in `object` dtype, so we don't need the 
same check we have for pandas-like.\n it = (\n (\n # NumPy-backed 'bool' dtype can't contain nulls so doesn't need filling.\n s if s.dtype == "bool" else s.fillna(False) # noqa: FBT003\n for s in series\n )\n if ignore_nulls\n else series\n )\n return [reduce(operator.or_, align_series_full_broadcast(df, *it))]\n\n return self._expr(\n call=func,\n depth=max(x._depth for x in exprs) + 1,\n function_name="any_horizontal",\n evaluate_output_names=combine_evaluate_output_names(*exprs),\n alias_output_names=combine_alias_output_names(*exprs),\n backend_version=self._backend_version,\n version=self._version,\n )\n\n def sum_horizontal(self, *exprs: DaskExpr) -> DaskExpr:\n def func(df: DaskLazyFrame) -> list[dx.Series]:\n series = align_series_full_broadcast(\n df, *(s for _expr in exprs for s in _expr(df))\n )\n return [dd.concat(series, axis=1).sum(axis=1)]\n\n return self._expr(\n call=func,\n depth=max(x._depth for x in exprs) + 1,\n function_name="sum_horizontal",\n evaluate_output_names=combine_evaluate_output_names(*exprs),\n alias_output_names=combine_alias_output_names(*exprs),\n backend_version=self._backend_version,\n version=self._version,\n )\n\n def concat(\n self, items: Iterable[DaskLazyFrame], *, how: ConcatMethod\n ) -> DaskLazyFrame:\n if not items:\n msg = "No items to concatenate" # pragma: no cover\n raise AssertionError(msg)\n dfs = [i._native_frame for i in items]\n cols_0 = dfs[0].columns\n if how == "vertical":\n for i, df in enumerate(dfs[1:], start=1):\n cols_current = df.columns\n if not (\n (len(cols_current) == len(cols_0)) and (cols_current == cols_0).all()\n ):\n msg = (\n "unable to vstack, column names don't match:\n"\n f" - dataframe 0: {cols_0.to_list()}\n"\n f" - dataframe {i}: {cols_current.to_list()}\n"\n )\n raise TypeError(msg)\n return DaskLazyFrame(\n dd.concat(dfs, axis=0, join="inner"),\n backend_version=self._backend_version,\n version=self._version,\n )\n if how == "diagonal":\n return DaskLazyFrame(\n dd.concat(dfs, 
axis=0, join="outer"),\n backend_version=self._backend_version,\n version=self._version,\n )\n\n raise NotImplementedError\n\n def mean_horizontal(self, *exprs: DaskExpr) -> DaskExpr:\n def func(df: DaskLazyFrame) -> list[dx.Series]:\n expr_results = [s for _expr in exprs for s in _expr(df)]\n series = align_series_full_broadcast(df, *(s.fillna(0) for s in expr_results))\n non_na = align_series_full_broadcast(\n df, *(1 - s.isna() for s in expr_results)\n )\n num = reduce(lambda x, y: x + y, series) # pyright: ignore[reportOperatorIssue]\n den = reduce(lambda x, y: x + y, non_na) # pyright: ignore[reportOperatorIssue]\n return [cast("dx.Series", num / den)] # pyright: ignore[reportOperatorIssue]\n\n return self._expr(\n call=func,\n depth=max(x._depth for x in exprs) + 1,\n function_name="mean_horizontal",\n evaluate_output_names=combine_evaluate_output_names(*exprs),\n alias_output_names=combine_alias_output_names(*exprs),\n backend_version=self._backend_version,\n version=self._version,\n )\n\n def min_horizontal(self, *exprs: DaskExpr) -> DaskExpr:\n def func(df: DaskLazyFrame) -> list[dx.Series]:\n series = align_series_full_broadcast(\n df, *(s for _expr in exprs for s in _expr(df))\n )\n\n return [dd.concat(series, axis=1).min(axis=1)]\n\n return self._expr(\n call=func,\n depth=max(x._depth for x in exprs) + 1,\n function_name="min_horizontal",\n evaluate_output_names=combine_evaluate_output_names(*exprs),\n alias_output_names=combine_alias_output_names(*exprs),\n backend_version=self._backend_version,\n version=self._version,\n )\n\n def max_horizontal(self, *exprs: DaskExpr) -> DaskExpr:\n def func(df: DaskLazyFrame) -> list[dx.Series]:\n series = align_series_full_broadcast(\n df, *(s for _expr in exprs for s in _expr(df))\n )\n\n return [dd.concat(series, axis=1).max(axis=1)]\n\n return self._expr(\n call=func,\n depth=max(x._depth for x in exprs) + 1,\n function_name="max_horizontal",\n evaluate_output_names=combine_evaluate_output_names(*exprs),\n 
alias_output_names=combine_alias_output_names(*exprs),\n backend_version=self._backend_version,\n version=self._version,\n )\n\n def when(self, predicate: DaskExpr) -> DaskWhen:\n return DaskWhen.from_expr(predicate, context=self)\n\n def concat_str(\n self, *exprs: DaskExpr, separator: str, ignore_nulls: bool\n ) -> DaskExpr:\n def func(df: DaskLazyFrame) -> list[dx.Series]:\n expr_results = [s for _expr in exprs for s in _expr(df)]\n series = (\n s.astype(str) for s in align_series_full_broadcast(df, *expr_results)\n )\n null_mask = [s.isna() for s in align_series_full_broadcast(df, *expr_results)]\n\n if not ignore_nulls:\n null_mask_result = reduce(operator.or_, null_mask)\n result = reduce(lambda x, y: x + separator + y, series).where(\n ~null_mask_result, None\n )\n else:\n init_value, *values = [\n s.where(~nm, "") for s, nm in zip(series, null_mask)\n ]\n\n separators = (\n nm.map({True: "", False: separator}, meta=str)\n for nm in null_mask[:-1]\n )\n result = reduce(\n operator.add, (s + v for s, v in zip(separators, values)), init_value\n )\n\n return [result]\n\n return self._expr(\n call=func,\n depth=max(x._depth for x in exprs) + 1,\n function_name="concat_str",\n evaluate_output_names=getattr(\n exprs[0], "_evaluate_output_names", lambda _df: ["literal"]\n ),\n alias_output_names=getattr(exprs[0], "_alias_output_names", None),\n backend_version=self._backend_version,\n version=self._version,\n )\n\n\nclass DaskWhen(CompliantWhen[DaskLazyFrame, "dx.Series", DaskExpr]):\n @property\n def _then(self) -> type[DaskThen]:\n return DaskThen\n\n def __call__(self, df: DaskLazyFrame) -> Sequence[dx.Series]:\n then_value = (\n self._then_value(df)[0]\n if isinstance(self._then_value, DaskExpr)\n else self._then_value\n )\n otherwise_value = (\n self._otherwise_value(df)[0]\n if isinstance(self._otherwise_value, DaskExpr)\n else self._otherwise_value\n )\n\n condition = self._condition(df)[0]\n # re-evaluate DataFrame if the condition aggregates to force\n # 
then/otherwise to be evaluated against the aggregated frame\n assert self._condition._metadata is not None # noqa: S101\n if self._condition._metadata.is_scalar_like:\n new_df = df._with_native(condition.to_frame())\n condition = self._condition.broadcast(ExprKind.AGGREGATION)(df)[0]\n df = new_df\n\n if self._otherwise_value is None:\n (condition, then_series) = align_series_full_broadcast(\n df, condition, then_value\n )\n validate_comparand(condition, then_series)\n return [then_series.where(condition)] # pyright: ignore[reportArgumentType]\n (condition, then_series, otherwise_series) = align_series_full_broadcast(\n df, condition, then_value, otherwise_value\n )\n validate_comparand(condition, then_series)\n validate_comparand(condition, otherwise_series)\n return [then_series.where(condition, otherwise_series)] # pyright: ignore[reportArgumentType]\n\n\nclass DaskThen(CompliantThen[DaskLazyFrame, "dx.Series", DaskExpr], DaskExpr): ...\n
.venv\Lib\site-packages\narwhals\_dask\namespace.py
namespace.py
Python
13,387
0.95
0.232353
0.030508
vue-tools
294
2024-06-01T01:15:46.709847
GPL-3.0
false
d2ef1ccfd5aadb0adcc4de102e5376a1
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom narwhals._compliant import CompliantSelector, LazySelectorNamespace\nfrom narwhals._dask.expr import DaskExpr\n\nif TYPE_CHECKING:\n import dask.dataframe.dask_expr as dx # noqa: F401\n\n from narwhals._dask.dataframe import DaskLazyFrame # noqa: F401\n\n\nclass DaskSelectorNamespace(LazySelectorNamespace["DaskLazyFrame", "dx.Series"]):\n @property\n def _selector(self) -> type[DaskSelector]:\n return DaskSelector\n\n\nclass DaskSelector(CompliantSelector["DaskLazyFrame", "dx.Series"], DaskExpr): # type: ignore[misc]\n def _to_expr(self) -> DaskExpr:\n return DaskExpr(\n self._call,\n depth=self._depth,\n function_name=self._function_name,\n evaluate_output_names=self._evaluate_output_names,\n alias_output_names=self._alias_output_names,\n backend_version=self._backend_version,\n version=self._version,\n )\n
.venv\Lib\site-packages\narwhals\_dask\selectors.py
selectors.py
Python
984
0.95
0.166667
0
python-kit
797
2024-03-10T04:18:41.265022
Apache-2.0
false
a141b6035a3ec6b3cf7b4b783881190b
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any\n\nfrom narwhals._pandas_like.utils import select_columns_by_name\nfrom narwhals._utils import (\n Implementation,\n Version,\n isinstance_or_issubclass,\n parse_version,\n)\nfrom narwhals.dependencies import get_pandas, get_pyarrow\n\nif TYPE_CHECKING:\n from collections.abc import Sequence\n\n import dask.dataframe as dd\n import dask.dataframe.dask_expr as dx\n\n from narwhals._dask.dataframe import DaskLazyFrame, Incomplete\n from narwhals._dask.expr import DaskExpr\n from narwhals.typing import IntoDType\nelse:\n try:\n import dask.dataframe.dask_expr as dx\n except ModuleNotFoundError: # pragma: no cover\n import dask_expr as dx\n\n\ndef maybe_evaluate_expr(df: DaskLazyFrame, obj: DaskExpr | object) -> dx.Series | object:\n from narwhals._dask.expr import DaskExpr\n\n if isinstance(obj, DaskExpr):\n results = obj._call(df)\n assert len(results) == 1 # debug assertion # noqa: S101\n return results[0]\n return obj\n\n\ndef evaluate_exprs(df: DaskLazyFrame, /, *exprs: DaskExpr) -> list[tuple[str, dx.Series]]:\n native_results: list[tuple[str, dx.Series]] = []\n for expr in exprs:\n native_series_list = expr(df)\n aliases = expr._evaluate_aliases(df)\n if len(aliases) != len(native_series_list): # pragma: no cover\n msg = f"Internal error: got aliases {aliases}, but only got {len(native_series_list)} results"\n raise AssertionError(msg)\n native_results.extend(zip(aliases, native_series_list))\n return native_results\n\n\ndef align_series_full_broadcast(\n df: DaskLazyFrame, *series: dx.Series | object\n) -> Sequence[dx.Series]:\n return [\n s if isinstance(s, dx.Series) else df._native_frame.assign(_tmp=s)["_tmp"]\n for s in series\n ] # pyright: ignore[reportReturnType]\n\n\ndef add_row_index(\n frame: dd.DataFrame,\n name: str,\n backend_version: tuple[int, ...],\n implementation: Implementation,\n) -> dd.DataFrame:\n original_cols = frame.columns\n df: Incomplete = 
frame.assign(**{name: 1})\n return select_columns_by_name(\n df.assign(**{name: df[name].cumsum(method="blelloch") - 1}),\n [name, *original_cols],\n backend_version,\n implementation,\n )\n\n\ndef validate_comparand(lhs: dx.Series, rhs: dx.Series) -> None:\n if not dx.expr.are_co_aligned(lhs._expr, rhs._expr): # pragma: no cover\n # are_co_aligned is a method which cheaply checks if two Dask expressions\n # have the same index, and therefore don't require index alignment.\n # If someone only operates on a Dask DataFrame via expressions, then this\n # should always be the case: expression outputs (by definition) all come from the\n # same input dataframe, and Dask Series does not have any operations which\n # change the index. Nonetheless, we perform this safety check anyway.\n\n # However, we still need to carefully vet which methods we support for Dask, to\n # avoid issues where `are_co_aligned` doesn't do what we want it to do:\n # https://github.com/dask/dask-expr/issues/1112.\n msg = "Objects are not co-aligned, so this operation is not supported for Dask backend"\n raise RuntimeError(msg)\n\n\ndef narwhals_to_native_dtype(dtype: IntoDType, version: Version) -> Any: # noqa: C901, PLR0912\n dtypes = version.dtypes\n if isinstance_or_issubclass(dtype, dtypes.Float64):\n return "float64"\n if isinstance_or_issubclass(dtype, dtypes.Float32):\n return "float32"\n if isinstance_or_issubclass(dtype, dtypes.Int64):\n return "int64"\n if isinstance_or_issubclass(dtype, dtypes.Int32):\n return "int32"\n if isinstance_or_issubclass(dtype, dtypes.Int16):\n return "int16"\n if isinstance_or_issubclass(dtype, dtypes.Int8):\n return "int8"\n if isinstance_or_issubclass(dtype, dtypes.UInt64):\n return "uint64"\n if isinstance_or_issubclass(dtype, dtypes.UInt32):\n return "uint32"\n if isinstance_or_issubclass(dtype, dtypes.UInt16):\n return "uint16"\n if isinstance_or_issubclass(dtype, dtypes.UInt8):\n return "uint8"\n if isinstance_or_issubclass(dtype, dtypes.String):\n if 
(pd := get_pandas()) is not None and parse_version(pd) >= (2, 0, 0):\n if get_pyarrow() is not None:\n return "string[pyarrow]"\n return "string[python]" # pragma: no cover\n return "object" # pragma: no cover\n if isinstance_or_issubclass(dtype, dtypes.Boolean):\n return "bool"\n if isinstance_or_issubclass(dtype, dtypes.Enum):\n if version is Version.V1:\n msg = "Converting to Enum is not supported in narwhals.stable.v1"\n raise NotImplementedError(msg)\n if isinstance(dtype, dtypes.Enum):\n import pandas as pd\n\n # NOTE: `pandas-stubs.core.dtypes.dtypes.CategoricalDtype.categories` is too narrow\n # Should be one of the `ListLike*` types\n # https://github.com/pandas-dev/pandas-stubs/blob/8434bde95460b996323cc8c0fea7b0a8bb00ea26/pandas-stubs/_typing.pyi#L497-L505\n return pd.CategoricalDtype(dtype.categories, ordered=True) # pyright: ignore[reportArgumentType]\n msg = "Can not cast / initialize Enum without categories present"\n raise ValueError(msg)\n\n if isinstance_or_issubclass(dtype, dtypes.Categorical):\n return "category"\n if isinstance_or_issubclass(dtype, dtypes.Datetime):\n return "datetime64[us]"\n if isinstance_or_issubclass(dtype, dtypes.Date):\n return "date32[day][pyarrow]"\n if isinstance_or_issubclass(dtype, dtypes.Duration):\n return "timedelta64[ns]"\n if isinstance_or_issubclass(dtype, dtypes.List): # pragma: no cover\n msg = "Converting to List dtype is not supported yet"\n raise NotImplementedError(msg)\n if isinstance_or_issubclass(dtype, dtypes.Struct): # pragma: no cover\n msg = "Converting to Struct dtype is not supported yet"\n raise NotImplementedError(msg)\n if isinstance_or_issubclass(dtype, dtypes.Array): # pragma: no cover\n msg = "Converting to Array dtype is not supported yet"\n raise NotImplementedError(msg)\n if isinstance_or_issubclass(dtype, dtypes.Time): # pragma: no cover\n msg = "Converting to Time dtype is not supported yet"\n raise NotImplementedError(msg)\n if isinstance_or_issubclass(dtype, dtypes.Binary): # pragma: 
no cover\n msg = "Converting to Binary dtype is not supported yet"\n raise NotImplementedError(msg)\n\n msg = f"Unknown dtype: {dtype}" # pragma: no cover\n raise AssertionError(msg)\n
.venv\Lib\site-packages\narwhals\_dask\utils.py
utils.py
Python
6,716
0.95
0.265432
0.085714
python-kit
606
2025-03-10T22:58:28.181770
GPL-3.0
false
989dd0673221c97d69daa2fa544d3faa
\n\n
.venv\Lib\site-packages\narwhals\_dask\__pycache__\dataframe.cpython-313.pyc
dataframe.cpython-313.pyc
Other
23,214
0.95
0.007752
0.004292
python-kit
282
2024-04-04T19:02:14.916470
BSD-3-Clause
false
1c4b7ff8f2546955e1c9fb47c16023f3
\n\n
.venv\Lib\site-packages\narwhals\_dask\__pycache__\expr.cpython-313.pyc
expr.cpython-313.pyc
Other
39,660
0.95
0.020115
0
python-kit
0
2023-11-13T19:17:17.902956
BSD-3-Clause
false
5143ed400a882407690fe090a370e38a
\n\n
.venv\Lib\site-packages\narwhals\_dask\__pycache__\expr_dt.cpython-313.pyc
expr_dt.cpython-313.pyc
Other
13,192
0.8
0.012658
0
vue-tools
768
2024-02-11T16:53:53.594077
MIT
false
74094e1a8773d615b9a259276a3b0b18
\n\n
.venv\Lib\site-packages\narwhals\_dask\__pycache__\expr_str.cpython-313.pyc
expr_str.cpython-313.pyc
Other
7,450
0.8
0
0
vue-tools
57
2023-10-17T11:13:35.591195
Apache-2.0
false
11abf7ea04855d63bb8f35908a062a9c
\n\n
.venv\Lib\site-packages\narwhals\_dask\__pycache__\group_by.cpython-313.pyc
group_by.cpython-313.pyc
Other
6,046
0.95
0
0
node-utils
406
2025-05-05T08:37:21.846551
BSD-3-Clause
false
f806418b43e586b8f52f495832a876f0
\n\n
.venv\Lib\site-packages\narwhals\_dask\__pycache__\namespace.cpython-313.pyc
namespace.cpython-313.pyc
Other
19,760
0.95
0
0.016393
vue-tools
845
2025-01-20T11:49:01.454726
GPL-3.0
false
80339bfe7ec6f3eed811999024c889a9
\n\n
.venv\Lib\site-packages\narwhals\_dask\__pycache__\selectors.cpython-313.pyc
selectors.cpython-313.pyc
Other
1,855
0.95
0
0
node-utils
809
2024-06-01T18:38:32.612196
MIT
false
d16596ed093c9bb59967c5a4b2f96612
\n\n
.venv\Lib\site-packages\narwhals\_dask\__pycache__\utils.cpython-313.pyc
utils.cpython-313.pyc
Other
7,205
0.8
0.015873
0.016393
vue-tools
264
2025-02-03T18:06:47.962468
GPL-3.0
false
6a5fadca59b609a435ea7aa737fb1be2
\n\n
.venv\Lib\site-packages\narwhals\_dask\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
189
0.7
0
0
python-kit
694
2024-10-10T22:41:38.834267
BSD-3-Clause
false
06ba734a8f645f855ff96a727b39c1e3
from __future__ import annotations\n\nfrom functools import reduce\nfrom operator import and_\nfrom typing import TYPE_CHECKING, Any\n\nimport duckdb\nfrom duckdb import StarExpression\n\nfrom narwhals._duckdb.utils import (\n DeferredTimeZone,\n F,\n col,\n evaluate_exprs,\n lit,\n native_to_narwhals_dtype,\n window_expression,\n)\nfrom narwhals._utils import (\n Implementation,\n Version,\n generate_temporary_column_name,\n not_implemented,\n parse_columns_to_drop,\n parse_version,\n requires,\n validate_backend_version,\n)\nfrom narwhals.dependencies import get_duckdb\nfrom narwhals.exceptions import InvalidOperationError\nfrom narwhals.typing import CompliantLazyFrame\n\nif TYPE_CHECKING:\n from collections.abc import Iterator, Mapping, Sequence\n from types import ModuleType\n\n import pandas as pd\n import pyarrow as pa\n from duckdb import Expression\n from duckdb.typing import DuckDBPyType\n from typing_extensions import Self, TypeIs\n\n from narwhals._compliant.typing import CompliantDataFrameAny\n from narwhals._duckdb.expr import DuckDBExpr\n from narwhals._duckdb.group_by import DuckDBGroupBy\n from narwhals._duckdb.namespace import DuckDBNamespace\n from narwhals._duckdb.series import DuckDBInterchangeSeries\n from narwhals._utils import _FullContext\n from narwhals.dataframe import LazyFrame\n from narwhals.dtypes import DType\n from narwhals.stable.v1 import DataFrame as DataFrameV1\n from narwhals.typing import AsofJoinStrategy, JoinStrategy, LazyUniqueKeepStrategy\n\n\nclass DuckDBLazyFrame(\n CompliantLazyFrame[\n "DuckDBExpr",\n "duckdb.DuckDBPyRelation",\n "LazyFrame[duckdb.DuckDBPyRelation] | DataFrameV1[duckdb.DuckDBPyRelation]",\n ]\n):\n _implementation = Implementation.DUCKDB\n\n def __init__(\n self,\n df: duckdb.DuckDBPyRelation,\n *,\n backend_version: tuple[int, ...],\n version: Version,\n ) -> None:\n self._native_frame: duckdb.DuckDBPyRelation = df\n self._version = version\n self._backend_version = backend_version\n 
self._cached_native_schema: dict[str, DuckDBPyType] | None = None\n self._cached_columns: list[str] | None = None\n validate_backend_version(self._implementation, self._backend_version)\n\n @staticmethod\n def _is_native(obj: duckdb.DuckDBPyRelation | Any) -> TypeIs[duckdb.DuckDBPyRelation]:\n return isinstance(obj, duckdb.DuckDBPyRelation)\n\n @classmethod\n def from_native(\n cls, data: duckdb.DuckDBPyRelation, /, *, context: _FullContext\n ) -> Self:\n return cls(\n data, backend_version=context._backend_version, version=context._version\n )\n\n def to_narwhals(\n self, *args: Any, **kwds: Any\n ) -> LazyFrame[duckdb.DuckDBPyRelation] | DataFrameV1[duckdb.DuckDBPyRelation]:\n if self._version is Version.MAIN:\n return self._version.lazyframe(self, level="lazy")\n\n from narwhals.stable.v1 import DataFrame as DataFrameV1\n\n return DataFrameV1(self, level="interchange") # type: ignore[no-any-return]\n\n def __narwhals_dataframe__(self) -> Self: # pragma: no cover\n # Keep around for backcompat.\n if self._version is not Version.V1:\n msg = "__narwhals_dataframe__ is not implemented for DuckDBLazyFrame"\n raise AttributeError(msg)\n return self\n\n def __narwhals_lazyframe__(self) -> Self:\n return self\n\n def __native_namespace__(self) -> ModuleType:\n return get_duckdb() # type: ignore[no-any-return]\n\n def __narwhals_namespace__(self) -> DuckDBNamespace:\n from narwhals._duckdb.namespace import DuckDBNamespace\n\n return DuckDBNamespace(\n backend_version=self._backend_version, version=self._version\n )\n\n def get_column(self, name: str) -> DuckDBInterchangeSeries:\n from narwhals._duckdb.series import DuckDBInterchangeSeries\n\n return DuckDBInterchangeSeries(self.native.select(name), version=self._version)\n\n def _iter_columns(self) -> Iterator[Expression]:\n for name in self.columns:\n yield col(name)\n\n def collect(\n self, backend: ModuleType | Implementation | str | None, **kwargs: Any\n ) -> CompliantDataFrameAny:\n if backend is None or backend is 
Implementation.PYARROW:\n import pyarrow as pa # ignore-banned-import\n\n from narwhals._arrow.dataframe import ArrowDataFrame\n\n return ArrowDataFrame(\n self.native.arrow(),\n backend_version=parse_version(pa),\n version=self._version,\n validate_column_names=True,\n )\n\n if backend is Implementation.PANDAS:\n import pandas as pd # ignore-banned-import\n\n from narwhals._pandas_like.dataframe import PandasLikeDataFrame\n\n return PandasLikeDataFrame(\n self.native.df(),\n implementation=Implementation.PANDAS,\n backend_version=parse_version(pd),\n version=self._version,\n validate_column_names=True,\n )\n\n if backend is Implementation.POLARS:\n import polars as pl # ignore-banned-import\n\n from narwhals._polars.dataframe import PolarsDataFrame\n\n return PolarsDataFrame(\n self.native.pl(), backend_version=parse_version(pl), version=self._version\n )\n\n msg = f"Unsupported `backend` value: {backend}" # pragma: no cover\n raise ValueError(msg) # pragma: no cover\n\n def head(self, n: int) -> Self:\n return self._with_native(self.native.limit(n))\n\n def simple_select(self, *column_names: str) -> Self:\n return self._with_native(self.native.select(*column_names))\n\n def aggregate(self, *exprs: DuckDBExpr) -> Self:\n selection = [val.alias(name) for name, val in evaluate_exprs(self, *exprs)]\n return self._with_native(self.native.aggregate(selection)) # type: ignore[arg-type]\n\n def select(self, *exprs: DuckDBExpr) -> Self:\n selection = (val.alias(name) for name, val in evaluate_exprs(self, *exprs))\n return self._with_native(self.native.select(*selection))\n\n def drop(self, columns: Sequence[str], *, strict: bool) -> Self:\n columns_to_drop = parse_columns_to_drop(self, columns, strict=strict)\n selection = (name for name in self.columns if name not in columns_to_drop)\n return self._with_native(self.native.select(*selection))\n\n def lazy(self, *, backend: Implementation | None = None) -> Self:\n # The `backend`` argument has no effect but we keep it here 
for\n # backwards compatibility because in `narwhals.stable.v1`\n # function `.from_native()` will return a DataFrame for DuckDB.\n\n if backend is not None: # pragma: no cover\n msg = "`backend` argument is not supported for DuckDB"\n raise ValueError(msg)\n return self\n\n def with_columns(self, *exprs: DuckDBExpr) -> Self:\n new_columns_map = dict(evaluate_exprs(self, *exprs))\n result = [\n new_columns_map.pop(name).alias(name)\n if name in new_columns_map\n else col(name)\n for name in self.columns\n ]\n result.extend(value.alias(name) for name, value in new_columns_map.items())\n return self._with_native(self.native.select(*result))\n\n def filter(self, predicate: DuckDBExpr) -> Self:\n # `[0]` is safe as the predicate's expression only returns a single column\n mask = predicate(self)[0]\n return self._with_native(self.native.filter(mask))\n\n @property\n def schema(self) -> dict[str, DType]:\n if self._cached_native_schema is None:\n # Note: prefer `self._cached_native_schema` over `functools.cached_property`\n # due to Python3.13 failures.\n self._cached_native_schema = dict(zip(self.columns, self.native.types))\n\n deferred_time_zone = DeferredTimeZone(self.native)\n return {\n column_name: native_to_narwhals_dtype(\n duckdb_dtype, self._version, deferred_time_zone\n )\n for column_name, duckdb_dtype in zip(self.native.columns, self.native.types)\n }\n\n @property\n def columns(self) -> list[str]:\n if self._cached_columns is None:\n self._cached_columns = (\n list(self.schema)\n if self._cached_native_schema is not None\n else self.native.columns\n )\n return self._cached_columns\n\n def to_pandas(self) -> pd.DataFrame:\n # only if version is v1, keep around for backcompat\n import pandas as pd # ignore-banned-import()\n\n if parse_version(pd) >= (1, 0, 0):\n return self.native.df()\n else: # pragma: no cover\n msg = f"Conversion to pandas requires 'pandas>=1.0.0', found {pd.__version__}"\n raise NotImplementedError(msg)\n\n def to_arrow(self) -> 
pa.Table:\n # only if version is v1, keep around for backcompat\n return self.native.arrow()\n\n def _with_version(self, version: Version) -> Self:\n return self.__class__(\n self.native, version=version, backend_version=self._backend_version\n )\n\n def _with_native(self, df: duckdb.DuckDBPyRelation) -> Self:\n return self.__class__(\n df, backend_version=self._backend_version, version=self._version\n )\n\n def group_by(\n self, keys: Sequence[str] | Sequence[DuckDBExpr], *, drop_null_keys: bool\n ) -> DuckDBGroupBy:\n from narwhals._duckdb.group_by import DuckDBGroupBy\n\n return DuckDBGroupBy(self, keys, drop_null_keys=drop_null_keys)\n\n def rename(self, mapping: Mapping[str, str]) -> Self:\n df = self.native\n selection = (\n col(name).alias(mapping[name]) if name in mapping else col(name)\n for name in df.columns\n )\n return self._with_native(self.native.select(*selection))\n\n def join(\n self,\n other: Self,\n *,\n how: JoinStrategy,\n left_on: Sequence[str] | None,\n right_on: Sequence[str] | None,\n suffix: str,\n ) -> Self:\n native_how = "outer" if how == "full" else how\n\n if native_how == "cross":\n if self._backend_version < (1, 1, 4):\n msg = f"'duckdb>=1.1.4' is required for cross-join, found version: {self._backend_version}"\n raise NotImplementedError(msg)\n rel = self.native.set_alias("lhs").cross(other.native.set_alias("rhs"))\n else:\n # help mypy\n assert left_on is not None # noqa: S101\n assert right_on is not None # noqa: S101\n it = (\n col(f'lhs."{left}"') == col(f'rhs."{right}"')\n for left, right in zip(left_on, right_on)\n )\n condition: Expression = reduce(and_, it)\n rel = self.native.set_alias("lhs").join(\n other.native.set_alias("rhs"),\n # NOTE: Fixed in `--pre` https://github.com/duckdb/duckdb/pull/16933\n condition=condition, # type: ignore[arg-type, unused-ignore]\n how=native_how,\n )\n\n if native_how in {"inner", "left", "cross", "outer"}:\n select = [col(f'lhs."{x}"') for x in self.columns]\n for name in 
other.columns:\n col_in_lhs: bool = name in self.columns\n if native_how == "outer" and not col_in_lhs:\n select.append(col(f'rhs."{name}"'))\n elif (native_how == "outer") or (\n col_in_lhs and (right_on is None or name not in right_on)\n ):\n select.append(col(f'rhs."{name}"').alias(f"{name}{suffix}"))\n elif right_on is None or name not in right_on:\n select.append(col(name))\n res = rel.select(*select).set_alias(self.native.alias)\n else: # semi, anti\n res = rel.select("lhs.*").set_alias(self.native.alias)\n\n return self._with_native(res)\n\n def join_asof(\n self,\n other: Self,\n *,\n left_on: str,\n right_on: str,\n by_left: Sequence[str] | None,\n by_right: Sequence[str] | None,\n strategy: AsofJoinStrategy,\n suffix: str,\n ) -> Self:\n lhs = self.native\n rhs = other.native\n conditions: list[Expression] = []\n if by_left is not None and by_right is not None:\n conditions.extend(\n col(f'lhs."{left}"') == col(f'rhs."{right}"')\n for left, right in zip(by_left, by_right)\n )\n else:\n by_left = by_right = []\n if strategy == "backward":\n conditions.append(col(f'lhs."{left_on}"') >= col(f'rhs."{right_on}"'))\n elif strategy == "forward":\n conditions.append(col(f'lhs."{left_on}"') <= col(f'rhs."{right_on}"'))\n else:\n msg = "Only 'backward' and 'forward' strategies are currently supported for DuckDB"\n raise NotImplementedError(msg)\n condition: Expression = reduce(and_, conditions)\n select = ["lhs.*"]\n for name in rhs.columns:\n if name in lhs.columns and (\n right_on is None or name not in {right_on, *by_right}\n ):\n select.append(f'rhs."{name}" as "{name}{suffix}"')\n elif right_on is None or name not in {right_on, *by_right}:\n select.append(str(col(name)))\n # Replace with Python API call once\n # https://github.com/duckdb/duckdb/discussions/16947 is addressed.\n query = f"""\n SELECT {",".join(select)}\n FROM lhs\n ASOF LEFT JOIN rhs\n ON {condition}\n """ # noqa: S608\n return self._with_native(duckdb.sql(query))\n\n def collect_schema(self) 
-> dict[str, DType]:\n return self.schema\n\n def unique(\n self, subset: Sequence[str] | None, *, keep: LazyUniqueKeepStrategy\n ) -> Self:\n if subset_ := subset if keep == "any" else (subset or self.columns):\n # Sanitise input\n if error := self._check_columns_exist(subset_):\n raise error\n idx_name = generate_temporary_column_name(8, self.columns)\n count_name = generate_temporary_column_name(8, [*self.columns, idx_name])\n name = count_name if keep == "none" else idx_name\n idx_expr = window_expression(F("row_number"), subset_).alias(idx_name)\n count_expr = window_expression(\n F("count", StarExpression()), subset_, ()\n ).alias(count_name)\n return self._with_native(\n self.native.select(StarExpression(), idx_expr, count_expr)\n .filter(col(name) == lit(1))\n .select(StarExpression(exclude=[count_name, idx_name]))\n )\n return self._with_native(self.native.unique(", ".join(self.columns)))\n\n def sort(self, *by: str, descending: bool | Sequence[bool], nulls_last: bool) -> Self:\n if isinstance(descending, bool):\n descending = [descending] * len(by)\n if nulls_last:\n it = (\n col(name).nulls_last() if not desc else col(name).desc().nulls_last()\n for name, desc in zip(by, descending)\n )\n else:\n it = (\n col(name).nulls_first() if not desc else col(name).desc().nulls_first()\n for name, desc in zip(by, descending)\n )\n return self._with_native(self.native.sort(*it))\n\n def drop_nulls(self, subset: Sequence[str] | None) -> Self:\n subset_ = subset if subset is not None else self.columns\n keep_condition = reduce(and_, (col(name).isnotnull() for name in subset_))\n return self._with_native(self.native.filter(keep_condition))\n\n def explode(self, columns: Sequence[str]) -> Self:\n dtypes = self._version.dtypes\n schema = self.collect_schema()\n for name in columns:\n dtype = schema[name]\n if dtype != dtypes.List:\n msg = (\n f"`explode` operation not supported for dtype `{dtype}`, "\n "expected List type"\n )\n raise InvalidOperationError(msg)\n\n if 
len(columns) != 1:\n msg = (\n "Exploding on multiple columns is not supported with DuckDB backend since "\n "we cannot guarantee that the exploded columns have matching element counts."\n )\n raise NotImplementedError(msg)\n\n col_to_explode = col(columns[0])\n rel = self.native\n original_columns = self.columns\n\n not_null_condition = col_to_explode.isnotnull() & F("len", col_to_explode) > lit(\n 0\n )\n non_null_rel = rel.filter(not_null_condition).select(\n *(\n F("unnest", col_to_explode).alias(name) if name in columns else name\n for name in original_columns\n )\n )\n\n null_rel = rel.filter(~not_null_condition).select(\n *(\n lit(None).alias(name) if name in columns else name\n for name in original_columns\n )\n )\n\n return self._with_native(non_null_rel.union(null_rel))\n\n def unpivot(\n self,\n on: Sequence[str] | None,\n index: Sequence[str] | None,\n variable_name: str,\n value_name: str,\n ) -> Self:\n index_ = [] if index is None else index\n on_ = [c for c in self.columns if c not in index_] if on is None else on\n\n if variable_name == "":\n msg = "`variable_name` cannot be empty string for duckdb backend."\n raise NotImplementedError(msg)\n\n if value_name == "":\n msg = "`value_name` cannot be empty string for duckdb backend."\n raise NotImplementedError(msg)\n\n unpivot_on = ", ".join(str(col(name)) for name in on_)\n rel = self.native # noqa: F841\n # Replace with Python API once\n # https://github.com/duckdb/duckdb/discussions/16980 is addressed.\n query = f"""\n unpivot rel\n on {unpivot_on}\n into\n name "{variable_name}"\n value "{value_name}"\n """\n return self._with_native(\n duckdb.sql(query).select(*[*index_, variable_name, value_name])\n )\n\n @requires.backend_version((1, 3))\n def with_row_index(self, name: str, order_by: Sequence[str]) -> Self:\n expr = (window_expression(F("row_number"), order_by=order_by) - lit(1)).alias(\n name\n )\n return self._with_native(self.native.select(expr, StarExpression()))\n\n gather_every = 
not_implemented.deprecated(\n "`LazyFrame.gather_every` is deprecated and will be removed in a future version."\n )\n tail = not_implemented.deprecated(\n "`LazyFrame.tail` is deprecated and will be removed in a future version."\n )\n
.venv\Lib\site-packages\narwhals\_duckdb\dataframe.py
dataframe.py
Python
19,153
0.95
0.22332
0.048499
python-kit
46
2024-08-15T05:20:15.058779
GPL-3.0
false
11a2f6f523c175e7f5af062c0abd5a9e
from __future__ import annotations\n\nimport operator\nfrom typing import TYPE_CHECKING, Any, Callable, Literal, cast\n\nfrom duckdb import CoalesceOperator, StarExpression\nfrom duckdb.typing import DuckDBPyType\n\nfrom narwhals._compliant import LazyExpr\nfrom narwhals._compliant.window import WindowInputs\nfrom narwhals._duckdb.expr_dt import DuckDBExprDateTimeNamespace\nfrom narwhals._duckdb.expr_list import DuckDBExprListNamespace\nfrom narwhals._duckdb.expr_str import DuckDBExprStringNamespace\nfrom narwhals._duckdb.expr_struct import DuckDBExprStructNamespace\nfrom narwhals._duckdb.utils import (\n F,\n col,\n lit,\n narwhals_to_native_dtype,\n when,\n window_expression,\n)\nfrom narwhals._expression_parsing import (\n ExprKind,\n combine_alias_output_names,\n combine_evaluate_output_names,\n)\nfrom narwhals._utils import Implementation, not_implemented, requires\n\nif TYPE_CHECKING:\n from collections.abc import Iterable, Sequence\n\n from duckdb import Expression\n from typing_extensions import Self\n\n from narwhals._compliant.typing import (\n AliasNames,\n EvalNames,\n EvalSeries,\n WindowFunction,\n )\n from narwhals._duckdb.dataframe import DuckDBLazyFrame\n from narwhals._duckdb.namespace import DuckDBNamespace\n from narwhals._duckdb.typing import WindowExpressionKwargs\n from narwhals._expression_parsing import ExprMetadata\n from narwhals._utils import Version, _FullContext\n from narwhals.typing import (\n FillNullStrategy,\n IntoDType,\n NonNestedLiteral,\n NumericLiteral,\n RankMethod,\n RollingInterpolationMethod,\n TemporalLiteral,\n )\n\n DuckDBWindowFunction = WindowFunction[DuckDBLazyFrame, Expression]\n DuckDBWindowInputs = WindowInputs[Expression]\n\n\nclass DuckDBExpr(LazyExpr["DuckDBLazyFrame", "Expression"]):\n _implementation = Implementation.DUCKDB\n\n def __init__(\n self,\n call: EvalSeries[DuckDBLazyFrame, Expression],\n window_function: DuckDBWindowFunction | None = None,\n *,\n evaluate_output_names: 
EvalNames[DuckDBLazyFrame],\n alias_output_names: AliasNames | None,\n backend_version: tuple[int, ...],\n version: Version,\n ) -> None:\n self._call = call\n self._evaluate_output_names = evaluate_output_names\n self._alias_output_names = alias_output_names\n self._backend_version = backend_version\n self._version = version\n self._metadata: ExprMetadata | None = None\n self._window_function: DuckDBWindowFunction | None = window_function\n\n @property\n def window_function(self) -> DuckDBWindowFunction:\n def default_window_func(\n df: DuckDBLazyFrame, inputs: DuckDBWindowInputs\n ) -> list[Expression]:\n assert not inputs.order_by # noqa: S101\n return [\n window_expression(expr, inputs.partition_by, inputs.order_by)\n for expr in self(df)\n ]\n\n return self._window_function or default_window_func\n\n def __call__(self, df: DuckDBLazyFrame) -> Sequence[Expression]:\n return self._call(df)\n\n def __narwhals_expr__(self) -> None: ...\n\n def __narwhals_namespace__(self) -> DuckDBNamespace: # pragma: no cover\n # Unused, just for compatibility with PandasLikeExpr\n from narwhals._duckdb.namespace import DuckDBNamespace\n\n return DuckDBNamespace(\n backend_version=self._backend_version, version=self._version\n )\n\n def _cum_window_func(\n self,\n func_name: Literal["sum", "max", "min", "count", "product"],\n *,\n reverse: bool,\n ) -> DuckDBWindowFunction:\n def func(df: DuckDBLazyFrame, inputs: DuckDBWindowInputs) -> list[Expression]:\n return [\n window_expression(\n F(func_name, expr),\n inputs.partition_by,\n inputs.order_by,\n descending=reverse,\n nulls_last=reverse,\n rows_start="unbounded preceding",\n rows_end="current row",\n )\n for expr in self(df)\n ]\n\n return func\n\n def _rolling_window_func(\n self,\n func_name: Literal["sum", "mean", "std", "var"],\n window_size: int,\n min_samples: int,\n ddof: int | None = None,\n *,\n center: bool,\n ) -> DuckDBWindowFunction:\n supported_funcs = ["sum", "mean", "std", "var"]\n if center:\n half = 
(window_size - 1) // 2\n remainder = (window_size - 1) % 2\n start = f"{half + remainder} preceding"\n end = f"{half} following"\n else:\n start = f"{window_size - 1} preceding"\n end = "current row"\n\n def func(df: DuckDBLazyFrame, inputs: DuckDBWindowInputs) -> list[Expression]:\n if func_name in {"sum", "mean"}:\n func_: str = func_name\n elif func_name == "var" and ddof == 0:\n func_ = "var_pop"\n elif func_name in "var" and ddof == 1:\n func_ = "var_samp"\n elif func_name == "std" and ddof == 0:\n func_ = "stddev_pop"\n elif func_name == "std" and ddof == 1:\n func_ = "stddev_samp"\n elif func_name in {"var", "std"}: # pragma: no cover\n msg = f"Only ddof=0 and ddof=1 are currently supported for rolling_{func_name}."\n raise ValueError(msg)\n else: # pragma: no cover\n msg = f"Only the following functions are supported: {supported_funcs}.\nGot: {func_name}."\n raise ValueError(msg)\n window_kwargs: WindowExpressionKwargs = {\n "partition_by": inputs.partition_by,\n "order_by": inputs.order_by,\n "rows_start": start,\n "rows_end": end,\n }\n return [\n when(\n window_expression(F("count", expr), **window_kwargs)\n >= lit(min_samples),\n window_expression(F(func_, expr), **window_kwargs),\n )\n for expr in self(df)\n ]\n\n return func\n\n def broadcast(self, kind: Literal[ExprKind.AGGREGATION, ExprKind.LITERAL]) -> Self:\n if kind is ExprKind.LITERAL:\n return self\n if self._backend_version < (1, 3):\n msg = "At least version 1.3 of DuckDB is required for binary operations between aggregates and columns."\n raise NotImplementedError(msg)\n return self.over([lit(1)], [])\n\n @classmethod\n def from_column_names(\n cls,\n evaluate_column_names: EvalNames[DuckDBLazyFrame],\n /,\n *,\n context: _FullContext,\n ) -> Self:\n def func(df: DuckDBLazyFrame) -> list[Expression]:\n return [col(name) for name in evaluate_column_names(df)]\n\n return cls(\n func,\n evaluate_output_names=evaluate_column_names,\n alias_output_names=None,\n 
backend_version=context._backend_version,\n version=context._version,\n )\n\n @classmethod\n def from_column_indices(cls, *column_indices: int, context: _FullContext) -> Self:\n def func(df: DuckDBLazyFrame) -> list[Expression]:\n columns = df.columns\n return [col(columns[i]) for i in column_indices]\n\n return cls(\n func,\n evaluate_output_names=cls._eval_names_indices(column_indices),\n alias_output_names=None,\n backend_version=context._backend_version,\n version=context._version,\n )\n\n @classmethod\n def _from_elementwise_horizontal_op(\n cls, func: Callable[[Iterable[Expression]], Expression], *exprs: Self\n ) -> Self:\n def call(df: DuckDBLazyFrame) -> list[Expression]:\n cols = (col for _expr in exprs for col in _expr(df))\n return [func(cols)]\n\n def window_function(\n df: DuckDBLazyFrame, window_inputs: DuckDBWindowInputs\n ) -> list[Expression]:\n cols = (\n col for _expr in exprs for col in _expr.window_function(df, window_inputs)\n )\n return [func(cols)]\n\n context = exprs[0]\n return cls(\n call=call,\n window_function=window_function,\n evaluate_output_names=combine_evaluate_output_names(*exprs),\n alias_output_names=combine_alias_output_names(*exprs),\n backend_version=context._backend_version,\n version=context._version,\n )\n\n def _callable_to_eval_series(\n self, call: Callable[..., Expression], /, **expressifiable_args: Self | Any\n ) -> EvalSeries[DuckDBLazyFrame, Expression]:\n def func(df: DuckDBLazyFrame) -> list[Expression]:\n native_series_list = self(df)\n other_native_series = {\n key: df._evaluate_expr(value) if self._is_expr(value) else lit(value)\n for key, value in expressifiable_args.items()\n }\n return [\n call(native_series, **other_native_series)\n for native_series in native_series_list\n ]\n\n return func\n\n def _push_down_window_function(\n self, call: Callable[..., Expression], /, **expressifiable_args: Self | Any\n ) -> DuckDBWindowFunction:\n def window_f(df: DuckDBLazyFrame, inputs: DuckDBWindowInputs) -> 
list[Expression]:\n # If a function `f` is elementwise, and `g` is another function, then\n # - `f(g) over (window)`\n # - `f(g over (window))\n # are equivalent.\n # Make sure to only use with if `call` is elementwise!\n native_series_list = self.window_function(df, inputs)\n other_native_series = {\n key: df._evaluate_window_expr(value, inputs)\n if self._is_expr(value)\n else lit(value)\n for key, value in expressifiable_args.items()\n }\n return [\n call(native_series, **other_native_series)\n for native_series in native_series_list\n ]\n\n return window_f\n\n def _with_callable(\n self, call: Callable[..., Expression], /, **expressifiable_args: Self | Any\n ) -> Self:\n """Create expression from callable.\n\n Arguments:\n call: Callable from compliant DataFrame to native Expression\n expr_name: Expression name\n expressifiable_args: arguments pass to expression which should be parsed\n as expressions (e.g. in `nw.col('a').is_between('b', 'c')`)\n """\n return self.__class__(\n self._callable_to_eval_series(call, **expressifiable_args),\n evaluate_output_names=self._evaluate_output_names,\n alias_output_names=self._alias_output_names,\n backend_version=self._backend_version,\n version=self._version,\n )\n\n def _with_elementwise(\n self, call: Callable[..., Expression], /, **expressifiable_args: Self | Any\n ) -> Self:\n return self.__class__(\n self._callable_to_eval_series(call, **expressifiable_args),\n self._push_down_window_function(call, **expressifiable_args),\n evaluate_output_names=self._evaluate_output_names,\n alias_output_names=self._alias_output_names,\n backend_version=self._backend_version,\n version=self._version,\n )\n\n def _with_binary(self, op: Callable[..., Expression], other: Self | Any) -> Self:\n return self.__class__(\n self._callable_to_eval_series(op, other=other),\n self._push_down_window_function(op, other=other),\n evaluate_output_names=self._evaluate_output_names,\n alias_output_names=self._alias_output_names,\n 
backend_version=self._backend_version,\n version=self._version,\n )\n\n def _with_alias_output_names(self, func: AliasNames | None, /) -> Self:\n return type(self)(\n self._call,\n self._window_function,\n evaluate_output_names=self._evaluate_output_names,\n alias_output_names=func,\n backend_version=self._backend_version,\n version=self._version,\n )\n\n def _with_window_function(self, window_function: DuckDBWindowFunction) -> Self:\n return self.__class__(\n self._call,\n window_function,\n evaluate_output_names=self._evaluate_output_names,\n alias_output_names=self._alias_output_names,\n backend_version=self._backend_version,\n version=self._version,\n )\n\n @classmethod\n def _alias_native(cls, expr: Expression, name: str) -> Expression:\n return expr.alias(name)\n\n def __invert__(self) -> Self:\n invert = cast("Callable[..., Expression]", operator.invert)\n return self._with_elementwise(invert)\n\n def abs(self) -> Self:\n return self._with_elementwise(lambda expr: F("abs", expr))\n\n def mean(self) -> Self:\n return self._with_callable(lambda expr: F("mean", expr))\n\n def skew(self) -> Self:\n def func(expr: Expression) -> Expression:\n count = F("count", expr)\n # Adjust population skewness by correction factor to get sample skewness\n sample_skewness = (\n F("skewness", expr)\n * (count - lit(2))\n / F("sqrt", count * (count - lit(1)))\n )\n return when(count == lit(0), lit(None)).otherwise(\n when(count == lit(1), lit(float("nan"))).otherwise(\n when(count == lit(2), lit(0.0)).otherwise(sample_skewness)\n )\n )\n\n return self._with_callable(func)\n\n def kurtosis(self) -> Self:\n return self._with_callable(lambda expr: F("kurtosis_pop", expr))\n\n def median(self) -> Self:\n return self._with_callable(lambda expr: F("median", expr))\n\n def all(self) -> Self:\n def f(expr: Expression) -> Expression:\n return CoalesceOperator(F("bool_and", expr), lit(True)) # noqa: FBT003\n\n def window_f(df: DuckDBLazyFrame, inputs: DuckDBWindowInputs) -> 
list[Expression]:\n return [\n CoalesceOperator(\n window_expression(F("bool_and", expr), inputs.partition_by),\n lit(True), # noqa: FBT003\n )\n for expr in self(df)\n ]\n\n return self._with_callable(f)._with_window_function(window_f)\n\n def any(self) -> Self:\n def f(expr: Expression) -> Expression:\n return CoalesceOperator(F("bool_or", expr), lit(False)) # noqa: FBT003\n\n def window_f(df: DuckDBLazyFrame, inputs: DuckDBWindowInputs) -> list[Expression]:\n return [\n CoalesceOperator(\n window_expression(F("bool_or", expr), inputs.partition_by),\n lit(False), # noqa: FBT003\n )\n for expr in self(df)\n ]\n\n return self._with_callable(f)._with_window_function(window_f)\n\n def quantile(\n self, quantile: float, interpolation: RollingInterpolationMethod\n ) -> Self:\n def func(expr: Expression) -> Expression:\n if interpolation == "linear":\n return F("quantile_cont", expr, lit(quantile))\n msg = "Only linear interpolation methods are supported for DuckDB quantile."\n raise NotImplementedError(msg)\n\n return self._with_callable(func)\n\n def clip(\n self,\n lower_bound: Self | NumericLiteral | TemporalLiteral | None,\n upper_bound: Self | NumericLiteral | TemporalLiteral | None,\n ) -> Self:\n def _clip_lower(expr: Expression, lower_bound: Any) -> Expression:\n return F("greatest", expr, lower_bound)\n\n def _clip_upper(expr: Expression, upper_bound: Any) -> Expression:\n return F("least", expr, upper_bound)\n\n def _clip_both(\n expr: Expression, lower_bound: Any, upper_bound: Any\n ) -> Expression:\n return F("greatest", F("least", expr, upper_bound), lower_bound)\n\n if lower_bound is None:\n return self._with_elementwise(_clip_upper, upper_bound=upper_bound)\n if upper_bound is None:\n return self._with_elementwise(_clip_lower, lower_bound=lower_bound)\n return self._with_elementwise(\n _clip_both, lower_bound=lower_bound, upper_bound=upper_bound\n )\n\n def sum(self) -> Self:\n def f(expr: Expression) -> Expression:\n return CoalesceOperator(F("sum", 
expr), lit(0))\n\n def window_f(df: DuckDBLazyFrame, inputs: DuckDBWindowInputs) -> list[Expression]:\n return [\n CoalesceOperator(\n window_expression(F("sum", expr), inputs.partition_by), lit(0)\n )\n for expr in self(df)\n ]\n\n return self._with_callable(f)._with_window_function(window_f)\n\n def n_unique(self) -> Self:\n def func(expr: Expression) -> Expression:\n # https://stackoverflow.com/a/79338887/4451315\n return F("array_unique", F("array_agg", expr)) + F(\n "max", when(expr.isnotnull(), lit(0)).otherwise(lit(1))\n )\n\n return self._with_callable(func)\n\n def count(self) -> Self:\n return self._with_callable(lambda expr: F("count", expr))\n\n def len(self) -> Self:\n return self._with_callable(lambda _expr: F("count"))\n\n def std(self, ddof: int) -> Self:\n if ddof == 0:\n return self._with_callable(lambda expr: F("stddev_pop", expr))\n if ddof == 1:\n return self._with_callable(lambda expr: F("stddev_samp", expr))\n\n def _std(expr: Expression) -> Expression:\n n_samples = F("count", expr)\n return (\n F("stddev_pop", expr)\n * F("sqrt", n_samples)\n / (F("sqrt", (n_samples - lit(ddof))))\n )\n\n return self._with_callable(_std)\n\n def var(self, ddof: int) -> Self:\n if ddof == 0:\n return self._with_callable(lambda expr: F("var_pop", expr))\n if ddof == 1:\n return self._with_callable(lambda expr: F("var_samp", expr))\n\n def _var(expr: Expression) -> Expression:\n n_samples = F("count", expr)\n return F("var_pop", expr) * n_samples / (n_samples - lit(ddof))\n\n return self._with_callable(_var)\n\n def max(self) -> Self:\n return self._with_callable(lambda expr: F("max", expr))\n\n def min(self) -> Self:\n return self._with_callable(lambda expr: F("min", expr))\n\n def null_count(self) -> Self:\n return self._with_callable(lambda expr: F("sum", expr.isnull().cast("int")))\n\n @requires.backend_version((1, 3))\n def over(\n self, partition_by: Sequence[str | Expression], order_by: Sequence[str]\n ) -> Self:\n def func(df: DuckDBLazyFrame) -> 
Sequence[Expression]:\n return self.window_function(df, WindowInputs(partition_by, order_by))\n\n return self.__class__(\n func,\n evaluate_output_names=self._evaluate_output_names,\n alias_output_names=self._alias_output_names,\n backend_version=self._backend_version,\n version=self._version,\n )\n\n def is_null(self) -> Self:\n return self._with_elementwise(lambda expr: expr.isnull())\n\n def is_nan(self) -> Self:\n return self._with_elementwise(lambda expr: F("isnan", expr))\n\n def is_finite(self) -> Self:\n return self._with_elementwise(lambda expr: F("isfinite", expr))\n\n def is_in(self, other: Sequence[Any]) -> Self:\n return self._with_elementwise(lambda expr: F("contains", lit(other), expr))\n\n def round(self, decimals: int) -> Self:\n return self._with_elementwise(lambda expr: F("round", expr, lit(decimals)))\n\n @requires.backend_version((1, 3))\n def shift(self, n: int) -> Self:\n def func(df: DuckDBLazyFrame, inputs: DuckDBWindowInputs) -> Sequence[Expression]:\n return [\n window_expression(\n F("lag", expr, lit(n)), inputs.partition_by, inputs.order_by\n )\n for expr in self(df)\n ]\n\n return self._with_window_function(func)\n\n @requires.backend_version((1, 3))\n def is_first_distinct(self) -> Self:\n def func(df: DuckDBLazyFrame, inputs: DuckDBWindowInputs) -> Sequence[Expression]:\n return [\n window_expression(\n F("row_number"), (*inputs.partition_by, expr), inputs.order_by\n )\n == lit(1)\n for expr in self(df)\n ]\n\n return self._with_window_function(func)\n\n @requires.backend_version((1, 3))\n def is_last_distinct(self) -> Self:\n def func(df: DuckDBLazyFrame, inputs: DuckDBWindowInputs) -> Sequence[Expression]:\n return [\n window_expression(\n F("row_number"),\n (*inputs.partition_by, expr),\n inputs.order_by,\n descending=True,\n nulls_last=True,\n )\n == lit(1)\n for expr in self(df)\n ]\n\n return self._with_window_function(func)\n\n @requires.backend_version((1, 3))\n def diff(self) -> Self:\n def func(df: DuckDBLazyFrame, inputs: 
DuckDBWindowInputs) -> list[Expression]:\n return [\n expr\n - window_expression(F("lag", expr), inputs.partition_by, inputs.order_by)\n for expr in self(df)\n ]\n\n return self._with_window_function(func)\n\n @requires.backend_version((1, 3))\n def cum_sum(self, *, reverse: bool) -> Self:\n return self._with_window_function(self._cum_window_func("sum", reverse=reverse))\n\n @requires.backend_version((1, 3))\n def cum_max(self, *, reverse: bool) -> Self:\n return self._with_window_function(self._cum_window_func("max", reverse=reverse))\n\n @requires.backend_version((1, 3))\n def cum_min(self, *, reverse: bool) -> Self:\n return self._with_window_function(self._cum_window_func("min", reverse=reverse))\n\n @requires.backend_version((1, 3))\n def cum_count(self, *, reverse: bool) -> Self:\n return self._with_window_function(self._cum_window_func("count", reverse=reverse))\n\n @requires.backend_version((1, 3))\n def cum_prod(self, *, reverse: bool) -> Self:\n return self._with_window_function(\n self._cum_window_func("product", reverse=reverse)\n )\n\n @requires.backend_version((1, 3))\n def rolling_sum(self, window_size: int, *, min_samples: int, center: bool) -> Self:\n return self._with_window_function(\n self._rolling_window_func("sum", window_size, min_samples, center=center)\n )\n\n @requires.backend_version((1, 3))\n def rolling_mean(self, window_size: int, *, min_samples: int, center: bool) -> Self:\n return self._with_window_function(\n self._rolling_window_func("mean", window_size, min_samples, center=center)\n )\n\n @requires.backend_version((1, 3))\n def rolling_var(\n self, window_size: int, *, min_samples: int, center: bool, ddof: int\n ) -> Self:\n return self._with_window_function(\n self._rolling_window_func(\n "var", window_size, min_samples, ddof=ddof, center=center\n )\n )\n\n @requires.backend_version((1, 3))\n def rolling_std(\n self, window_size: int, *, min_samples: int, center: bool, ddof: int\n ) -> Self:\n return self._with_window_function(\n 
self._rolling_window_func(\n "std", window_size, min_samples, ddof=ddof, center=center\n )\n )\n\n def fill_null(\n self,\n value: Self | NonNestedLiteral,\n strategy: FillNullStrategy | None,\n limit: int | None,\n ) -> Self:\n if strategy is not None:\n if self._backend_version < (1, 3): # pragma: no cover\n msg = f"`fill_null` with `strategy={strategy}` is only available in 'duckdb>=1.3.0'."\n raise NotImplementedError(msg)\n\n def _fill_with_strategy(\n df: DuckDBLazyFrame, inputs: DuckDBWindowInputs\n ) -> Sequence[Expression]:\n fill_func = "last_value" if strategy == "forward" else "first_value"\n _limit = "unbounded" if limit is None else limit\n rows_start, rows_end = (\n (f"{_limit} preceding", "current row")\n if strategy == "forward"\n else ("current row", f"{_limit} following")\n )\n return [\n window_expression(\n F(fill_func, expr),\n inputs.partition_by,\n inputs.order_by,\n rows_start=rows_start,\n rows_end=rows_end,\n ignore_nulls=True,\n )\n for expr in self(df)\n ]\n\n return self._with_window_function(_fill_with_strategy)\n\n def _fill_constant(expr: Expression, value: Any) -> Expression:\n return CoalesceOperator(expr, value)\n\n return self._with_elementwise(_fill_constant, value=value)\n\n def cast(self, dtype: IntoDType) -> Self:\n def func(expr: Expression) -> Expression:\n native_dtype = narwhals_to_native_dtype(dtype, self._version)\n return expr.cast(DuckDBPyType(native_dtype))\n\n return self._with_elementwise(func)\n\n @requires.backend_version((1, 3))\n def is_unique(self) -> Self:\n def _is_unique(expr: Expression, *partition_by: str | Expression) -> Expression:\n return window_expression(\n F("count", StarExpression()), (expr, *partition_by)\n ) == lit(1)\n\n def _unpartitioned_is_unique(expr: Expression) -> Expression:\n return _is_unique(expr)\n\n def _partitioned_is_unique(\n df: DuckDBLazyFrame, inputs: DuckDBWindowInputs\n ) -> Sequence[Expression]:\n assert not inputs.order_by # noqa: S101\n return [_is_unique(expr, 
*inputs.partition_by) for expr in self(df)]\n\n return self._with_callable(_unpartitioned_is_unique)._with_window_function(\n _partitioned_is_unique\n )\n\n @requires.backend_version((1, 3))\n def rank(self, method: RankMethod, *, descending: bool) -> Self:\n if method in {"min", "max", "average"}:\n func = F("rank")\n elif method == "dense":\n func = F("dense_rank")\n else: # method == "ordinal"\n func = F("row_number")\n\n def _rank(\n expr: Expression,\n *,\n descending: bool,\n partition_by: Sequence[str | Expression],\n ) -> Expression:\n count_expr = F("count", StarExpression())\n window_kwargs: WindowExpressionKwargs = {\n "partition_by": partition_by,\n "order_by": (expr,),\n "descending": descending,\n "nulls_last": True,\n }\n count_window_kwargs: WindowExpressionKwargs = {\n "partition_by": (*partition_by, expr)\n }\n if method == "max":\n rank_expr = (\n window_expression(func, **window_kwargs)\n + window_expression(count_expr, **count_window_kwargs)\n - lit(1)\n )\n elif method == "average":\n rank_expr = window_expression(func, **window_kwargs) + (\n window_expression(count_expr, **count_window_kwargs) - lit(1)\n ) / lit(2.0)\n else:\n rank_expr = window_expression(func, **window_kwargs)\n return when(expr.isnotnull(), rank_expr)\n\n def _unpartitioned_rank(expr: Expression) -> Expression:\n return _rank(expr, partition_by=(), descending=descending)\n\n def _partitioned_rank(\n df: DuckDBLazyFrame, inputs: DuckDBWindowInputs\n ) -> Sequence[Expression]:\n assert not inputs.order_by # noqa: S101\n return [\n _rank(expr, descending=descending, partition_by=inputs.partition_by)\n for expr in self(df)\n ]\n\n return self._with_callable(_unpartitioned_rank)._with_window_function(\n _partitioned_rank\n )\n\n def log(self, base: float) -> Self:\n def _log(expr: Expression) -> Expression:\n log = F("log", expr)\n return (\n when(expr < lit(0), lit(float("nan")))\n .when(expr == lit(0), lit(float("-inf")))\n .otherwise(log / F("log", lit(base)))\n )\n\n return 
self._with_elementwise(_log)\n\n def exp(self) -> Self:\n def _exp(expr: Expression) -> Expression:\n return F("exp", expr)\n\n return self._with_elementwise(_exp)\n\n def sqrt(self) -> Self:\n def _sqrt(expr: Expression) -> Expression:\n return when(expr < lit(0), lit(float("nan"))).otherwise(F("sqrt", expr))\n\n return self._with_elementwise(_sqrt)\n\n @property\n def str(self) -> DuckDBExprStringNamespace:\n return DuckDBExprStringNamespace(self)\n\n @property\n def dt(self) -> DuckDBExprDateTimeNamespace:\n return DuckDBExprDateTimeNamespace(self)\n\n @property\n def list(self) -> DuckDBExprListNamespace:\n return DuckDBExprListNamespace(self)\n\n @property\n def struct(self) -> DuckDBExprStructNamespace:\n return DuckDBExprStructNamespace(self)\n\n drop_nulls = not_implemented()\n unique = not_implemented()\n
.venv\Lib\site-packages\narwhals\_duckdb\expr.py
expr.py
Python
30,085
0.95
0.195838
0.021708
awesome-app
889
2024-09-08T23:37:38.625006
BSD-3-Clause
false
a2e390d792a8b61e3e4132b3d54f495f
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom narwhals._compliant.any_namespace import DateTimeNamespace\nfrom narwhals._compliant.expr import LazyExprNamespace\nfrom narwhals._constants import (\n MS_PER_MINUTE,\n MS_PER_SECOND,\n NS_PER_SECOND,\n SECONDS_PER_MINUTE,\n US_PER_MINUTE,\n US_PER_SECOND,\n)\nfrom narwhals._duckdb.utils import UNITS_DICT, F, fetch_rel_time_zone, lit\nfrom narwhals._duration import parse_interval_string\nfrom narwhals._utils import not_implemented\n\nif TYPE_CHECKING:\n from collections.abc import Sequence\n\n from duckdb import Expression\n\n from narwhals._duckdb.dataframe import DuckDBLazyFrame\n from narwhals._duckdb.expr import DuckDBExpr\n\n\nclass DuckDBExprDateTimeNamespace(\n LazyExprNamespace["DuckDBExpr"], DateTimeNamespace["DuckDBExpr"]\n):\n def year(self) -> DuckDBExpr:\n return self.compliant._with_callable(lambda expr: F("year", expr))\n\n def month(self) -> DuckDBExpr:\n return self.compliant._with_callable(lambda expr: F("month", expr))\n\n def day(self) -> DuckDBExpr:\n return self.compliant._with_callable(lambda expr: F("day", expr))\n\n def hour(self) -> DuckDBExpr:\n return self.compliant._with_callable(lambda expr: F("hour", expr))\n\n def minute(self) -> DuckDBExpr:\n return self.compliant._with_callable(lambda expr: F("minute", expr))\n\n def second(self) -> DuckDBExpr:\n return self.compliant._with_callable(lambda expr: F("second", expr))\n\n def millisecond(self) -> DuckDBExpr:\n return self.compliant._with_callable(\n lambda expr: F("millisecond", expr) - F("second", expr) * lit(MS_PER_SECOND)\n )\n\n def microsecond(self) -> DuckDBExpr:\n return self.compliant._with_callable(\n lambda expr: F("microsecond", expr) - F("second", expr) * lit(US_PER_SECOND)\n )\n\n def nanosecond(self) -> DuckDBExpr:\n return self.compliant._with_callable(\n lambda expr: F("nanosecond", expr) - F("second", expr) * lit(NS_PER_SECOND)\n )\n\n def to_string(self, format: str) -> DuckDBExpr:\n return 
self.compliant._with_callable(\n lambda expr: F("strftime", expr, lit(format))\n )\n\n def weekday(self) -> DuckDBExpr:\n return self.compliant._with_callable(lambda expr: F("isodow", expr))\n\n def ordinal_day(self) -> DuckDBExpr:\n return self.compliant._with_callable(lambda expr: F("dayofyear", expr))\n\n def date(self) -> DuckDBExpr:\n return self.compliant._with_callable(lambda expr: expr.cast("date"))\n\n def total_minutes(self) -> DuckDBExpr:\n return self.compliant._with_callable(\n lambda expr: F("datepart", lit("minute"), expr)\n )\n\n def total_seconds(self) -> DuckDBExpr:\n return self.compliant._with_callable(\n lambda expr: lit(SECONDS_PER_MINUTE) * F("datepart", lit("minute"), expr)\n + F("datepart", lit("second"), expr)\n )\n\n def total_milliseconds(self) -> DuckDBExpr:\n return self.compliant._with_callable(\n lambda expr: lit(MS_PER_MINUTE) * F("datepart", lit("minute"), expr)\n + F("datepart", lit("millisecond"), expr)\n )\n\n def total_microseconds(self) -> DuckDBExpr:\n return self.compliant._with_callable(\n lambda expr: lit(US_PER_MINUTE) * F("datepart", lit("minute"), expr)\n + F("datepart", lit("microsecond"), expr)\n )\n\n def truncate(self, every: str) -> DuckDBExpr:\n multiple, unit = parse_interval_string(every)\n if multiple != 1:\n # https://github.com/duckdb/duckdb/issues/17554\n msg = f"Only multiple 1 is currently supported for DuckDB.\nGot {multiple!s}."\n raise ValueError(msg)\n if unit == "ns":\n msg = "Truncating to nanoseconds is not yet supported for DuckDB."\n raise NotImplementedError(msg)\n format = lit(UNITS_DICT[unit])\n\n def _truncate(expr: Expression) -> Expression:\n return F("date_trunc", format, expr)\n\n return self.compliant._with_callable(_truncate)\n\n def _no_op_time_zone(self, time_zone: str) -> DuckDBExpr:\n def func(df: DuckDBLazyFrame) -> Sequence[Expression]:\n native_series_list = self.compliant(df)\n conn_time_zone = fetch_rel_time_zone(df.native)\n if conn_time_zone != time_zone:\n msg = (\n "DuckDB 
stores the time zone in the connection, rather than in the "\n f"data type, so changing the timezone to anything other than {conn_time_zone} "\n " (the current connection time zone) is not supported."\n )\n raise NotImplementedError(msg)\n return native_series_list\n\n return self.compliant.__class__(\n func,\n evaluate_output_names=self.compliant._evaluate_output_names,\n alias_output_names=self.compliant._alias_output_names,\n backend_version=self.compliant._backend_version,\n version=self.compliant._version,\n )\n\n def convert_time_zone(self, time_zone: str) -> DuckDBExpr:\n return self._no_op_time_zone(time_zone)\n\n def replace_time_zone(self, time_zone: str | None) -> DuckDBExpr:\n if time_zone is None:\n return self.compliant._with_callable(lambda _input: _input.cast("timestamp"))\n else:\n return self._no_op_time_zone(time_zone)\n\n total_nanoseconds = not_implemented()\n timestamp = not_implemented()\n
.venv\Lib\site-packages\narwhals\_duckdb\expr_dt.py
expr_dt.py
Python
5,546
0.95
0.209459
0.008547
python-kit
148
2025-03-23T18:21:14.580436
MIT
false
77c8f8575cd6cf644e513fa985a37805
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom narwhals._compliant.any_namespace import ListNamespace\nfrom narwhals._compliant.expr import LazyExprNamespace\nfrom narwhals._duckdb.utils import F\n\nif TYPE_CHECKING:\n from narwhals._duckdb.expr import DuckDBExpr\n\n\nclass DuckDBExprListNamespace(\n LazyExprNamespace["DuckDBExpr"], ListNamespace["DuckDBExpr"]\n):\n def len(self) -> DuckDBExpr:\n return self.compliant._with_callable(lambda expr: F("len", expr))\n
.venv\Lib\site-packages\narwhals\_duckdb\expr_list.py
expr_list.py
Python
498
0.85
0.176471
0
vue-tools
938
2024-06-20T08:11:11.496289
BSD-3-Clause
false
d2398897824277f94c00910d7b2ceb1e
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom narwhals._compliant.any_namespace import StringNamespace\nfrom narwhals._compliant.expr import LazyExprNamespace\nfrom narwhals._duckdb.utils import F, lit, when\nfrom narwhals._utils import not_implemented\n\nif TYPE_CHECKING:\n from duckdb import Expression\n\n from narwhals._duckdb.expr import DuckDBExpr\n\n\nclass DuckDBExprStringNamespace(\n LazyExprNamespace["DuckDBExpr"], StringNamespace["DuckDBExpr"]\n):\n def starts_with(self, prefix: str) -> DuckDBExpr:\n return self.compliant._with_callable(\n lambda expr: F("starts_with", expr, lit(prefix))\n )\n\n def ends_with(self, suffix: str) -> DuckDBExpr:\n return self.compliant._with_callable(\n lambda expr: F("ends_with", expr, lit(suffix))\n )\n\n def contains(self, pattern: str, *, literal: bool) -> DuckDBExpr:\n def func(expr: Expression) -> Expression:\n if literal:\n return F("contains", expr, lit(pattern))\n return F("regexp_matches", expr, lit(pattern))\n\n return self.compliant._with_callable(func)\n\n def slice(self, offset: int, length: int | None) -> DuckDBExpr:\n def func(expr: Expression) -> Expression:\n offset_lit = lit(offset)\n return F(\n "array_slice",\n expr,\n lit(offset + 1)\n if offset >= 0\n else F("length", expr) + offset_lit + lit(1),\n F("length", expr) if length is None else lit(length) + offset_lit,\n )\n\n return self.compliant._with_callable(func)\n\n def split(self, by: str) -> DuckDBExpr:\n return self.compliant._with_callable(lambda expr: F("str_split", expr, lit(by)))\n\n def len_chars(self) -> DuckDBExpr:\n return self.compliant._with_callable(lambda expr: F("length", expr))\n\n def to_lowercase(self) -> DuckDBExpr:\n return self.compliant._with_callable(lambda expr: F("lower", expr))\n\n def to_uppercase(self) -> DuckDBExpr:\n return self.compliant._with_callable(lambda expr: F("upper", expr))\n\n def strip_chars(self, characters: str | None) -> DuckDBExpr:\n import string\n\n return 
self.compliant._with_callable(\n lambda expr: F(\n "trim", expr, lit(string.whitespace if characters is None else characters)\n )\n )\n\n def replace_all(self, pattern: str, value: str, *, literal: bool) -> DuckDBExpr:\n if not literal:\n return self.compliant._with_callable(\n lambda expr: F("regexp_replace", expr, lit(pattern), lit(value), lit("g"))\n )\n return self.compliant._with_callable(\n lambda expr: F("replace", expr, lit(pattern), lit(value))\n )\n\n def to_datetime(self, format: str | None) -> DuckDBExpr:\n if format is None:\n msg = "Cannot infer format with DuckDB backend, please specify `format` explicitly."\n raise NotImplementedError(msg)\n\n return self.compliant._with_callable(\n lambda expr: F("strptime", expr, lit(format))\n )\n\n def to_date(self, format: str | None) -> DuckDBExpr:\n if format is not None:\n return self.to_datetime(format=format).dt.date()\n\n compliant_expr = self.compliant\n return compliant_expr.cast(compliant_expr._version.dtypes.Date())\n\n def zfill(self, width: int) -> DuckDBExpr:\n # DuckDB does not have a built-in zfill function, so we need to implement it manually\n # using string manipulation functions.\n\n def func(expr: Expression) -> Expression:\n less_than_width = F("length", expr) < lit(width)\n zero, hyphen, plus = lit("0"), lit("-"), lit("+")\n\n starts_with_minus = F("starts_with", expr, hyphen)\n starts_with_plus = F("starts_with", expr, plus)\n substring = F("substr", expr, lit(2))\n padded_substring = F("lpad", substring, lit(width - 1), zero)\n return (\n when(\n starts_with_minus & less_than_width,\n F("concat", hyphen, padded_substring),\n )\n .when(\n starts_with_plus & less_than_width,\n F("concat", plus, padded_substring),\n )\n .when(less_than_width, F("lpad", expr, lit(width), zero))\n .otherwise(expr)\n )\n\n return self.compliant._with_callable(func)\n\n replace = not_implemented()\n
.venv\Lib\site-packages\narwhals\_duckdb\expr_str.py
expr_str.py
Python
4,559
0.95
0.209677
0.020619
react-lib
37
2024-07-21T09:38:20.493834
Apache-2.0
false
6f8b775864aa7f6bcd6bce263f27e53d
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom narwhals._compliant.any_namespace import StructNamespace\nfrom narwhals._compliant.expr import LazyExprNamespace\nfrom narwhals._duckdb.utils import F, lit\n\nif TYPE_CHECKING:\n from narwhals._duckdb.expr import DuckDBExpr\n\n\nclass DuckDBExprStructNamespace(\n LazyExprNamespace["DuckDBExpr"], StructNamespace["DuckDBExpr"]\n):\n def field(self, name: str) -> DuckDBExpr:\n return self.compliant._with_callable(\n lambda expr: F("struct_extract", expr, lit(name))\n ).alias(name)\n
.venv\Lib\site-packages\narwhals\_duckdb\expr_struct.py
expr_struct.py
Python
578
0.85
0.157895
0
node-utils
682
2024-05-20T11:13:56.202926
GPL-3.0
false
dbb7c376eb9ac83c6a66cbae13fc0f8a
from __future__ import annotations\n\nfrom itertools import chain\nfrom typing import TYPE_CHECKING\n\nfrom narwhals._compliant import LazyGroupBy\n\nif TYPE_CHECKING:\n from collections.abc import Sequence\n\n from duckdb import Expression # noqa: F401\n\n from narwhals._duckdb.dataframe import DuckDBLazyFrame\n from narwhals._duckdb.expr import DuckDBExpr\n\n\nclass DuckDBGroupBy(LazyGroupBy["DuckDBLazyFrame", "DuckDBExpr", "Expression"]):\n def __init__(\n self,\n df: DuckDBLazyFrame,\n keys: Sequence[DuckDBExpr] | Sequence[str],\n /,\n *,\n drop_null_keys: bool,\n ) -> None:\n frame, self._keys, self._output_key_names = self._parse_keys(df, keys=keys)\n self._compliant_frame = frame.drop_nulls(self._keys) if drop_null_keys else frame\n\n def agg(self, *exprs: DuckDBExpr) -> DuckDBLazyFrame:\n agg_columns = list(chain(self._keys, self._evaluate_exprs(exprs)))\n return self.compliant._with_native(\n self.compliant.native.aggregate(agg_columns) # type: ignore[arg-type]\n ).rename(dict(zip(self._keys, self._output_key_names)))\n
.venv\Lib\site-packages\narwhals\_duckdb\group_by.py
group_by.py
Python
1,122
0.95
0.151515
0.04
vue-tools
195
2025-02-22T13:58:18.110909
Apache-2.0
false
6cd3b8526c74b00a14ec73bad1bd7dbb
from __future__ import annotations\n\nimport operator\nfrom functools import reduce\nfrom itertools import chain\nfrom typing import TYPE_CHECKING\n\nimport duckdb\nfrom duckdb import CoalesceOperator, Expression\nfrom duckdb.typing import BIGINT, VARCHAR\n\nfrom narwhals._compliant import LazyNamespace, LazyThen, LazyWhen\nfrom narwhals._duckdb.dataframe import DuckDBLazyFrame\nfrom narwhals._duckdb.expr import DuckDBExpr\nfrom narwhals._duckdb.selectors import DuckDBSelectorNamespace\nfrom narwhals._duckdb.utils import F, concat_str, lit, narwhals_to_native_dtype, when\nfrom narwhals._expression_parsing import (\n combine_alias_output_names,\n combine_evaluate_output_names,\n)\nfrom narwhals._utils import Implementation\n\nif TYPE_CHECKING:\n from collections.abc import Iterable, Sequence\n\n from narwhals._duckdb.expr import DuckDBWindowInputs\n from narwhals._utils import Version\n from narwhals.typing import ConcatMethod, IntoDType, NonNestedLiteral\n\n\nclass DuckDBNamespace(\n LazyNamespace[DuckDBLazyFrame, DuckDBExpr, duckdb.DuckDBPyRelation]\n):\n _implementation: Implementation = Implementation.DUCKDB\n\n def __init__(self, *, backend_version: tuple[int, ...], version: Version) -> None:\n self._backend_version = backend_version\n self._version = version\n\n @property\n def selectors(self) -> DuckDBSelectorNamespace:\n return DuckDBSelectorNamespace.from_namespace(self)\n\n @property\n def _expr(self) -> type[DuckDBExpr]:\n return DuckDBExpr\n\n @property\n def _lazyframe(self) -> type[DuckDBLazyFrame]:\n return DuckDBLazyFrame\n\n def concat(\n self, items: Iterable[DuckDBLazyFrame], *, how: ConcatMethod\n ) -> DuckDBLazyFrame:\n native_items = [item._native_frame for item in items]\n items = list(items)\n first = items[0]\n schema = first.schema\n if how == "vertical" and not all(x.schema == schema for x in items[1:]):\n msg = "inputs should all have the same schema"\n raise TypeError(msg)\n res = reduce(lambda x, y: x.union(y), native_items)\n return 
first._with_native(res)\n\n def concat_str(\n self, *exprs: DuckDBExpr, separator: str, ignore_nulls: bool\n ) -> DuckDBExpr:\n def func(df: DuckDBLazyFrame) -> list[Expression]:\n cols = list(chain.from_iterable(expr(df) for expr in exprs))\n if not ignore_nulls:\n null_mask_result = reduce(operator.or_, (s.isnull() for s in cols))\n cols_separated = [\n y\n for x in [\n (col.cast(VARCHAR),)\n if i == len(cols) - 1\n else (col.cast(VARCHAR), lit(separator))\n for i, col in enumerate(cols)\n ]\n for y in x\n ]\n return [when(~null_mask_result, concat_str(*cols_separated))]\n else:\n return [concat_str(*cols, separator=separator)]\n\n return self._expr(\n call=func,\n evaluate_output_names=combine_evaluate_output_names(*exprs),\n alias_output_names=combine_alias_output_names(*exprs),\n backend_version=self._backend_version,\n version=self._version,\n )\n\n def all_horizontal(self, *exprs: DuckDBExpr, ignore_nulls: bool) -> DuckDBExpr:\n def func(cols: Iterable[Expression]) -> Expression:\n it = (\n (CoalesceOperator(expr, lit(True)) for expr in cols) # noqa: FBT003\n if ignore_nulls\n else cols\n )\n return reduce(operator.and_, it)\n\n return self._expr._from_elementwise_horizontal_op(func, *exprs)\n\n def any_horizontal(self, *exprs: DuckDBExpr, ignore_nulls: bool) -> DuckDBExpr:\n def func(cols: Iterable[Expression]) -> Expression:\n it = (\n (CoalesceOperator(expr, lit(False)) for expr in cols) # noqa: FBT003\n if ignore_nulls\n else cols\n )\n return reduce(operator.or_, it)\n\n return self._expr._from_elementwise_horizontal_op(func, *exprs)\n\n def max_horizontal(self, *exprs: DuckDBExpr) -> DuckDBExpr:\n def func(cols: Iterable[Expression]) -> Expression:\n return F("greatest", *cols)\n\n return self._expr._from_elementwise_horizontal_op(func, *exprs)\n\n def min_horizontal(self, *exprs: DuckDBExpr) -> DuckDBExpr:\n def func(cols: Iterable[Expression]) -> Expression:\n return F("least", *cols)\n\n return self._expr._from_elementwise_horizontal_op(func, 
*exprs)\n\n def sum_horizontal(self, *exprs: DuckDBExpr) -> DuckDBExpr:\n def func(cols: Iterable[Expression]) -> Expression:\n return reduce(operator.add, (CoalesceOperator(col, lit(0)) for col in cols))\n\n return self._expr._from_elementwise_horizontal_op(func, *exprs)\n\n def mean_horizontal(self, *exprs: DuckDBExpr) -> DuckDBExpr:\n def func(cols: Iterable[Expression]) -> Expression:\n cols = list(cols)\n return reduce(\n operator.add, (CoalesceOperator(col, lit(0)) for col in cols)\n ) / reduce(operator.add, (col.isnotnull().cast(BIGINT) for col in cols))\n\n return self._expr._from_elementwise_horizontal_op(func, *exprs)\n\n def when(self, predicate: DuckDBExpr) -> DuckDBWhen:\n return DuckDBWhen.from_expr(predicate, context=self)\n\n def lit(self, value: NonNestedLiteral, dtype: IntoDType | None) -> DuckDBExpr:\n def func(_df: DuckDBLazyFrame) -> list[Expression]:\n if dtype is not None:\n return [\n lit(value).cast(\n narwhals_to_native_dtype(dtype, version=self._version) # type: ignore[arg-type]\n )\n ]\n return [lit(value)]\n\n return self._expr(\n func,\n evaluate_output_names=lambda _df: ["literal"],\n alias_output_names=None,\n backend_version=self._backend_version,\n version=self._version,\n )\n\n def len(self) -> DuckDBExpr:\n def func(_df: DuckDBLazyFrame) -> list[Expression]:\n return [F("count")]\n\n return self._expr(\n call=func,\n evaluate_output_names=lambda _df: ["len"],\n alias_output_names=None,\n backend_version=self._backend_version,\n version=self._version,\n )\n\n\nclass DuckDBWhen(LazyWhen["DuckDBLazyFrame", Expression, DuckDBExpr]):\n @property\n def _then(self) -> type[DuckDBThen]:\n return DuckDBThen\n\n def __call__(self, df: DuckDBLazyFrame) -> Sequence[Expression]:\n self.when = when\n self.lit = lit\n return super().__call__(df)\n\n def _window_function(\n self, df: DuckDBLazyFrame, window_inputs: DuckDBWindowInputs\n ) -> Sequence[Expression]:\n self.when = when\n self.lit = lit\n return super()._window_function(df, 
window_inputs)\n\n\nclass DuckDBThen(LazyThen["DuckDBLazyFrame", Expression, DuckDBExpr], DuckDBExpr): ...\n
.venv\Lib\site-packages\narwhals\_duckdb\namespace.py
namespace.py
Python
7,126
0.95
0.251282
0
node-utils
940
2025-02-23T09:46:04.851304
BSD-3-Clause
false
a930bb861d7a085589c74eec00cbf1f6
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom narwhals._compliant import CompliantSelector, LazySelectorNamespace\nfrom narwhals._duckdb.expr import DuckDBExpr\n\nif TYPE_CHECKING:\n from duckdb import Expression # noqa: F401\n\n from narwhals._duckdb.dataframe import DuckDBLazyFrame # noqa: F401\n\n\nclass DuckDBSelectorNamespace(LazySelectorNamespace["DuckDBLazyFrame", "Expression"]):\n @property\n def _selector(self) -> type[DuckDBSelector]:\n return DuckDBSelector\n\n\nclass DuckDBSelector( # type: ignore[misc]\n CompliantSelector["DuckDBLazyFrame", "Expression"], DuckDBExpr\n):\n def _to_expr(self) -> DuckDBExpr:\n return DuckDBExpr(\n self._call,\n self._window_function,\n evaluate_output_names=self._evaluate_output_names,\n alias_output_names=self._alias_output_names,\n backend_version=self._backend_version,\n version=self._version,\n )\n
.venv\Lib\site-packages\narwhals\_duckdb\selectors.py
selectors.py
Python
967
0.95
0.16129
0
node-utils
812
2024-02-03T06:05:12.378578
GPL-3.0
false
396b918a9ab9830cc338dabbfe54a72f
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom narwhals._duckdb.utils import DeferredTimeZone, native_to_narwhals_dtype\nfrom narwhals.dependencies import get_duckdb\n\nif TYPE_CHECKING:\n from types import ModuleType\n\n import duckdb\n from typing_extensions import Never, Self\n\n from narwhals._utils import Version\n from narwhals.dtypes import DType\n\n\nclass DuckDBInterchangeSeries:\n def __init__(self, df: duckdb.DuckDBPyRelation, version: Version) -> None:\n self._native_series = df\n self._version = version\n\n def __narwhals_series__(self) -> Self:\n return self\n\n def __native_namespace__(self) -> ModuleType:\n return get_duckdb() # type: ignore[no-any-return]\n\n @property\n def dtype(self) -> DType:\n return native_to_narwhals_dtype(\n self._native_series.types[0],\n self._version,\n DeferredTimeZone(self._native_series),\n )\n\n def __getattr__(self, attr: str) -> Never:\n msg = ( # pragma: no cover\n f"Attribute {attr} is not supported for interchange-level dataframes.\n\n"\n "If you would like to see this kind of object better supported in "\n "Narwhals, please open a feature request "\n "at https://github.com/narwhals-dev/narwhals/issues."\n )\n raise NotImplementedError(msg) # pragma: no cover\n
.venv\Lib\site-packages\narwhals\_duckdb\series.py
series.py
Python
1,397
0.95
0.181818
0
node-utils
342
2024-05-15T07:17:30.724936
GPL-3.0
false
8dcbd6a98f78c57b40992f4cdb5da255
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, TypedDict\n\nif TYPE_CHECKING:\n from collections.abc import Sequence\n\n from duckdb import Expression\n\n\nclass WindowExpressionKwargs(TypedDict, total=False):\n partition_by: Sequence[str | Expression]\n order_by: Sequence[str | Expression]\n rows_start: str\n rows_end: str\n descending: bool\n nulls_last: bool\n ignore_nulls: bool\n
.venv\Lib\site-packages\narwhals\_duckdb\typing.py
typing.py
Python
420
0.85
0.111111
0
awesome-app
539
2024-03-11T12:01:21.806358
Apache-2.0
false
487ff6d9d3d9cd33613514919c78250d
from __future__ import annotations\n\nfrom functools import lru_cache\nfrom typing import TYPE_CHECKING, Any\n\nimport duckdb\n\nfrom narwhals._utils import Version, isinstance_or_issubclass\n\nif TYPE_CHECKING:\n from collections.abc import Sequence\n\n from duckdb import DuckDBPyRelation, Expression\n from duckdb.typing import DuckDBPyType\n\n from narwhals._duckdb.dataframe import DuckDBLazyFrame\n from narwhals._duckdb.expr import DuckDBExpr\n from narwhals.dtypes import DType\n from narwhals.typing import IntoDType\n\nUNITS_DICT = {\n "y": "year",\n "q": "quarter",\n "mo": "month",\n "d": "day",\n "h": "hour",\n "m": "minute",\n "s": "second",\n "ms": "millisecond",\n "us": "microsecond",\n "ns": "nanosecond",\n}\n\ncol = duckdb.ColumnExpression\n"""Alias for `duckdb.ColumnExpression`."""\n\nlit = duckdb.ConstantExpression\n"""Alias for `duckdb.ConstantExpression`."""\n\nwhen = duckdb.CaseExpression\n"""Alias for `duckdb.CaseExpression`."""\n\nF = duckdb.FunctionExpression\n"""Alias for `duckdb.FunctionExpression`."""\n\n\ndef concat_str(*exprs: Expression, separator: str = "") -> Expression:\n """Concatenate many strings, NULL inputs are skipped.\n\n Wraps [concat] and [concat_ws] `FunctionExpression`(s).\n\n Arguments:\n exprs: Native columns.\n separator: String that will be used to separate the values of each column.\n\n Returns:\n A new native expression.\n\n [concat]: https://duckdb.org/docs/stable/sql/functions/char.html#concatstring-\n [concat_ws]: https://duckdb.org/docs/stable/sql/functions/char.html#concat_wsseparator-string-\n """\n return F("concat_ws", lit(separator), *exprs) if separator else F("concat", *exprs)\n\n\ndef evaluate_exprs(\n df: DuckDBLazyFrame, /, *exprs: DuckDBExpr\n) -> list[tuple[str, Expression]]:\n native_results: list[tuple[str, Expression]] = []\n for expr in exprs:\n native_series_list = expr._call(df)\n output_names = expr._evaluate_output_names(df)\n if expr._alias_output_names is not None:\n output_names = 
expr._alias_output_names(output_names)\n if len(output_names) != len(native_series_list): # pragma: no cover\n msg = f"Internal error: got output names {output_names}, but only got {len(native_series_list)} results"\n raise AssertionError(msg)\n native_results.extend(zip(output_names, native_series_list))\n return native_results\n\n\nclass DeferredTimeZone:\n """Object which gets passed between `native_to_narwhals_dtype` calls.\n\n DuckDB stores the time zone in the connection, rather than in the dtypes, so\n this ensures that when calculating the schema of a dataframe with multiple\n timezone-aware columns, that the connection's time zone is only fetched once.\n\n Note: we cannot make the time zone a cached `DuckDBLazyFrame` property because\n the time zone can be modified after `DuckDBLazyFrame` creation:\n\n ```python\n df = nw.from_native(rel)\n print(df.collect_schema())\n rel.query("set timezone = 'Asia/Kolkata'")\n print(df.collect_schema()) # should change to reflect new time zone\n ```\n """\n\n _cached_time_zone: str | None = None\n\n def __init__(self, rel: DuckDBPyRelation) -> None:\n self._rel = rel\n\n @property\n def time_zone(self) -> str:\n """Fetch relation time zone (if it wasn't calculated already)."""\n if self._cached_time_zone is None:\n self._cached_time_zone = fetch_rel_time_zone(self._rel)\n return self._cached_time_zone\n\n\ndef native_to_narwhals_dtype(\n duckdb_dtype: DuckDBPyType, version: Version, deferred_time_zone: DeferredTimeZone\n) -> DType:\n duckdb_dtype_id = duckdb_dtype.id\n dtypes = version.dtypes\n\n # Handle nested data types first\n if duckdb_dtype_id == "list":\n return dtypes.List(\n native_to_narwhals_dtype(duckdb_dtype.child, version, deferred_time_zone)\n )\n\n if duckdb_dtype_id == "struct":\n children = duckdb_dtype.children\n return dtypes.Struct(\n [\n dtypes.Field(\n name=child[0],\n dtype=native_to_narwhals_dtype(child[1], version, deferred_time_zone),\n )\n for child in children\n ]\n )\n\n if duckdb_dtype_id 
== "array":\n child, size = duckdb_dtype.children\n shape: list[int] = [size[1]]\n\n while child[1].id == "array":\n child, size = child[1].children\n shape.insert(0, size[1])\n\n inner = native_to_narwhals_dtype(child[1], version, deferred_time_zone)\n return dtypes.Array(inner=inner, shape=tuple(shape))\n\n if duckdb_dtype_id == "enum":\n if version is Version.V1:\n return dtypes.Enum() # type: ignore[call-arg]\n categories = duckdb_dtype.children[0][1]\n return dtypes.Enum(categories=categories)\n\n if duckdb_dtype_id == "timestamp with time zone":\n return dtypes.Datetime(time_zone=deferred_time_zone.time_zone)\n\n return _non_nested_native_to_narwhals_dtype(duckdb_dtype_id, version)\n\n\ndef fetch_rel_time_zone(rel: duckdb.DuckDBPyRelation) -> str:\n result = rel.query(\n "duckdb_settings()", "select value from duckdb_settings() where name = 'TimeZone'"\n ).fetchone()\n assert result is not None # noqa: S101\n return result[0] # type: ignore[no-any-return]\n\n\n@lru_cache(maxsize=16)\ndef _non_nested_native_to_narwhals_dtype(duckdb_dtype_id: str, version: Version) -> DType:\n dtypes = version.dtypes\n return {\n "hugeint": dtypes.Int128(),\n "bigint": dtypes.Int64(),\n "integer": dtypes.Int32(),\n "smallint": dtypes.Int16(),\n "tinyint": dtypes.Int8(),\n "uhugeint": dtypes.UInt128(),\n "ubigint": dtypes.UInt64(),\n "uinteger": dtypes.UInt32(),\n "usmallint": dtypes.UInt16(),\n "utinyint": dtypes.UInt8(),\n "double": dtypes.Float64(),\n "float": dtypes.Float32(),\n "varchar": dtypes.String(),\n "date": dtypes.Date(),\n "timestamp": dtypes.Datetime(),\n "boolean": dtypes.Boolean(),\n "interval": dtypes.Duration(),\n "decimal": dtypes.Decimal(),\n "time": dtypes.Time(),\n "blob": dtypes.Binary(),\n }.get(duckdb_dtype_id, dtypes.Unknown())\n\n\ndef narwhals_to_native_dtype(dtype: IntoDType, version: Version) -> str: # noqa: C901, PLR0912, PLR0915\n dtypes = version.dtypes\n if isinstance_or_issubclass(dtype, dtypes.Decimal):\n msg = "Casting to Decimal is not 
supported yet."\n raise NotImplementedError(msg)\n if isinstance_or_issubclass(dtype, dtypes.Float64):\n return "DOUBLE"\n if isinstance_or_issubclass(dtype, dtypes.Float32):\n return "FLOAT"\n if isinstance_or_issubclass(dtype, dtypes.Int128):\n return "INT128"\n if isinstance_or_issubclass(dtype, dtypes.Int64):\n return "BIGINT"\n if isinstance_or_issubclass(dtype, dtypes.Int32):\n return "INTEGER"\n if isinstance_or_issubclass(dtype, dtypes.Int16):\n return "SMALLINT"\n if isinstance_or_issubclass(dtype, dtypes.Int8):\n return "TINYINT"\n if isinstance_or_issubclass(dtype, dtypes.UInt128):\n return "UINT128"\n if isinstance_or_issubclass(dtype, dtypes.UInt64):\n return "UBIGINT"\n if isinstance_or_issubclass(dtype, dtypes.UInt32):\n return "UINTEGER"\n if isinstance_or_issubclass(dtype, dtypes.UInt16): # pragma: no cover\n return "USMALLINT"\n if isinstance_or_issubclass(dtype, dtypes.UInt8): # pragma: no cover\n return "UTINYINT"\n if isinstance_or_issubclass(dtype, dtypes.String):\n return "VARCHAR"\n if isinstance_or_issubclass(dtype, dtypes.Boolean): # pragma: no cover\n return "BOOLEAN"\n if isinstance_or_issubclass(dtype, dtypes.Time):\n return "TIME"\n if isinstance_or_issubclass(dtype, dtypes.Binary):\n return "BLOB"\n if isinstance_or_issubclass(dtype, dtypes.Categorical):\n msg = "Categorical not supported by DuckDB"\n raise NotImplementedError(msg)\n if isinstance_or_issubclass(dtype, dtypes.Enum):\n if version is Version.V1:\n msg = "Converting to Enum is not supported in narwhals.stable.v1"\n raise NotImplementedError(msg)\n if isinstance(dtype, dtypes.Enum):\n categories = "'" + "', '".join(dtype.categories) + "'"\n return f"ENUM ({categories})"\n msg = "Can not cast / initialize Enum without categories present"\n raise ValueError(msg)\n\n if isinstance_or_issubclass(dtype, dtypes.Datetime):\n _time_unit = dtype.time_unit\n _time_zone = dtype.time_zone\n msg = "todo"\n raise NotImplementedError(msg)\n if isinstance_or_issubclass(dtype, 
dtypes.Duration): # pragma: no cover\n _time_unit = dtype.time_unit\n msg = "todo"\n raise NotImplementedError(msg)\n if isinstance_or_issubclass(dtype, dtypes.Date): # pragma: no cover\n return "DATE"\n if isinstance_or_issubclass(dtype, dtypes.List):\n inner = narwhals_to_native_dtype(dtype.inner, version)\n return f"{inner}[]"\n if isinstance_or_issubclass(dtype, dtypes.Struct): # pragma: no cover\n inner = ", ".join(\n f'"{field.name}" {narwhals_to_native_dtype(field.dtype, version)}'\n for field in dtype.fields\n )\n return f"STRUCT({inner})"\n if isinstance_or_issubclass(dtype, dtypes.Array): # pragma: no cover\n shape = dtype.shape\n duckdb_shape_fmt = "".join(f"[{item}]" for item in shape)\n inner_dtype: Any = dtype\n for _ in shape:\n inner_dtype = inner_dtype.inner\n duckdb_inner = narwhals_to_native_dtype(inner_dtype, version)\n return f"{duckdb_inner}{duckdb_shape_fmt}"\n msg = f"Unknown dtype: {dtype}" # pragma: no cover\n raise AssertionError(msg)\n\n\ndef parse_into_expression(into_expression: str | Expression) -> Expression:\n return col(into_expression) if isinstance(into_expression, str) else into_expression\n\n\ndef generate_partition_by_sql(*partition_by: str | Expression) -> str:\n if not partition_by:\n return ""\n by_sql = ", ".join([f"{parse_into_expression(x)}" for x in partition_by])\n return f"partition by {by_sql}"\n\n\ndef generate_order_by_sql(\n *order_by: str | Expression, ascending: bool, nulls_first: bool\n) -> str:\n if not order_by:\n return ""\n nulls = "nulls first" if nulls_first else "nulls last"\n if ascending:\n by_sql = ", ".join([f"{parse_into_expression(x)} asc {nulls}" for x in order_by])\n else:\n by_sql = ", ".join([f"{parse_into_expression(x)} desc {nulls}" for x in order_by])\n return f"order by {by_sql}"\n\n\ndef window_expression(\n expr: Expression,\n partition_by: Sequence[str | Expression] = (),\n order_by: Sequence[str | Expression] = (),\n rows_start: str = "",\n rows_end: str = "",\n *,\n descending: bool = 
False,\n nulls_last: bool = False,\n ignore_nulls: bool = False,\n) -> Expression:\n # TODO(unassigned): Replace with `duckdb.WindowExpression` when they release it.\n # https://github.com/duckdb/duckdb/discussions/14725#discussioncomment-11200348\n try:\n from duckdb import SQLExpression\n except ModuleNotFoundError as exc: # pragma: no cover\n msg = f"DuckDB>=1.3.0 is required for this operation. Found: DuckDB {duckdb.__version__}"\n raise NotImplementedError(msg) from exc\n pb = generate_partition_by_sql(*partition_by)\n ob = generate_order_by_sql(\n *order_by, ascending=not descending, nulls_first=not nulls_last\n )\n\n if rows_start and rows_end:\n rows = f"rows between {rows_start} and {rows_end}"\n elif rows_start or rows_end: # pragma: no cover\n msg = "Either both `rows_start` and `rows_end` must be specified, or neither."\n else:\n rows = ""\n\n func = f"{str(expr).removesuffix(')')} ignore nulls)" if ignore_nulls else str(expr)\n return SQLExpression(f"{func} over ({pb} {ob} {rows})")\n
.venv\Lib\site-packages\narwhals\_duckdb\utils.py
utils.py
Python
12,006
0.95
0.223565
0.021661
node-utils
507
2025-04-17T11:01:38.094504
BSD-3-Clause
false
b683b5cc845ff5862df421d339a7ea6b
\n\n
.venv\Lib\site-packages\narwhals\_duckdb\__pycache__\dataframe.cpython-313.pyc
dataframe.cpython-313.pyc
Other
27,991
0.95
0.027778
0.004184
vue-tools
394
2024-12-24T19:05:39.695874
GPL-3.0
false
c0fb7d3aee2e6dc385a4cdb0587a0825
\n\n
.venv\Lib\site-packages\narwhals\_duckdb\__pycache__\expr.cpython-313.pyc
expr.cpython-313.pyc
Other
43,674
0.95
0.009174
0
awesome-app
127
2024-11-20T05:04:52.424402
MIT
false
ad9d37ee4ae07fd63beb20a4313843da
\n\n
.venv\Lib\site-packages\narwhals\_duckdb\__pycache__\expr_dt.cpython-313.pyc
expr_dt.cpython-313.pyc
Other
11,745
0.8
0.02439
0.013158
awesome-app
916
2025-03-03T02:21:44.781422
BSD-3-Clause
false
d86b1aa5852e81c7e9e3126f2c3b8b52
\n\n
.venv\Lib\site-packages\narwhals\_duckdb\__pycache__\expr_list.cpython-313.pyc
expr_list.cpython-313.pyc
Other
1,237
0.8
0
0
vue-tools
453
2023-10-10T00:19:41.388527
GPL-3.0
false
0b06545123bd18cf9f2242461c0f8b4e
\n\n
.venv\Lib\site-packages\narwhals\_duckdb\__pycache__\expr_str.cpython-313.pyc
expr_str.cpython-313.pyc
Other
8,902
0.8
0
0
node-utils
270
2024-07-09T12:00:01.901596
GPL-3.0
false
2170d96187473f166a34ba091c24c19a
\n\n
.venv\Lib\site-packages\narwhals\_duckdb\__pycache__\expr_struct.cpython-313.pyc
expr_struct.cpython-313.pyc
Other
1,388
0.8
0
0
react-lib
256
2024-01-30T16:33:39.919275
MIT
false
d47d14e545764a2a0790b73add0c0fa5
\n\n
.venv\Lib\site-packages\narwhals\_duckdb\__pycache__\group_by.cpython-313.pyc
group_by.cpython-313.pyc
Other
2,176
0.7
0
0
python-kit
193
2025-03-28T11:36:38.245735
MIT
false
36dc4243f46e8759f8bd50dc1245aaf6
\n\n
.venv\Lib\site-packages\narwhals\_duckdb\__pycache__\namespace.cpython-313.pyc
namespace.cpython-313.pyc
Other
13,344
0.95
0
0
awesome-app
850
2023-08-01T20:51:26.747483
GPL-3.0
false
012c50c6b1d85140901a7a09d624908c
\n\n
.venv\Lib\site-packages\narwhals\_duckdb\__pycache__\selectors.cpython-313.pyc
selectors.cpython-313.pyc
Other
1,778
0.95
0
0
react-lib
385
2023-07-13T13:26:39.968776
BSD-3-Clause
false
27cf02c14b4228a17a48705681c573a9
\n\n
.venv\Lib\site-packages\narwhals\_duckdb\__pycache__\series.cpython-313.pyc
series.cpython-313.pyc
Other
2,430
0.8
0.033333
0
python-kit
993
2025-01-25T20:20:05.650115
Apache-2.0
false
8c0ae9c25d663fde094e551a4debff08
\n\n
.venv\Lib\site-packages\narwhals\_duckdb\__pycache__\typing.cpython-313.pyc
typing.cpython-313.pyc
Other
919
0.7
0
0
vue-tools
230
2024-04-21T00:20:30.673319
BSD-3-Clause
false
10071a22595a690330659ec3700e7f43
\n\n
.venv\Lib\site-packages\narwhals\_duckdb\__pycache__\utils.cpython-313.pyc
utils.cpython-313.pyc
Other
15,637
0.95
0.012048
0.019231
python-kit
847
2024-12-31T17:25:25.040526
MIT
false
123f235ef160cd8afc409f9dee824fba
\n\n
.venv\Lib\site-packages\narwhals\_duckdb\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
191
0.7
0
0
python-kit
791
2023-12-24T00:56:32.736885
BSD-3-Clause
false
7eb9a5cc7d5e6d53eb705da2d7f650f3
from __future__ import annotations\n\nimport operator\nfrom typing import TYPE_CHECKING, Any, Literal, cast\n\nimport ibis\nimport ibis.expr.types as ir\n\nfrom narwhals._ibis.utils import evaluate_exprs, native_to_narwhals_dtype\nfrom narwhals._utils import (\n Implementation,\n Version,\n not_implemented,\n parse_columns_to_drop,\n parse_version,\n validate_backend_version,\n)\nfrom narwhals.exceptions import ColumnNotFoundError, InvalidOperationError\nfrom narwhals.typing import CompliantLazyFrame\n\nif TYPE_CHECKING:\n from collections.abc import Iterable, Iterator, Mapping, Sequence\n from types import ModuleType\n\n import pandas as pd\n import pyarrow as pa\n from ibis.expr.operations import Binary\n from typing_extensions import Self, TypeAlias, TypeIs\n\n from narwhals._compliant.typing import CompliantDataFrameAny\n from narwhals._ibis.expr import IbisExpr\n from narwhals._ibis.group_by import IbisGroupBy\n from narwhals._ibis.namespace import IbisNamespace\n from narwhals._ibis.series import IbisInterchangeSeries\n from narwhals._utils import _FullContext\n from narwhals.dataframe import LazyFrame\n from narwhals.dtypes import DType\n from narwhals.stable.v1 import DataFrame as DataFrameV1\n from narwhals.typing import AsofJoinStrategy, JoinStrategy, LazyUniqueKeepStrategy\n\n JoinPredicates: TypeAlias = "Sequence[ir.BooleanColumn] | Sequence[str]"\n\n\nclass IbisLazyFrame(\n CompliantLazyFrame[\n "IbisExpr", "ir.Table", "LazyFrame[ir.Table] | DataFrameV1[ir.Table]"\n ]\n):\n _implementation = Implementation.IBIS\n\n def __init__(\n self, df: ir.Table, *, backend_version: tuple[int, ...], version: Version\n ) -> None:\n self._native_frame: ir.Table = df\n self._version = version\n self._backend_version = backend_version\n self._cached_schema: dict[str, DType] | None = None\n self._cached_columns: list[str] | None = None\n validate_backend_version(self._implementation, self._backend_version)\n\n @staticmethod\n def _is_native(obj: ir.Table | Any) -> 
TypeIs[ir.Table]:\n return isinstance(obj, ir.Table)\n\n @classmethod\n def from_native(cls, data: ir.Table, /, *, context: _FullContext) -> Self:\n return cls(\n data, backend_version=context._backend_version, version=context._version\n )\n\n def to_narwhals(self) -> LazyFrame[ir.Table] | DataFrameV1[ir.Table]:\n if self._version is Version.MAIN:\n return self._version.lazyframe(self, level="lazy")\n\n from narwhals.stable.v1 import DataFrame as DataFrameV1\n\n return DataFrameV1(self, level="interchange")\n\n def __narwhals_dataframe__(self) -> Self: # pragma: no cover\n # Keep around for backcompat.\n if self._version is not Version.V1:\n msg = "__narwhals_dataframe__ is not implemented for IbisLazyFrame"\n raise AttributeError(msg)\n return self\n\n def __narwhals_lazyframe__(self) -> Self:\n return self\n\n def __native_namespace__(self) -> ModuleType:\n return ibis\n\n def __narwhals_namespace__(self) -> IbisNamespace:\n from narwhals._ibis.namespace import IbisNamespace\n\n return IbisNamespace(backend_version=self._backend_version, version=self._version)\n\n def get_column(self, name: str) -> IbisInterchangeSeries:\n from narwhals._ibis.series import IbisInterchangeSeries\n\n return IbisInterchangeSeries(self.native.select(name), version=self._version)\n\n def _iter_columns(self) -> Iterator[ir.Expr]:\n for name in self.columns:\n yield self.native[name]\n\n def collect(\n self, backend: ModuleType | Implementation | str | None, **kwargs: Any\n ) -> CompliantDataFrameAny:\n if backend is None or backend is Implementation.PYARROW:\n import pyarrow as pa # ignore-banned-import\n\n from narwhals._arrow.dataframe import ArrowDataFrame\n\n return ArrowDataFrame(\n self.native.to_pyarrow(),\n backend_version=parse_version(pa),\n version=self._version,\n validate_column_names=True,\n )\n\n if backend is Implementation.PANDAS:\n import pandas as pd # ignore-banned-import\n\n from narwhals._pandas_like.dataframe import PandasLikeDataFrame\n\n return 
PandasLikeDataFrame(\n self.native.to_pandas(),\n implementation=Implementation.PANDAS,\n backend_version=parse_version(pd),\n version=self._version,\n validate_column_names=True,\n )\n\n if backend is Implementation.POLARS:\n import polars as pl # ignore-banned-import\n\n from narwhals._polars.dataframe import PolarsDataFrame\n\n return PolarsDataFrame(\n self.native.to_polars(),\n backend_version=parse_version(pl),\n version=self._version,\n )\n\n msg = f"Unsupported `backend` value: {backend}" # pragma: no cover\n raise ValueError(msg) # pragma: no cover\n\n def head(self, n: int) -> Self:\n return self._with_native(self.native.head(n))\n\n def simple_select(self, *column_names: str) -> Self:\n return self._with_native(self.native.select(*column_names))\n\n def aggregate(self, *exprs: IbisExpr) -> Self:\n selection = [\n cast("ir.Scalar", val.name(name))\n for name, val in evaluate_exprs(self, *exprs)\n ]\n return self._with_native(self.native.aggregate(selection))\n\n def select(self, *exprs: IbisExpr) -> Self:\n selection = [val.name(name) for name, val in evaluate_exprs(self, *exprs)]\n if not selection:\n msg = "At least one expression must be provided to `select` with the Ibis backend."\n raise ValueError(msg)\n\n t = self.native.select(*selection)\n return self._with_native(t)\n\n def drop(self, columns: Sequence[str], *, strict: bool) -> Self:\n columns_to_drop = parse_columns_to_drop(self, columns, strict=strict)\n selection = (col for col in self.columns if col not in columns_to_drop)\n return self._with_native(self.native.select(*selection))\n\n def lazy(self, *, backend: Implementation | None = None) -> Self:\n # The `backend`` argument has no effect but we keep it here for\n # backwards compatibility because in `narwhals.stable.v1`\n # function `.from_native()` will return a DataFrame for Ibis.\n\n if backend is not None: # pragma: no cover\n msg = "`backend` argument is not supported for Ibis"\n raise ValueError(msg)\n return self\n\n def 
with_columns(self, *exprs: IbisExpr) -> Self:\n new_columns_map = dict(evaluate_exprs(self, *exprs))\n return self._with_native(self.native.mutate(**new_columns_map))\n\n def filter(self, predicate: IbisExpr) -> Self:\n # `[0]` is safe as the predicate's expression only returns a single column\n mask = cast("ir.BooleanValue", predicate(self)[0])\n return self._with_native(self.native.filter(mask))\n\n @property\n def schema(self) -> dict[str, DType]:\n if self._cached_schema is None:\n # Note: prefer `self._cached_schema` over `functools.cached_property`\n # due to Python3.13 failures.\n self._cached_schema = {\n name: native_to_narwhals_dtype(dtype, self._version)\n for name, dtype in self.native.schema().fields.items()\n }\n return self._cached_schema\n\n @property\n def columns(self) -> list[str]:\n if self._cached_columns is None:\n self._cached_columns = (\n list(self.schema)\n if self._cached_schema is not None\n else list(self.native.columns)\n )\n return self._cached_columns\n\n def to_pandas(self) -> pd.DataFrame:\n # only if version is v1, keep around for backcompat\n import pandas as pd # ignore-banned-import()\n\n if parse_version(pd) >= (1, 0, 0):\n return self.native.to_pandas()\n else: # pragma: no cover\n msg = f"Conversion to pandas requires pandas>=1.0.0, found {pd.__version__}"\n raise NotImplementedError(msg)\n\n def to_arrow(self) -> pa.Table:\n # only if version is v1, keep around for backcompat\n return self.native.to_pyarrow()\n\n def _with_version(self, version: Version) -> Self:\n return self.__class__(\n self.native, version=version, backend_version=self._backend_version\n )\n\n def _with_native(self, df: ir.Table) -> Self:\n return self.__class__(\n df, backend_version=self._backend_version, version=self._version\n )\n\n def group_by(\n self, keys: Sequence[str] | Sequence[IbisExpr], *, drop_null_keys: bool\n ) -> IbisGroupBy:\n from narwhals._ibis.group_by import IbisGroupBy\n\n return IbisGroupBy(self, keys, 
drop_null_keys=drop_null_keys)\n\n def rename(self, mapping: Mapping[str, str]) -> Self:\n def _rename(col: str) -> str:\n return mapping.get(col, col)\n\n return self._with_native(self.native.rename(_rename))\n\n @staticmethod\n def _join_drop_duplicate_columns(df: ir.Table, columns: Iterable[str], /) -> ir.Table:\n """Ibis adds a suffix to the right table col, even when it matches the left during a join."""\n duplicates = set(df.columns).intersection(columns)\n return df.drop(*duplicates) if duplicates else df\n\n def join(\n self,\n other: Self,\n *,\n how: JoinStrategy,\n left_on: Sequence[str] | None,\n right_on: Sequence[str] | None,\n suffix: str,\n ) -> Self:\n how_native = "outer" if how == "full" else how\n rname = "{name}" + suffix\n if other == self:\n # Ibis does not support self-references unless created as a view\n other = self._with_native(other.native.view())\n if how_native == "cross":\n joined = self.native.join(other.native, how=how_native, rname=rname)\n return self._with_native(joined)\n # help mypy\n assert left_on is not None # noqa: S101\n assert right_on is not None # noqa: S101\n predicates = self._convert_predicates(other, left_on, right_on)\n joined = self.native.join(other.native, predicates, how=how_native, rname=rname)\n if how_native == "left":\n right_names = (n + suffix for n in right_on)\n joined = self._join_drop_duplicate_columns(joined, right_names)\n it = (cast("Binary", p.op()) for p in predicates if not isinstance(p, str))\n to_drop = []\n for pred in it:\n right = pred.right.name\n # Mirrors how polars works.\n if right not in self.columns and pred.left.name != right:\n to_drop.append(right)\n if to_drop:\n joined = joined.drop(*to_drop)\n return self._with_native(joined)\n\n def join_asof(\n self,\n other: Self,\n *,\n left_on: str,\n right_on: str,\n by_left: Sequence[str] | None,\n by_right: Sequence[str] | None,\n strategy: AsofJoinStrategy,\n suffix: str,\n ) -> Self:\n rname = "{name}" + suffix\n strategy_op = 
{"backward": operator.ge, "forward": operator.le}\n predicates: JoinPredicates = []\n if op := strategy_op.get(strategy):\n on: ir.BooleanColumn = op(self.native[left_on], other.native[right_on])\n else:\n msg = "Only `backward` and `forward` strategies are currently supported for Ibis"\n raise NotImplementedError(msg)\n if by_left is not None and by_right is not None:\n predicates = self._convert_predicates(other, by_left, by_right)\n joined = self.native.asof_join(other.native, on, predicates, rname=rname)\n joined = self._join_drop_duplicate_columns(joined, [right_on + suffix])\n if by_right is not None:\n right_names = (n + suffix for n in by_right)\n joined = self._join_drop_duplicate_columns(joined, right_names)\n return self._with_native(joined)\n\n def _convert_predicates(\n self, other: Self, left_on: Sequence[str], right_on: Sequence[str]\n ) -> JoinPredicates:\n if left_on == right_on:\n return left_on\n return [\n cast("ir.BooleanColumn", (self.native[left] == other.native[right]))\n for left, right in zip(left_on, right_on)\n ]\n\n def collect_schema(self) -> dict[str, DType]:\n return {\n name: native_to_narwhals_dtype(dtype, self._version)\n for name, dtype in self.native.schema().fields.items()\n }\n\n def unique(\n self, subset: Sequence[str] | None, *, keep: LazyUniqueKeepStrategy\n ) -> Self:\n if subset_ := subset if keep == "any" else (subset or self.columns):\n # Sanitise input\n if any(x not in self.columns for x in subset_):\n msg = f"Columns {set(subset_).difference(self.columns)} not found in {self.columns}."\n raise ColumnNotFoundError(msg)\n\n mapped_keep: dict[str, Literal["first"] | None] = {\n "any": "first",\n "none": None,\n }\n to_keep = mapped_keep[keep]\n return self._with_native(self.native.distinct(on=subset_, keep=to_keep))\n return self._with_native(self.native.distinct(on=subset))\n\n def sort(self, *by: str, descending: bool | Sequence[bool], nulls_last: bool) -> Self:\n if isinstance(descending, bool):\n descending = 
[descending for _ in range(len(by))]\n\n sort_cols = []\n\n for i in range(len(by)):\n direction_fn = ibis.desc if descending[i] else ibis.asc\n col = direction_fn(by[i], nulls_first=not nulls_last)\n sort_cols.append(cast("ir.Column", col))\n\n return self._with_native(self.native.order_by(*sort_cols))\n\n def drop_nulls(self, subset: Sequence[str] | None) -> Self:\n subset_ = subset if subset is not None else self.columns\n return self._with_native(self.native.drop_null(subset_))\n\n def explode(self, columns: Sequence[str]) -> Self:\n dtypes = self._version.dtypes\n schema = self.collect_schema()\n for col in columns:\n dtype = schema[col]\n\n if dtype != dtypes.List:\n msg = (\n f"`explode` operation not supported for dtype `{dtype}`, "\n "expected List type"\n )\n raise InvalidOperationError(msg)\n\n if len(columns) != 1:\n msg = (\n "Exploding on multiple columns is not supported with Ibis backend since "\n "we cannot guarantee that the exploded columns have matching element counts."\n )\n raise NotImplementedError(msg)\n\n return self._with_native(self.native.unnest(columns[0], keep_empty=True))\n\n def unpivot(\n self,\n on: Sequence[str] | None,\n index: Sequence[str] | None,\n variable_name: str,\n value_name: str,\n ) -> Self:\n import ibis.selectors as s\n\n index_: Sequence[str] = [] if index is None else index\n on_: Sequence[str] = (\n [c for c in self.columns if c not in index_] if on is None else on\n )\n\n # Discard columns not in the index\n final_columns = list(dict.fromkeys([*index_, variable_name, value_name]))\n\n unpivoted = self.native.pivot_longer(\n s.cols(*on_), names_to=variable_name, values_to=value_name\n )\n return self._with_native(unpivoted.select(*final_columns))\n\n def with_row_index(self, name: str, order_by: Sequence[str]) -> Self:\n to_select = [\n ibis.row_number().over(ibis.window(order_by=order_by)).name(name),\n ibis.selectors.all(),\n ]\n return self._with_native(self.native.select(*to_select))\n\n gather_every = 
not_implemented.deprecated(\n "`LazyFrame.gather_every` is deprecated and will be removed in a future version."\n )\n tail = not_implemented.deprecated(\n "`LazyFrame.tail` is deprecated and will be removed in a future version."\n )\n
.venv\Lib\site-packages\narwhals\_ibis\dataframe.py
dataframe.py
Python
16,359
0.95
0.242991
0.045455
vue-tools
829
2024-01-08T16:25:36.948646
BSD-3-Clause
false
90cff95d304dad91cf24c9c845e78a84
from __future__ import annotations\n\nimport operator\nfrom functools import partial\nfrom typing import TYPE_CHECKING, Any, Callable, Literal, TypeVar, cast\n\nimport ibis\n\nfrom narwhals._compliant import LazyExpr\nfrom narwhals._compliant.window import WindowInputs\nfrom narwhals._expression_parsing import (\n combine_alias_output_names,\n combine_evaluate_output_names,\n)\nfrom narwhals._ibis.expr_dt import IbisExprDateTimeNamespace\nfrom narwhals._ibis.expr_list import IbisExprListNamespace\nfrom narwhals._ibis.expr_str import IbisExprStringNamespace\nfrom narwhals._ibis.expr_struct import IbisExprStructNamespace\nfrom narwhals._ibis.utils import is_floating, lit, narwhals_to_native_dtype\nfrom narwhals._utils import Implementation, not_implemented\n\nif TYPE_CHECKING:\n from collections.abc import Iterable, Iterator, Sequence\n\n import ibis.expr.types as ir\n from typing_extensions import Self\n\n from narwhals._compliant.typing import (\n AliasNames,\n EvalNames,\n EvalSeries,\n WindowFunction,\n )\n from narwhals._expression_parsing import ExprKind, ExprMetadata\n from narwhals._ibis.dataframe import IbisLazyFrame\n from narwhals._ibis.namespace import IbisNamespace\n from narwhals._utils import Version, _FullContext\n from narwhals.typing import IntoDType, RankMethod, RollingInterpolationMethod\n\n ExprT = TypeVar("ExprT", bound=ir.Value)\n IbisWindowFunction = WindowFunction[IbisLazyFrame, ir.Value]\n IbisWindowInputs = WindowInputs[ir.Value]\n\n\nclass IbisExpr(LazyExpr["IbisLazyFrame", "ir.Column"]):\n _implementation = Implementation.IBIS\n\n def __init__(\n self,\n call: EvalSeries[IbisLazyFrame, ir.Value],\n window_function: IbisWindowFunction | None = None,\n *,\n evaluate_output_names: EvalNames[IbisLazyFrame],\n alias_output_names: AliasNames | None,\n backend_version: tuple[int, ...],\n version: Version,\n ) -> None:\n self._call = call\n self._evaluate_output_names = evaluate_output_names\n self._alias_output_names = alias_output_names\n 
self._backend_version = backend_version\n self._version = version\n self._metadata: ExprMetadata | None = None\n self._window_function: IbisWindowFunction | None = window_function\n\n @property\n def window_function(self) -> IbisWindowFunction:\n def default_window_func(\n df: IbisLazyFrame, window_inputs: IbisWindowInputs\n ) -> list[ir.Value]:\n assert not window_inputs.order_by # noqa: S101\n return [\n expr.over(ibis.window(group_by=window_inputs.partition_by))\n for expr in self(df)\n ]\n\n return self._window_function or default_window_func\n\n def __call__(self, df: IbisLazyFrame) -> Sequence[ir.Value]:\n return self._call(df)\n\n def __narwhals_expr__(self) -> None: ...\n\n def __narwhals_namespace__(self) -> IbisNamespace: # pragma: no cover\n # Unused, just for compatibility with PandasLikeExpr\n from narwhals._ibis.namespace import IbisNamespace\n\n return IbisNamespace(backend_version=self._backend_version, version=self._version)\n\n def _cum_window_func(\n self, *, reverse: bool, func_name: Literal["sum", "max", "min", "count"]\n ) -> IbisWindowFunction:\n def func(df: IbisLazyFrame, inputs: IbisWindowInputs) -> Sequence[ir.Value]:\n window = ibis.window(\n group_by=list(inputs.partition_by),\n order_by=self._sort(\n *inputs.order_by, descending=reverse, nulls_last=reverse\n ),\n preceding=None, # unbounded\n following=0,\n )\n\n return [getattr(expr, func_name)().over(window) for expr in self(df)]\n\n return func\n\n def _rolling_window_func(\n self,\n *,\n func_name: Literal["sum", "mean", "std", "var"],\n center: bool,\n window_size: int,\n min_samples: int,\n ddof: int | None = None,\n ) -> IbisWindowFunction:\n supported_funcs = ["sum", "mean", "std", "var"]\n\n if center:\n preceding = window_size // 2\n following = window_size - preceding - 1\n else:\n preceding = window_size - 1\n following = 0\n\n def func(df: IbisLazyFrame, inputs: IbisWindowInputs) -> Sequence[ir.Value]:\n window = ibis.window(\n group_by=list(inputs.partition_by),\n 
order_by=self._sort(*inputs.order_by),\n preceding=preceding,\n following=following,\n )\n\n def inner_f(expr: ir.NumericColumn) -> ir.Value:\n if func_name in {"sum", "mean"}:\n func_ = getattr(expr, func_name)()\n elif func_name == "var" and ddof == 0:\n func_ = expr.var(how="pop")\n elif func_name in "var" and ddof == 1:\n func_ = expr.var(how="sample")\n elif func_name == "std" and ddof == 0:\n func_ = expr.std(how="pop")\n elif func_name == "std" and ddof == 1:\n func_ = expr.std(how="sample")\n elif func_name in {"var", "std"}: # pragma: no cover\n msg = f"Only ddof=0 and ddof=1 are currently supported for rolling_{func_name}."\n raise ValueError(msg)\n else: # pragma: no cover\n msg = f"Only the following functions are supported: {supported_funcs}.\nGot: {func_name}."\n raise ValueError(msg)\n\n rolling_calc = func_.over(window)\n valid_count = expr.count().over(window)\n return ibis.cases(\n (valid_count >= ibis.literal(min_samples), rolling_calc),\n else_=ibis.null(),\n )\n\n return [inner_f(cast("ir.NumericColumn", expr)) for expr in self(df)]\n\n return func\n\n def broadcast(self, kind: Literal[ExprKind.AGGREGATION, ExprKind.LITERAL]) -> Self:\n # Ibis does its own broadcasting.\n return self\n\n def _sort(\n self, *cols: ir.Column | str, descending: bool = False, nulls_last: bool = False\n ) -> Iterator[ir.Column]:\n mapping = {\n (False, False): partial(ibis.asc, nulls_first=True),\n (False, True): partial(ibis.asc, nulls_first=False),\n (True, False): partial(ibis.desc, nulls_first=True),\n (True, True): partial(ibis.desc, nulls_first=False),\n }\n sort = mapping[(descending, nulls_last)]\n yield from (cast("ir.Column", sort(col)) for col in cols)\n\n @classmethod\n def from_column_names(\n cls: type[Self],\n evaluate_column_names: EvalNames[IbisLazyFrame],\n /,\n *,\n context: _FullContext,\n ) -> Self:\n def func(df: IbisLazyFrame) -> list[ir.Column]:\n return [df.native[name] for name in evaluate_column_names(df)]\n\n return cls(\n func,\n 
evaluate_output_names=evaluate_column_names,\n alias_output_names=None,\n backend_version=context._backend_version,\n version=context._version,\n )\n\n @classmethod\n def from_column_indices(cls, *column_indices: int, context: _FullContext) -> Self:\n def func(df: IbisLazyFrame) -> list[ir.Column]:\n return [df.native[i] for i in column_indices]\n\n return cls(\n func,\n evaluate_output_names=cls._eval_names_indices(column_indices),\n alias_output_names=None,\n backend_version=context._backend_version,\n version=context._version,\n )\n\n @classmethod\n def _from_elementwise_horizontal_op(\n cls, func: Callable[[Iterable[ir.Value]], ir.Value], *exprs: Self\n ) -> Self:\n def call(df: IbisLazyFrame) -> list[ir.Value]:\n cols = (col for _expr in exprs for col in _expr(df))\n return [func(cols)]\n\n context = exprs[0]\n return cls(\n call=call,\n evaluate_output_names=combine_evaluate_output_names(*exprs),\n alias_output_names=combine_alias_output_names(*exprs),\n backend_version=context._backend_version,\n version=context._version,\n )\n\n def _with_callable(\n self, call: Callable[..., ir.Value], /, **expressifiable_args: Self | Any\n ) -> Self:\n """Create expression from callable.\n\n Arguments:\n call: Callable from compliant DataFrame to native Expression\n expr_name: Expression name\n expressifiable_args: arguments pass to expression which should be parsed\n as expressions (e.g. 
in `nw.col('a').is_between('b', 'c')`)\n """\n\n def func(df: IbisLazyFrame) -> list[ir.Value]:\n native_series_list = self(df)\n other_native_series = {\n key: df._evaluate_expr(value) if self._is_expr(value) else lit(value)\n for key, value in expressifiable_args.items()\n }\n return [\n call(native_series, **other_native_series)\n for native_series in native_series_list\n ]\n\n return self.__class__(\n func,\n evaluate_output_names=self._evaluate_output_names,\n alias_output_names=self._alias_output_names,\n backend_version=self._backend_version,\n version=self._version,\n )\n\n def _with_binary(self, op: Callable[..., ir.Value], other: Self | Any) -> Self:\n return self._with_callable(op, other=other)\n\n def _with_alias_output_names(self, func: AliasNames | None, /) -> Self:\n return type(self)(\n self._call,\n self._window_function,\n evaluate_output_names=self._evaluate_output_names,\n alias_output_names=func,\n backend_version=self._backend_version,\n version=self._version,\n )\n\n def _with_window_function(self, window_function: IbisWindowFunction) -> Self:\n return self.__class__(\n self._call,\n window_function,\n evaluate_output_names=self._evaluate_output_names,\n alias_output_names=self._alias_output_names,\n backend_version=self._backend_version,\n version=self._version,\n )\n\n @classmethod\n def _alias_native(cls, expr: ExprT, name: str, /) -> ExprT:\n return cast("ExprT", expr.name(name))\n\n def __invert__(self) -> Self:\n invert = cast("Callable[..., ir.Value]", operator.invert)\n return self._with_callable(invert)\n\n def abs(self) -> Self:\n return self._with_callable(lambda expr: expr.abs())\n\n def mean(self) -> Self:\n return self._with_callable(lambda expr: expr.mean())\n\n def median(self) -> Self:\n return self._with_callable(lambda expr: expr.median())\n\n def all(self) -> Self:\n return self._with_callable(lambda expr: expr.all().fill_null(lit(True))) # noqa: FBT003\n\n def any(self) -> Self:\n return self._with_callable(lambda expr: 
expr.any().fill_null(lit(False))) # noqa: FBT003\n\n def quantile(\n self, quantile: float, interpolation: RollingInterpolationMethod\n ) -> Self:\n if interpolation != "linear":\n msg = "Only linear interpolation methods are supported for Ibis quantile."\n raise NotImplementedError(msg)\n return self._with_callable(lambda expr: expr.quantile(quantile))\n\n def clip(self, lower_bound: Any, upper_bound: Any) -> Self:\n def _clip(\n expr: ir.NumericValue, lower: Any | None = None, upper: Any | None = None\n ) -> ir.NumericValue:\n return expr.clip(lower=lower, upper=upper)\n\n if lower_bound is None:\n return self._with_callable(_clip, upper=upper_bound)\n if upper_bound is None:\n return self._with_callable(_clip, lower=lower_bound)\n return self._with_callable(_clip, lower=lower_bound, upper=upper_bound)\n\n def sum(self) -> Self:\n return self._with_callable(lambda expr: expr.sum().fill_null(lit(0)))\n\n def n_unique(self) -> Self:\n return self._with_callable(\n lambda expr: expr.nunique() + expr.isnull().any().cast("int8")\n )\n\n def count(self) -> Self:\n return self._with_callable(lambda expr: expr.count())\n\n def len(self) -> Self:\n def func(df: IbisLazyFrame) -> list[ir.IntegerScalar]:\n return [df.native.count()]\n\n return self.__class__(\n func,\n evaluate_output_names=self._evaluate_output_names,\n alias_output_names=self._alias_output_names,\n backend_version=self._backend_version,\n version=self._version,\n )\n\n def std(self, ddof: int) -> Self:\n def _std(expr: ir.NumericColumn, ddof: int) -> ir.Value:\n if ddof == 0:\n return expr.std(how="pop")\n elif ddof == 1:\n return expr.std(how="sample")\n else:\n n_samples = expr.count()\n std_pop = expr.std(how="pop")\n ddof_lit = cast("ir.IntegerScalar", ibis.literal(ddof))\n return std_pop * n_samples.sqrt() / (n_samples - ddof_lit).sqrt()\n\n return self._with_callable(lambda expr: _std(expr, ddof))\n\n def var(self, ddof: int) -> Self:\n def _var(expr: ir.NumericColumn, ddof: int) -> ir.Value:\n if 
ddof == 0:\n return expr.var(how="pop")\n elif ddof == 1:\n return expr.var(how="sample")\n else:\n n_samples = expr.count()\n var_pop = expr.var(how="pop")\n ddof_lit = cast("ir.IntegerScalar", ibis.literal(ddof))\n return var_pop * n_samples / (n_samples - ddof_lit)\n\n return self._with_callable(lambda expr: _var(expr, ddof))\n\n def max(self) -> Self:\n return self._with_callable(lambda expr: expr.max())\n\n def min(self) -> Self:\n return self._with_callable(lambda expr: expr.min())\n\n def null_count(self) -> Self:\n return self._with_callable(lambda expr: expr.isnull().sum())\n\n def over(self, partition_by: Sequence[str], order_by: Sequence[str]) -> Self:\n def func(df: IbisLazyFrame) -> Sequence[ir.Value]:\n return self.window_function(df, WindowInputs(partition_by, order_by))\n\n return self.__class__(\n func,\n evaluate_output_names=self._evaluate_output_names,\n alias_output_names=self._alias_output_names,\n backend_version=self._backend_version,\n version=self._version,\n )\n\n def is_null(self) -> Self:\n return self._with_callable(lambda expr: expr.isnull())\n\n def is_nan(self) -> Self:\n def func(expr: ir.FloatingValue | Any) -> ir.Value:\n otherwise = expr.isnan() if is_floating(expr.type()) else False\n return ibis.ifelse(expr.isnull(), None, otherwise)\n\n return self._with_callable(func)\n\n def is_finite(self) -> Self:\n return self._with_callable(lambda expr: ~(expr.isinf() | expr.isnan()))\n\n def is_in(self, other: Sequence[Any]) -> Self:\n return self._with_callable(lambda expr: expr.isin(other))\n\n def round(self, decimals: int) -> Self:\n return self._with_callable(lambda expr: expr.round(decimals))\n\n def shift(self, n: int) -> Self:\n def _func(df: IbisLazyFrame, inputs: IbisWindowInputs) -> Sequence[ir.Value]:\n return [\n expr.lag(n).over( # type: ignore[attr-defined, unused-ignore]\n ibis.window(\n group_by=inputs.partition_by,\n order_by=self._sort(*inputs.order_by),\n )\n )\n for expr in self(df)\n ]\n\n return 
self._with_window_function(_func)\n\n def is_first_distinct(self) -> Self:\n def func(\n df: IbisLazyFrame, inputs: IbisWindowInputs\n ) -> Sequence[ir.BooleanValue]:\n # ibis row_number starts at 0, so need to compare with 0 instead of the usual `1`\n return [\n ibis.row_number().over(\n ibis.window(\n group_by=[*inputs.partition_by, expr],\n order_by=self._sort(*inputs.order_by),\n )\n )\n == lit(0)\n for expr in self(df)\n ]\n\n return self._with_window_function(func)\n\n def is_last_distinct(self) -> Self:\n def func(\n df: IbisLazyFrame, inputs: IbisWindowInputs\n ) -> Sequence[ir.BooleanValue]:\n # ibis row_number starts at 0, so need to compare with 0 instead of the usual `1`\n return [\n ibis.row_number().over(\n ibis.window(\n group_by=[*inputs.partition_by, expr],\n order_by=self._sort(\n *inputs.order_by, descending=True, nulls_last=True\n ),\n )\n )\n == lit(0)\n for expr in self(df)\n ]\n\n return self._with_window_function(func)\n\n def diff(self) -> Self:\n def _func(df: IbisLazyFrame, inputs: IbisWindowInputs) -> Sequence[ir.Value]:\n return [\n expr\n - expr.lag().over( # type: ignore[attr-defined, unused-ignore]\n ibis.window(\n following=0,\n group_by=inputs.partition_by,\n order_by=self._sort(*inputs.order_by),\n )\n )\n for expr in self(df)\n ]\n\n return self._with_window_function(_func)\n\n def cum_sum(self, *, reverse: bool) -> Self:\n return self._with_window_function(\n self._cum_window_func(reverse=reverse, func_name="sum")\n )\n\n def cum_max(self, *, reverse: bool) -> Self:\n return self._with_window_function(\n self._cum_window_func(reverse=reverse, func_name="max")\n )\n\n def cum_min(self, *, reverse: bool) -> Self:\n return self._with_window_function(\n self._cum_window_func(reverse=reverse, func_name="min")\n )\n\n def cum_count(self, *, reverse: bool) -> Self:\n return self._with_window_function(\n self._cum_window_func(reverse=reverse, func_name="count")\n )\n\n def rolling_sum(self, window_size: int, *, min_samples: int, center: 
bool) -> Self:\n return self._with_window_function(\n self._rolling_window_func(\n func_name="sum",\n center=center,\n window_size=window_size,\n min_samples=min_samples,\n )\n )\n\n def rolling_mean(self, window_size: int, *, min_samples: int, center: bool) -> Self:\n return self._with_window_function(\n self._rolling_window_func(\n func_name="mean",\n center=center,\n window_size=window_size,\n min_samples=min_samples,\n )\n )\n\n def rolling_var(\n self, window_size: int, *, min_samples: int, center: bool, ddof: int\n ) -> Self:\n return self._with_window_function(\n self._rolling_window_func(\n func_name="var",\n center=center,\n window_size=window_size,\n min_samples=min_samples,\n ddof=ddof,\n )\n )\n\n def rolling_std(\n self, window_size: int, *, min_samples: int, center: bool, ddof: int\n ) -> Self:\n return self._with_window_function(\n self._rolling_window_func(\n func_name="std",\n center=center,\n window_size=window_size,\n min_samples=min_samples,\n ddof=ddof,\n )\n )\n\n def fill_null(self, value: Self | Any, strategy: Any, limit: int | None) -> Self:\n # Ibis doesn't yet allow ignoring nulls in first/last with window functions, which makes forward/backward\n # strategies inconsistent when there are nulls present: https://github.com/ibis-project/ibis/issues/9539\n if strategy is not None:\n msg = "`strategy` is not supported for the Ibis backend"\n raise NotImplementedError(msg)\n if limit is not None:\n msg = "`limit` is not supported for the Ibis backend" # pragma: no cover\n raise NotImplementedError(msg)\n\n def _fill_null(expr: ir.Value, value: ir.Scalar) -> ir.Value:\n return expr.fill_null(value)\n\n return self._with_callable(_fill_null, value=value)\n\n def cast(self, dtype: IntoDType) -> Self:\n def _func(expr: ir.Column) -> ir.Value:\n native_dtype = narwhals_to_native_dtype(dtype, self._version)\n # ibis `cast` overloads do not include DataType, only literals\n return expr.cast(native_dtype) # type: ignore[unused-ignore]\n\n return 
self._with_callable(_func)\n\n def is_unique(self) -> Self:\n return self._with_callable(\n lambda expr: expr.isnull().count().over(ibis.window(group_by=(expr))) == 1\n )\n\n def rank(self, method: RankMethod, *, descending: bool) -> Self:\n def _rank(expr: ir.Column) -> ir.Column:\n order_by = next(self._sort(expr, descending=descending, nulls_last=True))\n window = ibis.window(order_by=order_by)\n\n if method == "dense":\n rank_ = order_by.dense_rank()\n elif method == "ordinal":\n rank_ = cast("ir.IntegerColumn", ibis.row_number().over(window))\n else:\n rank_ = order_by.rank()\n\n # Ibis uses 0-based ranking. Add 1 to match polars 1-based rank.\n rank_ = rank_ + cast("ir.IntegerValue", lit(1))\n\n # For "max" and "average", adjust using the count of rows in the partition.\n if method == "max":\n # Define a window partitioned by expr (i.e. each distinct value)\n partition = ibis.window(group_by=[expr])\n cnt = cast("ir.IntegerValue", expr.count().over(partition))\n rank_ = rank_ + cnt - cast("ir.IntegerValue", lit(1))\n elif method == "average":\n partition = ibis.window(group_by=[expr])\n cnt = cast("ir.IntegerValue", expr.count().over(partition))\n avg = cast(\n "ir.NumericValue", (cnt - cast("ir.IntegerScalar", lit(1))) / lit(2.0)\n )\n rank_ = rank_ + avg\n\n return cast("ir.Column", ibis.cases((expr.notnull(), rank_)))\n\n return self._with_callable(_rank)\n\n def log(self, base: float) -> Self:\n def _log(expr: ir.NumericColumn) -> ir.Value:\n otherwise = expr.log(cast("ir.NumericValue", lit(base)))\n return ibis.cases(\n (expr < lit(0), lit(float("nan"))),\n (expr == lit(0), lit(float("-inf"))),\n else_=otherwise,\n )\n\n return self._with_callable(_log)\n\n def exp(self) -> Self:\n def _exp(expr: ir.NumericColumn) -> ir.Value:\n return expr.exp()\n\n return self._with_callable(_exp)\n\n def sqrt(self) -> Self:\n def _sqrt(expr: ir.NumericColumn) -> ir.Value:\n return ibis.cases((expr < lit(0), lit(float("nan"))), else_=expr.sqrt())\n\n return 
self._with_callable(_sqrt)\n\n @property\n def str(self) -> IbisExprStringNamespace:\n return IbisExprStringNamespace(self)\n\n @property\n def dt(self) -> IbisExprDateTimeNamespace:\n return IbisExprDateTimeNamespace(self)\n\n @property\n def list(self) -> IbisExprListNamespace:\n return IbisExprListNamespace(self)\n\n @property\n def struct(self) -> IbisExprStructNamespace:\n return IbisExprStructNamespace(self)\n\n # NOTE: https://github.com/ibis-project/ibis/issues/10542\n cum_prod = not_implemented()\n drop_nulls = not_implemented()\n\n # NOTE: https://github.com/ibis-project/ibis/issues/11176\n skew = not_implemented()\n kurtosis = not_implemented()\n unique = not_implemented()\n
.venv\Lib\site-packages\narwhals\_ibis\expr.py
expr.py
Python
24,351
0.95
0.184451
0.031193
node-utils
868
2025-06-09T04:35:36.703549
GPL-3.0
false
8b9bc22be52563ac2a813ce9d3174e6b
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, Callable\n\nfrom narwhals._compliant.any_namespace import DateTimeNamespace\nfrom narwhals._compliant.expr import LazyExprNamespace\nfrom narwhals._duration import parse_interval_string\nfrom narwhals._ibis.utils import UNITS_DICT_BUCKET, UNITS_DICT_TRUNCATE\nfrom narwhals._utils import not_implemented\n\nif TYPE_CHECKING:\n import ibis.expr.types as ir\n\n from narwhals._ibis.expr import IbisExpr\n from narwhals._ibis.utils import BucketUnit, TruncateUnit\n\n\nclass IbisExprDateTimeNamespace(\n LazyExprNamespace["IbisExpr"], DateTimeNamespace["IbisExpr"]\n):\n def year(self) -> IbisExpr:\n return self.compliant._with_callable(lambda expr: expr.year())\n\n def month(self) -> IbisExpr:\n return self.compliant._with_callable(lambda expr: expr.month())\n\n def day(self) -> IbisExpr:\n return self.compliant._with_callable(lambda expr: expr.day())\n\n def hour(self) -> IbisExpr:\n return self.compliant._with_callable(lambda expr: expr.hour())\n\n def minute(self) -> IbisExpr:\n return self.compliant._with_callable(lambda expr: expr.minute())\n\n def second(self) -> IbisExpr:\n return self.compliant._with_callable(lambda expr: expr.second())\n\n def millisecond(self) -> IbisExpr:\n return self.compliant._with_callable(lambda expr: expr.millisecond())\n\n def microsecond(self) -> IbisExpr:\n return self.compliant._with_callable(lambda expr: expr.microsecond())\n\n def to_string(self, format: str) -> IbisExpr:\n return self.compliant._with_callable(lambda expr: expr.strftime(format))\n\n def weekday(self) -> IbisExpr:\n # Ibis uses 0-6 for Monday-Sunday. 
Add 1 to match polars.\n return self.compliant._with_callable(lambda expr: expr.day_of_week.index() + 1)\n\n def ordinal_day(self) -> IbisExpr:\n return self.compliant._with_callable(lambda expr: expr.day_of_year())\n\n def date(self) -> IbisExpr:\n return self.compliant._with_callable(lambda expr: expr.date())\n\n def _bucket(self, kwds: dict[BucketUnit, Any], /) -> Callable[..., ir.TimestampValue]:\n def fn(expr: ir.TimestampValue) -> ir.TimestampValue:\n return expr.bucket(**kwds)\n\n return fn\n\n def _truncate(self, unit: TruncateUnit, /) -> Callable[..., ir.TimestampValue]:\n def fn(expr: ir.TimestampValue) -> ir.TimestampValue:\n return expr.truncate(unit)\n\n return fn\n\n def truncate(self, every: str) -> IbisExpr:\n multiple, unit = parse_interval_string(every)\n if unit == "q":\n multiple, unit = 3 * multiple, "mo"\n if multiple != 1:\n if self.compliant._backend_version < (7, 1): # pragma: no cover\n msg = "Truncating datetimes with multiples of the unit is only supported in Ibis >= 7.1."\n raise NotImplementedError(msg)\n fn = self._bucket({UNITS_DICT_BUCKET[unit]: multiple})\n else:\n fn = self._truncate(UNITS_DICT_TRUNCATE[unit])\n return self.compliant._with_callable(fn)\n\n def replace_time_zone(self, time_zone: str | None) -> IbisExpr:\n if time_zone is None:\n return self.compliant._with_callable(lambda _input: _input.cast("timestamp"))\n else: # pragma: no cover\n msg = "`replace_time_zone` with non-null `time_zone` not yet implemented for Ibis"\n raise NotImplementedError(msg)\n\n nanosecond = not_implemented()\n total_minutes = not_implemented()\n total_seconds = not_implemented()\n total_milliseconds = not_implemented()\n total_microseconds = not_implemented()\n total_nanoseconds = not_implemented()\n convert_time_zone = not_implemented()\n timestamp = not_implemented()\n
.venv\Lib\site-packages\narwhals\_ibis\expr_dt.py
expr_dt.py
Python
3,761
0.95
0.268041
0.013699
vue-tools
937
2024-11-07T06:49:33.809196
Apache-2.0
false
0699f4901c4bb7fee197455d1bf6629d
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom narwhals._compliant.any_namespace import ListNamespace\nfrom narwhals._compliant.expr import LazyExprNamespace\n\nif TYPE_CHECKING:\n from narwhals._ibis.expr import IbisExpr\n\n\nclass IbisExprListNamespace(LazyExprNamespace["IbisExpr"], ListNamespace["IbisExpr"]):\n def len(self) -> IbisExpr:\n return self.compliant._with_callable(lambda expr: expr.length())\n
.venv\Lib\site-packages\narwhals\_ibis\expr_list.py
expr_list.py
Python
442
0.85
0.214286
0
vue-tools
146
2024-08-01T07:44:09.993549
Apache-2.0
false
ef9aad1441c910d5b7a273607374acb0
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, Callable, cast\n\nimport ibis\nimport ibis.expr.types as ir\nfrom ibis.expr.datatypes import Timestamp\n\nfrom narwhals._compliant.any_namespace import StringNamespace\nfrom narwhals._compliant.expr import LazyExprNamespace\nfrom narwhals._ibis.utils import lit\nfrom narwhals._utils import _is_naive_format, not_implemented\n\nif TYPE_CHECKING:\n from narwhals._ibis.expr import IbisExpr\n\n\nclass IbisExprStringNamespace(LazyExprNamespace["IbisExpr"], StringNamespace["IbisExpr"]):\n def starts_with(self, prefix: str) -> IbisExpr:\n def fn(expr: ir.StringColumn) -> ir.BooleanValue:\n return expr.startswith(prefix)\n\n return self.compliant._with_callable(fn)\n\n def ends_with(self, suffix: str) -> IbisExpr:\n def fn(expr: ir.StringColumn) -> ir.BooleanValue:\n return expr.endswith(suffix)\n\n return self.compliant._with_callable(fn)\n\n def contains(self, pattern: str, *, literal: bool) -> IbisExpr:\n def fn(expr: ir.StringColumn) -> ir.BooleanValue:\n return expr.contains(pattern) if literal else expr.re_search(pattern)\n\n return self.compliant._with_callable(fn)\n\n def slice(self, offset: int, length: int | None) -> IbisExpr:\n def fn(expr: ir.StringColumn) -> ir.StringValue:\n return expr.substr(start=offset, length=length)\n\n return self.compliant._with_callable(fn)\n\n def split(self, by: str) -> IbisExpr:\n def fn(expr: ir.StringColumn) -> ir.ArrayValue:\n return expr.split(by)\n\n return self.compliant._with_callable(fn)\n\n def len_chars(self) -> IbisExpr:\n return self.compliant._with_callable(lambda expr: expr.length())\n\n def to_lowercase(self) -> IbisExpr:\n return self.compliant._with_callable(lambda expr: expr.lower())\n\n def to_uppercase(self) -> IbisExpr:\n return self.compliant._with_callable(lambda expr: expr.upper())\n\n def strip_chars(self, characters: str | None) -> IbisExpr:\n if characters is not None:\n msg = "Ibis does not support `characters` argument in 
`str.strip_chars`"\n raise NotImplementedError(msg)\n\n return self.compliant._with_callable(lambda expr: expr.strip())\n\n def _replace_all(self, pattern: str, value: str) -> Callable[..., ir.StringValue]:\n def fn(expr: ir.StringColumn) -> ir.StringValue:\n return expr.re_replace(pattern, value)\n\n return fn\n\n def _replace_all_literal(\n self, pattern: str, value: str\n ) -> Callable[..., ir.StringValue]:\n def fn(expr: ir.StringColumn) -> ir.StringValue:\n return expr.replace(pattern, value) # pyright: ignore[reportArgumentType]\n\n return fn\n\n def replace_all(self, pattern: str, value: str, *, literal: bool) -> IbisExpr:\n fn = self._replace_all_literal if literal else self._replace_all\n return self.compliant._with_callable(fn(pattern, value))\n\n def _to_datetime(self, format: str) -> Callable[..., ir.TimestampValue]:\n def fn(expr: ir.StringColumn) -> ir.TimestampValue:\n return expr.as_timestamp(format)\n\n return fn\n\n def _to_datetime_naive(self, format: str) -> Callable[..., ir.TimestampValue]:\n def fn(expr: ir.StringColumn) -> ir.TimestampValue:\n dtype: Any = Timestamp(timezone=None)\n return expr.as_timestamp(format).cast(dtype)\n\n return fn\n\n def to_datetime(self, format: str | None) -> IbisExpr:\n if format is None:\n msg = "Cannot infer format with Ibis backend"\n raise NotImplementedError(msg)\n fn = self._to_datetime_naive if _is_naive_format(format) else self._to_datetime\n return self.compliant._with_callable(fn(format))\n\n def to_date(self, format: str | None) -> IbisExpr:\n if format is None:\n msg = "Cannot infer format with Ibis backend"\n raise NotImplementedError(msg)\n\n def fn(expr: ir.StringColumn) -> ir.DateValue:\n return expr.as_date(format)\n\n return self._compliant_expr._with_callable(fn)\n\n def zfill(self, width: int) -> IbisExpr:\n def func(expr: ir.StringColumn) -> ir.Value:\n length = expr.length()\n less_than_width = length < lit(width)\n zero, hyphen, plus = "0", "-", "+"\n starts_with_minus = 
expr.startswith(hyphen)\n starts_with_plus = expr.startswith(plus)\n one = cast("ir.IntegerScalar", lit(1))\n sub_length = cast("ir.IntegerValue", length - one)\n substring = expr.substr(one, sub_length).lpad(width - 1, zero)\n return ibis.cases(\n (starts_with_minus & less_than_width, (substring.lpad(width, hyphen))),\n (starts_with_plus & less_than_width, (substring.lpad(width, plus))),\n (less_than_width, expr.lpad(width, zero)),\n else_=expr,\n )\n\n return self.compliant._with_callable(func)\n\n replace = not_implemented()\n
.venv\Lib\site-packages\narwhals\_ibis\expr_str.py
expr_str.py
Python
5,034
0.95
0.272727
0
python-kit
800
2025-06-04T01:51:22.533288
GPL-3.0
false
64ade07aa4be23381d2acc67c31eedf1
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom narwhals._compliant.any_namespace import StructNamespace\nfrom narwhals._compliant.expr import LazyExprNamespace\n\nif TYPE_CHECKING:\n import ibis.expr.types as ir\n\n from narwhals._ibis.expr import IbisExpr\n\n\nclass IbisExprStructNamespace(LazyExprNamespace["IbisExpr"], StructNamespace["IbisExpr"]):\n def field(self, name: str) -> IbisExpr:\n def func(expr: ir.StructColumn) -> ir.Column:\n return expr[name]\n\n return self.compliant._with_callable(func).alias(name)\n
.venv\Lib\site-packages\narwhals\_ibis\expr_struct.py
expr_struct.py
Python
570
0.85
0.210526
0
python-kit
879
2023-12-19T23:08:35.449395
BSD-3-Clause
false
376480576d5987f06c0cd32a096657f9
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom narwhals._compliant import LazyGroupBy\n\nif TYPE_CHECKING:\n from collections.abc import Sequence\n\n import ibis.expr.types as ir # noqa: F401\n\n from narwhals._ibis.dataframe import IbisLazyFrame\n from narwhals._ibis.expr import IbisExpr\n\n\nclass IbisGroupBy(LazyGroupBy["IbisLazyFrame", "IbisExpr", "ir.Value"]):\n def __init__(\n self,\n df: IbisLazyFrame,\n keys: Sequence[str] | Sequence[IbisExpr],\n /,\n *,\n drop_null_keys: bool,\n ) -> None:\n frame, self._keys, self._output_key_names = self._parse_keys(df, keys=keys)\n self._compliant_frame = frame.drop_nulls(self._keys) if drop_null_keys else frame\n\n def agg(self, *exprs: IbisExpr) -> IbisLazyFrame:\n native = self.compliant.native\n return self.compliant._with_native(\n native.group_by(self._keys).aggregate(*self._evaluate_exprs(exprs))\n ).rename(dict(zip(self._keys, self._output_key_names)))\n
.venv\Lib\site-packages\narwhals\_ibis\group_by.py
group_by.py
Python
1,030
0.95
0.15625
0.041667
awesome-app
177
2023-07-24T15:09:34.644421
GPL-3.0
false
a4dbc535b708b291ad579e7ef0deaec2
from __future__ import annotations\n\nimport operator\nfrom functools import reduce\nfrom itertools import chain\nfrom typing import TYPE_CHECKING, Any, cast\n\nimport ibis\nimport ibis.expr.types as ir\n\nfrom narwhals._compliant import LazyNamespace, LazyThen, LazyWhen\nfrom narwhals._expression_parsing import (\n combine_alias_output_names,\n combine_evaluate_output_names,\n)\nfrom narwhals._ibis.dataframe import IbisLazyFrame\nfrom narwhals._ibis.expr import IbisExpr\nfrom narwhals._ibis.selectors import IbisSelectorNamespace\nfrom narwhals._ibis.utils import lit, narwhals_to_native_dtype\nfrom narwhals._utils import Implementation, requires\n\nif TYPE_CHECKING:\n from collections.abc import Iterable, Sequence\n\n from narwhals._utils import Version\n from narwhals.typing import ConcatMethod, IntoDType\n\n\nclass IbisNamespace(LazyNamespace[IbisLazyFrame, IbisExpr, "ir.Table"]):\n _implementation: Implementation = Implementation.IBIS\n\n def __init__(self, *, backend_version: tuple[int, ...], version: Version) -> None:\n self._backend_version = backend_version\n self._version = version\n\n @property\n def selectors(self) -> IbisSelectorNamespace:\n return IbisSelectorNamespace.from_namespace(self)\n\n @property\n def _expr(self) -> type[IbisExpr]:\n return IbisExpr\n\n @property\n def _lazyframe(self) -> type[IbisLazyFrame]:\n return IbisLazyFrame\n\n def concat(\n self, items: Iterable[IbisLazyFrame], *, how: ConcatMethod\n ) -> IbisLazyFrame:\n if how == "diagonal":\n msg = "diagonal concat not supported for Ibis. 
Please join instead."\n raise NotImplementedError(msg)\n\n items = list(items)\n native_items = [item.native for item in items]\n schema = items[0].schema\n if not all(x.schema == schema for x in items[1:]):\n msg = "inputs should all have the same schema"\n raise TypeError(msg)\n return self._lazyframe.from_native(ibis.union(*native_items), context=self)\n\n def concat_str(\n self, *exprs: IbisExpr, separator: str, ignore_nulls: bool\n ) -> IbisExpr:\n def func(df: IbisLazyFrame) -> list[ir.Value]:\n cols = list(chain.from_iterable(expr(df) for expr in exprs))\n cols_casted = [s.cast("string") for s in cols]\n\n if not ignore_nulls:\n result = cols_casted[0]\n for col in cols_casted[1:]:\n result = result + separator + col\n else:\n sep = cast("ir.StringValue", lit(separator))\n result = sep.join(cols_casted)\n\n return [result]\n\n return self._expr(\n call=func,\n evaluate_output_names=combine_evaluate_output_names(*exprs),\n alias_output_names=combine_alias_output_names(*exprs),\n backend_version=self._backend_version,\n version=self._version,\n )\n\n def all_horizontal(self, *exprs: IbisExpr, ignore_nulls: bool) -> IbisExpr:\n def func(cols: Iterable[ir.Value]) -> ir.Value:\n it = (\n (col.fill_null(lit(True)) for col in cols) # noqa: FBT003\n if ignore_nulls\n else cols\n )\n return reduce(operator.and_, it)\n\n return self._expr._from_elementwise_horizontal_op(func, *exprs)\n\n def any_horizontal(self, *exprs: IbisExpr, ignore_nulls: bool) -> IbisExpr:\n def func(cols: Iterable[ir.Value]) -> ir.Value:\n it = (\n (col.fill_null(lit(False)) for col in cols) # noqa: FBT003\n if ignore_nulls\n else cols\n )\n return reduce(operator.or_, it)\n\n return self._expr._from_elementwise_horizontal_op(func, *exprs)\n\n def max_horizontal(self, *exprs: IbisExpr) -> IbisExpr:\n def func(cols: Iterable[ir.Value]) -> ir.Value:\n return ibis.greatest(*cols)\n\n return self._expr._from_elementwise_horizontal_op(func, *exprs)\n\n def min_horizontal(self, *exprs: IbisExpr) -> 
IbisExpr:\n def func(cols: Iterable[ir.Value]) -> ir.Value:\n return ibis.least(*cols)\n\n return self._expr._from_elementwise_horizontal_op(func, *exprs)\n\n def sum_horizontal(self, *exprs: IbisExpr) -> IbisExpr:\n def func(cols: Iterable[ir.Value]) -> ir.Value:\n cols = (col.fill_null(lit(0)) for col in cols)\n return reduce(operator.add, cols)\n\n return self._expr._from_elementwise_horizontal_op(func, *exprs)\n\n def mean_horizontal(self, *exprs: IbisExpr) -> IbisExpr:\n def func(cols: Iterable[ir.Value]) -> ir.Value:\n cols = list(cols)\n return reduce(operator.add, (col.fill_null(lit(0)) for col in cols)) / reduce(\n operator.add, (col.isnull().ifelse(lit(0), lit(1)) for col in cols)\n )\n\n return self._expr._from_elementwise_horizontal_op(func, *exprs)\n\n @requires.backend_version((10, 0))\n def when(self, predicate: IbisExpr) -> IbisWhen:\n return IbisWhen.from_expr(predicate, context=self)\n\n def lit(self, value: Any, dtype: IntoDType | None) -> IbisExpr:\n def func(_df: IbisLazyFrame) -> list[ir.Value]:\n ibis_dtype = narwhals_to_native_dtype(dtype, self._version) if dtype else None\n return [lit(value, ibis_dtype)]\n\n return self._expr(\n func,\n evaluate_output_names=lambda _df: ["literal"],\n alias_output_names=None,\n backend_version=self._backend_version,\n version=self._version,\n )\n\n def len(self) -> IbisExpr:\n def func(_df: IbisLazyFrame) -> list[ir.Value]:\n return [_df.native.count()]\n\n return self._expr(\n call=func,\n evaluate_output_names=lambda _df: ["len"],\n alias_output_names=None,\n backend_version=self._backend_version,\n version=self._version,\n )\n\n\nclass IbisWhen(LazyWhen["IbisLazyFrame", "ir.Value", IbisExpr]):\n lit = lit\n\n @property\n def _then(self) -> type[IbisThen]:\n return IbisThen\n\n def __call__(self, df: IbisLazyFrame) -> Sequence[ir.Value]:\n is_expr = self._condition._is_expr\n condition = df._evaluate_expr(self._condition)\n then_ = self._then_value\n then = df._evaluate_expr(then_) if is_expr(then_) else 
lit(then_)\n other_ = self._otherwise_value\n if other_ is None:\n result = ibis.cases((condition, then))\n else:\n otherwise = df._evaluate_expr(other_) if is_expr(other_) else lit(other_)\n result = ibis.cases((condition, then), else_=otherwise)\n return [result]\n\n\nclass IbisThen(LazyThen["IbisLazyFrame", "ir.Value", IbisExpr], IbisExpr): ...\n
.venv\Lib\site-packages\narwhals\_ibis\namespace.py
namespace.py
Python
6,766
0.95
0.26455
0
react-lib
928
2023-10-08T09:48:18.266425
GPL-3.0
false
b063217b3628989e137f9700941419bf
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom narwhals._compliant import CompliantSelector, LazySelectorNamespace\nfrom narwhals._ibis.expr import IbisExpr\n\nif TYPE_CHECKING:\n import ibis.expr.types as ir # noqa: F401\n\n from narwhals._ibis.dataframe import IbisLazyFrame # noqa: F401\n\n\nclass IbisSelectorNamespace(LazySelectorNamespace["IbisLazyFrame", "ir.Value"]):\n @property\n def _selector(self) -> type[IbisSelector]:\n return IbisSelector\n\n\nclass IbisSelector( # type: ignore[misc]\n CompliantSelector["IbisLazyFrame", "ir.Value"], IbisExpr\n):\n def _to_expr(self) -> IbisExpr:\n return IbisExpr(\n self._call,\n evaluate_output_names=self._evaluate_output_names,\n alias_output_names=self._alias_output_names,\n backend_version=self._backend_version,\n version=self._version,\n )\n
.venv\Lib\site-packages\narwhals\_ibis\selectors.py
selectors.py
Python
901
0.95
0.166667
0
react-lib
36
2025-01-27T19:22:56.855376
GPL-3.0
false
f7be9ee54eca20f954719d74d6d2d735
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, NoReturn\n\nfrom narwhals._ibis.utils import native_to_narwhals_dtype\nfrom narwhals.dependencies import get_ibis\n\nif TYPE_CHECKING:\n from types import ModuleType\n\n from typing_extensions import Self\n\n from narwhals._utils import Version\n from narwhals.dtypes import DType\n\n\nclass IbisInterchangeSeries:\n def __init__(self, df: Any, version: Version) -> None:\n self._native_series = df\n self._version = version\n\n def __narwhals_series__(self) -> Self:\n return self\n\n def __native_namespace__(self) -> ModuleType:\n return get_ibis()\n\n @property\n def dtype(self) -> DType:\n return native_to_narwhals_dtype(\n self._native_series.schema().types[0], self._version\n )\n\n def __getattr__(self, attr: str) -> NoReturn:\n msg = (\n f"Attribute {attr} is not supported for interchange-level dataframes.\n\n"\n "If you would like to see this kind of object better supported in "\n "Narwhals, please open a feature request "\n "at https://github.com/narwhals-dev/narwhals/issues."\n )\n raise NotImplementedError(msg)\n
.venv\Lib\site-packages\narwhals\_ibis\series.py
series.py
Python
1,218
0.95
0.195122
0
awesome-app
75
2024-12-04T23:49:59.310133
BSD-3-Clause
false
65a07fea87adc2759c92a56942a116d6
from __future__ import annotations\n\nfrom functools import lru_cache\nfrom typing import TYPE_CHECKING, Any, Literal, cast\n\nimport ibis\nimport ibis.expr.datatypes as ibis_dtypes\n\nfrom narwhals._utils import isinstance_or_issubclass\n\nif TYPE_CHECKING:\n from collections.abc import Mapping\n\n import ibis.expr.types as ir\n from ibis.common.temporal import TimestampUnit\n from ibis.expr.datatypes import DataType as IbisDataType\n from typing_extensions import TypeAlias, TypeIs\n\n from narwhals._duration import IntervalUnit\n from narwhals._ibis.dataframe import IbisLazyFrame\n from narwhals._ibis.expr import IbisExpr\n from narwhals._utils import Version\n from narwhals.dtypes import DType\n from narwhals.typing import IntoDType\n\nlit = ibis.literal\n"""Alias for `ibis.literal`."""\n\nBucketUnit: TypeAlias = Literal[\n "years",\n "quarters",\n "months",\n "days",\n "hours",\n "minutes",\n "seconds",\n "milliseconds",\n "microseconds",\n "nanoseconds",\n]\nTruncateUnit: TypeAlias = Literal[\n "Y", "Q", "M", "W", "D", "h", "m", "s", "ms", "us", "ns"\n]\n\nUNITS_DICT_BUCKET: Mapping[IntervalUnit, BucketUnit] = {\n "y": "years",\n "q": "quarters",\n "mo": "months",\n "d": "days",\n "h": "hours",\n "m": "minutes",\n "s": "seconds",\n "ms": "milliseconds",\n "us": "microseconds",\n "ns": "nanoseconds",\n}\n\nUNITS_DICT_TRUNCATE: Mapping[IntervalUnit, TruncateUnit] = {\n "y": "Y",\n "q": "Q",\n "mo": "M",\n "d": "D",\n "h": "h",\n "m": "m",\n "s": "s",\n "ms": "ms",\n "us": "us",\n "ns": "ns",\n}\n\n\ndef evaluate_exprs(df: IbisLazyFrame, /, *exprs: IbisExpr) -> list[tuple[str, ir.Value]]:\n native_results: list[tuple[str, ir.Value]] = []\n for expr in exprs:\n native_series_list = expr(df)\n output_names = expr._evaluate_output_names(df)\n if expr._alias_output_names is not None:\n output_names = expr._alias_output_names(output_names)\n if len(output_names) != len(native_series_list): # pragma: no cover\n msg = f"Internal error: got output names {output_names}, 
but only got {len(native_series_list)} results"\n raise AssertionError(msg)\n native_results.extend(zip(output_names, native_series_list))\n return native_results\n\n\n@lru_cache(maxsize=16)\ndef native_to_narwhals_dtype(ibis_dtype: IbisDataType, version: Version) -> DType: # noqa: C901, PLR0912\n dtypes = version.dtypes\n if ibis_dtype.is_int64():\n return dtypes.Int64()\n if ibis_dtype.is_int32():\n return dtypes.Int32()\n if ibis_dtype.is_int16():\n return dtypes.Int16()\n if ibis_dtype.is_int8():\n return dtypes.Int8()\n if ibis_dtype.is_uint64():\n return dtypes.UInt64()\n if ibis_dtype.is_uint32():\n return dtypes.UInt32()\n if ibis_dtype.is_uint16():\n return dtypes.UInt16()\n if ibis_dtype.is_uint8():\n return dtypes.UInt8()\n if ibis_dtype.is_boolean():\n return dtypes.Boolean()\n if ibis_dtype.is_float64():\n return dtypes.Float64()\n if ibis_dtype.is_float32():\n return dtypes.Float32()\n if ibis_dtype.is_string():\n return dtypes.String()\n if ibis_dtype.is_date():\n return dtypes.Date()\n if is_timestamp(ibis_dtype):\n _unit = cast("TimestampUnit", ibis_dtype.unit)\n return dtypes.Datetime(time_unit=_unit.value, time_zone=ibis_dtype.timezone)\n if is_interval(ibis_dtype):\n _time_unit = ibis_dtype.unit.value\n if _time_unit not in {"ns", "us", "ms", "s"}: # pragma: no cover\n msg = f"Unsupported interval unit: {_time_unit}"\n raise NotImplementedError(msg)\n return dtypes.Duration(_time_unit)\n if is_array(ibis_dtype):\n if ibis_dtype.length:\n return dtypes.Array(\n native_to_narwhals_dtype(ibis_dtype.value_type, version),\n ibis_dtype.length,\n )\n else:\n return dtypes.List(native_to_narwhals_dtype(ibis_dtype.value_type, version))\n if is_struct(ibis_dtype):\n return dtypes.Struct(\n [\n dtypes.Field(name, native_to_narwhals_dtype(dtype, version))\n for name, dtype in ibis_dtype.items()\n ]\n )\n if ibis_dtype.is_decimal(): # pragma: no cover\n return dtypes.Decimal()\n if ibis_dtype.is_time():\n return dtypes.Time()\n if ibis_dtype.is_binary():\n 
return dtypes.Binary()\n return dtypes.Unknown() # pragma: no cover\n\n\ndef is_timestamp(obj: IbisDataType) -> TypeIs[ibis_dtypes.Timestamp]:\n return obj.is_timestamp()\n\n\ndef is_interval(obj: IbisDataType) -> TypeIs[ibis_dtypes.Interval]:\n return obj.is_interval()\n\n\ndef is_array(obj: IbisDataType) -> TypeIs[ibis_dtypes.Array[Any]]:\n return obj.is_array()\n\n\ndef is_struct(obj: IbisDataType) -> TypeIs[ibis_dtypes.Struct]:\n return obj.is_struct()\n\n\ndef is_floating(obj: IbisDataType) -> TypeIs[ibis_dtypes.Floating]:\n return obj.is_floating()\n\n\ndef narwhals_to_native_dtype( # noqa: C901, PLR0912\n dtype: IntoDType, version: Version\n) -> IbisDataType:\n dtypes = version.dtypes\n\n if isinstance_or_issubclass(dtype, dtypes.Decimal): # pragma: no cover\n return ibis_dtypes.Decimal()\n if isinstance_or_issubclass(dtype, dtypes.Float64):\n return ibis_dtypes.Float64()\n if isinstance_or_issubclass(dtype, dtypes.Float32):\n return ibis_dtypes.Float32()\n if isinstance_or_issubclass(dtype, dtypes.Int128): # pragma: no cover\n msg = "Int128 not supported by Ibis"\n raise NotImplementedError(msg)\n if isinstance_or_issubclass(dtype, dtypes.Int64):\n return ibis_dtypes.Int64()\n if isinstance_or_issubclass(dtype, dtypes.Int32):\n return ibis_dtypes.Int32()\n if isinstance_or_issubclass(dtype, dtypes.Int16):\n return ibis_dtypes.Int16()\n if isinstance_or_issubclass(dtype, dtypes.Int8):\n return ibis_dtypes.Int8()\n if isinstance_or_issubclass(dtype, dtypes.UInt128): # pragma: no cover\n msg = "UInt128 not supported by Ibis"\n raise NotImplementedError(msg)\n if isinstance_or_issubclass(dtype, dtypes.UInt64):\n return ibis_dtypes.UInt64()\n if isinstance_or_issubclass(dtype, dtypes.UInt32):\n return ibis_dtypes.UInt32()\n if isinstance_or_issubclass(dtype, dtypes.UInt16):\n return ibis_dtypes.UInt16()\n if isinstance_or_issubclass(dtype, dtypes.UInt8):\n return ibis_dtypes.UInt8()\n if isinstance_or_issubclass(dtype, dtypes.String):\n return 
ibis_dtypes.String()\n if isinstance_or_issubclass(dtype, dtypes.Boolean):\n return ibis_dtypes.Boolean()\n if isinstance_or_issubclass(dtype, dtypes.Categorical):\n msg = "Categorical not supported by Ibis"\n raise NotImplementedError(msg)\n if isinstance_or_issubclass(dtype, dtypes.Datetime):\n return ibis_dtypes.Timestamp.from_unit(dtype.time_unit, timezone=dtype.time_zone)\n if isinstance_or_issubclass(dtype, dtypes.Duration):\n return ibis_dtypes.Interval(unit=dtype.time_unit) # pyright: ignore[reportArgumentType]\n if isinstance_or_issubclass(dtype, dtypes.Date):\n return ibis_dtypes.Date()\n if isinstance_or_issubclass(dtype, dtypes.Time):\n return ibis_dtypes.Time()\n if isinstance_or_issubclass(dtype, dtypes.List):\n inner = narwhals_to_native_dtype(dtype.inner, version)\n return ibis_dtypes.Array(value_type=inner)\n if isinstance_or_issubclass(dtype, dtypes.Struct):\n fields = [\n (field.name, narwhals_to_native_dtype(field.dtype, version))\n for field in dtype.fields\n ]\n return ibis_dtypes.Struct.from_tuples(fields)\n if isinstance_or_issubclass(dtype, dtypes.Array):\n inner = narwhals_to_native_dtype(dtype.inner, version)\n return ibis_dtypes.Array(value_type=inner, length=dtype.size)\n if isinstance_or_issubclass(dtype, dtypes.Binary):\n return ibis_dtypes.Binary()\n if isinstance_or_issubclass(dtype, dtypes.Enum):\n # Ibis does not support: https://github.com/ibis-project/ibis/issues/10991\n msg = "Enum not supported by Ibis"\n raise NotImplementedError(msg)\n msg = f"Unknown dtype: {dtype}" # pragma: no cover\n raise AssertionError(msg)\n
.venv\Lib\site-packages\narwhals\_ibis\utils.py
utils.py
Python
8,277
0.95
0.26383
0.004808
awesome-app
455
2024-03-17T13:52:50.511648
MIT
false
ce18d2de51c018dd68bf22fa08d34e77
\n\n
.venv\Lib\site-packages\narwhals\_ibis\__pycache__\dataframe.cpython-313.pyc
dataframe.cpython-313.pyc
Other
24,077
0.95
0.022727
0.005988
python-kit
725
2025-03-28T18:05:21.380435
MIT
false
596dcb719655765c8d3d50cb6369623b
\n\n
.venv\Lib\site-packages\narwhals\_ibis\__pycache__\expr.cpython-313.pyc
expr.cpython-313.pyc
Other
36,569
0.95
0.013889
0
awesome-app
739
2025-05-16T22:18:08.314996
Apache-2.0
false
89963cde731fcd2add7388c113937724
\n\n
.venv\Lib\site-packages\narwhals\_ibis\__pycache__\expr_dt.cpython-313.pyc
expr_dt.cpython-313.pyc
Other
8,326
0.8
0.027027
0
awesome-app
47
2024-09-12T21:22:01.394745
Apache-2.0
false
e71fb636335184083935faf8659f38cd
\n\n
.venv\Lib\site-packages\narwhals\_ibis\__pycache__\expr_list.cpython-313.pyc
expr_list.cpython-313.pyc
Other
1,184
0.7
0
0
python-kit
335
2025-05-27T08:08:27.609087
MIT
false
0eaa1e1d4b9eb09152fe10344995155b
\n\n
.venv\Lib\site-packages\narwhals\_ibis\__pycache__\expr_str.cpython-313.pyc
expr_str.cpython-313.pyc
Other
10,045
0.8
0
0
python-kit
654
2024-04-30T11:00:38.986463
BSD-3-Clause
false
4eaab9df217ab7aba0057ea06474c85f
\n\n
.venv\Lib\site-packages\narwhals\_ibis\__pycache__\expr_struct.cpython-313.pyc
expr_struct.cpython-313.pyc
Other
1,364
0.7
0
0
vue-tools
94
2025-04-16T05:56:05.873625
BSD-3-Clause
false
a0a3d43cd8bf9633315aa23f7a61e32b
\n\n
.venv\Lib\site-packages\narwhals\_ibis\__pycache__\group_by.cpython-313.pyc
group_by.cpython-313.pyc
Other
2,115
0.7
0
0
node-utils
248
2023-11-03T18:45:44.988671
MIT
false
18bd03040d32701f33ed5ba66f955586
\n\n
.venv\Lib\site-packages\narwhals\_ibis\__pycache__\namespace.cpython-313.pyc
namespace.cpython-313.pyc
Other
12,923
0.95
0.015152
0
react-lib
974
2025-01-30T03:36:19.967347
Apache-2.0
false
6c8ce3981de002a321e56dd58d3fe384
\n\n
.venv\Lib\site-packages\narwhals\_ibis\__pycache__\selectors.cpython-313.pyc
selectors.cpython-313.pyc
Other
1,730
0.8
0
0
python-kit
438
2024-09-08T11:42:15.010706
GPL-3.0
false
e729c09e8d5a3c50c096a842aa38f3f9
\n\n
.venv\Lib\site-packages\narwhals\_ibis\__pycache__\series.cpython-313.pyc
series.cpython-313.pyc
Other
2,351
0.8
0.034483
0
python-kit
501
2025-07-03T17:04:26.531931
MIT
false
81490de030ea90ccd500a9ee56182e32
\n\n
.venv\Lib\site-packages\narwhals\_ibis\__pycache__\utils.cpython-313.pyc
utils.cpython-313.pyc
Other
11,542
0.8
0
0.015152
awesome-app
897
2024-05-04T22:09:57.549034
BSD-3-Clause
false
d22db60177a6bde82e14d46126c23cdf
\n\n
.venv\Lib\site-packages\narwhals\_ibis\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
189
0.7
0
0
vue-tools
56
2023-07-28T12:32:38.114794
BSD-3-Clause
false
e3672c576439d3d9887ac71af7248989
from __future__ import annotations\n\nimport enum\nfrom typing import TYPE_CHECKING, Any, NoReturn\n\nfrom narwhals._utils import Version, parse_version\n\nif TYPE_CHECKING:\n import pandas as pd\n import pyarrow as pa\n from typing_extensions import Self\n\n from narwhals._interchange.series import InterchangeSeries\n from narwhals.dtypes import DType\n from narwhals.typing import DataFrameLike\n\n\nclass DtypeKind(enum.IntEnum):\n # https://data-apis.org/dataframe-protocol/latest/API.html\n INT = 0\n UINT = 1\n FLOAT = 2\n BOOL = 20\n STRING = 21 # UTF-8\n DATETIME = 22\n CATEGORICAL = 23\n\n\ndef map_interchange_dtype_to_narwhals_dtype( # noqa: C901, PLR0911, PLR0912\n interchange_dtype: tuple[DtypeKind, int, Any, Any],\n) -> DType:\n dtypes = Version.V1.dtypes\n if interchange_dtype[0] == DtypeKind.INT:\n if interchange_dtype[1] == 64:\n return dtypes.Int64()\n if interchange_dtype[1] == 32:\n return dtypes.Int32()\n if interchange_dtype[1] == 16:\n return dtypes.Int16()\n if interchange_dtype[1] == 8:\n return dtypes.Int8()\n msg = "Invalid bit width for INT" # pragma: no cover\n raise AssertionError(msg)\n if interchange_dtype[0] == DtypeKind.UINT:\n if interchange_dtype[1] == 64:\n return dtypes.UInt64()\n if interchange_dtype[1] == 32:\n return dtypes.UInt32()\n if interchange_dtype[1] == 16:\n return dtypes.UInt16()\n if interchange_dtype[1] == 8:\n return dtypes.UInt8()\n msg = "Invalid bit width for UINT" # pragma: no cover\n raise AssertionError(msg)\n if interchange_dtype[0] == DtypeKind.FLOAT:\n if interchange_dtype[1] == 64:\n return dtypes.Float64()\n if interchange_dtype[1] == 32:\n return dtypes.Float32()\n msg = "Invalid bit width for FLOAT" # pragma: no cover\n raise AssertionError(msg)\n if interchange_dtype[0] == DtypeKind.BOOL:\n return dtypes.Boolean()\n if interchange_dtype[0] == DtypeKind.STRING:\n return dtypes.String()\n if interchange_dtype[0] == DtypeKind.DATETIME:\n return dtypes.Datetime()\n if interchange_dtype[0] == 
DtypeKind.CATEGORICAL: # pragma: no cover\n # upstream issue: https://github.com/ibis-project/ibis/issues/9570\n return dtypes.Categorical()\n msg = f"Invalid dtype, got: {interchange_dtype}" # pragma: no cover\n raise AssertionError(msg)\n\n\nclass InterchangeFrame:\n _version = Version.V1\n\n def __init__(self, df: DataFrameLike) -> None:\n self._interchange_frame = df.__dataframe__()\n\n def __narwhals_dataframe__(self) -> Self:\n return self\n\n def __native_namespace__(self) -> NoReturn:\n msg = (\n "Cannot access native namespace for interchange-level dataframes with unknown backend."\n "If you would like to see this kind of object supported in Narwhals, please "\n "open a feature request at https://github.com/narwhals-dev/narwhals/issues."\n )\n raise NotImplementedError(msg)\n\n def get_column(self, name: str) -> InterchangeSeries:\n from narwhals._interchange.series import InterchangeSeries\n\n return InterchangeSeries(self._interchange_frame.get_column_by_name(name))\n\n def to_pandas(self) -> pd.DataFrame:\n import pandas as pd # ignore-banned-import()\n\n if parse_version(pd) >= (1, 5, 0):\n return pd.api.interchange.from_dataframe(self._interchange_frame)\n else: # pragma: no cover\n msg = (\n "Conversion to pandas is achieved via interchange protocol which requires"\n f" 'pandas>=1.5.0' to be installed, found {pd.__version__}"\n )\n raise NotImplementedError(msg)\n\n def to_arrow(self) -> pa.Table:\n from pyarrow.interchange.from_dataframe import ( # ignore-banned-import()\n from_dataframe,\n )\n\n return from_dataframe(self._interchange_frame)\n\n @property\n def schema(self) -> dict[str, DType]:\n return {\n column_name: map_interchange_dtype_to_narwhals_dtype(\n self._interchange_frame.get_column_by_name(column_name).dtype\n )\n for column_name in self._interchange_frame.column_names()\n }\n\n @property\n def columns(self) -> list[str]:\n return list(self._interchange_frame.column_names())\n\n def __getattr__(self, attr: str) -> NoReturn:\n msg = 
(\n f"Attribute {attr} is not supported for interchange-level dataframes.\n\n"\n "Hint: you probably called `nw.from_native` on an object which isn't fully "\n "supported by Narwhals, yet implements `__dataframe__`. If you would like to "\n "see this kind of object supported in Narwhals, please open a feature request "\n "at https://github.com/narwhals-dev/narwhals/issues."\n )\n raise NotImplementedError(msg)\n\n def simple_select(self, *column_names: str) -> Self:\n frame = self._interchange_frame.select_columns_by_name(list(column_names))\n if not hasattr(frame, "_df"): # pragma: no cover\n msg = (\n "Expected interchange object to implement `_df` property to allow for recovering original object.\n"\n "See https://github.com/data-apis/dataframe-api/issues/360."\n )\n raise NotImplementedError(msg)\n return self.__class__(frame._df)\n\n def select(self, *exprs: str) -> Self: # pragma: no cover\n msg = (\n "`select`-ing not by name is not supported for interchange-only level.\n\n"\n "If you would like to see this kind of object better supported in "\n "Narwhals, please open a feature request "\n "at https://github.com/narwhals-dev/narwhals/issues."\n )\n raise NotImplementedError(msg)\n
.venv\Lib\site-packages\narwhals\_interchange\dataframe.py
dataframe.py
Python
5,921
0.95
0.269231
0.015152
node-utils
556
2025-06-13T15:13:38.813570
Apache-2.0
false
703cdd331b41f7b5f93b4971d11b76a5
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, NoReturn\n\nfrom narwhals._interchange.dataframe import map_interchange_dtype_to_narwhals_dtype\nfrom narwhals._utils import Version\n\nif TYPE_CHECKING:\n from typing_extensions import Self\n\n from narwhals.dtypes import DType\n\n\nclass InterchangeSeries:\n _version = Version.V1\n\n def __init__(self, df: Any) -> None:\n self._native_series = df\n\n def __narwhals_series__(self) -> Self:\n return self\n\n def __native_namespace__(self) -> NoReturn:\n msg = (\n "Cannot access native namespace for interchange-level series with unknown backend. "\n "If you would like to see this kind of object supported in Narwhals, please "\n "open a feature request at https://github.com/narwhals-dev/narwhals/issues."\n )\n raise NotImplementedError(msg)\n\n @property\n def dtype(self) -> DType:\n return map_interchange_dtype_to_narwhals_dtype(self._native_series.dtype)\n\n @property\n def native(self) -> Any:\n return self._native_series\n\n def __getattr__(self, attr: str) -> NoReturn:\n msg = ( # pragma: no cover\n f"Attribute {attr} is not supported for interchange-level dataframes.\n\n"\n "Hint: you probably called `nw.from_native` on an object which isn't fully "\n "supported by Narwhals, yet implements `__dataframe__`. If you would like to "\n "see this kind of object supported in Narwhals, please open a feature request "\n "at https://github.com/narwhals-dev/narwhals/issues."\n )\n raise NotImplementedError(msg)\n
.venv\Lib\site-packages\narwhals\_interchange\series.py
series.py
Python
1,651
0.95
0.212766
0
python-kit
880
2023-10-26T05:54:27.100101
MIT
false
418ef134bf44713a768dd0d68d72bafc
\n\n
.venv\Lib\site-packages\narwhals\_interchange\__pycache__\dataframe.cpython-313.pyc
dataframe.cpython-313.pyc
Other
8,177
0.95
0.111111
0
vue-tools
575
2023-07-15T16:29:34.426626
GPL-3.0
false
9813baee732782386ae8e7574301e729
\n\n
.venv\Lib\site-packages\narwhals\_interchange\__pycache__\series.cpython-313.pyc
series.cpython-313.pyc
Other
2,757
0.8
0.086957
0
python-kit
813
2023-10-16T08:00:03.792417
Apache-2.0
false
22a0c72baf427a746be302c7b3541643
\n\n
.venv\Lib\site-packages\narwhals\_interchange\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
196
0.7
0
0
vue-tools
11
2024-04-05T01:12:22.488594
GPL-3.0
false
72c6349fbf153531077527c1672eabb9
from __future__ import annotations\n\nfrom collections.abc import Iterable, Iterator, Mapping, Sequence\nfrom itertools import chain, product\nfrom typing import TYPE_CHECKING, Any, Callable, Literal, cast, overload\n\nimport numpy as np\n\nfrom narwhals._compliant import EagerDataFrame\nfrom narwhals._pandas_like.series import PANDAS_TO_NUMPY_DTYPE_MISSING, PandasLikeSeries\nfrom narwhals._pandas_like.utils import (\n align_and_extract_native,\n get_dtype_backend,\n import_array_module,\n native_to_narwhals_dtype,\n object_native_to_narwhals_dtype,\n rename,\n select_columns_by_name,\n set_index,\n)\nfrom narwhals._typing_compat import assert_never\nfrom narwhals._utils import (\n Implementation,\n _into_arrow_table,\n _remap_full_join_keys,\n check_column_names_are_unique,\n exclude_column_names,\n generate_temporary_column_name,\n parse_columns_to_drop,\n parse_version,\n scale_bytes,\n validate_backend_version,\n)\nfrom narwhals.dependencies import is_pandas_like_dataframe\nfrom narwhals.exceptions import InvalidOperationError, ShapeError\n\nif TYPE_CHECKING:\n from io import BytesIO\n from pathlib import Path\n from types import ModuleType\n\n import pandas as pd\n import polars as pl\n from typing_extensions import Self, TypeAlias, TypeIs\n\n from narwhals._compliant.typing import CompliantDataFrameAny, CompliantLazyFrameAny\n from narwhals._pandas_like.expr import PandasLikeExpr\n from narwhals._pandas_like.group_by import PandasLikeGroupBy\n from narwhals._pandas_like.namespace import PandasLikeNamespace\n from narwhals._translate import IntoArrowTable\n from narwhals._utils import Version, _FullContext\n from narwhals.dtypes import DType\n from narwhals.schema import Schema\n from narwhals.typing import (\n AsofJoinStrategy,\n DTypeBackend,\n JoinStrategy,\n PivotAgg,\n SizedMultiIndexSelector,\n SizedMultiNameSelector,\n SizeUnit,\n UniqueKeepStrategy,\n _2DArray,\n _SliceIndex,\n _SliceName,\n )\n\n Constructor: TypeAlias = Callable[..., 
pd.DataFrame]\n\n\nCLASSICAL_NUMPY_DTYPES: frozenset[np.dtype[Any]] = frozenset(\n [\n np.dtype("float64"),\n np.dtype("float32"),\n np.dtype("int64"),\n np.dtype("int32"),\n np.dtype("int16"),\n np.dtype("int8"),\n np.dtype("uint64"),\n np.dtype("uint32"),\n np.dtype("uint16"),\n np.dtype("uint8"),\n np.dtype("bool"),\n np.dtype("datetime64[s]"),\n np.dtype("datetime64[ms]"),\n np.dtype("datetime64[us]"),\n np.dtype("datetime64[ns]"),\n np.dtype("timedelta64[s]"),\n np.dtype("timedelta64[ms]"),\n np.dtype("timedelta64[us]"),\n np.dtype("timedelta64[ns]"),\n np.dtype("object"),\n ]\n)\n\n\nclass PandasLikeDataFrame(\n EagerDataFrame["PandasLikeSeries", "PandasLikeExpr", "Any", "pd.Series[Any]"]\n):\n def __init__(\n self,\n native_dataframe: Any,\n *,\n implementation: Implementation,\n backend_version: tuple[int, ...],\n version: Version,\n validate_column_names: bool,\n ) -> None:\n self._native_frame = native_dataframe\n self._implementation = implementation\n self._backend_version = backend_version\n self._version = version\n validate_backend_version(self._implementation, self._backend_version)\n if validate_column_names:\n check_column_names_are_unique(native_dataframe.columns)\n\n @classmethod\n def from_arrow(cls, data: IntoArrowTable, /, *, context: _FullContext) -> Self:\n implementation = context._implementation\n tbl = _into_arrow_table(data, context)\n if implementation.is_pandas():\n native = tbl.to_pandas()\n elif implementation.is_modin(): # pragma: no cover\n from modin.pandas.utils import (\n from_arrow as mpd_from_arrow, # pyright: ignore[reportAttributeAccessIssue]\n )\n\n native = mpd_from_arrow(tbl)\n elif implementation.is_cudf(): # pragma: no cover\n native = implementation.to_native_namespace().DataFrame.from_arrow(tbl)\n else: # pragma: no cover\n msg = "congratulations, you entered unreachable code - please report a bug"\n raise AssertionError(msg)\n return cls.from_native(native, context=context)\n\n @classmethod\n def from_dict(\n cls,\n 
data: Mapping[str, Any],\n /,\n *,\n context: _FullContext,\n schema: Mapping[str, DType] | Schema | None,\n ) -> Self:\n from narwhals.schema import Schema\n\n implementation = context._implementation\n ns = implementation.to_native_namespace()\n Series = cast("type[pd.Series[Any]]", ns.Series) # noqa: N806\n DataFrame = cast("type[pd.DataFrame]", ns.DataFrame) # noqa: N806\n aligned_data: dict[str, pd.Series[Any] | Any] = {}\n left_most: PandasLikeSeries | None = None\n for name, series in data.items():\n if isinstance(series, Series):\n compliant = PandasLikeSeries.from_native(series, context=context)\n if left_most is None:\n left_most = compliant\n aligned_data[name] = series\n else:\n aligned_data[name] = align_and_extract_native(left_most, compliant)[1]\n else:\n aligned_data[name] = series\n\n native = DataFrame.from_dict(aligned_data)\n if schema:\n it: Iterable[DTypeBackend] = (\n get_dtype_backend(dtype, implementation) for dtype in native.dtypes\n )\n native = native.astype(Schema(schema).to_pandas(it))\n return cls.from_native(native, context=context)\n\n @staticmethod\n def _is_native(obj: Any) -> TypeIs[Any]:\n return is_pandas_like_dataframe(obj) # pragma: no cover\n\n @classmethod\n def from_native(cls, data: Any, /, *, context: _FullContext) -> Self:\n return cls(\n data,\n implementation=context._implementation,\n backend_version=context._backend_version,\n version=context._version,\n validate_column_names=True,\n )\n\n @classmethod\n def from_numpy(\n cls,\n data: _2DArray,\n /,\n *,\n context: _FullContext,\n schema: Mapping[str, DType] | Schema | Sequence[str] | None,\n ) -> Self:\n from narwhals.schema import Schema\n\n implementation = context._implementation\n DataFrame: Constructor = implementation.to_native_namespace().DataFrame # noqa: N806\n if isinstance(schema, (Mapping, Schema)):\n it: Iterable[DTypeBackend] = (\n get_dtype_backend(native_type, implementation)\n for native_type in schema.values()\n )\n native = DataFrame(data, 
columns=schema.keys()).astype(\n Schema(schema).to_pandas(it)\n )\n else:\n native = DataFrame(data, columns=cls._numpy_column_names(data, schema))\n return cls.from_native(native, context=context)\n\n def __narwhals_dataframe__(self) -> Self:\n return self\n\n def __narwhals_lazyframe__(self) -> Self:\n return self\n\n def __narwhals_namespace__(self) -> PandasLikeNamespace:\n from narwhals._pandas_like.namespace import PandasLikeNamespace\n\n return PandasLikeNamespace(\n self._implementation, self._backend_version, version=self._version\n )\n\n def __native_namespace__(self) -> ModuleType:\n if self._implementation in {\n Implementation.PANDAS,\n Implementation.MODIN,\n Implementation.CUDF,\n }:\n return self._implementation.to_native_namespace()\n\n msg = f"Expected pandas/modin/cudf, got: {type(self._implementation)}" # pragma: no cover\n raise AssertionError(msg)\n\n def __len__(self) -> int:\n return len(self.native)\n\n def _with_version(self, version: Version) -> Self:\n return self.__class__(\n self.native,\n implementation=self._implementation,\n backend_version=self._backend_version,\n version=version,\n validate_column_names=False,\n )\n\n def _with_native(self, df: Any, *, validate_column_names: bool = True) -> Self:\n return self.__class__(\n df,\n implementation=self._implementation,\n backend_version=self._backend_version,\n version=self._version,\n validate_column_names=validate_column_names,\n )\n\n def _extract_comparand(self, other: PandasLikeSeries) -> pd.Series[Any]:\n index = self.native.index\n if other._broadcast:\n s = other.native\n return type(s)(s.iloc[0], index=index, dtype=s.dtype, name=s.name)\n if (len_other := len(other)) != (len_idx := len(index)):\n msg = f"Expected object of length {len_idx}, got: {len_other}."\n raise ShapeError(msg)\n if other.native.index is not index:\n return set_index(\n other.native,\n index,\n implementation=other._implementation,\n backend_version=other._backend_version,\n )\n return other.native\n\n 
@property\n def _array_funcs(self): # type: ignore[no-untyped-def] # noqa: ANN202\n if TYPE_CHECKING:\n import numpy as np\n\n return np\n else:\n return import_array_module(self._implementation)\n\n def get_column(self, name: str) -> PandasLikeSeries:\n return PandasLikeSeries.from_native(self.native[name], context=self)\n\n def __array__(self, dtype: Any = None, *, copy: bool | None = None) -> _2DArray:\n return self.to_numpy(dtype=dtype, copy=copy)\n\n def _gather(self, rows: SizedMultiIndexSelector[pd.Series[Any]]) -> Self:\n items = list(rows) if isinstance(rows, tuple) else rows\n return self._with_native(self.native.iloc[items, :])\n\n def _gather_slice(self, rows: _SliceIndex | range) -> Self:\n return self._with_native(\n self.native.iloc[slice(rows.start, rows.stop, rows.step), :],\n validate_column_names=False,\n )\n\n def _select_slice_name(self, columns: _SliceName) -> Self:\n start = (\n self.native.columns.get_loc(columns.start)\n if columns.start is not None\n else None\n )\n stop = (\n self.native.columns.get_loc(columns.stop) + 1\n if columns.stop is not None\n else None\n )\n selector = slice(start, stop, columns.step)\n return self._with_native(\n self.native.iloc[:, selector], validate_column_names=False\n )\n\n def _select_slice_index(self, columns: _SliceIndex | range) -> Self:\n return self._with_native(\n self.native.iloc[:, columns], validate_column_names=False\n )\n\n def _select_multi_index(\n self, columns: SizedMultiIndexSelector[pd.Series[Any]]\n ) -> Self:\n columns = list(columns) if isinstance(columns, tuple) else columns\n return self._with_native(\n self.native.iloc[:, columns], validate_column_names=False\n )\n\n def _select_multi_name(self, columns: SizedMultiNameSelector[pd.Series[Any]]) -> Self:\n return self._with_native(self.native.loc[:, columns])\n\n # --- properties ---\n @property\n def columns(self) -> list[str]:\n return self.native.columns.tolist()\n\n @overload\n def rows(self, *, named: Literal[True]) -> 
list[dict[str, Any]]: ...\n\n @overload\n def rows(self, *, named: Literal[False]) -> list[tuple[Any, ...]]: ...\n\n @overload\n def rows(self, *, named: bool) -> list[tuple[Any, ...]] | list[dict[str, Any]]: ...\n\n def rows(self, *, named: bool) -> list[tuple[Any, ...]] | list[dict[str, Any]]:\n if not named:\n # cuDF does not support itertuples. But it does support to_dict!\n if self._implementation is Implementation.CUDF:\n # Extract the row values from the named rows\n return [tuple(row.values()) for row in self.rows(named=True)]\n\n return list(self.native.itertuples(index=False, name=None))\n\n return self.native.to_dict(orient="records")\n\n def iter_columns(self) -> Iterator[PandasLikeSeries]:\n for _name, series in self.native.items(): # noqa: PERF102\n yield PandasLikeSeries.from_native(series, context=self)\n\n _iter_columns = iter_columns\n\n def iter_rows(\n self, *, named: bool, buffer_size: int\n ) -> Iterator[tuple[Any, ...]] | Iterator[dict[str, Any]]:\n # The param ``buffer_size`` is only here for compatibility with the Polars API\n # and has no effect on the output.\n if not named:\n yield from self.native.itertuples(index=False, name=None)\n else:\n col_names = self.native.columns\n for row in self.native.itertuples(index=False):\n yield dict(zip(col_names, row))\n\n @property\n def schema(self) -> dict[str, DType]:\n native_dtypes = self.native.dtypes\n return {\n col: native_to_narwhals_dtype(\n native_dtypes[col], self._version, self._implementation\n )\n if native_dtypes[col] != "object"\n else object_native_to_narwhals_dtype(\n self.native[col], self._version, self._implementation\n )\n for col in self.native.columns\n }\n\n def collect_schema(self) -> dict[str, DType]:\n return self.schema\n\n # --- reshape ---\n def simple_select(self, *column_names: str) -> Self:\n return self._with_native(\n select_columns_by_name(\n self.native,\n list(column_names),\n self._backend_version,\n self._implementation,\n ),\n 
validate_column_names=False,\n )\n\n def select(self, *exprs: PandasLikeExpr) -> Self:\n new_series = self._evaluate_into_exprs(*exprs)\n if not new_series:\n # return empty dataframe, like Polars does\n return self._with_native(type(self.native)(), validate_column_names=False)\n new_series = new_series[0]._align_full_broadcast(*new_series)\n namespace = self.__narwhals_namespace__()\n df = namespace._concat_horizontal([s.native for s in new_series])\n # `concat` creates a new object, so fine to modify `.columns.name` inplace.\n df.columns.name = self.native.columns.name\n return self._with_native(df, validate_column_names=True)\n\n def drop_nulls(self, subset: Sequence[str] | None) -> Self:\n if subset is None:\n return self._with_native(\n self.native.dropna(axis=0), validate_column_names=False\n )\n plx = self.__narwhals_namespace__()\n mask = ~plx.any_horizontal(plx.col(*subset).is_null(), ignore_nulls=True)\n return self.filter(mask)\n\n def estimated_size(self, unit: SizeUnit) -> int | float:\n sz = self.native.memory_usage(deep=True).sum()\n return scale_bytes(sz, unit=unit)\n\n def with_row_index(self, name: str, order_by: Sequence[str] | None) -> Self:\n plx = self.__narwhals_namespace__()\n if order_by is None:\n size = len(self)\n data = self._array_funcs.arange(size)\n\n row_index = plx._expr._from_series(\n plx._series.from_iterable(\n data, context=self, index=self.native.index, name=name\n )\n )\n else:\n rank = plx.col(order_by[0]).rank(method="ordinal", descending=False)\n row_index = (rank.over(partition_by=[], order_by=order_by) - 1).alias(name)\n return self.select(row_index, plx.all())\n\n def row(self, index: int) -> tuple[Any, ...]:\n return tuple(x for x in self.native.iloc[index])\n\n def filter(self, predicate: PandasLikeExpr | list[bool]) -> Self:\n if isinstance(predicate, list):\n mask_native: pd.Series[Any] | list[bool] = predicate\n else:\n # `[0]` is safe as the predicate's expression only returns a single column\n mask = 
self._evaluate_into_exprs(predicate)[0]\n mask_native = self._extract_comparand(mask)\n return self._with_native(\n self.native.loc[mask_native], validate_column_names=False\n )\n\n def with_columns(self, *exprs: PandasLikeExpr) -> Self:\n columns = self._evaluate_into_exprs(*exprs)\n if not columns and len(self) == 0:\n return self\n name_columns: dict[str, PandasLikeSeries] = {s.name: s for s in columns}\n to_concat = []\n # Make sure to preserve column order\n for name in self.native.columns:\n if name in name_columns:\n series = self._extract_comparand(name_columns.pop(name))\n else:\n series = self.native[name]\n to_concat.append(series)\n to_concat.extend(self._extract_comparand(s) for s in name_columns.values())\n namespace = self.__narwhals_namespace__()\n df = namespace._concat_horizontal(to_concat)\n # `concat` creates a new object, so fine to modify `.columns.name` inplace.\n df.columns.name = self.native.columns.name\n return self._with_native(df, validate_column_names=False)\n\n def rename(self, mapping: Mapping[str, str]) -> Self:\n return self._with_native(\n rename(\n self.native,\n columns=mapping,\n implementation=self._implementation,\n backend_version=self._backend_version,\n )\n )\n\n def drop(self, columns: Sequence[str], *, strict: bool) -> Self:\n to_drop = parse_columns_to_drop(self, columns, strict=strict)\n return self._with_native(\n self.native.drop(columns=to_drop), validate_column_names=False\n )\n\n # --- transform ---\n def sort(self, *by: str, descending: bool | Sequence[bool], nulls_last: bool) -> Self:\n df = self.native\n if isinstance(descending, bool):\n ascending: bool | list[bool] = not descending\n else:\n ascending = [not d for d in descending]\n na_position = "last" if nulls_last else "first"\n return self._with_native(\n df.sort_values(list(by), ascending=ascending, na_position=na_position),\n validate_column_names=False,\n )\n\n # --- convert ---\n def collect(\n self, backend: Implementation | None, **kwargs: Any\n ) 
-> CompliantDataFrameAny:\n if backend is None:\n return PandasLikeDataFrame(\n self.native,\n implementation=self._implementation,\n backend_version=self._backend_version,\n version=self._version,\n validate_column_names=False,\n )\n\n if backend is Implementation.PANDAS:\n import pandas as pd # ignore-banned-import\n\n return PandasLikeDataFrame(\n self.to_pandas(),\n implementation=Implementation.PANDAS,\n backend_version=parse_version(pd),\n version=self._version,\n validate_column_names=False,\n )\n\n if backend is Implementation.PYARROW:\n import pyarrow as pa # ignore-banned-import\n\n from narwhals._arrow.dataframe import ArrowDataFrame\n\n return ArrowDataFrame(\n native_dataframe=self.to_arrow(),\n backend_version=parse_version(pa),\n version=self._version,\n validate_column_names=False,\n )\n\n if backend is Implementation.POLARS:\n import polars as pl # ignore-banned-import\n\n from narwhals._polars.dataframe import PolarsDataFrame\n\n return PolarsDataFrame(\n df=self.to_polars(),\n backend_version=parse_version(pl),\n version=self._version,\n )\n\n msg = f"Unsupported `backend` value: {backend}" # pragma: no cover\n raise ValueError(msg) # pragma: no cover\n\n # --- actions ---\n def group_by(\n self, keys: Sequence[str] | Sequence[PandasLikeExpr], *, drop_null_keys: bool\n ) -> PandasLikeGroupBy:\n from narwhals._pandas_like.group_by import PandasLikeGroupBy\n\n return PandasLikeGroupBy(self, keys, drop_null_keys=drop_null_keys)\n\n def _join_inner(\n self, other: Self, *, left_on: Sequence[str], right_on: Sequence[str], suffix: str\n ) -> pd.DataFrame:\n return self.native.merge(\n other.native,\n left_on=left_on,\n right_on=right_on,\n how="inner",\n suffixes=("", suffix),\n )\n\n def _join_left(\n self, other: Self, *, left_on: Sequence[str], right_on: Sequence[str], suffix: str\n ) -> pd.DataFrame:\n result_native = self.native.merge(\n other.native,\n how="left",\n left_on=left_on,\n right_on=right_on,\n suffixes=("", suffix),\n )\n extra = [\n 
right_key if right_key not in self.columns else f"{right_key}{suffix}"\n for left_key, right_key in zip(left_on, right_on)\n if right_key != left_key\n ]\n return result_native.drop(columns=extra)\n\n def _join_full(\n self, other: Self, *, left_on: Sequence[str], right_on: Sequence[str], suffix: str\n ) -> pd.DataFrame:\n # Pandas coalesces keys in full joins unless there's no collision\n right_on_mapper = _remap_full_join_keys(left_on, right_on, suffix)\n other_native = other.native.rename(columns=right_on_mapper)\n check_column_names_are_unique(other_native.columns)\n right_suffixed = list(right_on_mapper.values())\n return self.native.merge(\n other_native,\n left_on=left_on,\n right_on=right_suffixed,\n how="outer",\n suffixes=("", suffix),\n )\n\n def _join_cross(self, other: Self, *, suffix: str) -> pd.DataFrame:\n implementation = self._implementation\n backend_version = self._backend_version\n if (implementation.is_modin() or implementation.is_cudf()) or (\n implementation.is_pandas() and backend_version < (1, 4)\n ):\n key_token = generate_temporary_column_name(\n n_bytes=8, columns=(*self.columns, *other.columns)\n )\n return (\n self.native.assign(**{key_token: 0})\n .merge(\n other.native.assign(**{key_token: 0}),\n how="inner",\n left_on=key_token,\n right_on=key_token,\n suffixes=("", suffix),\n )\n .drop(columns=key_token)\n )\n return self.native.merge(other.native, how="cross", suffixes=("", suffix))\n\n def _join_semi(\n self, other: Self, *, left_on: Sequence[str], right_on: Sequence[str]\n ) -> pd.DataFrame:\n other_native = self._join_filter_rename(\n other=other,\n columns_to_select=list(right_on),\n columns_mapping=dict(zip(right_on, left_on)),\n )\n return self.native.merge(\n other_native, how="inner", left_on=left_on, right_on=left_on\n )\n\n def _join_anti(\n self, other: Self, *, left_on: Sequence[str], right_on: Sequence[str]\n ) -> pd.DataFrame:\n implementation = self._implementation\n\n if implementation.is_cudf():\n return 
self.native.merge(\n other.native, how="leftanti", left_on=left_on, right_on=right_on\n )\n\n indicator_token = generate_temporary_column_name(\n n_bytes=8, columns=(*self.columns, *other.columns)\n )\n\n other_native = self._join_filter_rename(\n other=other,\n columns_to_select=list(right_on),\n columns_mapping=dict(zip(right_on, left_on)),\n )\n return (\n self.native.merge(\n other_native,\n # TODO(FBruzzesi): See https://github.com/modin-project/modin/issues/7384\n how="left" if implementation.is_pandas() else "outer",\n indicator=indicator_token,\n left_on=left_on,\n right_on=left_on,\n )\n .loc[lambda t: t[indicator_token] == "left_only"]\n .drop(columns=indicator_token)\n )\n\n def _join_filter_rename(\n self, other: Self, columns_to_select: list[str], columns_mapping: dict[str, str]\n ) -> pd.DataFrame:\n """Helper function to avoid creating extra columns and row duplication.\n\n Used in `"anti"` and `"semi`" join's.\n\n Notice that a native object is returned.\n """\n implementation = self._implementation\n backend_version = self._backend_version\n\n return rename(\n select_columns_by_name(\n other.native,\n column_names=columns_to_select,\n backend_version=backend_version,\n implementation=implementation,\n ),\n columns=columns_mapping,\n implementation=implementation,\n backend_version=backend_version,\n ).drop_duplicates()\n\n def join(\n self,\n other: Self,\n *,\n how: JoinStrategy,\n left_on: Sequence[str] | None,\n right_on: Sequence[str] | None,\n suffix: str,\n ) -> Self:\n if how == "cross":\n result = self._join_cross(other=other, suffix=suffix)\n\n elif left_on is None or right_on is None: # pragma: no cover\n raise ValueError(left_on, right_on)\n\n elif how == "inner":\n result = self._join_inner(\n other=other, left_on=left_on, right_on=right_on, suffix=suffix\n )\n elif how == "anti":\n result = self._join_anti(other=other, left_on=left_on, right_on=right_on)\n elif how == "semi":\n result = self._join_semi(other=other, left_on=left_on, 
right_on=right_on)\n elif how == "left":\n result = self._join_left(\n other=other, left_on=left_on, right_on=right_on, suffix=suffix\n )\n elif how == "full":\n result = self._join_full(\n other=other, left_on=left_on, right_on=right_on, suffix=suffix\n )\n else:\n assert_never(how)\n\n return self._with_native(result)\n\n def join_asof(\n self,\n other: Self,\n *,\n left_on: str,\n right_on: str,\n by_left: Sequence[str] | None,\n by_right: Sequence[str] | None,\n strategy: AsofJoinStrategy,\n suffix: str,\n ) -> Self:\n plx = self.__native_namespace__()\n return self._with_native(\n plx.merge_asof(\n self.native,\n other.native,\n left_on=left_on,\n right_on=right_on,\n left_by=by_left,\n right_by=by_right,\n direction=strategy,\n suffixes=("", suffix),\n )\n )\n\n # --- partial reduction ---\n\n def head(self, n: int) -> Self:\n return self._with_native(self.native.head(n), validate_column_names=False)\n\n def tail(self, n: int) -> Self:\n return self._with_native(self.native.tail(n), validate_column_names=False)\n\n def unique(\n self,\n subset: Sequence[str] | None,\n *,\n keep: UniqueKeepStrategy,\n maintain_order: bool | None = None,\n ) -> Self:\n # The param `maintain_order` is only here for compatibility with the Polars API\n # and has no effect on the output.\n mapped_keep = {"none": False, "any": "first"}.get(keep, keep)\n if subset and (error := self._check_columns_exist(subset)):\n raise error\n return self._with_native(\n self.native.drop_duplicates(subset=subset, keep=mapped_keep),\n validate_column_names=False,\n )\n\n # --- lazy-only ---\n def lazy(self, *, backend: Implementation | None = None) -> CompliantLazyFrameAny:\n from narwhals.utils import parse_version\n\n pandas_df = self.to_pandas()\n if backend is None:\n return self\n elif backend is Implementation.DUCKDB:\n import duckdb # ignore-banned-import\n\n from narwhals._duckdb.dataframe import DuckDBLazyFrame\n\n return DuckDBLazyFrame(\n df=duckdb.table("pandas_df"),\n 
backend_version=parse_version(duckdb),\n version=self._version,\n )\n elif backend is Implementation.POLARS:\n import polars as pl # ignore-banned-import\n\n from narwhals._polars.dataframe import PolarsLazyFrame\n\n return PolarsLazyFrame(\n df=pl.from_pandas(pandas_df).lazy(),\n backend_version=parse_version(pl),\n version=self._version,\n )\n elif backend is Implementation.DASK:\n import dask # ignore-banned-import\n import dask.dataframe as dd # ignore-banned-import\n\n from narwhals._dask.dataframe import DaskLazyFrame\n\n return DaskLazyFrame(\n native_dataframe=dd.from_pandas(pandas_df),\n backend_version=parse_version(dask),\n version=self._version,\n )\n raise AssertionError # pragma: no cover\n\n @property\n def shape(self) -> tuple[int, int]:\n return self.native.shape\n\n def to_dict(self, *, as_series: bool) -> dict[str, Any]:\n if as_series:\n return {\n col: PandasLikeSeries.from_native(self.native[col], context=self)\n for col in self.columns\n }\n return self.native.to_dict(orient="list")\n\n def to_numpy(self, dtype: Any = None, *, copy: bool | None = None) -> _2DArray:\n native_dtypes = self.native.dtypes\n\n if copy is None:\n # pandas default differs from Polars, but cuDF default is True\n copy = self._implementation is Implementation.CUDF\n\n if native_dtypes.isin(CLASSICAL_NUMPY_DTYPES).all():\n # Fast path, no conversions necessary.\n if dtype is not None:\n return self.native.to_numpy(dtype=dtype, copy=copy)\n return self.native.to_numpy(copy=copy)\n\n dtype_datetime = self._version.dtypes.Datetime\n to_convert = [\n key\n for key, val in self.schema.items()\n if isinstance(val, dtype_datetime) and val.time_zone is not None\n ]\n if to_convert:\n df = self.with_columns(\n self.__narwhals_namespace__()\n .col(*to_convert)\n .dt.convert_time_zone("UTC")\n .dt.replace_time_zone(None)\n ).native\n else:\n df = self.native\n\n if dtype is not None:\n return df.to_numpy(dtype=dtype, copy=copy)\n\n # pandas return `object` dtype for nullable 
dtypes if dtype=None,\n # so we cast each Series to numpy and let numpy find a common dtype.\n # If there aren't any dtypes where `to_numpy()` is "broken" (i.e. it\n # returns Object) then we just call `to_numpy()` on the DataFrame.\n for col_dtype in native_dtypes:\n if str(col_dtype) in PANDAS_TO_NUMPY_DTYPE_MISSING:\n arr: Any = np.hstack(\n [\n self.get_column(col).to_numpy(copy=copy, dtype=None)[:, None]\n for col in self.columns\n ]\n )\n return arr\n return df.to_numpy(copy=copy)\n\n def to_pandas(self) -> pd.DataFrame:\n if self._implementation is Implementation.PANDAS:\n return self.native\n elif self._implementation is Implementation.CUDF:\n return self.native.to_pandas()\n elif self._implementation is Implementation.MODIN:\n return self.native._to_pandas()\n msg = f"Unknown implementation: {self._implementation}" # pragma: no cover\n raise AssertionError(msg)\n\n def to_polars(self) -> pl.DataFrame:\n import polars as pl # ignore-banned-import\n\n return pl.from_pandas(self.to_pandas())\n\n def write_parquet(self, file: str | Path | BytesIO) -> None:\n self.native.to_parquet(file)\n\n @overload\n def write_csv(self, file: None) -> str: ...\n\n @overload\n def write_csv(self, file: str | Path | BytesIO) -> None: ...\n\n def write_csv(self, file: str | Path | BytesIO | None) -> str | None:\n return self.native.to_csv(file, index=False)\n\n # --- descriptive ---\n def is_unique(self) -> PandasLikeSeries:\n return PandasLikeSeries.from_native(\n ~self.native.duplicated(keep=False), context=self\n )\n\n def item(self, row: int | None, column: int | str | None) -> Any:\n if row is None and column is None:\n if self.shape != (1, 1):\n msg = (\n "can only call `.item()` if the dataframe is of shape (1, 1),"\n " or if explicit row/col values are provided;"\n f" frame has shape {self.shape!r}"\n )\n raise ValueError(msg)\n return self.native.iloc[0, 0]\n\n elif row is None or column is None:\n msg = "cannot call `.item()` with only one of `row` or `column`"\n 
raise ValueError(msg)\n\n _col = self.columns.index(column) if isinstance(column, str) else column\n return self.native.iloc[row, _col]\n\n def clone(self) -> Self:\n return self._with_native(self.native.copy(), validate_column_names=False)\n\n def gather_every(self, n: int, offset: int) -> Self:\n return self._with_native(self.native.iloc[offset::n], validate_column_names=False)\n\n def _pivot_into_index_values(\n self,\n on: Sequence[str],\n index: Sequence[str] | None,\n values: Sequence[str] | None,\n /,\n ) -> tuple[Sequence[str], Sequence[str]]:\n index = index or (\n exclude_column_names(self, {*on, *values})\n if values\n else exclude_column_names(self, on)\n )\n values = values or exclude_column_names(self, {*on, *index})\n return index, values\n\n @staticmethod\n def _pivot_multi_on_name(unique_values: tuple[str, ...], /) -> str:\n LB, RB, Q = "{", "}", '"' # noqa: N806\n body = '","'.join(unique_values)\n return f"{LB}{Q}{body}{Q}{RB}"\n\n @staticmethod\n def _pivot_single_on_names(\n column_names: Iterable[str], n_values: int, separator: str, /\n ) -> list[str]:\n if n_values > 1:\n return [separator.join(col).strip() for col in column_names]\n return [col[-1] for col in column_names]\n\n def _pivot_multi_on_names(\n self,\n column_names: Iterable[tuple[str, ...]],\n n_on: int,\n n_values: int,\n separator: str,\n /,\n ) -> Iterator[str]:\n if n_values > 1:\n for col in column_names:\n names = col[-n_on:]\n prefix = col[0]\n yield separator.join((prefix, self._pivot_multi_on_name(names)))\n else:\n for col in column_names:\n yield self._pivot_multi_on_name(col[-n_on:])\n\n def _pivot_remap_column_names(\n self, column_names: Iterable[Any], *, n_on: int, n_values: int, separator: str\n ) -> list[str]:\n """Reformat output column names from a native pivot operation, to match `polars`.\n\n Note:\n `column_names` is a `pd.MultiIndex`, but not in the stubs.\n """\n if n_on == 1:\n return self._pivot_single_on_names(column_names, n_values, separator)\n return 
list(self._pivot_multi_on_names(column_names, n_on, n_values, separator))\n\n def _pivot_table(\n self,\n on: Sequence[str],\n index: Sequence[str],\n values: Sequence[str],\n aggregate_function: Literal[\n "min", "max", "first", "last", "sum", "mean", "median"\n ],\n /,\n ) -> Any:\n categorical = self._version.dtypes.Categorical\n kwds: dict[Any, Any] = {"observed": True}\n if self._implementation is Implementation.CUDF:\n kwds.pop("observed")\n cols = set(chain(values, index, on))\n schema = self.schema.items()\n if any(\n tp for name, tp in schema if name in cols and isinstance(tp, categorical)\n ):\n msg = "`pivot` with Categoricals is not implemented for cuDF backend"\n raise NotImplementedError(msg)\n return self.native.pivot_table(\n values=values,\n index=index,\n columns=on,\n aggfunc=aggregate_function,\n margins=False,\n **kwds,\n )\n\n def _pivot(\n self,\n on: Sequence[str],\n index: Sequence[str],\n values: Sequence[str],\n aggregate_function: PivotAgg | None,\n /,\n ) -> pd.DataFrame:\n if aggregate_function is None:\n return self.native.pivot(columns=on, index=index, values=values)\n elif aggregate_function == "len":\n return (\n self.native.groupby([*on, *index], as_index=False)\n .agg(dict.fromkeys(values, "size"))\n .pivot(columns=on, index=index, values=values)\n )\n return self._pivot_table(on, index, values, aggregate_function)\n\n def pivot(\n self,\n on: Sequence[str],\n *,\n index: Sequence[str] | None,\n values: Sequence[str] | None,\n aggregate_function: PivotAgg | None,\n sort_columns: bool,\n separator: str,\n ) -> Self:\n implementation = self._implementation\n if implementation.is_modin():\n msg = "pivot is not supported for Modin backend due to https://github.com/modin-project/modin/issues/7409."\n raise NotImplementedError(msg)\n\n index, values = self._pivot_into_index_values(on, index, values)\n result = self._pivot(on, index, values, aggregate_function)\n\n # Select the columns in the right order\n uniques = (\n (\n 
self.get_column(col)\n .unique()\n .sort(descending=False, nulls_last=False)\n .to_list()\n for col in on\n )\n if sort_columns\n else (self.get_column(col).unique().to_list() for col in on)\n )\n ordered_cols = list(product(values, *chain(uniques)))\n result = result.loc[:, ordered_cols]\n columns = result.columns\n remapped = self._pivot_remap_column_names(\n columns, n_on=len(on), n_values=len(values), separator=separator\n )\n result.columns = remapped # type: ignore[assignment]\n result.columns.names = [""]\n return self._with_native(result.reset_index())\n\n def to_arrow(self) -> Any:\n if self._implementation is Implementation.CUDF:\n return self.native.to_arrow(preserve_index=False)\n\n import pyarrow as pa # ignore-banned-import()\n\n return pa.Table.from_pandas(self.native)\n\n def sample(\n self,\n n: int | None,\n *,\n fraction: float | None,\n with_replacement: bool,\n seed: int | None,\n ) -> Self:\n return self._with_native(\n self.native.sample(\n n=n, frac=fraction, replace=with_replacement, random_state=seed\n ),\n validate_column_names=False,\n )\n\n def unpivot(\n self,\n on: Sequence[str] | None,\n index: Sequence[str] | None,\n variable_name: str,\n value_name: str,\n ) -> Self:\n return self._with_native(\n self.native.melt(\n id_vars=index,\n value_vars=on,\n var_name=variable_name,\n value_name=value_name,\n )\n )\n\n def explode(self, columns: Sequence[str]) -> Self:\n dtypes = self._version.dtypes\n\n schema = self.collect_schema()\n for col_to_explode in columns:\n dtype = schema[col_to_explode]\n\n if dtype != dtypes.List:\n msg = (\n f"`explode` operation not supported for dtype `{dtype}`, "\n "expected List type"\n )\n raise InvalidOperationError(msg)\n\n if len(columns) == 1:\n return self._with_native(\n self.native.explode(columns[0]), validate_column_names=False\n )\n else:\n native_frame = self.native\n anchor_series = native_frame[columns[0]].list.len()\n\n if not all(\n (native_frame[col_name].list.len() == 
anchor_series).all()\n for col_name in columns[1:]\n ):\n msg = "exploded columns must have matching element counts"\n raise ShapeError(msg)\n\n original_columns = self.columns\n other_columns = [c for c in original_columns if c not in columns]\n\n exploded_frame = native_frame[[*other_columns, columns[0]]].explode(\n columns[0]\n )\n exploded_series = [\n native_frame[col_name].explode().to_frame() for col_name in columns[1:]\n ]\n\n plx = self.__native_namespace__()\n return self._with_native(\n plx.concat([exploded_frame, *exploded_series], axis=1)[original_columns],\n validate_column_names=False,\n )\n
.venv\Lib\site-packages\narwhals\_pandas_like\dataframe.py
dataframe.py
Python
41,915
0.95
0.163527
0.036382
awesome-app
677
2024-04-13T09:44:53.746932
Apache-2.0
false
a7a7651277b09b821765c3675779e37a
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom narwhals._compliant import EagerExpr\nfrom narwhals._expression_parsing import evaluate_output_names_and_aliases\nfrom narwhals._pandas_like.group_by import PandasLikeGroupBy\nfrom narwhals._pandas_like.series import PandasLikeSeries\nfrom narwhals._utils import generate_temporary_column_name\n\nif TYPE_CHECKING:\n from collections.abc import Sequence\n\n from typing_extensions import Self\n\n from narwhals._compliant.typing import AliasNames, EvalNames, EvalSeries, ScalarKwargs\n from narwhals._expression_parsing import ExprMetadata\n from narwhals._pandas_like.dataframe import PandasLikeDataFrame\n from narwhals._pandas_like.namespace import PandasLikeNamespace\n from narwhals._utils import Implementation, Version, _FullContext\n from narwhals.typing import (\n FillNullStrategy,\n NonNestedLiteral,\n PythonLiteral,\n RankMethod,\n )\n\nWINDOW_FUNCTIONS_TO_PANDAS_EQUIVALENT = {\n "cum_sum": "cumsum",\n "cum_min": "cummin",\n "cum_max": "cummax",\n "cum_prod": "cumprod",\n # Pandas cumcount starts counting from 0 while Polars starts from 1\n # Pandas cumcount counts nulls while Polars does not\n # So, instead of using "cumcount" we use "cumsum" on notna() to get the same result\n "cum_count": "cumsum",\n "rolling_sum": "sum",\n "rolling_mean": "mean",\n "rolling_std": "std",\n "rolling_var": "var",\n "shift": "shift",\n "rank": "rank",\n "diff": "diff",\n "fill_null": "fillna",\n}\n\n\ndef window_kwargs_to_pandas_equivalent(\n function_name: str, kwargs: ScalarKwargs\n) -> dict[str, PythonLiteral]:\n if function_name == "shift":\n assert "n" in kwargs # noqa: S101\n pandas_kwargs: dict[str, PythonLiteral] = {"periods": kwargs["n"]}\n elif function_name == "rank":\n assert "method" in kwargs # noqa: S101\n assert "descending" in kwargs # noqa: S101\n _method = kwargs["method"]\n pandas_kwargs = {\n "method": "first" if _method == "ordinal" else _method,\n "ascending": not 
kwargs["descending"],\n "na_option": "keep",\n "pct": False,\n }\n elif function_name.startswith("cum_"): # Cumulative operation\n pandas_kwargs = {"skipna": True}\n elif function_name.startswith("rolling_"): # Rolling operation\n assert "min_samples" in kwargs # noqa: S101\n assert "window_size" in kwargs # noqa: S101\n assert "center" in kwargs # noqa: S101\n pandas_kwargs = {\n "min_periods": kwargs["min_samples"],\n "window": kwargs["window_size"],\n "center": kwargs["center"],\n }\n elif function_name in {"std", "var"}:\n assert "ddof" in kwargs # noqa: S101\n pandas_kwargs = {"ddof": kwargs["ddof"]}\n elif function_name == "fill_null":\n assert "strategy" in kwargs # noqa: S101\n assert "limit" in kwargs # noqa: S101\n pandas_kwargs = {"strategy": kwargs["strategy"], "limit": kwargs["limit"]}\n else: # sum, len, ...\n pandas_kwargs = {}\n return pandas_kwargs\n\n\nclass PandasLikeExpr(EagerExpr["PandasLikeDataFrame", PandasLikeSeries]):\n def __init__(\n self,\n call: EvalSeries[PandasLikeDataFrame, PandasLikeSeries],\n *,\n depth: int,\n function_name: str,\n evaluate_output_names: EvalNames[PandasLikeDataFrame],\n alias_output_names: AliasNames | None,\n implementation: Implementation,\n backend_version: tuple[int, ...],\n version: Version,\n scalar_kwargs: ScalarKwargs | None = None,\n ) -> None:\n self._call = call\n self._depth = depth\n self._function_name = function_name\n self._evaluate_output_names = evaluate_output_names\n self._alias_output_names = alias_output_names\n self._implementation = implementation\n self._backend_version = backend_version\n self._version = version\n self._scalar_kwargs = scalar_kwargs or {}\n self._metadata: ExprMetadata | None = None\n\n def __narwhals_namespace__(self) -> PandasLikeNamespace:\n from narwhals._pandas_like.namespace import PandasLikeNamespace\n\n return PandasLikeNamespace(\n self._implementation, self._backend_version, version=self._version\n )\n\n def __narwhals_expr__(self) -> None: ...\n\n 
@classmethod\n def from_column_names(\n cls: type[Self],\n evaluate_column_names: EvalNames[PandasLikeDataFrame],\n /,\n *,\n context: _FullContext,\n function_name: str = "",\n ) -> Self:\n def func(df: PandasLikeDataFrame) -> list[PandasLikeSeries]:\n try:\n return [\n PandasLikeSeries(\n df._native_frame[column_name],\n implementation=df._implementation,\n backend_version=df._backend_version,\n version=df._version,\n )\n for column_name in evaluate_column_names(df)\n ]\n except KeyError as e:\n if error := df._check_columns_exist(evaluate_column_names(df)):\n raise error from e\n raise\n\n return cls(\n func,\n depth=0,\n function_name=function_name,\n evaluate_output_names=evaluate_column_names,\n alias_output_names=None,\n implementation=context._implementation,\n backend_version=context._backend_version,\n version=context._version,\n )\n\n @classmethod\n def from_column_indices(cls, *column_indices: int, context: _FullContext) -> Self:\n def func(df: PandasLikeDataFrame) -> list[PandasLikeSeries]:\n native = df.native\n return [\n PandasLikeSeries.from_native(native.iloc[:, i], context=df)\n for i in column_indices\n ]\n\n return cls(\n func,\n depth=0,\n function_name="nth",\n evaluate_output_names=cls._eval_names_indices(column_indices),\n alias_output_names=None,\n implementation=context._implementation,\n backend_version=context._backend_version,\n version=context._version,\n )\n\n def ewm_mean(\n self,\n *,\n com: float | None,\n span: float | None,\n half_life: float | None,\n alpha: float | None,\n adjust: bool,\n min_samples: int,\n ignore_nulls: bool,\n ) -> Self:\n return self._reuse_series(\n "ewm_mean",\n com=com,\n span=span,\n half_life=half_life,\n alpha=alpha,\n adjust=adjust,\n min_samples=min_samples,\n ignore_nulls=ignore_nulls,\n )\n\n def cum_sum(self, *, reverse: bool) -> Self:\n return self._reuse_series("cum_sum", scalar_kwargs={"reverse": reverse})\n\n def shift(self, n: int) -> Self:\n return self._reuse_series("shift", 
scalar_kwargs={"n": n})\n\n def over( # noqa: C901, PLR0915\n self, partition_by: Sequence[str], order_by: Sequence[str]\n ) -> Self:\n if not partition_by:\n # e.g. `nw.col('a').cum_sum().order_by(key)`\n # We can always easily support this as it doesn't require grouping.\n assert order_by # noqa: S101\n\n def func(df: PandasLikeDataFrame) -> Sequence[PandasLikeSeries]:\n token = generate_temporary_column_name(8, df.columns)\n df = df.with_row_index(token, order_by=None).sort(\n *order_by, descending=False, nulls_last=False\n )\n results = self(df.drop([token], strict=True))\n sorting_indices = df.get_column(token)\n for s in results:\n s._scatter_in_place(sorting_indices, s)\n return results\n elif not self._is_elementary():\n msg = (\n "Only elementary expressions are supported for `.over` in pandas-like backends.\n\n"\n "Please see: "\n "https://narwhals-dev.github.io/narwhals/concepts/improve_group_by_operation/"\n )\n raise NotImplementedError(msg)\n else:\n function_name = PandasLikeGroupBy._leaf_name(self)\n pandas_function_name = WINDOW_FUNCTIONS_TO_PANDAS_EQUIVALENT.get(\n function_name, PandasLikeGroupBy._REMAP_AGGS.get(function_name)\n )\n if pandas_function_name is None:\n msg = (\n f"Unsupported function: {function_name} in `over` context.\n\n"\n f"Supported functions are {', '.join(WINDOW_FUNCTIONS_TO_PANDAS_EQUIVALENT)}\n"\n f"and {', '.join(PandasLikeGroupBy._REMAP_AGGS)}."\n )\n raise NotImplementedError(msg)\n pandas_kwargs = window_kwargs_to_pandas_equivalent(\n function_name, self._scalar_kwargs\n )\n\n def func(df: PandasLikeDataFrame) -> Sequence[PandasLikeSeries]: # noqa: C901, PLR0912\n output_names, aliases = evaluate_output_names_and_aliases(self, df, [])\n if function_name == "cum_count":\n plx = self.__narwhals_namespace__()\n df = df.with_columns(~plx.col(*output_names).is_null())\n\n if function_name.startswith("cum_"):\n assert "reverse" in self._scalar_kwargs # noqa: S101\n reverse = self._scalar_kwargs["reverse"]\n else:\n assert 
"reverse" not in self._scalar_kwargs # noqa: S101\n reverse = False\n\n if order_by:\n columns = list(set(partition_by).union(output_names).union(order_by))\n token = generate_temporary_column_name(8, columns)\n df = (\n df.simple_select(*columns)\n .with_row_index(token, order_by=None)\n .sort(*order_by, descending=reverse, nulls_last=reverse)\n )\n sorting_indices = df.get_column(token)\n elif reverse:\n columns = list(set(partition_by).union(output_names))\n df = df.simple_select(*columns)._gather_slice(slice(None, None, -1))\n grouped = df._native_frame.groupby(partition_by)\n if function_name.startswith("rolling"):\n rolling = grouped[list(output_names)].rolling(**pandas_kwargs)\n assert pandas_function_name is not None # help mypy # noqa: S101\n if pandas_function_name in {"std", "var"}:\n assert "ddof" in self._scalar_kwargs # noqa: S101\n res_native = getattr(rolling, pandas_function_name)(\n ddof=self._scalar_kwargs["ddof"]\n )\n else:\n res_native = getattr(rolling, pandas_function_name)()\n elif function_name == "fill_null":\n assert "strategy" in self._scalar_kwargs # noqa: S101\n assert "limit" in self._scalar_kwargs # noqa: S101\n df_grouped = grouped[list(output_names)]\n if self._scalar_kwargs["strategy"] == "forward":\n res_native = df_grouped.ffill(limit=self._scalar_kwargs["limit"])\n elif self._scalar_kwargs["strategy"] == "backward":\n res_native = df_grouped.bfill(limit=self._scalar_kwargs["limit"])\n else: # pragma: no cover\n # This is deprecated in pandas. 
Indeed, `nw.col('a').fill_null(3).over('b')`\n # does not seem very useful, and DuckDB doesn't support it either.\n msg = "`fill_null` with `over` without `strategy` specified is not supported."\n raise NotImplementedError(msg)\n elif function_name == "len":\n if len(output_names) != 1: # pragma: no cover\n msg = "Safety check failed, please report a bug."\n raise AssertionError(msg)\n res_native = grouped.transform("size").to_frame(aliases[0])\n else:\n res_native = grouped[list(output_names)].transform(\n pandas_function_name, **pandas_kwargs\n )\n result_frame = df._with_native(res_native).rename(\n dict(zip(output_names, aliases))\n )\n results = [result_frame.get_column(name) for name in aliases]\n if order_by:\n for s in results:\n s._scatter_in_place(sorting_indices, s)\n return results\n if reverse:\n return [s._gather_slice(slice(None, None, -1)) for s in results]\n return results\n\n return self.__class__(\n func,\n depth=self._depth + 1,\n function_name=self._function_name + "->over",\n evaluate_output_names=self._evaluate_output_names,\n alias_output_names=self._alias_output_names,\n implementation=self._implementation,\n backend_version=self._backend_version,\n version=self._version,\n )\n\n def cum_count(self, *, reverse: bool) -> Self:\n return self._reuse_series("cum_count", scalar_kwargs={"reverse": reverse})\n\n def cum_min(self, *, reverse: bool) -> Self:\n return self._reuse_series("cum_min", scalar_kwargs={"reverse": reverse})\n\n def cum_max(self, *, reverse: bool) -> Self:\n return self._reuse_series("cum_max", scalar_kwargs={"reverse": reverse})\n\n def cum_prod(self, *, reverse: bool) -> Self:\n return self._reuse_series("cum_prod", scalar_kwargs={"reverse": reverse})\n\n def fill_null(\n self,\n value: Self | NonNestedLiteral,\n strategy: FillNullStrategy | None,\n limit: int | None,\n ) -> Self:\n return self._reuse_series(\n "fill_null", scalar_kwargs={"strategy": strategy, "limit": limit}, value=value\n )\n\n def rolling_sum(self, 
window_size: int, *, min_samples: int, center: bool) -> Self:\n return self._reuse_series(\n "rolling_sum",\n scalar_kwargs={\n "window_size": window_size,\n "min_samples": min_samples,\n "center": center,\n },\n )\n\n def rolling_mean(self, window_size: int, *, min_samples: int, center: bool) -> Self:\n return self._reuse_series(\n "rolling_mean",\n scalar_kwargs={\n "window_size": window_size,\n "min_samples": min_samples,\n "center": center,\n },\n )\n\n def rolling_std(\n self, window_size: int, *, min_samples: int, center: bool, ddof: int\n ) -> Self:\n return self._reuse_series(\n "rolling_std",\n scalar_kwargs={\n "window_size": window_size,\n "min_samples": min_samples,\n "center": center,\n "ddof": ddof,\n },\n )\n\n def rolling_var(\n self, window_size: int, *, min_samples: int, center: bool, ddof: int\n ) -> Self:\n return self._reuse_series(\n "rolling_var",\n scalar_kwargs={\n "window_size": window_size,\n "min_samples": min_samples,\n "center": center,\n "ddof": ddof,\n },\n )\n\n def rank(self, method: RankMethod, *, descending: bool) -> Self:\n return self._reuse_series(\n "rank", scalar_kwargs={"method": method, "descending": descending}\n )\n\n def log(self, base: float) -> Self:\n return self._reuse_series("log", base=base)\n\n def exp(self) -> Self:\n return self._reuse_series("exp")\n\n def sqrt(self) -> Self:\n return self._reuse_series("sqrt")\n
.venv\Lib\site-packages\narwhals\_pandas_like\expr.py
expr.py
Python
16,230
0.95
0.132678
0.029891
react-lib
175
2024-09-15T12:30:26.419072
MIT
false
d110a671fe6f32adbabda1fc8ee276e1
from __future__ import annotations\n\nimport collections\nimport warnings\nfrom typing import TYPE_CHECKING, Any, ClassVar\n\nfrom narwhals._compliant import EagerGroupBy\nfrom narwhals._expression_parsing import evaluate_output_names_and_aliases\nfrom narwhals._pandas_like.utils import select_columns_by_name\nfrom narwhals._utils import find_stacklevel\n\nif TYPE_CHECKING:\n from collections.abc import Iterator, Mapping, Sequence\n\n from narwhals._compliant.group_by import NarwhalsAggregation\n from narwhals._pandas_like.dataframe import PandasLikeDataFrame\n from narwhals._pandas_like.expr import PandasLikeExpr\n\n\nclass PandasLikeGroupBy(EagerGroupBy["PandasLikeDataFrame", "PandasLikeExpr", str]):\n _REMAP_AGGS: ClassVar[Mapping[NarwhalsAggregation, Any]] = {\n "sum": "sum",\n "mean": "mean",\n "median": "median",\n "max": "max",\n "min": "min",\n "std": "std",\n "var": "var",\n "len": "size",\n "n_unique": "nunique",\n "count": "count",\n }\n\n def __init__(\n self,\n df: PandasLikeDataFrame,\n keys: Sequence[PandasLikeExpr] | Sequence[str],\n /,\n *,\n drop_null_keys: bool,\n ) -> None:\n self._df = df\n self._drop_null_keys = drop_null_keys\n self._compliant_frame, self._keys, self._output_key_names = self._parse_keys(\n df, keys=keys\n )\n # Drop index to avoid potential collisions:\n # https://github.com/narwhals-dev/narwhals/issues/1907.\n if set(self.compliant.native.index.names).intersection(self.compliant.columns):\n native_frame = self.compliant.native.reset_index(drop=True)\n else:\n native_frame = self.compliant.native\n\n self._grouped = native_frame.groupby(\n list(self._keys),\n sort=False,\n as_index=True,\n dropna=drop_null_keys,\n observed=True,\n )\n\n def agg(self, *exprs: PandasLikeExpr) -> PandasLikeDataFrame: # noqa: C901, PLR0912, PLR0914, PLR0915\n implementation = self.compliant._implementation\n backend_version = self.compliant._backend_version\n new_names: list[str] = self._keys.copy()\n\n all_aggs_are_simple = True\n exclude = 
(*self._keys, *self._output_key_names)\n for expr in exprs:\n _, aliases = evaluate_output_names_and_aliases(expr, self.compliant, exclude)\n new_names.extend(aliases)\n if not self._is_simple(expr):\n all_aggs_are_simple = False\n\n # dict of {output_name: root_name} that we count n_unique on\n # We need to do this separately from the rest so that we\n # can pass the `dropna` kwargs.\n nunique_aggs: dict[str, str] = {}\n simple_aggs: dict[str, list[str]] = collections.defaultdict(list)\n simple_aggs_functions: set[str] = set()\n\n # ddof to (output_names, aliases) mapping\n std_aggs: dict[int, tuple[list[str], list[str]]] = collections.defaultdict(\n lambda: ([], [])\n )\n var_aggs: dict[int, tuple[list[str], list[str]]] = collections.defaultdict(\n lambda: ([], [])\n )\n\n expected_old_names: list[str] = []\n simple_agg_new_names: list[str] = []\n\n if all_aggs_are_simple: # noqa: PLR1702\n for expr in exprs:\n output_names, aliases = evaluate_output_names_and_aliases(\n expr, self.compliant, exclude\n )\n if expr._depth == 0:\n # e.g. `agg(nw.len())`\n function_name = self._remap_expr_name(expr._function_name)\n simple_aggs_functions.add(function_name)\n\n for alias in aliases:\n expected_old_names.append(f"{self._keys[0]}_{function_name}")\n simple_aggs[self._keys[0]].append(function_name)\n simple_agg_new_names.append(alias)\n continue\n\n # e.g. 
`agg(nw.mean('a'))`\n function_name = self._remap_expr_name(self._leaf_name(expr))\n is_n_unique = function_name == "nunique"\n is_std = function_name == "std"\n is_var = function_name == "var"\n for output_name, alias in zip(output_names, aliases):\n if is_n_unique:\n nunique_aggs[alias] = output_name\n elif is_std and (ddof := expr._scalar_kwargs["ddof"]) != 1: # pyright: ignore[reportTypedDictNotRequiredAccess]\n std_aggs[ddof][0].append(output_name)\n std_aggs[ddof][1].append(alias)\n elif is_var and (ddof := expr._scalar_kwargs["ddof"]) != 1: # pyright: ignore[reportTypedDictNotRequiredAccess]\n var_aggs[ddof][0].append(output_name)\n var_aggs[ddof][1].append(alias)\n else:\n expected_old_names.append(f"{output_name}_{function_name}")\n simple_aggs[output_name].append(function_name)\n simple_agg_new_names.append(alias)\n simple_aggs_functions.add(function_name)\n\n result_aggs = []\n\n if simple_aggs:\n # Fast path for single aggregation such as `df.groupby(...).mean()`\n if (\n len(simple_aggs_functions) == 1\n and (agg_method := simple_aggs_functions.pop()) != "size"\n and len(simple_aggs) > 1\n ):\n result_simple_aggs = getattr(\n self._grouped[list(simple_aggs.keys())], agg_method\n )()\n result_simple_aggs.columns = [\n f"{a}_{agg_method}" for a in result_simple_aggs.columns\n ]\n else:\n result_simple_aggs = self._grouped.agg(simple_aggs)\n result_simple_aggs.columns = [\n f"{a}_{b}" for a, b in result_simple_aggs.columns\n ]\n if not (\n set(result_simple_aggs.columns) == set(expected_old_names)\n and len(result_simple_aggs.columns) == len(expected_old_names)\n ): # pragma: no cover\n msg = (\n f"Safety assertion failed, expected {expected_old_names} "\n f"got {result_simple_aggs.columns}, "\n "please report a bug at https://github.com/narwhals-dev/narwhals/issues"\n )\n raise AssertionError(msg)\n\n # Rename columns, being very careful\n expected_old_names_indices: dict[str, list[int]] = (\n collections.defaultdict(list)\n )\n for idx, item in 
enumerate(expected_old_names):\n expected_old_names_indices[item].append(idx)\n index_map: list[int] = [\n expected_old_names_indices[item].pop(0)\n for item in result_simple_aggs.columns\n ]\n result_simple_aggs.columns = [simple_agg_new_names[i] for i in index_map]\n result_aggs.append(result_simple_aggs)\n\n if nunique_aggs:\n result_nunique_aggs = self._grouped[list(nunique_aggs.values())].nunique(\n dropna=False\n )\n result_nunique_aggs.columns = list(nunique_aggs.keys())\n\n result_aggs.append(result_nunique_aggs)\n\n if std_aggs:\n for ddof, (std_output_names, std_aliases) in std_aggs.items():\n _aggregation = self._grouped[std_output_names].std(ddof=ddof)\n # `_aggregation` is a new object so it's OK to operate inplace.\n _aggregation.columns = std_aliases\n result_aggs.append(_aggregation)\n if var_aggs:\n for ddof, (var_output_names, var_aliases) in var_aggs.items():\n _aggregation = self._grouped[var_output_names].var(ddof=ddof)\n # `_aggregation` is a new object so it's OK to operate inplace.\n _aggregation.columns = var_aliases\n result_aggs.append(_aggregation)\n\n if result_aggs:\n output_names_counter = collections.Counter(\n c for frame in result_aggs for c in frame\n )\n if any(v > 1 for v in output_names_counter.values()):\n msg = ""\n for key, value in output_names_counter.items():\n if value > 1:\n msg += f"\n- '{key}' {value} times"\n else: # pragma: no cover\n pass\n msg = f"Expected unique output names, got:{msg}"\n raise ValueError(msg)\n namespace = self.compliant.__narwhals_namespace__()\n result = namespace._concat_horizontal(result_aggs)\n else:\n # No aggregation provided\n result = self.compliant.__native_namespace__().DataFrame(\n list(self._grouped.groups.keys()), columns=self._keys\n )\n # Keep inplace=True to avoid making a redundant copy.\n # This may need updating, depending on https://github.com/pandas-dev/pandas/pull/51466/files\n result.reset_index(inplace=True) # noqa: PD002\n return self.compliant._with_native(\n 
select_columns_by_name(result, new_names, backend_version, implementation)\n ).rename(dict(zip(self._keys, self._output_key_names)))\n\n if self.compliant.native.empty:\n # Don't even attempt this, it's way too inconsistent across pandas versions.\n msg = (\n "No results for group-by aggregation.\n\n"\n "Hint: you were probably trying to apply a non-elementary aggregation with a "\n "pandas-like API.\n"\n "Please rewrite your query such that group-by aggregations "\n "are elementary. For example, instead of:\n\n"\n " df.group_by('a').agg(nw.col('b').round(2).mean())\n\n"\n "use:\n\n"\n " df.with_columns(nw.col('b').round(2)).group_by('a').agg(nw.col('b').mean())\n\n"\n )\n raise ValueError(msg)\n\n warnings.warn(\n "Found complex group-by expression, which can't be expressed efficiently with the "\n "pandas API. If you can, please rewrite your query such that group-by aggregations "\n "are simple (e.g. mean, std, min, max, ...). \n\n"\n "Please see: "\n "https://narwhals-dev.github.io/narwhals/concepts/improve_group_by_operation/",\n UserWarning,\n stacklevel=find_stacklevel(),\n )\n\n def func(df: Any) -> Any:\n out_group = []\n out_names = []\n for expr in exprs:\n results_keys = expr(self.compliant._with_native(df))\n for result_keys in results_keys:\n out_group.append(result_keys.native.iloc[0])\n out_names.append(result_keys.name)\n ns = self.compliant.__narwhals_namespace__()\n return ns._series.from_iterable(out_group, index=out_names, context=ns).native\n\n if implementation.is_pandas() and backend_version >= (2, 2):\n result_complex = self._grouped.apply(func, include_groups=False)\n else: # pragma: no cover\n result_complex = self._grouped.apply(func)\n\n # Keep inplace=True to avoid making a redundant copy.\n # This may need updating, depending on https://github.com/pandas-dev/pandas/pull/51466/files\n result_complex.reset_index(inplace=True) # noqa: PD002\n return self.compliant._with_native(\n select_columns_by_name(\n result_complex, new_names, 
backend_version, implementation\n )\n ).rename(dict(zip(self._keys, self._output_key_names)))\n\n def __iter__(self) -> Iterator[tuple[Any, PandasLikeDataFrame]]:\n with warnings.catch_warnings():\n warnings.filterwarnings(\n "ignore",\n message=".*a length 1 tuple will be returned",\n category=FutureWarning,\n )\n\n for key, group in self._grouped:\n yield (\n key,\n self.compliant._with_native(group).simple_select(*self._df.columns),\n )\n
.venv\Lib\site-packages\narwhals\_pandas_like\group_by.py
group_by.py
Python
12,747
0.95
0.148936
0.075397
python-kit
355
2025-04-06T06:29:17.156247
BSD-3-Clause
false
86f38874ed63707c8737900fff948a68
"""Pandas-like implementation of the compliant namespace.

Backs `nw.col`, `nw.lit`, horizontal aggregations, `when/then`, and concat
for pandas, modin and cuDF via a shared `Implementation` tag.
"""

from __future__ import annotations

import operator
import warnings
from functools import reduce
from itertools import chain
from typing import TYPE_CHECKING, Any, Literal, Protocol, overload

from narwhals._compliant import CompliantThen, EagerNamespace, EagerWhen
from narwhals._expression_parsing import (
    combine_alias_output_names,
    combine_evaluate_output_names,
)
from narwhals._pandas_like.dataframe import PandasLikeDataFrame
from narwhals._pandas_like.expr import PandasLikeExpr
from narwhals._pandas_like.selectors import PandasSelectorNamespace
from narwhals._pandas_like.series import PandasLikeSeries
from narwhals._pandas_like.typing import NativeDataFrameT, NativeSeriesT
from narwhals._pandas_like.utils import is_non_nullable_boolean

if TYPE_CHECKING:
    from collections.abc import Iterable, Sequence

    from typing_extensions import TypeAlias

    from narwhals._utils import Implementation, Version
    from narwhals.typing import IntoDType, NonNestedLiteral


Incomplete: TypeAlias = Any
"""Escape hatch, but leaving a trace that this isn't ideal."""


# Axis constants for native `concat`: 0 stacks rows, 1 stacks columns.
_Vertical: TypeAlias = Literal[0]
_Horizontal: TypeAlias = Literal[1]
Axis: TypeAlias = Literal[_Vertical, _Horizontal]

VERTICAL: _Vertical = 0
HORIZONTAL: _Horizontal = 1


class PandasLikeNamespace(
    EagerNamespace[
        PandasLikeDataFrame,
        PandasLikeSeries,
        PandasLikeExpr,
        NativeDataFrameT,
        NativeSeriesT,
    ]
):
    """Namespace of top-level expression constructors for pandas-like backends.

    Concrete wrapper classes are exposed through the `_dataframe`/`_expr`/
    `_series` properties so the generic `EagerNamespace` machinery can
    construct them without importing this module.
    """

    @property
    def _dataframe(self) -> type[PandasLikeDataFrame]:
        return PandasLikeDataFrame

    @property
    def _expr(self) -> type[PandasLikeExpr]:
        return PandasLikeExpr

    @property
    def _series(self) -> type[PandasLikeSeries]:
        return PandasLikeSeries

    @property
    def selectors(self) -> PandasSelectorNamespace:
        # Selector namespace (`cs.numeric()`, ...) bound to this context.
        return PandasSelectorNamespace.from_namespace(self)

    # --- not in spec ---
    def __init__(
        self,
        implementation: Implementation,
        backend_version: tuple[int, ...],
        version: Version,
    ) -> None:
        """Store the backend identity used by every expression this namespace builds.

        Arguments:
            implementation: Which pandas-like library backs this namespace
                (pandas / modin / cuDF).
            backend_version: Parsed version of that native library, used for
                version-gated code paths.
            version: Narwhals API version requested by the caller.
        """
        self._implementation = implementation
        self._backend_version = backend_version
        self._version = version

    def lit(self, value: NonNestedLiteral, dtype: IntoDType | None) -> PandasLikeExpr:
        """Return an expression producing a length-1 literal series named "literal"."""

        def _lit_pandas_series(df: PandasLikeDataFrame) -> PandasLikeSeries:
            # Reuse the first row's index label so the literal aligns with `df`
            # under pandas' index-alignment rules.
            pandas_series = self._series.from_iterable(
                data=[value],
                name="literal",
                index=df._native_frame.index[0:1],
                context=self,
            )
            if dtype:
                return pandas_series.cast(dtype)
            return pandas_series

        return PandasLikeExpr(
            lambda df: [_lit_pandas_series(df)],
            depth=0,
            function_name="lit",
            evaluate_output_names=lambda _df: ["literal"],
            alias_output_names=None,
            implementation=self._implementation,
            backend_version=self._backend_version,
            version=self._version,
        )

    def len(self) -> PandasLikeExpr:
        """Return an expression evaluating to the frame's row count, named "len"."""
        return PandasLikeExpr(
            lambda df: [
                self._series.from_iterable(
                    [len(df._native_frame)], name="len", index=[0], context=self
                )
            ],
            depth=0,
            function_name="len",
            evaluate_output_names=lambda _df: ["len"],
            alias_output_names=None,
            implementation=self._implementation,
            backend_version=self._backend_version,
            version=self._version,
        )

    # --- horizontal ---
    def sum_horizontal(self, *exprs: PandasLikeExpr) -> PandasLikeExpr:
        """Row-wise sum across `exprs`; nulls are treated as 0 via `fill_null`."""

        def func(df: PandasLikeDataFrame) -> list[PandasLikeSeries]:
            align = self._series._align_full_broadcast
            it = chain.from_iterable(expr(df) for expr in exprs)
            series = align(*it)
            # fill_null(value, strategy, limit): nulls contribute 0 to the sum.
            native_series = (s.fill_null(0, None, None) for s in series)
            return [reduce(operator.add, native_series)]

        return self._expr._from_callable(
            func=func,
            depth=max(x._depth for x in exprs) + 1,
            function_name="sum_horizontal",
            evaluate_output_names=combine_evaluate_output_names(*exprs),
            alias_output_names=combine_alias_output_names(*exprs),
            context=self,
        )

    def all_horizontal(
        self, *exprs: PandasLikeExpr, ignore_nulls: bool
    ) -> PandasLikeExpr:
        """Row-wise boolean AND across `exprs`.

        With `ignore_nulls=True`, nulls are filled with `True` (the AND
        identity). With `ignore_nulls=False`, object-dtype columns containing
        nulls are rejected, since classical NumPy bool storage cannot
        represent missing values (Kleene logic would be unsound there).
        """

        def func(df: PandasLikeDataFrame) -> list[PandasLikeSeries]:
            align = self._series._align_full_broadcast
            series = [s for _expr in exprs for s in _expr(df)]
            if not ignore_nulls and any(
                s.native.dtype == "object" and s.is_null().any() for s in series
            ):
                # classical NumPy boolean columns don't support missing values, so
                # only do the full scan with `is_null` if we have `object` dtype.
                msg = "Cannot use `ignore_nulls=False` in `all_horizontal` for non-nullable NumPy-backed pandas Series when nulls are present."
                raise ValueError(msg)
            it = (
                (
                    # NumPy-backed 'bool' dtype can't contain nulls so doesn't need filling.
                    s if is_non_nullable_boolean(s) else s.fill_null(True, None, None)  # noqa: FBT003
                    for s in series
                )
                if ignore_nulls
                else iter(series)
            )
            return [reduce(operator.and_, align(*it))]

        return self._expr._from_callable(
            func=func,
            depth=max(x._depth for x in exprs) + 1,
            function_name="all_horizontal",
            evaluate_output_names=combine_evaluate_output_names(*exprs),
            alias_output_names=combine_alias_output_names(*exprs),
            context=self,
        )

    def any_horizontal(
        self, *exprs: PandasLikeExpr, ignore_nulls: bool
    ) -> PandasLikeExpr:
        """Row-wise boolean OR across `exprs`.

        Mirror image of `all_horizontal`: nulls are filled with `False`
        (the OR identity) when `ignore_nulls=True`.
        """

        def func(df: PandasLikeDataFrame) -> list[PandasLikeSeries]:
            align = self._series._align_full_broadcast
            series = [s for _expr in exprs for s in _expr(df)]
            if not ignore_nulls and any(
                s.native.dtype == "object" and s.is_null().any() for s in series
            ):
                # classical NumPy boolean columns don't support missing values, so
                # only do the full scan with `is_null` if we have `object` dtype.
                msg = "Cannot use `ignore_nulls=False` in `any_horizontal` for non-nullable NumPy-backed pandas Series when nulls are present."
                raise ValueError(msg)
            it = (
                (
                    # NumPy-backed 'bool' dtype can't contain nulls so doesn't need filling.
                    s if is_non_nullable_boolean(s) else s.fill_null(False, None, None)  # noqa: FBT003
                    for s in series
                )
                if ignore_nulls
                else iter(series)
            )
            return [reduce(operator.or_, align(*it))]

        return self._expr._from_callable(
            func=func,
            depth=max(x._depth for x in exprs) + 1,
            function_name="any_horizontal",
            evaluate_output_names=combine_evaluate_output_names(*exprs),
            alias_output_names=combine_alias_output_names(*exprs),
            context=self,
        )

    def mean_horizontal(self, *exprs: PandasLikeExpr) -> PandasLikeExpr:
        """Row-wise mean across `exprs`, ignoring nulls.

        Computed as sum-of-null-filled-values divided by the per-row count of
        non-null entries, so a null contributes to neither numerator nor
        denominator.
        """

        def func(df: PandasLikeDataFrame) -> list[PandasLikeSeries]:
            expr_results = [s for _expr in exprs for s in _expr(df)]
            align = self._series._align_full_broadcast
            series = align(
                *(s.fill_null(0, strategy=None, limit=None) for s in expr_results)
            )
            # `1 - is_null()` gives 1 for present values, 0 for nulls.
            non_na = align(*(1 - s.is_null() for s in expr_results))
            return [reduce(operator.add, series) / reduce(operator.add, non_na)]

        return self._expr._from_callable(
            func=func,
            depth=max(x._depth for x in exprs) + 1,
            function_name="mean_horizontal",
            evaluate_output_names=combine_evaluate_output_names(*exprs),
            alias_output_names=combine_alias_output_names(*exprs),
            context=self,
        )

    def min_horizontal(self, *exprs: PandasLikeExpr) -> PandasLikeExpr:
        """Row-wise minimum across `exprs`, via native `DataFrame.min(axis=1)`."""

        def func(df: PandasLikeDataFrame) -> list[PandasLikeSeries]:
            it = chain.from_iterable(expr(df) for expr in exprs)
            align = self._series._align_full_broadcast
            series = align(*it)

            return [
                PandasLikeSeries(
                    # NOTE(review): accesses `._native_frame` here while
                    # `max_horizontal` uses the `.native` property — presumably
                    # equivalent; confirm and unify.
                    self.concat(
                        (s.to_frame() for s in series), how="horizontal"
                    )._native_frame.min(axis=1),
                    implementation=self._implementation,
                    backend_version=self._backend_version,
                    version=self._version,
                ).alias(series[0].name)
            ]

        return self._expr._from_callable(
            func=func,
            depth=max(x._depth for x in exprs) + 1,
            function_name="min_horizontal",
            evaluate_output_names=combine_evaluate_output_names(*exprs),
            alias_output_names=combine_alias_output_names(*exprs),
            context=self,
        )

    def max_horizontal(self, *exprs: PandasLikeExpr) -> PandasLikeExpr:
        """Row-wise maximum across `exprs`, via native `DataFrame.max(axis=1)`."""

        def func(df: PandasLikeDataFrame) -> list[PandasLikeSeries]:
            it = chain.from_iterable(expr(df) for expr in exprs)
            align = self._series._align_full_broadcast
            series = align(*it)

            return [
                PandasLikeSeries(
                    self.concat(
                        (s.to_frame() for s in series), how="horizontal"
                    ).native.max(axis=1),
                    implementation=self._implementation,
                    backend_version=self._backend_version,
                    version=self._version,
                ).alias(series[0].name)
            ]

        return self._expr._from_callable(
            func=func,
            depth=max(x._depth for x in exprs) + 1,
            function_name="max_horizontal",
            evaluate_output_names=combine_evaluate_output_names(*exprs),
            alias_output_names=combine_alias_output_names(*exprs),
            context=self,
        )

    @property
    def _concat(self) -> _NativeConcat[NativeDataFrameT, NativeSeriesT]:
        """Concatenate pandas objects along a particular axis.

        Return the **native** equivalent of `pd.concat`.
        """
        return self._implementation.to_native_namespace().concat

    def _concat_diagonal(self, dfs: Sequence[NativeDataFrameT], /) -> NativeDataFrameT:
        # `copy=False` avoids a defensive copy but was deprecated/removed for
        # pandas 3.x, hence the version gate.
        if self._implementation.is_pandas() and self._backend_version < (3,):
            return self._concat(dfs, axis=VERTICAL, copy=False)
        return self._concat(dfs, axis=VERTICAL)

    def _concat_horizontal(
        self, dfs: Sequence[NativeDataFrameT | NativeSeriesT], /
    ) -> NativeDataFrameT:
        if self._implementation.is_cudf():
            # cuDF emits a FutureWarning for empty entries; silence it locally.
            with warnings.catch_warnings():
                warnings.filterwarnings(
                    "ignore",
                    message="The behavior of array concatenation with empty entries is deprecated",
                    category=FutureWarning,
                )
                return self._concat(dfs, axis=HORIZONTAL)
        elif self._implementation.is_pandas() and self._backend_version < (3,):
            return self._concat(dfs, axis=HORIZONTAL, copy=False)
        return self._concat(dfs, axis=HORIZONTAL)

    def _concat_vertical(self, dfs: Sequence[NativeDataFrameT], /) -> NativeDataFrameT:
        # Strict vstack: every frame must match the first frame's column
        # names exactly (same order, same length).
        cols_0 = dfs[0].columns
        for i, df in enumerate(dfs[1:], start=1):
            cols_current = df.columns
            if not (
                (len(cols_current) == len(cols_0)) and (cols_current == cols_0).all()
            ):
                msg = (
                    "unable to vstack, column names don't match:\n"
                    f"   - dataframe 0: {cols_0.to_list()}\n"
                    f"   - dataframe {i}: {cols_current.to_list()}\n"
                )
                raise TypeError(msg)
        if self._implementation.is_pandas() and self._backend_version < (3,):
            return self._concat(dfs, axis=VERTICAL, copy=False)
        return self._concat(dfs, axis=VERTICAL)

    def when(self, predicate: PandasLikeExpr) -> PandasWhen[NativeSeriesT]:
        """Start a `when/then/otherwise` chain gated on `predicate`."""
        return PandasWhen[NativeSeriesT].from_expr(predicate, context=self)

    def concat_str(
        self, *exprs: PandasLikeExpr, separator: str, ignore_nulls: bool
    ) -> PandasLikeExpr:
        """Row-wise string concatenation of `exprs` joined by `separator`.

        With `ignore_nulls=False`, any null in a row nulls the whole result;
        otherwise null fields (and their separators) are dropped from the row.
        """
        string = self._version.dtypes.String()

        def func(df: PandasLikeDataFrame) -> list[PandasLikeSeries]:
            expr_results = [s for _expr in exprs for s in _expr(df)]
            align = self._series._align_full_broadcast
            series = align(*(s.cast(string) for s in expr_results))
            null_mask = align(*(s.is_null() for s in expr_results))

            if not ignore_nulls:
                # Any null anywhere in the row propagates to a null result.
                null_mask_result = reduce(operator.or_, null_mask)
                result = reduce(lambda x, y: x + separator + y, series).zip_with(
                    ~null_mask_result, None
                )
            else:
                # NOTE: Trying to help `mypy` later
                # error: Cannot determine type of "values"  [has-type]
                values: list[PandasLikeSeries]
                # Replace nulls with "" so they vanish from the concatenation.
                init_value, *values = [
                    s.zip_with(~nm, "") for s, nm in zip(series, null_mask)
                ]

                sep_array = init_value.from_iterable(
                    data=[separator] * len(init_value),
                    name="sep",
                    index=init_value.native.index,
                    context=self,
                )
                # Drop the separator too wherever the *preceding* value was null.
                separators = (sep_array.zip_with(~nm, "") for nm in null_mask[:-1])
                result = reduce(
                    operator.add, (s + v for s, v in zip(separators, values)), init_value
                )

            return [result]

        return self._expr._from_callable(
            func=func,
            depth=max(x._depth for x in exprs) + 1,
            function_name="concat_str",
            evaluate_output_names=combine_evaluate_output_names(*exprs),
            alias_output_names=combine_alias_output_names(*exprs),
            context=self,
        )


class _NativeConcat(Protocol[NativeDataFrameT, NativeSeriesT]):
    """Typed protocol for the native `concat` callable (see `pd.concat`)."""

    @overload
    def __call__(
        self,
        objs: Iterable[NativeDataFrameT],
        *,
        axis: _Vertical,
        copy: bool | None = ...,
    ) -> NativeDataFrameT: ...
    @overload
    def __call__(
        self, objs: Iterable[NativeSeriesT], *, axis: _Vertical, copy: bool | None = ...
    ) -> NativeSeriesT: ...
    @overload
    def __call__(
        self,
        objs: Iterable[NativeDataFrameT | NativeSeriesT],
        *,
        axis: _Horizontal,
        copy: bool | None = ...,
    ) -> NativeDataFrameT: ...
    @overload
    def __call__(
        self,
        objs: Iterable[NativeDataFrameT | NativeSeriesT],
        *,
        axis: Axis,
        copy: bool | None = ...,
    ) -> NativeDataFrameT | NativeSeriesT: ...

    def __call__(
        self,
        objs: Iterable[NativeDataFrameT | NativeSeriesT],
        *,
        axis: Axis,
        copy: bool | None = None,
    ) -> NativeDataFrameT | NativeSeriesT: ...


class PandasWhen(
    EagerWhen[PandasLikeDataFrame, PandasLikeSeries, PandasLikeExpr, NativeSeriesT]
):
    """`when` clause whose branch selection maps onto native `Series.where`."""

    @property
    def _then(self) -> type[PandasThen]:
        return PandasThen

    def _if_then_else(
        self,
        when: NativeSeriesT,
        then: NativeSeriesT,
        otherwise: NativeSeriesT | NonNestedLiteral,
    ) -> NativeSeriesT:
        # `Series.where(cond)` keeps values where cond is True, else NaN/NA;
        # passing `otherwise` supplies the replacement for False positions.
        where: Incomplete = then.where
        return where(when) if otherwise is None else where(when, otherwise)


class PandasThen(
    CompliantThen[PandasLikeDataFrame, PandasLikeSeries, PandasLikeExpr], PandasLikeExpr
): ...
.venv\Lib\site-packages\narwhals\_pandas_like\namespace.py
namespace.py
Python
16,149
0.95
0.209412
0.040323
react-lib
137
2023-11-23T11:24:33.712806
BSD-3-Clause
false
3d570c60d7c018dd0cd41f047d3ab100
"""Selector support for pandas-like backends.

Thin glue binding the generic eager-selector machinery to the pandas-like
expression type.
"""

from __future__ import annotations

from typing import TYPE_CHECKING

from narwhals._compliant import CompliantSelector, EagerSelectorNamespace
from narwhals._pandas_like.expr import PandasLikeExpr

if TYPE_CHECKING:
    from narwhals._pandas_like.dataframe import PandasLikeDataFrame  # noqa: F401
    from narwhals._pandas_like.series import PandasLikeSeries  # noqa: F401


class PandasSelectorNamespace(
    EagerSelectorNamespace["PandasLikeDataFrame", "PandasLikeSeries"]
):
    """Selector namespace; `_selector` tells the base class what to instantiate."""

    @property
    def _selector(self) -> type[PandasSelector]:
        return PandasSelector


class PandasSelector(  # type: ignore[misc]
    CompliantSelector["PandasLikeDataFrame", "PandasLikeSeries"], PandasLikeExpr
):
    """A column selector that can also be used as a pandas-like expression."""

    def _to_expr(self) -> PandasLikeExpr:
        # Demote this selector to a plain expression, copying every field
        # verbatim (needed once set algebra on selectors is no longer valid).
        return PandasLikeExpr(
            self._call,
            depth=self._depth,
            function_name=self._function_name,
            evaluate_output_names=self._evaluate_output_names,
            alias_output_names=self._alias_output_names,
            implementation=self._implementation,
            backend_version=self._backend_version,
            version=self._version,
        )
.venv\Lib\site-packages\narwhals\_pandas_like\selectors.py
selectors.py
Python
1,144
0.95
0.147059
0
react-lib
562
2025-02-15T17:52:54.658716
BSD-3-Clause
false
57acb033b8180e07ad1fee4ac5c63826
from __future__ import annotations\n\nimport warnings\nfrom typing import TYPE_CHECKING, Any, cast\n\nimport numpy as np\n\nfrom narwhals._compliant import EagerSeries\nfrom narwhals._pandas_like.series_cat import PandasLikeSeriesCatNamespace\nfrom narwhals._pandas_like.series_dt import PandasLikeSeriesDateTimeNamespace\nfrom narwhals._pandas_like.series_list import PandasLikeSeriesListNamespace\nfrom narwhals._pandas_like.series_str import PandasLikeSeriesStringNamespace\nfrom narwhals._pandas_like.series_struct import PandasLikeSeriesStructNamespace\nfrom narwhals._pandas_like.utils import (\n align_and_extract_native,\n get_dtype_backend,\n import_array_module,\n narwhals_to_native_dtype,\n native_to_narwhals_dtype,\n object_native_to_narwhals_dtype,\n rename,\n select_columns_by_name,\n set_index,\n)\nfrom narwhals._typing_compat import assert_never\nfrom narwhals._utils import (\n Implementation,\n is_list_of,\n parse_version,\n validate_backend_version,\n)\nfrom narwhals.dependencies import is_numpy_array_1d, is_pandas_like_series\nfrom narwhals.exceptions import InvalidOperationError\n\nif TYPE_CHECKING:\n from collections.abc import Hashable, Iterable, Iterator, Mapping, Sequence\n from types import ModuleType\n\n import pandas as pd\n import polars as pl\n import pyarrow as pa\n from typing_extensions import Self, TypeIs\n\n from narwhals._arrow.typing import ChunkedArrayAny\n from narwhals._pandas_like.dataframe import PandasLikeDataFrame\n from narwhals._pandas_like.namespace import PandasLikeNamespace\n from narwhals._utils import Version, _FullContext\n from narwhals.dtypes import DType\n from narwhals.typing import (\n ClosedInterval,\n FillNullStrategy,\n Into1DArray,\n IntoDType,\n NonNestedLiteral,\n NumericLiteral,\n RankMethod,\n RollingInterpolationMethod,\n SizedMultiIndexSelector,\n TemporalLiteral,\n _1DArray,\n _AnyDArray,\n _SliceIndex,\n )\n\nPANDAS_TO_NUMPY_DTYPE_NO_MISSING = {\n "Int64": "int64",\n "int64[pyarrow]": "int64",\n "Int32": 
"int32",\n "int32[pyarrow]": "int32",\n "Int16": "int16",\n "int16[pyarrow]": "int16",\n "Int8": "int8",\n "int8[pyarrow]": "int8",\n "UInt64": "uint64",\n "uint64[pyarrow]": "uint64",\n "UInt32": "uint32",\n "uint32[pyarrow]": "uint32",\n "UInt16": "uint16",\n "uint16[pyarrow]": "uint16",\n "UInt8": "uint8",\n "uint8[pyarrow]": "uint8",\n "Float64": "float64",\n "float64[pyarrow]": "float64",\n "Float32": "float32",\n "float32[pyarrow]": "float32",\n}\nPANDAS_TO_NUMPY_DTYPE_MISSING = {\n "Int64": "float64",\n "int64[pyarrow]": "float64",\n "Int32": "float64",\n "int32[pyarrow]": "float64",\n "Int16": "float64",\n "int16[pyarrow]": "float64",\n "Int8": "float64",\n "int8[pyarrow]": "float64",\n "UInt64": "float64",\n "uint64[pyarrow]": "float64",\n "UInt32": "float64",\n "uint32[pyarrow]": "float64",\n "UInt16": "float64",\n "uint16[pyarrow]": "float64",\n "UInt8": "float64",\n "uint8[pyarrow]": "float64",\n "Float64": "float64",\n "float64[pyarrow]": "float64",\n "Float32": "float32",\n "float32[pyarrow]": "float32",\n}\n\n\nclass PandasLikeSeries(EagerSeries[Any]):\n def __init__(\n self,\n native_series: Any,\n *,\n implementation: Implementation,\n backend_version: tuple[int, ...],\n version: Version,\n ) -> None:\n self._name = native_series.name\n self._native_series = native_series\n self._implementation = implementation\n self._backend_version = backend_version\n self._version = version\n validate_backend_version(self._implementation, self._backend_version)\n # Flag which indicates if, in the final step before applying an operation,\n # the single value behind the PandasLikeSeries should be extract and treated\n # as a Scalar. For example, in `nw.col('a') - nw.lit(3)`, the latter would\n # become a Series of length 1. 
Rather that doing a full broadcast so it matches\n # the length of the whole dataframe, we just extract the scalar.\n self._broadcast = False\n\n @property\n def native(self) -> Any:\n return self._native_series\n\n def __native_namespace__(self) -> ModuleType:\n if self._implementation.is_pandas_like():\n return self._implementation.to_native_namespace()\n\n msg = f"Expected pandas/modin/cudf, got: {type(self._implementation)}" # pragma: no cover\n raise AssertionError(msg)\n\n def __narwhals_namespace__(self) -> PandasLikeNamespace:\n from narwhals._pandas_like.namespace import PandasLikeNamespace\n\n return PandasLikeNamespace(\n self._implementation, self._backend_version, self._version\n )\n\n def _gather(self, rows: SizedMultiIndexSelector[pd.Series[Any]]) -> Self:\n rows = list(rows) if isinstance(rows, tuple) else rows\n return self._with_native(self.native.iloc[rows])\n\n def _gather_slice(self, rows: _SliceIndex | range) -> Self:\n return self._with_native(\n self.native.iloc[slice(rows.start, rows.stop, rows.step)]\n )\n\n def _with_version(self, version: Version) -> Self:\n return self.__class__(\n self.native,\n implementation=self._implementation,\n backend_version=self._backend_version,\n version=version,\n )\n\n def _with_native(self, series: Any, *, preserve_broadcast: bool = False) -> Self:\n result = self.__class__(\n series,\n implementation=self._implementation,\n backend_version=self._backend_version,\n version=self._version,\n )\n if preserve_broadcast:\n result._broadcast = self._broadcast\n return result\n\n @classmethod\n def from_iterable(\n cls,\n data: Iterable[Any],\n *,\n context: _FullContext,\n name: str = "",\n dtype: IntoDType | None = None,\n index: Any = None,\n ) -> Self:\n implementation = context._implementation\n backend_version = context._backend_version\n version = context._version\n ns = implementation.to_native_namespace()\n kwds: dict[str, Any] = {}\n if dtype:\n kwds["dtype"] = narwhals_to_native_dtype(\n dtype, None, 
implementation, backend_version, version\n )\n else:\n if implementation.is_pandas():\n kwds["copy"] = False\n if index is not None and len(index):\n kwds["index"] = index\n return cls.from_native(ns.Series(data, name=name, **kwds), context=context)\n\n @staticmethod\n def _is_native(obj: Any) -> TypeIs[Any]:\n return is_pandas_like_series(obj) # pragma: no cover\n\n @classmethod\n def from_native(cls, data: Any, /, *, context: _FullContext) -> Self:\n return cls(\n data,\n implementation=context._implementation,\n backend_version=context._backend_version,\n version=context._version,\n )\n\n @classmethod\n def from_numpy(cls, data: Into1DArray, /, *, context: _FullContext) -> Self:\n implementation = context._implementation\n arr = data if is_numpy_array_1d(data) else [data]\n native = implementation.to_native_namespace().Series(arr, name="")\n return cls.from_native(native, context=context)\n\n @classmethod\n def _align_full_broadcast(cls, *series: Self) -> Sequence[Self]:\n Series = series[0].__native_namespace__().Series # noqa: N806\n lengths = [len(s) for s in series]\n max_length = max(lengths)\n idx = series[lengths.index(max_length)].native.index\n reindexed = []\n for s in series:\n if s._broadcast:\n native = Series(\n s.native.iloc[0], index=idx, name=s.name, dtype=s.native.dtype\n )\n compliant = s._with_native(native)\n elif s.native.index is not idx:\n native = set_index(\n s.native,\n idx,\n implementation=s._implementation,\n backend_version=s._backend_version,\n )\n compliant = s._with_native(native)\n else:\n compliant = s\n reindexed.append(compliant)\n return reindexed\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def dtype(self) -> DType:\n native_dtype = self.native.dtype\n return (\n native_to_narwhals_dtype(native_dtype, self._version, self._implementation)\n if native_dtype != "object"\n else object_native_to_narwhals_dtype(\n self.native, self._version, self._implementation\n )\n )\n\n @property\n def 
_array_funcs(self): # type: ignore[no-untyped-def] # noqa: ANN202\n if TYPE_CHECKING:\n import numpy as np\n\n return np\n else:\n return import_array_module(self._implementation)\n\n def ewm_mean(\n self,\n *,\n com: float | None,\n span: float | None,\n half_life: float | None,\n alpha: float | None,\n adjust: bool,\n min_samples: int,\n ignore_nulls: bool,\n ) -> Self:\n ser = self.native\n mask_na = ser.isna()\n if self._implementation is Implementation.CUDF:\n if (min_samples == 0 and not ignore_nulls) or (not mask_na.any()):\n result = ser.ewm(\n com=com, span=span, halflife=half_life, alpha=alpha, adjust=adjust\n ).mean()\n else:\n msg = (\n "cuDF only supports `ewm_mean` when there are no missing values "\n "or when both `min_period=0` and `ignore_nulls=False`"\n )\n raise NotImplementedError(msg)\n else:\n result = ser.ewm(\n com, span, half_life, alpha, min_samples, adjust, ignore_na=ignore_nulls\n ).mean()\n result[mask_na] = None\n return self._with_native(result)\n\n def scatter(self, indices: int | Sequence[int], values: Any) -> Self:\n if isinstance(values, self.__class__):\n values = set_index(\n values.native,\n self.native.index[indices],\n implementation=self._implementation,\n backend_version=self._backend_version,\n )\n s = self.native.copy(deep=True)\n s.iloc[indices] = values\n s.name = self.name\n return self._with_native(s)\n\n def _scatter_in_place(self, indices: Self, values: Self) -> None:\n implementation = self._implementation\n backend_version = self._backend_version\n # Scatter, modifying original Series. 
Use with care!\n values_native = set_index(\n values.native,\n self.native.index[indices.native],\n implementation=implementation,\n backend_version=backend_version,\n )\n if implementation is Implementation.PANDAS and parse_version(np) < (2,):\n values_native = values_native.copy() # pragma: no cover\n min_pd_version = (1, 2)\n if implementation is Implementation.PANDAS and backend_version < min_pd_version:\n self.native.iloc[indices.native.values] = values_native # noqa: PD011\n else:\n self.native.iloc[indices.native] = values_native\n\n def cast(self, dtype: IntoDType) -> Self:\n pd_dtype = narwhals_to_native_dtype(\n dtype,\n dtype_backend=get_dtype_backend(self.native.dtype, self._implementation),\n implementation=self._implementation,\n backend_version=self._backend_version,\n version=self._version,\n )\n return self._with_native(self.native.astype(pd_dtype), preserve_broadcast=True)\n\n def item(self, index: int | None) -> Any:\n # cuDF doesn't have Series.item().\n if index is None:\n if len(self) != 1:\n msg = (\n "can only call '.item()' if the Series is of length 1,"\n f" or an explicit index is provided (Series is of length {len(self)})"\n )\n raise ValueError(msg)\n return self.native.iloc[0]\n return self.native.iloc[index]\n\n def to_frame(self) -> PandasLikeDataFrame:\n from narwhals._pandas_like.dataframe import PandasLikeDataFrame\n\n return PandasLikeDataFrame(\n self.native.to_frame(),\n implementation=self._implementation,\n backend_version=self._backend_version,\n version=self._version,\n validate_column_names=False,\n )\n\n def to_list(self) -> list[Any]:\n is_cudf = self._implementation.is_cudf()\n return self.native.to_arrow().to_pylist() if is_cudf else self.native.to_list()\n\n def is_between(\n self, lower_bound: Any, upper_bound: Any, closed: ClosedInterval\n ) -> Self:\n ser = self.native\n _, lower_bound = align_and_extract_native(self, lower_bound)\n _, upper_bound = align_and_extract_native(self, upper_bound)\n if closed == 
"left":\n res = ser.ge(lower_bound) & ser.lt(upper_bound)\n elif closed == "right":\n res = ser.gt(lower_bound) & ser.le(upper_bound)\n elif closed == "none":\n res = ser.gt(lower_bound) & ser.lt(upper_bound)\n elif closed == "both":\n res = ser.ge(lower_bound) & ser.le(upper_bound)\n else:\n assert_never(closed)\n return self._with_native(res).alias(ser.name)\n\n def is_in(self, other: Any) -> Self:\n return self._with_native(self.native.isin(other))\n\n def arg_true(self) -> Self:\n ser = self.native\n size = len(ser)\n data = self._array_funcs.arange(size)\n result = ser.__class__(data, name=ser.name, index=ser.index).loc[ser]\n return self._with_native(result)\n\n def arg_min(self) -> int:\n return self.native.argmin()\n\n def arg_max(self) -> int:\n return self.native.argmax()\n\n # Binary comparisons\n\n def filter(self, predicate: Any) -> Self:\n if not is_list_of(predicate, bool):\n _, other_native = align_and_extract_native(self, predicate)\n else:\n other_native = predicate\n return self._with_native(self.native.loc[other_native]).alias(self.name)\n\n def __eq__(self, other: object) -> Self: # type: ignore[override]\n ser, other = align_and_extract_native(self, other)\n return self._with_native(ser == other).alias(self.name)\n\n def __ne__(self, other: object) -> Self: # type: ignore[override]\n ser, other = align_and_extract_native(self, other)\n return self._with_native(ser != other).alias(self.name)\n\n def __ge__(self, other: Any) -> Self:\n ser, other = align_and_extract_native(self, other)\n return self._with_native(ser >= other).alias(self.name)\n\n def __gt__(self, other: Any) -> Self:\n ser, other = align_and_extract_native(self, other)\n return self._with_native(ser > other).alias(self.name)\n\n def __le__(self, other: Any) -> Self:\n ser, other = align_and_extract_native(self, other)\n return self._with_native(ser <= other).alias(self.name)\n\n def __lt__(self, other: Any) -> Self:\n ser, other = align_and_extract_native(self, other)\n return 
self._with_native(ser < other).alias(self.name)\n\n def __and__(self, other: Any) -> Self:\n ser, other = align_and_extract_native(self, other)\n return self._with_native(ser & other).alias(self.name)\n\n def __rand__(self, other: Any) -> Self:\n ser, other = align_and_extract_native(self, other)\n ser = cast("pd.Series[Any]", ser)\n return self._with_native(ser.__and__(other)).alias(self.name)\n\n def __or__(self, other: Any) -> Self:\n ser, other = align_and_extract_native(self, other)\n return self._with_native(ser | other).alias(self.name)\n\n def __ror__(self, other: Any) -> Self:\n ser, other = align_and_extract_native(self, other)\n ser = cast("pd.Series[Any]", ser)\n return self._with_native(ser.__or__(other)).alias(self.name)\n\n def __add__(self, other: Any) -> Self:\n ser, other = align_and_extract_native(self, other)\n return self._with_native(ser + other).alias(self.name)\n\n def __radd__(self, other: Any) -> Self:\n _, other_native = align_and_extract_native(self, other)\n return self._with_native(self.native.__radd__(other_native)).alias(self.name)\n\n def __sub__(self, other: Any) -> Self:\n ser, other = align_and_extract_native(self, other)\n return self._with_native(ser - other).alias(self.name)\n\n def __rsub__(self, other: Any) -> Self:\n _, other_native = align_and_extract_native(self, other)\n return self._with_native(self.native.__rsub__(other_native)).alias(self.name)\n\n def __mul__(self, other: Any) -> Self:\n ser, other = align_and_extract_native(self, other)\n return self._with_native(ser * other).alias(self.name)\n\n def __rmul__(self, other: Any) -> Self:\n _, other_native = align_and_extract_native(self, other)\n return self._with_native(self.native.__rmul__(other_native)).alias(self.name)\n\n def __truediv__(self, other: Any) -> Self:\n ser, other = align_and_extract_native(self, other)\n return self._with_native(ser / other).alias(self.name)\n\n def __rtruediv__(self, other: Any) -> Self:\n _, other_native = 
align_and_extract_native(self, other)\n return self._with_native(self.native.__rtruediv__(other_native)).alias(self.name)\n\n def __floordiv__(self, other: Any) -> Self:\n ser, other = align_and_extract_native(self, other)\n return self._with_native(ser // other).alias(self.name)\n\n def __rfloordiv__(self, other: Any) -> Self:\n _, other_native = align_and_extract_native(self, other)\n return self._with_native(self.native.__rfloordiv__(other_native)).alias(self.name)\n\n def __pow__(self, other: Any) -> Self:\n ser, other = align_and_extract_native(self, other)\n return self._with_native(ser**other).alias(self.name)\n\n def __rpow__(self, other: Any) -> Self:\n _, other_native = align_and_extract_native(self, other)\n return self._with_native(self.native.__rpow__(other_native)).alias(self.name)\n\n def __mod__(self, other: Any) -> Self:\n ser, other = align_and_extract_native(self, other)\n return self._with_native(ser % other).alias(self.name)\n\n def __rmod__(self, other: Any) -> Self:\n _, other_native = align_and_extract_native(self, other)\n return self._with_native(self.native.__rmod__(other_native)).alias(self.name)\n\n # Unary\n\n def __invert__(self) -> Self:\n return self._with_native(~self.native)\n\n # Reductions\n\n def any(self) -> bool:\n return self.native.any()\n\n def all(self) -> bool:\n return self.native.all()\n\n def min(self) -> Any:\n return self.native.min()\n\n def max(self) -> Any:\n return self.native.max()\n\n def sum(self) -> float:\n return self.native.sum()\n\n def count(self) -> int:\n return self.native.count()\n\n def mean(self) -> float:\n return self.native.mean()\n\n def median(self) -> float:\n if not self.dtype.is_numeric():\n msg = "`median` operation not supported for non-numeric input type."\n raise InvalidOperationError(msg)\n return self.native.median()\n\n def std(self, *, ddof: int) -> float:\n return self.native.std(ddof=ddof)\n\n def var(self, *, ddof: int) -> float:\n return self.native.var(ddof=ddof)\n\n def 
skew(self) -> float | None:\n ser_not_null = self.native.dropna()\n if len(ser_not_null) == 0:\n return None\n elif len(ser_not_null) == 1:\n return float("nan")\n elif len(ser_not_null) == 2:\n return 0.0\n else:\n m = ser_not_null - ser_not_null.mean()\n m2 = (m**2).mean()\n m3 = (m**3).mean()\n return m3 / (m2**1.5) if m2 != 0 else float("nan")\n\n def kurtosis(self) -> float | None:\n ser_not_null = self.native.dropna()\n if len(ser_not_null) == 0:\n return None\n elif len(ser_not_null) == 1:\n return float("nan")\n else:\n m = ser_not_null - ser_not_null.mean()\n m2 = (m**2).mean()\n m4 = (m**4).mean()\n return m4 / (m2**2) - 3.0 if m2 != 0 else float("nan")\n\n def len(self) -> int:\n return len(self.native)\n\n # Transformations\n\n def is_null(self) -> Self:\n return self._with_native(self.native.isna(), preserve_broadcast=True)\n\n def is_nan(self) -> Self:\n ser = self.native\n if self.dtype.is_numeric():\n return self._with_native(ser != ser, preserve_broadcast=True) # noqa: PLR0124\n msg = f"`.is_nan` only supported for numeric dtype and not {self.dtype}, did you mean `.is_null`?"\n raise InvalidOperationError(msg)\n\n def fill_null(\n self,\n value: Self | NonNestedLiteral,\n strategy: FillNullStrategy | None,\n limit: int | None,\n ) -> Self:\n ser = self.native\n kwargs = (\n {"downcast": False}\n if self._implementation is Implementation.PANDAS\n and self._backend_version < (3,)\n else {}\n )\n with warnings.catch_warnings():\n warnings.filterwarnings(\n "ignore", "The 'downcast' keyword .*is deprecated", category=FutureWarning\n )\n if value is not None:\n _, native_value = align_and_extract_native(self, value)\n res_ser = self._with_native(\n ser.fillna(value=native_value, **kwargs), preserve_broadcast=True\n )\n else:\n res_ser = self._with_native(\n ser.ffill(limit=limit, **kwargs)\n if strategy == "forward"\n else ser.bfill(limit=limit, **kwargs),\n preserve_broadcast=True,\n )\n return res_ser\n\n def drop_nulls(self) -> Self:\n return 
self._with_native(self.native.dropna())\n\n def n_unique(self) -> int:\n return self.native.nunique(dropna=False)\n\n def sample(\n self,\n n: int | None,\n *,\n fraction: float | None,\n with_replacement: bool,\n seed: int | None,\n ) -> Self:\n return self._with_native(\n self.native.sample(\n n=n, frac=fraction, replace=with_replacement, random_state=seed\n )\n )\n\n def abs(self) -> Self:\n return self._with_native(self.native.abs())\n\n def cum_sum(self, *, reverse: bool) -> Self:\n result = (\n self.native.cumsum(skipna=True)\n if not reverse\n else self.native[::-1].cumsum(skipna=True)[::-1]\n )\n return self._with_native(result)\n\n def unique(self, *, maintain_order: bool = True) -> Self:\n """Pandas always maintains order, as per its docstring.\n\n > Uniques are returned in order of appearance.\n """\n return self._with_native(type(self.native)(self.native.unique(), name=self.name))\n\n def diff(self) -> Self:\n return self._with_native(self.native.diff())\n\n def shift(self, n: int) -> Self:\n return self._with_native(self.native.shift(n))\n\n def replace_strict(\n self,\n old: Sequence[Any] | Mapping[Any, Any],\n new: Sequence[Any],\n *,\n return_dtype: IntoDType | None,\n ) -> PandasLikeSeries:\n tmp_name = f"{self.name}_tmp"\n dtype_backend = get_dtype_backend(self.native.dtype, self._implementation)\n dtype = (\n narwhals_to_native_dtype(\n return_dtype,\n dtype_backend,\n self._implementation,\n self._backend_version,\n self._version,\n )\n if return_dtype\n else None\n )\n namespace = self.__native_namespace__()\n other = namespace.DataFrame(\n {self.name: old, tmp_name: namespace.Series(new, dtype=dtype)}\n )\n result = self._with_native(\n self.native.to_frame().merge(other, on=self.name, how="left")[tmp_name]\n ).alias(self.name)\n if result.is_null().sum() != self.is_null().sum():\n msg = (\n "replace_strict did not replace all non-null values.\n\n"\n f"The following did not get replaced: {self.filter(~self.is_null() & 
result.is_null()).unique(maintain_order=False).to_list()}"\n )\n raise ValueError(msg)\n return result\n\n def sort(self, *, descending: bool, nulls_last: bool) -> PandasLikeSeries:\n na_position = "last" if nulls_last else "first"\n return self._with_native(\n self.native.sort_values(ascending=not descending, na_position=na_position)\n ).alias(self.name)\n\n def alias(self, name: str | Hashable) -> Self:\n if name != self.name:\n return self._with_native(\n rename(\n self.native,\n name,\n implementation=self._implementation,\n backend_version=self._backend_version,\n ),\n preserve_broadcast=True,\n )\n return self\n\n def __array__(self, dtype: Any, *, copy: bool | None) -> _1DArray:\n # pandas used to always return object dtype for nullable dtypes.\n # So, we intercept __array__ and pass to `to_numpy` ourselves to make\n # sure an appropriate numpy dtype is returned.\n return self.to_numpy(dtype=dtype, copy=copy)\n\n def to_numpy(self, dtype: Any = None, *, copy: bool | None = None) -> _1DArray:\n # the default is meant to be None, but pandas doesn't allow it?\n # https://numpy.org/doc/stable/reference/generated/numpy.ndarray.__array__.html\n dtypes = self._version.dtypes\n if isinstance(self.dtype, dtypes.Datetime) and self.dtype.time_zone is not None:\n s = self.dt.convert_time_zone("UTC").dt.replace_time_zone(None).native\n else:\n s = self.native\n\n has_missing = s.isna().any()\n kwargs: dict[Any, Any] = {"copy": copy or self._implementation.is_cudf()}\n if has_missing and str(s.dtype) in PANDAS_TO_NUMPY_DTYPE_MISSING:\n kwargs.update({"na_value": float("nan")})\n dtype = dtype or PANDAS_TO_NUMPY_DTYPE_MISSING[str(s.dtype)]\n if not has_missing and str(s.dtype) in PANDAS_TO_NUMPY_DTYPE_NO_MISSING:\n dtype = dtype or PANDAS_TO_NUMPY_DTYPE_NO_MISSING[str(s.dtype)]\n return s.to_numpy(dtype=dtype, **kwargs)\n\n def to_pandas(self) -> pd.Series[Any]:\n if self._implementation is Implementation.PANDAS:\n return self.native\n elif self._implementation is 
Implementation.CUDF: # pragma: no cover\n return self.native.to_pandas()\n elif self._implementation is Implementation.MODIN:\n return self.native._to_pandas()\n msg = f"Unknown implementation: {self._implementation}" # pragma: no cover\n raise AssertionError(msg)\n\n def to_polars(self) -> pl.Series:\n import polars as pl # ignore-banned-import\n\n return pl.from_pandas(self.to_pandas())\n\n # --- descriptive ---\n def is_unique(self) -> Self:\n return self._with_native(~self.native.duplicated(keep=False)).alias(self.name)\n\n def null_count(self) -> int:\n return self.native.isna().sum()\n\n def is_first_distinct(self) -> Self:\n return self._with_native(~self.native.duplicated(keep="first")).alias(self.name)\n\n def is_last_distinct(self) -> Self:\n return self._with_native(~self.native.duplicated(keep="last")).alias(self.name)\n\n def is_sorted(self, *, descending: bool) -> bool:\n if not isinstance(descending, bool):\n msg = f"argument 'descending' should be boolean, found {type(descending)}"\n raise TypeError(msg)\n\n if descending:\n return self.native.is_monotonic_decreasing\n else:\n return self.native.is_monotonic_increasing\n\n def value_counts(\n self, *, sort: bool, parallel: bool, name: str | None, normalize: bool\n ) -> PandasLikeDataFrame:\n """Parallel is unused, exists for compatibility."""\n from narwhals._pandas_like.dataframe import PandasLikeDataFrame\n\n index_name_ = "index" if self._name is None else self._name\n value_name_ = name or ("proportion" if normalize else "count")\n val_count = self.native.value_counts(\n dropna=False, sort=False, normalize=normalize\n ).reset_index()\n\n val_count.columns = [index_name_, value_name_]\n\n if sort:\n val_count = val_count.sort_values(value_name_, ascending=False)\n\n return PandasLikeDataFrame.from_native(val_count, context=self)\n\n def quantile(\n self, quantile: float, interpolation: RollingInterpolationMethod\n ) -> float:\n return self.native.quantile(q=quantile, 
interpolation=interpolation)\n\n def zip_with(self, mask: Any, other: Any) -> Self:\n ser = self.native\n _, mask = align_and_extract_native(self, mask)\n _, other = align_and_extract_native(self, other)\n res = ser.where(mask, other)\n return self._with_native(res)\n\n def head(self, n: int) -> Self:\n return self._with_native(self.native.head(n))\n\n def tail(self, n: int) -> Self:\n return self._with_native(self.native.tail(n))\n\n def round(self, decimals: int) -> Self:\n return self._with_native(self.native.round(decimals=decimals))\n\n def to_dummies(self, *, separator: str, drop_first: bool) -> PandasLikeDataFrame:\n from narwhals._pandas_like.dataframe import PandasLikeDataFrame\n\n plx = self.__native_namespace__()\n series = self.native\n name = str(self._name) if self._name else ""\n\n null_col_pl = f"{name}{separator}null"\n\n has_nulls = series.isna().any()\n result = plx.get_dummies(\n series,\n prefix=name,\n prefix_sep=separator,\n drop_first=drop_first,\n # Adds a null column at the end, depending on whether or not there are any.\n dummy_na=has_nulls,\n dtype="int8",\n )\n if has_nulls:\n *cols, null_col_pd = list(result.columns)\n output_order = [null_col_pd, *cols]\n result = rename(\n select_columns_by_name(\n result, output_order, self._backend_version, self._implementation\n ),\n columns={null_col_pd: null_col_pl},\n implementation=self._implementation,\n backend_version=self._backend_version,\n )\n return PandasLikeDataFrame.from_native(result, context=self)\n\n def gather_every(self, n: int, offset: int) -> Self:\n return self._with_native(self.native.iloc[offset::n])\n\n def clip(\n self,\n lower_bound: Self | NumericLiteral | TemporalLiteral | None,\n upper_bound: Self | NumericLiteral | TemporalLiteral | None,\n ) -> Self:\n _, lower = (\n align_and_extract_native(self, lower_bound) if lower_bound else (None, None)\n )\n _, upper = (\n align_and_extract_native(self, upper_bound) if upper_bound else (None, None)\n )\n kwargs = {"axis": 0} 
if self._implementation is Implementation.MODIN else {}\n return self._with_native(self.native.clip(lower, upper, **kwargs))\n\n def to_arrow(self) -> pa.Array[Any]:\n if self._implementation is Implementation.CUDF:\n return self.native.to_arrow()\n\n import pyarrow as pa # ignore-banned-import()\n\n return pa.Array.from_pandas(self.native)\n\n def mode(self) -> Self:\n result = self.native.mode()\n result.name = self.name\n return self._with_native(result)\n\n def cum_count(self, *, reverse: bool) -> Self:\n not_na_series = ~self.native.isna()\n result = (\n not_na_series.cumsum()\n if not reverse\n else len(self) - not_na_series.cumsum() + not_na_series - 1\n )\n return self._with_native(result)\n\n def cum_min(self, *, reverse: bool) -> Self:\n result = (\n self.native.cummin(skipna=True)\n if not reverse\n else self.native[::-1].cummin(skipna=True)[::-1]\n )\n return self._with_native(result)\n\n def cum_max(self, *, reverse: bool) -> Self:\n result = (\n self.native.cummax(skipna=True)\n if not reverse\n else self.native[::-1].cummax(skipna=True)[::-1]\n )\n return self._with_native(result)\n\n def cum_prod(self, *, reverse: bool) -> Self:\n result = (\n self.native.cumprod(skipna=True)\n if not reverse\n else self.native[::-1].cumprod(skipna=True)[::-1]\n )\n return self._with_native(result)\n\n def rolling_sum(self, window_size: int, *, min_samples: int, center: bool) -> Self:\n result = self.native.rolling(\n window=window_size, min_periods=min_samples, center=center\n ).sum()\n return self._with_native(result)\n\n def rolling_mean(self, window_size: int, *, min_samples: int, center: bool) -> Self:\n result = self.native.rolling(\n window=window_size, min_periods=min_samples, center=center\n ).mean()\n return self._with_native(result)\n\n def rolling_var(\n self, window_size: int, *, min_samples: int, center: bool, ddof: int\n ) -> Self:\n result = self.native.rolling(\n window=window_size, min_periods=min_samples, center=center\n ).var(ddof=ddof)\n return 
self._with_native(result)\n\n def rolling_std(\n self, window_size: int, *, min_samples: int, center: bool, ddof: int\n ) -> Self:\n result = self.native.rolling(\n window=window_size, min_periods=min_samples, center=center\n ).std(ddof=ddof)\n return self._with_native(result)\n\n def __iter__(self) -> Iterator[Any]:\n yield from self.native.__iter__()\n\n def __contains__(self, other: Any) -> bool:\n return self.native.isna().any() if other is None else (self.native == other).any()\n\n def is_finite(self) -> Self:\n s = self.native\n return self._with_native((s > float("-inf")) & (s < float("inf")))\n\n def rank(self, method: RankMethod, *, descending: bool) -> Self:\n pd_method = "first" if method == "ordinal" else method\n name = self.name\n if (\n self._implementation is Implementation.PANDAS\n and self._backend_version < (3,)\n and self.dtype.is_integer()\n and (null_mask := self.native.isna()).any()\n ):\n # crazy workaround for the case of `na_option="keep"` and nullable\n # integer dtypes. 
This should be supported in pandas > 3.0\n # https://github.com/pandas-dev/pandas/issues/56976\n ranked_series = (\n self.native.to_frame()\n .assign(**{f"{name}_is_null": null_mask})\n .groupby(f"{name}_is_null")\n .rank(\n method=pd_method,\n na_option="keep",\n ascending=not descending,\n pct=False,\n )[name]\n )\n else:\n ranked_series = self.native.rank(\n method=pd_method, na_option="keep", ascending=not descending, pct=False\n )\n return self._with_native(ranked_series)\n\n def hist( # noqa: C901, PLR0912\n self,\n bins: list[float | int] | None,\n *,\n bin_count: int | None,\n include_breakpoint: bool,\n ) -> PandasLikeDataFrame:\n from numpy import linspace, zeros\n\n from narwhals._pandas_like.dataframe import PandasLikeDataFrame\n\n ns = self.__native_namespace__()\n data: dict[str, Sequence[int | float | str] | _AnyDArray]\n\n if bin_count == 0 or (bins is not None and len(bins) <= 1):\n data = {}\n if include_breakpoint:\n data["breakpoint"] = []\n data["count"] = []\n return PandasLikeDataFrame.from_native(ns.DataFrame(data), context=self)\n\n if self.native.count() < 1:\n if bins is not None:\n data = {"breakpoint": bins[1:], "count": zeros(shape=len(bins) - 1)}\n else:\n count = cast("int", bin_count)\n if bin_count == 1:\n data = {"breakpoint": [1.0], "count": [0]}\n else:\n data = {\n "breakpoint": linspace(0, 1, count + 1)[1:],\n "count": zeros(shape=count),\n }\n if not include_breakpoint:\n del data["breakpoint"]\n return PandasLikeDataFrame.from_native(ns.DataFrame(data), context=self)\n\n if bin_count is not None:\n # use Polars binning behavior\n lower, upper = self.native.min(), self.native.max()\n if lower == upper:\n lower -= 0.5\n upper += 0.5\n\n if bin_count == 1:\n data = {"breakpoint": [upper], "count": [self.native.count()]}\n if not include_breakpoint:\n del data["breakpoint"]\n return PandasLikeDataFrame.from_native(ns.DataFrame(data), context=self)\n\n bins = linspace(lower, upper, bin_count + 1)\n bin_count = None\n\n # pandas 
(2.2.*) .value_counts(bins=int) adjusts the lowest bin twice, result in improper counts.\n # pandas (2.2.*) .value_counts(bins=[...]) adjusts the lowest bin which should not happen since\n # the bins were explicitly passed in.\n categories = ns.cut(\n self.native,\n bins=bins if bin_count is None else bin_count,\n include_lowest=True, # Polars 1.27.0 always includes the lowest bin\n )\n # modin (0.32.0) .value_counts(...) silently drops bins with empty observations, .reindex\n # is necessary to restore these bins.\n result = categories.value_counts(dropna=True, sort=False).reindex(\n categories.cat.categories, fill_value=0\n )\n data = {}\n if include_breakpoint:\n data["breakpoint"] = bins[1:] if bins is not None else result.index.right\n data["count"] = result.reset_index(drop=True)\n return PandasLikeDataFrame.from_native(ns.DataFrame(data), context=self)\n\n def log(self, base: float) -> Self:\n native = self.native\n native_cls = type(native)\n implementation = self._implementation\n\n if get_dtype_backend(native.dtype, implementation=implementation) == "pyarrow":\n import pyarrow.compute as pc\n\n from narwhals._arrow.utils import native_to_narwhals_dtype\n\n ca = native.array._pa_array\n result_arr = cast("ChunkedArrayAny", pc.logb(ca, base))\n nw_dtype = native_to_narwhals_dtype(result_arr.type, self._version)\n out_dtype = narwhals_to_native_dtype(\n nw_dtype,\n "pyarrow",\n self._implementation,\n self._backend_version,\n self._version,\n )\n result_native = native_cls(\n result_arr, dtype=out_dtype, index=native.index, name=native.name\n )\n else:\n array_funcs = self._array_funcs\n result_arr = array_funcs.log(native) / array_funcs.log(base)\n result_native = (\n native_cls(result_arr, index=native.index, name=native.name)\n if implementation.is_cudf()\n else result_arr\n )\n\n return self._with_native(result_native)\n\n def exp(self) -> Self:\n native = self.native\n native_cls = type(native)\n implementation = self._implementation\n\n if 
get_dtype_backend(native.dtype, implementation=implementation) == "pyarrow":\n import pyarrow.compute as pc\n\n from narwhals._arrow.utils import native_to_narwhals_dtype\n\n ca = native.array._pa_array\n result_arr = cast("ChunkedArrayAny", pc.exp(ca))\n nw_dtype = native_to_narwhals_dtype(result_arr.type, self._version)\n out_dtype = narwhals_to_native_dtype(\n nw_dtype,\n "pyarrow",\n self._implementation,\n self._backend_version,\n self._version,\n )\n result_native = native_cls(\n result_arr, dtype=out_dtype, index=native.index, name=native.name\n )\n else:\n result_arr = self._array_funcs.exp(native)\n result_native = (\n native_cls(result_arr, index=native.index, name=native.name)\n if implementation.is_cudf()\n else result_arr\n )\n\n return self._with_native(result_native)\n\n def sqrt(self) -> Self:\n return self._with_native(self.native.pow(0.5))\n\n @property\n def str(self) -> PandasLikeSeriesStringNamespace:\n return PandasLikeSeriesStringNamespace(self)\n\n @property\n def dt(self) -> PandasLikeSeriesDateTimeNamespace:\n return PandasLikeSeriesDateTimeNamespace(self)\n\n @property\n def cat(self) -> PandasLikeSeriesCatNamespace:\n return PandasLikeSeriesCatNamespace(self)\n\n @property\n def list(self) -> PandasLikeSeriesListNamespace:\n if not hasattr(self.native, "list"):\n msg = "Series must be of PyArrow List type to support list namespace."\n raise TypeError(msg)\n return PandasLikeSeriesListNamespace(self)\n\n @property\n def struct(self) -> PandasLikeSeriesStructNamespace:\n if not hasattr(self.native, "struct"):\n msg = "Series must be of PyArrow Struct type to support struct namespace."\n raise TypeError(msg)\n return PandasLikeSeriesStructNamespace(self)\n
.venv\Lib\site-packages\narwhals\_pandas_like\series.py
series.py
Python
42,479
0.95
0.180952
0.034413
python-kit
900
2023-11-29T10:52:48.071162
GPL-3.0
false
0e3a07d742356167b0b3b3f05aa68890
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom narwhals._compliant.any_namespace import CatNamespace\nfrom narwhals._pandas_like.utils import PandasLikeSeriesNamespace\n\nif TYPE_CHECKING:\n from narwhals._pandas_like.series import PandasLikeSeries\n\n\nclass PandasLikeSeriesCatNamespace(\n PandasLikeSeriesNamespace, CatNamespace["PandasLikeSeries"]\n):\n def get_categories(self) -> PandasLikeSeries:\n s = self.native\n return self.with_native(type(s)(s.cat.categories, name=s.name))\n
.venv\Lib\site-packages\narwhals\_pandas_like\series_cat.py
series_cat.py
Python
527
0.85
0.176471
0
react-lib
788
2024-12-14T00:49:10.929469
BSD-3-Clause
false
7ba7ee4dfa7f8f97bf108e3ce151ee87
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any\n\nfrom narwhals._compliant.any_namespace import DateTimeNamespace\nfrom narwhals._constants import (\n EPOCH_YEAR,\n MS_PER_SECOND,\n NS_PER_SECOND,\n SECONDS_PER_DAY,\n US_PER_SECOND,\n)\nfrom narwhals._duration import parse_interval_string\nfrom narwhals._pandas_like.utils import (\n UNIT_DICT,\n PandasLikeSeriesNamespace,\n calculate_timestamp_date,\n calculate_timestamp_datetime,\n get_dtype_backend,\n int_dtype_mapper,\n is_dtype_pyarrow,\n)\n\nif TYPE_CHECKING:\n from narwhals._pandas_like.series import PandasLikeSeries\n from narwhals.typing import TimeUnit\n\n\nclass PandasLikeSeriesDateTimeNamespace(\n PandasLikeSeriesNamespace, DateTimeNamespace["PandasLikeSeries"]\n):\n def date(self) -> PandasLikeSeries:\n result = self.with_native(self.native.dt.date)\n if str(result.dtype).lower() == "object":\n msg = (\n "Accessing `date` on the default pandas backend "\n "will return a Series of type `object`."\n "\nThis differs from polars API and will prevent `.dt` chaining. 
"\n "Please switch to the `pyarrow` backend:"\n '\ndf.convert_dtypes(dtype_backend="pyarrow")'\n )\n raise NotImplementedError(msg)\n return result\n\n def year(self) -> PandasLikeSeries:\n return self.with_native(self.native.dt.year)\n\n def month(self) -> PandasLikeSeries:\n return self.with_native(self.native.dt.month)\n\n def day(self) -> PandasLikeSeries:\n return self.with_native(self.native.dt.day)\n\n def hour(self) -> PandasLikeSeries:\n return self.with_native(self.native.dt.hour)\n\n def minute(self) -> PandasLikeSeries:\n return self.with_native(self.native.dt.minute)\n\n def second(self) -> PandasLikeSeries:\n return self.with_native(self.native.dt.second)\n\n def millisecond(self) -> PandasLikeSeries:\n return self.microsecond() // 1000\n\n def microsecond(self) -> PandasLikeSeries:\n if self.backend_version < (3, 0, 0) and self._is_pyarrow():\n # crazy workaround for https://github.com/pandas-dev/pandas/issues/59154\n import pyarrow.compute as pc # ignore-banned-import()\n\n from narwhals._arrow.utils import lit\n\n arr_ns = self.native.array\n arr = arr_ns.__arrow_array__()\n result_arr = pc.add(\n pc.multiply(pc.millisecond(arr), lit(1_000)), pc.microsecond(arr)\n )\n result = type(self.native)(type(arr_ns)(result_arr), name=self.native.name)\n return self.with_native(result)\n\n return self.with_native(self.native.dt.microsecond)\n\n def nanosecond(self) -> PandasLikeSeries:\n return self.microsecond() * 1_000 + self.native.dt.nanosecond\n\n def ordinal_day(self) -> PandasLikeSeries:\n year_start = self.native.dt.year\n result = (\n self.native.to_numpy().astype("datetime64[D]")\n - (year_start.to_numpy() - EPOCH_YEAR).astype("datetime64[Y]")\n ).astype("int32") + 1\n dtype = "Int64[pyarrow]" if self._is_pyarrow() else "int32"\n return self.with_native(\n type(self.native)(result, dtype=dtype, name=year_start.name)\n )\n\n def weekday(self) -> PandasLikeSeries:\n # Pandas is 0-6 while Polars is 1-7\n return self.with_native(self.native.dt.weekday) 
+ 1\n\n def _is_pyarrow(self) -> bool:\n return is_dtype_pyarrow(self.native.dtype)\n\n def _get_total_seconds(self) -> Any:\n if hasattr(self.native.dt, "total_seconds"):\n return self.native.dt.total_seconds()\n else: # pragma: no cover\n return (\n self.native.dt.days * SECONDS_PER_DAY\n + self.native.dt.seconds\n + (self.native.dt.microseconds / US_PER_SECOND)\n + (self.native.dt.nanoseconds / NS_PER_SECOND)\n )\n\n def total_minutes(self) -> PandasLikeSeries:\n s = self._get_total_seconds()\n # this calculates the sign of each series element\n s_sign = 2 * (s > 0).astype(int_dtype_mapper(s.dtype)) - 1\n s_abs = s.abs() // 60\n if ~s.isna().any():\n s_abs = s_abs.astype(int_dtype_mapper(s.dtype))\n return self.with_native(s_abs * s_sign)\n\n def total_seconds(self) -> PandasLikeSeries:\n s = self._get_total_seconds()\n # this calculates the sign of each series element\n s_sign = 2 * (s > 0).astype(int_dtype_mapper(s.dtype)) - 1\n s_abs = s.abs() // 1\n if ~s.isna().any():\n s_abs = s_abs.astype(int_dtype_mapper(s.dtype))\n return self.with_native(s_abs * s_sign)\n\n def total_milliseconds(self) -> PandasLikeSeries:\n s = self._get_total_seconds() * MS_PER_SECOND\n # this calculates the sign of each series element\n s_sign = 2 * (s > 0).astype(int_dtype_mapper(s.dtype)) - 1\n s_abs = s.abs() // 1\n if ~s.isna().any():\n s_abs = s_abs.astype(int_dtype_mapper(s.dtype))\n return self.with_native(s_abs * s_sign)\n\n def total_microseconds(self) -> PandasLikeSeries:\n s = self._get_total_seconds() * US_PER_SECOND\n # this calculates the sign of each series element\n s_sign = 2 * (s > 0).astype(int_dtype_mapper(s.dtype)) - 1\n s_abs = s.abs() // 1\n if ~s.isna().any():\n s_abs = s_abs.astype(int_dtype_mapper(s.dtype))\n return self.with_native(s_abs * s_sign)\n\n def total_nanoseconds(self) -> PandasLikeSeries:\n s = self._get_total_seconds() * NS_PER_SECOND\n # this calculates the sign of each series element\n s_sign = 2 * (s > 0).astype(int_dtype_mapper(s.dtype)) - 
1\n s_abs = s.abs() // 1\n if ~s.isna().any():\n s_abs = s_abs.astype(int_dtype_mapper(s.dtype))\n return self.with_native(s_abs * s_sign)\n\n def to_string(self, format: str) -> PandasLikeSeries:\n # Polars' parser treats `'%.f'` as pandas does `'.%f'`\n # PyArrow interprets `'%S'` as "seconds, plus fractional seconds"\n # and doesn't support `%f`\n if not self._is_pyarrow():\n format = format.replace("%S%.f", "%S.%f")\n else:\n format = format.replace("%S.%f", "%S").replace("%S%.f", "%S")\n return self.with_native(self.native.dt.strftime(format))\n\n def replace_time_zone(self, time_zone: str | None) -> PandasLikeSeries:\n de_zone = self.native.dt.tz_localize(None)\n result = de_zone.dt.tz_localize(time_zone) if time_zone is not None else de_zone\n return self.with_native(result)\n\n def convert_time_zone(self, time_zone: str) -> PandasLikeSeries:\n if self.compliant.dtype.time_zone is None: # type: ignore[attr-defined]\n result = self.native.dt.tz_localize("UTC").dt.tz_convert(time_zone)\n else:\n result = self.native.dt.tz_convert(time_zone)\n return self.with_native(result)\n\n def timestamp(self, time_unit: TimeUnit) -> PandasLikeSeries:\n s = self.native\n dtype = self.compliant.dtype\n mask_na = s.isna()\n dtypes = self.version.dtypes\n if dtype == dtypes.Date:\n # Date is only supported in pandas dtypes if pyarrow-backed\n s_cast = s.astype("Int32[pyarrow]")\n result = calculate_timestamp_date(s_cast, time_unit)\n elif isinstance(dtype, dtypes.Datetime):\n fn = (\n s.view\n if (self.implementation.is_pandas() and self.backend_version < (2,))\n else s.astype\n )\n s_cast = fn("Int64[pyarrow]") if self._is_pyarrow() else fn("int64")\n result = calculate_timestamp_datetime(s_cast, dtype.time_unit, time_unit)\n else:\n msg = "Input should be either of Date or Datetime type"\n raise TypeError(msg)\n result[mask_na] = None\n return self.with_native(result)\n\n def truncate(self, every: str) -> PandasLikeSeries:\n multiple, unit = parse_interval_string(every)\n 
native = self.native\n if self.implementation.is_cudf():\n if multiple != 1:\n msg = f"Only multiple `1` is supported for cuDF, got: {multiple}."\n raise NotImplementedError(msg)\n return self.with_native(self.native.dt.floor(UNIT_DICT.get(unit, unit)))\n dtype_backend = get_dtype_backend(native.dtype, self.compliant._implementation)\n if unit in {"mo", "q", "y"}:\n if self.implementation.is_cudf():\n msg = f"Truncating to {unit} is not supported yet for cuDF."\n raise NotImplementedError(msg)\n if dtype_backend == "pyarrow":\n import pyarrow.compute as pc # ignore-banned-import\n\n from narwhals._arrow.utils import UNITS_DICT\n\n ca = native.array._pa_array\n result_arr = pc.floor_temporal(ca, multiple, UNITS_DICT[unit])\n else:\n if unit == "q":\n multiple *= 3\n np_unit = "M"\n elif unit == "mo":\n np_unit = "M"\n else:\n np_unit = "Y"\n arr = native.values\n arr_dtype = arr.dtype\n result_arr = arr.astype(f"datetime64[{multiple}{np_unit}]").astype(\n arr_dtype\n )\n result_native = type(native)(\n result_arr, dtype=native.dtype, index=native.index, name=native.name\n )\n return self.with_native(result_native)\n return self.with_native(\n self.native.dt.floor(f"{multiple}{UNIT_DICT.get(unit, unit)}")\n )\n
.venv\Lib\site-packages\narwhals\_pandas_like\series_dt.py
series_dt.py
Python
9,769
0.95
0.217213
0.052133
react-lib
453
2024-07-19T10:02:46.926041
MIT
false
620a255b4589fd899257e40c48dd8923
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom narwhals._compliant.any_namespace import ListNamespace\nfrom narwhals._pandas_like.utils import (\n PandasLikeSeriesNamespace,\n get_dtype_backend,\n narwhals_to_native_dtype,\n)\n\nif TYPE_CHECKING:\n from narwhals._pandas_like.series import PandasLikeSeries\n\n\nclass PandasLikeSeriesListNamespace(\n PandasLikeSeriesNamespace, ListNamespace["PandasLikeSeries"]\n):\n def len(self) -> PandasLikeSeries:\n result = self.native.list.len()\n implementation = self.implementation\n backend_version = self.backend_version\n if implementation.is_pandas() and backend_version < (3, 0): # pragma: no cover\n # `result` is a new object so it's safe to do this inplace.\n result.index = self.native.index\n dtype = narwhals_to_native_dtype(\n self.version.dtypes.UInt32(),\n get_dtype_backend(result.dtype, implementation),\n implementation,\n backend_version,\n self.version,\n )\n return self.with_native(result.astype(dtype)).alias(self.native.name)\n
.venv\Lib\site-packages\narwhals\_pandas_like\series_list.py
series_list.py
Python
1,138
0.95
0.121212
0.035714
python-kit
277
2023-12-07T13:47:34.738099
Apache-2.0
false
961d0924ba33a5fe4985a990154a5203
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any\n\nfrom narwhals._compliant.any_namespace import StringNamespace\nfrom narwhals._pandas_like.utils import PandasLikeSeriesNamespace, is_dtype_pyarrow\n\nif TYPE_CHECKING:\n from narwhals._pandas_like.series import PandasLikeSeries\n\n\nclass PandasLikeSeriesStringNamespace(\n PandasLikeSeriesNamespace, StringNamespace["PandasLikeSeries"]\n):\n def len_chars(self) -> PandasLikeSeries:\n return self.with_native(self.native.str.len())\n\n def replace(\n self, pattern: str, value: str, *, literal: bool, n: int\n ) -> PandasLikeSeries:\n return self.with_native(\n self.native.str.replace(pat=pattern, repl=value, n=n, regex=not literal)\n )\n\n def replace_all(self, pattern: str, value: str, *, literal: bool) -> PandasLikeSeries:\n return self.replace(pattern, value, literal=literal, n=-1)\n\n def strip_chars(self, characters: str | None) -> PandasLikeSeries:\n return self.with_native(self.native.str.strip(characters))\n\n def starts_with(self, prefix: str) -> PandasLikeSeries:\n return self.with_native(self.native.str.startswith(prefix))\n\n def ends_with(self, suffix: str) -> PandasLikeSeries:\n return self.with_native(self.native.str.endswith(suffix))\n\n def contains(self, pattern: str, *, literal: bool) -> PandasLikeSeries:\n return self.with_native(self.native.str.contains(pat=pattern, regex=not literal))\n\n def slice(self, offset: int, length: int | None) -> PandasLikeSeries:\n stop = offset + length if length else None\n return self.with_native(self.native.str.slice(start=offset, stop=stop))\n\n def split(self, by: str) -> PandasLikeSeries:\n implementation = self.implementation\n if not implementation.is_cudf() and not is_dtype_pyarrow(self.native.dtype):\n msg = (\n "This operation requires a pyarrow-backed series. "\n "Please refer to https://narwhals-dev.github.io/narwhals/api-reference/narwhals/#narwhals.maybe_convert_dtypes "\n "and ensure you are using dtype_backend='pyarrow'. 
"\n "Additionally, make sure you have pandas version 1.5+ and pyarrow installed. "\n )\n raise TypeError(msg)\n return self.with_native(self.native.str.split(pat=by))\n\n def to_datetime(self, format: str | None) -> PandasLikeSeries:\n # If we know inputs are timezone-aware, we can pass `utc=True` for better performance.\n if format and any(x in format for x in ("%z", "Z")):\n return self.with_native(self._to_datetime(format, utc=True))\n result = self.with_native(self._to_datetime(format, utc=False))\n if (tz := getattr(result.dtype, "time_zone", None)) and tz != "UTC":\n return result.dt.convert_time_zone("UTC")\n return result\n\n def _to_datetime(self, format: str | None, *, utc: bool) -> Any:\n result = self.implementation.to_native_namespace().to_datetime(\n self.native, format=format, utc=utc\n )\n return (\n result.convert_dtypes(dtype_backend="pyarrow")\n if is_dtype_pyarrow(self.native.dtype)\n else result\n )\n\n def to_date(self, format: str | None) -> PandasLikeSeries:\n return self.to_datetime(format=format).dt.date()\n\n def to_uppercase(self) -> PandasLikeSeries:\n return self.with_native(self.native.str.upper())\n\n def to_lowercase(self) -> PandasLikeSeries:\n return self.with_native(self.native.str.lower())\n\n def zfill(self, width: int) -> PandasLikeSeries:\n return self.with_native(self.native.str.zfill(width))\n
.venv\Lib\site-packages\narwhals\_pandas_like\series_str.py
series_str.py
Python
3,680
0.95
0.282353
0.015152
python-kit
61
2025-02-10T13:10:25.176973
GPL-3.0
false
72289ba142ef43d847aaf2ba6b675f14
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom narwhals._compliant.any_namespace import StructNamespace\nfrom narwhals._pandas_like.utils import PandasLikeSeriesNamespace\n\nif TYPE_CHECKING:\n from narwhals._pandas_like.series import PandasLikeSeries\n\n\nclass PandasLikeSeriesStructNamespace(\n PandasLikeSeriesNamespace, StructNamespace["PandasLikeSeries"]\n):\n def field(self, name: str) -> PandasLikeSeries:\n return self.with_native(self.native.struct.field(name)).alias(name)\n
.venv\Lib\site-packages\narwhals\_pandas_like\series_struct.py
series_struct.py
Python
518
0.85
0.1875
0
node-utils
908
2024-07-22T01:10:38.385347
MIT
false
7ef9d19ca531b26b656b98d6270e463a
from __future__ import annotations # pragma: no cover\n\nfrom typing import TYPE_CHECKING # pragma: no cover\n\nfrom narwhals._typing_compat import TypeVar\n\nif TYPE_CHECKING:\n from typing import Any\n\n import pandas as pd\n from typing_extensions import TypeAlias\n\n from narwhals._namespace import (\n _CuDFDataFrame,\n _CuDFSeries,\n _ModinDataFrame,\n _ModinSeries,\n _NativePandasLikeDataFrame,\n )\n from narwhals._pandas_like.expr import PandasLikeExpr\n from narwhals._pandas_like.series import PandasLikeSeries\n\n IntoPandasLikeExpr: TypeAlias = "PandasLikeExpr | PandasLikeSeries"\n\nNativeSeriesT = TypeVar(\n "NativeSeriesT",\n "pd.Series[Any]",\n "_CuDFSeries",\n "_ModinSeries",\n default="pd.Series[Any]",\n)\nNativeDataFrameT = TypeVar(\n "NativeDataFrameT", bound="_NativePandasLikeDataFrame", default="pd.DataFrame"\n)\nNativeNDFrameT = TypeVar(\n "NativeNDFrameT",\n "pd.DataFrame",\n "pd.Series[Any]",\n "_CuDFDataFrame",\n "_CuDFSeries",\n "_ModinDataFrame",\n "_ModinSeries",\n)\n
.venv\Lib\site-packages\narwhals\_pandas_like\typing.py
typing.py
Python
1,064
0.95
0.023256
0
node-utils
926
2024-12-16T02:57:29.214423
GPL-3.0
false
f6cf3622ff6a64dedda1be46195d489a