content
stringlengths
1
103k
path
stringlengths
8
216
filename
stringlengths
2
179
language
stringclasses
15 values
size_bytes
int64
2
189k
quality_score
float64
0.5
0.95
complexity
float64
0
1
documentation_ratio
float64
0
1
repository
stringclasses
5 values
stars
int64
0
1k
created_date
stringdate
2023-07-10 19:21:08
2025-07-09 19:11:45
license
stringclasses
4 values
is_test
bool
2 classes
file_hash
stringlengths
32
32
\n\n
.venv\Lib\site-packages\pandas\core\arrays\arrow\__pycache__\accessors.cpython-313.pyc
accessors.cpython-313.pyc
Other
14,849
0.95
0.01462
0.006494
react-lib
738
2024-10-15T04:51:09.618027
GPL-3.0
false
e449de9da8ffd78818968fad20d37289
\n\n
.venv\Lib\site-packages\pandas\core\arrays\arrow\__pycache__\extension_types.cpython-313.pyc
extension_types.cpython-313.pyc
Other
9,144
0.95
0
0
python-kit
380
2024-12-15T21:14:56.749169
MIT
false
a7aaef39e54ceb6e067e899d75e6996c
\n\n
.venv\Lib\site-packages\pandas\core\arrays\arrow\__pycache__\_arrow_utils.cpython-313.pyc
_arrow_utils.cpython-313.pyc
Other
2,170
0.8
0
0
python-kit
164
2025-05-29T21:47:19.088201
Apache-2.0
false
a1d54487a35c8e77d74a6a59648e889a
\n\n
.venv\Lib\site-packages\pandas\core\arrays\arrow\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
416
0.7
0
0
awesome-app
510
2023-10-14T18:10:43.074079
GPL-3.0
false
7f83d1ee403cbb5a9eb7e5b97902327c
"""Sparse accessor"""\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\n\nfrom pandas.compat._optional import import_optional_dependency\n\nfrom pandas.core.dtypes.cast import find_common_type\nfrom pandas.core.dtypes.dtypes import SparseDtype\n\nfrom pandas.core.accessor import (\n PandasDelegate,\n delegate_names,\n)\nfrom pandas.core.arrays.sparse.array import SparseArray\n\nif TYPE_CHECKING:\n from pandas import (\n DataFrame,\n Series,\n )\n\n\nclass BaseAccessor:\n _validation_msg = "Can only use the '.sparse' accessor with Sparse data."\n\n def __init__(self, data=None) -> None:\n self._parent = data\n self._validate(data)\n\n def _validate(self, data):\n raise NotImplementedError\n\n\n@delegate_names(\n SparseArray, ["npoints", "density", "fill_value", "sp_values"], typ="property"\n)\nclass SparseAccessor(BaseAccessor, PandasDelegate):\n """\n Accessor for SparseSparse from other sparse matrix data types.\n\n Examples\n --------\n >>> ser = pd.Series([0, 0, 2, 2, 2], dtype="Sparse[int]")\n >>> ser.sparse.density\n 0.6\n >>> ser.sparse.sp_values\n array([2, 2, 2])\n """\n\n def _validate(self, data):\n if not isinstance(data.dtype, SparseDtype):\n raise AttributeError(self._validation_msg)\n\n def _delegate_property_get(self, name: str, *args, **kwargs):\n return getattr(self._parent.array, name)\n\n def _delegate_method(self, name: str, *args, **kwargs):\n if name == "from_coo":\n return self.from_coo(*args, **kwargs)\n elif name == "to_coo":\n return self.to_coo(*args, **kwargs)\n else:\n raise ValueError\n\n @classmethod\n def from_coo(cls, A, dense_index: bool = False) -> Series:\n """\n Create a Series with sparse values from a scipy.sparse.coo_matrix.\n\n Parameters\n ----------\n A : scipy.sparse.coo_matrix\n dense_index : bool, default False\n If False (default), the index consists of only the\n coords of the non-null entries of the original coo_matrix.\n If True, the index consists of the full sorted\n 
(row, col) coordinates of the coo_matrix.\n\n Returns\n -------\n s : Series\n A Series with sparse values.\n\n Examples\n --------\n >>> from scipy import sparse\n\n >>> A = sparse.coo_matrix(\n ... ([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4)\n ... )\n >>> A\n <COOrdinate sparse matrix of dtype 'float64'\n with 3 stored elements and shape (3, 4)>\n\n >>> A.todense()\n matrix([[0., 0., 1., 2.],\n [3., 0., 0., 0.],\n [0., 0., 0., 0.]])\n\n >>> ss = pd.Series.sparse.from_coo(A)\n >>> ss\n 0 2 1.0\n 3 2.0\n 1 0 3.0\n dtype: Sparse[float64, nan]\n """\n from pandas import Series\n from pandas.core.arrays.sparse.scipy_sparse import coo_to_sparse_series\n\n result = coo_to_sparse_series(A, dense_index=dense_index)\n result = Series(result.array, index=result.index, copy=False)\n\n return result\n\n def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels: bool = False):\n """\n Create a scipy.sparse.coo_matrix from a Series with MultiIndex.\n\n Use row_levels and column_levels to determine the row and column\n coordinates respectively. row_levels and column_levels are the names\n (labels) or numbers of the levels. {row_levels, column_levels} must be\n a partition of the MultiIndex level names (or numbers).\n\n Parameters\n ----------\n row_levels : tuple/list\n column_levels : tuple/list\n sort_labels : bool, default False\n Sort the row and column labels before forming the sparse matrix.\n When `row_levels` and/or `column_levels` refer to a single level,\n set to `True` for a faster execution.\n\n Returns\n -------\n y : scipy.sparse.coo_matrix\n rows : list (row labels)\n columns : list (column labels)\n\n Examples\n --------\n >>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])\n >>> s.index = pd.MultiIndex.from_tuples(\n ... [\n ... (1, 2, "a", 0),\n ... (1, 2, "a", 1),\n ... (1, 1, "b", 0),\n ... (1, 1, "b", 1),\n ... (2, 1, "b", 0),\n ... (2, 1, "b", 1)\n ... ],\n ... names=["A", "B", "C", "D"],\n ... 
)\n >>> s\n A B C D\n 1 2 a 0 3.0\n 1 NaN\n 1 b 0 1.0\n 1 3.0\n 2 1 b 0 NaN\n 1 NaN\n dtype: float64\n\n >>> ss = s.astype("Sparse")\n >>> ss\n A B C D\n 1 2 a 0 3.0\n 1 NaN\n 1 b 0 1.0\n 1 3.0\n 2 1 b 0 NaN\n 1 NaN\n dtype: Sparse[float64, nan]\n\n >>> A, rows, columns = ss.sparse.to_coo(\n ... row_levels=["A", "B"], column_levels=["C", "D"], sort_labels=True\n ... )\n >>> A\n <COOrdinate sparse matrix of dtype 'float64'\n with 3 stored elements and shape (3, 4)>\n >>> A.todense()\n matrix([[0., 0., 1., 3.],\n [3., 0., 0., 0.],\n [0., 0., 0., 0.]])\n\n >>> rows\n [(1, 1), (1, 2), (2, 1)]\n >>> columns\n [('a', 0), ('a', 1), ('b', 0), ('b', 1)]\n """\n from pandas.core.arrays.sparse.scipy_sparse import sparse_series_to_coo\n\n A, rows, columns = sparse_series_to_coo(\n self._parent, row_levels, column_levels, sort_labels=sort_labels\n )\n return A, rows, columns\n\n def to_dense(self) -> Series:\n """\n Convert a Series from sparse values to dense.\n\n Returns\n -------\n Series:\n A Series with the same values, stored as a dense array.\n\n Examples\n --------\n >>> series = pd.Series(pd.arrays.SparseArray([0, 1, 0]))\n >>> series\n 0 0\n 1 1\n 2 0\n dtype: Sparse[int64, 0]\n\n >>> series.sparse.to_dense()\n 0 0\n 1 1\n 2 0\n dtype: int64\n """\n from pandas import Series\n\n return Series(\n self._parent.array.to_dense(),\n index=self._parent.index,\n name=self._parent.name,\n copy=False,\n )\n\n\nclass SparseFrameAccessor(BaseAccessor, PandasDelegate):\n """\n DataFrame accessor for sparse data.\n\n Examples\n --------\n >>> df = pd.DataFrame({"a": [1, 2, 0, 0],\n ... 
"b": [3, 0, 0, 4]}, dtype="Sparse[int]")\n >>> df.sparse.density\n 0.5\n """\n\n def _validate(self, data):\n dtypes = data.dtypes\n if not all(isinstance(t, SparseDtype) for t in dtypes):\n raise AttributeError(self._validation_msg)\n\n @classmethod\n def from_spmatrix(cls, data, index=None, columns=None) -> DataFrame:\n """\n Create a new DataFrame from a scipy sparse matrix.\n\n Parameters\n ----------\n data : scipy.sparse.spmatrix\n Must be convertible to csc format.\n index, columns : Index, optional\n Row and column labels to use for the resulting DataFrame.\n Defaults to a RangeIndex.\n\n Returns\n -------\n DataFrame\n Each column of the DataFrame is stored as a\n :class:`arrays.SparseArray`.\n\n Examples\n --------\n >>> import scipy.sparse\n >>> mat = scipy.sparse.eye(3, dtype=float)\n >>> pd.DataFrame.sparse.from_spmatrix(mat)\n 0 1 2\n 0 1.0 0 0\n 1 0 1.0 0\n 2 0 0 1.0\n """\n from pandas._libs.sparse import IntIndex\n\n from pandas import DataFrame\n\n data = data.tocsc()\n index, columns = cls._prep_index(data, index, columns)\n n_rows, n_columns = data.shape\n # We need to make sure indices are sorted, as we create\n # IntIndex with no input validation (i.e. 
check_integrity=False ).\n # Indices may already be sorted in scipy in which case this adds\n # a small overhead.\n data.sort_indices()\n indices = data.indices\n indptr = data.indptr\n array_data = data.data\n dtype = SparseDtype(array_data.dtype, 0)\n arrays = []\n for i in range(n_columns):\n sl = slice(indptr[i], indptr[i + 1])\n idx = IntIndex(n_rows, indices[sl], check_integrity=False)\n arr = SparseArray._simple_new(array_data[sl], idx, dtype)\n arrays.append(arr)\n return DataFrame._from_arrays(\n arrays, columns=columns, index=index, verify_integrity=False\n )\n\n def to_dense(self) -> DataFrame:\n """\n Convert a DataFrame with sparse values to dense.\n\n Returns\n -------\n DataFrame\n A DataFrame with the same values stored as dense arrays.\n\n Examples\n --------\n >>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0])})\n >>> df.sparse.to_dense()\n A\n 0 0\n 1 1\n 2 0\n """\n from pandas import DataFrame\n\n data = {k: v.array.to_dense() for k, v in self._parent.items()}\n return DataFrame(data, index=self._parent.index, columns=self._parent.columns)\n\n def to_coo(self):\n """\n Return the contents of the frame as a sparse SciPy COO matrix.\n\n Returns\n -------\n scipy.sparse.spmatrix\n If the caller is heterogeneous and contains booleans or objects,\n the result will be of dtype=object. See Notes.\n\n Notes\n -----\n The dtype will be the lowest-common-denominator type (implicit\n upcasting); that is to say if the dtypes (even of numeric types)\n are mixed, the one that accommodates all will be chosen.\n\n e.g. If the dtypes are float16 and float32, dtype will be upcast to\n float32. 
By numpy.find_common_type convention, mixing int64 and\n and uint64 will result in a float64 dtype.\n\n Examples\n --------\n >>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0, 1])})\n >>> df.sparse.to_coo()\n <COOrdinate sparse matrix of dtype 'int64'\n with 2 stored elements and shape (4, 1)>\n """\n import_optional_dependency("scipy")\n from scipy.sparse import coo_matrix\n\n dtype = find_common_type(self._parent.dtypes.to_list())\n if isinstance(dtype, SparseDtype):\n dtype = dtype.subtype\n\n cols, rows, data = [], [], []\n for col, (_, ser) in enumerate(self._parent.items()):\n sp_arr = ser.array\n if sp_arr.fill_value != 0:\n raise ValueError("fill value must be 0 when converting to COO matrix")\n\n row = sp_arr.sp_index.indices\n cols.append(np.repeat(col, len(row)))\n rows.append(row)\n data.append(sp_arr.sp_values.astype(dtype, copy=False))\n\n cols = np.concatenate(cols)\n rows = np.concatenate(rows)\n data = np.concatenate(data)\n return coo_matrix((data, (rows, cols)), shape=self._parent.shape)\n\n @property\n def density(self) -> float:\n """\n Ratio of non-sparse points to total (dense) data points.\n\n Examples\n --------\n >>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0, 1])})\n >>> df.sparse.density\n 0.5\n """\n tmp = np.mean([column.array.density for _, column in self._parent.items()])\n return tmp\n\n @staticmethod\n def _prep_index(data, index, columns):\n from pandas.core.indexes.api import (\n default_index,\n ensure_index,\n )\n\n N, K = data.shape\n if index is None:\n index = default_index(N)\n else:\n index = ensure_index(index)\n if columns is None:\n columns = default_index(K)\n else:\n columns = ensure_index(columns)\n\n if len(columns) != K:\n raise ValueError(f"Column length mismatch: {len(columns)} vs. {K}")\n if len(index) != N:\n raise ValueError(f"Index length mismatch: {len(index)} vs. {N}")\n return index, columns\n
.venv\Lib\site-packages\pandas\core\arrays\sparse\accessor.py
accessor.py
Python
12,503
0.95
0.091787
0.011527
node-utils
421
2024-10-05T18:31:22.917219
MIT
false
1636e45b3bf721789b52de5f02792d96
"""\nSparseArray data structure\n"""\nfrom __future__ import annotations\n\nfrom collections import abc\nimport numbers\nimport operator\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Literal,\n cast,\n overload,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import lib\nimport pandas._libs.sparse as splib\nfrom pandas._libs.sparse import (\n BlockIndex,\n IntIndex,\n SparseIndex,\n)\nfrom pandas._libs.tslibs import NaT\nfrom pandas.compat.numpy import function as nv\nfrom pandas.errors import PerformanceWarning\nfrom pandas.util._decorators import doc\nfrom pandas.util._exceptions import find_stack_level\nfrom pandas.util._validators import (\n validate_bool_kwarg,\n validate_insert_loc,\n)\n\nfrom pandas.core.dtypes.astype import astype_array\nfrom pandas.core.dtypes.cast import (\n construct_1d_arraylike_from_scalar,\n find_common_type,\n maybe_box_datetimelike,\n)\nfrom pandas.core.dtypes.common import (\n is_bool_dtype,\n is_integer,\n is_list_like,\n is_object_dtype,\n is_scalar,\n is_string_dtype,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.dtypes import (\n DatetimeTZDtype,\n SparseDtype,\n)\nfrom pandas.core.dtypes.generic import (\n ABCIndex,\n ABCSeries,\n)\nfrom pandas.core.dtypes.missing import (\n isna,\n na_value_for_dtype,\n notna,\n)\n\nfrom pandas.core import arraylike\nimport pandas.core.algorithms as algos\nfrom pandas.core.arraylike import OpsMixin\nfrom pandas.core.arrays import ExtensionArray\nfrom pandas.core.base import PandasObject\nimport pandas.core.common as com\nfrom pandas.core.construction import (\n ensure_wrapped_if_datetimelike,\n extract_array,\n sanitize_array,\n)\nfrom pandas.core.indexers import (\n check_array_indexer,\n unpack_tuple_and_ellipses,\n)\nfrom pandas.core.nanops import check_below_min_count\n\nfrom pandas.io.formats import printing\n\n# See https://github.com/python/typing/issues/684\nif TYPE_CHECKING:\n from collections.abc import Sequence\n from enum import Enum\n\n class 
ellipsis(Enum):\n Ellipsis = "..."\n\n Ellipsis = ellipsis.Ellipsis\n\n from scipy.sparse import spmatrix\n\n from pandas._typing import (\n FillnaOptions,\n NumpySorter,\n )\n\n SparseIndexKind = Literal["integer", "block"]\n\n from pandas._typing import (\n ArrayLike,\n AstypeArg,\n Axis,\n AxisInt,\n Dtype,\n NpDtype,\n PositionalIndexer,\n Scalar,\n ScalarIndexer,\n Self,\n SequenceIndexer,\n npt,\n )\n\n from pandas import Series\n\nelse:\n ellipsis = type(Ellipsis)\n\n\n# ----------------------------------------------------------------------------\n# Array\n\n_sparray_doc_kwargs = {"klass": "SparseArray"}\n\n\ndef _get_fill(arr: SparseArray) -> np.ndarray:\n """\n Create a 0-dim ndarray containing the fill value\n\n Parameters\n ----------\n arr : SparseArray\n\n Returns\n -------\n fill_value : ndarray\n 0-dim ndarray with just the fill value.\n\n Notes\n -----\n coerce fill_value to arr dtype if possible\n int64 SparseArray can have NaN as fill_value if there is no missing\n """\n try:\n return np.asarray(arr.fill_value, dtype=arr.dtype.subtype)\n except ValueError:\n return np.asarray(arr.fill_value)\n\n\ndef _sparse_array_op(\n left: SparseArray, right: SparseArray, op: Callable, name: str\n) -> SparseArray:\n """\n Perform a binary operation between two arrays.\n\n Parameters\n ----------\n left : Union[SparseArray, ndarray]\n right : Union[SparseArray, ndarray]\n op : Callable\n The binary operation to perform\n name str\n Name of the callable.\n\n Returns\n -------\n SparseArray\n """\n if name.startswith("__"):\n # For lookups in _libs.sparse we need non-dunder op name\n name = name[2:-2]\n\n # dtype used to find corresponding sparse method\n ltype = left.dtype.subtype\n rtype = right.dtype.subtype\n\n if ltype != rtype:\n subtype = find_common_type([ltype, rtype])\n ltype = SparseDtype(subtype, left.fill_value)\n rtype = SparseDtype(subtype, right.fill_value)\n\n left = left.astype(ltype, copy=False)\n right = right.astype(rtype, copy=False)\n dtype 
= ltype.subtype\n else:\n dtype = ltype\n\n # dtype the result must have\n result_dtype = None\n\n if left.sp_index.ngaps == 0 or right.sp_index.ngaps == 0:\n with np.errstate(all="ignore"):\n result = op(left.to_dense(), right.to_dense())\n fill = op(_get_fill(left), _get_fill(right))\n\n if left.sp_index.ngaps == 0:\n index = left.sp_index\n else:\n index = right.sp_index\n elif left.sp_index.equals(right.sp_index):\n with np.errstate(all="ignore"):\n result = op(left.sp_values, right.sp_values)\n fill = op(_get_fill(left), _get_fill(right))\n index = left.sp_index\n else:\n if name[0] == "r":\n left, right = right, left\n name = name[1:]\n\n if name in ("and", "or", "xor") and dtype == "bool":\n opname = f"sparse_{name}_uint8"\n # to make template simple, cast here\n left_sp_values = left.sp_values.view(np.uint8)\n right_sp_values = right.sp_values.view(np.uint8)\n result_dtype = bool\n else:\n opname = f"sparse_{name}_{dtype}"\n left_sp_values = left.sp_values\n right_sp_values = right.sp_values\n\n if (\n name in ["floordiv", "mod"]\n and (right == 0).any()\n and left.dtype.kind in "iu"\n ):\n # Match the non-Sparse Series behavior\n opname = f"sparse_{name}_float64"\n left_sp_values = left_sp_values.astype("float64")\n right_sp_values = right_sp_values.astype("float64")\n\n sparse_op = getattr(splib, opname)\n\n with np.errstate(all="ignore"):\n result, index, fill = sparse_op(\n left_sp_values,\n left.sp_index,\n left.fill_value,\n right_sp_values,\n right.sp_index,\n right.fill_value,\n )\n\n if name == "divmod":\n # result is a 2-tuple\n # error: Incompatible return value type (got "Tuple[SparseArray,\n # SparseArray]", expected "SparseArray")\n return ( # type: ignore[return-value]\n _wrap_result(name, result[0], index, fill[0], dtype=result_dtype),\n _wrap_result(name, result[1], index, fill[1], dtype=result_dtype),\n )\n\n if result_dtype is None:\n result_dtype = result.dtype\n\n return _wrap_result(name, result, index, fill, 
dtype=result_dtype)\n\n\ndef _wrap_result(\n name: str, data, sparse_index, fill_value, dtype: Dtype | None = None\n) -> SparseArray:\n """\n wrap op result to have correct dtype\n """\n if name.startswith("__"):\n # e.g. __eq__ --> eq\n name = name[2:-2]\n\n if name in ("eq", "ne", "lt", "gt", "le", "ge"):\n dtype = bool\n\n fill_value = lib.item_from_zerodim(fill_value)\n\n if is_bool_dtype(dtype):\n # fill_value may be np.bool_\n fill_value = bool(fill_value)\n return SparseArray(\n data, sparse_index=sparse_index, fill_value=fill_value, dtype=dtype\n )\n\n\nclass SparseArray(OpsMixin, PandasObject, ExtensionArray):\n """\n An ExtensionArray for storing sparse data.\n\n Parameters\n ----------\n data : array-like or scalar\n A dense array of values to store in the SparseArray. This may contain\n `fill_value`.\n sparse_index : SparseIndex, optional\n fill_value : scalar, optional\n Elements in data that are ``fill_value`` are not stored in the\n SparseArray. For memory savings, this should be the most common value\n in `data`. By default, `fill_value` depends on the dtype of `data`:\n\n =========== ==========\n data.dtype na_value\n =========== ==========\n float ``np.nan``\n int ``0``\n bool False\n datetime64 ``pd.NaT``\n timedelta64 ``pd.NaT``\n =========== ==========\n\n The fill value is potentially specified in three ways. In order of\n precedence, these are\n\n 1. The `fill_value` argument\n 2. ``dtype.fill_value`` if `fill_value` is None and `dtype` is\n a ``SparseDtype``\n 3. ``data.dtype.fill_value`` if `fill_value` is None and `dtype`\n is not a ``SparseDtype`` and `data` is a ``SparseArray``.\n\n kind : str\n Can be 'integer' or 'block', default is 'integer'.\n The type of storage for sparse locations.\n\n * 'block': Stores a `block` and `block_length` for each\n contiguous *span* of sparse values. 
This is best when\n sparse data tends to be clumped together, with large\n regions of ``fill-value`` values between sparse values.\n * 'integer': uses an integer to store the location of\n each sparse value.\n\n dtype : np.dtype or SparseDtype, optional\n The dtype to use for the SparseArray. For numpy dtypes, this\n determines the dtype of ``self.sp_values``. For SparseDtype,\n this determines ``self.sp_values`` and ``self.fill_value``.\n copy : bool, default False\n Whether to explicitly copy the incoming `data` array.\n\n Attributes\n ----------\n None\n\n Methods\n -------\n None\n\n Examples\n --------\n >>> from pandas.arrays import SparseArray\n >>> arr = SparseArray([0, 0, 1, 2])\n >>> arr\n [0, 0, 1, 2]\n Fill: 0\n IntIndex\n Indices: array([2, 3], dtype=int32)\n """\n\n _subtyp = "sparse_array" # register ABCSparseArray\n _hidden_attrs = PandasObject._hidden_attrs | frozenset([])\n _sparse_index: SparseIndex\n _sparse_values: np.ndarray\n _dtype: SparseDtype\n\n def __init__(\n self,\n data,\n sparse_index=None,\n fill_value=None,\n kind: SparseIndexKind = "integer",\n dtype: Dtype | None = None,\n copy: bool = False,\n ) -> None:\n if fill_value is None and isinstance(dtype, SparseDtype):\n fill_value = dtype.fill_value\n\n if isinstance(data, type(self)):\n # disable normal inference on dtype, sparse_index, & fill_value\n if sparse_index is None:\n sparse_index = data.sp_index\n if fill_value is None:\n fill_value = data.fill_value\n if dtype is None:\n dtype = data.dtype\n # TODO: make kind=None, and use data.kind?\n data = data.sp_values\n\n # Handle use-provided dtype\n if isinstance(dtype, str):\n # Two options: dtype='int', regular numpy dtype\n # or dtype='Sparse[int]', a sparse dtype\n try:\n dtype = SparseDtype.construct_from_string(dtype)\n except TypeError:\n dtype = pandas_dtype(dtype)\n\n if isinstance(dtype, SparseDtype):\n if fill_value is None:\n fill_value = dtype.fill_value\n dtype = dtype.subtype\n\n if is_scalar(data):\n 
warnings.warn(\n f"Constructing {type(self).__name__} with scalar data is deprecated "\n "and will raise in a future version. Pass a sequence instead.",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n if sparse_index is None:\n npoints = 1\n else:\n npoints = sparse_index.length\n\n data = construct_1d_arraylike_from_scalar(data, npoints, dtype=None)\n dtype = data.dtype\n\n if dtype is not None:\n dtype = pandas_dtype(dtype)\n\n # TODO: disentangle the fill_value dtype inference from\n # dtype inference\n if data is None:\n # TODO: What should the empty dtype be? Object or float?\n\n # error: Argument "dtype" to "array" has incompatible type\n # "Union[ExtensionDtype, dtype[Any], None]"; expected "Union[dtype[Any],\n # None, type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any,\n # Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]"\n data = np.array([], dtype=dtype) # type: ignore[arg-type]\n\n try:\n data = sanitize_array(data, index=None)\n except ValueError:\n # NumPy may raise a ValueError on data like [1, []]\n # we retry with object dtype here.\n if dtype is None:\n dtype = np.dtype(object)\n data = np.atleast_1d(np.asarray(data, dtype=dtype))\n else:\n raise\n\n if copy:\n # TODO: avoid double copy when dtype forces cast.\n data = data.copy()\n\n if fill_value is None:\n fill_value_dtype = data.dtype if dtype is None else dtype\n if fill_value_dtype is None:\n fill_value = np.nan\n else:\n fill_value = na_value_for_dtype(fill_value_dtype)\n\n if isinstance(data, type(self)) and sparse_index is None:\n sparse_index = data._sparse_index\n # error: Argument "dtype" to "asarray" has incompatible type\n # "Union[ExtensionDtype, dtype[Any], None]"; expected "None"\n sparse_values = np.asarray(\n data.sp_values, dtype=dtype # type: ignore[arg-type]\n )\n elif sparse_index is None:\n data = extract_array(data, extract_numpy=True)\n if not isinstance(data, np.ndarray):\n # EA\n if isinstance(data.dtype, DatetimeTZDtype):\n 
warnings.warn(\n f"Creating SparseArray from {data.dtype} data "\n "loses timezone information. Cast to object before "\n "sparse to retain timezone information.",\n UserWarning,\n stacklevel=find_stack_level(),\n )\n data = np.asarray(data, dtype="datetime64[ns]")\n if fill_value is NaT:\n fill_value = np.datetime64("NaT", "ns")\n data = np.asarray(data)\n sparse_values, sparse_index, fill_value = _make_sparse(\n # error: Argument "dtype" to "_make_sparse" has incompatible type\n # "Union[ExtensionDtype, dtype[Any], None]"; expected\n # "Optional[dtype[Any]]"\n data,\n kind=kind,\n fill_value=fill_value,\n dtype=dtype, # type: ignore[arg-type]\n )\n else:\n # error: Argument "dtype" to "asarray" has incompatible type\n # "Union[ExtensionDtype, dtype[Any], None]"; expected "None"\n sparse_values = np.asarray(data, dtype=dtype) # type: ignore[arg-type]\n if len(sparse_values) != sparse_index.npoints:\n raise AssertionError(\n f"Non array-like type {type(sparse_values)} must "\n "have the same length as the index"\n )\n self._sparse_index = sparse_index\n self._sparse_values = sparse_values\n self._dtype = SparseDtype(sparse_values.dtype, fill_value)\n\n @classmethod\n def _simple_new(\n cls,\n sparse_array: np.ndarray,\n sparse_index: SparseIndex,\n dtype: SparseDtype,\n ) -> Self:\n new = object.__new__(cls)\n new._sparse_index = sparse_index\n new._sparse_values = sparse_array\n new._dtype = dtype\n return new\n\n @classmethod\n def from_spmatrix(cls, data: spmatrix) -> Self:\n """\n Create a SparseArray from a scipy.sparse matrix.\n\n Parameters\n ----------\n data : scipy.sparse.sp_matrix\n This should be a SciPy sparse matrix where the size\n of the second dimension is 1. 
In other words, a\n sparse matrix with a single column.\n\n Returns\n -------\n SparseArray\n\n Examples\n --------\n >>> import scipy.sparse\n >>> mat = scipy.sparse.coo_matrix((4, 1))\n >>> pd.arrays.SparseArray.from_spmatrix(mat)\n [0.0, 0.0, 0.0, 0.0]\n Fill: 0.0\n IntIndex\n Indices: array([], dtype=int32)\n """\n length, ncol = data.shape\n\n if ncol != 1:\n raise ValueError(f"'data' must have a single column, not '{ncol}'")\n\n # our sparse index classes require that the positions be strictly\n # increasing. So we need to sort loc, and arr accordingly.\n data = data.tocsc()\n data.sort_indices()\n arr = data.data\n idx = data.indices\n\n zero = np.array(0, dtype=arr.dtype).item()\n dtype = SparseDtype(arr.dtype, zero)\n index = IntIndex(length, idx)\n\n return cls._simple_new(arr, index, dtype)\n\n def __array__(\n self, dtype: NpDtype | None = None, copy: bool | None = None\n ) -> np.ndarray:\n if self.sp_index.ngaps == 0:\n # Compat for na dtype and int values.\n if copy is True:\n return np.array(self.sp_values)\n else:\n return self.sp_values\n\n if copy is False:\n warnings.warn(\n "Starting with NumPy 2.0, the behavior of the 'copy' keyword has "\n "changed and passing 'copy=False' raises an error when returning "\n "a zero-copy NumPy array is not possible. pandas will follow "\n "this behavior starting with pandas 3.0.\nThis conversion to "\n "NumPy requires a copy, but 'copy=False' was passed. Consider "\n "using 'np.asarray(..)' instead.",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n\n fill_value = self.fill_value\n\n if dtype is None:\n # Can NumPy represent this type?\n # If not, `np.result_type` will raise. 
We catch that\n # and return object.\n if self.sp_values.dtype.kind == "M":\n # However, we *do* special-case the common case of\n # a datetime64 with pandas NaT.\n if fill_value is NaT:\n # Can't put pd.NaT in a datetime64[ns]\n fill_value = np.datetime64("NaT")\n try:\n dtype = np.result_type(self.sp_values.dtype, type(fill_value))\n except TypeError:\n dtype = object\n\n out = np.full(self.shape, fill_value, dtype=dtype)\n out[self.sp_index.indices] = self.sp_values\n return out\n\n def __setitem__(self, key, value) -> None:\n # I suppose we could allow setting of non-fill_value elements.\n # TODO(SparseArray.__setitem__): remove special cases in\n # ExtensionBlock.where\n msg = "SparseArray does not support item assignment via setitem"\n raise TypeError(msg)\n\n @classmethod\n def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy: bool = False):\n return cls(scalars, dtype=dtype)\n\n @classmethod\n def _from_factorized(cls, values, original):\n return cls(values, dtype=original.dtype)\n\n # ------------------------------------------------------------------------\n # Data\n # ------------------------------------------------------------------------\n @property\n def sp_index(self) -> SparseIndex:\n """\n The SparseIndex containing the location of non- ``fill_value`` points.\n """\n return self._sparse_index\n\n @property\n def sp_values(self) -> np.ndarray:\n """\n An ndarray containing the non- ``fill_value`` values.\n\n Examples\n --------\n >>> from pandas.arrays import SparseArray\n >>> s = SparseArray([0, 0, 1, 0, 2], fill_value=0)\n >>> s.sp_values\n array([1, 2])\n """\n return self._sparse_values\n\n @property\n def dtype(self) -> SparseDtype:\n return self._dtype\n\n @property\n def fill_value(self):\n """\n Elements in `data` that are `fill_value` are not stored.\n\n For memory savings, this should be the most common value in the array.\n\n Examples\n --------\n >>> ser = pd.Series([0, 0, 2, 2, 2], dtype="Sparse[int]")\n >>> 
ser.sparse.fill_value\n 0\n >>> spa_dtype = pd.SparseDtype(dtype=np.int32, fill_value=2)\n >>> ser = pd.Series([0, 0, 2, 2, 2], dtype=spa_dtype)\n >>> ser.sparse.fill_value\n 2\n """\n return self.dtype.fill_value\n\n @fill_value.setter\n def fill_value(self, value) -> None:\n self._dtype = SparseDtype(self.dtype.subtype, value)\n\n @property\n def kind(self) -> SparseIndexKind:\n """\n The kind of sparse index for this array. One of {'integer', 'block'}.\n """\n if isinstance(self.sp_index, IntIndex):\n return "integer"\n else:\n return "block"\n\n @property\n def _valid_sp_values(self) -> np.ndarray:\n sp_vals = self.sp_values\n mask = notna(sp_vals)\n return sp_vals[mask]\n\n def __len__(self) -> int:\n return self.sp_index.length\n\n @property\n def _null_fill_value(self) -> bool:\n return self._dtype._is_na_fill_value\n\n def _fill_value_matches(self, fill_value) -> bool:\n if self._null_fill_value:\n return isna(fill_value)\n else:\n return self.fill_value == fill_value\n\n @property\n def nbytes(self) -> int:\n return self.sp_values.nbytes + self.sp_index.nbytes\n\n @property\n def density(self) -> float:\n """\n The percent of non- ``fill_value`` points, as decimal.\n\n Examples\n --------\n >>> from pandas.arrays import SparseArray\n >>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0)\n >>> s.density\n 0.6\n """\n return self.sp_index.npoints / self.sp_index.length\n\n @property\n def npoints(self) -> int:\n """\n The number of non- ``fill_value`` points.\n\n Examples\n --------\n >>> from pandas.arrays import SparseArray\n >>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0)\n >>> s.npoints\n 3\n """\n return self.sp_index.npoints\n\n # error: Return type "SparseArray" of "isna" incompatible with return type\n # "ndarray[Any, Any] | ExtensionArraySupportsAnyAll" in supertype "ExtensionArray"\n def isna(self) -> Self: # type: ignore[override]\n # If null fill value, we want SparseDtype[bool, true]\n # to preserve the same memory usage.\n dtype = 
SparseDtype(bool, self._null_fill_value)\n if self._null_fill_value:\n return type(self)._simple_new(isna(self.sp_values), self.sp_index, dtype)\n mask = np.full(len(self), False, dtype=np.bool_)\n mask[self.sp_index.indices] = isna(self.sp_values)\n return type(self)(mask, fill_value=False, dtype=dtype)\n\n def _pad_or_backfill( # pylint: disable=useless-parent-delegation\n self,\n *,\n method: FillnaOptions,\n limit: int | None = None,\n limit_area: Literal["inside", "outside"] | None = None,\n copy: bool = True,\n ) -> Self:\n # TODO(3.0): We can remove this method once deprecation for fillna method\n # keyword is enforced.\n return super()._pad_or_backfill(\n method=method, limit=limit, limit_area=limit_area, copy=copy\n )\n\n def fillna(\n self,\n value=None,\n method: FillnaOptions | None = None,\n limit: int | None = None,\n copy: bool = True,\n ) -> Self:\n """\n Fill missing values with `value`.\n\n Parameters\n ----------\n value : scalar, optional\n method : str, optional\n\n .. warning::\n\n Using 'method' will result in high memory use,\n as all `fill_value` methods will be converted to\n an in-memory ndarray\n\n limit : int, optional\n\n copy: bool, default True\n Ignored for SparseArray.\n\n Returns\n -------\n SparseArray\n\n Notes\n -----\n When `value` is specified, the result's ``fill_value`` depends on\n ``self.fill_value``. The goal is to maintain low-memory use.\n\n If ``self.fill_value`` is NA, the result dtype will be\n ``SparseDtype(self.dtype, fill_value=value)``. This will preserve\n amount of memory used before and after filling.\n\n When ``self.fill_value`` is not NA, the result dtype will be\n ``self.dtype``. 
Again, this preserves the amount of memory used.\n """\n if (method is None and value is None) or (\n method is not None and value is not None\n ):\n raise ValueError("Must specify one of 'method' or 'value'.")\n\n if method is not None:\n return super().fillna(method=method, limit=limit)\n\n else:\n new_values = np.where(isna(self.sp_values), value, self.sp_values)\n\n if self._null_fill_value:\n # This is essentially just updating the dtype.\n new_dtype = SparseDtype(self.dtype.subtype, fill_value=value)\n else:\n new_dtype = self.dtype\n\n return self._simple_new(new_values, self._sparse_index, new_dtype)\n\n def shift(self, periods: int = 1, fill_value=None) -> Self:\n if not len(self) or periods == 0:\n return self.copy()\n\n if isna(fill_value):\n fill_value = self.dtype.na_value\n\n subtype = np.result_type(fill_value, self.dtype.subtype)\n\n if subtype != self.dtype.subtype:\n # just coerce up front\n arr = self.astype(SparseDtype(subtype, self.fill_value))\n else:\n arr = self\n\n empty = self._from_sequence(\n [fill_value] * min(abs(periods), len(self)), dtype=arr.dtype\n )\n\n if periods > 0:\n a = empty\n b = arr[:-periods]\n else:\n a = arr[abs(periods) :]\n b = empty\n return arr._concat_same_type([a, b])\n\n def _first_fill_value_loc(self):\n """\n Get the location of the first fill value.\n\n Returns\n -------\n int\n """\n if len(self) == 0 or self.sp_index.npoints == len(self):\n return -1\n\n indices = self.sp_index.indices\n if not len(indices) or indices[0] > 0:\n return 0\n\n # a number larger than 1 should be appended to\n # the last in case of fill value only appears\n # in the tail of array\n diff = np.r_[np.diff(indices), 2]\n return indices[(diff > 1).argmax()] + 1\n\n @doc(ExtensionArray.duplicated)\n def duplicated(\n self, keep: Literal["first", "last", False] = "first"\n ) -> npt.NDArray[np.bool_]:\n values = np.asarray(self)\n mask = np.asarray(self.isna())\n return algos.duplicated(values, keep=keep, mask=mask)\n\n def unique(self) 
-> Self:\n uniques = algos.unique(self.sp_values)\n if len(self.sp_values) != len(self):\n fill_loc = self._first_fill_value_loc()\n # Inorder to align the behavior of pd.unique or\n # pd.Series.unique, we should keep the original\n # order, here we use unique again to find the\n # insertion place. Since the length of sp_values\n # is not large, maybe minor performance hurt\n # is worthwhile to the correctness.\n insert_loc = len(algos.unique(self.sp_values[:fill_loc]))\n uniques = np.insert(uniques, insert_loc, self.fill_value)\n return type(self)._from_sequence(uniques, dtype=self.dtype)\n\n def _values_for_factorize(self):\n # Still override this for hash_pandas_object\n return np.asarray(self), self.fill_value\n\n def factorize(\n self,\n use_na_sentinel: bool = True,\n ) -> tuple[np.ndarray, SparseArray]:\n # Currently, ExtensionArray.factorize -> Tuple[ndarray, EA]\n # The sparsity on this is backwards from what Sparse would want. Want\n # ExtensionArray.factorize -> Tuple[EA, EA]\n # Given that we have to return a dense array of codes, why bother\n # implementing an efficient factorize?\n codes, uniques = algos.factorize(\n np.asarray(self), use_na_sentinel=use_na_sentinel\n )\n uniques_sp = SparseArray(uniques, dtype=self.dtype)\n return codes, uniques_sp\n\n def value_counts(self, dropna: bool = True) -> Series:\n """\n Returns a Series containing counts of unique values.\n\n Parameters\n ----------\n dropna : bool, default True\n Don't include counts of NaN, even if NaN is in sp_values.\n\n Returns\n -------\n counts : Series\n """\n from pandas import (\n Index,\n Series,\n )\n\n keys, counts, _ = algos.value_counts_arraylike(self.sp_values, dropna=dropna)\n fcounts = self.sp_index.ngaps\n if fcounts > 0 and (not self._null_fill_value or not dropna):\n mask = isna(keys) if self._null_fill_value else keys == self.fill_value\n if mask.any():\n counts[mask] += fcounts\n else:\n # error: Argument 1 to "insert" has incompatible type "Union[\n # 
ExtensionArray,ndarray[Any, Any]]"; expected "Union[\n # _SupportsArray[dtype[Any]], Sequence[_SupportsArray[dtype\n # [Any]]], Sequence[Sequence[_SupportsArray[dtype[Any]]]],\n # Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]], Sequence\n # [Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]]]]"\n keys = np.insert(keys, 0, self.fill_value) # type: ignore[arg-type]\n counts = np.insert(counts, 0, fcounts)\n\n if not isinstance(keys, ABCIndex):\n index = Index(keys)\n else:\n index = keys\n return Series(counts, index=index, copy=False)\n\n # --------\n # Indexing\n # --------\n @overload\n def __getitem__(self, key: ScalarIndexer) -> Any:\n ...\n\n @overload\n def __getitem__(\n self,\n key: SequenceIndexer | tuple[int | ellipsis, ...],\n ) -> Self:\n ...\n\n def __getitem__(\n self,\n key: PositionalIndexer | tuple[int | ellipsis, ...],\n ) -> Self | Any:\n if isinstance(key, tuple):\n key = unpack_tuple_and_ellipses(key)\n if key is Ellipsis:\n raise ValueError("Cannot slice with Ellipsis")\n\n if is_integer(key):\n return self._get_val_at(key)\n elif isinstance(key, tuple):\n # error: Invalid index type "Tuple[Union[int, ellipsis], ...]"\n # for "ndarray[Any, Any]"; expected type\n # "Union[SupportsIndex, _SupportsArray[dtype[Union[bool_,\n # integer[Any]]]], _NestedSequence[_SupportsArray[dtype[\n # Union[bool_, integer[Any]]]]], _NestedSequence[Union[\n # bool, int]], Tuple[Union[SupportsIndex, _SupportsArray[\n # dtype[Union[bool_, integer[Any]]]], _NestedSequence[\n # _SupportsArray[dtype[Union[bool_, integer[Any]]]]],\n # _NestedSequence[Union[bool, int]]], ...]]"\n data_slice = self.to_dense()[key] # type: ignore[index]\n elif isinstance(key, slice):\n # Avoid densifying when handling contiguous slices\n if key.step is None or key.step == 1:\n start = 0 if key.start is None else key.start\n if start < 0:\n start += len(self)\n\n end = len(self) if key.stop is None else key.stop\n if end < 0:\n end += len(self)\n\n indices = 
self.sp_index.indices\n keep_inds = np.flatnonzero((indices >= start) & (indices < end))\n sp_vals = self.sp_values[keep_inds]\n\n sp_index = indices[keep_inds].copy()\n\n # If we've sliced to not include the start of the array, all our indices\n # should be shifted. NB: here we are careful to also not shift by a\n # negative value for a case like [0, 1][-100:] where the start index\n # should be treated like 0\n if start > 0:\n sp_index -= start\n\n # Length of our result should match applying this slice to a range\n # of the length of our original array\n new_len = len(range(len(self))[key])\n new_sp_index = make_sparse_index(new_len, sp_index, self.kind)\n return type(self)._simple_new(sp_vals, new_sp_index, self.dtype)\n else:\n indices = np.arange(len(self), dtype=np.int32)[key]\n return self.take(indices)\n\n elif not is_list_like(key):\n # e.g. "foo" or 2.5\n # exception message copied from numpy\n raise IndexError(\n r"only integers, slices (`:`), ellipsis (`...`), numpy.newaxis "\n r"(`None`) and integer or boolean arrays are valid indices"\n )\n\n else:\n if isinstance(key, SparseArray):\n # NOTE: If we guarantee that SparseDType(bool)\n # has only fill_value - true, false or nan\n # (see GH PR 44955)\n # we can apply mask very fast:\n if is_bool_dtype(key):\n if isna(key.fill_value):\n return self.take(key.sp_index.indices[key.sp_values])\n if not key.fill_value:\n return self.take(key.sp_index.indices)\n n = len(self)\n mask = np.full(n, True, dtype=np.bool_)\n mask[key.sp_index.indices] = False\n return self.take(np.arange(n)[mask])\n else:\n key = np.asarray(key)\n\n key = check_array_indexer(self, key)\n\n if com.is_bool_indexer(key):\n # mypy doesn't know we have an array here\n key = cast(np.ndarray, key)\n return self.take(np.arange(len(key), dtype=np.int32)[key])\n elif hasattr(key, "__len__"):\n return self.take(key)\n else:\n raise ValueError(f"Cannot slice with '{key}'")\n\n return type(self)(data_slice, kind=self.kind)\n\n def 
_get_val_at(self, loc):\n loc = validate_insert_loc(loc, len(self))\n\n sp_loc = self.sp_index.lookup(loc)\n if sp_loc == -1:\n return self.fill_value\n else:\n val = self.sp_values[sp_loc]\n val = maybe_box_datetimelike(val, self.sp_values.dtype)\n return val\n\n def take(self, indices, *, allow_fill: bool = False, fill_value=None) -> Self:\n if is_scalar(indices):\n raise ValueError(f"'indices' must be an array, not a scalar '{indices}'.")\n indices = np.asarray(indices, dtype=np.int32)\n\n dtype = None\n if indices.size == 0:\n result = np.array([], dtype="object")\n dtype = self.dtype\n elif allow_fill:\n result = self._take_with_fill(indices, fill_value=fill_value)\n else:\n return self._take_without_fill(indices)\n\n return type(self)(\n result, fill_value=self.fill_value, kind=self.kind, dtype=dtype\n )\n\n def _take_with_fill(self, indices, fill_value=None) -> np.ndarray:\n if fill_value is None:\n fill_value = self.dtype.na_value\n\n if indices.min() < -1:\n raise ValueError(\n "Invalid value in 'indices'. Must be between -1 "\n "and the length of the array."\n )\n\n if indices.max() >= len(self):\n raise IndexError("out of bounds value in 'indices'.")\n\n if len(self) == 0:\n # Empty... Allow taking only if all empty\n if (indices == -1).all():\n dtype = np.result_type(self.sp_values, type(fill_value))\n taken = np.empty_like(indices, dtype=dtype)\n taken.fill(fill_value)\n return taken\n else:\n raise IndexError("cannot do a non-empty take from an empty axes.")\n\n # sp_indexer may be -1 for two reasons\n # 1.) we took for an index of -1 (new)\n # 2.) 
we took a value that was self.fill_value (old)\n sp_indexer = self.sp_index.lookup_array(indices)\n new_fill_indices = indices == -1\n old_fill_indices = (sp_indexer == -1) & ~new_fill_indices\n\n if self.sp_index.npoints == 0 and old_fill_indices.all():\n # We've looked up all valid points on an all-sparse array.\n taken = np.full(\n sp_indexer.shape, fill_value=self.fill_value, dtype=self.dtype.subtype\n )\n\n elif self.sp_index.npoints == 0:\n # Use the old fill_value unless we took for an index of -1\n _dtype = np.result_type(self.dtype.subtype, type(fill_value))\n taken = np.full(sp_indexer.shape, fill_value=fill_value, dtype=_dtype)\n taken[old_fill_indices] = self.fill_value\n else:\n taken = self.sp_values.take(sp_indexer)\n\n # Fill in two steps.\n # Old fill values\n # New fill values\n # potentially coercing to a new dtype at each stage.\n\n m0 = sp_indexer[old_fill_indices] < 0\n m1 = sp_indexer[new_fill_indices] < 0\n\n result_type = taken.dtype\n\n if m0.any():\n result_type = np.result_type(result_type, type(self.fill_value))\n taken = taken.astype(result_type)\n taken[old_fill_indices] = self.fill_value\n\n if m1.any():\n result_type = np.result_type(result_type, type(fill_value))\n taken = taken.astype(result_type)\n taken[new_fill_indices] = fill_value\n\n return taken\n\n def _take_without_fill(self, indices) -> Self:\n to_shift = indices < 0\n\n n = len(self)\n\n if (indices.max() >= n) or (indices.min() < -n):\n if n == 0:\n raise IndexError("cannot do a non-empty take from an empty axes.")\n raise IndexError("out of bounds value in 'indices'.")\n\n if to_shift.any():\n indices = indices.copy()\n indices[to_shift] += n\n\n sp_indexer = self.sp_index.lookup_array(indices)\n value_mask = sp_indexer != -1\n new_sp_values = self.sp_values[sp_indexer[value_mask]]\n\n value_indices = np.flatnonzero(value_mask).astype(np.int32, copy=False)\n\n new_sp_index = make_sparse_index(len(indices), value_indices, kind=self.kind)\n return 
type(self)._simple_new(new_sp_values, new_sp_index, dtype=self.dtype)\n\n def searchsorted(\n self,\n v: ArrayLike | object,\n side: Literal["left", "right"] = "left",\n sorter: NumpySorter | None = None,\n ) -> npt.NDArray[np.intp] | np.intp:\n msg = "searchsorted requires high memory usage."\n warnings.warn(msg, PerformanceWarning, stacklevel=find_stack_level())\n v = np.asarray(v)\n return np.asarray(self, dtype=self.dtype.subtype).searchsorted(v, side, sorter)\n\n def copy(self) -> Self:\n values = self.sp_values.copy()\n return self._simple_new(values, self.sp_index, self.dtype)\n\n @classmethod\n def _concat_same_type(cls, to_concat: Sequence[Self]) -> Self:\n fill_value = to_concat[0].fill_value\n\n values = []\n length = 0\n\n if to_concat:\n sp_kind = to_concat[0].kind\n else:\n sp_kind = "integer"\n\n sp_index: SparseIndex\n if sp_kind == "integer":\n indices = []\n\n for arr in to_concat:\n int_idx = arr.sp_index.indices.copy()\n int_idx += length # TODO: wraparound\n length += arr.sp_index.length\n\n values.append(arr.sp_values)\n indices.append(int_idx)\n\n data = np.concatenate(values)\n indices_arr = np.concatenate(indices)\n # error: Argument 2 to "IntIndex" has incompatible type\n # "ndarray[Any, dtype[signedinteger[_32Bit]]]";\n # expected "Sequence[int]"\n sp_index = IntIndex(length, indices_arr) # type: ignore[arg-type]\n\n else:\n # when concatenating block indices, we don't claim that you'll\n # get an identical index as concatenating the values and then\n # creating a new index. 
We don't want to spend the time trying\n # to merge blocks across arrays in `to_concat`, so the resulting\n # BlockIndex may have more blocks.\n blengths = []\n blocs = []\n\n for arr in to_concat:\n block_idx = arr.sp_index.to_block_index()\n\n values.append(arr.sp_values)\n blocs.append(block_idx.blocs.copy() + length)\n blengths.append(block_idx.blengths)\n length += arr.sp_index.length\n\n data = np.concatenate(values)\n blocs_arr = np.concatenate(blocs)\n blengths_arr = np.concatenate(blengths)\n\n sp_index = BlockIndex(length, blocs_arr, blengths_arr)\n\n return cls(data, sparse_index=sp_index, fill_value=fill_value)\n\n def astype(self, dtype: AstypeArg | None = None, copy: bool = True):\n """\n Change the dtype of a SparseArray.\n\n The output will always be a SparseArray. To convert to a dense\n ndarray with a certain dtype, use :meth:`numpy.asarray`.\n\n Parameters\n ----------\n dtype : np.dtype or ExtensionDtype\n For SparseDtype, this changes the dtype of\n ``self.sp_values`` and the ``self.fill_value``.\n\n For other dtypes, this only changes the dtype of\n ``self.sp_values``.\n\n copy : bool, default True\n Whether to ensure a copy is made, even if not necessary.\n\n Returns\n -------\n SparseArray\n\n Examples\n --------\n >>> arr = pd.arrays.SparseArray([0, 0, 1, 2])\n >>> arr\n [0, 0, 1, 2]\n Fill: 0\n IntIndex\n Indices: array([2, 3], dtype=int32)\n\n >>> arr.astype(SparseDtype(np.dtype('int32')))\n [0, 0, 1, 2]\n Fill: 0\n IntIndex\n Indices: array([2, 3], dtype=int32)\n\n Using a NumPy dtype with a different kind (e.g. float) will coerce\n just ``self.sp_values``.\n\n >>> arr.astype(SparseDtype(np.dtype('float64')))\n ... # doctest: +NORMALIZE_WHITESPACE\n [nan, nan, 1.0, 2.0]\n Fill: nan\n IntIndex\n Indices: array([2, 3], dtype=int32)\n\n Using a SparseDtype, you can also change the fill value as well.\n\n >>> arr.astype(SparseDtype("float64", fill_value=0.0))\n ... 
# doctest: +NORMALIZE_WHITESPACE\n [0.0, 0.0, 1.0, 2.0]\n Fill: 0.0\n IntIndex\n Indices: array([2, 3], dtype=int32)\n """\n if dtype == self._dtype:\n if not copy:\n return self\n else:\n return self.copy()\n\n future_dtype = pandas_dtype(dtype)\n if not isinstance(future_dtype, SparseDtype):\n # GH#34457\n values = np.asarray(self)\n values = ensure_wrapped_if_datetimelike(values)\n return astype_array(values, dtype=future_dtype, copy=False)\n\n dtype = self.dtype.update_dtype(dtype)\n subtype = pandas_dtype(dtype._subtype_with_str)\n subtype = cast(np.dtype, subtype) # ensured by update_dtype\n values = ensure_wrapped_if_datetimelike(self.sp_values)\n sp_values = astype_array(values, subtype, copy=copy)\n sp_values = np.asarray(sp_values)\n\n return self._simple_new(sp_values, self.sp_index, dtype)\n\n def map(self, mapper, na_action=None) -> Self:\n """\n Map categories using an input mapping or function.\n\n Parameters\n ----------\n mapper : dict, Series, callable\n The correspondence from old values to new.\n na_action : {None, 'ignore'}, default None\n If 'ignore', propagate NA values, without passing them to the\n mapping correspondence.\n\n Returns\n -------\n SparseArray\n The output array will have the same density as the input.\n The output fill value will be the result of applying the\n mapping to ``self.fill_value``\n\n Examples\n --------\n >>> arr = pd.arrays.SparseArray([0, 1, 2])\n >>> arr.map(lambda x: x + 10)\n [10, 11, 12]\n Fill: 10\n IntIndex\n Indices: array([1, 2], dtype=int32)\n\n >>> arr.map({0: 10, 1: 11, 2: 12})\n [10, 11, 12]\n Fill: 10\n IntIndex\n Indices: array([1, 2], dtype=int32)\n\n >>> arr.map(pd.Series([10, 11, 12], index=[0, 1, 2]))\n [10, 11, 12]\n Fill: 10\n IntIndex\n Indices: array([1, 2], dtype=int32)\n """\n is_map = isinstance(mapper, (abc.Mapping, ABCSeries))\n\n fill_val = self.fill_value\n\n if na_action is None or notna(fill_val):\n fill_val = mapper.get(fill_val, fill_val) if is_map else mapper(fill_val)\n\n def 
func(sp_val):\n new_sp_val = mapper.get(sp_val, None) if is_map else mapper(sp_val)\n # check identity and equality because nans are not equal to each other\n if new_sp_val is fill_val or new_sp_val == fill_val:\n msg = "fill value in the sparse values not supported"\n raise ValueError(msg)\n return new_sp_val\n\n sp_values = [func(x) for x in self.sp_values]\n\n return type(self)(sp_values, sparse_index=self.sp_index, fill_value=fill_val)\n\n def to_dense(self) -> np.ndarray:\n """\n Convert SparseArray to a NumPy array.\n\n Returns\n -------\n arr : NumPy array\n """\n return np.asarray(self, dtype=self.sp_values.dtype)\n\n def _where(self, mask, value):\n # NB: may not preserve dtype, e.g. result may be Sparse[float64]\n # while self is Sparse[int64]\n naive_implementation = np.where(mask, self, value)\n dtype = SparseDtype(naive_implementation.dtype, fill_value=self.fill_value)\n result = type(self)._from_sequence(naive_implementation, dtype=dtype)\n return result\n\n # ------------------------------------------------------------------------\n # IO\n # ------------------------------------------------------------------------\n def __setstate__(self, state) -> None:\n """Necessary for making this object picklable"""\n if isinstance(state, tuple):\n # Compat for pandas < 0.24.0\n nd_state, (fill_value, sp_index) = state\n sparse_values = np.array([])\n sparse_values.__setstate__(nd_state)\n\n self._sparse_values = sparse_values\n self._sparse_index = sp_index\n self._dtype = SparseDtype(sparse_values.dtype, fill_value)\n else:\n self.__dict__.update(state)\n\n def nonzero(self) -> tuple[npt.NDArray[np.int32]]:\n if self.fill_value == 0:\n return (self.sp_index.indices,)\n else:\n return (self.sp_index.indices[self.sp_values != 0],)\n\n # ------------------------------------------------------------------------\n # Reductions\n # ------------------------------------------------------------------------\n\n def _reduce(\n self, name: str, *, skipna: bool = True, 
keepdims: bool = False, **kwargs\n ):\n method = getattr(self, name, None)\n\n if method is None:\n raise TypeError(f"cannot perform {name} with type {self.dtype}")\n\n if skipna:\n arr = self\n else:\n arr = self.dropna()\n\n result = getattr(arr, name)(**kwargs)\n\n if keepdims:\n return type(self)([result], dtype=self.dtype)\n else:\n return result\n\n def all(self, axis=None, *args, **kwargs):\n """\n Tests whether all elements evaluate True\n\n Returns\n -------\n all : bool\n\n See Also\n --------\n numpy.all\n """\n nv.validate_all(args, kwargs)\n\n values = self.sp_values\n\n if len(values) != len(self) and not np.all(self.fill_value):\n return False\n\n return values.all()\n\n def any(self, axis: AxisInt = 0, *args, **kwargs) -> bool:\n """\n Tests whether at least one of elements evaluate True\n\n Returns\n -------\n any : bool\n\n See Also\n --------\n numpy.any\n """\n nv.validate_any(args, kwargs)\n\n values = self.sp_values\n\n if len(values) != len(self) and np.any(self.fill_value):\n return True\n\n return values.any().item()\n\n def sum(\n self,\n axis: AxisInt = 0,\n min_count: int = 0,\n skipna: bool = True,\n *args,\n **kwargs,\n ) -> Scalar:\n """\n Sum of non-NA/null values\n\n Parameters\n ----------\n axis : int, default 0\n Not Used. NumPy compatibility.\n min_count : int, default 0\n The required number of valid values to perform the summation. If fewer\n than ``min_count`` valid values are present, the result will be the missing\n value indicator for subarray type.\n *args, **kwargs\n Not Used. 
NumPy compatibility.\n\n Returns\n -------\n scalar\n """\n nv.validate_sum(args, kwargs)\n valid_vals = self._valid_sp_values\n sp_sum = valid_vals.sum()\n has_na = self.sp_index.ngaps > 0 and not self._null_fill_value\n\n if has_na and not skipna:\n return na_value_for_dtype(self.dtype.subtype, compat=False)\n\n if self._null_fill_value:\n if check_below_min_count(valid_vals.shape, None, min_count):\n return na_value_for_dtype(self.dtype.subtype, compat=False)\n return sp_sum\n else:\n nsparse = self.sp_index.ngaps\n if check_below_min_count(valid_vals.shape, None, min_count - nsparse):\n return na_value_for_dtype(self.dtype.subtype, compat=False)\n return sp_sum + self.fill_value * nsparse\n\n def cumsum(self, axis: AxisInt = 0, *args, **kwargs) -> SparseArray:\n """\n Cumulative sum of non-NA/null values.\n\n When performing the cumulative summation, any non-NA/null values will\n be skipped. The resulting SparseArray will preserve the locations of\n NaN values, but the fill value will be `np.nan` regardless.\n\n Parameters\n ----------\n axis : int or None\n Axis over which to perform the cumulative summation. 
If None,\n perform cumulative summation over flattened array.\n\n Returns\n -------\n cumsum : SparseArray\n """\n nv.validate_cumsum(args, kwargs)\n\n if axis is not None and axis >= self.ndim: # Mimic ndarray behaviour.\n raise ValueError(f"axis(={axis}) out of bounds")\n\n if not self._null_fill_value:\n return SparseArray(self.to_dense()).cumsum()\n\n return SparseArray(\n self.sp_values.cumsum(),\n sparse_index=self.sp_index,\n fill_value=self.fill_value,\n )\n\n def mean(self, axis: Axis = 0, *args, **kwargs):\n """\n Mean of non-NA/null values\n\n Returns\n -------\n mean : float\n """\n nv.validate_mean(args, kwargs)\n valid_vals = self._valid_sp_values\n sp_sum = valid_vals.sum()\n ct = len(valid_vals)\n\n if self._null_fill_value:\n return sp_sum / ct\n else:\n nsparse = self.sp_index.ngaps\n return (sp_sum + self.fill_value * nsparse) / (ct + nsparse)\n\n def max(self, *, axis: AxisInt | None = None, skipna: bool = True):\n """\n Max of array values, ignoring NA values if specified.\n\n Parameters\n ----------\n axis : int, default 0\n Not Used. NumPy compatibility.\n skipna : bool, default True\n Whether to ignore NA values.\n\n Returns\n -------\n scalar\n """\n nv.validate_minmax_axis(axis, self.ndim)\n return self._min_max("max", skipna=skipna)\n\n def min(self, *, axis: AxisInt | None = None, skipna: bool = True):\n """\n Min of array values, ignoring NA values if specified.\n\n Parameters\n ----------\n axis : int, default 0\n Not Used. 
NumPy compatibility.\n skipna : bool, default True\n Whether to ignore NA values.\n\n Returns\n -------\n scalar\n """\n nv.validate_minmax_axis(axis, self.ndim)\n return self._min_max("min", skipna=skipna)\n\n def _min_max(self, kind: Literal["min", "max"], skipna: bool) -> Scalar:\n """\n Min/max of non-NA/null values\n\n Parameters\n ----------\n kind : {"min", "max"}\n skipna : bool\n\n Returns\n -------\n scalar\n """\n valid_vals = self._valid_sp_values\n has_nonnull_fill_vals = not self._null_fill_value and self.sp_index.ngaps > 0\n\n if len(valid_vals) > 0:\n sp_min_max = getattr(valid_vals, kind)()\n\n # If a non-null fill value is currently present, it might be the min/max\n if has_nonnull_fill_vals:\n func = max if kind == "max" else min\n return func(sp_min_max, self.fill_value)\n elif skipna:\n return sp_min_max\n elif self.sp_index.ngaps == 0:\n # No NAs present\n return sp_min_max\n else:\n return na_value_for_dtype(self.dtype.subtype, compat=False)\n elif has_nonnull_fill_vals:\n return self.fill_value\n else:\n return na_value_for_dtype(self.dtype.subtype, compat=False)\n\n def _argmin_argmax(self, kind: Literal["argmin", "argmax"]) -> int:\n values = self._sparse_values\n index = self._sparse_index.indices\n mask = np.asarray(isna(values))\n func = np.argmax if kind == "argmax" else np.argmin\n\n idx = np.arange(values.shape[0])\n non_nans = values[~mask]\n non_nan_idx = idx[~mask]\n\n _candidate = non_nan_idx[func(non_nans)]\n candidate = index[_candidate]\n\n if isna(self.fill_value):\n return candidate\n if kind == "argmin" and self[candidate] < self.fill_value:\n return candidate\n if kind == "argmax" and self[candidate] > self.fill_value:\n return candidate\n _loc = self._first_fill_value_loc()\n if _loc == -1:\n # fill_value doesn't exist\n return candidate\n else:\n return _loc\n\n def argmax(self, skipna: bool = True) -> int:\n validate_bool_kwarg(skipna, "skipna")\n if not skipna and self._hasna:\n raise NotImplementedError\n return 
self._argmin_argmax("argmax")\n\n def argmin(self, skipna: bool = True) -> int:\n validate_bool_kwarg(skipna, "skipna")\n if not skipna and self._hasna:\n raise NotImplementedError\n return self._argmin_argmax("argmin")\n\n # ------------------------------------------------------------------------\n # Ufuncs\n # ------------------------------------------------------------------------\n\n _HANDLED_TYPES = (np.ndarray, numbers.Number)\n\n def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):\n out = kwargs.get("out", ())\n\n for x in inputs + out:\n if not isinstance(x, self._HANDLED_TYPES + (SparseArray,)):\n return NotImplemented\n\n # for binary ops, use our custom dunder methods\n result = arraylike.maybe_dispatch_ufunc_to_dunder_op(\n self, ufunc, method, *inputs, **kwargs\n )\n if result is not NotImplemented:\n return result\n\n if "out" in kwargs:\n # e.g. tests.arrays.sparse.test_arithmetics.test_ndarray_inplace\n res = arraylike.dispatch_ufunc_with_out(\n self, ufunc, method, *inputs, **kwargs\n )\n return res\n\n if method == "reduce":\n result = arraylike.dispatch_reduction_ufunc(\n self, ufunc, method, *inputs, **kwargs\n )\n if result is not NotImplemented:\n # e.g. tests.series.test_ufunc.TestNumpyReductions\n return result\n\n if len(inputs) == 1:\n # No alignment necessary.\n sp_values = getattr(ufunc, method)(self.sp_values, **kwargs)\n fill_value = getattr(ufunc, method)(self.fill_value, **kwargs)\n\n if ufunc.nout > 1:\n # multiple outputs. e.g. modf\n arrays = tuple(\n self._simple_new(\n sp_value, self.sp_index, SparseDtype(sp_value.dtype, fv)\n )\n for sp_value, fv in zip(sp_values, fill_value)\n )\n return arrays\n elif method == "reduce":\n # e.g. 
reductions\n return sp_values\n\n return self._simple_new(\n sp_values, self.sp_index, SparseDtype(sp_values.dtype, fill_value)\n )\n\n new_inputs = tuple(np.asarray(x) for x in inputs)\n result = getattr(ufunc, method)(*new_inputs, **kwargs)\n if out:\n if len(out) == 1:\n out = out[0]\n return out\n\n if ufunc.nout > 1:\n return tuple(type(self)(x) for x in result)\n elif method == "at":\n # no return value\n return None\n else:\n return type(self)(result)\n\n # ------------------------------------------------------------------------\n # Ops\n # ------------------------------------------------------------------------\n\n def _arith_method(self, other, op):\n op_name = op.__name__\n\n if isinstance(other, SparseArray):\n return _sparse_array_op(self, other, op, op_name)\n\n elif is_scalar(other):\n with np.errstate(all="ignore"):\n fill = op(_get_fill(self), np.asarray(other))\n result = op(self.sp_values, other)\n\n if op_name == "divmod":\n left, right = result\n lfill, rfill = fill\n return (\n _wrap_result(op_name, left, self.sp_index, lfill),\n _wrap_result(op_name, right, self.sp_index, rfill),\n )\n\n return _wrap_result(op_name, result, self.sp_index, fill)\n\n else:\n other = np.asarray(other)\n with np.errstate(all="ignore"):\n if len(self) != len(other):\n raise AssertionError(\n f"length mismatch: {len(self)} vs. 
{len(other)}"\n )\n if not isinstance(other, SparseArray):\n dtype = getattr(other, "dtype", None)\n other = SparseArray(other, fill_value=self.fill_value, dtype=dtype)\n return _sparse_array_op(self, other, op, op_name)\n\n def _cmp_method(self, other, op) -> SparseArray:\n if not is_scalar(other) and not isinstance(other, type(self)):\n # convert list-like to ndarray\n other = np.asarray(other)\n\n if isinstance(other, np.ndarray):\n # TODO: make this more flexible than just ndarray...\n other = SparseArray(other, fill_value=self.fill_value)\n\n if isinstance(other, SparseArray):\n if len(self) != len(other):\n raise ValueError(\n f"operands have mismatched length {len(self)} and {len(other)}"\n )\n\n op_name = op.__name__.strip("_")\n return _sparse_array_op(self, other, op, op_name)\n else:\n # scalar\n fill_value = op(self.fill_value, other)\n result = np.full(len(self), fill_value, dtype=np.bool_)\n result[self.sp_index.indices] = op(self.sp_values, other)\n\n return type(self)(\n result,\n fill_value=fill_value,\n dtype=np.bool_,\n )\n\n _logical_method = _cmp_method\n\n def _unary_method(self, op) -> SparseArray:\n fill_value = op(np.array(self.fill_value)).item()\n dtype = SparseDtype(self.dtype.subtype, fill_value)\n # NOTE: if fill_value doesn't change\n # we just have to apply op to sp_values\n if isna(self.fill_value) or fill_value == self.fill_value:\n values = op(self.sp_values)\n return type(self)._simple_new(values, self.sp_index, self.dtype)\n # In the other case we have to recalc indexes\n return type(self)(op(self.to_dense()), dtype=dtype)\n\n def __pos__(self) -> SparseArray:\n return self._unary_method(operator.pos)\n\n def __neg__(self) -> SparseArray:\n return self._unary_method(operator.neg)\n\n def __invert__(self) -> SparseArray:\n return self._unary_method(operator.invert)\n\n def __abs__(self) -> SparseArray:\n return self._unary_method(operator.abs)\n\n # ----------\n # Formatting\n # -----------\n def __repr__(self) -> str:\n pp_str = 
printing.pprint_thing(self)\n pp_fill = printing.pprint_thing(self.fill_value)\n pp_index = printing.pprint_thing(self.sp_index)\n return f"{pp_str}\nFill: {pp_fill}\n{pp_index}"\n\n def _formatter(self, boxed: bool = False):\n # Defer to the formatter from the GenericArrayFormatter calling us.\n # This will infer the correct formatter from the dtype of the values.\n return None\n\n\ndef _make_sparse(\n arr: np.ndarray,\n kind: SparseIndexKind = "block",\n fill_value=None,\n dtype: np.dtype | None = None,\n):\n """\n Convert ndarray to sparse format\n\n Parameters\n ----------\n arr : ndarray\n kind : {'block', 'integer'}\n fill_value : NaN or another value\n dtype : np.dtype, optional\n copy : bool, default False\n\n Returns\n -------\n (sparse_values, index, fill_value) : (ndarray, SparseIndex, Scalar)\n """\n assert isinstance(arr, np.ndarray)\n\n if arr.ndim > 1:\n raise TypeError("expected dimension <= 1 data")\n\n if fill_value is None:\n fill_value = na_value_for_dtype(arr.dtype)\n\n if isna(fill_value):\n mask = notna(arr)\n else:\n # cast to object comparison to be safe\n if is_string_dtype(arr.dtype):\n arr = arr.astype(object)\n\n if is_object_dtype(arr.dtype):\n # element-wise equality check method in numpy doesn't treat\n # each element type, eg. 0, 0.0, and False are treated as\n # same. 
So we have to check the both of its type and value.\n mask = splib.make_mask_object_ndarray(arr, fill_value)\n else:\n mask = arr != fill_value\n\n length = len(arr)\n if length != len(mask):\n # the arr is a SparseArray\n indices = mask.sp_index.indices\n else:\n indices = mask.nonzero()[0].astype(np.int32)\n\n index = make_sparse_index(length, indices, kind)\n sparsified_values = arr[mask]\n if dtype is not None:\n sparsified_values = ensure_wrapped_if_datetimelike(sparsified_values)\n sparsified_values = astype_array(sparsified_values, dtype=dtype)\n sparsified_values = np.asarray(sparsified_values)\n\n # TODO: copy\n return sparsified_values, index, fill_value\n\n\n@overload\ndef make_sparse_index(length: int, indices, kind: Literal["block"]) -> BlockIndex:\n ...\n\n\n@overload\ndef make_sparse_index(length: int, indices, kind: Literal["integer"]) -> IntIndex:\n ...\n\n\ndef make_sparse_index(length: int, indices, kind: SparseIndexKind) -> SparseIndex:\n index: SparseIndex\n if kind == "block":\n locs, lens = splib.get_blocks(indices)\n index = BlockIndex(length, locs, lens)\n elif kind == "integer":\n index = IntIndex(length, indices)\n else: # pragma: no cover\n raise ValueError("must be block or integer type")\n return index\n
.venv\Lib\site-packages\pandas\core\arrays\sparse\array.py
array.py
Python
64,585
0.75
0.137789
0.107474
react-lib
267
2024-04-06T23:46:53.172881
Apache-2.0
false
0cb1fe95ddb94472967ff74943911ba8
"""\nInteraction with scipy.sparse matrices.\n\nCurrently only includes to_coo helpers.\n"""\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom pandas._libs import lib\n\nfrom pandas.core.dtypes.missing import notna\n\nfrom pandas.core.algorithms import factorize\nfrom pandas.core.indexes.api import MultiIndex\nfrom pandas.core.series import Series\n\nif TYPE_CHECKING:\n from collections.abc import Iterable\n\n import numpy as np\n import scipy.sparse\n\n from pandas._typing import (\n IndexLabel,\n npt,\n )\n\n\ndef _check_is_partition(parts: Iterable, whole: Iterable):\n whole = set(whole)\n parts = [set(x) for x in parts]\n if set.intersection(*parts) != set():\n raise ValueError("Is not a partition because intersection is not null.")\n if set.union(*parts) != whole:\n raise ValueError("Is not a partition because union is not the whole.")\n\n\ndef _levels_to_axis(\n ss,\n levels: tuple[int] | list[int],\n valid_ilocs: npt.NDArray[np.intp],\n sort_labels: bool = False,\n) -> tuple[npt.NDArray[np.intp], list[IndexLabel]]:\n """\n For a MultiIndexed sparse Series `ss`, return `ax_coords` and `ax_labels`,\n where `ax_coords` are the coordinates along one of the two axes of the\n destination sparse matrix, and `ax_labels` are the labels from `ss`' Index\n which correspond to these coordinates.\n\n Parameters\n ----------\n ss : Series\n levels : tuple/list\n valid_ilocs : numpy.ndarray\n Array of integer positions of valid values for the sparse matrix in ss.\n sort_labels : bool, default False\n Sort the axis labels before forming the sparse matrix. 
When `levels`\n refers to a single level, set to True for a faster execution.\n\n Returns\n -------\n ax_coords : numpy.ndarray (axis coordinates)\n ax_labels : list (axis labels)\n """\n # Since the labels are sorted in `Index.levels`, when we wish to sort and\n # there is only one level of the MultiIndex for this axis, the desired\n # output can be obtained in the following simpler, more efficient way.\n if sort_labels and len(levels) == 1:\n ax_coords = ss.index.codes[levels[0]][valid_ilocs]\n ax_labels = ss.index.levels[levels[0]]\n\n else:\n levels_values = lib.fast_zip(\n [ss.index.get_level_values(lvl).to_numpy() for lvl in levels]\n )\n codes, ax_labels = factorize(levels_values, sort=sort_labels)\n ax_coords = codes[valid_ilocs]\n\n ax_labels = ax_labels.tolist()\n return ax_coords, ax_labels\n\n\ndef _to_ijv(\n ss,\n row_levels: tuple[int] | list[int] = (0,),\n column_levels: tuple[int] | list[int] = (1,),\n sort_labels: bool = False,\n) -> tuple[\n np.ndarray,\n npt.NDArray[np.intp],\n npt.NDArray[np.intp],\n list[IndexLabel],\n list[IndexLabel],\n]:\n """\n For an arbitrary MultiIndexed sparse Series return (v, i, j, ilabels,\n jlabels) where (v, (i, j)) is suitable for passing to scipy.sparse.coo\n constructor, and ilabels and jlabels are the row and column labels\n respectively.\n\n Parameters\n ----------\n ss : Series\n row_levels : tuple/list\n column_levels : tuple/list\n sort_labels : bool, default False\n Sort the row and column labels before forming the sparse matrix.\n When `row_levels` and/or `column_levels` refer to a single level,\n set to `True` for a faster execution.\n\n Returns\n -------\n values : numpy.ndarray\n Valid values to populate a sparse matrix, extracted from\n ss.\n i_coords : numpy.ndarray (row coordinates of the values)\n j_coords : numpy.ndarray (column coordinates of the values)\n i_labels : list (row labels)\n j_labels : list (column labels)\n """\n # index and column levels must be a partition of the index\n 
_check_is_partition([row_levels, column_levels], range(ss.index.nlevels))\n # From the sparse Series, get the integer indices and data for valid sparse\n # entries.\n sp_vals = ss.array.sp_values\n na_mask = notna(sp_vals)\n values = sp_vals[na_mask]\n valid_ilocs = ss.array.sp_index.indices[na_mask]\n\n i_coords, i_labels = _levels_to_axis(\n ss, row_levels, valid_ilocs, sort_labels=sort_labels\n )\n\n j_coords, j_labels = _levels_to_axis(\n ss, column_levels, valid_ilocs, sort_labels=sort_labels\n )\n\n return values, i_coords, j_coords, i_labels, j_labels\n\n\ndef sparse_series_to_coo(\n ss: Series,\n row_levels: Iterable[int] = (0,),\n column_levels: Iterable[int] = (1,),\n sort_labels: bool = False,\n) -> tuple[scipy.sparse.coo_matrix, list[IndexLabel], list[IndexLabel]]:\n """\n Convert a sparse Series to a scipy.sparse.coo_matrix using index\n levels row_levels, column_levels as the row and column\n labels respectively. Returns the sparse_matrix, row and column labels.\n """\n import scipy.sparse\n\n if ss.index.nlevels < 2:\n raise ValueError("to_coo requires MultiIndex with nlevels >= 2.")\n if not ss.index.is_unique:\n raise ValueError(\n "Duplicate index entries are not allowed in to_coo transformation."\n )\n\n # to keep things simple, only rely on integer indexing (not labels)\n row_levels = [ss.index._get_level_number(x) for x in row_levels]\n column_levels = [ss.index._get_level_number(x) for x in column_levels]\n\n v, i, j, rows, columns = _to_ijv(\n ss, row_levels=row_levels, column_levels=column_levels, sort_labels=sort_labels\n )\n sparse_matrix = scipy.sparse.coo_matrix(\n (v, (i, j)), shape=(len(rows), len(columns))\n )\n return sparse_matrix, rows, columns\n\n\ndef coo_to_sparse_series(\n A: scipy.sparse.coo_matrix, dense_index: bool = False\n) -> Series:\n """\n Convert a scipy.sparse.coo_matrix to a Series with type sparse.\n\n Parameters\n ----------\n A : scipy.sparse.coo_matrix\n dense_index : bool, default False\n\n Returns\n -------\n 
Series\n\n Raises\n ------\n TypeError if A is not a coo_matrix\n """\n from pandas import SparseDtype\n\n try:\n ser = Series(A.data, MultiIndex.from_arrays((A.row, A.col)), copy=False)\n except AttributeError as err:\n raise TypeError(\n f"Expected coo_matrix. Got {type(A).__name__} instead."\n ) from err\n ser = ser.sort_index()\n ser = ser.astype(SparseDtype(ser.dtype))\n if dense_index:\n ind = MultiIndex.from_product([A.row, A.col])\n ser = ser.reindex(ind)\n return ser\n
.venv\Lib\site-packages\pandas\core\arrays\sparse\scipy_sparse.py
scipy_sparse.py
Python
6,462
0.95
0.115942
0.040462
node-utils
697
2024-06-21T09:39:29.366824
MIT
false
f16dae3cde4a4cb7df0523d4bd67df82
from pandas.core.arrays.sparse.accessor import (\n SparseAccessor,\n SparseFrameAccessor,\n)\nfrom pandas.core.arrays.sparse.array import (\n BlockIndex,\n IntIndex,\n SparseArray,\n make_sparse_index,\n)\n\n__all__ = [\n "BlockIndex",\n "IntIndex",\n "make_sparse_index",\n "SparseAccessor",\n "SparseArray",\n "SparseFrameAccessor",\n]\n
.venv\Lib\site-packages\pandas\core\arrays\sparse\__init__.py
__init__.py
Python
356
0.85
0
0
vue-tools
857
2024-05-02T03:24:28.242820
BSD-3-Clause
false
ac4228e4566bf7172e7adb6f2aafdd99
\n\n
.venv\Lib\site-packages\pandas\core\arrays\sparse\__pycache__\accessor.cpython-313.pyc
accessor.cpython-313.pyc
Other
14,691
0.95
0.020478
0
awesome-app
263
2025-03-18T23:30:35.972081
GPL-3.0
false
2479bb6525ab6b5ca3e0c1d7ed974a1a
\n\n
.venv\Lib\site-packages\pandas\core\arrays\sparse\__pycache__\array.cpython-313.pyc
array.cpython-313.pyc
Other
70,045
0.75
0.020737
0.009044
vue-tools
143
2024-04-05T01:57:14.005198
MIT
false
82134a7986004ac81599e64ab25d1a87
\n\n
.venv\Lib\site-packages\pandas\core\arrays\sparse\__pycache__\scipy_sparse.cpython-313.pyc
scipy_sparse.cpython-313.pyc
Other
7,946
0.95
0.035971
0.007874
python-kit
887
2024-03-09T22:34:58.144619
Apache-2.0
false
698df8cfc836e9dbe7cf0448c7c2d19f
\n\n
.venv\Lib\site-packages\pandas\core\arrays\sparse\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
507
0.7
0
0
awesome-app
214
2024-02-28T04:01:58.261083
BSD-3-Clause
false
367bc0658d18a40894c22102d4e54268
\n\n
.venv\Lib\site-packages\pandas\core\arrays\__pycache__\base.cpython-313.pyc
base.cpython-313.pyc
Other
80,216
0.75
0.068732
0.034143
awesome-app
667
2024-06-15T13:10:13.297173
Apache-2.0
false
dd6d2052e8f7a7e19858922f13accf52
\n\n
.venv\Lib\site-packages\pandas\core\arrays\__pycache__\boolean.cpython-313.pyc
boolean.cpython-313.pyc
Other
16,219
0.95
0.037234
0.006024
awesome-app
228
2024-11-12T02:16:52.249173
BSD-3-Clause
false
10569e153b85f46879d5c2eb4600bf29
\n\n
.venv\Lib\site-packages\pandas\core\arrays\__pycache__\categorical.cpython-313.pyc
categorical.cpython-313.pyc
Other
101,325
0.75
0.044503
0.00425
vue-tools
251
2024-12-12T19:16:38.119873
Apache-2.0
false
fd139fd0a1742ac3b37863c6fb12c7f8
\n\n
.venv\Lib\site-packages\pandas\core\arrays\__pycache__\datetimelike.cpython-313.pyc
datetimelike.cpython-313.pyc
Other
97,283
0.75
0.034545
0.028283
python-kit
1,000
2025-06-26T15:48:05.311527
MIT
false
beb190aa02af4a15179f48a0e1519be1
\n\n
.venv\Lib\site-packages\pandas\core\arrays\__pycache__\datetimes.cpython-313.pyc
datetimes.cpython-313.pyc
Other
86,308
0.75
0.024899
0.005246
react-lib
224
2024-02-01T16:10:55.834401
Apache-2.0
false
6c5f16a4c1864a92bbcaa52fc01ae188
\n\n
.venv\Lib\site-packages\pandas\core\arrays\__pycache__\floating.cpython-313.pyc
floating.cpython-313.pyc
Other
5,230
0.8
0.021583
0
react-lib
496
2024-12-03T05:18:34.095159
BSD-3-Clause
false
84fec3811a2d42c635a7d921daa30496
\n\n
.venv\Lib\site-packages\pandas\core\arrays\__pycache__\integer.cpython-313.pyc
integer.cpython-313.pyc
Other
9,088
0.8
0.019048
0
node-utils
636
2025-03-25T10:32:44.791809
BSD-3-Clause
false
da718a21043acad722983f8690e26286
\n\n
.venv\Lib\site-packages\pandas\core\arrays\__pycache__\interval.cpython-313.pyc
interval.cpython-313.pyc
Other
69,631
0.75
0.039614
0.006608
react-lib
639
2024-05-01T03:32:19.489489
BSD-3-Clause
false
7c04809c8c9d40eeb0e0b7b43b33e162
\n\n
.venv\Lib\site-packages\pandas\core\arrays\__pycache__\masked.cpython-313.pyc
masked.cpython-313.pyc
Other
65,115
0.75
0.036179
0.013051
react-lib
677
2024-04-24T20:56:02.287846
BSD-3-Clause
false
98e681c91e18390db13cc71fd8d62433
\n\n
.venv\Lib\site-packages\pandas\core\arrays\__pycache__\numeric.cpython-313.pyc
numeric.cpython-313.pyc
Other
12,006
0.95
0.030303
0.010526
vue-tools
17
2024-06-11T17:32:14.624040
BSD-3-Clause
false
1e782d004e7e8e07fb967f6df5c68c31
\n\n
.venv\Lib\site-packages\pandas\core\arrays\__pycache__\numpy_.cpython-313.pyc
numpy_.cpython-313.pyc
Other
20,434
0.95
0.017045
0.006135
vue-tools
82
2023-09-08T18:48:49.747396
BSD-3-Clause
false
783de5c2be3aa797293885d653548bfd
\n\n
.venv\Lib\site-packages\pandas\core\arrays\__pycache__\period.cpython-313.pyc
period.cpython-313.pyc
Other
45,890
0.95
0.024279
0.003431
awesome-app
652
2024-03-30T07:10:20.094703
GPL-3.0
false
7367c4ea6aa969ab710c52b9e63c1ac9
\n\n
.venv\Lib\site-packages\pandas\core\arrays\__pycache__\string_.cpython-313.pyc
string_.cpython-313.pyc
Other
41,395
0.95
0.043902
0.00551
python-kit
590
2024-03-24T09:14:26.843183
GPL-3.0
false
c50b4a80c25dc3f5532c1521e49a3d2e
\n\n
.venv\Lib\site-packages\pandas\core\arrays\__pycache__\string_arrow.cpython-313.pyc
string_arrow.cpython-313.pyc
Other
24,306
0.95
0.041237
0
react-lib
433
2025-06-30T13:32:04.819926
Apache-2.0
false
22a80537603ab0bacec364b6a2fad443
\n\n
.venv\Lib\site-packages\pandas\core\arrays\__pycache__\timedeltas.cpython-313.pyc
timedeltas.cpython-313.pyc
Other
42,613
0.95
0.037182
0.008909
awesome-app
833
2023-12-10T16:41:59.056427
MIT
false
5759ceb9a1d90c2374ba526f5de77171
\n\n
.venv\Lib\site-packages\pandas\core\arrays\__pycache__\_arrow_string_mixins.cpython-313.pyc
_arrow_string_mixins.cpython-313.pyc
Other
19,734
0.8
0
0.026786
python-kit
357
2024-12-23T05:33:19.825517
BSD-3-Clause
false
f0d72caeccbbe4cb6a1eefe343ea8445
\n\n
.venv\Lib\site-packages\pandas\core\arrays\__pycache__\_mixins.cpython-313.pyc
_mixins.cpython-313.pyc
Other
20,375
0.8
0.010152
0.00565
awesome-app
144
2025-03-07T15:34:16.074492
GPL-3.0
false
67db023234c66df67ae2a2e31247c8c1
\n\n
.venv\Lib\site-packages\pandas\core\arrays\__pycache__\_ranges.cpython-313.pyc
_ranges.cpython-313.pyc
Other
7,165
0.95
0.028986
0.046154
react-lib
905
2024-11-14T04:34:13.988267
BSD-3-Clause
false
0bc52a849c9783efa029931adb9bed7e
\n\n
.venv\Lib\site-packages\pandas\core\arrays\__pycache__\_utils.cpython-313.pyc
_utils.cpython-313.pyc
Other
2,557
0.8
0
0
python-kit
332
2024-10-15T11:20:19.962056
MIT
false
af50227805fb0659ad229c474b6efc16
\n\n
.venv\Lib\site-packages\pandas\core\arrays\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
1,400
0.7
0
0
react-lib
826
2024-11-12T03:22:40.256863
MIT
false
4af913459c575912f15d9a3d0005f5e2
"""\ndatetimelke_accumulations.py is for accumulations of datetimelike extension arrays\n"""\n\nfrom __future__ import annotations\n\nfrom typing import Callable\n\nimport numpy as np\n\nfrom pandas._libs import iNaT\n\nfrom pandas.core.dtypes.missing import isna\n\n\ndef _cum_func(\n func: Callable,\n values: np.ndarray,\n *,\n skipna: bool = True,\n):\n """\n Accumulations for 1D datetimelike arrays.\n\n Parameters\n ----------\n func : np.cumsum, np.maximum.accumulate, np.minimum.accumulate\n values : np.ndarray\n Numpy array with the values (can be of any dtype that support the\n operation). Values is changed is modified inplace.\n skipna : bool, default True\n Whether to skip NA.\n """\n try:\n fill_value = {\n np.maximum.accumulate: np.iinfo(np.int64).min,\n np.cumsum: 0,\n np.minimum.accumulate: np.iinfo(np.int64).max,\n }[func]\n except KeyError:\n raise ValueError(f"No accumulation for {func} implemented on BaseMaskedArray")\n\n mask = isna(values)\n y = values.view("i8")\n y[mask] = fill_value\n\n if not skipna:\n mask = np.maximum.accumulate(mask)\n\n result = func(y)\n result[mask] = iNaT\n\n if values.dtype.kind in "mM":\n return result.view(values.dtype.base)\n return result\n\n\ndef cumsum(values: np.ndarray, *, skipna: bool = True) -> np.ndarray:\n return _cum_func(np.cumsum, values, skipna=skipna)\n\n\ndef cummin(values: np.ndarray, *, skipna: bool = True):\n return _cum_func(np.minimum.accumulate, values, skipna=skipna)\n\n\ndef cummax(values: np.ndarray, *, skipna: bool = True):\n return _cum_func(np.maximum.accumulate, values, skipna=skipna)\n
.venv\Lib\site-packages\pandas\core\array_algos\datetimelike_accumulations.py
datetimelike_accumulations.py
Python
1,686
0.85
0.149254
0.020408
vue-tools
995
2024-12-07T03:57:29.328865
Apache-2.0
false
07acf1be585c80b6c3bff1683bbecefa
"""\nmasked_accumulations.py is for accumulation algorithms using a mask-based approach\nfor missing values.\n"""\n\nfrom __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n Callable,\n)\n\nimport numpy as np\n\nif TYPE_CHECKING:\n from pandas._typing import npt\n\n\ndef _cum_func(\n func: Callable,\n values: np.ndarray,\n mask: npt.NDArray[np.bool_],\n *,\n skipna: bool = True,\n):\n """\n Accumulations for 1D masked array.\n\n We will modify values in place to replace NAs with the appropriate fill value.\n\n Parameters\n ----------\n func : np.cumsum, np.cumprod, np.maximum.accumulate, np.minimum.accumulate\n values : np.ndarray\n Numpy array with the values (can be of any dtype that support the\n operation).\n mask : np.ndarray\n Boolean numpy array (True values indicate missing values).\n skipna : bool, default True\n Whether to skip NA.\n """\n dtype_info: np.iinfo | np.finfo\n if values.dtype.kind == "f":\n dtype_info = np.finfo(values.dtype.type)\n elif values.dtype.kind in "iu":\n dtype_info = np.iinfo(values.dtype.type)\n elif values.dtype.kind == "b":\n # Max value of bool is 1, but since we are setting into a boolean\n # array, 255 is fine as well. 
Min value has to be 0 when setting\n # into the boolean array.\n dtype_info = np.iinfo(np.uint8)\n else:\n raise NotImplementedError(\n f"No masked accumulation defined for dtype {values.dtype.type}"\n )\n try:\n fill_value = {\n np.cumprod: 1,\n np.maximum.accumulate: dtype_info.min,\n np.cumsum: 0,\n np.minimum.accumulate: dtype_info.max,\n }[func]\n except KeyError:\n raise NotImplementedError(\n f"No accumulation for {func} implemented on BaseMaskedArray"\n )\n\n values[mask] = fill_value\n\n if not skipna:\n mask = np.maximum.accumulate(mask)\n\n values = func(values)\n return values, mask\n\n\ndef cumsum(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True):\n return _cum_func(np.cumsum, values, mask, skipna=skipna)\n\n\ndef cumprod(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True):\n return _cum_func(np.cumprod, values, mask, skipna=skipna)\n\n\ndef cummin(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True):\n return _cum_func(np.minimum.accumulate, values, mask, skipna=skipna)\n\n\ndef cummax(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True):\n return _cum_func(np.maximum.accumulate, values, mask, skipna=skipna)\n
.venv\Lib\site-packages\pandas\core\array_algos\masked_accumulations.py
masked_accumulations.py
Python
2,618
0.95
0.155556
0.056338
react-lib
731
2024-08-31T10:01:10.982146
Apache-2.0
false
f03e4aa7cd6777693e8c139ccdaaccf3
"""\nmasked_reductions.py is for reduction algorithms using a mask-based approach\nfor missing values.\n"""\nfrom __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n Callable,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import missing as libmissing\n\nfrom pandas.core.nanops import check_below_min_count\n\nif TYPE_CHECKING:\n from pandas._typing import (\n AxisInt,\n npt,\n )\n\n\ndef _reductions(\n func: Callable,\n values: np.ndarray,\n mask: npt.NDArray[np.bool_],\n *,\n skipna: bool = True,\n min_count: int = 0,\n axis: AxisInt | None = None,\n **kwargs,\n):\n """\n Sum, mean or product for 1D masked array.\n\n Parameters\n ----------\n func : np.sum or np.prod\n values : np.ndarray\n Numpy array with the values (can be of any dtype that support the\n operation).\n mask : np.ndarray[bool]\n Boolean numpy array (True values indicate missing values).\n skipna : bool, default True\n Whether to skip NA.\n min_count : int, default 0\n The required number of valid values to perform the operation. 
If fewer than\n ``min_count`` non-NA values are present the result will be NA.\n axis : int, optional, default None\n """\n if not skipna:\n if mask.any() or check_below_min_count(values.shape, None, min_count):\n return libmissing.NA\n else:\n return func(values, axis=axis, **kwargs)\n else:\n if check_below_min_count(values.shape, mask, min_count) and (\n axis is None or values.ndim == 1\n ):\n return libmissing.NA\n\n if values.dtype == np.dtype(object):\n # object dtype does not support `where` without passing an initial\n values = values[~mask]\n return func(values, axis=axis, **kwargs)\n return func(values, where=~mask, axis=axis, **kwargs)\n\n\ndef sum(\n values: np.ndarray,\n mask: npt.NDArray[np.bool_],\n *,\n skipna: bool = True,\n min_count: int = 0,\n axis: AxisInt | None = None,\n):\n return _reductions(\n np.sum, values=values, mask=mask, skipna=skipna, min_count=min_count, axis=axis\n )\n\n\ndef prod(\n values: np.ndarray,\n mask: npt.NDArray[np.bool_],\n *,\n skipna: bool = True,\n min_count: int = 0,\n axis: AxisInt | None = None,\n):\n return _reductions(\n np.prod, values=values, mask=mask, skipna=skipna, min_count=min_count, axis=axis\n )\n\n\ndef _minmax(\n func: Callable,\n values: np.ndarray,\n mask: npt.NDArray[np.bool_],\n *,\n skipna: bool = True,\n axis: AxisInt | None = None,\n):\n """\n Reduction for 1D masked array.\n\n Parameters\n ----------\n func : np.min or np.max\n values : np.ndarray\n Numpy array with the values (can be of any dtype that support the\n operation).\n mask : np.ndarray[bool]\n Boolean numpy array (True values indicate missing values).\n skipna : bool, default True\n Whether to skip NA.\n axis : int, optional, default None\n """\n if not skipna:\n if mask.any() or not values.size:\n # min/max with empty array raise in numpy, pandas returns NA\n return libmissing.NA\n else:\n return func(values, axis=axis)\n else:\n subset = values[~mask]\n if subset.size:\n return func(subset, axis=axis)\n else:\n # min/max with 
empty array raise in numpy, pandas returns NA\n return libmissing.NA\n\n\ndef min(\n values: np.ndarray,\n mask: npt.NDArray[np.bool_],\n *,\n skipna: bool = True,\n axis: AxisInt | None = None,\n):\n return _minmax(np.min, values=values, mask=mask, skipna=skipna, axis=axis)\n\n\ndef max(\n values: np.ndarray,\n mask: npt.NDArray[np.bool_],\n *,\n skipna: bool = True,\n axis: AxisInt | None = None,\n):\n return _minmax(np.max, values=values, mask=mask, skipna=skipna, axis=axis)\n\n\ndef mean(\n values: np.ndarray,\n mask: npt.NDArray[np.bool_],\n *,\n skipna: bool = True,\n axis: AxisInt | None = None,\n):\n if not values.size or mask.all():\n return libmissing.NA\n return _reductions(np.mean, values=values, mask=mask, skipna=skipna, axis=axis)\n\n\ndef var(\n values: np.ndarray,\n mask: npt.NDArray[np.bool_],\n *,\n skipna: bool = True,\n axis: AxisInt | None = None,\n ddof: int = 1,\n):\n if not values.size or mask.all():\n return libmissing.NA\n\n with warnings.catch_warnings():\n warnings.simplefilter("ignore", RuntimeWarning)\n return _reductions(\n np.var, values=values, mask=mask, skipna=skipna, axis=axis, ddof=ddof\n )\n\n\ndef std(\n values: np.ndarray,\n mask: npt.NDArray[np.bool_],\n *,\n skipna: bool = True,\n axis: AxisInt | None = None,\n ddof: int = 1,\n):\n if not values.size or mask.all():\n return libmissing.NA\n\n with warnings.catch_warnings():\n warnings.simplefilter("ignore", RuntimeWarning)\n return _reductions(\n np.std, values=values, mask=mask, skipna=skipna, axis=axis, ddof=ddof\n )\n
.venv\Lib\site-packages\pandas\core\array_algos\masked_reductions.py
masked_reductions.py
Python
5,067
0.95
0.119403
0.075145
react-lib
620
2024-05-04T06:20:20.533023
MIT
false
8b73d3c64549aa47460068a186b29522
"""\nEA-compatible analogue to np.putmask\n"""\nfrom __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n Any,\n)\n\nimport numpy as np\n\nfrom pandas._libs import lib\n\nfrom pandas.core.dtypes.cast import infer_dtype_from\nfrom pandas.core.dtypes.common import is_list_like\n\nfrom pandas.core.arrays import ExtensionArray\n\nif TYPE_CHECKING:\n from pandas._typing import (\n ArrayLike,\n npt,\n )\n\n from pandas import MultiIndex\n\n\ndef putmask_inplace(values: ArrayLike, mask: npt.NDArray[np.bool_], value: Any) -> None:\n """\n ExtensionArray-compatible implementation of np.putmask. The main\n difference is we do not handle repeating or truncating like numpy.\n\n Parameters\n ----------\n values: np.ndarray or ExtensionArray\n mask : np.ndarray[bool]\n We assume extract_bool_array has already been called.\n value : Any\n """\n\n if (\n not isinstance(values, np.ndarray)\n or (values.dtype == object and not lib.is_scalar(value))\n # GH#43424: np.putmask raises TypeError if we cannot cast between types with\n # rule = "safe", a stricter guarantee we may not have here\n or (\n isinstance(value, np.ndarray) and not np.can_cast(value.dtype, values.dtype)\n )\n ):\n # GH#19266 using np.putmask gives unexpected results with listlike value\n # along with object dtype\n if is_list_like(value) and len(value) == len(values):\n values[mask] = value[mask]\n else:\n values[mask] = value\n else:\n # GH#37833 np.putmask is more performant than __setitem__\n np.putmask(values, mask, value)\n\n\ndef putmask_without_repeat(\n values: np.ndarray, mask: npt.NDArray[np.bool_], new: Any\n) -> None:\n """\n np.putmask will truncate or repeat if `new` is a listlike with\n len(new) != len(values). 
We require an exact match.\n\n Parameters\n ----------\n values : np.ndarray\n mask : np.ndarray[bool]\n new : Any\n """\n if getattr(new, "ndim", 0) >= 1:\n new = new.astype(values.dtype, copy=False)\n\n # TODO: this prob needs some better checking for 2D cases\n nlocs = mask.sum()\n if nlocs > 0 and is_list_like(new) and getattr(new, "ndim", 1) == 1:\n shape = np.shape(new)\n # np.shape compat for if setitem_datetimelike_compat\n # changed arraylike to list e.g. test_where_dt64_2d\n if nlocs == shape[-1]:\n # GH#30567\n # If length of ``new`` is less than the length of ``values``,\n # `np.putmask` would first repeat the ``new`` array and then\n # assign the masked values hence produces incorrect result.\n # `np.place` on the other hand uses the ``new`` values at it is\n # to place in the masked locations of ``values``\n np.place(values, mask, new)\n # i.e. values[mask] = new\n elif mask.shape[-1] == shape[-1] or shape[-1] == 1:\n np.putmask(values, mask, new)\n else:\n raise ValueError("cannot assign mismatch length to masked array")\n else:\n np.putmask(values, mask, new)\n\n\ndef validate_putmask(\n values: ArrayLike | MultiIndex, mask: np.ndarray\n) -> tuple[npt.NDArray[np.bool_], bool]:\n """\n Validate mask and check if this putmask operation is a no-op.\n """\n mask = extract_bool_array(mask)\n if mask.shape != values.shape:\n raise ValueError("putmask: mask and data must be the same size")\n\n noop = not mask.any()\n return mask, noop\n\n\ndef extract_bool_array(mask: ArrayLike) -> npt.NDArray[np.bool_]:\n """\n If we have a SparseArray or BooleanArray, convert it to ndarray[bool].\n """\n if isinstance(mask, ExtensionArray):\n # We could have BooleanArray, Sparse[bool], ...\n # Except for BooleanArray, this is equivalent to just\n # np.asarray(mask, dtype=bool)\n mask = mask.to_numpy(dtype=bool, na_value=False)\n\n mask = np.asarray(mask, dtype=bool)\n return mask\n\n\ndef setitem_datetimelike_compat(values: np.ndarray, num_set: int, other):\n """\n 
Parameters\n ----------\n values : np.ndarray\n num_set : int\n For putmask, this is mask.sum()\n other : Any\n """\n if values.dtype == object:\n dtype, _ = infer_dtype_from(other)\n\n if lib.is_np_dtype(dtype, "mM"):\n # https://github.com/numpy/numpy/issues/12550\n # timedelta64 will incorrectly cast to int\n if not is_list_like(other):\n other = [other] * num_set\n else:\n other = list(other)\n\n return other\n
.venv\Lib\site-packages\pandas\core\array_algos\putmask.py
putmask.py
Python
4,593
0.95
0.154362
0.16129
awesome-app
617
2024-05-18T01:56:44.833006
Apache-2.0
false
e896037e07fdb7584d21fba350d5659a
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\n\nfrom pandas.core.dtypes.missing import (\n isna,\n na_value_for_dtype,\n)\n\nif TYPE_CHECKING:\n from pandas._typing import (\n ArrayLike,\n Scalar,\n npt,\n )\n\n\ndef quantile_compat(\n values: ArrayLike, qs: npt.NDArray[np.float64], interpolation: str\n) -> ArrayLike:\n """\n Compute the quantiles of the given values for each quantile in `qs`.\n\n Parameters\n ----------\n values : np.ndarray or ExtensionArray\n qs : np.ndarray[float64]\n interpolation : str\n\n Returns\n -------\n np.ndarray or ExtensionArray\n """\n if isinstance(values, np.ndarray):\n fill_value = na_value_for_dtype(values.dtype, compat=False)\n mask = isna(values)\n return quantile_with_mask(values, mask, fill_value, qs, interpolation)\n else:\n return values._quantile(qs, interpolation)\n\n\ndef quantile_with_mask(\n values: np.ndarray,\n mask: npt.NDArray[np.bool_],\n fill_value,\n qs: npt.NDArray[np.float64],\n interpolation: str,\n) -> np.ndarray:\n """\n Compute the quantiles of the given values for each quantile in `qs`.\n\n Parameters\n ----------\n values : np.ndarray\n For ExtensionArray, this is _values_for_factorize()[0]\n mask : np.ndarray[bool]\n mask = isna(values)\n For ExtensionArray, this is computed before calling _value_for_factorize\n fill_value : Scalar\n The value to interpret fill NA entries with\n For ExtensionArray, this is _values_for_factorize()[1]\n qs : np.ndarray[float64]\n interpolation : str\n Type of interpolation\n\n Returns\n -------\n np.ndarray\n\n Notes\n -----\n Assumes values is already 2D. 
For ExtensionArray this means np.atleast_2d\n has been called on _values_for_factorize()[0]\n\n Quantile is computed along axis=1.\n """\n assert values.shape == mask.shape\n if values.ndim == 1:\n # unsqueeze, operate, re-squeeze\n values = np.atleast_2d(values)\n mask = np.atleast_2d(mask)\n res_values = quantile_with_mask(values, mask, fill_value, qs, interpolation)\n return res_values[0]\n\n assert values.ndim == 2\n\n is_empty = values.shape[1] == 0\n\n if is_empty:\n # create the array of na_values\n # 2d len(values) * len(qs)\n flat = np.array([fill_value] * len(qs))\n result = np.repeat(flat, len(values)).reshape(len(values), len(qs))\n else:\n result = _nanpercentile(\n values,\n qs * 100.0,\n na_value=fill_value,\n mask=mask,\n interpolation=interpolation,\n )\n\n result = np.asarray(result)\n result = result.T\n\n return result\n\n\ndef _nanpercentile_1d(\n values: np.ndarray,\n mask: npt.NDArray[np.bool_],\n qs: npt.NDArray[np.float64],\n na_value: Scalar,\n interpolation: str,\n) -> Scalar | np.ndarray:\n """\n Wrapper for np.percentile that skips missing values, specialized to\n 1-dimensional case.\n\n Parameters\n ----------\n values : array over which to find quantiles\n mask : ndarray[bool]\n locations in values that should be considered missing\n qs : np.ndarray[float64] of quantile indices to find\n na_value : scalar\n value to return for empty or all-null values\n interpolation : str\n\n Returns\n -------\n quantiles : scalar or array\n """\n # mask is Union[ExtensionArray, ndarray]\n values = values[~mask]\n\n if len(values) == 0:\n # Can't pass dtype=values.dtype here bc we might have na_value=np.nan\n # with values.dtype=int64 see test_quantile_empty\n # equiv: 'np.array([na_value] * len(qs))' but much faster\n return np.full(len(qs), na_value)\n\n return np.percentile(\n values,\n qs,\n # error: No overload variant of "percentile" matches argument\n # types "ndarray[Any, Any]", "ndarray[Any, dtype[floating[_64Bit]]]"\n # , "Dict[str, str]" 
[call-overload]\n method=interpolation, # type: ignore[call-overload]\n )\n\n\ndef _nanpercentile(\n values: np.ndarray,\n qs: npt.NDArray[np.float64],\n *,\n na_value,\n mask: npt.NDArray[np.bool_],\n interpolation: str,\n):\n """\n Wrapper for np.percentile that skips missing values.\n\n Parameters\n ----------\n values : np.ndarray[ndim=2] over which to find quantiles\n qs : np.ndarray[float64] of quantile indices to find\n na_value : scalar\n value to return for empty or all-null values\n mask : np.ndarray[bool]\n locations in values that should be considered missing\n interpolation : str\n\n Returns\n -------\n quantiles : scalar or array\n """\n\n if values.dtype.kind in "mM":\n # need to cast to integer to avoid rounding errors in numpy\n result = _nanpercentile(\n values.view("i8"),\n qs=qs,\n na_value=na_value.view("i8"),\n mask=mask,\n interpolation=interpolation,\n )\n\n # Note: we have to do `astype` and not view because in general we\n # have float result at this point, not i8\n return result.astype(values.dtype)\n\n if mask.any():\n # Caller is responsible for ensuring mask shape match\n assert mask.shape == values.shape\n result = [\n _nanpercentile_1d(val, m, qs, na_value, interpolation=interpolation)\n for (val, m) in zip(list(values), list(mask))\n ]\n if values.dtype.kind == "f":\n # preserve itemsize\n result = np.asarray(result, dtype=values.dtype).T\n else:\n result = np.asarray(result).T\n if (\n result.dtype != values.dtype\n and not mask.all()\n and (result == result.astype(values.dtype, copy=False)).all()\n ):\n # mask.all() will never get cast back to int\n # e.g. 
values id integer dtype and result is floating dtype,\n # only cast back to integer dtype if result values are all-integer.\n result = result.astype(values.dtype, copy=False)\n return result\n else:\n return np.percentile(\n values,\n qs,\n axis=1,\n # error: No overload variant of "percentile" matches argument types\n # "ndarray[Any, Any]", "ndarray[Any, dtype[floating[_64Bit]]]",\n # "int", "Dict[str, str]" [call-overload]\n method=interpolation, # type: ignore[call-overload]\n )\n
.venv\Lib\site-packages\pandas\core\array_algos\quantile.py
quantile.py
Python
6,548
0.95
0.097345
0.113402
awesome-app
867
2023-07-14T17:01:43.181013
BSD-3-Clause
false
539402025cccf1696bbd141974bf7ee8
"""\nMethods used by Block.replace and related methods.\n"""\nfrom __future__ import annotations\n\nimport operator\nimport re\nfrom re import Pattern\nfrom typing import (\n TYPE_CHECKING,\n Any,\n)\n\nimport numpy as np\n\nfrom pandas.core.dtypes.common import (\n is_bool,\n is_re,\n is_re_compilable,\n)\nfrom pandas.core.dtypes.missing import isna\n\nif TYPE_CHECKING:\n from pandas._typing import (\n ArrayLike,\n Scalar,\n npt,\n )\n\n\ndef should_use_regex(regex: bool, to_replace: Any) -> bool:\n """\n Decide whether to treat `to_replace` as a regular expression.\n """\n if is_re(to_replace):\n regex = True\n\n regex = regex and is_re_compilable(to_replace)\n\n # Don't use regex if the pattern is empty.\n regex = regex and re.compile(to_replace).pattern != ""\n return regex\n\n\ndef compare_or_regex_search(\n a: ArrayLike, b: Scalar | Pattern, regex: bool, mask: npt.NDArray[np.bool_]\n) -> ArrayLike:\n """\n Compare two array-like inputs of the same shape or two scalar values\n\n Calls operator.eq or re.search, depending on regex argument. 
If regex is\n True, perform an element-wise regex matching.\n\n Parameters\n ----------\n a : array-like\n b : scalar or regex pattern\n regex : bool\n mask : np.ndarray[bool]\n\n Returns\n -------\n mask : array-like of bool\n """\n if isna(b):\n return ~mask\n\n def _check_comparison_types(\n result: ArrayLike | bool, a: ArrayLike, b: Scalar | Pattern\n ):\n """\n Raises an error if the two arrays (a,b) cannot be compared.\n Otherwise, returns the comparison result as expected.\n """\n if is_bool(result) and isinstance(a, np.ndarray):\n type_names = [type(a).__name__, type(b).__name__]\n\n type_names[0] = f"ndarray(dtype={a.dtype})"\n\n raise TypeError(\n f"Cannot compare types {repr(type_names[0])} and {repr(type_names[1])}"\n )\n\n if not regex or not should_use_regex(regex, b):\n # TODO: should use missing.mask_missing?\n op = lambda x: operator.eq(x, b)\n else:\n op = np.vectorize(\n lambda x: bool(re.search(b, x))\n if isinstance(x, str) and isinstance(b, (str, Pattern))\n else False\n )\n\n # GH#32621 use mask to avoid comparing to NAs\n if isinstance(a, np.ndarray):\n a = a[mask]\n\n result = op(a)\n\n if isinstance(result, np.ndarray) and mask is not None:\n # The shape of the mask can differ to that of the result\n # since we may compare only a subset of a's or b's elements\n tmp = np.zeros(mask.shape, dtype=np.bool_)\n np.place(tmp, mask, result)\n result = tmp\n\n _check_comparison_types(result, a, b)\n return result\n\n\ndef replace_regex(\n values: ArrayLike, rx: re.Pattern, value, mask: npt.NDArray[np.bool_] | None\n) -> None:\n """\n Parameters\n ----------\n values : ArrayLike\n Object dtype.\n rx : re.Pattern\n value : Any\n mask : np.ndarray[bool], optional\n\n Notes\n -----\n Alters values in-place.\n """\n\n # deal with replacing values with objects (strings) that match but\n # whose replacement is not a string (numeric, nan, object)\n if isna(value) or not isinstance(value, str):\n\n def re_replacer(s):\n if is_re(rx) and isinstance(s, 
str):\n return value if rx.search(s) is not None else s\n else:\n return s\n\n else:\n # value is guaranteed to be a string here, s can be either a string\n # or null if it's null it gets returned\n def re_replacer(s):\n if is_re(rx) and isinstance(s, str):\n return rx.sub(value, s)\n else:\n return s\n\n f = np.vectorize(re_replacer, otypes=[np.object_])\n\n if mask is None:\n values[:] = f(values)\n else:\n if values.ndim != mask.ndim:\n mask = np.broadcast_to(mask, values.shape)\n values[mask] = f(values[mask])\n
.venv\Lib\site-packages\pandas\core\array_algos\replace.py
replace.py
Python
4,010
0.95
0.149351
0.072
python-kit
370
2024-05-15T22:37:39.719234
MIT
false
e4ba9d2c291979b61b40589c4c3082b8
from __future__ import annotations\n\nimport functools\nfrom typing import (\n TYPE_CHECKING,\n cast,\n overload,\n)\n\nimport numpy as np\n\nfrom pandas._libs import (\n algos as libalgos,\n lib,\n)\n\nfrom pandas.core.dtypes.cast import maybe_promote\nfrom pandas.core.dtypes.common import (\n ensure_platform_int,\n is_1d_only_ea_dtype,\n)\nfrom pandas.core.dtypes.missing import na_value_for_dtype\n\nfrom pandas.core.construction import ensure_wrapped_if_datetimelike\n\nif TYPE_CHECKING:\n from pandas._typing import (\n ArrayLike,\n AxisInt,\n npt,\n )\n\n from pandas.core.arrays._mixins import NDArrayBackedExtensionArray\n from pandas.core.arrays.base import ExtensionArray\n\n\n@overload\ndef take_nd(\n arr: np.ndarray,\n indexer,\n axis: AxisInt = ...,\n fill_value=...,\n allow_fill: bool = ...,\n) -> np.ndarray:\n ...\n\n\n@overload\ndef take_nd(\n arr: ExtensionArray,\n indexer,\n axis: AxisInt = ...,\n fill_value=...,\n allow_fill: bool = ...,\n) -> ArrayLike:\n ...\n\n\ndef take_nd(\n arr: ArrayLike,\n indexer,\n axis: AxisInt = 0,\n fill_value=lib.no_default,\n allow_fill: bool = True,\n) -> ArrayLike:\n """\n Specialized Cython take which sets NaN values in one pass\n\n This dispatches to ``take`` defined on ExtensionArrays.\n\n Note: this function assumes that the indexer is a valid(ated) indexer with\n no out of bound indices.\n\n Parameters\n ----------\n arr : np.ndarray or ExtensionArray\n Input array.\n indexer : ndarray\n 1-D array of indices to take, subarrays corresponding to -1 value\n indices are filed with fill_value\n axis : int, default 0\n Axis to take from\n fill_value : any, default np.nan\n Fill value to replace -1 values with\n allow_fill : bool, default True\n If False, indexer is assumed to contain no -1 values so no filling\n will be done. This short-circuits computation of a mask. 
Result is\n undefined if allow_fill == False and -1 is present in indexer.\n\n Returns\n -------\n subarray : np.ndarray or ExtensionArray\n May be the same type as the input, or cast to an ndarray.\n """\n if fill_value is lib.no_default:\n fill_value = na_value_for_dtype(arr.dtype, compat=False)\n elif lib.is_np_dtype(arr.dtype, "mM"):\n dtype, fill_value = maybe_promote(arr.dtype, fill_value)\n if arr.dtype != dtype:\n # EA.take is strict about returning a new object of the same type\n # so for that case cast upfront\n arr = arr.astype(dtype)\n\n if not isinstance(arr, np.ndarray):\n # i.e. ExtensionArray,\n # includes for EA to catch DatetimeArray, TimedeltaArray\n if not is_1d_only_ea_dtype(arr.dtype):\n # i.e. DatetimeArray, TimedeltaArray\n arr = cast("NDArrayBackedExtensionArray", arr)\n return arr.take(\n indexer, fill_value=fill_value, allow_fill=allow_fill, axis=axis\n )\n\n return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)\n\n arr = np.asarray(arr)\n return _take_nd_ndarray(arr, indexer, axis, fill_value, allow_fill)\n\n\ndef _take_nd_ndarray(\n arr: np.ndarray,\n indexer: npt.NDArray[np.intp] | None,\n axis: AxisInt,\n fill_value,\n allow_fill: bool,\n) -> np.ndarray:\n if indexer is None:\n indexer = np.arange(arr.shape[axis], dtype=np.intp)\n dtype, fill_value = arr.dtype, arr.dtype.type()\n else:\n indexer = ensure_platform_int(indexer)\n\n dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value(\n arr, indexer, fill_value, allow_fill\n )\n\n flip_order = False\n if arr.ndim == 2 and arr.flags.f_contiguous:\n flip_order = True\n\n if flip_order:\n arr = arr.T\n axis = arr.ndim - axis - 1\n\n # at this point, it's guaranteed that dtype can hold both the arr values\n # and the fill_value\n out_shape_ = list(arr.shape)\n out_shape_[axis] = len(indexer)\n out_shape = tuple(out_shape_)\n if arr.flags.f_contiguous and axis == arr.ndim - 1:\n # minor tweak that can make an order-of-magnitude difference\n # for dataframes 
initialized directly from 2-d ndarrays\n # (s.t. df.values is c-contiguous and df._mgr.blocks[0] is its\n # f-contiguous transpose)\n out = np.empty(out_shape, dtype=dtype, order="F")\n else:\n out = np.empty(out_shape, dtype=dtype)\n\n func = _get_take_nd_function(\n arr.ndim, arr.dtype, out.dtype, axis=axis, mask_info=mask_info\n )\n func(arr, indexer, out, fill_value)\n\n if flip_order:\n out = out.T\n return out\n\n\ndef take_1d(\n arr: ArrayLike,\n indexer: npt.NDArray[np.intp],\n fill_value=None,\n allow_fill: bool = True,\n mask: npt.NDArray[np.bool_] | None = None,\n) -> ArrayLike:\n """\n Specialized version for 1D arrays. Differences compared to `take_nd`:\n\n - Assumes input array has already been converted to numpy array / EA\n - Assumes indexer is already guaranteed to be intp dtype ndarray\n - Only works for 1D arrays\n\n To ensure the lowest possible overhead.\n\n Note: similarly to `take_nd`, this function assumes that the indexer is\n a valid(ated) indexer with no out of bound indices.\n\n Parameters\n ----------\n arr : np.ndarray or ExtensionArray\n Input array.\n indexer : ndarray\n 1-D array of indices to take (validated indices, intp dtype).\n fill_value : any, default np.nan\n Fill value to replace -1 values with\n allow_fill : bool, default True\n If False, indexer is assumed to contain no -1 values so no filling\n will be done. This short-circuits computation of a mask. 
Result is\n undefined if allow_fill == False and -1 is present in indexer.\n mask : np.ndarray, optional, default None\n If `allow_fill` is True, and the mask (where indexer == -1) is already\n known, it can be passed to avoid recomputation.\n """\n if not isinstance(arr, np.ndarray):\n # ExtensionArray -> dispatch to their method\n return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)\n\n if not allow_fill:\n return arr.take(indexer)\n\n dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value(\n arr, indexer, fill_value, True, mask\n )\n\n # at this point, it's guaranteed that dtype can hold both the arr values\n # and the fill_value\n out = np.empty(indexer.shape, dtype=dtype)\n\n func = _get_take_nd_function(\n arr.ndim, arr.dtype, out.dtype, axis=0, mask_info=mask_info\n )\n func(arr, indexer, out, fill_value)\n\n return out\n\n\ndef take_2d_multi(\n arr: np.ndarray,\n indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],\n fill_value=np.nan,\n) -> np.ndarray:\n """\n Specialized Cython take which sets NaN values in one pass.\n """\n # This is only called from one place in DataFrame._reindex_multi,\n # so we know indexer is well-behaved.\n assert indexer is not None\n assert indexer[0] is not None\n assert indexer[1] is not None\n\n row_idx, col_idx = indexer\n\n row_idx = ensure_platform_int(row_idx)\n col_idx = ensure_platform_int(col_idx)\n indexer = row_idx, col_idx\n mask_info = None\n\n # check for promotion based on types only (do this first because\n # it's faster than computing a mask)\n dtype, fill_value = maybe_promote(arr.dtype, fill_value)\n if dtype != arr.dtype:\n # check if promotion is actually required based on indexer\n row_mask = row_idx == -1\n col_mask = col_idx == -1\n row_needs = row_mask.any()\n col_needs = col_mask.any()\n mask_info = (row_mask, col_mask), (row_needs, col_needs)\n\n if not (row_needs or col_needs):\n # if not, then depromote, set fill_value to dummy\n # (it won't be used but we 
don't want the cython code\n # to crash when trying to cast it to dtype)\n dtype, fill_value = arr.dtype, arr.dtype.type()\n\n # at this point, it's guaranteed that dtype can hold both the arr values\n # and the fill_value\n out_shape = len(row_idx), len(col_idx)\n out = np.empty(out_shape, dtype=dtype)\n\n func = _take_2d_multi_dict.get((arr.dtype.name, out.dtype.name), None)\n if func is None and arr.dtype != out.dtype:\n func = _take_2d_multi_dict.get((out.dtype.name, out.dtype.name), None)\n if func is not None:\n func = _convert_wrapper(func, out.dtype)\n\n if func is not None:\n func(arr, indexer, out=out, fill_value=fill_value)\n else:\n # test_reindex_multi\n _take_2d_multi_object(\n arr, indexer, out, fill_value=fill_value, mask_info=mask_info\n )\n\n return out\n\n\n@functools.lru_cache\ndef _get_take_nd_function_cached(\n ndim: int, arr_dtype: np.dtype, out_dtype: np.dtype, axis: AxisInt\n):\n """\n Part of _get_take_nd_function below that doesn't need `mask_info` and thus\n can be cached (mask_info potentially contains a numpy ndarray which is not\n hashable and thus cannot be used as argument for cached function).\n """\n tup = (arr_dtype.name, out_dtype.name)\n if ndim == 1:\n func = _take_1d_dict.get(tup, None)\n elif ndim == 2:\n if axis == 0:\n func = _take_2d_axis0_dict.get(tup, None)\n else:\n func = _take_2d_axis1_dict.get(tup, None)\n if func is not None:\n return func\n\n # We get here with string, uint, float16, and complex dtypes that could\n # potentially be handled in algos_take_helper.\n # Also a couple with (M8[ns], object) and (m8[ns], object)\n tup = (out_dtype.name, out_dtype.name)\n if ndim == 1:\n func = _take_1d_dict.get(tup, None)\n elif ndim == 2:\n if axis == 0:\n func = _take_2d_axis0_dict.get(tup, None)\n else:\n func = _take_2d_axis1_dict.get(tup, None)\n if func is not None:\n func = _convert_wrapper(func, out_dtype)\n return func\n\n return None\n\n\ndef _get_take_nd_function(\n ndim: int,\n arr_dtype: np.dtype,\n 
out_dtype: np.dtype,\n axis: AxisInt = 0,\n mask_info=None,\n):\n """\n Get the appropriate "take" implementation for the given dimension, axis\n and dtypes.\n """\n func = None\n if ndim <= 2:\n # for this part we don't need `mask_info` -> use the cached algo lookup\n func = _get_take_nd_function_cached(ndim, arr_dtype, out_dtype, axis)\n\n if func is None:\n\n def func(arr, indexer, out, fill_value=np.nan) -> None:\n indexer = ensure_platform_int(indexer)\n _take_nd_object(\n arr, indexer, out, axis=axis, fill_value=fill_value, mask_info=mask_info\n )\n\n return func\n\n\ndef _view_wrapper(f, arr_dtype=None, out_dtype=None, fill_wrap=None):\n def wrapper(\n arr: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=np.nan\n ) -> None:\n if arr_dtype is not None:\n arr = arr.view(arr_dtype)\n if out_dtype is not None:\n out = out.view(out_dtype)\n if fill_wrap is not None:\n # FIXME: if we get here with dt64/td64 we need to be sure we have\n # matching resos\n if fill_value.dtype.kind == "m":\n fill_value = fill_value.astype("m8[ns]")\n else:\n fill_value = fill_value.astype("M8[ns]")\n fill_value = fill_wrap(fill_value)\n\n f(arr, indexer, out, fill_value=fill_value)\n\n return wrapper\n\n\ndef _convert_wrapper(f, conv_dtype):\n def wrapper(\n arr: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=np.nan\n ) -> None:\n if conv_dtype == object:\n # GH#39755 avoid casting dt64/td64 to integers\n arr = ensure_wrapped_if_datetimelike(arr)\n arr = arr.astype(conv_dtype)\n f(arr, indexer, out, fill_value=fill_value)\n\n return wrapper\n\n\n_take_1d_dict = {\n ("int8", "int8"): libalgos.take_1d_int8_int8,\n ("int8", "int32"): libalgos.take_1d_int8_int32,\n ("int8", "int64"): libalgos.take_1d_int8_int64,\n ("int8", "float64"): libalgos.take_1d_int8_float64,\n ("int16", "int16"): libalgos.take_1d_int16_int16,\n ("int16", "int32"): libalgos.take_1d_int16_int32,\n ("int16", "int64"): libalgos.take_1d_int16_int64,\n ("int16", "float64"): 
libalgos.take_1d_int16_float64,\n ("int32", "int32"): libalgos.take_1d_int32_int32,\n ("int32", "int64"): libalgos.take_1d_int32_int64,\n ("int32", "float64"): libalgos.take_1d_int32_float64,\n ("int64", "int64"): libalgos.take_1d_int64_int64,\n ("int64", "float64"): libalgos.take_1d_int64_float64,\n ("float32", "float32"): libalgos.take_1d_float32_float32,\n ("float32", "float64"): libalgos.take_1d_float32_float64,\n ("float64", "float64"): libalgos.take_1d_float64_float64,\n ("object", "object"): libalgos.take_1d_object_object,\n ("bool", "bool"): _view_wrapper(libalgos.take_1d_bool_bool, np.uint8, np.uint8),\n ("bool", "object"): _view_wrapper(libalgos.take_1d_bool_object, np.uint8, None),\n ("datetime64[ns]", "datetime64[ns]"): _view_wrapper(\n libalgos.take_1d_int64_int64, np.int64, np.int64, np.int64\n ),\n ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper(\n libalgos.take_1d_int64_int64, np.int64, np.int64, np.int64\n ),\n}\n\n_take_2d_axis0_dict = {\n ("int8", "int8"): libalgos.take_2d_axis0_int8_int8,\n ("int8", "int32"): libalgos.take_2d_axis0_int8_int32,\n ("int8", "int64"): libalgos.take_2d_axis0_int8_int64,\n ("int8", "float64"): libalgos.take_2d_axis0_int8_float64,\n ("int16", "int16"): libalgos.take_2d_axis0_int16_int16,\n ("int16", "int32"): libalgos.take_2d_axis0_int16_int32,\n ("int16", "int64"): libalgos.take_2d_axis0_int16_int64,\n ("int16", "float64"): libalgos.take_2d_axis0_int16_float64,\n ("int32", "int32"): libalgos.take_2d_axis0_int32_int32,\n ("int32", "int64"): libalgos.take_2d_axis0_int32_int64,\n ("int32", "float64"): libalgos.take_2d_axis0_int32_float64,\n ("int64", "int64"): libalgos.take_2d_axis0_int64_int64,\n ("int64", "float64"): libalgos.take_2d_axis0_int64_float64,\n ("float32", "float32"): libalgos.take_2d_axis0_float32_float32,\n ("float32", "float64"): libalgos.take_2d_axis0_float32_float64,\n ("float64", "float64"): libalgos.take_2d_axis0_float64_float64,\n ("object", "object"): libalgos.take_2d_axis0_object_object,\n 
("bool", "bool"): _view_wrapper(\n libalgos.take_2d_axis0_bool_bool, np.uint8, np.uint8\n ),\n ("bool", "object"): _view_wrapper(\n libalgos.take_2d_axis0_bool_object, np.uint8, None\n ),\n ("datetime64[ns]", "datetime64[ns]"): _view_wrapper(\n libalgos.take_2d_axis0_int64_int64, np.int64, np.int64, fill_wrap=np.int64\n ),\n ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper(\n libalgos.take_2d_axis0_int64_int64, np.int64, np.int64, fill_wrap=np.int64\n ),\n}\n\n_take_2d_axis1_dict = {\n ("int8", "int8"): libalgos.take_2d_axis1_int8_int8,\n ("int8", "int32"): libalgos.take_2d_axis1_int8_int32,\n ("int8", "int64"): libalgos.take_2d_axis1_int8_int64,\n ("int8", "float64"): libalgos.take_2d_axis1_int8_float64,\n ("int16", "int16"): libalgos.take_2d_axis1_int16_int16,\n ("int16", "int32"): libalgos.take_2d_axis1_int16_int32,\n ("int16", "int64"): libalgos.take_2d_axis1_int16_int64,\n ("int16", "float64"): libalgos.take_2d_axis1_int16_float64,\n ("int32", "int32"): libalgos.take_2d_axis1_int32_int32,\n ("int32", "int64"): libalgos.take_2d_axis1_int32_int64,\n ("int32", "float64"): libalgos.take_2d_axis1_int32_float64,\n ("int64", "int64"): libalgos.take_2d_axis1_int64_int64,\n ("int64", "float64"): libalgos.take_2d_axis1_int64_float64,\n ("float32", "float32"): libalgos.take_2d_axis1_float32_float32,\n ("float32", "float64"): libalgos.take_2d_axis1_float32_float64,\n ("float64", "float64"): libalgos.take_2d_axis1_float64_float64,\n ("object", "object"): libalgos.take_2d_axis1_object_object,\n ("bool", "bool"): _view_wrapper(\n libalgos.take_2d_axis1_bool_bool, np.uint8, np.uint8\n ),\n ("bool", "object"): _view_wrapper(\n libalgos.take_2d_axis1_bool_object, np.uint8, None\n ),\n ("datetime64[ns]", "datetime64[ns]"): _view_wrapper(\n libalgos.take_2d_axis1_int64_int64, np.int64, np.int64, fill_wrap=np.int64\n ),\n ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper(\n libalgos.take_2d_axis1_int64_int64, np.int64, np.int64, fill_wrap=np.int64\n 
),\n}\n\n_take_2d_multi_dict = {\n ("int8", "int8"): libalgos.take_2d_multi_int8_int8,\n ("int8", "int32"): libalgos.take_2d_multi_int8_int32,\n ("int8", "int64"): libalgos.take_2d_multi_int8_int64,\n ("int8", "float64"): libalgos.take_2d_multi_int8_float64,\n ("int16", "int16"): libalgos.take_2d_multi_int16_int16,\n ("int16", "int32"): libalgos.take_2d_multi_int16_int32,\n ("int16", "int64"): libalgos.take_2d_multi_int16_int64,\n ("int16", "float64"): libalgos.take_2d_multi_int16_float64,\n ("int32", "int32"): libalgos.take_2d_multi_int32_int32,\n ("int32", "int64"): libalgos.take_2d_multi_int32_int64,\n ("int32", "float64"): libalgos.take_2d_multi_int32_float64,\n ("int64", "int64"): libalgos.take_2d_multi_int64_int64,\n ("int64", "float64"): libalgos.take_2d_multi_int64_float64,\n ("float32", "float32"): libalgos.take_2d_multi_float32_float32,\n ("float32", "float64"): libalgos.take_2d_multi_float32_float64,\n ("float64", "float64"): libalgos.take_2d_multi_float64_float64,\n ("object", "object"): libalgos.take_2d_multi_object_object,\n ("bool", "bool"): _view_wrapper(\n libalgos.take_2d_multi_bool_bool, np.uint8, np.uint8\n ),\n ("bool", "object"): _view_wrapper(\n libalgos.take_2d_multi_bool_object, np.uint8, None\n ),\n ("datetime64[ns]", "datetime64[ns]"): _view_wrapper(\n libalgos.take_2d_multi_int64_int64, np.int64, np.int64, fill_wrap=np.int64\n ),\n ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper(\n libalgos.take_2d_multi_int64_int64, np.int64, np.int64, fill_wrap=np.int64\n ),\n}\n\n\ndef _take_nd_object(\n arr: np.ndarray,\n indexer: npt.NDArray[np.intp],\n out: np.ndarray,\n axis: AxisInt,\n fill_value,\n mask_info,\n) -> None:\n if mask_info is not None:\n mask, needs_masking = mask_info\n else:\n mask = indexer == -1\n needs_masking = mask.any()\n if arr.dtype != out.dtype:\n arr = arr.astype(out.dtype)\n if arr.shape[axis] > 0:\n arr.take(indexer, axis=axis, out=out)\n if needs_masking:\n outindexer = [slice(None)] * arr.ndim\n 
outindexer[axis] = mask\n out[tuple(outindexer)] = fill_value\n\n\ndef _take_2d_multi_object(\n arr: np.ndarray,\n indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],\n out: np.ndarray,\n fill_value,\n mask_info,\n) -> None:\n # this is not ideal, performance-wise, but it's better than raising\n # an exception (best to optimize in Cython to avoid getting here)\n row_idx, col_idx = indexer # both np.intp\n if mask_info is not None:\n (row_mask, col_mask), (row_needs, col_needs) = mask_info\n else:\n row_mask = row_idx == -1\n col_mask = col_idx == -1\n row_needs = row_mask.any()\n col_needs = col_mask.any()\n if fill_value is not None:\n if row_needs:\n out[row_mask, :] = fill_value\n if col_needs:\n out[:, col_mask] = fill_value\n for i, u_ in enumerate(row_idx):\n if u_ != -1:\n for j, v in enumerate(col_idx):\n if v != -1:\n out[i, j] = arr[u_, v]\n\n\ndef _take_preprocess_indexer_and_fill_value(\n arr: np.ndarray,\n indexer: npt.NDArray[np.intp],\n fill_value,\n allow_fill: bool,\n mask: npt.NDArray[np.bool_] | None = None,\n):\n mask_info: tuple[np.ndarray | None, bool] | None = None\n\n if not allow_fill:\n dtype, fill_value = arr.dtype, arr.dtype.type()\n mask_info = None, False\n else:\n # check for promotion based on types only (do this first because\n # it's faster than computing a mask)\n dtype, fill_value = maybe_promote(arr.dtype, fill_value)\n if dtype != arr.dtype:\n # check if promotion is actually required based on indexer\n if mask is not None:\n needs_masking = True\n else:\n mask = indexer == -1\n needs_masking = bool(mask.any())\n mask_info = mask, needs_masking\n if not needs_masking:\n # if not, then depromote, set fill_value to dummy\n # (it won't be used but we don't want the cython code\n # to crash when trying to cast it to dtype)\n dtype, fill_value = arr.dtype, arr.dtype.type()\n\n return dtype, fill_value, mask_info\n
.venv\Lib\site-packages\pandas\core\array_algos\take.py
take.py
Python
20,815
0.95
0.139731
0.077519
awesome-app
23
2024-08-11T10:48:39.330429
GPL-3.0
false
fd7bc5a8f09647d63aa76a6b99036248
"""\ntransforms.py is for shape-preserving functions.\n"""\n\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\n\nif TYPE_CHECKING:\n from pandas._typing import (\n AxisInt,\n Scalar,\n )\n\n\ndef shift(\n values: np.ndarray, periods: int, axis: AxisInt, fill_value: Scalar\n) -> np.ndarray:\n new_values = values\n\n if periods == 0 or values.size == 0:\n return new_values.copy()\n\n # make sure array sent to np.roll is c_contiguous\n f_ordered = values.flags.f_contiguous\n if f_ordered:\n new_values = new_values.T\n axis = new_values.ndim - axis - 1\n\n if new_values.size:\n new_values = np.roll(\n new_values,\n np.intp(periods),\n axis=axis,\n )\n\n axis_indexer = [slice(None)] * values.ndim\n if periods > 0:\n axis_indexer[axis] = slice(None, periods)\n else:\n axis_indexer[axis] = slice(periods, None)\n new_values[tuple(axis_indexer)] = fill_value\n\n # restore original order\n if f_ordered:\n new_values = new_values.T\n\n return new_values\n
.venv\Lib\site-packages\pandas\core\array_algos\transforms.py
transforms.py
Python
1,104
0.95
0.16
0.052632
vue-tools
601
2024-11-17T21:14:16.448283
BSD-3-Clause
false
2df0012c50401be9e5390d5389834924
"""\ncore.array_algos is for algorithms that operate on ndarray and ExtensionArray.\nThese should:\n\n- Assume that any Index, Series, or DataFrame objects have already been unwrapped.\n- Assume that any list arguments have already been cast to ndarray/EA.\n- Not depend on Index, Series, or DataFrame, nor import any of these.\n- May dispatch to ExtensionArray methods, but should not import from core.arrays.\n"""\n
.venv\Lib\site-packages\pandas\core\array_algos\__init__.py
__init__.py
Python
408
0.85
0.111111
0
vue-tools
193
2025-04-21T08:20:37.142056
BSD-3-Clause
false
9ef9876c3914c28ee8952800064dc0c8
\n\n
.venv\Lib\site-packages\pandas\core\array_algos\__pycache__\datetimelike_accumulations.cpython-313.pyc
datetimelike_accumulations.cpython-313.pyc
Other
2,953
0.8
0.090909
0
awesome-app
721
2023-09-29T08:47:31.183605
GPL-3.0
false
10fa38bce79962abfb20701b57c79daa
\n\n
.venv\Lib\site-packages\pandas\core\array_algos\__pycache__\masked_accumulations.cpython-313.pyc
masked_accumulations.cpython-313.pyc
Other
3,621
0.8
0.092593
0
vue-tools
388
2024-09-01T23:54:41.399773
MIT
false
dadf80117da63a2dd83a70305c14a89b
\n\n
.venv\Lib\site-packages\pandas\core\array_algos\__pycache__\masked_reductions.cpython-313.pyc
masked_reductions.cpython-313.pyc
Other
6,245
0.95
0.030303
0.024
react-lib
920
2024-01-17T09:40:54.649427
MIT
false
5e59e16c1d86f496e090908d33fe0db9
\n\n
.venv\Lib\site-packages\pandas\core\array_algos\__pycache__\putmask.cpython-313.pyc
putmask.cpython-313.pyc
Other
4,844
0.95
0.027778
0
python-kit
282
2025-01-29T12:42:23.813996
GPL-3.0
false
58e85051fcc7681a4804aa83e57eee9e
\n\n
.venv\Lib\site-packages\pandas\core\array_algos\__pycache__\quantile.cpython-313.pyc
quantile.cpython-313.pyc
Other
6,434
0.8
0.043478
0
node-utils
571
2025-05-13T00:04:20.353776
MIT
false
dac420082ad9eaf4665eed92fda4f2e7
\n\n
.venv\Lib\site-packages\pandas\core\array_algos\__pycache__\replace.cpython-313.pyc
replace.cpython-313.pyc
Other
5,642
0.8
0.013514
0
node-utils
831
2025-05-24T00:45:30.953413
Apache-2.0
false
38fe46608cd80947116afca8d42ee097
\n\n
.venv\Lib\site-packages\pandas\core\array_algos\__pycache__\take.cpython-313.pyc
take.cpython-313.pyc
Other
22,311
0.95
0.045
0.02139
react-lib
409
2025-02-12T00:56:55.811745
Apache-2.0
false
457552f1262390063d8fe2fe2f9233d2
\n\n
.venv\Lib\site-packages\pandas\core\array_algos\__pycache__\transforms.cpython-313.pyc
transforms.cpython-313.pyc
Other
1,576
0.95
0.052632
0
react-lib
77
2024-07-01T17:56:19.558884
GPL-3.0
false
10f8e0149345fce71e7fc4056ed09151
\n\n
.venv\Lib\site-packages\pandas\core\array_algos\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
619
0.85
0.090909
0
vue-tools
920
2024-03-30T15:08:26.341031
MIT
false
75963c815a4b9474e554ca234b8ac4d3
"""\nCore eval alignment algorithms.\n"""\nfrom __future__ import annotations\n\nfrom functools import (\n partial,\n wraps,\n)\nfrom typing import (\n TYPE_CHECKING,\n Callable,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas.errors import PerformanceWarning\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCSeries,\n)\n\nfrom pandas.core.base import PandasObject\nimport pandas.core.common as com\nfrom pandas.core.computation.common import result_type_many\n\nif TYPE_CHECKING:\n from collections.abc import Sequence\n\n from pandas._typing import F\n\n from pandas.core.generic import NDFrame\n from pandas.core.indexes.api import Index\n\n\ndef _align_core_single_unary_op(\n term,\n) -> tuple[partial | type[NDFrame], dict[str, Index] | None]:\n typ: partial | type[NDFrame]\n axes: dict[str, Index] | None = None\n\n if isinstance(term.value, np.ndarray):\n typ = partial(np.asanyarray, dtype=term.value.dtype)\n else:\n typ = type(term.value)\n if hasattr(term.value, "axes"):\n axes = _zip_axes_from_type(typ, term.value.axes)\n\n return typ, axes\n\n\ndef _zip_axes_from_type(\n typ: type[NDFrame], new_axes: Sequence[Index]\n) -> dict[str, Index]:\n return {name: new_axes[i] for i, name in enumerate(typ._AXIS_ORDERS)}\n\n\ndef _any_pandas_objects(terms) -> bool:\n """\n Check a sequence of terms for instances of PandasObject.\n """\n return any(isinstance(term.value, PandasObject) for term in terms)\n\n\ndef _filter_special_cases(f) -> Callable[[F], F]:\n @wraps(f)\n def wrapper(terms):\n # single unary operand\n if len(terms) == 1:\n return _align_core_single_unary_op(terms[0])\n\n term_values = (term.value for term in terms)\n\n # we don't have any pandas objects\n if not _any_pandas_objects(terms):\n return result_type_many(*term_values), None\n\n return f(terms)\n\n return wrapper\n\n\n@_filter_special_cases\ndef _align_core(terms):\n term_index = [i for i, term in enumerate(terms) if 
hasattr(term.value, "axes")]\n term_dims = [terms[i].value.ndim for i in term_index]\n\n from pandas import Series\n\n ndims = Series(dict(zip(term_index, term_dims)))\n\n # initial axes are the axes of the largest-axis'd term\n biggest = terms[ndims.idxmax()].value\n typ = biggest._constructor\n axes = biggest.axes\n naxes = len(axes)\n gt_than_one_axis = naxes > 1\n\n for value in (terms[i].value for i in term_index):\n is_series = isinstance(value, ABCSeries)\n is_series_and_gt_one_axis = is_series and gt_than_one_axis\n\n for axis, items in enumerate(value.axes):\n if is_series_and_gt_one_axis:\n ax, itm = naxes - 1, value.index\n else:\n ax, itm = axis, items\n\n if not axes[ax].is_(itm):\n axes[ax] = axes[ax].union(itm)\n\n for i, ndim in ndims.items():\n for axis, items in zip(range(ndim), axes):\n ti = terms[i].value\n\n if hasattr(ti, "reindex"):\n transpose = isinstance(ti, ABCSeries) and naxes > 1\n reindexer = axes[naxes - 1] if transpose else items\n\n term_axis_size = len(ti.axes[axis])\n reindexer_size = len(reindexer)\n\n ordm = np.log10(max(1, abs(reindexer_size - term_axis_size)))\n if ordm >= 1 and reindexer_size >= 10000:\n w = (\n f"Alignment difference on axis {axis} is larger "\n f"than an order of magnitude on term {repr(terms[i].name)}, "\n f"by more than {ordm:.4g}; performance may suffer."\n )\n warnings.warn(\n w, category=PerformanceWarning, stacklevel=find_stack_level()\n )\n\n obj = ti.reindex(reindexer, axis=axis, copy=False)\n terms[i].update(obj)\n\n terms[i].update(terms[i].value.values)\n\n return typ, _zip_axes_from_type(typ, axes)\n\n\ndef align_terms(terms):\n """\n Align a set of terms.\n """\n try:\n # flatten the parse tree (a nested list, really)\n terms = list(com.flatten(terms))\n except TypeError:\n # can't iterate so it must just be a constant or single variable\n if isinstance(terms.value, (ABCSeries, ABCDataFrame)):\n typ = type(terms.value)\n return typ, _zip_axes_from_type(typ, terms.value.axes)\n return 
np.result_type(terms.type), None\n\n # if all resolved variables are numeric scalars\n if all(term.is_scalar for term in terms):\n return result_type_many(*(term.value for term in terms)).type, None\n\n # perform the main alignment\n typ, axes = _align_core(terms)\n return typ, axes\n\n\ndef reconstruct_object(typ, obj, axes, dtype):\n """\n Reconstruct an object given its type, raw value, and possibly empty\n (None) axes.\n\n Parameters\n ----------\n typ : object\n A type\n obj : object\n The value to use in the type constructor\n axes : dict\n The axes to use to construct the resulting pandas object\n\n Returns\n -------\n ret : typ\n An object of type ``typ`` with the value `obj` and possible axes\n `axes`.\n """\n try:\n typ = typ.type\n except AttributeError:\n pass\n\n res_t = np.result_type(obj.dtype, dtype)\n\n if not isinstance(typ, partial) and issubclass(typ, PandasObject):\n return typ(obj, dtype=res_t, **axes)\n\n # special case for pathological things like ~True/~False\n if hasattr(res_t, "type") and typ == np.bool_ and res_t != np.bool_:\n ret_value = res_t.type(obj)\n else:\n ret_value = typ(obj).astype(res_t)\n # The condition is to distinguish 0-dim array (returned in case of\n # scalar) and 1 element array\n # e.g. np.array(0) and np.array([0])\n if (\n len(obj.shape) == 1\n and len(obj) == 1\n and not isinstance(ret_value, np.ndarray)\n ):\n ret_value = np.array([ret_value]).astype(res_t)\n\n return ret_value\n
.venv\Lib\site-packages\pandas\core\computation\align.py
align.py
Python
6,161
0.95
0.192488
0.067073
vue-tools
701
2024-05-14T11:45:16.555171
Apache-2.0
false
8e490b71daa72b369c6b7b8cf8fcee87
__all__ = ["eval"]\nfrom pandas.core.computation.eval import eval\n
.venv\Lib\site-packages\pandas\core\computation\api.py
api.py
Python
65
0.65
0
0
awesome-app
221
2025-04-16T14:26:46.492229
MIT
false
271437ff1412a12f279cce6bf4c0a962
from __future__ import annotations\n\nfrom pandas.compat._optional import import_optional_dependency\n\nne = import_optional_dependency("numexpr", errors="warn")\nNUMEXPR_INSTALLED = ne is not None\n\n__all__ = ["NUMEXPR_INSTALLED"]\n
.venv\Lib\site-packages\pandas\core\computation\check.py
check.py
Python
226
0.85
0
0
react-lib
927
2024-07-06T19:10:02.638826
MIT
false
185df330e25b8494268214bcf50cf759
from __future__ import annotations\n\nfrom functools import reduce\n\nimport numpy as np\n\nfrom pandas._config import get_option\n\n\ndef ensure_decoded(s) -> str:\n """\n If we have bytes, decode them to unicode.\n """\n if isinstance(s, (np.bytes_, bytes)):\n s = s.decode(get_option("display.encoding"))\n return s\n\n\ndef result_type_many(*arrays_and_dtypes):\n """\n Wrapper around numpy.result_type which overcomes the NPY_MAXARGS (32)\n argument limit.\n """\n try:\n return np.result_type(*arrays_and_dtypes)\n except ValueError:\n # we have > NPY_MAXARGS terms in our expression\n return reduce(np.result_type, arrays_and_dtypes)\n except TypeError:\n from pandas.core.dtypes.cast import find_common_type\n from pandas.core.dtypes.common import is_extension_array_dtype\n\n arr_and_dtypes = list(arrays_and_dtypes)\n ea_dtypes, non_ea_dtypes = [], []\n for arr_or_dtype in arr_and_dtypes:\n if is_extension_array_dtype(arr_or_dtype):\n ea_dtypes.append(arr_or_dtype)\n else:\n non_ea_dtypes.append(arr_or_dtype)\n\n if non_ea_dtypes:\n try:\n np_dtype = np.result_type(*non_ea_dtypes)\n except ValueError:\n np_dtype = reduce(np.result_type, arrays_and_dtypes)\n return find_common_type(ea_dtypes + [np_dtype])\n\n return find_common_type(ea_dtypes)\n
.venv\Lib\site-packages\pandas\core\computation\common.py
common.py
Python
1,442
0.95
0.166667
0.026316
node-utils
138
2024-10-11T16:06:42.731639
Apache-2.0
false
3dc97f57202c54c8c71c206d7e377594
"""\nEngine classes for :func:`~pandas.eval`\n"""\nfrom __future__ import annotations\n\nimport abc\nfrom typing import TYPE_CHECKING\n\nfrom pandas.errors import NumExprClobberingError\n\nfrom pandas.core.computation.align import (\n align_terms,\n reconstruct_object,\n)\nfrom pandas.core.computation.ops import (\n MATHOPS,\n REDUCTIONS,\n)\n\nfrom pandas.io.formats import printing\n\nif TYPE_CHECKING:\n from pandas.core.computation.expr import Expr\n\n_ne_builtins = frozenset(MATHOPS + REDUCTIONS)\n\n\ndef _check_ne_builtin_clash(expr: Expr) -> None:\n """\n Attempt to prevent foot-shooting in a helpful way.\n\n Parameters\n ----------\n expr : Expr\n Terms can contain\n """\n names = expr.names\n overlap = names & _ne_builtins\n\n if overlap:\n s = ", ".join([repr(x) for x in overlap])\n raise NumExprClobberingError(\n f'Variables in expression "{expr}" overlap with builtins: ({s})'\n )\n\n\nclass AbstractEngine(metaclass=abc.ABCMeta):\n """Object serving as a base class for all engines."""\n\n has_neg_frac = False\n\n def __init__(self, expr) -> None:\n self.expr = expr\n self.aligned_axes = None\n self.result_type = None\n\n def convert(self) -> str:\n """\n Convert an expression for evaluation.\n\n Defaults to return the expression as a string.\n """\n return printing.pprint_thing(self.expr)\n\n def evaluate(self) -> object:\n """\n Run the engine on the expression.\n\n This method performs alignment which is necessary no matter what engine\n is being used, thus its implementation is in the base class.\n\n Returns\n -------\n object\n The result of the passed expression.\n """\n if not self._is_aligned:\n self.result_type, self.aligned_axes = align_terms(self.expr.terms)\n\n # make sure no names in resolvers and locals/globals clash\n res = self._evaluate()\n return reconstruct_object(\n self.result_type, res, self.aligned_axes, self.expr.terms.return_type\n )\n\n @property\n def _is_aligned(self) -> bool:\n return self.aligned_axes is not None and 
self.result_type is not None\n\n @abc.abstractmethod\n def _evaluate(self):\n """\n Return an evaluated expression.\n\n Parameters\n ----------\n env : Scope\n The local and global environment in which to evaluate an\n expression.\n\n Notes\n -----\n Must be implemented by subclasses.\n """\n\n\nclass NumExprEngine(AbstractEngine):\n """NumExpr engine class"""\n\n has_neg_frac = True\n\n def _evaluate(self):\n import numexpr as ne\n\n # convert the expression to a valid numexpr expression\n s = self.convert()\n\n env = self.expr.env\n scope = env.full_scope\n _check_ne_builtin_clash(self.expr)\n return ne.evaluate(s, local_dict=scope)\n\n\nclass PythonEngine(AbstractEngine):\n """\n Evaluate an expression in Python space.\n\n Mostly for testing purposes.\n """\n\n has_neg_frac = False\n\n def evaluate(self):\n return self.expr()\n\n def _evaluate(self) -> None:\n pass\n\n\nENGINES: dict[str, type[AbstractEngine]] = {\n "numexpr": NumExprEngine,\n "python": PythonEngine,\n}\n
.venv\Lib\site-packages\pandas\core\computation\engines.py
engines.py
Python
3,314
0.95
0.160839
0.019048
react-lib
608
2024-06-19T09:28:02.442748
BSD-3-Clause
false
75e3d7662f251e16641af39fed9c0ac6
"""\nTop level ``eval`` module.\n"""\nfrom __future__ import annotations\n\nimport tokenize\nfrom typing import TYPE_CHECKING\nimport warnings\n\nfrom pandas.util._exceptions import find_stack_level\nfrom pandas.util._validators import validate_bool_kwarg\n\nfrom pandas.core.dtypes.common import (\n is_extension_array_dtype,\n is_string_dtype,\n)\n\nfrom pandas.core.computation.engines import ENGINES\nfrom pandas.core.computation.expr import (\n PARSERS,\n Expr,\n)\nfrom pandas.core.computation.parsing import tokenize_string\nfrom pandas.core.computation.scope import ensure_scope\nfrom pandas.core.generic import NDFrame\n\nfrom pandas.io.formats.printing import pprint_thing\n\nif TYPE_CHECKING:\n from pandas.core.computation.ops import BinOp\n\n\ndef _check_engine(engine: str | None) -> str:\n """\n Make sure a valid engine is passed.\n\n Parameters\n ----------\n engine : str\n String to validate.\n\n Raises\n ------\n KeyError\n * If an invalid engine is passed.\n ImportError\n * If numexpr was requested but doesn't exist.\n\n Returns\n -------\n str\n Engine name.\n """\n from pandas.core.computation.check import NUMEXPR_INSTALLED\n from pandas.core.computation.expressions import USE_NUMEXPR\n\n if engine is None:\n engine = "numexpr" if USE_NUMEXPR else "python"\n\n if engine not in ENGINES:\n valid_engines = list(ENGINES.keys())\n raise KeyError(\n f"Invalid engine '{engine}' passed, valid engines are {valid_engines}"\n )\n\n # TODO: validate this in a more general way (thinking of future engines\n # that won't necessarily be import-able)\n # Could potentially be done on engine instantiation\n if engine == "numexpr" and not NUMEXPR_INSTALLED:\n raise ImportError(\n "'numexpr' is not installed or an unsupported version. 
Cannot use "\n "engine='numexpr' for query/eval if 'numexpr' is not installed"\n )\n\n return engine\n\n\ndef _check_parser(parser: str):\n """\n Make sure a valid parser is passed.\n\n Parameters\n ----------\n parser : str\n\n Raises\n ------\n KeyError\n * If an invalid parser is passed\n """\n if parser not in PARSERS:\n raise KeyError(\n f"Invalid parser '{parser}' passed, valid parsers are {PARSERS.keys()}"\n )\n\n\ndef _check_resolvers(resolvers):\n if resolvers is not None:\n for resolver in resolvers:\n if not hasattr(resolver, "__getitem__"):\n name = type(resolver).__name__\n raise TypeError(\n f"Resolver of type '{name}' does not "\n "implement the __getitem__ method"\n )\n\n\ndef _check_expression(expr):\n """\n Make sure an expression is not an empty string\n\n Parameters\n ----------\n expr : object\n An object that can be converted to a string\n\n Raises\n ------\n ValueError\n * If expr is an empty string\n """\n if not expr:\n raise ValueError("expr cannot be an empty string")\n\n\ndef _convert_expression(expr) -> str:\n """\n Convert an object to an expression.\n\n This function converts an object to an expression (a unicode string) and\n checks to make sure it isn't empty after conversion. 
This is used to\n convert operators to their string representation for recursive calls to\n :func:`~pandas.eval`.\n\n Parameters\n ----------\n expr : object\n The object to be converted to a string.\n\n Returns\n -------\n str\n The string representation of an object.\n\n Raises\n ------\n ValueError\n * If the expression is empty.\n """\n s = pprint_thing(expr)\n _check_expression(s)\n return s\n\n\ndef _check_for_locals(expr: str, stack_level: int, parser: str):\n at_top_of_stack = stack_level == 0\n not_pandas_parser = parser != "pandas"\n\n if not_pandas_parser:\n msg = "The '@' prefix is only supported by the pandas parser"\n elif at_top_of_stack:\n msg = (\n "The '@' prefix is not allowed in top-level eval calls.\n"\n "please refer to your variables by name without the '@' prefix."\n )\n\n if at_top_of_stack or not_pandas_parser:\n for toknum, tokval in tokenize_string(expr):\n if toknum == tokenize.OP and tokval == "@":\n raise SyntaxError(msg)\n\n\ndef eval(\n expr: str | BinOp, # we leave BinOp out of the docstr bc it isn't for users\n parser: str = "pandas",\n engine: str | None = None,\n local_dict=None,\n global_dict=None,\n resolvers=(),\n level: int = 0,\n target=None,\n inplace: bool = False,\n):\n """\n Evaluate a Python expression as a string using various backends.\n\n The following arithmetic operations are supported: ``+``, ``-``, ``*``,\n ``/``, ``**``, ``%``, ``//`` (python engine only) along with the following\n boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not).\n Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`,\n :keyword:`or`, and :keyword:`not` with the same semantics as the\n corresponding bitwise operators. :class:`~pandas.Series` and\n :class:`~pandas.DataFrame` objects are supported and behave as they would\n with plain ol' Python evaluation.\n\n Parameters\n ----------\n expr : str\n The expression to evaluate. 
This string cannot contain any Python\n `statements\n <https://docs.python.org/3/reference/simple_stmts.html#simple-statements>`__,\n only Python `expressions\n <https://docs.python.org/3/reference/simple_stmts.html#expression-statements>`__.\n parser : {'pandas', 'python'}, default 'pandas'\n The parser to use to construct the syntax tree from the expression. The\n default of ``'pandas'`` parses code slightly different than standard\n Python. Alternatively, you can parse an expression using the\n ``'python'`` parser to retain strict Python semantics. See the\n :ref:`enhancing performance <enhancingperf.eval>` documentation for\n more details.\n engine : {'python', 'numexpr'}, default 'numexpr'\n\n The engine used to evaluate the expression. Supported engines are\n\n - None : tries to use ``numexpr``, falls back to ``python``\n - ``'numexpr'`` : This default engine evaluates pandas objects using\n numexpr for large speed ups in complex expressions with large frames.\n - ``'python'`` : Performs operations as if you had ``eval``'d in top\n level python. This engine is generally not that useful.\n\n More backends may be available in the future.\n local_dict : dict or None, optional\n A dictionary of local variables, taken from locals() by default.\n global_dict : dict or None, optional\n A dictionary of global variables, taken from globals() by default.\n resolvers : list of dict-like or None, optional\n A list of objects implementing the ``__getitem__`` special method that\n you can use to inject an additional collection of namespaces to use for\n variable lookup. For example, this is used in the\n :meth:`~DataFrame.query` method to inject the\n ``DataFrame.index`` and ``DataFrame.columns``\n variables that refer to their respective :class:`~pandas.DataFrame`\n instance attributes.\n level : int, optional\n The number of prior stack frames to traverse and add to the current\n scope. 
Most users will **not** need to change this parameter.\n target : object, optional, default None\n This is the target object for assignment. It is used when there is\n variable assignment in the expression. If so, then `target` must\n support item assignment with string keys, and if a copy is being\n returned, it must also support `.copy()`.\n inplace : bool, default False\n If `target` is provided, and the expression mutates `target`, whether\n to modify `target` inplace. Otherwise, return a copy of `target` with\n the mutation.\n\n Returns\n -------\n ndarray, numeric scalar, DataFrame, Series, or None\n The completion value of evaluating the given code or None if ``inplace=True``.\n\n Raises\n ------\n ValueError\n There are many instances where such an error can be raised:\n\n - `target=None`, but the expression is multiline.\n - The expression is multiline, but not all them have item assignment.\n An example of such an arrangement is this:\n\n a = b + 1\n a + 2\n\n Here, there are expressions on different lines, making it multiline,\n but the last line has no variable assigned to the output of `a + 2`.\n - `inplace=True`, but the expression is missing item assignment.\n - Item assignment is provided, but the `target` does not support\n string item assignment.\n - Item assignment is provided and `inplace=False`, but the `target`\n does not support the `.copy()` method\n\n See Also\n --------\n DataFrame.query : Evaluates a boolean expression to query the columns\n of a frame.\n DataFrame.eval : Evaluate a string describing operations on\n DataFrame columns.\n\n Notes\n -----\n The ``dtype`` of any objects involved in an arithmetic ``%`` operation are\n recursively cast to ``float64``.\n\n See the :ref:`enhancing performance <enhancingperf.eval>` documentation for\n more details.\n\n Examples\n --------\n >>> df = pd.DataFrame({"animal": ["dog", "pig"], "age": [10, 20]})\n >>> df\n animal age\n 0 dog 10\n 1 pig 20\n\n We can add a new column using 
``pd.eval``:\n\n >>> pd.eval("double_age = df.age * 2", target=df)\n animal age double_age\n 0 dog 10 20\n 1 pig 20 40\n """\n inplace = validate_bool_kwarg(inplace, "inplace")\n\n exprs: list[str | BinOp]\n if isinstance(expr, str):\n _check_expression(expr)\n exprs = [e.strip() for e in expr.splitlines() if e.strip() != ""]\n else:\n # ops.BinOp; for internal compat, not intended to be passed by users\n exprs = [expr]\n multi_line = len(exprs) > 1\n\n if multi_line and target is None:\n raise ValueError(\n "multi-line expressions are only valid in the "\n "context of data, use DataFrame.eval"\n )\n engine = _check_engine(engine)\n _check_parser(parser)\n _check_resolvers(resolvers)\n\n ret = None\n first_expr = True\n target_modified = False\n\n for expr in exprs:\n expr = _convert_expression(expr)\n _check_for_locals(expr, level, parser)\n\n # get our (possibly passed-in) scope\n env = ensure_scope(\n level + 1,\n global_dict=global_dict,\n local_dict=local_dict,\n resolvers=resolvers,\n target=target,\n )\n\n parsed_expr = Expr(expr, engine=engine, parser=parser, env=env)\n\n if engine == "numexpr" and (\n (\n is_extension_array_dtype(parsed_expr.terms.return_type)\n and not is_string_dtype(parsed_expr.terms.return_type)\n )\n or getattr(parsed_expr.terms, "operand_types", None) is not None\n and any(\n (is_extension_array_dtype(elem) and not is_string_dtype(elem))\n for elem in parsed_expr.terms.operand_types\n )\n ):\n warnings.warn(\n "Engine has switched to 'python' because numexpr does not support "\n "extension array dtypes. 
Please set your engine to python manually.",\n RuntimeWarning,\n stacklevel=find_stack_level(),\n )\n engine = "python"\n\n # construct the engine and evaluate the parsed expression\n eng = ENGINES[engine]\n eng_inst = eng(parsed_expr)\n ret = eng_inst.evaluate()\n\n if parsed_expr.assigner is None:\n if multi_line:\n raise ValueError(\n "Multi-line expressions are only valid "\n "if all expressions contain an assignment"\n )\n if inplace:\n raise ValueError("Cannot operate inplace if there is no assignment")\n\n # assign if needed\n assigner = parsed_expr.assigner\n if env.target is not None and assigner is not None:\n target_modified = True\n\n # if returning a copy, copy only on the first assignment\n if not inplace and first_expr:\n try:\n target = env.target\n if isinstance(target, NDFrame):\n target = target.copy(deep=None)\n else:\n target = target.copy()\n except AttributeError as err:\n raise ValueError("Cannot return a copy of the target") from err\n else:\n target = env.target\n\n # TypeError is most commonly raised (e.g. int, list), but you\n # get IndexError if you try to do this assignment on np.ndarray.\n # we will ignore numpy warnings here; e.g. if trying\n # to use a non-numeric indexer\n try:\n if inplace and isinstance(target, NDFrame):\n target.loc[:, assigner] = ret\n else:\n target[assigner] = ret # pyright: ignore[reportGeneralTypeIssues]\n except (TypeError, IndexError) as err:\n raise ValueError("Cannot assign expression output to target") from err\n\n if not resolvers:\n resolvers = ({assigner: ret},)\n else:\n # existing resolver needs updated to handle\n # case of mutating existing column in copy\n for resolver in resolvers:\n if assigner in resolver:\n resolver[assigner] = ret\n break\n else:\n resolvers += ({assigner: ret},)\n\n ret = None\n first_expr = False\n\n # We want to exclude `inplace=None` as being False.\n if inplace is False:\n return target if target_modified else ret\n
.venv\Lib\site-packages\pandas\core\computation\eval.py
eval.py
Python
14,212
0.95
0.15677
0.056657
vue-tools
95
2025-03-17T18:19:25.523508
GPL-3.0
false
ee032d91bf513e31db3d0d6955835071
"""\n:func:`~pandas.eval` parsers.\n"""\nfrom __future__ import annotations\n\nimport ast\nfrom functools import (\n partial,\n reduce,\n)\nfrom keyword import iskeyword\nimport tokenize\nfrom typing import (\n Callable,\n ClassVar,\n TypeVar,\n)\n\nimport numpy as np\n\nfrom pandas.errors import UndefinedVariableError\n\nfrom pandas.core.dtypes.common import is_string_dtype\n\nimport pandas.core.common as com\nfrom pandas.core.computation.ops import (\n ARITH_OPS_SYMS,\n BOOL_OPS_SYMS,\n CMP_OPS_SYMS,\n LOCAL_TAG,\n MATHOPS,\n REDUCTIONS,\n UNARY_OPS_SYMS,\n BinOp,\n Constant,\n FuncNode,\n Op,\n Term,\n UnaryOp,\n is_term,\n)\nfrom pandas.core.computation.parsing import (\n clean_backtick_quoted_toks,\n tokenize_string,\n)\nfrom pandas.core.computation.scope import Scope\n\nfrom pandas.io.formats import printing\n\n\ndef _rewrite_assign(tok: tuple[int, str]) -> tuple[int, str]:\n """\n Rewrite the assignment operator for PyTables expressions that use ``=``\n as a substitute for ``==``.\n\n Parameters\n ----------\n tok : tuple of int, str\n ints correspond to the all caps constants in the tokenize module\n\n Returns\n -------\n tuple of int, str\n Either the input or token or the replacement values\n """\n toknum, tokval = tok\n return toknum, "==" if tokval == "=" else tokval\n\n\ndef _replace_booleans(tok: tuple[int, str]) -> tuple[int, str]:\n """\n Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise\n precedence is changed to boolean precedence.\n\n Parameters\n ----------\n tok : tuple of int, str\n ints correspond to the all caps constants in the tokenize module\n\n Returns\n -------\n tuple of int, str\n Either the input or token or the replacement values\n """\n toknum, tokval = tok\n if toknum == tokenize.OP:\n if tokval == "&":\n return tokenize.NAME, "and"\n elif tokval == "|":\n return tokenize.NAME, "or"\n return toknum, tokval\n return toknum, tokval\n\n\ndef _replace_locals(tok: tuple[int, str]) -> tuple[int, str]:\n """\n Replace 
local variables with a syntactically valid name.\n\n Parameters\n ----------\n tok : tuple of int, str\n ints correspond to the all caps constants in the tokenize module\n\n Returns\n -------\n tuple of int, str\n Either the input or token or the replacement values\n\n Notes\n -----\n This is somewhat of a hack in that we rewrite a string such as ``'@a'`` as\n ``'__pd_eval_local_a'`` by telling the tokenizer that ``__pd_eval_local_``\n is a ``tokenize.OP`` and to replace the ``'@'`` symbol with it.\n """\n toknum, tokval = tok\n if toknum == tokenize.OP and tokval == "@":\n return tokenize.OP, LOCAL_TAG\n return toknum, tokval\n\n\ndef _compose2(f, g):\n """\n Compose 2 callables.\n """\n return lambda *args, **kwargs: f(g(*args, **kwargs))\n\n\ndef _compose(*funcs):\n """\n Compose 2 or more callables.\n """\n assert len(funcs) > 1, "At least 2 callables must be passed to compose"\n return reduce(_compose2, funcs)\n\n\ndef _preparse(\n source: str,\n f=_compose(\n _replace_locals, _replace_booleans, _rewrite_assign, clean_backtick_quoted_toks\n ),\n) -> str:\n """\n Compose a collection of tokenization functions.\n\n Parameters\n ----------\n source : str\n A Python source code string\n f : callable\n This takes a tuple of (toknum, tokval) as its argument and returns a\n tuple with the same structure but possibly different elements. 
Defaults\n to the composition of ``_rewrite_assign``, ``_replace_booleans``, and\n ``_replace_locals``.\n\n Returns\n -------\n str\n Valid Python source code\n\n Notes\n -----\n The `f` parameter can be any callable that takes *and* returns input of the\n form ``(toknum, tokval)``, where ``toknum`` is one of the constants from\n the ``tokenize`` module and ``tokval`` is a string.\n """\n assert callable(f), "f must be callable"\n return tokenize.untokenize(f(x) for x in tokenize_string(source))\n\n\ndef _is_type(t):\n """\n Factory for a type checking function of type ``t`` or tuple of types.\n """\n return lambda x: isinstance(x.value, t)\n\n\n_is_list = _is_type(list)\n_is_str = _is_type(str)\n\n\n# partition all AST nodes\n_all_nodes = frozenset(\n node\n for node in (getattr(ast, name) for name in dir(ast))\n if isinstance(node, type) and issubclass(node, ast.AST)\n)\n\n\ndef _filter_nodes(superclass, all_nodes=_all_nodes):\n """\n Filter out AST nodes that are subclasses of ``superclass``.\n """\n node_names = (node.__name__ for node in all_nodes if issubclass(node, superclass))\n return frozenset(node_names)\n\n\n_all_node_names = frozenset(x.__name__ for x in _all_nodes)\n_mod_nodes = _filter_nodes(ast.mod)\n_stmt_nodes = _filter_nodes(ast.stmt)\n_expr_nodes = _filter_nodes(ast.expr)\n_expr_context_nodes = _filter_nodes(ast.expr_context)\n_boolop_nodes = _filter_nodes(ast.boolop)\n_operator_nodes = _filter_nodes(ast.operator)\n_unary_op_nodes = _filter_nodes(ast.unaryop)\n_cmp_op_nodes = _filter_nodes(ast.cmpop)\n_comprehension_nodes = _filter_nodes(ast.comprehension)\n_handler_nodes = _filter_nodes(ast.excepthandler)\n_arguments_nodes = _filter_nodes(ast.arguments)\n_keyword_nodes = _filter_nodes(ast.keyword)\n_alias_nodes = _filter_nodes(ast.alias)\n\n\n# nodes that we don't support directly but are needed for parsing\n_hacked_nodes = frozenset(["Assign", "Module", "Expr"])\n\n\n_unsupported_expr_nodes = frozenset(\n [\n "Yield",\n "GeneratorExp",\n 
"IfExp",\n "DictComp",\n "SetComp",\n "Repr",\n "Lambda",\n "Set",\n "AST",\n "Is",\n "IsNot",\n ]\n)\n\n# these nodes are low priority or won't ever be supported (e.g., AST)\n_unsupported_nodes = (\n _stmt_nodes\n | _mod_nodes\n | _handler_nodes\n | _arguments_nodes\n | _keyword_nodes\n | _alias_nodes\n | _expr_context_nodes\n | _unsupported_expr_nodes\n) - _hacked_nodes\n\n# we're adding a different assignment in some cases to be equality comparison\n# and we don't want `stmt` and friends in their so get only the class whose\n# names are capitalized\n_base_supported_nodes = (_all_node_names - _unsupported_nodes) | _hacked_nodes\nintersection = _unsupported_nodes & _base_supported_nodes\n_msg = f"cannot both support and not support {intersection}"\nassert not intersection, _msg\n\n\ndef _node_not_implemented(node_name: str) -> Callable[..., None]:\n """\n Return a function that raises a NotImplementedError with a passed node name.\n """\n\n def f(self, *args, **kwargs):\n raise NotImplementedError(f"'{node_name}' nodes are not implemented")\n\n return f\n\n\n# should be bound by BaseExprVisitor but that creates a circular dependency:\n# _T is used in disallow, but disallow is used to define BaseExprVisitor\n# https://github.com/microsoft/pyright/issues/2315\n_T = TypeVar("_T")\n\n\ndef disallow(nodes: set[str]) -> Callable[[type[_T]], type[_T]]:\n """\n Decorator to disallow certain nodes from parsing. 
Raises a\n NotImplementedError instead.\n\n Returns\n -------\n callable\n """\n\n def disallowed(cls: type[_T]) -> type[_T]:\n # error: "Type[_T]" has no attribute "unsupported_nodes"\n cls.unsupported_nodes = () # type: ignore[attr-defined]\n for node in nodes:\n new_method = _node_not_implemented(node)\n name = f"visit_{node}"\n # error: "Type[_T]" has no attribute "unsupported_nodes"\n cls.unsupported_nodes += (name,) # type: ignore[attr-defined]\n setattr(cls, name, new_method)\n return cls\n\n return disallowed\n\n\ndef _op_maker(op_class, op_symbol):\n """\n Return a function to create an op class with its symbol already passed.\n\n Returns\n -------\n callable\n """\n\n def f(self, node, *args, **kwargs):\n """\n Return a partial function with an Op subclass with an operator already passed.\n\n Returns\n -------\n callable\n """\n return partial(op_class, op_symbol, *args, **kwargs)\n\n return f\n\n\n_op_classes = {"binary": BinOp, "unary": UnaryOp}\n\n\ndef add_ops(op_classes):\n """\n Decorator to add default implementation of ops.\n """\n\n def f(cls):\n for op_attr_name, op_class in op_classes.items():\n ops = getattr(cls, f"{op_attr_name}_ops")\n ops_map = getattr(cls, f"{op_attr_name}_op_nodes_map")\n for op in ops:\n op_node = ops_map[op]\n if op_node is not None:\n made_op = _op_maker(op_class, op)\n setattr(cls, f"visit_{op_node}", made_op)\n return cls\n\n return f\n\n\n@disallow(_unsupported_nodes)\n@add_ops(_op_classes)\nclass BaseExprVisitor(ast.NodeVisitor):\n """\n Custom ast walker. 
Parsers of other engines should subclass this class\n if necessary.\n\n Parameters\n ----------\n env : Scope\n engine : str\n parser : str\n preparser : callable\n """\n\n const_type: ClassVar[type[Term]] = Constant\n term_type: ClassVar[type[Term]] = Term\n\n binary_ops = CMP_OPS_SYMS + BOOL_OPS_SYMS + ARITH_OPS_SYMS\n binary_op_nodes = (\n "Gt",\n "Lt",\n "GtE",\n "LtE",\n "Eq",\n "NotEq",\n "In",\n "NotIn",\n "BitAnd",\n "BitOr",\n "And",\n "Or",\n "Add",\n "Sub",\n "Mult",\n "Div",\n "Pow",\n "FloorDiv",\n "Mod",\n )\n binary_op_nodes_map = dict(zip(binary_ops, binary_op_nodes))\n\n unary_ops = UNARY_OPS_SYMS\n unary_op_nodes = "UAdd", "USub", "Invert", "Not"\n unary_op_nodes_map = dict(zip(unary_ops, unary_op_nodes))\n\n rewrite_map = {\n ast.Eq: ast.In,\n ast.NotEq: ast.NotIn,\n ast.In: ast.In,\n ast.NotIn: ast.NotIn,\n }\n\n unsupported_nodes: tuple[str, ...]\n\n def __init__(self, env, engine, parser, preparser=_preparse) -> None:\n self.env = env\n self.engine = engine\n self.parser = parser\n self.preparser = preparser\n self.assigner = None\n\n def visit(self, node, **kwargs):\n if isinstance(node, str):\n clean = self.preparser(node)\n try:\n node = ast.fix_missing_locations(ast.parse(clean))\n except SyntaxError as e:\n if any(iskeyword(x) for x in clean.split()):\n e.msg = "Python keyword not valid identifier in numexpr query"\n raise e\n\n method = f"visit_{type(node).__name__}"\n visitor = getattr(self, method)\n return visitor(node, **kwargs)\n\n def visit_Module(self, node, **kwargs):\n if len(node.body) != 1:\n raise SyntaxError("only a single expression is allowed")\n expr = node.body[0]\n return self.visit(expr, **kwargs)\n\n def visit_Expr(self, node, **kwargs):\n return self.visit(node.value, **kwargs)\n\n def _rewrite_membership_op(self, node, left, right):\n # the kind of the operator (is actually an instance)\n op_instance = node.op\n op_type = type(op_instance)\n\n # must be two terms and the comparison operator must be ==/!=/in/not in\n 
if is_term(left) and is_term(right) and op_type in self.rewrite_map:\n left_list, right_list = map(_is_list, (left, right))\n left_str, right_str = map(_is_str, (left, right))\n\n # if there are any strings or lists in the expression\n if left_list or right_list or left_str or right_str:\n op_instance = self.rewrite_map[op_type]()\n\n # pop the string variable out of locals and replace it with a list\n # of one string, kind of a hack\n if right_str:\n name = self.env.add_tmp([right.value])\n right = self.term_type(name, self.env)\n\n if left_str:\n name = self.env.add_tmp([left.value])\n left = self.term_type(name, self.env)\n\n op = self.visit(op_instance)\n return op, op_instance, left, right\n\n def _maybe_transform_eq_ne(self, node, left=None, right=None):\n if left is None:\n left = self.visit(node.left, side="left")\n if right is None:\n right = self.visit(node.right, side="right")\n op, op_class, left, right = self._rewrite_membership_op(node, left, right)\n return op, op_class, left, right\n\n def _maybe_downcast_constants(self, left, right):\n f32 = np.dtype(np.float32)\n if (\n left.is_scalar\n and hasattr(left, "value")\n and not right.is_scalar\n and right.return_type == f32\n ):\n # right is a float32 array, left is a scalar\n name = self.env.add_tmp(np.float32(left.value))\n left = self.term_type(name, self.env)\n if (\n right.is_scalar\n and hasattr(right, "value")\n and not left.is_scalar\n and left.return_type == f32\n ):\n # left is a float32 array, right is a scalar\n name = self.env.add_tmp(np.float32(right.value))\n right = self.term_type(name, self.env)\n\n return left, right\n\n def _maybe_eval(self, binop, eval_in_python):\n # eval `in` and `not in` (for now) in "partial" python space\n # things that can be evaluated in "eval" space will be turned into\n # temporary variables. 
for example,\n # [1,2] in a + 2 * b\n # in that case a + 2 * b will be evaluated using numexpr, and the "in"\n # call will be evaluated using isin (in python space)\n return binop.evaluate(\n self.env, self.engine, self.parser, self.term_type, eval_in_python\n )\n\n def _maybe_evaluate_binop(\n self,\n op,\n op_class,\n lhs,\n rhs,\n eval_in_python=("in", "not in"),\n maybe_eval_in_python=("==", "!=", "<", ">", "<=", ">="),\n ):\n res = op(lhs, rhs)\n\n if res.has_invalid_return_type:\n raise TypeError(\n f"unsupported operand type(s) for {res.op}: "\n f"'{lhs.type}' and '{rhs.type}'"\n )\n\n if self.engine != "pytables" and (\n res.op in CMP_OPS_SYMS\n and getattr(lhs, "is_datetime", False)\n or getattr(rhs, "is_datetime", False)\n ):\n # all date ops must be done in python bc numexpr doesn't work\n # well with NaT\n return self._maybe_eval(res, self.binary_ops)\n\n if res.op in eval_in_python:\n # "in"/"not in" ops are always evaluated in python\n return self._maybe_eval(res, eval_in_python)\n elif self.engine != "pytables":\n if (\n getattr(lhs, "return_type", None) == object\n or is_string_dtype(getattr(lhs, "return_type", None))\n or getattr(rhs, "return_type", None) == object\n or is_string_dtype(getattr(rhs, "return_type", None))\n ):\n # evaluate "==" and "!=" in python if either of our operands\n # has an object or string return type\n return self._maybe_eval(res, eval_in_python + maybe_eval_in_python)\n return res\n\n def visit_BinOp(self, node, **kwargs):\n op, op_class, left, right = self._maybe_transform_eq_ne(node)\n left, right = self._maybe_downcast_constants(left, right)\n return self._maybe_evaluate_binop(op, op_class, left, right)\n\n def visit_UnaryOp(self, node, **kwargs):\n op = self.visit(node.op)\n operand = self.visit(node.operand)\n return op(operand)\n\n def visit_Name(self, node, **kwargs) -> Term:\n return self.term_type(node.id, self.env, **kwargs)\n\n # TODO(py314): deprecated since Python 3.8. 
Remove after Python 3.14 is min\n def visit_NameConstant(self, node, **kwargs) -> Term:\n return self.const_type(node.value, self.env)\n\n # TODO(py314): deprecated since Python 3.8. Remove after Python 3.14 is min\n def visit_Num(self, node, **kwargs) -> Term:\n return self.const_type(node.value, self.env)\n\n def visit_Constant(self, node, **kwargs) -> Term:\n return self.const_type(node.value, self.env)\n\n # TODO(py314): deprecated since Python 3.8. Remove after Python 3.14 is min\n def visit_Str(self, node, **kwargs) -> Term:\n name = self.env.add_tmp(node.s)\n return self.term_type(name, self.env)\n\n def visit_List(self, node, **kwargs) -> Term:\n name = self.env.add_tmp([self.visit(e)(self.env) for e in node.elts])\n return self.term_type(name, self.env)\n\n visit_Tuple = visit_List\n\n def visit_Index(self, node, **kwargs):\n """df.index[4]"""\n return self.visit(node.value)\n\n def visit_Subscript(self, node, **kwargs) -> Term:\n from pandas import eval as pd_eval\n\n value = self.visit(node.value)\n slobj = self.visit(node.slice)\n result = pd_eval(\n slobj, local_dict=self.env, engine=self.engine, parser=self.parser\n )\n try:\n # a Term instance\n v = value.value[result]\n except AttributeError:\n # an Op instance\n lhs = pd_eval(\n value, local_dict=self.env, engine=self.engine, parser=self.parser\n )\n v = lhs[result]\n name = self.env.add_tmp(v)\n return self.term_type(name, env=self.env)\n\n def visit_Slice(self, node, **kwargs) -> slice:\n """df.index[slice(4,6)]"""\n lower = node.lower\n if lower is not None:\n lower = self.visit(lower).value\n upper = node.upper\n if upper is not None:\n upper = self.visit(upper).value\n step = node.step\n if step is not None:\n step = self.visit(step).value\n\n return slice(lower, upper, step)\n\n def visit_Assign(self, node, **kwargs):\n """\n support a single assignment node, like\n\n c = a + b\n\n set the assigner at the top level, must be a Name node which\n might or might not exist in the resolvers\n\n 
"""\n if len(node.targets) != 1:\n raise SyntaxError("can only assign a single expression")\n if not isinstance(node.targets[0], ast.Name):\n raise SyntaxError("left hand side of an assignment must be a single name")\n if self.env.target is None:\n raise ValueError("cannot assign without a target object")\n\n try:\n assigner = self.visit(node.targets[0], **kwargs)\n except UndefinedVariableError:\n assigner = node.targets[0].id\n\n self.assigner = getattr(assigner, "name", assigner)\n if self.assigner is None:\n raise SyntaxError(\n "left hand side of an assignment must be a single resolvable name"\n )\n\n return self.visit(node.value, **kwargs)\n\n def visit_Attribute(self, node, **kwargs):\n attr = node.attr\n value = node.value\n\n ctx = node.ctx\n if isinstance(ctx, ast.Load):\n # resolve the value\n resolved = self.visit(value).value\n try:\n v = getattr(resolved, attr)\n name = self.env.add_tmp(v)\n return self.term_type(name, self.env)\n except AttributeError:\n # something like datetime.datetime where scope is overridden\n if isinstance(value, ast.Name) and value.id == attr:\n return resolved\n raise\n\n raise ValueError(f"Invalid Attribute context {type(ctx).__name__}")\n\n def visit_Call(self, node, side=None, **kwargs):\n if isinstance(node.func, ast.Attribute) and node.func.attr != "__call__":\n res = self.visit_Attribute(node.func)\n elif not isinstance(node.func, ast.Name):\n raise TypeError("Only named functions are supported")\n else:\n try:\n res = self.visit(node.func)\n except UndefinedVariableError:\n # Check if this is a supported function name\n try:\n res = FuncNode(node.func.id)\n except ValueError:\n # Raise original error\n raise\n\n if res is None:\n # error: "expr" has no attribute "id"\n raise ValueError(\n f"Invalid function call {node.func.id}" # type: ignore[attr-defined]\n )\n if hasattr(res, "value"):\n res = res.value\n\n if isinstance(res, FuncNode):\n new_args = [self.visit(arg) for arg in node.args]\n\n if node.keywords:\n 
raise TypeError(\n f'Function "{res.name}" does not support keyword arguments'\n )\n\n return res(*new_args)\n\n else:\n new_args = [self.visit(arg)(self.env) for arg in node.args]\n\n for key in node.keywords:\n if not isinstance(key, ast.keyword):\n # error: "expr" has no attribute "id"\n raise ValueError(\n "keyword error in function call "\n f"'{node.func.id}'" # type: ignore[attr-defined]\n )\n\n if key.arg:\n kwargs[key.arg] = self.visit(key.value)(self.env)\n\n name = self.env.add_tmp(res(*new_args, **kwargs))\n return self.term_type(name=name, env=self.env)\n\n def translate_In(self, op):\n return op\n\n def visit_Compare(self, node, **kwargs):\n ops = node.ops\n comps = node.comparators\n\n # base case: we have something like a CMP b\n if len(comps) == 1:\n op = self.translate_In(ops[0])\n binop = ast.BinOp(op=op, left=node.left, right=comps[0])\n return self.visit(binop)\n\n # recursive case: we have a chained comparison, a CMP b CMP c, etc.\n left = node.left\n values = []\n for op, comp in zip(ops, comps):\n new_node = self.visit(\n ast.Compare(comparators=[comp], left=left, ops=[self.translate_In(op)])\n )\n left = comp\n values.append(new_node)\n return self.visit(ast.BoolOp(op=ast.And(), values=values))\n\n def _try_visit_binop(self, bop):\n if isinstance(bop, (Op, Term)):\n return bop\n return self.visit(bop)\n\n def visit_BoolOp(self, node, **kwargs):\n def visitor(x, y):\n lhs = self._try_visit_binop(x)\n rhs = self._try_visit_binop(y)\n\n op, op_class, lhs, rhs = self._maybe_transform_eq_ne(node, lhs, rhs)\n return self._maybe_evaluate_binop(op, node.op, lhs, rhs)\n\n operands = node.values\n return reduce(visitor, operands)\n\n\n_python_not_supported = frozenset(["Dict", "BoolOp", "In", "NotIn"])\n_numexpr_supported_calls = frozenset(REDUCTIONS + MATHOPS)\n\n\n@disallow(\n (_unsupported_nodes | _python_not_supported)\n - (_boolop_nodes | frozenset(["BoolOp", "Attribute", "In", "NotIn", "Tuple"]))\n)\nclass PandasExprVisitor(BaseExprVisitor):\n 
def __init__(\n self,\n env,\n engine,\n parser,\n preparser=partial(\n _preparse,\n f=_compose(_replace_locals, _replace_booleans, clean_backtick_quoted_toks),\n ),\n ) -> None:\n super().__init__(env, engine, parser, preparser)\n\n\n@disallow(_unsupported_nodes | _python_not_supported | frozenset(["Not"]))\nclass PythonExprVisitor(BaseExprVisitor):\n def __init__(\n self, env, engine, parser, preparser=lambda source, f=None: source\n ) -> None:\n super().__init__(env, engine, parser, preparser=preparser)\n\n\nclass Expr:\n """\n Object encapsulating an expression.\n\n Parameters\n ----------\n expr : str\n engine : str, optional, default 'numexpr'\n parser : str, optional, default 'pandas'\n env : Scope, optional, default None\n level : int, optional, default 2\n """\n\n env: Scope\n engine: str\n parser: str\n\n def __init__(\n self,\n expr,\n engine: str = "numexpr",\n parser: str = "pandas",\n env: Scope | None = None,\n level: int = 0,\n ) -> None:\n self.expr = expr\n self.env = env or Scope(level=level + 1)\n self.engine = engine\n self.parser = parser\n self._visitor = PARSERS[parser](self.env, self.engine, self.parser)\n self.terms = self.parse()\n\n @property\n def assigner(self):\n return getattr(self._visitor, "assigner", None)\n\n def __call__(self):\n return self.terms(self.env)\n\n def __repr__(self) -> str:\n return printing.pprint_thing(self.terms)\n\n def __len__(self) -> int:\n return len(self.expr)\n\n def parse(self):\n """\n Parse an expression.\n """\n return self._visitor.visit(self.expr)\n\n @property\n def names(self):\n """\n Get the names in an expression.\n """\n if is_term(self.terms):\n return frozenset([self.terms.name])\n return frozenset(term.name for term in com.flatten(self.terms))\n\n\nPARSERS = {"python": PythonExprVisitor, "pandas": PandasExprVisitor}\n
.venv\Lib\site-packages\pandas\core\computation\expr.py
expr.py
Python
25,269
0.95
0.166667
0.061224
python-kit
43
2025-05-04T09:15:39.327952
Apache-2.0
false
17d928008c7c2d0e0ac0defff494db55
"""\nExpressions\n-----------\n\nOffer fast expression evaluation through numexpr\n\n"""\nfrom __future__ import annotations\n\nimport operator\nfrom typing import TYPE_CHECKING\nimport warnings\n\nimport numpy as np\n\nfrom pandas._config import get_option\n\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core import roperator\nfrom pandas.core.computation.check import NUMEXPR_INSTALLED\n\nif NUMEXPR_INSTALLED:\n import numexpr as ne\n\nif TYPE_CHECKING:\n from pandas._typing import FuncType\n\n_TEST_MODE: bool | None = None\n_TEST_RESULT: list[bool] = []\nUSE_NUMEXPR = NUMEXPR_INSTALLED\n_evaluate: FuncType | None = None\n_where: FuncType | None = None\n\n# the set of dtypes that we will allow pass to numexpr\n_ALLOWED_DTYPES = {\n "evaluate": {"int64", "int32", "float64", "float32", "bool"},\n "where": {"int64", "float64", "bool"},\n}\n\n# the minimum prod shape that we will use numexpr\n_MIN_ELEMENTS = 1_000_000\n\n\ndef set_use_numexpr(v: bool = True) -> None:\n # set/unset to use numexpr\n global USE_NUMEXPR\n if NUMEXPR_INSTALLED:\n USE_NUMEXPR = v\n\n # choose what we are going to do\n global _evaluate, _where\n\n _evaluate = _evaluate_numexpr if USE_NUMEXPR else _evaluate_standard\n _where = _where_numexpr if USE_NUMEXPR else _where_standard\n\n\ndef set_numexpr_threads(n=None) -> None:\n # if we are using numexpr, set the threads to n\n # otherwise reset\n if NUMEXPR_INSTALLED and USE_NUMEXPR:\n if n is None:\n n = ne.detect_number_of_cores()\n ne.set_num_threads(n)\n\n\ndef _evaluate_standard(op, op_str, a, b):\n """\n Standard evaluation.\n """\n if _TEST_MODE:\n _store_test_result(False)\n return op(a, b)\n\n\ndef _can_use_numexpr(op, op_str, a, b, dtype_check) -> bool:\n """return a boolean if we WILL be using numexpr"""\n if op_str is not None:\n # required min elements (otherwise we are adding overhead)\n if a.size > _MIN_ELEMENTS:\n # check for dtype compatibility\n dtypes: set[str] = set()\n for o in [a, b]:\n # ndarray and 
Series Case\n if hasattr(o, "dtype"):\n dtypes |= {o.dtype.name}\n\n # allowed are a superset\n if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes:\n return True\n\n return False\n\n\ndef _evaluate_numexpr(op, op_str, a, b):\n result = None\n\n if _can_use_numexpr(op, op_str, a, b, "evaluate"):\n is_reversed = op.__name__.strip("_").startswith("r")\n if is_reversed:\n # we were originally called by a reversed op method\n a, b = b, a\n\n a_value = a\n b_value = b\n\n try:\n result = ne.evaluate(\n f"a_value {op_str} b_value",\n local_dict={"a_value": a_value, "b_value": b_value},\n casting="safe",\n )\n except TypeError:\n # numexpr raises eg for array ** array with integers\n # (https://github.com/pydata/numexpr/issues/379)\n pass\n except NotImplementedError:\n if _bool_arith_fallback(op_str, a, b):\n pass\n else:\n raise\n\n if is_reversed:\n # reverse order to original for fallback\n a, b = b, a\n\n if _TEST_MODE:\n _store_test_result(result is not None)\n\n if result is None:\n result = _evaluate_standard(op, op_str, a, b)\n\n return result\n\n\n_op_str_mapping = {\n operator.add: "+",\n roperator.radd: "+",\n operator.mul: "*",\n roperator.rmul: "*",\n operator.sub: "-",\n roperator.rsub: "-",\n operator.truediv: "/",\n roperator.rtruediv: "/",\n # floordiv not supported by numexpr 2.x\n operator.floordiv: None,\n roperator.rfloordiv: None,\n # we require Python semantics for mod of negative for backwards compatibility\n # see https://github.com/pydata/numexpr/issues/365\n # so sticking with unaccelerated for now GH#36552\n operator.mod: None,\n roperator.rmod: None,\n operator.pow: "**",\n roperator.rpow: "**",\n operator.eq: "==",\n operator.ne: "!=",\n operator.le: "<=",\n operator.lt: "<",\n operator.ge: ">=",\n operator.gt: ">",\n operator.and_: "&",\n roperator.rand_: "&",\n operator.or_: "|",\n roperator.ror_: "|",\n operator.xor: "^",\n roperator.rxor: "^",\n divmod: None,\n roperator.rdivmod: None,\n}\n\n\ndef _where_standard(cond, a, b):\n 
# Caller is responsible for extracting ndarray if necessary\n return np.where(cond, a, b)\n\n\ndef _where_numexpr(cond, a, b):\n # Caller is responsible for extracting ndarray if necessary\n result = None\n\n if _can_use_numexpr(None, "where", a, b, "where"):\n result = ne.evaluate(\n "where(cond_value, a_value, b_value)",\n local_dict={"cond_value": cond, "a_value": a, "b_value": b},\n casting="safe",\n )\n\n if result is None:\n result = _where_standard(cond, a, b)\n\n return result\n\n\n# turn myself on\nset_use_numexpr(get_option("compute.use_numexpr"))\n\n\ndef _has_bool_dtype(x):\n try:\n return x.dtype == bool\n except AttributeError:\n return isinstance(x, (bool, np.bool_))\n\n\n_BOOL_OP_UNSUPPORTED = {"+": "|", "*": "&", "-": "^"}\n\n\ndef _bool_arith_fallback(op_str, a, b) -> bool:\n """\n Check if we should fallback to the python `_evaluate_standard` in case\n of an unsupported operation by numexpr, which is the case for some\n boolean ops.\n """\n if _has_bool_dtype(a) and _has_bool_dtype(b):\n if op_str in _BOOL_OP_UNSUPPORTED:\n warnings.warn(\n f"evaluating in Python space because the {repr(op_str)} "\n "operator is not supported by numexpr for the bool dtype, "\n f"use {repr(_BOOL_OP_UNSUPPORTED[op_str])} instead.",\n stacklevel=find_stack_level(),\n )\n return True\n return False\n\n\ndef evaluate(op, a, b, use_numexpr: bool = True):\n """\n Evaluate and return the expression of the op on a and b.\n\n Parameters\n ----------\n op : the actual operand\n a : left operand\n b : right operand\n use_numexpr : bool, default True\n Whether to try to use numexpr.\n """\n op_str = _op_str_mapping[op]\n if op_str is not None:\n if use_numexpr:\n # error: "None" not callable\n return _evaluate(op, op_str, a, b) # type: ignore[misc]\n return _evaluate_standard(op, op_str, a, b)\n\n\ndef where(cond, a, b, use_numexpr: bool = True):\n """\n Evaluate the where condition cond on a and b.\n\n Parameters\n ----------\n cond : np.ndarray[bool]\n a : return if cond is 
True\n b : return if cond is False\n use_numexpr : bool, default True\n Whether to try to use numexpr.\n """\n assert _where is not None\n return _where(cond, a, b) if use_numexpr else _where_standard(cond, a, b)\n\n\ndef set_test_mode(v: bool = True) -> None:\n """\n Keeps track of whether numexpr was used.\n\n Stores an additional ``True`` for every successful use of evaluate with\n numexpr since the last ``get_test_result``.\n """\n global _TEST_MODE, _TEST_RESULT\n _TEST_MODE = v\n _TEST_RESULT = []\n\n\ndef _store_test_result(used_numexpr: bool) -> None:\n if used_numexpr:\n _TEST_RESULT.append(used_numexpr)\n\n\ndef get_test_result() -> list[bool]:\n """\n Get test result and reset test_results.\n """\n global _TEST_RESULT\n res = _TEST_RESULT\n _TEST_RESULT = []\n return res\n
.venv\Lib\site-packages\pandas\core\computation\expressions.py
expressions.py
Python
7,503
0.95
0.22028
0.098655
awesome-app
431
2024-08-15T17:31:34.807908
Apache-2.0
false
869762b5d03cf617504c39dabd616a01
"""\nOperator classes for eval.\n"""\n\nfrom __future__ import annotations\n\nfrom datetime import datetime\nfrom functools import partial\nimport operator\nfrom typing import (\n TYPE_CHECKING,\n Callable,\n Literal,\n)\n\nimport numpy as np\n\nfrom pandas._libs.tslibs import Timestamp\n\nfrom pandas.core.dtypes.common import (\n is_list_like,\n is_scalar,\n)\n\nimport pandas.core.common as com\nfrom pandas.core.computation.common import (\n ensure_decoded,\n result_type_many,\n)\nfrom pandas.core.computation.scope import DEFAULT_GLOBALS\n\nfrom pandas.io.formats.printing import (\n pprint_thing,\n pprint_thing_encoded,\n)\n\nif TYPE_CHECKING:\n from collections.abc import (\n Iterable,\n Iterator,\n )\n\nREDUCTIONS = ("sum", "prod", "min", "max")\n\n_unary_math_ops = (\n "sin",\n "cos",\n "exp",\n "log",\n "expm1",\n "log1p",\n "sqrt",\n "sinh",\n "cosh",\n "tanh",\n "arcsin",\n "arccos",\n "arctan",\n "arccosh",\n "arcsinh",\n "arctanh",\n "abs",\n "log10",\n "floor",\n "ceil",\n)\n_binary_math_ops = ("arctan2",)\n\nMATHOPS = _unary_math_ops + _binary_math_ops\n\n\nLOCAL_TAG = "__pd_eval_local_"\n\n\nclass Term:\n def __new__(cls, name, env, side=None, encoding=None):\n klass = Constant if not isinstance(name, str) else cls\n # error: Argument 2 for "super" not an instance of argument 1\n supr_new = super(Term, klass).__new__ # type: ignore[misc]\n return supr_new(klass)\n\n is_local: bool\n\n def __init__(self, name, env, side=None, encoding=None) -> None:\n # name is a str for Term, but may be something else for subclasses\n self._name = name\n self.env = env\n self.side = side\n tname = str(name)\n self.is_local = tname.startswith(LOCAL_TAG) or tname in DEFAULT_GLOBALS\n self._value = self._resolve_name()\n self.encoding = encoding\n\n @property\n def local_name(self) -> str:\n return self.name.replace(LOCAL_TAG, "")\n\n def __repr__(self) -> str:\n return pprint_thing(self.name)\n\n def __call__(self, *args, **kwargs):\n return self.value\n\n def 
evaluate(self, *args, **kwargs) -> Term:\n return self\n\n def _resolve_name(self):\n local_name = str(self.local_name)\n is_local = self.is_local\n if local_name in self.env.scope and isinstance(\n self.env.scope[local_name], type\n ):\n is_local = False\n\n res = self.env.resolve(local_name, is_local=is_local)\n self.update(res)\n\n if hasattr(res, "ndim") and res.ndim > 2:\n raise NotImplementedError(\n "N-dimensional objects, where N > 2, are not supported with eval"\n )\n return res\n\n def update(self, value) -> None:\n """\n search order for local (i.e., @variable) variables:\n\n scope, key_variable\n [('locals', 'local_name'),\n ('globals', 'local_name'),\n ('locals', 'key'),\n ('globals', 'key')]\n """\n key = self.name\n\n # if it's a variable name (otherwise a constant)\n if isinstance(key, str):\n self.env.swapkey(self.local_name, key, new_value=value)\n\n self.value = value\n\n @property\n def is_scalar(self) -> bool:\n return is_scalar(self._value)\n\n @property\n def type(self):\n try:\n # potentially very slow for large, mixed dtype frames\n return self._value.values.dtype\n except AttributeError:\n try:\n # ndarray\n return self._value.dtype\n except AttributeError:\n # scalar\n return type(self._value)\n\n return_type = type\n\n @property\n def raw(self) -> str:\n return f"{type(self).__name__}(name={repr(self.name)}, type={self.type})"\n\n @property\n def is_datetime(self) -> bool:\n try:\n t = self.type.type\n except AttributeError:\n t = self.type\n\n return issubclass(t, (datetime, np.datetime64))\n\n @property\n def value(self):\n return self._value\n\n @value.setter\n def value(self, new_value) -> None:\n self._value = new_value\n\n @property\n def name(self):\n return self._name\n\n @property\n def ndim(self) -> int:\n return self._value.ndim\n\n\nclass Constant(Term):\n def _resolve_name(self):\n return self._name\n\n @property\n def name(self):\n return self.value\n\n def __repr__(self) -> str:\n # in python 2 str() of float\n # can 
truncate shorter than repr()\n return repr(self.name)\n\n\n_bool_op_map = {"not": "~", "and": "&", "or": "|"}\n\n\nclass Op:\n """\n Hold an operator of arbitrary arity.\n """\n\n op: str\n\n def __init__(self, op: str, operands: Iterable[Term | Op], encoding=None) -> None:\n self.op = _bool_op_map.get(op, op)\n self.operands = operands\n self.encoding = encoding\n\n def __iter__(self) -> Iterator:\n return iter(self.operands)\n\n def __repr__(self) -> str:\n """\n Print a generic n-ary operator and its operands using infix notation.\n """\n # recurse over the operands\n parened = (f"({pprint_thing(opr)})" for opr in self.operands)\n return pprint_thing(f" {self.op} ".join(parened))\n\n @property\n def return_type(self):\n # clobber types to bool if the op is a boolean operator\n if self.op in (CMP_OPS_SYMS + BOOL_OPS_SYMS):\n return np.bool_\n return result_type_many(*(term.type for term in com.flatten(self)))\n\n @property\n def has_invalid_return_type(self) -> bool:\n types = self.operand_types\n obj_dtype_set = frozenset([np.dtype("object")])\n return self.return_type == object and types - obj_dtype_set\n\n @property\n def operand_types(self):\n return frozenset(term.type for term in com.flatten(self))\n\n @property\n def is_scalar(self) -> bool:\n return all(operand.is_scalar for operand in self.operands)\n\n @property\n def is_datetime(self) -> bool:\n try:\n t = self.return_type.type\n except AttributeError:\n t = self.return_type\n\n return issubclass(t, (datetime, np.datetime64))\n\n\ndef _in(x, y):\n """\n Compute the vectorized membership of ``x in y`` if possible, otherwise\n use Python.\n """\n try:\n return x.isin(y)\n except AttributeError:\n if is_list_like(x):\n try:\n return y.isin(x)\n except AttributeError:\n pass\n return x in y\n\n\ndef _not_in(x, y):\n """\n Compute the vectorized membership of ``x not in y`` if possible,\n otherwise use Python.\n """\n try:\n return ~x.isin(y)\n except AttributeError:\n if is_list_like(x):\n try:\n return 
~y.isin(x)\n except AttributeError:\n pass\n return x not in y\n\n\nCMP_OPS_SYMS = (">", "<", ">=", "<=", "==", "!=", "in", "not in")\n_cmp_ops_funcs = (\n operator.gt,\n operator.lt,\n operator.ge,\n operator.le,\n operator.eq,\n operator.ne,\n _in,\n _not_in,\n)\n_cmp_ops_dict = dict(zip(CMP_OPS_SYMS, _cmp_ops_funcs))\n\nBOOL_OPS_SYMS = ("&", "|", "and", "or")\n_bool_ops_funcs = (operator.and_, operator.or_, operator.and_, operator.or_)\n_bool_ops_dict = dict(zip(BOOL_OPS_SYMS, _bool_ops_funcs))\n\nARITH_OPS_SYMS = ("+", "-", "*", "/", "**", "//", "%")\n_arith_ops_funcs = (\n operator.add,\n operator.sub,\n operator.mul,\n operator.truediv,\n operator.pow,\n operator.floordiv,\n operator.mod,\n)\n_arith_ops_dict = dict(zip(ARITH_OPS_SYMS, _arith_ops_funcs))\n\nSPECIAL_CASE_ARITH_OPS_SYMS = ("**", "//", "%")\n_special_case_arith_ops_funcs = (operator.pow, operator.floordiv, operator.mod)\n_special_case_arith_ops_dict = dict(\n zip(SPECIAL_CASE_ARITH_OPS_SYMS, _special_case_arith_ops_funcs)\n)\n\n_binary_ops_dict = {}\n\nfor d in (_cmp_ops_dict, _bool_ops_dict, _arith_ops_dict):\n _binary_ops_dict.update(d)\n\n\ndef is_term(obj) -> bool:\n return isinstance(obj, Term)\n\n\nclass BinOp(Op):\n """\n Hold a binary operator and its operands.\n\n Parameters\n ----------\n op : str\n lhs : Term or Op\n rhs : Term or Op\n """\n\n def __init__(self, op: str, lhs, rhs) -> None:\n super().__init__(op, (lhs, rhs))\n self.lhs = lhs\n self.rhs = rhs\n\n self._disallow_scalar_only_bool_ops()\n\n self.convert_values()\n\n try:\n self.func = _binary_ops_dict[op]\n except KeyError as err:\n # has to be made a list for python3\n keys = list(_binary_ops_dict.keys())\n raise ValueError(\n f"Invalid binary operator {repr(op)}, valid operators are {keys}"\n ) from err\n\n def __call__(self, env):\n """\n Recursively evaluate an expression in Python space.\n\n Parameters\n ----------\n env : Scope\n\n Returns\n -------\n object\n The result of an evaluated expression.\n """\n # recurse 
over the left/right nodes\n left = self.lhs(env)\n right = self.rhs(env)\n\n return self.func(left, right)\n\n def evaluate(self, env, engine: str, parser, term_type, eval_in_python):\n """\n Evaluate a binary operation *before* being passed to the engine.\n\n Parameters\n ----------\n env : Scope\n engine : str\n parser : str\n term_type : type\n eval_in_python : list\n\n Returns\n -------\n term_type\n The "pre-evaluated" expression as an instance of ``term_type``\n """\n if engine == "python":\n res = self(env)\n else:\n # recurse over the left/right nodes\n\n left = self.lhs.evaluate(\n env,\n engine=engine,\n parser=parser,\n term_type=term_type,\n eval_in_python=eval_in_python,\n )\n\n right = self.rhs.evaluate(\n env,\n engine=engine,\n parser=parser,\n term_type=term_type,\n eval_in_python=eval_in_python,\n )\n\n # base cases\n if self.op in eval_in_python:\n res = self.func(left.value, right.value)\n else:\n from pandas.core.computation.eval import eval\n\n res = eval(self, local_dict=env, engine=engine, parser=parser)\n\n name = env.add_tmp(res)\n return term_type(name, env=env)\n\n def convert_values(self) -> None:\n """\n Convert datetimes to a comparable value in an expression.\n """\n\n def stringify(value):\n encoder: Callable\n if self.encoding is not None:\n encoder = partial(pprint_thing_encoded, encoding=self.encoding)\n else:\n encoder = pprint_thing\n return encoder(value)\n\n lhs, rhs = self.lhs, self.rhs\n\n if is_term(lhs) and lhs.is_datetime and is_term(rhs) and rhs.is_scalar:\n v = rhs.value\n if isinstance(v, (int, float)):\n v = stringify(v)\n v = Timestamp(ensure_decoded(v))\n if v.tz is not None:\n v = v.tz_convert("UTC")\n self.rhs.update(v)\n\n if is_term(rhs) and rhs.is_datetime and is_term(lhs) and lhs.is_scalar:\n v = lhs.value\n if isinstance(v, (int, float)):\n v = stringify(v)\n v = Timestamp(ensure_decoded(v))\n if v.tz is not None:\n v = v.tz_convert("UTC")\n self.lhs.update(v)\n\n def _disallow_scalar_only_bool_ops(self):\n 
rhs = self.rhs\n lhs = self.lhs\n\n # GH#24883 unwrap dtype if necessary to ensure we have a type object\n rhs_rt = rhs.return_type\n rhs_rt = getattr(rhs_rt, "type", rhs_rt)\n lhs_rt = lhs.return_type\n lhs_rt = getattr(lhs_rt, "type", lhs_rt)\n if (\n (lhs.is_scalar or rhs.is_scalar)\n and self.op in _bool_ops_dict\n and (\n not (\n issubclass(rhs_rt, (bool, np.bool_))\n and issubclass(lhs_rt, (bool, np.bool_))\n )\n )\n ):\n raise NotImplementedError("cannot evaluate scalar only bool ops")\n\n\ndef isnumeric(dtype) -> bool:\n return issubclass(np.dtype(dtype).type, np.number)\n\n\nUNARY_OPS_SYMS = ("+", "-", "~", "not")\n_unary_ops_funcs = (operator.pos, operator.neg, operator.invert, operator.invert)\n_unary_ops_dict = dict(zip(UNARY_OPS_SYMS, _unary_ops_funcs))\n\n\nclass UnaryOp(Op):\n """\n Hold a unary operator and its operands.\n\n Parameters\n ----------\n op : str\n The token used to represent the operator.\n operand : Term or Op\n The Term or Op operand to the operator.\n\n Raises\n ------\n ValueError\n * If no function associated with the passed operator token is found.\n """\n\n def __init__(self, op: Literal["+", "-", "~", "not"], operand) -> None:\n super().__init__(op, (operand,))\n self.operand = operand\n\n try:\n self.func = _unary_ops_dict[op]\n except KeyError as err:\n raise ValueError(\n f"Invalid unary operator {repr(op)}, "\n f"valid operators are {UNARY_OPS_SYMS}"\n ) from err\n\n def __call__(self, env) -> MathCall:\n operand = self.operand(env)\n # error: Cannot call function of unknown type\n return self.func(operand) # type: ignore[operator]\n\n def __repr__(self) -> str:\n return pprint_thing(f"{self.op}({self.operand})")\n\n @property\n def return_type(self) -> np.dtype:\n operand = self.operand\n if operand.return_type == np.dtype("bool"):\n return np.dtype("bool")\n if isinstance(operand, Op) and (\n operand.op in _cmp_ops_dict or operand.op in _bool_ops_dict\n ):\n return np.dtype("bool")\n return np.dtype("int")\n\n\nclass 
MathCall(Op):\n def __init__(self, func, args) -> None:\n super().__init__(func.name, args)\n self.func = func\n\n def __call__(self, env):\n # error: "Op" not callable\n operands = [op(env) for op in self.operands] # type: ignore[operator]\n return self.func.func(*operands)\n\n def __repr__(self) -> str:\n operands = map(str, self.operands)\n return pprint_thing(f"{self.op}({','.join(operands)})")\n\n\nclass FuncNode:\n def __init__(self, name: str) -> None:\n if name not in MATHOPS:\n raise ValueError(f'"{name}" is not a supported function')\n self.name = name\n self.func = getattr(np, name)\n\n def __call__(self, *args) -> MathCall:\n return MathCall(self, args)\n
.venv\Lib\site-packages\pandas\core\computation\ops.py
ops.py
Python
14,829
0.95
0.183566
0.039387
awesome-app
805
2024-09-08T08:48:12.249411
MIT
false
a6b90fcacc6b34c3c48c558f1cc719d5
"""\n:func:`~pandas.eval` source string parsing functions\n"""\nfrom __future__ import annotations\n\nfrom io import StringIO\nfrom keyword import iskeyword\nimport token\nimport tokenize\nfrom typing import TYPE_CHECKING\n\nif TYPE_CHECKING:\n from collections.abc import (\n Hashable,\n Iterator,\n )\n\n# A token value Python's tokenizer probably will never use.\nBACKTICK_QUOTED_STRING = 100\n\n\ndef create_valid_python_identifier(name: str) -> str:\n """\n Create valid Python identifiers from any string.\n\n Check if name contains any special characters. If it contains any\n special characters, the special characters will be replaced by\n a special string and a prefix is added.\n\n Raises\n ------\n SyntaxError\n If the returned name is not a Python valid identifier, raise an exception.\n This can happen if there is a hashtag in the name, as the tokenizer will\n than terminate and not find the backtick.\n But also for characters that fall out of the range of (U+0001..U+007F).\n """\n if name.isidentifier() and not iskeyword(name):\n return name\n\n # Create a dict with the special characters and their replacement string.\n # EXACT_TOKEN_TYPES contains these special characters\n # token.tok_name contains a readable description of the replacement string.\n special_characters_replacements = {\n char: f"_{token.tok_name[tokval]}_"\n for char, tokval in (tokenize.EXACT_TOKEN_TYPES.items())\n }\n special_characters_replacements.update(\n {\n " ": "_",\n "?": "_QUESTIONMARK_",\n "!": "_EXCLAMATIONMARK_",\n "$": "_DOLLARSIGN_",\n "€": "_EUROSIGN_",\n "°": "_DEGREESIGN_",\n # Including quotes works, but there are exceptions.\n "'": "_SINGLEQUOTE_",\n '"': "_DOUBLEQUOTE_",\n # Currently not possible. 
Terminates parser and won't find backtick.\n # "#": "_HASH_",\n }\n )\n\n name = "".join([special_characters_replacements.get(char, char) for char in name])\n name = f"BACKTICK_QUOTED_STRING_{name}"\n\n if not name.isidentifier():\n raise SyntaxError(f"Could not convert '{name}' to a valid Python identifier.")\n\n return name\n\n\ndef clean_backtick_quoted_toks(tok: tuple[int, str]) -> tuple[int, str]:\n """\n Clean up a column name if surrounded by backticks.\n\n Backtick quoted string are indicated by a certain tokval value. If a string\n is a backtick quoted token it will processed by\n :func:`_create_valid_python_identifier` so that the parser can find this\n string when the query is executed.\n In this case the tok will get the NAME tokval.\n\n Parameters\n ----------\n tok : tuple of int, str\n ints correspond to the all caps constants in the tokenize module\n\n Returns\n -------\n tok : Tuple[int, str]\n Either the input or token or the replacement values\n """\n toknum, tokval = tok\n if toknum == BACKTICK_QUOTED_STRING:\n return tokenize.NAME, create_valid_python_identifier(tokval)\n return toknum, tokval\n\n\ndef clean_column_name(name: Hashable) -> Hashable:\n """\n Function to emulate the cleaning of a backtick quoted name.\n\n The purpose for this function is to see what happens to the name of\n identifier if it goes to the process of being parsed a Python code\n inside a backtick quoted string and than being cleaned\n (removed of any special characters).\n\n Parameters\n ----------\n name : hashable\n Name to be cleaned.\n\n Returns\n -------\n name : hashable\n Returns the name after tokenizing and cleaning.\n\n Notes\n -----\n For some cases, a name cannot be converted to a valid Python identifier.\n In that case :func:`tokenize_string` raises a SyntaxError.\n In that case, we just return the name unmodified.\n\n If this name was used in the query string (this makes the query call impossible)\n an error will be raised by 
:func:`tokenize_backtick_quoted_string` instead,\n which is not caught and propagates to the user level.\n """\n try:\n tokenized = tokenize_string(f"`{name}`")\n tokval = next(tokenized)[1]\n return create_valid_python_identifier(tokval)\n except SyntaxError:\n return name\n\n\ndef tokenize_backtick_quoted_string(\n token_generator: Iterator[tokenize.TokenInfo], source: str, string_start: int\n) -> tuple[int, str]:\n """\n Creates a token from a backtick quoted string.\n\n Moves the token_generator forwards till right after the next backtick.\n\n Parameters\n ----------\n token_generator : Iterator[tokenize.TokenInfo]\n The generator that yields the tokens of the source string (Tuple[int, str]).\n The generator is at the first token after the backtick (`)\n\n source : str\n The Python source code string.\n\n string_start : int\n This is the start of backtick quoted string inside the source string.\n\n Returns\n -------\n tok: Tuple[int, str]\n The token that represents the backtick quoted string.\n The integer is equal to BACKTICK_QUOTED_STRING (100).\n """\n for _, tokval, start, _, _ in token_generator:\n if tokval == "`":\n string_end = start[1]\n break\n\n return BACKTICK_QUOTED_STRING, source[string_start:string_end]\n\n\ndef tokenize_string(source: str) -> Iterator[tuple[int, str]]:\n """\n Tokenize a Python source code string.\n\n Parameters\n ----------\n source : str\n The Python source code string.\n\n Returns\n -------\n tok_generator : Iterator[Tuple[int, str]]\n An iterator yielding all tokens with only toknum and tokval (Tuple[ing, str]).\n """\n line_reader = StringIO(source).readline\n token_generator = tokenize.generate_tokens(line_reader)\n\n # Loop over all tokens till a backtick (`) is found.\n # Then, take all tokens till the next backtick to form a backtick quoted string\n for toknum, tokval, start, _, _ in token_generator:\n if tokval == "`":\n try:\n yield tokenize_backtick_quoted_string(\n token_generator, source, string_start=start[1] + 
1\n )\n except Exception as err:\n raise SyntaxError(f"Failed to parse backticks in '{source}'.") from err\n else:\n yield toknum, tokval\n
.venv\Lib\site-packages\pandas\core\computation\parsing.py
parsing.py
Python
6,399
0.95
0.121212
0.055556
awesome-app
833
2025-05-30T00:44:58.593270
Apache-2.0
false
5593c964c4859a47786d6515acd09c41
""" manage PyTables query interface via Expressions """\nfrom __future__ import annotations\n\nimport ast\nfrom decimal import (\n Decimal,\n InvalidOperation,\n)\nfrom functools import partial\nfrom typing import (\n TYPE_CHECKING,\n Any,\n ClassVar,\n)\n\nimport numpy as np\n\nfrom pandas._libs.tslibs import (\n Timedelta,\n Timestamp,\n)\nfrom pandas.errors import UndefinedVariableError\n\nfrom pandas.core.dtypes.common import is_list_like\n\nimport pandas.core.common as com\nfrom pandas.core.computation import (\n expr,\n ops,\n scope as _scope,\n)\nfrom pandas.core.computation.common import ensure_decoded\nfrom pandas.core.computation.expr import BaseExprVisitor\nfrom pandas.core.computation.ops import is_term\nfrom pandas.core.construction import extract_array\nfrom pandas.core.indexes.base import Index\n\nfrom pandas.io.formats.printing import (\n pprint_thing,\n pprint_thing_encoded,\n)\n\nif TYPE_CHECKING:\n from pandas._typing import (\n Self,\n npt,\n )\n\n\nclass PyTablesScope(_scope.Scope):\n __slots__ = ("queryables",)\n\n queryables: dict[str, Any]\n\n def __init__(\n self,\n level: int,\n global_dict=None,\n local_dict=None,\n queryables: dict[str, Any] | None = None,\n ) -> None:\n super().__init__(level + 1, global_dict=global_dict, local_dict=local_dict)\n self.queryables = queryables or {}\n\n\nclass Term(ops.Term):\n env: PyTablesScope\n\n def __new__(cls, name, env, side=None, encoding=None):\n if isinstance(name, str):\n klass = cls\n else:\n klass = Constant\n return object.__new__(klass)\n\n def __init__(self, name, env: PyTablesScope, side=None, encoding=None) -> None:\n super().__init__(name, env, side=side, encoding=encoding)\n\n def _resolve_name(self):\n # must be a queryables\n if self.side == "left":\n # Note: The behavior of __new__ ensures that self.name is a str here\n if self.name not in self.env.queryables:\n raise NameError(f"name {repr(self.name)} is not defined")\n return self.name\n\n # resolve the rhs (and allow it to be 
None)\n try:\n return self.env.resolve(self.name, is_local=False)\n except UndefinedVariableError:\n return self.name\n\n # read-only property overwriting read/write property\n @property # type: ignore[misc]\n def value(self):\n return self._value\n\n\nclass Constant(Term):\n def __init__(self, name, env: PyTablesScope, side=None, encoding=None) -> None:\n assert isinstance(env, PyTablesScope), type(env)\n super().__init__(name, env, side=side, encoding=encoding)\n\n def _resolve_name(self):\n return self._name\n\n\nclass BinOp(ops.BinOp):\n _max_selectors = 31\n\n op: str\n queryables: dict[str, Any]\n condition: str | None\n\n def __init__(self, op: str, lhs, rhs, queryables: dict[str, Any], encoding) -> None:\n super().__init__(op, lhs, rhs)\n self.queryables = queryables\n self.encoding = encoding\n self.condition = None\n\n def _disallow_scalar_only_bool_ops(self) -> None:\n pass\n\n def prune(self, klass):\n def pr(left, right):\n """create and return a new specialized BinOp from myself"""\n if left is None:\n return right\n elif right is None:\n return left\n\n k = klass\n if isinstance(left, ConditionBinOp):\n if isinstance(right, ConditionBinOp):\n k = JointConditionBinOp\n elif isinstance(left, k):\n return left\n elif isinstance(right, k):\n return right\n\n elif isinstance(left, FilterBinOp):\n if isinstance(right, FilterBinOp):\n k = JointFilterBinOp\n elif isinstance(left, k):\n return left\n elif isinstance(right, k):\n return right\n\n return k(\n self.op, left, right, queryables=self.queryables, encoding=self.encoding\n ).evaluate()\n\n left, right = self.lhs, self.rhs\n\n if is_term(left) and is_term(right):\n res = pr(left.value, right.value)\n elif not is_term(left) and is_term(right):\n res = pr(left.prune(klass), right.value)\n elif is_term(left) and not is_term(right):\n res = pr(left.value, right.prune(klass))\n elif not (is_term(left) or is_term(right)):\n res = pr(left.prune(klass), right.prune(klass))\n\n return res\n\n def conform(self, 
rhs):\n """inplace conform rhs"""\n if not is_list_like(rhs):\n rhs = [rhs]\n if isinstance(rhs, np.ndarray):\n rhs = rhs.ravel()\n return rhs\n\n @property\n def is_valid(self) -> bool:\n """return True if this is a valid field"""\n return self.lhs in self.queryables\n\n @property\n def is_in_table(self) -> bool:\n """\n return True if this is a valid column name for generation (e.g. an\n actual column in the table)\n """\n return self.queryables.get(self.lhs) is not None\n\n @property\n def kind(self):\n """the kind of my field"""\n return getattr(self.queryables.get(self.lhs), "kind", None)\n\n @property\n def meta(self):\n """the meta of my field"""\n return getattr(self.queryables.get(self.lhs), "meta", None)\n\n @property\n def metadata(self):\n """the metadata of my field"""\n return getattr(self.queryables.get(self.lhs), "metadata", None)\n\n def generate(self, v) -> str:\n """create and return the op string for this TermValue"""\n val = v.tostring(self.encoding)\n return f"({self.lhs} {self.op} {val})"\n\n def convert_value(self, v) -> TermValue:\n """\n convert the expression that is in the term to something that is\n accepted by pytables\n """\n\n def stringify(value):\n if self.encoding is not None:\n return pprint_thing_encoded(value, encoding=self.encoding)\n return pprint_thing(value)\n\n kind = ensure_decoded(self.kind)\n meta = ensure_decoded(self.meta)\n if kind == "datetime" or (kind and kind.startswith("datetime64")):\n if isinstance(v, (int, float)):\n v = stringify(v)\n v = ensure_decoded(v)\n v = Timestamp(v).as_unit("ns")\n if v.tz is not None:\n v = v.tz_convert("UTC")\n return TermValue(v, v._value, kind)\n elif kind in ("timedelta64", "timedelta"):\n if isinstance(v, str):\n v = Timedelta(v)\n else:\n v = Timedelta(v, unit="s")\n v = v.as_unit("ns")._value\n return TermValue(int(v), v, kind)\n elif meta == "category":\n metadata = extract_array(self.metadata, extract_numpy=True)\n result: npt.NDArray[np.intp] | np.intp | int\n if v not in 
metadata:\n result = -1\n else:\n result = metadata.searchsorted(v, side="left")\n return TermValue(result, result, "integer")\n elif kind == "integer":\n try:\n v_dec = Decimal(v)\n except InvalidOperation:\n # GH 54186\n # convert v to float to raise float's ValueError\n float(v)\n else:\n v = int(v_dec.to_integral_exact(rounding="ROUND_HALF_EVEN"))\n return TermValue(v, v, kind)\n elif kind == "float":\n v = float(v)\n return TermValue(v, v, kind)\n elif kind == "bool":\n if isinstance(v, str):\n v = v.strip().lower() not in [\n "false",\n "f",\n "no",\n "n",\n "none",\n "0",\n "[]",\n "{}",\n "",\n ]\n else:\n v = bool(v)\n return TermValue(v, v, kind)\n elif isinstance(v, str):\n # string quoting\n return TermValue(v, stringify(v), "string")\n else:\n raise TypeError(f"Cannot compare {v} of type {type(v)} to {kind} column")\n\n def convert_values(self) -> None:\n pass\n\n\nclass FilterBinOp(BinOp):\n filter: tuple[Any, Any, Index] | None = None\n\n def __repr__(self) -> str:\n if self.filter is None:\n return "Filter: Not Initialized"\n return pprint_thing(f"[Filter : [{self.filter[0]}] -> [{self.filter[1]}]")\n\n def invert(self) -> Self:\n """invert the filter"""\n if self.filter is not None:\n self.filter = (\n self.filter[0],\n self.generate_filter_op(invert=True),\n self.filter[2],\n )\n return self\n\n def format(self):\n """return the actual filter format"""\n return [self.filter]\n\n # error: Signature of "evaluate" incompatible with supertype "BinOp"\n def evaluate(self) -> Self | None: # type: ignore[override]\n if not self.is_valid:\n raise ValueError(f"query term is not valid [{self}]")\n\n rhs = self.conform(self.rhs)\n values = list(rhs)\n\n if self.is_in_table:\n # if too many values to create the expression, use a filter instead\n if self.op in ["==", "!="] and len(values) > self._max_selectors:\n filter_op = self.generate_filter_op()\n self.filter = (self.lhs, filter_op, Index(values))\n\n return self\n return None\n\n # equality conditions\n 
if self.op in ["==", "!="]:\n filter_op = self.generate_filter_op()\n self.filter = (self.lhs, filter_op, Index(values))\n\n else:\n raise TypeError(\n f"passing a filterable condition to a non-table indexer [{self}]"\n )\n\n return self\n\n def generate_filter_op(self, invert: bool = False):\n if (self.op == "!=" and not invert) or (self.op == "==" and invert):\n return lambda axis, vals: ~axis.isin(vals)\n else:\n return lambda axis, vals: axis.isin(vals)\n\n\nclass JointFilterBinOp(FilterBinOp):\n def format(self):\n raise NotImplementedError("unable to collapse Joint Filters")\n\n # error: Signature of "evaluate" incompatible with supertype "BinOp"\n def evaluate(self) -> Self: # type: ignore[override]\n return self\n\n\nclass ConditionBinOp(BinOp):\n def __repr__(self) -> str:\n return pprint_thing(f"[Condition : [{self.condition}]]")\n\n def invert(self):\n """invert the condition"""\n # if self.condition is not None:\n # self.condition = "~(%s)" % self.condition\n # return self\n raise NotImplementedError(\n "cannot use an invert condition when passing to numexpr"\n )\n\n def format(self):\n """return the actual ne format"""\n return self.condition\n\n # error: Signature of "evaluate" incompatible with supertype "BinOp"\n def evaluate(self) -> Self | None: # type: ignore[override]\n if not self.is_valid:\n raise ValueError(f"query term is not valid [{self}]")\n\n # convert values if we are in the table\n if not self.is_in_table:\n return None\n\n rhs = self.conform(self.rhs)\n values = [self.convert_value(v) for v in rhs]\n\n # equality conditions\n if self.op in ["==", "!="]:\n # too many values to create the expression?\n if len(values) <= self._max_selectors:\n vs = [self.generate(v) for v in values]\n self.condition = f"({' | '.join(vs)})"\n\n # use a filter after reading\n else:\n return None\n else:\n self.condition = self.generate(values[0])\n\n return self\n\n\nclass JointConditionBinOp(ConditionBinOp):\n # error: Signature of "evaluate" incompatible 
with supertype "BinOp"\n def evaluate(self) -> Self: # type: ignore[override]\n self.condition = f"({self.lhs.condition} {self.op} {self.rhs.condition})"\n return self\n\n\nclass UnaryOp(ops.UnaryOp):\n def prune(self, klass):\n if self.op != "~":\n raise NotImplementedError("UnaryOp only support invert type ops")\n\n operand = self.operand\n operand = operand.prune(klass)\n\n if operand is not None and (\n issubclass(klass, ConditionBinOp)\n and operand.condition is not None\n or not issubclass(klass, ConditionBinOp)\n and issubclass(klass, FilterBinOp)\n and operand.filter is not None\n ):\n return operand.invert()\n return None\n\n\nclass PyTablesExprVisitor(BaseExprVisitor):\n const_type: ClassVar[type[ops.Term]] = Constant\n term_type: ClassVar[type[Term]] = Term\n\n def __init__(self, env, engine, parser, **kwargs) -> None:\n super().__init__(env, engine, parser)\n for bin_op in self.binary_ops:\n bin_node = self.binary_op_nodes_map[bin_op]\n setattr(\n self,\n f"visit_{bin_node}",\n lambda node, bin_op=bin_op: partial(BinOp, bin_op, **kwargs),\n )\n\n def visit_UnaryOp(self, node, **kwargs) -> ops.Term | UnaryOp | None:\n if isinstance(node.op, (ast.Not, ast.Invert)):\n return UnaryOp("~", self.visit(node.operand))\n elif isinstance(node.op, ast.USub):\n return self.const_type(-self.visit(node.operand).value, self.env)\n elif isinstance(node.op, ast.UAdd):\n raise NotImplementedError("Unary addition not supported")\n # TODO: return None might never be reached\n return None\n\n def visit_Index(self, node, **kwargs):\n return self.visit(node.value).value\n\n def visit_Assign(self, node, **kwargs):\n cmpr = ast.Compare(\n ops=[ast.Eq()], left=node.targets[0], comparators=[node.value]\n )\n return self.visit(cmpr)\n\n def visit_Subscript(self, node, **kwargs) -> ops.Term:\n # only allow simple subscripts\n\n value = self.visit(node.value)\n slobj = self.visit(node.slice)\n try:\n value = value.value\n except AttributeError:\n pass\n\n if isinstance(slobj, 
Term):\n # In py39 np.ndarray lookups with Term containing int raise\n slobj = slobj.value\n\n try:\n return self.const_type(value[slobj], self.env)\n except TypeError as err:\n raise ValueError(\n f"cannot subscript {repr(value)} with {repr(slobj)}"\n ) from err\n\n def visit_Attribute(self, node, **kwargs):\n attr = node.attr\n value = node.value\n\n ctx = type(node.ctx)\n if ctx == ast.Load:\n # resolve the value\n resolved = self.visit(value)\n\n # try to get the value to see if we are another expression\n try:\n resolved = resolved.value\n except AttributeError:\n pass\n\n try:\n return self.term_type(getattr(resolved, attr), self.env)\n except AttributeError:\n # something like datetime.datetime where scope is overridden\n if isinstance(value, ast.Name) and value.id == attr:\n return resolved\n\n raise ValueError(f"Invalid Attribute context {ctx.__name__}")\n\n def translate_In(self, op):\n return ast.Eq() if isinstance(op, ast.In) else op\n\n def _rewrite_membership_op(self, node, left, right):\n return self.visit(node.op), node.op, left, right\n\n\ndef _validate_where(w):\n """\n Validate that the where statement is of the right type.\n\n The type may either be String, Expr, or list-like of Exprs.\n\n Parameters\n ----------\n w : String term expression, Expr, or list-like of Exprs.\n\n Returns\n -------\n where : The original where clause if the check was successful.\n\n Raises\n ------\n TypeError : An invalid data type was passed in for w (e.g. 
dict).\n """\n if not (isinstance(w, (PyTablesExpr, str)) or is_list_like(w)):\n raise TypeError(\n "where must be passed as a string, PyTablesExpr, "\n "or list-like of PyTablesExpr"\n )\n\n return w\n\n\nclass PyTablesExpr(expr.Expr):\n """\n Hold a pytables-like expression, comprised of possibly multiple 'terms'.\n\n Parameters\n ----------\n where : string term expression, PyTablesExpr, or list-like of PyTablesExprs\n queryables : a "kinds" map (dict of column name -> kind), or None if column\n is non-indexable\n encoding : an encoding that will encode the query terms\n\n Returns\n -------\n a PyTablesExpr object\n\n Examples\n --------\n 'index>=date'\n "columns=['A', 'D']"\n 'columns=A'\n 'columns==A'\n "~(columns=['A','B'])"\n 'index>df.index[3] & string="bar"'\n '(index>df.index[3] & index<=df.index[6]) | string="bar"'\n "ts>=Timestamp('2012-02-01')"\n "major_axis>=20130101"\n """\n\n _visitor: PyTablesExprVisitor | None\n env: PyTablesScope\n expr: str\n\n def __init__(\n self,\n where,\n queryables: dict[str, Any] | None = None,\n encoding=None,\n scope_level: int = 0,\n ) -> None:\n where = _validate_where(where)\n\n self.encoding = encoding\n self.condition = None\n self.filter = None\n self.terms = None\n self._visitor = None\n\n # capture the environment if needed\n local_dict: _scope.DeepChainMap[Any, Any] | None = None\n\n if isinstance(where, PyTablesExpr):\n local_dict = where.env.scope\n _where = where.expr\n\n elif is_list_like(where):\n where = list(where)\n for idx, w in enumerate(where):\n if isinstance(w, PyTablesExpr):\n local_dict = w.env.scope\n else:\n where[idx] = _validate_where(w)\n _where = " & ".join([f"({w})" for w in com.flatten(where)])\n else:\n # _validate_where ensures we otherwise have a string\n _where = where\n\n self.expr = _where\n self.env = PyTablesScope(scope_level + 1, local_dict=local_dict)\n\n if queryables is not None and isinstance(self.expr, str):\n self.env.queryables.update(queryables)\n self._visitor = 
PyTablesExprVisitor(\n self.env,\n queryables=queryables,\n parser="pytables",\n engine="pytables",\n encoding=encoding,\n )\n self.terms = self.parse()\n\n def __repr__(self) -> str:\n if self.terms is not None:\n return pprint_thing(self.terms)\n return pprint_thing(self.expr)\n\n def evaluate(self):\n """create and return the numexpr condition and filter"""\n try:\n self.condition = self.terms.prune(ConditionBinOp)\n except AttributeError as err:\n raise ValueError(\n f"cannot process expression [{self.expr}], [{self}] "\n "is not a valid condition"\n ) from err\n try:\n self.filter = self.terms.prune(FilterBinOp)\n except AttributeError as err:\n raise ValueError(\n f"cannot process expression [{self.expr}], [{self}] "\n "is not a valid filter"\n ) from err\n\n return self.condition, self.filter\n\n\nclass TermValue:\n """hold a term value the we use to construct a condition/filter"""\n\n def __init__(self, value, converted, kind: str) -> None:\n assert isinstance(kind, str), kind\n self.value = value\n self.converted = converted\n self.kind = kind\n\n def tostring(self, encoding) -> str:\n """quote the string if not encoded else encode and return"""\n if self.kind == "string":\n if encoding is not None:\n return str(self.converted)\n return f'"{self.converted}"'\n elif self.kind == "float":\n # python 2 str(float) is not always\n # round-trippable so use repr()\n return repr(self.converted)\n return str(self.converted)\n\n\ndef maybe_expression(s) -> bool:\n """loose checking if s is a pytables-acceptable expression"""\n if not isinstance(s, str):\n return False\n operations = PyTablesExprVisitor.binary_ops + PyTablesExprVisitor.unary_ops + ("=",)\n\n # make sure we have an op at least\n return any(op in s for op in operations)\n
.venv\Lib\site-packages\pandas\core\computation\pytables.py
pytables.py
Python
20,745
0.95
0.201201
0.05709
python-kit
593
2024-12-05T16:17:49.423149
BSD-3-Clause
false
bd6691657e9227099286cf1c2de270a8
"""\nModule for scope operations\n"""\nfrom __future__ import annotations\n\nfrom collections import ChainMap\nimport datetime\nimport inspect\nfrom io import StringIO\nimport itertools\nimport pprint\nimport struct\nimport sys\nfrom typing import TypeVar\n\nimport numpy as np\n\nfrom pandas._libs.tslibs import Timestamp\nfrom pandas.errors import UndefinedVariableError\n\n_KT = TypeVar("_KT")\n_VT = TypeVar("_VT")\n\n\n# https://docs.python.org/3/library/collections.html#chainmap-examples-and-recipes\nclass DeepChainMap(ChainMap[_KT, _VT]):\n """\n Variant of ChainMap that allows direct updates to inner scopes.\n\n Only works when all passed mapping are mutable.\n """\n\n def __setitem__(self, key: _KT, value: _VT) -> None:\n for mapping in self.maps:\n if key in mapping:\n mapping[key] = value\n return\n self.maps[0][key] = value\n\n def __delitem__(self, key: _KT) -> None:\n """\n Raises\n ------\n KeyError\n If `key` doesn't exist.\n """\n for mapping in self.maps:\n if key in mapping:\n del mapping[key]\n return\n raise KeyError(key)\n\n\ndef ensure_scope(\n level: int, global_dict=None, local_dict=None, resolvers=(), target=None\n) -> Scope:\n """Ensure that we are grabbing the correct scope."""\n return Scope(\n level + 1,\n global_dict=global_dict,\n local_dict=local_dict,\n resolvers=resolvers,\n target=target,\n )\n\n\ndef _replacer(x) -> str:\n """\n Replace a number with its hexadecimal representation. 
Used to tag\n temporary variables with their calling scope's id.\n """\n # get the hex repr of the binary char and remove 0x and pad by pad_size\n # zeros\n try:\n hexin = ord(x)\n except TypeError:\n # bytes literals masquerade as ints when iterating in py3\n hexin = x\n\n return hex(hexin)\n\n\ndef _raw_hex_id(obj) -> str:\n """Return the padded hexadecimal id of ``obj``."""\n # interpret as a pointer since that's what really what id returns\n packed = struct.pack("@P", id(obj))\n return "".join([_replacer(x) for x in packed])\n\n\nDEFAULT_GLOBALS = {\n "Timestamp": Timestamp,\n "datetime": datetime.datetime,\n "True": True,\n "False": False,\n "list": list,\n "tuple": tuple,\n "inf": np.inf,\n "Inf": np.inf,\n}\n\n\ndef _get_pretty_string(obj) -> str:\n """\n Return a prettier version of obj.\n\n Parameters\n ----------\n obj : object\n Object to pretty print\n\n Returns\n -------\n str\n Pretty print object repr\n """\n sio = StringIO()\n pprint.pprint(obj, stream=sio)\n return sio.getvalue()\n\n\nclass Scope:\n """\n Object to hold scope, with a few bells to deal with some custom syntax\n and contexts added by pandas.\n\n Parameters\n ----------\n level : int\n global_dict : dict or None, optional, default None\n local_dict : dict or Scope or None, optional, default None\n resolvers : list-like or None, optional, default None\n target : object\n\n Attributes\n ----------\n level : int\n scope : DeepChainMap\n target : object\n temps : dict\n """\n\n __slots__ = ["level", "scope", "target", "resolvers", "temps"]\n level: int\n scope: DeepChainMap\n resolvers: DeepChainMap\n temps: dict\n\n def __init__(\n self, level: int, global_dict=None, local_dict=None, resolvers=(), target=None\n ) -> None:\n self.level = level + 1\n\n # shallow copy because we don't want to keep filling this up with what\n # was there before if there are multiple calls to Scope/_ensure_scope\n self.scope = DeepChainMap(DEFAULT_GLOBALS.copy())\n self.target = target\n\n if 
isinstance(local_dict, Scope):\n self.scope.update(local_dict.scope)\n if local_dict.target is not None:\n self.target = local_dict.target\n self._update(local_dict.level)\n\n frame = sys._getframe(self.level)\n\n try:\n # shallow copy here because we don't want to replace what's in\n # scope when we align terms (alignment accesses the underlying\n # numpy array of pandas objects)\n scope_global = self.scope.new_child(\n (global_dict if global_dict is not None else frame.f_globals).copy()\n )\n self.scope = DeepChainMap(scope_global)\n if not isinstance(local_dict, Scope):\n scope_local = self.scope.new_child(\n (local_dict if local_dict is not None else frame.f_locals).copy()\n )\n self.scope = DeepChainMap(scope_local)\n finally:\n del frame\n\n # assumes that resolvers are going from outermost scope to inner\n if isinstance(local_dict, Scope):\n resolvers += tuple(local_dict.resolvers.maps)\n self.resolvers = DeepChainMap(*resolvers)\n self.temps = {}\n\n def __repr__(self) -> str:\n scope_keys = _get_pretty_string(list(self.scope.keys()))\n res_keys = _get_pretty_string(list(self.resolvers.keys()))\n return f"{type(self).__name__}(scope={scope_keys}, resolvers={res_keys})"\n\n @property\n def has_resolvers(self) -> bool:\n """\n Return whether we have any extra scope.\n\n For example, DataFrames pass Their columns as resolvers during calls to\n ``DataFrame.eval()`` and ``DataFrame.query()``.\n\n Returns\n -------\n hr : bool\n """\n return bool(len(self.resolvers))\n\n def resolve(self, key: str, is_local: bool):\n """\n Resolve a variable name in a possibly local context.\n\n Parameters\n ----------\n key : str\n A variable name\n is_local : bool\n Flag indicating whether the variable is local or not (prefixed with\n the '@' symbol)\n\n Returns\n -------\n value : object\n The value of a particular variable\n """\n try:\n # only look for locals in outer scope\n if is_local:\n return self.scope[key]\n\n # not a local variable so check in resolvers if we have 
them\n if self.has_resolvers:\n return self.resolvers[key]\n\n # if we're here that means that we have no locals and we also have\n # no resolvers\n assert not is_local and not self.has_resolvers\n return self.scope[key]\n except KeyError:\n try:\n # last ditch effort we look in temporaries\n # these are created when parsing indexing expressions\n # e.g., df[df > 0]\n return self.temps[key]\n except KeyError as err:\n raise UndefinedVariableError(key, is_local) from err\n\n def swapkey(self, old_key: str, new_key: str, new_value=None) -> None:\n """\n Replace a variable name, with a potentially new value.\n\n Parameters\n ----------\n old_key : str\n Current variable name to replace\n new_key : str\n New variable name to replace `old_key` with\n new_value : object\n Value to be replaced along with the possible renaming\n """\n if self.has_resolvers:\n maps = self.resolvers.maps + self.scope.maps\n else:\n maps = self.scope.maps\n\n maps.append(self.temps)\n\n for mapping in maps:\n if old_key in mapping:\n mapping[new_key] = new_value\n return\n\n def _get_vars(self, stack, scopes: list[str]) -> None:\n """\n Get specifically scoped variables from a list of stack frames.\n\n Parameters\n ----------\n stack : list\n A list of stack frames as returned by ``inspect.stack()``\n scopes : sequence of strings\n A sequence containing valid stack frame attribute names that\n evaluate to a dictionary. 
For example, ('locals', 'globals')\n """\n variables = itertools.product(scopes, stack)\n for scope, (frame, _, _, _, _, _) in variables:\n try:\n d = getattr(frame, f"f_{scope}")\n self.scope = DeepChainMap(self.scope.new_child(d))\n finally:\n # won't remove it, but DECREF it\n # in Py3 this probably isn't necessary since frame won't be\n # scope after the loop\n del frame\n\n def _update(self, level: int) -> None:\n """\n Update the current scope by going back `level` levels.\n\n Parameters\n ----------\n level : int\n """\n sl = level + 1\n\n # add sl frames to the scope starting with the\n # most distant and overwriting with more current\n # makes sure that we can capture variable scope\n stack = inspect.stack()\n\n try:\n self._get_vars(stack[:sl], scopes=["locals"])\n finally:\n del stack[:], stack\n\n def add_tmp(self, value) -> str:\n """\n Add a temporary variable to the scope.\n\n Parameters\n ----------\n value : object\n An arbitrary object to be assigned to a temporary variable.\n\n Returns\n -------\n str\n The name of the temporary variable created.\n """\n name = f"{type(value).__name__}_{self.ntemps}_{_raw_hex_id(self)}"\n\n # add to inner most scope\n assert name not in self.temps\n self.temps[name] = value\n assert name in self.temps\n\n # only increment if the variable gets put in the scope\n return name\n\n @property\n def ntemps(self) -> int:\n """The number of temporary variables in this scope"""\n return len(self.temps)\n\n @property\n def full_scope(self) -> DeepChainMap:\n """\n Return the full scope for use with passing to engines transparently\n as a mapping.\n\n Returns\n -------\n vars : DeepChainMap\n All variables in this scope.\n """\n maps = [self.temps] + self.resolvers.maps + self.scope.maps\n return DeepChainMap(*maps)\n
.venv\Lib\site-packages\pandas\core\computation\scope.py
scope.py
Python
10,203
0.95
0.135211
0.088136
awesome-app
48
2025-06-17T19:24:42.001545
MIT
false
af3fd7c975e9502f16cb5ca8a2dbd0b8
\n\n
.venv\Lib\site-packages\pandas\core\computation\__pycache__\align.cpython-313.pyc
align.cpython-313.pyc
Other
9,652
0.8
0.011905
0
vue-tools
909
2023-12-11T06:56:52.534944
Apache-2.0
false
390fa84ca54b759096c96ca195968b02
\n\n
.venv\Lib\site-packages\pandas\core\computation\__pycache__\api.cpython-313.pyc
api.cpython-313.pyc
Other
280
0.7
0
0
vue-tools
993
2023-10-18T19:05:02.973215
GPL-3.0
false
39f8f7221b76ebde2af3db9bffc42d42
\n\n
.venv\Lib\site-packages\pandas\core\computation\__pycache__\check.cpython-313.pyc
check.cpython-313.pyc
Other
439
0.85
0
0
awesome-app
513
2025-01-14T01:43:29.197402
BSD-3-Clause
false
80dd67783c3d0b788cbaf6509a49b7df
\n\n
.venv\Lib\site-packages\pandas\core\computation\__pycache__\common.cpython-313.pyc
common.cpython-313.pyc
Other
2,011
0.8
0
0
python-kit
58
2023-12-11T01:22:05.797610
MIT
false
a34e973b7e253d4e2b9e945bd9d233a2
\n\n
.venv\Lib\site-packages\pandas\core\computation\__pycache__\engines.cpython-313.pyc
engines.cpython-313.pyc
Other
5,289
0.95
0.072289
0.013333
awesome-app
823
2025-01-13T23:05:00.935546
GPL-3.0
false
b11d6516f98b61b2a529bf5049661620
\n\n
.venv\Lib\site-packages\pandas\core\computation\__pycache__\eval.cpython-313.pyc
eval.cpython-313.pyc
Other
13,813
0.95
0.066148
0.02193
vue-tools
556
2025-04-13T09:33:38.060869
MIT
false
2ab6280a8a2d780f9392c574c7570a10
\n\n
.venv\Lib\site-packages\pandas\core\computation\__pycache__\expr.cpython-313.pyc
expr.cpython-313.pyc
Other
35,278
0.95
0.037143
0.00627
node-utils
520
2024-09-14T23:14:06.940741
MIT
false
33cb10f57dad37c291aae0a3bdf1b35d
\n\n
.venv\Lib\site-packages\pandas\core\computation\__pycache__\expressions.cpython-313.pyc
expressions.cpython-313.pyc
Other
8,633
0.8
0.078261
0
awesome-app
638
2024-03-17T07:19:16.620470
BSD-3-Clause
false
c0282a6cf20a3dc357aad040044b1558
\n\n
.venv\Lib\site-packages\pandas\core\computation\__pycache__\ops.cpython-313.pyc
ops.cpython-313.pyc
Other
23,915
0.95
0.028436
0.005076
awesome-app
68
2023-07-25T13:56:17.957424
MIT
false
43ddb937a5e474b50eee049f4bb9432d
\n\n
.venv\Lib\site-packages\pandas\core\computation\__pycache__\parsing.cpython-313.pyc
parsing.cpython-313.pyc
Other
6,645
0.95
0.056
0
awesome-app
590
2023-08-15T20:30:15.892990
MIT
false
37bbd68bc19a0729f7792cbec2c2078a
\n\n
.venv\Lib\site-packages\pandas\core\computation\__pycache__\pytables.cpython-313.pyc
pytables.cpython-313.pyc
Other
30,722
0.8
0.035573
0.016949
vue-tools
826
2024-02-26T10:08:48.123805
GPL-3.0
false
8dbb773b79b41cab35487138e2709d1b
\n\n
.venv\Lib\site-packages\pandas\core\computation\__pycache__\scope.cpython-313.pyc
scope.cpython-313.pyc
Other
11,716
0.8
0.01005
0
node-utils
472
2023-07-11T06:04:12.906062
MIT
false
7230fae209168d1701cd7b6f70dd48b1
\n\n
.venv\Lib\site-packages\pandas\core\computation\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
198
0.7
0
0
node-utils
811
2023-09-02T11:45:00.599098
Apache-2.0
false
300bb2f9e6811e78a4861fb48af7bb8a
from pandas.core.dtypes.common import (\n is_any_real_numeric_dtype,\n is_array_like,\n is_bool,\n is_bool_dtype,\n is_categorical_dtype,\n is_complex,\n is_complex_dtype,\n is_datetime64_any_dtype,\n is_datetime64_dtype,\n is_datetime64_ns_dtype,\n is_datetime64tz_dtype,\n is_dict_like,\n is_dtype_equal,\n is_extension_array_dtype,\n is_file_like,\n is_float,\n is_float_dtype,\n is_hashable,\n is_int64_dtype,\n is_integer,\n is_integer_dtype,\n is_interval,\n is_interval_dtype,\n is_iterator,\n is_list_like,\n is_named_tuple,\n is_number,\n is_numeric_dtype,\n is_object_dtype,\n is_period_dtype,\n is_re,\n is_re_compilable,\n is_scalar,\n is_signed_integer_dtype,\n is_sparse,\n is_string_dtype,\n is_timedelta64_dtype,\n is_timedelta64_ns_dtype,\n is_unsigned_integer_dtype,\n pandas_dtype,\n)\n\n__all__ = [\n "is_any_real_numeric_dtype",\n "is_array_like",\n "is_bool",\n "is_bool_dtype",\n "is_categorical_dtype",\n "is_complex",\n "is_complex_dtype",\n "is_datetime64_any_dtype",\n "is_datetime64_dtype",\n "is_datetime64_ns_dtype",\n "is_datetime64tz_dtype",\n "is_dict_like",\n "is_dtype_equal",\n "is_extension_array_dtype",\n "is_file_like",\n "is_float",\n "is_float_dtype",\n "is_hashable",\n "is_int64_dtype",\n "is_integer",\n "is_integer_dtype",\n "is_interval",\n "is_interval_dtype",\n "is_iterator",\n "is_list_like",\n "is_named_tuple",\n "is_number",\n "is_numeric_dtype",\n "is_object_dtype",\n "is_period_dtype",\n "is_re",\n "is_re_compilable",\n "is_scalar",\n "is_signed_integer_dtype",\n "is_sparse",\n "is_string_dtype",\n "is_timedelta64_dtype",\n "is_timedelta64_ns_dtype",\n "is_unsigned_integer_dtype",\n "pandas_dtype",\n]\n
.venv\Lib\site-packages\pandas\core\dtypes\api.py
api.py
Python
1,819
0.85
0
0
react-lib
213
2023-12-11T10:26:30.626614
Apache-2.0
false
0370bb71a8d1009ee31d210b79cb5337
"""\nFunctions for implementing 'astype' methods according to pandas conventions,\nparticularly ones that differ from numpy.\n"""\nfrom __future__ import annotations\n\nimport inspect\nfrom typing import (\n TYPE_CHECKING,\n overload,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import lib\nfrom pandas._libs.tslibs.timedeltas import array_to_timedelta64\nfrom pandas.errors import IntCastingNaNError\n\nfrom pandas.core.dtypes.common import (\n is_object_dtype,\n is_string_dtype,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.dtypes import (\n ExtensionDtype,\n NumpyEADtype,\n)\n\nif TYPE_CHECKING:\n from pandas._typing import (\n ArrayLike,\n DtypeObj,\n IgnoreRaise,\n )\n\n from pandas.core.arrays import ExtensionArray\n\n_dtype_obj = np.dtype(object)\n\n\n@overload\ndef _astype_nansafe(\n arr: np.ndarray, dtype: np.dtype, copy: bool = ..., skipna: bool = ...\n) -> np.ndarray:\n ...\n\n\n@overload\ndef _astype_nansafe(\n arr: np.ndarray, dtype: ExtensionDtype, copy: bool = ..., skipna: bool = ...\n) -> ExtensionArray:\n ...\n\n\ndef _astype_nansafe(\n arr: np.ndarray, dtype: DtypeObj, copy: bool = True, skipna: bool = False\n) -> ArrayLike:\n """\n Cast the elements of an array to a given dtype a nan-safe manner.\n\n Parameters\n ----------\n arr : ndarray\n dtype : np.dtype or ExtensionDtype\n copy : bool, default True\n If False, a view will be attempted but may fail, if\n e.g. 
the item sizes don't align.\n skipna: bool, default False\n Whether or not we should skip NaN when casting as a string-type.\n\n Raises\n ------\n ValueError\n The dtype was a datetime64/timedelta64 dtype, but it had no unit.\n """\n\n # dispatch on extension dtype if needed\n if isinstance(dtype, ExtensionDtype):\n return dtype.construct_array_type()._from_sequence(arr, dtype=dtype, copy=copy)\n\n elif not isinstance(dtype, np.dtype): # pragma: no cover\n raise ValueError("dtype must be np.dtype or ExtensionDtype")\n\n if arr.dtype.kind in "mM":\n from pandas.core.construction import ensure_wrapped_if_datetimelike\n\n arr = ensure_wrapped_if_datetimelike(arr)\n res = arr.astype(dtype, copy=copy)\n return np.asarray(res)\n\n if issubclass(dtype.type, str):\n shape = arr.shape\n if arr.ndim > 1:\n arr = arr.ravel()\n return lib.ensure_string_array(\n arr, skipna=skipna, convert_na_value=False\n ).reshape(shape)\n\n elif np.issubdtype(arr.dtype, np.floating) and dtype.kind in "iu":\n return _astype_float_to_int_nansafe(arr, dtype, copy)\n\n elif arr.dtype == object:\n # if we have a datetime/timedelta array of objects\n # then coerce to datetime64[ns] and use DatetimeArray.astype\n\n if lib.is_np_dtype(dtype, "M"):\n from pandas.core.arrays import DatetimeArray\n\n dta = DatetimeArray._from_sequence(arr, dtype=dtype)\n return dta._ndarray\n\n elif lib.is_np_dtype(dtype, "m"):\n from pandas.core.construction import ensure_wrapped_if_datetimelike\n\n # bc we know arr.dtype == object, this is equivalent to\n # `np.asarray(to_timedelta(arr))`, but using a lower-level API that\n # does not require a circular import.\n tdvals = array_to_timedelta64(arr).view("m8[ns]")\n\n tda = ensure_wrapped_if_datetimelike(tdvals)\n return tda.astype(dtype, copy=False)._ndarray\n\n if dtype.name in ("datetime64", "timedelta64"):\n msg = (\n f"The '{dtype.name}' dtype has no unit. 
Please pass in "\n f"'{dtype.name}[ns]' instead."\n )\n raise ValueError(msg)\n\n if copy or arr.dtype == object or dtype == object:\n # Explicit copy, or required since NumPy can't view from / to object.\n return arr.astype(dtype, copy=True)\n\n return arr.astype(dtype, copy=copy)\n\n\ndef _astype_float_to_int_nansafe(\n values: np.ndarray, dtype: np.dtype, copy: bool\n) -> np.ndarray:\n """\n astype with a check preventing converting NaN to an meaningless integer value.\n """\n if not np.isfinite(values).all():\n raise IntCastingNaNError(\n "Cannot convert non-finite values (NA or inf) to integer"\n )\n if dtype.kind == "u":\n # GH#45151\n if not (values >= 0).all():\n raise ValueError(f"Cannot losslessly cast from {values.dtype} to {dtype}")\n with warnings.catch_warnings():\n warnings.filterwarnings("ignore", category=RuntimeWarning)\n return values.astype(dtype, copy=copy)\n\n\ndef astype_array(values: ArrayLike, dtype: DtypeObj, copy: bool = False) -> ArrayLike:\n """\n Cast array (ndarray or ExtensionArray) to the new dtype.\n\n Parameters\n ----------\n values : ndarray or ExtensionArray\n dtype : dtype object\n copy : bool, default False\n copy if indicated\n\n Returns\n -------\n ndarray or ExtensionArray\n """\n if values.dtype == dtype:\n if copy:\n return values.copy()\n return values\n\n if not isinstance(values, np.ndarray):\n # i.e. 
ExtensionArray\n values = values.astype(dtype, copy=copy)\n\n else:\n values = _astype_nansafe(values, dtype, copy=copy)\n\n # in pandas we don't store numpy str dtypes, so convert to object\n if isinstance(dtype, np.dtype) and issubclass(values.dtype.type, str):\n values = np.array(values, dtype=object)\n\n return values\n\n\ndef astype_array_safe(\n values: ArrayLike, dtype, copy: bool = False, errors: IgnoreRaise = "raise"\n) -> ArrayLike:\n """\n Cast array (ndarray or ExtensionArray) to the new dtype.\n\n This basically is the implementation for DataFrame/Series.astype and\n includes all custom logic for pandas (NaN-safety, converting str to object,\n not allowing )\n\n Parameters\n ----------\n values : ndarray or ExtensionArray\n dtype : str, dtype convertible\n copy : bool, default False\n copy if indicated\n errors : str, {'raise', 'ignore'}, default 'raise'\n - ``raise`` : allow exceptions to be raised\n - ``ignore`` : suppress exceptions. On error return original object\n\n Returns\n -------\n ndarray or ExtensionArray\n """\n errors_legal_values = ("raise", "ignore")\n\n if errors not in errors_legal_values:\n invalid_arg = (\n "Expected value of kwarg 'errors' to be one of "\n f"{list(errors_legal_values)}. Supplied value is '{errors}'"\n )\n raise ValueError(invalid_arg)\n\n if inspect.isclass(dtype) and issubclass(dtype, ExtensionDtype):\n msg = (\n f"Expected an instance of {dtype.__name__}, "\n "but got the class instead. Try instantiating 'dtype'."\n )\n raise TypeError(msg)\n\n dtype = pandas_dtype(dtype)\n if isinstance(dtype, NumpyEADtype):\n # Ensure we don't end up with a NumpyExtensionArray\n dtype = dtype.numpy_dtype\n\n try:\n new_values = astype_array(values, dtype, copy=copy)\n except (ValueError, TypeError):\n # e.g. 
_astype_nansafe can fail on object-dtype of strings\n # trying to convert to float\n if errors == "ignore":\n new_values = values\n else:\n raise\n\n return new_values\n\n\ndef astype_is_view(dtype: DtypeObj, new_dtype: DtypeObj) -> bool:\n """Checks if astype avoided copying the data.\n\n Parameters\n ----------\n dtype : Original dtype\n new_dtype : target dtype\n\n Returns\n -------\n True if new data is a view or not guaranteed to be a copy, False otherwise\n """\n if isinstance(dtype, np.dtype) and not isinstance(new_dtype, np.dtype):\n new_dtype, dtype = dtype, new_dtype\n\n if dtype == new_dtype:\n return True\n\n elif isinstance(dtype, np.dtype) and isinstance(new_dtype, np.dtype):\n # Only equal numpy dtypes avoid a copy\n return False\n\n elif is_string_dtype(dtype) and is_string_dtype(new_dtype):\n # Potentially! a view when converting from object to string\n return True\n\n elif is_object_dtype(dtype) and new_dtype.kind == "O":\n # When the underlying array has dtype object, we don't have to make a copy\n return True\n\n elif dtype.kind in "mM" and new_dtype.kind in "mM":\n dtype = getattr(dtype, "numpy_dtype", dtype)\n new_dtype = getattr(new_dtype, "numpy_dtype", new_dtype)\n return getattr(dtype, "unit", None) == getattr(new_dtype, "unit", None)\n\n numpy_dtype = getattr(dtype, "numpy_dtype", None)\n new_numpy_dtype = getattr(new_dtype, "numpy_dtype", None)\n\n if numpy_dtype is None and isinstance(dtype, np.dtype):\n numpy_dtype = dtype\n\n if new_numpy_dtype is None and isinstance(new_dtype, np.dtype):\n new_numpy_dtype = new_dtype\n\n if numpy_dtype is not None and new_numpy_dtype is not None:\n # if both have NumPy dtype or one of them is a numpy dtype\n # they are only a view when the numpy dtypes are equal, e.g.\n # int64 -> Int64 or int64[pyarrow]\n # int64 -> Int32 copies\n return numpy_dtype == new_numpy_dtype\n\n # Assume this is a view since we don't know for sure if a copy was made\n return True\n
.venv\Lib\site-packages\pandas\core\dtypes\astype.py
astype.py
Python
9,207
0.95
0.152824
0.088608
node-utils
152
2024-04-23T00:22:30.457766
GPL-3.0
false
36081f4c53cc74799a876b403834051f
"""\nExtend pandas with custom array types.\n"""\nfrom __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n Any,\n TypeVar,\n cast,\n overload,\n)\n\nimport numpy as np\n\nfrom pandas._libs import missing as libmissing\nfrom pandas._libs.hashtable import object_hash\nfrom pandas._libs.properties import cache_readonly\nfrom pandas.errors import AbstractMethodError\n\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCIndex,\n ABCSeries,\n)\n\nif TYPE_CHECKING:\n from pandas._typing import (\n DtypeObj,\n Self,\n Shape,\n npt,\n type_t,\n )\n\n from pandas import Index\n from pandas.core.arrays import ExtensionArray\n\n # To parameterize on same ExtensionDtype\n ExtensionDtypeT = TypeVar("ExtensionDtypeT", bound="ExtensionDtype")\n\n\nclass ExtensionDtype:\n """\n A custom data type, to be paired with an ExtensionArray.\n\n See Also\n --------\n extensions.register_extension_dtype: Register an ExtensionType\n with pandas as class decorator.\n extensions.ExtensionArray: Abstract base class for custom 1-D array types.\n\n Notes\n -----\n The interface includes the following abstract methods that must\n be implemented by subclasses:\n\n * type\n * name\n * construct_array_type\n\n The following attributes and methods influence the behavior of the dtype in\n pandas operations\n\n * _is_numeric\n * _is_boolean\n * _get_common_dtype\n\n The `na_value` class attribute can be used to set the default NA value\n for this type. :attr:`numpy.nan` is used by default.\n\n ExtensionDtypes are required to be hashable. The base class provides\n a default implementation, which relies on the ``_metadata`` class\n attribute. ``_metadata`` should be a tuple containing the strings\n that define your data type. 
For example, with ``PeriodDtype`` that's\n the ``freq`` attribute.\n\n **If you have a parametrized dtype you should set the ``_metadata``\n class property**.\n\n Ideally, the attributes in ``_metadata`` will match the\n parameters to your ``ExtensionDtype.__init__`` (if any). If any of\n the attributes in ``_metadata`` don't implement the standard\n ``__eq__`` or ``__hash__``, the default implementations here will not\n work.\n\n Examples\n --------\n\n For interaction with Apache Arrow (pyarrow), a ``__from_arrow__`` method\n can be implemented: this method receives a pyarrow Array or ChunkedArray\n as only argument and is expected to return the appropriate pandas\n ExtensionArray for this dtype and the passed values:\n\n >>> import pyarrow\n >>> from pandas.api.extensions import ExtensionArray\n >>> class ExtensionDtype:\n ... def __from_arrow__(\n ... self,\n ... array: pyarrow.Array | pyarrow.ChunkedArray\n ... ) -> ExtensionArray:\n ... ...\n\n This class does not inherit from 'abc.ABCMeta' for performance reasons.\n Methods and properties required by the interface raise\n ``pandas.errors.AbstractMethodError`` and no ``register`` method is\n provided for registering virtual subclasses.\n """\n\n _metadata: tuple[str, ...] 
= ()\n\n def __str__(self) -> str:\n return self.name\n\n def __eq__(self, other: object) -> bool:\n """\n Check whether 'other' is equal to self.\n\n By default, 'other' is considered equal if either\n\n * it's a string matching 'self.name'.\n * it's an instance of this type and all of the attributes\n in ``self._metadata`` are equal between `self` and `other`.\n\n Parameters\n ----------\n other : Any\n\n Returns\n -------\n bool\n """\n if isinstance(other, str):\n try:\n other = self.construct_from_string(other)\n except TypeError:\n return False\n if isinstance(other, type(self)):\n return all(\n getattr(self, attr) == getattr(other, attr) for attr in self._metadata\n )\n return False\n\n def __hash__(self) -> int:\n # for python>=3.10, different nan objects have different hashes\n # we need to avoid that and thus use hash function with old behavior\n return object_hash(tuple(getattr(self, attr) for attr in self._metadata))\n\n def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)\n\n @property\n def na_value(self) -> object:\n """\n Default NA value to use for this type.\n\n This is used in e.g. ExtensionArray.take. This should be the\n user-facing "boxed" version of the NA value, not the physical NA value\n for storage. e.g. for JSONArray, this is an empty dictionary.\n """\n return np.nan\n\n @property\n def type(self) -> type_t[Any]:\n """\n The scalar type for the array, e.g. ``int``\n\n It's expected ``ExtensionArray[item]`` returns an instance\n of ``ExtensionDtype.type`` for scalar ``item``, assuming\n that value is valid (not NA). 
NA values do not need to be\n instances of `type`.\n """\n raise AbstractMethodError(self)\n\n @property\n def kind(self) -> str:\n """\n A character code (one of 'biufcmMOSUV'), default 'O'\n\n This should match the NumPy dtype used when the array is\n converted to an ndarray, which is probably 'O' for object if\n the extension type cannot be represented as a built-in NumPy\n type.\n\n See Also\n --------\n numpy.dtype.kind\n """\n return "O"\n\n @property\n def name(self) -> str:\n """\n A string identifying the data type.\n\n Will be used for display in, e.g. ``Series.dtype``\n """\n raise AbstractMethodError(self)\n\n @property\n def names(self) -> list[str] | None:\n """\n Ordered list of field names, or None if there are no fields.\n\n This is for compatibility with NumPy arrays, and may be removed in the\n future.\n """\n return None\n\n @classmethod\n def construct_array_type(cls) -> type_t[ExtensionArray]:\n """\n Return the array type associated with this dtype.\n\n Returns\n -------\n type\n """\n raise AbstractMethodError(cls)\n\n def empty(self, shape: Shape) -> ExtensionArray:\n """\n Construct an ExtensionArray of this dtype with the given shape.\n\n Analogous to numpy.empty.\n\n Parameters\n ----------\n shape : int or tuple[int]\n\n Returns\n -------\n ExtensionArray\n """\n cls = self.construct_array_type()\n return cls._empty(shape, dtype=self)\n\n @classmethod\n def construct_from_string(cls, string: str) -> Self:\n r"""\n Construct this type from a string.\n\n This is useful mainly for data types that accept parameters.\n For example, a period dtype accepts a frequency parameter that\n can be set as ``period[h]`` (where H means hourly frequency).\n\n By default, in the abstract class, just the name of the type is\n expected. 
But subclasses can overwrite this method to accept\n parameters.\n\n Parameters\n ----------\n string : str\n The name of the type, for example ``category``.\n\n Returns\n -------\n ExtensionDtype\n Instance of the dtype.\n\n Raises\n ------\n TypeError\n If a class cannot be constructed from this 'string'.\n\n Examples\n --------\n For extension dtypes with arguments the following may be an\n adequate implementation.\n\n >>> import re\n >>> @classmethod\n ... def construct_from_string(cls, string):\n ... pattern = re.compile(r"^my_type\[(?P<arg_name>.+)\]$")\n ... match = pattern.match(string)\n ... if match:\n ... return cls(**match.groupdict())\n ... else:\n ... raise TypeError(\n ... f"Cannot construct a '{cls.__name__}' from '{string}'"\n ... )\n """\n if not isinstance(string, str):\n raise TypeError(\n f"'construct_from_string' expects a string, got {type(string)}"\n )\n # error: Non-overlapping equality check (left operand type: "str", right\n # operand type: "Callable[[ExtensionDtype], str]") [comparison-overlap]\n assert isinstance(cls.name, str), (cls, type(cls.name))\n if string != cls.name:\n raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")\n return cls()\n\n @classmethod\n def is_dtype(cls, dtype: object) -> bool:\n """\n Check if we match 'dtype'.\n\n Parameters\n ----------\n dtype : object\n The object to check.\n\n Returns\n -------\n bool\n\n Notes\n -----\n The default implementation is True if\n\n 1. ``cls.construct_from_string(dtype)`` is an instance\n of ``cls``.\n 2. ``dtype`` is an object and is an instance of ``cls``\n 3. ``dtype`` has a ``dtype`` attribute, and any of the above\n conditions is true for ``dtype.dtype``.\n """\n dtype = getattr(dtype, "dtype", dtype)\n\n if isinstance(dtype, (ABCSeries, ABCIndex, ABCDataFrame, np.dtype)):\n # https://github.com/pandas-dev/pandas/issues/22960\n # avoid passing data to `construct_from_string`. 
This could\n # cause a FutureWarning from numpy about failing elementwise\n # comparison from, e.g., comparing DataFrame == 'category'.\n return False\n elif dtype is None:\n return False\n elif isinstance(dtype, cls):\n return True\n if isinstance(dtype, str):\n try:\n return cls.construct_from_string(dtype) is not None\n except TypeError:\n return False\n return False\n\n @property\n def _is_numeric(self) -> bool:\n """\n Whether columns with this dtype should be considered numeric.\n\n By default ExtensionDtypes are assumed to be non-numeric.\n They'll be excluded from operations that exclude non-numeric\n columns, like (groupby) reductions, plotting, etc.\n """\n return False\n\n @property\n def _is_boolean(self) -> bool:\n """\n Whether this dtype should be considered boolean.\n\n By default, ExtensionDtypes are assumed to be non-numeric.\n Setting this to True will affect the behavior of several places,\n e.g.\n\n * is_bool\n * boolean indexing\n\n Returns\n -------\n bool\n """\n return False\n\n def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:\n """\n Return the common dtype, if one exists.\n\n Used in `find_common_type` implementation. This is for example used\n to determine the resulting dtype in a concat operation.\n\n If no common dtype exists, return None (which gives the other dtypes\n the chance to determine a common dtype). If all dtypes in the list\n return None, then the common dtype will be "object" dtype (this means\n it is never needed to return "object" dtype from this method itself).\n\n Parameters\n ----------\n dtypes : list of dtypes\n The dtypes for which to determine a common dtype. 
This is a list\n of np.dtype or ExtensionDtype instances.\n\n Returns\n -------\n Common dtype (np.dtype or ExtensionDtype) or None\n """\n if len(set(dtypes)) == 1:\n # only itself\n return self\n else:\n return None\n\n @property\n def _can_hold_na(self) -> bool:\n """\n Can arrays of this dtype hold NA values?\n """\n return True\n\n @property\n def _is_immutable(self) -> bool:\n """\n Can arrays with this dtype be modified with __setitem__? If not, return\n True.\n\n Immutable arrays are expected to raise TypeError on __setitem__ calls.\n """\n return False\n\n @cache_readonly\n def index_class(self) -> type_t[Index]:\n """\n The Index subclass to return from Index.__new__ when this dtype is\n encountered.\n """\n from pandas import Index\n\n return Index\n\n @property\n def _supports_2d(self) -> bool:\n """\n Do ExtensionArrays with this dtype support 2D arrays?\n\n Historically ExtensionArrays were limited to 1D. By returning True here,\n authors can indicate that their arrays support 2D instances. 
This can\n improve performance in some cases, particularly operations with `axis=1`.\n\n Arrays that support 2D values should:\n\n - implement Array.reshape\n - subclass the Dim2CompatTests in tests.extension.base\n - _concat_same_type should support `axis` keyword\n - _reduce and reductions should support `axis` keyword\n """\n return False\n\n @property\n def _can_fast_transpose(self) -> bool:\n """\n Is transposing an array with this dtype zero-copy?\n\n Only relevant for cases where _supports_2d is True.\n """\n return False\n\n\nclass StorageExtensionDtype(ExtensionDtype):\n """ExtensionDtype that may be backed by more than one implementation."""\n\n name: str\n _metadata = ("storage",)\n\n def __init__(self, storage: str | None = None) -> None:\n self.storage = storage\n\n def __repr__(self) -> str:\n return f"{self.name}[{self.storage}]"\n\n def __str__(self) -> str:\n return self.name\n\n def __eq__(self, other: object) -> bool:\n if isinstance(other, str) and other == self.name:\n return True\n return super().__eq__(other)\n\n def __hash__(self) -> int:\n # custom __eq__ so have to override __hash__\n return super().__hash__()\n\n @property\n def na_value(self) -> libmissing.NAType:\n return libmissing.NA\n\n\ndef register_extension_dtype(cls: type_t[ExtensionDtypeT]) -> type_t[ExtensionDtypeT]:\n """\n Register an ExtensionType with pandas as class decorator.\n\n This enables operations like ``.astype(name)`` for the name\n of the ExtensionDtype.\n\n Returns\n -------\n callable\n A class decorator.\n\n Examples\n --------\n >>> from pandas.api.extensions import register_extension_dtype, ExtensionDtype\n >>> @register_extension_dtype\n ... class MyExtensionDtype(ExtensionDtype):\n ... name = "myextension"\n """\n _registry.register(cls)\n return cls\n\n\nclass Registry:\n """\n Registry for dtype inference.\n\n The registry allows one to map a string repr of a extension\n dtype to an extension dtype. 
The string alias can be used in several\n places, including\n\n * Series and Index constructors\n * :meth:`pandas.array`\n * :meth:`pandas.Series.astype`\n\n Multiple extension types can be registered.\n These are tried in order.\n """\n\n def __init__(self) -> None:\n self.dtypes: list[type_t[ExtensionDtype]] = []\n\n def register(self, dtype: type_t[ExtensionDtype]) -> None:\n """\n Parameters\n ----------\n dtype : ExtensionDtype class\n """\n if not issubclass(dtype, ExtensionDtype):\n raise ValueError("can only register pandas extension dtypes")\n\n self.dtypes.append(dtype)\n\n @overload\n def find(self, dtype: type_t[ExtensionDtypeT]) -> type_t[ExtensionDtypeT]:\n ...\n\n @overload\n def find(self, dtype: ExtensionDtypeT) -> ExtensionDtypeT:\n ...\n\n @overload\n def find(self, dtype: str) -> ExtensionDtype | None:\n ...\n\n @overload\n def find(\n self, dtype: npt.DTypeLike\n ) -> type_t[ExtensionDtype] | ExtensionDtype | None:\n ...\n\n def find(\n self, dtype: type_t[ExtensionDtype] | ExtensionDtype | npt.DTypeLike\n ) -> type_t[ExtensionDtype] | ExtensionDtype | None:\n """\n Parameters\n ----------\n dtype : ExtensionDtype class or instance or str or numpy dtype or python type\n\n Returns\n -------\n return the first matching dtype, otherwise return None\n """\n if not isinstance(dtype, str):\n dtype_type: type_t\n if not isinstance(dtype, type):\n dtype_type = type(dtype)\n else:\n dtype_type = dtype\n if issubclass(dtype_type, ExtensionDtype):\n # cast needed here as mypy doesn't know we have figured\n # out it is an ExtensionDtype or type_t[ExtensionDtype]\n return cast("ExtensionDtype | type_t[ExtensionDtype]", dtype)\n\n return None\n\n for dtype_type in self.dtypes:\n try:\n return dtype_type.construct_from_string(dtype)\n except TypeError:\n pass\n\n return None\n\n\n_registry = Registry()\n
.venv\Lib\site-packages\pandas\core\dtypes\base.py
base.py
Python
17,042
0.95
0.180103
0.05794
react-lib
844
2025-01-20T16:10:30.241537
MIT
false
e170497f50a2e420499b1e1ef0d2f76e
"""\nRoutines for casting.\n"""\n\nfrom __future__ import annotations\n\nimport datetime as dt\nimport functools\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Literal,\n TypeVar,\n cast,\n overload,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._config import using_string_dtype\n\nfrom pandas._libs import (\n Interval,\n Period,\n lib,\n)\nfrom pandas._libs.missing import (\n NA,\n NAType,\n checknull,\n)\nfrom pandas._libs.tslibs import (\n NaT,\n OutOfBoundsDatetime,\n OutOfBoundsTimedelta,\n Timedelta,\n Timestamp,\n is_supported_dtype,\n)\nfrom pandas._libs.tslibs.timedeltas import array_to_timedelta64\nfrom pandas.errors import (\n IntCastingNaNError,\n LossySetitemError,\n)\n\nfrom pandas.core.dtypes.common import (\n ensure_int8,\n ensure_int16,\n ensure_int32,\n ensure_int64,\n ensure_object,\n ensure_str,\n is_bool,\n is_complex,\n is_float,\n is_integer,\n is_object_dtype,\n is_scalar,\n is_string_dtype,\n pandas_dtype as pandas_dtype_func,\n)\nfrom pandas.core.dtypes.dtypes import (\n ArrowDtype,\n BaseMaskedDtype,\n CategoricalDtype,\n DatetimeTZDtype,\n ExtensionDtype,\n IntervalDtype,\n PandasExtensionDtype,\n PeriodDtype,\n)\nfrom pandas.core.dtypes.generic import (\n ABCExtensionArray,\n ABCIndex,\n ABCSeries,\n)\nfrom pandas.core.dtypes.inference import is_list_like\nfrom pandas.core.dtypes.missing import (\n is_valid_na_for_dtype,\n isna,\n na_value_for_dtype,\n notna,\n)\n\nfrom pandas.io._util import _arrow_dtype_mapping\n\nif TYPE_CHECKING:\n from collections.abc import (\n Collection,\n Sequence,\n )\n\n from pandas._typing import (\n ArrayLike,\n Dtype,\n DtypeObj,\n NumpyIndexT,\n Scalar,\n npt,\n )\n\n from pandas import Index\n from pandas.core.arrays import (\n Categorical,\n DatetimeArray,\n ExtensionArray,\n IntervalArray,\n PeriodArray,\n TimedeltaArray,\n )\n\n\n_int8_max = np.iinfo(np.int8).max\n_int16_max = np.iinfo(np.int16).max\n_int32_max = np.iinfo(np.int32).max\n\n_dtype_obj = np.dtype(object)\n\nNumpyArrayT = 
TypeVar("NumpyArrayT", bound=np.ndarray)\n\n\ndef maybe_convert_platform(\n values: list | tuple | range | np.ndarray | ExtensionArray,\n) -> ArrayLike:\n """try to do platform conversion, allow ndarray or list here"""\n arr: ArrayLike\n\n if isinstance(values, (list, tuple, range)):\n arr = construct_1d_object_array_from_listlike(values)\n else:\n # The caller is responsible for ensuring that we have np.ndarray\n # or ExtensionArray here.\n arr = values\n\n if arr.dtype == _dtype_obj:\n arr = cast(np.ndarray, arr)\n arr = lib.maybe_convert_objects(arr)\n\n return arr\n\n\ndef is_nested_object(obj) -> bool:\n """\n return a boolean if we have a nested object, e.g. a Series with 1 or\n more Series elements\n\n This may not be necessarily be performant.\n\n """\n return bool(\n isinstance(obj, ABCSeries)\n and is_object_dtype(obj.dtype)\n and any(isinstance(v, ABCSeries) for v in obj._values)\n )\n\n\ndef maybe_box_datetimelike(value: Scalar, dtype: Dtype | None = None) -> Scalar:\n """\n Cast scalar to Timestamp or Timedelta if scalar is datetime-like\n and dtype is not object.\n\n Parameters\n ----------\n value : scalar\n dtype : Dtype, optional\n\n Returns\n -------\n scalar\n """\n if dtype == _dtype_obj:\n pass\n elif isinstance(value, (np.datetime64, dt.datetime)):\n value = Timestamp(value)\n elif isinstance(value, (np.timedelta64, dt.timedelta)):\n value = Timedelta(value)\n\n return value\n\n\ndef maybe_box_native(value: Scalar | None | NAType) -> Scalar | None | NAType:\n """\n If passed a scalar cast the scalar to a python native type.\n\n Parameters\n ----------\n value : scalar or Series\n\n Returns\n -------\n scalar or Series\n """\n if is_float(value):\n value = float(value)\n elif is_integer(value):\n value = int(value)\n elif is_bool(value):\n value = bool(value)\n elif isinstance(value, (np.datetime64, np.timedelta64)):\n value = maybe_box_datetimelike(value)\n elif value is NA:\n value = None\n return value\n\n\ndef 
_maybe_unbox_datetimelike(value: Scalar, dtype: DtypeObj) -> Scalar:\n """\n Convert a Timedelta or Timestamp to timedelta64 or datetime64 for setting\n into a numpy array. Failing to unbox would risk dropping nanoseconds.\n\n Notes\n -----\n Caller is responsible for checking dtype.kind in "mM"\n """\n if is_valid_na_for_dtype(value, dtype):\n # GH#36541: can't fill array directly with pd.NaT\n # > np.empty(10, dtype="datetime64[ns]").fill(pd.NaT)\n # ValueError: cannot convert float NaN to integer\n value = dtype.type("NaT", "ns")\n elif isinstance(value, Timestamp):\n if value.tz is None:\n value = value.to_datetime64()\n elif not isinstance(dtype, DatetimeTZDtype):\n raise TypeError("Cannot unbox tzaware Timestamp to tznaive dtype")\n elif isinstance(value, Timedelta):\n value = value.to_timedelta64()\n\n _disallow_mismatched_datetimelike(value, dtype)\n return value\n\n\ndef _disallow_mismatched_datetimelike(value, dtype: DtypeObj):\n """\n numpy allows np.array(dt64values, dtype="timedelta64[ns]") and\n vice-versa, but we do not want to allow this, so we need to\n check explicitly\n """\n vdtype = getattr(value, "dtype", None)\n if vdtype is None:\n return\n elif (vdtype.kind == "m" and dtype.kind == "M") or (\n vdtype.kind == "M" and dtype.kind == "m"\n ):\n raise TypeError(f"Cannot cast {repr(value)} to {dtype}")\n\n\n@overload\ndef maybe_downcast_to_dtype(result: np.ndarray, dtype: str | np.dtype) -> np.ndarray:\n ...\n\n\n@overload\ndef maybe_downcast_to_dtype(result: ExtensionArray, dtype: str | np.dtype) -> ArrayLike:\n ...\n\n\ndef maybe_downcast_to_dtype(result: ArrayLike, dtype: str | np.dtype) -> ArrayLike:\n """\n try to cast to the specified dtype (e.g. 
@overload
def maybe_downcast_to_dtype(result: np.ndarray, dtype: str | np.dtype) -> np.ndarray:
    ...


@overload
def maybe_downcast_to_dtype(result: ExtensionArray, dtype: str | np.dtype) -> ArrayLike:
    ...


def maybe_downcast_to_dtype(result: ArrayLike, dtype: str | np.dtype) -> ArrayLike:
    """
    Try to cast to the specified dtype (e.g. convert back to bool/int),
    or could be an astype of float64->float32.
    """
    if isinstance(result, ABCSeries):
        result = result._values
    do_round = False

    if isinstance(dtype, str):
        if dtype == "infer":
            # Map the inferred kind of `result` onto a concrete numpy dtype name.
            inferred_type = lib.infer_dtype(result, skipna=False)
            if inferred_type == "boolean":
                dtype = "bool"
            elif inferred_type == "integer":
                dtype = "int64"
            elif inferred_type == "datetime64":
                dtype = "datetime64[ns]"
            elif inferred_type in ["timedelta", "timedelta64"]:
                dtype = "timedelta64[ns]"

            # floats that round-trip to int are downcast with rounding
            elif inferred_type == "floating":
                dtype = "int64"
                if issubclass(result.dtype.type, np.number):
                    do_round = True

            else:
                # TODO: complex? what if result is already non-object?
                dtype = "object"

        dtype = np.dtype(dtype)

    if not isinstance(dtype, np.dtype):
        # enforce our signature annotation
        raise TypeError(dtype)  # pragma: no cover

    # Numeric path first; maybe_downcast_numeric returns `result` itself
    # (identity) when no numeric downcast applied.
    converted = maybe_downcast_numeric(result, dtype, do_round)
    if converted is not result:
        return converted

    # a datetimelike
    # GH12821, iNaT is cast to float
    if dtype.kind in "mM" and result.dtype.kind in "if":
        result = result.astype(dtype)

    elif dtype.kind == "m" and result.dtype == _dtype_obj:
        # test_where_downcast_to_td64
        result = cast(np.ndarray, result)
        result = array_to_timedelta64(result)

    elif dtype == np.dtype("M8[ns]") and result.dtype == _dtype_obj:
        result = cast(np.ndarray, result)
        return np.asarray(maybe_cast_to_datetime(result, dtype=dtype))

    return result


@overload
def maybe_downcast_numeric(
    result: np.ndarray, dtype: np.dtype, do_round: bool = False
) -> np.ndarray:
    ...


@overload
def maybe_downcast_numeric(
    result: ExtensionArray, dtype: DtypeObj, do_round: bool = False
) -> ArrayLike:
    ...


def maybe_downcast_numeric(
    result: ArrayLike, dtype: DtypeObj, do_round: bool = False
) -> ArrayLike:
    """
    Subset of maybe_downcast_to_dtype restricted to numeric dtypes.

    Returns `result` itself (same object) when no lossless downcast is
    possible, so callers can detect "no-op" via identity.

    Parameters
    ----------
    result : ndarray or ExtensionArray
    dtype : np.dtype or ExtensionDtype
    do_round : bool

    Returns
    -------
    ndarray or ExtensionArray
    """
    if not isinstance(dtype, np.dtype) or not isinstance(result.dtype, np.dtype):
        # e.g. SparseDtype has no itemsize attr
        return result

    def trans(x):
        # optionally round before the cast so float->int is value-preserving
        if do_round:
            return x.round()
        return x

    if dtype.kind == result.dtype.kind:
        # don't allow upcasts here (except if empty)
        if result.dtype.itemsize <= dtype.itemsize and result.size:
            return result

    if dtype.kind in "biu":
        if not result.size:
            # if we don't have any elements, just astype it
            return trans(result).astype(dtype)

        if isinstance(result, np.ndarray):
            element = result.item(0)
        else:
            element = result.iloc[0]
        if not isinstance(element, (np.integer, np.floating, int, float, bool)):
            # a comparable, e.g. a Decimal may slip in here
            return result

        if (
            issubclass(result.dtype.type, (np.object_, np.number))
            and notna(result).all()
        ):
            new_result = trans(result).astype(dtype)
            if new_result.dtype.kind == "O" or result.dtype.kind == "O":
                # np.allclose may raise TypeError on object-dtype
                if (new_result == result).all():
                    return new_result
            else:
                if np.allclose(new_result, result, rtol=0):
                    return new_result

    elif (
        issubclass(dtype.type, np.floating)
        and result.dtype.kind != "b"
        and not is_string_dtype(result.dtype)
    ):
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore", "overflow encountered in cast", RuntimeWarning
            )
            new_result = result.astype(dtype)

        # Adjust tolerances based on floating point size
        size_tols = {4: 5e-4, 8: 5e-8, 16: 5e-16}

        atol = size_tols.get(new_result.dtype.itemsize, 0.0)

        # Check downcast float values are still equal within 7 digits when
        # converting from float64 to float32
        if np.allclose(new_result, result, equal_nan=True, rtol=0.0, atol=atol):
            return new_result

    elif dtype.kind == result.dtype.kind == "c":
        new_result = result.astype(dtype)

        if np.array_equal(new_result, result, equal_nan=True):
            # TODO: use tolerance like we do for float?
            return new_result

    return result
np.array_equal(new_result, result, equal_nan=True):\n # TODO: use tolerance like we do for float?\n return new_result\n\n return result\n\n\ndef maybe_upcast_numeric_to_64bit(arr: NumpyIndexT) -> NumpyIndexT:\n """\n If array is a int/uint/float bit size lower than 64 bit, upcast it to 64 bit.\n\n Parameters\n ----------\n arr : ndarray or ExtensionArray\n\n Returns\n -------\n ndarray or ExtensionArray\n """\n dtype = arr.dtype\n if dtype.kind == "i" and dtype != np.int64:\n return arr.astype(np.int64)\n elif dtype.kind == "u" and dtype != np.uint64:\n return arr.astype(np.uint64)\n elif dtype.kind == "f" and dtype != np.float64:\n return arr.astype(np.float64)\n else:\n return arr\n\n\ndef maybe_cast_pointwise_result(\n result: ArrayLike,\n dtype: DtypeObj,\n numeric_only: bool = False,\n same_dtype: bool = True,\n) -> ArrayLike:\n """\n Try casting result of a pointwise operation back to the original dtype if\n appropriate.\n\n Parameters\n ----------\n result : array-like\n Result to cast.\n dtype : np.dtype or ExtensionDtype\n Input Series from which result was calculated.\n numeric_only : bool, default False\n Whether to cast only numerics or datetimes as well.\n same_dtype : bool, default True\n Specify dtype when calling _from_sequence\n\n Returns\n -------\n result : array-like\n result maybe casted to the dtype.\n """\n\n if isinstance(dtype, ExtensionDtype):\n cls = dtype.construct_array_type()\n if same_dtype:\n result = _maybe_cast_to_extension_array(cls, result, dtype=dtype)\n else:\n result = _maybe_cast_to_extension_array(cls, result)\n\n elif (numeric_only and dtype.kind in "iufcb") or not numeric_only:\n result = maybe_downcast_to_dtype(result, dtype)\n\n return result\n\n\ndef _maybe_cast_to_extension_array(\n cls: type[ExtensionArray], obj: ArrayLike, dtype: ExtensionDtype | None = None\n) -> ArrayLike:\n """\n Call to `_from_sequence` that returns the object unchanged on Exception.\n\n Parameters\n ----------\n cls : class, subclass of 
ExtensionArray\n obj : arraylike\n Values to pass to cls._from_sequence\n dtype : ExtensionDtype, optional\n\n Returns\n -------\n ExtensionArray or obj\n """\n result: ArrayLike\n\n if dtype is not None:\n try:\n result = cls._from_scalars(obj, dtype=dtype)\n except (TypeError, ValueError):\n return obj\n return result\n\n try:\n result = cls._from_sequence(obj, dtype=dtype)\n except Exception:\n # We can't predict what downstream EA constructors may raise\n result = obj\n return result\n\n\n@overload\ndef ensure_dtype_can_hold_na(dtype: np.dtype) -> np.dtype:\n ...\n\n\n@overload\ndef ensure_dtype_can_hold_na(dtype: ExtensionDtype) -> ExtensionDtype:\n ...\n\n\ndef ensure_dtype_can_hold_na(dtype: DtypeObj) -> DtypeObj:\n """\n If we have a dtype that cannot hold NA values, find the best match that can.\n """\n if isinstance(dtype, ExtensionDtype):\n if dtype._can_hold_na:\n return dtype\n elif isinstance(dtype, IntervalDtype):\n # TODO(GH#45349): don't special-case IntervalDtype, allow\n # overriding instead of returning object below.\n return IntervalDtype(np.float64, closed=dtype.closed)\n return _dtype_obj\n elif dtype.kind == "b":\n return _dtype_obj\n elif dtype.kind in "iu":\n return np.dtype(np.float64)\n return dtype\n\n\n_canonical_nans = {\n np.datetime64: np.datetime64("NaT", "ns"),\n np.timedelta64: np.timedelta64("NaT", "ns"),\n type(np.nan): np.nan,\n}\n\n\ndef maybe_promote(dtype: np.dtype, fill_value=np.nan):\n """\n Find the minimal dtype that can hold both the given dtype and fill_value.\n\n Parameters\n ----------\n dtype : np.dtype\n fill_value : scalar, default np.nan\n\n Returns\n -------\n dtype\n Upcasted from dtype argument if necessary.\n fill_value\n Upcasted from fill_value argument if necessary.\n\n Raises\n ------\n ValueError\n If fill_value is a non-scalar and dtype is not object.\n """\n orig = fill_value\n orig_is_nat = False\n if checknull(fill_value):\n # https://github.com/pandas-dev/pandas/pull/39692#issuecomment-1441051740\n 
# avoid cache misses with NaN/NaT values that are not singletons\n if fill_value is not NA:\n try:\n orig_is_nat = np.isnat(fill_value)\n except TypeError:\n pass\n\n fill_value = _canonical_nans.get(type(fill_value), fill_value)\n\n # for performance, we are using a cached version of the actual implementation\n # of the function in _maybe_promote. However, this doesn't always work (in case\n # of non-hashable arguments), so we fallback to the actual implementation if needed\n try:\n # error: Argument 3 to "__call__" of "_lru_cache_wrapper" has incompatible type\n # "Type[Any]"; expected "Hashable" [arg-type]\n dtype, fill_value = _maybe_promote_cached(\n dtype, fill_value, type(fill_value) # type: ignore[arg-type]\n )\n except TypeError:\n # if fill_value is not hashable (required for caching)\n dtype, fill_value = _maybe_promote(dtype, fill_value)\n\n if (dtype == _dtype_obj and orig is not None) or (\n orig_is_nat and np.datetime_data(orig)[0] != "ns"\n ):\n # GH#51592,53497 restore our potentially non-canonical fill_value\n fill_value = orig\n return dtype, fill_value\n\n\n@functools.lru_cache\ndef _maybe_promote_cached(dtype, fill_value, fill_value_type):\n # The cached version of _maybe_promote below\n # This also use fill_value_type as (unused) argument to use this in the\n # cache lookup -> to differentiate 1 and True\n return _maybe_promote(dtype, fill_value)\n\n\ndef _maybe_promote(dtype: np.dtype, fill_value=np.nan):\n # The actual implementation of the function, use `maybe_promote` above for\n # a cached version.\n if not is_scalar(fill_value):\n # with object dtype there is nothing to promote, and the user can\n # pass pretty much any weird fill_value they like\n if dtype != object:\n # with object dtype there is nothing to promote, and the user can\n # pass pretty much any weird fill_value they like\n raise ValueError("fill_value must be a scalar")\n dtype = _dtype_obj\n return dtype, fill_value\n\n if is_valid_na_for_dtype(fill_value, dtype) and 
def _maybe_promote(dtype: np.dtype, fill_value=np.nan):
    # The actual implementation of the function, use `maybe_promote` above for
    # a cached version.
    if not is_scalar(fill_value):
        # a non-scalar fill_value is only representable with object dtype
        if dtype != object:
            # with object dtype there is nothing to promote, and the user can
            # pass pretty much any weird fill_value they like
            raise ValueError("fill_value must be a scalar")
        dtype = _dtype_obj
        return dtype, fill_value

    if is_valid_na_for_dtype(fill_value, dtype) and dtype.kind in "iufcmM":
        # NA-like fill: widen to an NA-capable dtype and use its native NA
        dtype = ensure_dtype_can_hold_na(dtype)
        fv = na_value_for_dtype(dtype)
        return dtype, fv

    elif isinstance(dtype, CategoricalDtype):
        if fill_value in dtype.categories or isna(fill_value):
            return dtype, fill_value
        else:
            # fill_value not among the categories -> fall back to object
            return object, ensure_object(fill_value)

    elif isna(fill_value):
        dtype = _dtype_obj
        if fill_value is None:
            # but we retain e.g. pd.NA
            fill_value = np.nan
        return dtype, fill_value

    # returns tuple of (dtype, fill_value)
    if issubclass(dtype.type, np.datetime64):
        inferred, fv = infer_dtype_from_scalar(fill_value)
        if inferred == dtype:
            return dtype, fv

        from pandas.core.arrays import DatetimeArray

        # reuse DatetimeArray's setitem validation to decide castability
        dta = DatetimeArray._from_sequence([], dtype="M8[ns]")
        try:
            fv = dta._validate_setitem_value(fill_value)
            return dta.dtype, fv
        except (ValueError, TypeError):
            return _dtype_obj, fill_value

    elif issubclass(dtype.type, np.timedelta64):
        inferred, fv = infer_dtype_from_scalar(fill_value)
        if inferred == dtype:
            return dtype, fv

        elif inferred.kind == "m":
            # different unit, e.g. passed np.timedelta64(24, "h") with dtype=m8[ns]
            # see if we can losslessly cast it to our dtype
            unit = np.datetime_data(dtype)[0]
            try:
                td = Timedelta(fill_value).as_unit(unit, round_ok=False)
            except OutOfBoundsTimedelta:
                return _dtype_obj, fill_value
            else:
                return dtype, td.asm8

        return _dtype_obj, fill_value

    elif is_float(fill_value):
        if issubclass(dtype.type, np.bool_):
            dtype = np.dtype(np.object_)

        elif issubclass(dtype.type, np.integer):
            dtype = np.dtype(np.float64)

        elif dtype.kind == "f":
            mst = np.min_scalar_type(fill_value)
            if mst > dtype:
                # e.g. mst is np.float64 and dtype is np.float32
                dtype = mst

        elif dtype.kind == "c":
            mst = np.min_scalar_type(fill_value)
            dtype = np.promote_types(dtype, mst)

    elif is_bool(fill_value):
        if not issubclass(dtype.type, np.bool_):
            dtype = np.dtype(np.object_)

    elif is_integer(fill_value):
        if issubclass(dtype.type, np.bool_):
            dtype = np.dtype(np.object_)

        elif issubclass(dtype.type, np.integer):
            # NOTE(review): np_can_cast_scalar is presumably defined later in
            # this module -- not visible in this chunk.
            if not np_can_cast_scalar(fill_value, dtype):  # type: ignore[arg-type]
                # upcast to prevent overflow
                mst = np.min_scalar_type(fill_value)
                dtype = np.promote_types(dtype, mst)
                if dtype.kind == "f":
                    # Case where we disagree with numpy
                    dtype = np.dtype(np.object_)

    elif is_complex(fill_value):
        if issubclass(dtype.type, np.bool_):
            dtype = np.dtype(np.object_)

        elif issubclass(dtype.type, (np.integer, np.floating)):
            mst = np.min_scalar_type(fill_value)
            dtype = np.promote_types(dtype, mst)

        elif dtype.kind == "c":
            mst = np.min_scalar_type(fill_value)
            if mst > dtype:
                # e.g. mst is np.complex128 and dtype is np.complex64
                dtype = mst

    else:
        dtype = np.dtype(np.object_)

    # in case we have a string that looked like a number
    if issubclass(dtype.type, (bytes, str)):
        dtype = np.dtype(np.object_)

    fill_value = _ensure_dtype_type(fill_value, dtype)
    return dtype, fill_value
if out dtype is np.complex64_, we should have an instance of that\n as opposed to a python complex object.\n\n Parameters\n ----------\n value : object\n dtype : np.dtype\n\n Returns\n -------\n object\n """\n # Start with exceptions in which we do _not_ cast to numpy types\n\n if dtype == _dtype_obj:\n return value\n\n # Note: before we get here we have already excluded isna(value)\n return dtype.type(value)\n\n\ndef infer_dtype_from(val) -> tuple[DtypeObj, Any]:\n """\n Interpret the dtype from a scalar or array.\n\n Parameters\n ----------\n val : object\n """\n if not is_list_like(val):\n return infer_dtype_from_scalar(val)\n return infer_dtype_from_array(val)\n\n\ndef infer_dtype_from_scalar(val) -> tuple[DtypeObj, Any]:\n """\n Interpret the dtype from a scalar.\n\n Parameters\n ----------\n val : object\n """\n dtype: DtypeObj = _dtype_obj\n\n # a 1-element ndarray\n if isinstance(val, np.ndarray):\n if val.ndim != 0:\n msg = "invalid ndarray passed to infer_dtype_from_scalar"\n raise ValueError(msg)\n\n dtype = val.dtype\n val = lib.item_from_zerodim(val)\n\n elif isinstance(val, str):\n # If we create an empty array using a string to infer\n # the dtype, NumPy will only allocate one character per entry\n # so this is kind of bad. 
def infer_dtype_from_scalar(val) -> tuple[DtypeObj, Any]:
    """
    Interpret the dtype from a scalar.

    Parameters
    ----------
    val : object
    """
    dtype: DtypeObj = _dtype_obj

    # a 1-element ndarray
    if isinstance(val, np.ndarray):
        if val.ndim != 0:
            msg = "invalid ndarray passed to infer_dtype_from_scalar"
            raise ValueError(msg)

        dtype = val.dtype
        val = lib.item_from_zerodim(val)

    elif isinstance(val, str):
        # If we create an empty array using a string to infer
        # the dtype, NumPy will only allocate one character per entry
        # so this is kind of bad. Alternately we could use np.repeat
        # instead of np.empty (but then you still don't want things
        # coming out as np.str_!

        dtype = _dtype_obj
        if using_string_dtype():
            # opt-in string dtype (global option) uses NaN-backed StringDtype
            from pandas.core.arrays.string_ import StringDtype

            dtype = StringDtype(na_value=np.nan)

    elif isinstance(val, (np.datetime64, dt.datetime)):
        try:
            val = Timestamp(val)
        except OutOfBoundsDatetime:
            # out-of-bounds datetimes are kept as python objects
            return _dtype_obj, val

        if val is NaT or val.tz is None:
            val = val.to_datetime64()
            dtype = val.dtype
            # TODO: test with datetime(2920, 10, 1) based on test_replace_dtypes
        else:
            dtype = DatetimeTZDtype(unit=val.unit, tz=val.tz)

    elif isinstance(val, (np.timedelta64, dt.timedelta)):
        try:
            val = Timedelta(val)
        except (OutOfBoundsTimedelta, OverflowError):
            dtype = _dtype_obj
        else:
            if val is NaT:
                val = np.timedelta64("NaT", "ns")
            else:
                val = val.asm8
            dtype = val.dtype

    elif is_bool(val):
        dtype = np.dtype(np.bool_)

    elif is_integer(val):
        if isinstance(val, np.integer):
            dtype = np.dtype(type(val))
        else:
            dtype = np.dtype(np.int64)

        try:
            np.array(val, dtype=dtype)
        except OverflowError:
            # e.g. a python int too large for int64 -> let numpy decide
            dtype = np.array(val).dtype

    elif is_float(val):
        if isinstance(val, np.floating):
            dtype = np.dtype(type(val))
        else:
            dtype = np.dtype(np.float64)

    elif is_complex(val):
        dtype = np.dtype(np.complex128)

    # Period/Interval are checked last so they override any earlier inference
    if isinstance(val, Period):
        dtype = PeriodDtype(freq=val.freq)
    elif isinstance(val, Interval):
        subtype = infer_dtype_from_scalar(val.left)[0]
        dtype = IntervalDtype(subtype=subtype, closed=val.closed)

    return dtype, val


def dict_compat(d: dict[Scalar, Scalar]) -> dict[Scalar, Scalar]:
    """
    Convert datetimelike-keyed dicts to a Timestamp-keyed dict.

    Parameters
    ----------
    d: dict-like object

    Returns
    -------
    dict
    """
    return {maybe_box_datetimelike(key): value for key, value in d.items()}
Parameters\n ----------\n arr : array\n\n Returns\n -------\n tuple (pandas-compat dtype, array)\n\n\n Examples\n --------\n >>> np.asarray([1, '1'])\n array(['1', '1'], dtype='<U21')\n\n >>> infer_dtype_from_array([1, '1'])\n (dtype('O'), [1, '1'])\n """\n if isinstance(arr, np.ndarray):\n return arr.dtype, arr\n\n if not is_list_like(arr):\n raise TypeError("'arr' must be list-like")\n\n arr_dtype = getattr(arr, "dtype", None)\n if isinstance(arr_dtype, ExtensionDtype):\n return arr.dtype, arr\n\n elif isinstance(arr, ABCSeries):\n return arr.dtype, np.asarray(arr)\n\n # don't force numpy coerce with nan's\n inferred = lib.infer_dtype(arr, skipna=False)\n if inferred in ["string", "bytes", "mixed", "mixed-integer"]:\n return (np.dtype(np.object_), arr)\n\n arr = np.asarray(arr)\n return arr.dtype, arr\n\n\ndef _maybe_infer_dtype_type(element):\n """\n Try to infer an object's dtype, for use in arithmetic ops.\n\n Uses `element.dtype` if that's available.\n Objects implementing the iterator protocol are cast to a NumPy array,\n and from there the array's type is used.\n\n Parameters\n ----------\n element : object\n Possibly has a `.dtype` attribute, and possibly the iterator\n protocol.\n\n Returns\n -------\n tipo : type\n\n Examples\n --------\n >>> from collections import namedtuple\n >>> Foo = namedtuple("Foo", "dtype")\n >>> _maybe_infer_dtype_type(Foo(np.dtype("i8")))\n dtype('int64')\n """\n tipo = None\n if hasattr(element, "dtype"):\n tipo = element.dtype\n elif is_list_like(element):\n element = np.asarray(element)\n tipo = element.dtype\n return tipo\n\n\ndef invalidate_string_dtypes(dtype_set: set[DtypeObj]) -> None:\n """\n Change string like dtypes to object for\n ``DataFrame.select_dtypes()``.\n """\n # error: Argument 1 to <set> has incompatible type "Type[generic]"; expected\n # "Union[dtype[Any], ExtensionDtype, None]"\n # error: Argument 2 to <set> has incompatible type "Type[generic]"; expected\n # "Union[dtype[Any], ExtensionDtype, None]"\n 
def invalidate_string_dtypes(dtype_set: "set[DtypeObj]") -> None:
    """
    Reject string-like dtypes (in favor of 'object') for
    ``DataFrame.select_dtypes()``.
    """
    # np.dtype("S").type / np.dtype("<U").type are the bytes_/str_ scalar types
    string_types: set = {
        np.dtype("S").type,
        np.dtype("<U").type,
    }
    if dtype_set & string_types:
        raise TypeError("string dtypes are not allowed, use 'object' instead")


def coerce_indexer_dtype(indexer, categories) -> np.ndarray:
    """Coerce the indexer input array to the smallest dtype possible."""
    n_categories = len(categories)
    if n_categories < _int8_max:
        return ensure_int8(indexer)
    if n_categories < _int16_max:
        return ensure_int16(indexer)
    if n_categories < _int32_max:
        return ensure_int32(indexer)
    return ensure_int64(indexer)
def convert_dtypes(
    input_array: ArrayLike,
    convert_string: bool = True,
    convert_integer: bool = True,
    convert_boolean: bool = True,
    convert_floating: bool = True,
    infer_objects: bool = False,
    dtype_backend: Literal["numpy_nullable", "pyarrow"] = "numpy_nullable",
) -> DtypeObj:
    """
    Convert objects to best possible type, and optionally,
    to types supporting ``pd.NA``.

    Parameters
    ----------
    input_array : ExtensionArray or np.ndarray
    convert_string : bool, default True
        Whether object dtypes should be converted to ``StringDtype()``.
    convert_integer : bool, default True
        Whether, if possible, conversion can be done to integer extension types.
    convert_boolean : bool, defaults True
        Whether object dtypes should be converted to ``BooleanDtypes()``.
    convert_floating : bool, defaults True
        Whether, if possible, conversion can be done to floating extension types.
        If `convert_integer` is also True, preference will be give to integer
        dtypes if the floats can be faithfully casted to integers.
    infer_objects : bool, defaults False
        Whether to also infer objects to float/int if possible. Is only hit if the
        object array contains pd.NA.
    dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
        Back-end data type applied to the resultant :class:`DataFrame`
        (still experimental). Behaviour is as follows:

        * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
          (default).
        * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
          DataFrame.

        .. versionadded:: 2.0

    Returns
    -------
    np.dtype, or ExtensionDtype
    """
    from pandas.core.arrays.string_ import StringDtype

    inferred_dtype: str | DtypeObj

    # Stage 1: pick the best nullable/extension dtype for a numpy input.
    if (
        convert_string or convert_integer or convert_boolean or convert_floating
    ) and isinstance(input_array, np.ndarray):
        if input_array.dtype == object:
            inferred_dtype = lib.infer_dtype(input_array)
        else:
            inferred_dtype = input_array.dtype

        if is_string_dtype(inferred_dtype):
            if not convert_string or inferred_dtype == "bytes":
                inferred_dtype = input_array.dtype
            else:
                inferred_dtype = pandas_dtype_func("string")

        if convert_integer:
            target_int_dtype = pandas_dtype_func("Int64")

            if input_array.dtype.kind in "iu":
                from pandas.core.arrays.integer import NUMPY_INT_TO_DTYPE

                inferred_dtype = NUMPY_INT_TO_DTYPE.get(
                    input_array.dtype, target_int_dtype
                )
            elif input_array.dtype.kind in "fcb":
                # TODO: de-dup with maybe_cast_to_integer_array?
                arr = input_array[notna(input_array)]
                if (arr.astype(int) == arr).all():
                    inferred_dtype = target_int_dtype
                else:
                    inferred_dtype = input_array.dtype
            elif (
                infer_objects
                and input_array.dtype == object
                and (isinstance(inferred_dtype, str) and inferred_dtype == "integer")
            ):
                inferred_dtype = target_int_dtype

        if convert_floating:
            if input_array.dtype.kind in "fcb":
                # i.e. numeric but not integer
                from pandas.core.arrays.floating import NUMPY_FLOAT_TO_DTYPE

                inferred_float_dtype: DtypeObj = NUMPY_FLOAT_TO_DTYPE.get(
                    input_array.dtype, pandas_dtype_func("Float64")
                )
                # if we could also convert to integer, check if all floats
                # are actually integers
                if convert_integer:
                    # TODO: de-dup with maybe_cast_to_integer_array?
                    arr = input_array[notna(input_array)]
                    if (arr.astype(int) == arr).all():
                        inferred_dtype = pandas_dtype_func("Int64")
                    else:
                        inferred_dtype = inferred_float_dtype
                else:
                    inferred_dtype = inferred_float_dtype
            elif (
                infer_objects
                and input_array.dtype == object
                and (
                    isinstance(inferred_dtype, str)
                    and inferred_dtype == "mixed-integer-float"
                )
            ):
                inferred_dtype = pandas_dtype_func("Float64")

        if convert_boolean:
            if input_array.dtype.kind == "b":
                inferred_dtype = pandas_dtype_func("boolean")
            elif isinstance(inferred_dtype, str) and inferred_dtype == "boolean":
                inferred_dtype = pandas_dtype_func("boolean")

        if isinstance(inferred_dtype, str):
            # If we couldn't do anything else, then we retain the dtype
            inferred_dtype = input_array.dtype

    elif (
        convert_string
        and isinstance(input_array.dtype, StringDtype)
        and input_array.dtype.na_value is np.nan
    ):
        inferred_dtype = pandas_dtype_func("string")

    else:
        inferred_dtype = input_array.dtype

    # Stage 2: translate the inferred dtype into the requested backend.
    if dtype_backend == "pyarrow":
        from pandas.core.arrays.arrow.array import to_pyarrow_type

        assert not isinstance(inferred_dtype, str)

        if (
            (convert_integer and inferred_dtype.kind in "iu")
            or (convert_floating and inferred_dtype.kind in "fc")
            or (convert_boolean and inferred_dtype.kind == "b")
            or (convert_string and isinstance(inferred_dtype, StringDtype))
            or (
                inferred_dtype.kind not in "iufcb"
                and not isinstance(inferred_dtype, StringDtype)
            )
        ):
            if isinstance(inferred_dtype, PandasExtensionDtype) and not isinstance(
                inferred_dtype, DatetimeTZDtype
            ):
                base_dtype = inferred_dtype.base
            elif isinstance(inferred_dtype, (BaseMaskedDtype, ArrowDtype)):
                base_dtype = inferred_dtype.numpy_dtype
            elif isinstance(inferred_dtype, StringDtype):
                base_dtype = np.dtype(str)
            else:
                base_dtype = inferred_dtype
            if (
                base_dtype.kind == "O"  # type: ignore[union-attr]
                and input_array.size > 0
                and isna(input_array).all()
            ):
                # all-NA object input maps to the arrow null type
                import pyarrow as pa

                pa_type = pa.null()
            else:
                pa_type = to_pyarrow_type(base_dtype)
            if pa_type is not None:
                inferred_dtype = ArrowDtype(pa_type)
    elif dtype_backend == "numpy_nullable" and isinstance(inferred_dtype, ArrowDtype):
        # GH 53648
        inferred_dtype = _arrow_dtype_mapping()[inferred_dtype.pyarrow_dtype]

    # error: Incompatible return value type (got "Union[str, Union[dtype[Any],
    # ExtensionDtype]]", expected "Union[dtype[Any], ExtensionDtype]")
    return inferred_dtype  # type: ignore[return-value]
def maybe_infer_to_datetimelike(
    value: npt.NDArray[np.object_],
    convert_to_nullable_dtype: bool = False,
) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray | IntervalArray:
    """
    we might have a array (or single object) that is datetime like,
    and no dtype is passed don't change the value unless we find a
    datetime/timedelta set

    this is pretty strict in that a datetime/timedelta is REQUIRED
    in addition to possible nulls/string likes

    Parameters
    ----------
    value : np.ndarray[object]

    Returns
    -------
    np.ndarray, DatetimeArray, TimedeltaArray, PeriodArray, or IntervalArray

    """
    if not isinstance(value, np.ndarray) or value.dtype != object:
        # Caller is responsible for passing only ndarray[object]
        raise TypeError(type(value))  # pragma: no cover
    if value.ndim != 1:
        # Caller is responsible
        raise ValueError(value.ndim)  # pragma: no cover

    if not len(value):
        return value

    # error: Incompatible return value type (got "Union[ExtensionArray,
    # ndarray[Any, Any]]", expected "Union[ndarray[Any, Any], DatetimeArray,
    # TimedeltaArray, PeriodArray, IntervalArray]")
    return lib.maybe_convert_objects(  # type: ignore[return-value]
        value,
        # Here we do not convert numeric dtypes, as if we wanted that,
        # numpy would have done it for us.
        convert_numeric=False,
        convert_non_numeric=True,
        convert_to_nullable_dtype=convert_to_nullable_dtype,
        dtype_if_all_nat=np.dtype("M8[ns]"),
    )


def maybe_cast_to_datetime(
    value: np.ndarray | list, dtype: np.dtype
) -> ExtensionArray | np.ndarray:
    """
    try to cast the array/value to a datetimelike dtype, converting float
    nan to iNaT

    Caller is responsible for handling ExtensionDtype cases and non dt64/td64
    cases.
    """
    # NOTE(review): imports kept local, presumably to avoid circular imports
    # at module load time -- confirm before hoisting.
    from pandas.core.arrays.datetimes import DatetimeArray
    from pandas.core.arrays.timedeltas import TimedeltaArray

    assert dtype.kind in "mM"
    if not is_list_like(value):
        raise TypeError("value must be listlike")

    # TODO: _from_sequence would raise ValueError in cases where
    # _ensure_nanosecond_dtype raises TypeError
    _ensure_nanosecond_dtype(dtype)

    if lib.is_np_dtype(dtype, "m"):
        res = TimedeltaArray._from_sequence(value, dtype=dtype)
        return res
    else:
        try:
            dta = DatetimeArray._from_sequence(value, dtype=dtype)
        except ValueError as err:
            # We can give a Series-specific exception message.
            if "cannot supply both a tz and a timezone-naive dtype" in str(err):
                raise ValueError(
                    "Cannot convert timezone-aware data to "
                    "timezone-naive dtype. Use "
                    "pd.Series(values).dt.tz_localize(None) instead."
                ) from err
            raise

        return dta
Supported resolutions are 's', 'ms', 'us', and 'ns'\n\n >>> _ensure_nanosecond_dtype(np.dtype("m8[ps]"))\n Traceback (most recent call last):\n ...\n TypeError: dtype=timedelta64[ps] is not supported. Supported resolutions are 's', 'ms', 'us', and 'ns'\n """ # noqa: E501\n msg = (\n f"The '{dtype.name}' dtype has no unit. "\n f"Please pass in '{dtype.name}[ns]' instead."\n )\n\n # unpack e.g. SparseDtype\n dtype = getattr(dtype, "subtype", dtype)\n\n if not isinstance(dtype, np.dtype):\n # i.e. datetime64tz\n pass\n\n elif dtype.kind in "mM":\n if not is_supported_dtype(dtype):\n # pre-2.0 we would silently swap in nanos for lower-resolutions,\n # raise for above-nano resolutions\n if dtype.name in ["datetime64", "timedelta64"]:\n raise ValueError(msg)\n # TODO: ValueError or TypeError? existing test\n # test_constructor_generic_timestamp_bad_frequency expects TypeError\n raise TypeError(\n f"dtype={dtype} is not supported. Supported resolutions are 's', "\n "'ms', 'us', and 'ns'"\n )\n\n\n# TODO: other value-dependent functions to standardize here include\n# Index._find_common_type_compat\ndef find_result_type(left_dtype: DtypeObj, right: Any) -> DtypeObj:\n """\n Find the type/dtype for the result of an operation between objects.\n\n This is similar to find_common_type, but looks at the right object instead\n of just its dtype. This can be useful in particular when the right\n object does not have a `dtype`.\n\n Parameters\n ----------\n left_dtype : np.dtype or ExtensionDtype\n right : Any\n\n Returns\n -------\n np.dtype or ExtensionDtype\n\n See also\n --------\n find_common_type\n numpy.result_type\n """\n new_dtype: DtypeObj\n\n if (\n isinstance(left_dtype, np.dtype)\n and left_dtype.kind in "iuc"\n and (lib.is_integer(right) or lib.is_float(right))\n ):\n # e.g. 
def find_result_type(left_dtype: DtypeObj, right: Any) -> DtypeObj:
    """
    Find the type/dtype for the result of an operation between objects.

    This is similar to find_common_type, but looks at the right object instead
    of just its dtype. This can be useful in particular when the right
    object does not have a `dtype`.

    Parameters
    ----------
    left_dtype : np.dtype or ExtensionDtype
    right : Any

    Returns
    -------
    np.dtype or ExtensionDtype

    See also
    --------
    find_common_type
    numpy.result_type
    """
    new_dtype: DtypeObj

    if (
        isinstance(left_dtype, np.dtype)
        and left_dtype.kind in "iuc"
        and (lib.is_integer(right) or lib.is_float(right))
    ):
        # e.g. with int8 dtype and right=512, we want to end up with
        # np.int16, whereas infer_dtype_from(512) gives np.int64,
        # which will make us upcast too far.
        if lib.is_float(right) and right.is_integer() and left_dtype.kind != "f":
            right = int(right)
        # After NEP 50, numpy won't inspect Python scalars
        # TODO: do we need to recreate numpy's inspection logic for floats too
        # (this breaks some tests)
        if isinstance(right, int) and not isinstance(right, np.integer):
            # This gives an unsigned type by default
            # (if our number is positive)

            # If our left dtype is signed, we might not want this since
            # this might give us 1 dtype too big
            # We should check if the corresponding int dtype (e.g. int64 for uint64)
            # can hold the number
            right_dtype = np.min_scalar_type(right)
            if right == 0:
                # Special case 0
                right = left_dtype
            elif (
                not np.issubdtype(left_dtype, np.unsignedinteger)
                and 0 < right <= np.iinfo(right_dtype).max
            ):
                # If left dtype isn't unsigned, check if it fits in the signed dtype
                right = np.dtype(f"i{right_dtype.itemsize}")
            else:
                right = right_dtype

        new_dtype = np.result_type(left_dtype, right)

    elif is_valid_na_for_dtype(right, left_dtype):
        # e.g. IntervalDtype[int] and None/np.nan
        new_dtype = ensure_dtype_can_hold_na(left_dtype)

    else:
        # generic fallback: infer a dtype from `right` and find a common type
        dtype, _ = infer_dtype_from(right)
        new_dtype = find_common_type([left_dtype, dtype])

    return new_dtype
"categorical" str here\n obj_dtype = getattr(obj, "dtype", None)\n if isinstance(obj_dtype, CategoricalDtype):\n if isinstance(obj, ABCIndex):\n # This check may already be cached\n hasnas = obj.hasnans\n else:\n # Categorical\n hasnas = cast("Categorical", obj)._hasna\n\n if hasnas:\n # see test_union_int_categorical_with_nan\n dtype = np.dtype(np.float64)\n break\n return dtype\n\n\ndef np_find_common_type(*dtypes: np.dtype) -> np.dtype:\n """\n np.find_common_type implementation pre-1.25 deprecation using np.result_type\n https://github.com/pandas-dev/pandas/pull/49569#issuecomment-1308300065\n\n Parameters\n ----------\n dtypes : np.dtypes\n\n Returns\n -------\n np.dtype\n """\n try:\n common_dtype = np.result_type(*dtypes)\n if common_dtype.kind in "mMSU":\n # NumPy promotion currently (1.25) misbehaves for for times and strings,\n # so fall back to object (find_common_dtype did unless there\n # was only one dtype)\n common_dtype = np.dtype("O")\n\n except TypeError:\n common_dtype = np.dtype("O")\n return common_dtype\n\n\n@overload\ndef find_common_type(types: list[np.dtype]) -> np.dtype:\n ...\n\n\n@overload\ndef find_common_type(types: list[ExtensionDtype]) -> DtypeObj:\n ...\n\n\n@overload\ndef find_common_type(types: list[DtypeObj]) -> DtypeObj:\n ...\n\n\ndef find_common_type(types):\n """\n Find a common data type among the given dtypes.\n\n Parameters\n ----------\n types : list of dtypes\n\n Returns\n -------\n pandas extension or numpy dtype\n\n See Also\n --------\n numpy.find_common_type\n\n """\n if not types:\n raise ValueError("no types given")\n\n first = types[0]\n\n # workaround for find_common_type([np.dtype('datetime64[ns]')] * 2)\n # => object\n if lib.dtypes_all_equal(list(types)):\n return first\n\n # get unique types (dict.fromkeys is used as order-preserving set())\n types = list(dict.fromkeys(types).keys())\n\n if any(isinstance(t, ExtensionDtype) for t in types):\n for t in types:\n if isinstance(t, ExtensionDtype):\n res = 
t._get_common_dtype(types)\n if res is not None:\n return res\n return np.dtype("object")\n\n # take lowest unit\n if all(lib.is_np_dtype(t, "M") for t in types):\n return np.dtype(max(types))\n if all(lib.is_np_dtype(t, "m") for t in types):\n return np.dtype(max(types))\n\n # don't mix bool / int or float or complex\n # this is different from numpy, which casts bool with float/int as int\n has_bools = any(t.kind == "b" for t in types)\n if has_bools:\n for t in types:\n if t.kind in "iufc":\n return np.dtype("object")\n\n return np_find_common_type(*types)\n\n\ndef construct_2d_arraylike_from_scalar(\n value: Scalar, length: int, width: int, dtype: np.dtype, copy: bool\n) -> np.ndarray:\n shape = (length, width)\n\n if dtype.kind in "mM":\n value = _maybe_box_and_unbox_datetimelike(value, dtype)\n elif dtype == _dtype_obj:\n if isinstance(value, (np.timedelta64, np.datetime64)):\n # calling np.array below would cast to pytimedelta/pydatetime\n out = np.empty(shape, dtype=object)\n out.fill(value)\n return out\n\n # Attempt to coerce to a numpy array\n try:\n if not copy:\n arr = np.asarray(value, dtype=dtype)\n else:\n arr = np.array(value, dtype=dtype, copy=copy)\n except (ValueError, TypeError) as err:\n raise TypeError(\n f"DataFrame constructor called with incompatible data and dtype: {err}"\n ) from err\n\n if arr.ndim != 0:\n raise ValueError("DataFrame constructor not properly called!")\n\n return np.full(shape, arr)\n\n\ndef construct_1d_arraylike_from_scalar(\n value: Scalar, length: int, dtype: DtypeObj | None\n) -> ArrayLike:\n """\n create a np.ndarray / pandas type of specified shape and dtype\n filled with values\n\n Parameters\n ----------\n value : scalar value\n length : int\n dtype : pandas_dtype or np.dtype\n\n Returns\n -------\n np.ndarray / pandas type of length, filled with value\n\n """\n\n if dtype is None:\n try:\n dtype, value = infer_dtype_from_scalar(value)\n except OutOfBoundsDatetime:\n dtype = _dtype_obj\n\n if isinstance(dtype, 
ExtensionDtype):\n cls = dtype.construct_array_type()\n seq = [] if length == 0 else [value]\n subarr = cls._from_sequence(seq, dtype=dtype).repeat(length)\n\n else:\n if length and dtype.kind in "iu" and isna(value):\n # coerce if we have nan for an integer dtype\n dtype = np.dtype("float64")\n elif lib.is_np_dtype(dtype, "US"):\n # we need to coerce to object dtype to avoid\n # to allow numpy to take our string as a scalar value\n dtype = np.dtype("object")\n if not isna(value):\n value = ensure_str(value)\n elif dtype.kind in "mM":\n value = _maybe_box_and_unbox_datetimelike(value, dtype)\n\n subarr = np.empty(length, dtype=dtype)\n if length:\n # GH 47391: numpy > 1.24 will raise filling np.nan into int dtypes\n subarr.fill(value)\n\n return subarr\n\n\ndef _maybe_box_and_unbox_datetimelike(value: Scalar, dtype: DtypeObj):\n # Caller is responsible for checking dtype.kind in "mM"\n\n if isinstance(value, dt.datetime):\n # we dont want to box dt64, in particular datetime64("NaT")\n value = maybe_box_datetimelike(value, dtype)\n\n return _maybe_unbox_datetimelike(value, dtype)\n\n\ndef construct_1d_object_array_from_listlike(values: Collection) -> np.ndarray:\n """\n Transform any list-like object in a 1-dimensional numpy array of object\n dtype.\n\n Parameters\n ----------\n values : any iterable which has a len()\n\n Raises\n ------\n TypeError\n * If `values` does not have a len()\n\n Returns\n -------\n 1-dimensional numpy array of dtype object\n """\n # numpy will try to interpret nested lists as further dimensions in np.array(),\n # hence explicitly making a 1D array using np.fromiter\n result = np.empty(len(values), dtype="object")\n for i, obj in enumerate(values):\n result[i] = obj\n return result\n\n\ndef maybe_cast_to_integer_array(arr: list | np.ndarray, dtype: np.dtype) -> np.ndarray:\n """\n Takes any dtype and returns the casted version, raising for when data is\n incompatible with integer/unsigned integer dtypes.\n\n Parameters\n ----------\n arr 
: np.ndarray or list\n The array to cast.\n dtype : np.dtype\n The integer dtype to cast the array to.\n\n Returns\n -------\n ndarray\n Array of integer or unsigned integer dtype.\n\n Raises\n ------\n OverflowError : the dtype is incompatible with the data\n ValueError : loss of precision has occurred during casting\n\n Examples\n --------\n If you try to coerce negative values to unsigned integers, it raises:\n\n >>> pd.Series([-1], dtype="uint64")\n Traceback (most recent call last):\n ...\n OverflowError: Trying to coerce negative values to unsigned integers\n\n Also, if you try to coerce float values to integers, it raises:\n\n >>> maybe_cast_to_integer_array([1, 2, 3.5], dtype=np.dtype("int64"))\n Traceback (most recent call last):\n ...\n ValueError: Trying to coerce float values to integers\n """\n assert dtype.kind in "iu"\n\n try:\n if not isinstance(arr, np.ndarray):\n with warnings.catch_warnings():\n # We already disallow dtype=uint w/ negative numbers\n # (test_constructor_coercion_signed_to_unsigned) so safe to ignore.\n warnings.filterwarnings(\n "ignore",\n "NumPy will stop allowing conversion of out-of-bound Python int",\n DeprecationWarning,\n )\n casted = np.asarray(arr, dtype=dtype)\n else:\n with warnings.catch_warnings():\n warnings.filterwarnings("ignore", category=RuntimeWarning)\n casted = arr.astype(dtype, copy=False)\n except OverflowError as err:\n raise OverflowError(\n "The elements provided in the data cannot all be "\n f"casted to the dtype {dtype}"\n ) from err\n\n if isinstance(arr, np.ndarray) and arr.dtype == dtype:\n # avoid expensive array_equal check\n return casted\n\n with warnings.catch_warnings():\n warnings.filterwarnings("ignore", category=RuntimeWarning)\n warnings.filterwarnings(\n "ignore", "elementwise comparison failed", FutureWarning\n )\n if np.array_equal(arr, casted):\n return casted\n\n # We do this casting to allow for proper\n # data and dtype checking.\n #\n # We didn't do this earlier because NumPy\n # 
doesn't handle `uint64` correctly.\n arr = np.asarray(arr)\n\n if np.issubdtype(arr.dtype, str):\n # TODO(numpy-2.0 min): This case will raise an OverflowError above\n if (casted.astype(str) == arr).all():\n return casted\n raise ValueError(f"string values cannot be losslessly cast to {dtype}")\n\n if dtype.kind == "u" and (arr < 0).any():\n # TODO: can this be hit anymore after numpy 2.0?\n raise OverflowError("Trying to coerce negative values to unsigned integers")\n\n if arr.dtype.kind == "f":\n if not np.isfinite(arr).all():\n raise IntCastingNaNError(\n "Cannot convert non-finite values (NA or inf) to integer"\n )\n raise ValueError("Trying to coerce float values to integers")\n if arr.dtype == object:\n raise ValueError("Trying to coerce float values to integers")\n\n if casted.dtype < arr.dtype:\n # TODO: Can this path be hit anymore with numpy > 2\n # GH#41734 e.g. [1, 200, 923442] and dtype="int8" -> overflows\n raise ValueError(\n f"Values are too large to be losslessly converted to {dtype}. "\n f"To cast anyway, use pd.Series(values).astype({dtype})"\n )\n\n if arr.dtype.kind in "mM":\n # test_constructor_maskedarray_nonfloat\n raise TypeError(\n f"Constructing a Series or DataFrame from {arr.dtype} values and "\n f"dtype={dtype} is not supported. 
Use values.view({dtype}) instead."\n )\n\n # No known cases that get here, but raising explicitly to cover our bases.\n raise ValueError(f"values cannot be losslessly cast to {dtype}")\n\n\ndef can_hold_element(arr: ArrayLike, element: Any) -> bool:\n """\n Can we do an inplace setitem with this element in an array with this dtype?\n\n Parameters\n ----------\n arr : np.ndarray or ExtensionArray\n element : Any\n\n Returns\n -------\n bool\n """\n dtype = arr.dtype\n if not isinstance(dtype, np.dtype) or dtype.kind in "mM":\n if isinstance(dtype, (PeriodDtype, IntervalDtype, DatetimeTZDtype, np.dtype)):\n # np.dtype here catches datetime64ns and timedelta64ns; we assume\n # in this case that we have DatetimeArray/TimedeltaArray\n arr = cast(\n "PeriodArray | DatetimeArray | TimedeltaArray | IntervalArray", arr\n )\n try:\n arr._validate_setitem_value(element)\n return True\n except (ValueError, TypeError):\n return False\n\n if dtype == "string":\n try:\n arr._maybe_convert_setitem_value(element) # type: ignore[union-attr]\n return True\n except (ValueError, TypeError):\n return False\n\n # This is technically incorrect, but maintains the behavior of\n # ExtensionBlock._can_hold_element\n return True\n\n try:\n np_can_hold_element(dtype, element)\n return True\n except (TypeError, LossySetitemError):\n return False\n\n\ndef np_can_hold_element(dtype: np.dtype, element: Any) -> Any:\n """\n Raise if we cannot losslessly set this element into an ndarray with this dtype.\n\n Specifically about places where we disagree with numpy. i.e. there are\n cases where numpy will raise in doing the setitem that we do not check\n for here, e.g. 
setting str "X" into a numeric ndarray.\n\n Returns\n -------\n Any\n The element, potentially cast to the dtype.\n\n Raises\n ------\n ValueError : If we cannot losslessly store this element with this dtype.\n """\n if dtype == _dtype_obj:\n return element\n\n tipo = _maybe_infer_dtype_type(element)\n\n if dtype.kind in "iu":\n if isinstance(element, range):\n if _dtype_can_hold_range(element, dtype):\n return element\n raise LossySetitemError\n\n if is_integer(element) or (is_float(element) and element.is_integer()):\n # e.g. test_setitem_series_int8 if we have a python int 1\n # tipo may be np.int32, despite the fact that it will fit\n # in smaller int dtypes.\n info = np.iinfo(dtype)\n if info.min <= element <= info.max:\n return dtype.type(element)\n raise LossySetitemError\n\n if tipo is not None:\n if tipo.kind not in "iu":\n if isinstance(element, np.ndarray) and element.dtype.kind == "f":\n # If all can be losslessly cast to integers, then we can hold them\n with np.errstate(invalid="ignore"):\n # We check afterwards if cast was losslessly, so no need to show\n # the warning\n casted = element.astype(dtype)\n comp = casted == element\n if comp.all():\n # Return the casted values bc they can be passed to\n # np.putmask, whereas the raw values cannot.\n # see TestSetitemFloatNDarrayIntoIntegerSeries\n return casted\n raise LossySetitemError\n\n elif isinstance(element, ABCExtensionArray) and isinstance(\n element.dtype, CategoricalDtype\n ):\n # GH#52927 setting Categorical value into non-EA frame\n # TODO: general-case for EAs?\n try:\n casted = element.astype(dtype)\n except (ValueError, TypeError):\n raise LossySetitemError\n # Check for cases of either\n # a) lossy overflow/rounding or\n # b) semantic changes like dt64->int64\n comp = casted == element\n if not comp.all():\n raise LossySetitemError\n return casted\n\n # Anything other than integer we cannot hold\n raise LossySetitemError\n if (\n dtype.kind == "u"\n and isinstance(element, np.ndarray)\n 
and element.dtype.kind == "i"\n ):\n # see test_where_uint64\n casted = element.astype(dtype)\n if (casted == element).all():\n # TODO: faster to check (element >=0).all()? potential\n # itemsize issues there?\n return casted\n raise LossySetitemError\n if dtype.itemsize < tipo.itemsize:\n raise LossySetitemError\n if not isinstance(tipo, np.dtype):\n # i.e. nullable IntegerDtype; we can put this into an ndarray\n # losslessly iff it has no NAs\n arr = element._values if isinstance(element, ABCSeries) else element\n if arr._hasna:\n raise LossySetitemError\n return element\n\n return element\n\n raise LossySetitemError\n\n if dtype.kind == "f":\n if lib.is_integer(element) or lib.is_float(element):\n casted = dtype.type(element)\n if np.isnan(casted) or casted == element:\n return casted\n # otherwise e.g. overflow see TestCoercionFloat32\n raise LossySetitemError\n\n if tipo is not None:\n # TODO: itemsize check?\n if tipo.kind not in "iuf":\n # Anything other than float/integer we cannot hold\n raise LossySetitemError\n if not isinstance(tipo, np.dtype):\n # i.e. nullable IntegerDtype or FloatingDtype;\n # we can put this into an ndarray losslessly iff it has no NAs\n if element._hasna:\n raise LossySetitemError\n return element\n elif tipo.itemsize > dtype.itemsize or tipo.kind != dtype.kind:\n if isinstance(element, np.ndarray):\n # e.g. TestDataFrameIndexingWhere::test_where_alignment\n casted = element.astype(dtype)\n if np.array_equal(casted, element, equal_nan=True):\n return casted\n raise LossySetitemError\n\n return element\n\n raise LossySetitemError\n\n if dtype.kind == "c":\n if lib.is_integer(element) or lib.is_complex(element) or lib.is_float(element):\n if np.isnan(element):\n # see test_where_complex GH#6345\n return dtype.type(element)\n\n with warnings.catch_warnings():\n warnings.filterwarnings("ignore")\n casted = dtype.type(element)\n if casted == element:\n return casted\n # otherwise e.g. 
overflow see test_32878_complex_itemsize\n raise LossySetitemError\n\n if tipo is not None:\n if tipo.kind in "iufc":\n return element\n raise LossySetitemError\n raise LossySetitemError\n\n if dtype.kind == "b":\n if tipo is not None:\n if tipo.kind == "b":\n if not isinstance(tipo, np.dtype):\n # i.e. we have a BooleanArray\n if element._hasna:\n # i.e. there are pd.NA elements\n raise LossySetitemError\n return element\n raise LossySetitemError\n if lib.is_bool(element):\n return element\n raise LossySetitemError\n\n if dtype.kind == "S":\n # TODO: test tests.frame.methods.test_replace tests get here,\n # need more targeted tests. xref phofl has a PR about this\n if tipo is not None:\n if tipo.kind == "S" and tipo.itemsize <= dtype.itemsize:\n return element\n raise LossySetitemError\n if isinstance(element, bytes) and len(element) <= dtype.itemsize:\n return element\n raise LossySetitemError\n\n if dtype.kind == "V":\n # i.e. np.void, which cannot hold _anything_\n raise LossySetitemError\n\n raise NotImplementedError(dtype)\n\n\ndef _dtype_can_hold_range(rng: range, dtype: np.dtype) -> bool:\n """\n _maybe_infer_dtype_type infers to int64 (and float64 for very large endpoints),\n but in many cases a range can be held by a smaller integer dtype.\n Check if this is one of those cases.\n """\n if not len(rng):\n return True\n return np_can_cast_scalar(rng.start, dtype) and np_can_cast_scalar(rng.stop, dtype)\n\n\ndef np_can_cast_scalar(element: Scalar, dtype: np.dtype) -> bool:\n """\n np.can_cast pandas-equivalent for pre 2-0 behavior that allowed scalar\n inference\n\n Parameters\n ----------\n element : Scalar\n dtype : np.dtype\n\n Returns\n -------\n bool\n """\n try:\n np_can_hold_element(dtype, element)\n return True\n except (LossySetitemError, NotImplementedError):\n return False\n
.venv\Lib\site-packages\pandas\core\dtypes\cast.py
cast.py
Python
62,726
0.75
0.172032
0.113358
node-utils
910
2024-04-13T02:09:42.285438
MIT
false
145450e99df4177d4331609733bd13a5
"""\nCommon type operations.\n"""\nfrom __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._config import using_string_dtype\n\nfrom pandas._libs import (\n Interval,\n Period,\n algos,\n lib,\n)\nfrom pandas._libs.tslibs import conversion\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.base import _registry as registry\nfrom pandas.core.dtypes.dtypes import (\n CategoricalDtype,\n DatetimeTZDtype,\n ExtensionDtype,\n IntervalDtype,\n PeriodDtype,\n SparseDtype,\n)\nfrom pandas.core.dtypes.generic import ABCIndex\nfrom pandas.core.dtypes.inference import (\n is_array_like,\n is_bool,\n is_complex,\n is_dataclass,\n is_decimal,\n is_dict_like,\n is_file_like,\n is_float,\n is_hashable,\n is_integer,\n is_interval,\n is_iterator,\n is_list_like,\n is_named_tuple,\n is_nested_list_like,\n is_number,\n is_re,\n is_re_compilable,\n is_scalar,\n is_sequence,\n)\n\nif TYPE_CHECKING:\n from pandas._typing import (\n ArrayLike,\n DtypeObj,\n )\n\nDT64NS_DTYPE = conversion.DT64NS_DTYPE\nTD64NS_DTYPE = conversion.TD64NS_DTYPE\nINT64_DTYPE = np.dtype(np.int64)\n\n# oh the troubles to reduce import time\n_is_scipy_sparse = None\n\nensure_float64 = algos.ensure_float64\nensure_int64 = algos.ensure_int64\nensure_int32 = algos.ensure_int32\nensure_int16 = algos.ensure_int16\nensure_int8 = algos.ensure_int8\nensure_platform_int = algos.ensure_platform_int\nensure_object = algos.ensure_object\nensure_uint64 = algos.ensure_uint64\n\n\ndef ensure_str(value: bytes | Any) -> str:\n """\n Ensure that bytes and non-strings get converted into ``str`` objects.\n """\n if isinstance(value, bytes):\n value = value.decode("utf-8")\n elif not isinstance(value, str):\n value = str(value)\n return value\n\n\ndef ensure_python_int(value: int | np.integer) -> int:\n """\n Ensure that a value is a python int.\n\n Parameters\n ----------\n value: int or numpy.integer\n\n 
Returns\n -------\n int\n\n Raises\n ------\n TypeError: if the value isn't an int or can't be converted to one.\n """\n if not (is_integer(value) or is_float(value)):\n if not is_scalar(value):\n raise TypeError(\n f"Value needs to be a scalar value, was type {type(value).__name__}"\n )\n raise TypeError(f"Wrong type {type(value)} for value {value}")\n try:\n new_value = int(value)\n assert new_value == value\n except (TypeError, ValueError, AssertionError) as err:\n raise TypeError(f"Wrong type {type(value)} for value {value}") from err\n return new_value\n\n\ndef classes(*klasses) -> Callable:\n """Evaluate if the tipo is a subclass of the klasses."""\n return lambda tipo: issubclass(tipo, klasses)\n\n\ndef _classes_and_not_datetimelike(*klasses) -> Callable:\n """\n Evaluate if the tipo is a subclass of the klasses\n and not a datetimelike.\n """\n return lambda tipo: (\n issubclass(tipo, klasses)\n and not issubclass(tipo, (np.datetime64, np.timedelta64))\n )\n\n\ndef is_object_dtype(arr_or_dtype) -> bool:\n """\n Check whether an array-like or dtype is of the object dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like or dtype\n The array-like or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like or dtype is of the object dtype.\n\n Examples\n --------\n >>> from pandas.api.types import is_object_dtype\n >>> is_object_dtype(object)\n True\n >>> is_object_dtype(int)\n False\n >>> is_object_dtype(np.array([], dtype=object))\n True\n >>> is_object_dtype(np.array([], dtype=int))\n False\n >>> is_object_dtype([1, 2, 3])\n False\n """\n return _is_dtype_type(arr_or_dtype, classes(np.object_))\n\n\ndef is_sparse(arr) -> bool:\n """\n Check whether an array-like is a 1-D pandas sparse array.\n\n .. 
deprecated:: 2.1.0\n Use isinstance(dtype, pd.SparseDtype) instead.\n\n Check that the one-dimensional array-like is a pandas sparse array.\n Returns True if it is a pandas sparse array, not another type of\n sparse array.\n\n Parameters\n ----------\n arr : array-like\n Array-like to check.\n\n Returns\n -------\n bool\n Whether or not the array-like is a pandas sparse array.\n\n Examples\n --------\n Returns `True` if the parameter is a 1-D pandas sparse array.\n\n >>> from pandas.api.types import is_sparse\n >>> is_sparse(pd.arrays.SparseArray([0, 0, 1, 0]))\n True\n >>> is_sparse(pd.Series(pd.arrays.SparseArray([0, 0, 1, 0])))\n True\n\n Returns `False` if the parameter is not sparse.\n\n >>> is_sparse(np.array([0, 0, 1, 0]))\n False\n >>> is_sparse(pd.Series([0, 1, 0, 0]))\n False\n\n Returns `False` if the parameter is not a pandas sparse array.\n\n >>> from scipy.sparse import bsr_matrix\n >>> is_sparse(bsr_matrix([0, 1, 0, 0]))\n False\n\n Returns `False` if the parameter has more than one dimension.\n """\n warnings.warn(\n "is_sparse is deprecated and will be removed in a future "\n "version. 
Check `isinstance(dtype, pd.SparseDtype)` instead.",\n DeprecationWarning,\n stacklevel=2,\n )\n\n dtype = getattr(arr, "dtype", arr)\n return isinstance(dtype, SparseDtype)\n\n\ndef is_scipy_sparse(arr) -> bool:\n """\n Check whether an array-like is a scipy.sparse.spmatrix instance.\n\n Parameters\n ----------\n arr : array-like\n The array-like to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like is a scipy.sparse.spmatrix instance.\n\n Notes\n -----\n If scipy is not installed, this function will always return False.\n\n Examples\n --------\n >>> from scipy.sparse import bsr_matrix\n >>> is_scipy_sparse(bsr_matrix([1, 2, 3]))\n True\n >>> is_scipy_sparse(pd.arrays.SparseArray([1, 2, 3]))\n False\n """\n global _is_scipy_sparse\n\n if _is_scipy_sparse is None: # pylint: disable=used-before-assignment\n try:\n from scipy.sparse import issparse as _is_scipy_sparse\n except ImportError:\n _is_scipy_sparse = lambda _: False\n\n assert _is_scipy_sparse is not None\n return _is_scipy_sparse(arr)\n\n\ndef is_datetime64_dtype(arr_or_dtype) -> bool:\n """\n Check whether an array-like or dtype is of the datetime64 dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like or dtype\n The array-like or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like or dtype is of the datetime64 dtype.\n\n Examples\n --------\n >>> from pandas.api.types import is_datetime64_dtype\n >>> is_datetime64_dtype(object)\n False\n >>> is_datetime64_dtype(np.datetime64)\n True\n >>> is_datetime64_dtype(np.array([], dtype=int))\n False\n >>> is_datetime64_dtype(np.array([], dtype=np.datetime64))\n True\n >>> is_datetime64_dtype([1, 2, 3])\n False\n """\n if isinstance(arr_or_dtype, np.dtype):\n # GH#33400 fastpath for dtype object\n return arr_or_dtype.kind == "M"\n return _is_dtype_type(arr_or_dtype, classes(np.datetime64))\n\n\ndef is_datetime64tz_dtype(arr_or_dtype) -> bool:\n """\n Check whether an array-like or dtype is of a 
DatetimeTZDtype dtype.\n\n .. deprecated:: 2.1.0\n Use isinstance(dtype, pd.DatetimeTZDtype) instead.\n\n Parameters\n ----------\n arr_or_dtype : array-like or dtype\n The array-like or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like or dtype is of a DatetimeTZDtype dtype.\n\n Examples\n --------\n >>> from pandas.api.types import is_datetime64tz_dtype\n >>> is_datetime64tz_dtype(object)\n False\n >>> is_datetime64tz_dtype([1, 2, 3])\n False\n >>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3])) # tz-naive\n False\n >>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))\n True\n\n >>> from pandas.core.dtypes.dtypes import DatetimeTZDtype\n >>> dtype = DatetimeTZDtype("ns", tz="US/Eastern")\n >>> s = pd.Series([], dtype=dtype)\n >>> is_datetime64tz_dtype(dtype)\n True\n >>> is_datetime64tz_dtype(s)\n True\n """\n # GH#52607\n warnings.warn(\n "is_datetime64tz_dtype is deprecated and will be removed in a future "\n "version. Check `isinstance(dtype, pd.DatetimeTZDtype)` instead.",\n DeprecationWarning,\n stacklevel=2,\n )\n if isinstance(arr_or_dtype, DatetimeTZDtype):\n # GH#33400 fastpath for dtype object\n # GH 34986\n return True\n\n if arr_or_dtype is None:\n return False\n return DatetimeTZDtype.is_dtype(arr_or_dtype)\n\n\ndef is_timedelta64_dtype(arr_or_dtype) -> bool:\n """\n Check whether an array-like or dtype is of the timedelta64 dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like or dtype\n The array-like or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like or dtype is of the timedelta64 dtype.\n\n Examples\n --------\n >>> from pandas.core.dtypes.common import is_timedelta64_dtype\n >>> is_timedelta64_dtype(object)\n False\n >>> is_timedelta64_dtype(np.timedelta64)\n True\n >>> is_timedelta64_dtype([1, 2, 3])\n False\n >>> is_timedelta64_dtype(pd.Series([], dtype="timedelta64[ns]"))\n True\n >>> is_timedelta64_dtype('0 days')\n False\n """\n if 
isinstance(arr_or_dtype, np.dtype):\n # GH#33400 fastpath for dtype object\n return arr_or_dtype.kind == "m"\n\n return _is_dtype_type(arr_or_dtype, classes(np.timedelta64))\n\n\ndef is_period_dtype(arr_or_dtype) -> bool:\n """\n Check whether an array-like or dtype is of the Period dtype.\n\n .. deprecated:: 2.2.0\n Use isinstance(dtype, pd.Period) instead.\n\n Parameters\n ----------\n arr_or_dtype : array-like or dtype\n The array-like or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like or dtype is of the Period dtype.\n\n Examples\n --------\n >>> from pandas.core.dtypes.common import is_period_dtype\n >>> is_period_dtype(object)\n False\n >>> is_period_dtype(pd.PeriodDtype(freq="D"))\n True\n >>> is_period_dtype([1, 2, 3])\n False\n >>> is_period_dtype(pd.Period("2017-01-01"))\n False\n >>> is_period_dtype(pd.PeriodIndex([], freq="Y"))\n True\n """\n warnings.warn(\n "is_period_dtype is deprecated and will be removed in a future version. "\n "Use `isinstance(dtype, pd.PeriodDtype)` instead",\n DeprecationWarning,\n stacklevel=2,\n )\n if isinstance(arr_or_dtype, ExtensionDtype):\n # GH#33400 fastpath for dtype object\n return arr_or_dtype.type is Period\n\n if arr_or_dtype is None:\n return False\n return PeriodDtype.is_dtype(arr_or_dtype)\n\n\ndef is_interval_dtype(arr_or_dtype) -> bool:\n """\n Check whether an array-like or dtype is of the Interval dtype.\n\n .. 
deprecated:: 2.2.0\n Use isinstance(dtype, pd.IntervalDtype) instead.\n\n Parameters\n ----------\n arr_or_dtype : array-like or dtype\n The array-like or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like or dtype is of the Interval dtype.\n\n Examples\n --------\n >>> from pandas.core.dtypes.common import is_interval_dtype\n >>> is_interval_dtype(object)\n False\n >>> is_interval_dtype(pd.IntervalDtype())\n True\n >>> is_interval_dtype([1, 2, 3])\n False\n >>>\n >>> interval = pd.Interval(1, 2, closed="right")\n >>> is_interval_dtype(interval)\n False\n >>> is_interval_dtype(pd.IntervalIndex([interval]))\n True\n """\n # GH#52607\n warnings.warn(\n "is_interval_dtype is deprecated and will be removed in a future version. "\n "Use `isinstance(dtype, pd.IntervalDtype)` instead",\n DeprecationWarning,\n stacklevel=2,\n )\n if isinstance(arr_or_dtype, ExtensionDtype):\n # GH#33400 fastpath for dtype object\n return arr_or_dtype.type is Interval\n\n if arr_or_dtype is None:\n return False\n return IntervalDtype.is_dtype(arr_or_dtype)\n\n\ndef is_categorical_dtype(arr_or_dtype) -> bool:\n """\n Check whether an array-like or dtype is of the Categorical dtype.\n\n .. deprecated:: 2.2.0\n Use isinstance(dtype, pd.CategoricalDtype) instead.\n\n Parameters\n ----------\n arr_or_dtype : array-like or dtype\n The array-like or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like or dtype is of the Categorical dtype.\n\n Examples\n --------\n >>> from pandas.api.types import is_categorical_dtype\n >>> from pandas import CategoricalDtype\n >>> is_categorical_dtype(object)\n False\n >>> is_categorical_dtype(CategoricalDtype())\n True\n >>> is_categorical_dtype([1, 2, 3])\n False\n >>> is_categorical_dtype(pd.Categorical([1, 2, 3]))\n True\n >>> is_categorical_dtype(pd.CategoricalIndex([1, 2, 3]))\n True\n """\n # GH#52527\n warnings.warn(\n "is_categorical_dtype is deprecated and will be removed in a future "\n "version. 
Use isinstance(dtype, pd.CategoricalDtype) instead",\n DeprecationWarning,\n stacklevel=2,\n )\n if isinstance(arr_or_dtype, ExtensionDtype):\n # GH#33400 fastpath for dtype object\n return arr_or_dtype.name == "category"\n\n if arr_or_dtype is None:\n return False\n return CategoricalDtype.is_dtype(arr_or_dtype)\n\n\ndef is_string_or_object_np_dtype(dtype: np.dtype) -> bool:\n """\n Faster alternative to is_string_dtype, assumes we have a np.dtype object.\n """\n return dtype == object or dtype.kind in "SU"\n\n\ndef is_string_dtype(arr_or_dtype) -> bool:\n """\n Check whether the provided array or dtype is of the string dtype.\n\n If an array is passed with an object dtype, the elements must be\n inferred as strings.\n\n Parameters\n ----------\n arr_or_dtype : array-like or dtype\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of the string dtype.\n\n Examples\n --------\n >>> from pandas.api.types import is_string_dtype\n >>> is_string_dtype(str)\n True\n >>> is_string_dtype(object)\n True\n >>> is_string_dtype(int)\n False\n >>> is_string_dtype(np.array(['a', 'b']))\n True\n >>> is_string_dtype(pd.Series([1, 2]))\n False\n >>> is_string_dtype(pd.Series([1, 2], dtype=object))\n False\n """\n if hasattr(arr_or_dtype, "dtype") and _get_dtype(arr_or_dtype).kind == "O":\n return is_all_strings(arr_or_dtype)\n\n def condition(dtype) -> bool:\n if is_string_or_object_np_dtype(dtype):\n return True\n try:\n return dtype == "string"\n except TypeError:\n return False\n\n return _is_dtype(arr_or_dtype, condition)\n\n\ndef is_dtype_equal(source, target) -> bool:\n """\n Check if two dtypes are equal.\n\n Parameters\n ----------\n source : The first dtype to compare\n target : The second dtype to compare\n\n Returns\n -------\n boolean\n Whether or not the two dtypes are equal.\n\n Examples\n --------\n >>> is_dtype_equal(int, float)\n False\n >>> is_dtype_equal("int", int)\n True\n >>> is_dtype_equal(object, 
"category")\n False\n >>> is_dtype_equal(CategoricalDtype(), "category")\n True\n >>> is_dtype_equal(DatetimeTZDtype(tz="UTC"), "datetime64")\n False\n """\n if isinstance(target, str):\n if not isinstance(source, str):\n # GH#38516 ensure we get the same behavior from\n # is_dtype_equal(CDT, "category") and CDT == "category"\n try:\n src = _get_dtype(source)\n if isinstance(src, ExtensionDtype):\n return src == target\n except (TypeError, AttributeError, ImportError):\n return False\n elif isinstance(source, str):\n return is_dtype_equal(target, source)\n\n try:\n source = _get_dtype(source)\n target = _get_dtype(target)\n return source == target\n except (TypeError, AttributeError, ImportError):\n # invalid comparison\n # object == category will hit this\n return False\n\n\ndef is_integer_dtype(arr_or_dtype) -> bool:\n """\n Check whether the provided array or dtype is of an integer dtype.\n\n Unlike in `is_any_int_dtype`, timedelta64 instances will return False.\n\n The nullable Integer dtypes (e.g. 
pandas.Int64Dtype) are also considered\n as integer by this function.\n\n Parameters\n ----------\n arr_or_dtype : array-like or dtype\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of an integer dtype and\n not an instance of timedelta64.\n\n Examples\n --------\n >>> from pandas.api.types import is_integer_dtype\n >>> is_integer_dtype(str)\n False\n >>> is_integer_dtype(int)\n True\n >>> is_integer_dtype(float)\n False\n >>> is_integer_dtype(np.uint64)\n True\n >>> is_integer_dtype('int8')\n True\n >>> is_integer_dtype('Int8')\n True\n >>> is_integer_dtype(pd.Int8Dtype)\n True\n >>> is_integer_dtype(np.datetime64)\n False\n >>> is_integer_dtype(np.timedelta64)\n False\n >>> is_integer_dtype(np.array(['a', 'b']))\n False\n >>> is_integer_dtype(pd.Series([1, 2]))\n True\n >>> is_integer_dtype(np.array([], dtype=np.timedelta64))\n False\n >>> is_integer_dtype(pd.Index([1, 2.])) # float\n False\n """\n return _is_dtype_type(\n arr_or_dtype, _classes_and_not_datetimelike(np.integer)\n ) or _is_dtype(\n arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "iu"\n )\n\n\ndef is_signed_integer_dtype(arr_or_dtype) -> bool:\n """\n Check whether the provided array or dtype is of a signed integer dtype.\n\n Unlike in `is_any_int_dtype`, timedelta64 instances will return False.\n\n The nullable Integer dtypes (e.g. 
pandas.Int64Dtype) are also considered\n as integer by this function.\n\n Parameters\n ----------\n arr_or_dtype : array-like or dtype\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of a signed integer dtype\n and not an instance of timedelta64.\n\n Examples\n --------\n >>> from pandas.core.dtypes.common import is_signed_integer_dtype\n >>> is_signed_integer_dtype(str)\n False\n >>> is_signed_integer_dtype(int)\n True\n >>> is_signed_integer_dtype(float)\n False\n >>> is_signed_integer_dtype(np.uint64) # unsigned\n False\n >>> is_signed_integer_dtype('int8')\n True\n >>> is_signed_integer_dtype('Int8')\n True\n >>> is_signed_integer_dtype(pd.Int8Dtype)\n True\n >>> is_signed_integer_dtype(np.datetime64)\n False\n >>> is_signed_integer_dtype(np.timedelta64)\n False\n >>> is_signed_integer_dtype(np.array(['a', 'b']))\n False\n >>> is_signed_integer_dtype(pd.Series([1, 2]))\n True\n >>> is_signed_integer_dtype(np.array([], dtype=np.timedelta64))\n False\n >>> is_signed_integer_dtype(pd.Index([1, 2.])) # float\n False\n >>> is_signed_integer_dtype(np.array([1, 2], dtype=np.uint32)) # unsigned\n False\n """\n return _is_dtype_type(\n arr_or_dtype, _classes_and_not_datetimelike(np.signedinteger)\n ) or _is_dtype(\n arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind == "i"\n )\n\n\ndef is_unsigned_integer_dtype(arr_or_dtype) -> bool:\n """\n Check whether the provided array or dtype is of an unsigned integer dtype.\n\n The nullable Integer dtypes (e.g. 
pandas.UInt64Dtype) are also\n considered as integer by this function.\n\n Parameters\n ----------\n arr_or_dtype : array-like or dtype\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of an unsigned integer dtype.\n\n Examples\n --------\n >>> from pandas.api.types import is_unsigned_integer_dtype\n >>> is_unsigned_integer_dtype(str)\n False\n >>> is_unsigned_integer_dtype(int) # signed\n False\n >>> is_unsigned_integer_dtype(float)\n False\n >>> is_unsigned_integer_dtype(np.uint64)\n True\n >>> is_unsigned_integer_dtype('uint8')\n True\n >>> is_unsigned_integer_dtype('UInt8')\n True\n >>> is_unsigned_integer_dtype(pd.UInt8Dtype)\n True\n >>> is_unsigned_integer_dtype(np.array(['a', 'b']))\n False\n >>> is_unsigned_integer_dtype(pd.Series([1, 2])) # signed\n False\n >>> is_unsigned_integer_dtype(pd.Index([1, 2.])) # float\n False\n >>> is_unsigned_integer_dtype(np.array([1, 2], dtype=np.uint32))\n True\n """\n return _is_dtype_type(\n arr_or_dtype, _classes_and_not_datetimelike(np.unsignedinteger)\n ) or _is_dtype(\n arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind == "u"\n )\n\n\ndef is_int64_dtype(arr_or_dtype) -> bool:\n """\n Check whether the provided array or dtype is of the int64 dtype.\n\n .. deprecated:: 2.1.0\n\n is_int64_dtype is deprecated and will be removed in a future\n version. 
Use dtype == np.int64 instead.\n\n Parameters\n ----------\n arr_or_dtype : array-like or dtype\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of the int64 dtype.\n\n Notes\n -----\n Depending on system architecture, the return value of `is_int64_dtype(\n int)` will be True if the OS uses 64-bit integers and False if the OS\n uses 32-bit integers.\n\n Examples\n --------\n >>> from pandas.api.types import is_int64_dtype\n >>> is_int64_dtype(str) # doctest: +SKIP\n False\n >>> is_int64_dtype(np.int32) # doctest: +SKIP\n False\n >>> is_int64_dtype(np.int64) # doctest: +SKIP\n True\n >>> is_int64_dtype('int8') # doctest: +SKIP\n False\n >>> is_int64_dtype('Int8') # doctest: +SKIP\n False\n >>> is_int64_dtype(pd.Int64Dtype) # doctest: +SKIP\n True\n >>> is_int64_dtype(float) # doctest: +SKIP\n False\n >>> is_int64_dtype(np.uint64) # unsigned # doctest: +SKIP\n False\n >>> is_int64_dtype(np.array(['a', 'b'])) # doctest: +SKIP\n False\n >>> is_int64_dtype(np.array([1, 2], dtype=np.int64)) # doctest: +SKIP\n True\n >>> is_int64_dtype(pd.Index([1, 2.])) # float # doctest: +SKIP\n False\n >>> is_int64_dtype(np.array([1, 2], dtype=np.uint32)) # unsigned # doctest: +SKIP\n False\n """\n # GH#52564\n warnings.warn(\n "is_int64_dtype is deprecated and will be removed in a future "\n "version. 
Use dtype == np.int64 instead.",\n DeprecationWarning,\n stacklevel=2,\n )\n return _is_dtype_type(arr_or_dtype, classes(np.int64))\n\n\ndef is_datetime64_any_dtype(arr_or_dtype) -> bool:\n """\n Check whether the provided array or dtype is of the datetime64 dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like or dtype\n The array or dtype to check.\n\n Returns\n -------\n bool\n Whether or not the array or dtype is of the datetime64 dtype.\n\n Examples\n --------\n >>> from pandas.api.types import is_datetime64_any_dtype\n >>> from pandas.core.dtypes.dtypes import DatetimeTZDtype\n >>> is_datetime64_any_dtype(str)\n False\n >>> is_datetime64_any_dtype(int)\n False\n >>> is_datetime64_any_dtype(np.datetime64) # can be tz-naive\n True\n >>> is_datetime64_any_dtype(DatetimeTZDtype("ns", "US/Eastern"))\n True\n >>> is_datetime64_any_dtype(np.array(['a', 'b']))\n False\n >>> is_datetime64_any_dtype(np.array([1, 2]))\n False\n >>> is_datetime64_any_dtype(np.array([], dtype="datetime64[ns]"))\n True\n >>> is_datetime64_any_dtype(pd.DatetimeIndex([1, 2, 3], dtype="datetime64[ns]"))\n True\n """\n if isinstance(arr_or_dtype, (np.dtype, ExtensionDtype)):\n # GH#33400 fastpath for dtype object\n return arr_or_dtype.kind == "M"\n\n if arr_or_dtype is None:\n return False\n\n try:\n tipo = _get_dtype(arr_or_dtype)\n except TypeError:\n return False\n return lib.is_np_dtype(tipo, "M") or isinstance(tipo, DatetimeTZDtype)\n\n\ndef is_datetime64_ns_dtype(arr_or_dtype) -> bool:\n """\n Check whether the provided array or dtype is of the datetime64[ns] dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like or dtype\n The array or dtype to check.\n\n Returns\n -------\n bool\n Whether or not the array or dtype is of the datetime64[ns] dtype.\n\n Examples\n --------\n >>> from pandas.api.types import is_datetime64_ns_dtype\n >>> from pandas.core.dtypes.dtypes import DatetimeTZDtype\n >>> is_datetime64_ns_dtype(str)\n False\n >>> is_datetime64_ns_dtype(int)\n False\n 
>>> is_datetime64_ns_dtype(np.datetime64) # no unit\n False\n >>> is_datetime64_ns_dtype(DatetimeTZDtype("ns", "US/Eastern"))\n True\n >>> is_datetime64_ns_dtype(np.array(['a', 'b']))\n False\n >>> is_datetime64_ns_dtype(np.array([1, 2]))\n False\n >>> is_datetime64_ns_dtype(np.array([], dtype="datetime64")) # no unit\n False\n >>> is_datetime64_ns_dtype(np.array([], dtype="datetime64[ps]")) # wrong unit\n False\n >>> is_datetime64_ns_dtype(pd.DatetimeIndex([1, 2, 3], dtype="datetime64[ns]"))\n True\n """\n if arr_or_dtype is None:\n return False\n try:\n tipo = _get_dtype(arr_or_dtype)\n except TypeError:\n return False\n return tipo == DT64NS_DTYPE or (\n isinstance(tipo, DatetimeTZDtype) and tipo.unit == "ns"\n )\n\n\ndef is_timedelta64_ns_dtype(arr_or_dtype) -> bool:\n """\n Check whether the provided array or dtype is of the timedelta64[ns] dtype.\n\n This is a very specific dtype, so generic ones like `np.timedelta64`\n will return False if passed into this function.\n\n Parameters\n ----------\n arr_or_dtype : array-like or dtype\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of the timedelta64[ns] dtype.\n\n Examples\n --------\n >>> from pandas.core.dtypes.common import is_timedelta64_ns_dtype\n >>> is_timedelta64_ns_dtype(np.dtype('m8[ns]'))\n True\n >>> is_timedelta64_ns_dtype(np.dtype('m8[ps]')) # Wrong frequency\n False\n >>> is_timedelta64_ns_dtype(np.array([1, 2], dtype='m8[ns]'))\n True\n >>> is_timedelta64_ns_dtype(np.array([1, 2], dtype=np.timedelta64))\n False\n """\n return _is_dtype(arr_or_dtype, lambda dtype: dtype == TD64NS_DTYPE)\n\n\n# This exists to silence numpy deprecation warnings, see GH#29553\ndef is_numeric_v_string_like(a: ArrayLike, b) -> bool:\n """\n Check if we are comparing a string-like object to a numeric ndarray.\n NumPy doesn't like to compare such objects, especially numeric arrays\n and scalar string-likes.\n\n Parameters\n ----------\n a : array-like, scalar\n The 
first object to check.\n b : array-like, scalar\n The second object to check.\n\n Returns\n -------\n boolean\n Whether we return a comparing a string-like object to a numeric array.\n\n Examples\n --------\n >>> is_numeric_v_string_like(np.array([1]), "foo")\n True\n >>> is_numeric_v_string_like(np.array([1, 2]), np.array(["foo"]))\n True\n >>> is_numeric_v_string_like(np.array(["foo"]), np.array([1, 2]))\n True\n >>> is_numeric_v_string_like(np.array([1]), np.array([2]))\n False\n >>> is_numeric_v_string_like(np.array(["foo"]), np.array(["foo"]))\n False\n """\n is_a_array = isinstance(a, np.ndarray)\n is_b_array = isinstance(b, np.ndarray)\n\n is_a_numeric_array = is_a_array and a.dtype.kind in ("u", "i", "f", "c", "b")\n is_b_numeric_array = is_b_array and b.dtype.kind in ("u", "i", "f", "c", "b")\n is_a_string_array = is_a_array and a.dtype.kind in ("S", "U")\n is_b_string_array = is_b_array and b.dtype.kind in ("S", "U")\n\n is_b_scalar_string_like = not is_b_array and isinstance(b, str)\n\n return (\n (is_a_numeric_array and is_b_scalar_string_like)\n or (is_a_numeric_array and is_b_string_array)\n or (is_b_numeric_array and is_a_string_array)\n )\n\n\ndef needs_i8_conversion(dtype: DtypeObj | None) -> bool:\n """\n Check whether the dtype should be converted to int64.\n\n Dtype "needs" such a conversion if the dtype is of a datetime-like dtype\n\n Parameters\n ----------\n dtype : np.dtype, ExtensionDtype, or None\n\n Returns\n -------\n boolean\n Whether or not the dtype should be converted to int64.\n\n Examples\n --------\n >>> needs_i8_conversion(str)\n False\n >>> needs_i8_conversion(np.int64)\n False\n >>> needs_i8_conversion(np.datetime64)\n False\n >>> needs_i8_conversion(np.dtype(np.datetime64))\n True\n >>> needs_i8_conversion(np.array(['a', 'b']))\n False\n >>> needs_i8_conversion(pd.Series([1, 2]))\n False\n >>> needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]"))\n False\n >>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], 
tz="US/Eastern"))\n False\n >>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern").dtype)\n True\n """\n if isinstance(dtype, np.dtype):\n return dtype.kind in "mM"\n return isinstance(dtype, (PeriodDtype, DatetimeTZDtype))\n\n\ndef is_numeric_dtype(arr_or_dtype) -> bool:\n """\n Check whether the provided array or dtype is of a numeric dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like or dtype\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of a numeric dtype.\n\n Examples\n --------\n >>> from pandas.api.types import is_numeric_dtype\n >>> is_numeric_dtype(str)\n False\n >>> is_numeric_dtype(int)\n True\n >>> is_numeric_dtype(float)\n True\n >>> is_numeric_dtype(np.uint64)\n True\n >>> is_numeric_dtype(np.datetime64)\n False\n >>> is_numeric_dtype(np.timedelta64)\n False\n >>> is_numeric_dtype(np.array(['a', 'b']))\n False\n >>> is_numeric_dtype(pd.Series([1, 2]))\n True\n >>> is_numeric_dtype(pd.Index([1, 2.]))\n True\n >>> is_numeric_dtype(np.array([], dtype=np.timedelta64))\n False\n """\n return _is_dtype_type(\n arr_or_dtype, _classes_and_not_datetimelike(np.number, np.bool_)\n ) or _is_dtype(\n arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ._is_numeric\n )\n\n\ndef is_any_real_numeric_dtype(arr_or_dtype) -> bool:\n """\n Check whether the provided array or dtype is of a real number dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like or dtype\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of a real number dtype.\n\n Examples\n --------\n >>> from pandas.api.types import is_any_real_numeric_dtype\n >>> is_any_real_numeric_dtype(int)\n True\n >>> is_any_real_numeric_dtype(float)\n True\n >>> is_any_real_numeric_dtype(object)\n False\n >>> is_any_real_numeric_dtype(str)\n False\n >>> is_any_real_numeric_dtype(complex(1, 2))\n False\n >>> is_any_real_numeric_dtype(bool)\n False\n """\n return (\n 
is_numeric_dtype(arr_or_dtype)\n and not is_complex_dtype(arr_or_dtype)\n and not is_bool_dtype(arr_or_dtype)\n )\n\n\ndef is_float_dtype(arr_or_dtype) -> bool:\n """\n Check whether the provided array or dtype is of a float dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like or dtype\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of a float dtype.\n\n Examples\n --------\n >>> from pandas.api.types import is_float_dtype\n >>> is_float_dtype(str)\n False\n >>> is_float_dtype(int)\n False\n >>> is_float_dtype(float)\n True\n >>> is_float_dtype(np.array(['a', 'b']))\n False\n >>> is_float_dtype(pd.Series([1, 2]))\n False\n >>> is_float_dtype(pd.Index([1, 2.]))\n True\n """\n return _is_dtype_type(arr_or_dtype, classes(np.floating)) or _is_dtype(\n arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "f"\n )\n\n\ndef is_bool_dtype(arr_or_dtype) -> bool:\n """\n Check whether the provided array or dtype is of a boolean dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like or dtype\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of a boolean dtype.\n\n Notes\n -----\n An ExtensionArray is considered boolean when the ``_is_boolean``\n attribute is set to True.\n\n Examples\n --------\n >>> from pandas.api.types import is_bool_dtype\n >>> is_bool_dtype(str)\n False\n >>> is_bool_dtype(int)\n False\n >>> is_bool_dtype(bool)\n True\n >>> is_bool_dtype(np.bool_)\n True\n >>> is_bool_dtype(np.array(['a', 'b']))\n False\n >>> is_bool_dtype(pd.Series([1, 2]))\n False\n >>> is_bool_dtype(np.array([True, False]))\n True\n >>> is_bool_dtype(pd.Categorical([True, False]))\n True\n >>> is_bool_dtype(pd.arrays.SparseArray([True, False]))\n True\n """\n if arr_or_dtype is None:\n return False\n try:\n dtype = _get_dtype(arr_or_dtype)\n except (TypeError, ValueError):\n return False\n\n if isinstance(dtype, CategoricalDtype):\n 
arr_or_dtype = dtype.categories\n # now we use the special definition for Index\n\n if isinstance(arr_or_dtype, ABCIndex):\n # Allow Index[object] that is all-bools or Index["boolean"]\n if arr_or_dtype.inferred_type == "boolean":\n if not is_bool_dtype(arr_or_dtype.dtype):\n # GH#52680\n warnings.warn(\n "The behavior of is_bool_dtype with an object-dtype Index "\n "of bool objects is deprecated. In a future version, "\n "this will return False. Cast the Index to a bool dtype instead.",\n DeprecationWarning,\n stacklevel=2,\n )\n return True\n return False\n elif isinstance(dtype, ExtensionDtype):\n return getattr(dtype, "_is_boolean", False)\n\n return issubclass(dtype.type, np.bool_)\n\n\ndef is_1d_only_ea_dtype(dtype: DtypeObj | None) -> bool:\n """\n Analogue to is_extension_array_dtype but excluding DatetimeTZDtype.\n """\n return isinstance(dtype, ExtensionDtype) and not dtype._supports_2d\n\n\ndef is_extension_array_dtype(arr_or_dtype) -> bool:\n """\n Check if an object is a pandas extension array type.\n\n See the :ref:`Use Guide <extending.extension-types>` for more.\n\n Parameters\n ----------\n arr_or_dtype : object\n For array-like input, the ``.dtype`` attribute will\n be extracted.\n\n Returns\n -------\n bool\n Whether the `arr_or_dtype` is an extension array type.\n\n Notes\n -----\n This checks whether an object implements the pandas extension\n array interface. 
In pandas, this includes:\n\n * Categorical\n * Sparse\n * Interval\n * Period\n * DatetimeArray\n * TimedeltaArray\n\n Third-party libraries may implement arrays or types satisfying\n this interface as well.\n\n Examples\n --------\n >>> from pandas.api.types import is_extension_array_dtype\n >>> arr = pd.Categorical(['a', 'b'])\n >>> is_extension_array_dtype(arr)\n True\n >>> is_extension_array_dtype(arr.dtype)\n True\n\n >>> arr = np.array(['a', 'b'])\n >>> is_extension_array_dtype(arr.dtype)\n False\n """\n dtype = getattr(arr_or_dtype, "dtype", arr_or_dtype)\n if isinstance(dtype, ExtensionDtype):\n return True\n elif isinstance(dtype, np.dtype):\n return False\n else:\n try:\n with warnings.catch_warnings():\n # pandas_dtype(..) can raise UserWarning for class input\n warnings.simplefilter("ignore", UserWarning)\n dtype = pandas_dtype(dtype)\n except (TypeError, ValueError):\n # np.dtype(..) can raise ValueError\n return False\n return isinstance(dtype, ExtensionDtype)\n\n\ndef is_ea_or_datetimelike_dtype(dtype: DtypeObj | None) -> bool:\n """\n Check for ExtensionDtype, datetime64 dtype, or timedelta64 dtype.\n\n Notes\n -----\n Checks only for dtype objects, not dtype-castable strings or types.\n """\n return isinstance(dtype, ExtensionDtype) or (lib.is_np_dtype(dtype, "mM"))\n\n\ndef is_complex_dtype(arr_or_dtype) -> bool:\n """\n Check whether the provided array or dtype is of a complex dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like or dtype\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of a complex dtype.\n\n Examples\n --------\n >>> from pandas.api.types import is_complex_dtype\n >>> is_complex_dtype(str)\n False\n >>> is_complex_dtype(int)\n False\n >>> is_complex_dtype(np.complex128)\n True\n >>> is_complex_dtype(np.array(['a', 'b']))\n False\n >>> is_complex_dtype(pd.Series([1, 2]))\n False\n >>> is_complex_dtype(np.array([1 + 1j, 5]))\n True\n """\n return 
_is_dtype_type(arr_or_dtype, classes(np.complexfloating))\n\n\ndef _is_dtype(arr_or_dtype, condition) -> bool:\n """\n Return true if the condition is satisfied for the arr_or_dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like, str, np.dtype, or ExtensionArrayType\n The array-like or dtype object whose dtype we want to extract.\n condition : callable[Union[np.dtype, ExtensionDtype]]\n\n Returns\n -------\n bool\n\n """\n if arr_or_dtype is None:\n return False\n try:\n dtype = _get_dtype(arr_or_dtype)\n except (TypeError, ValueError):\n return False\n return condition(dtype)\n\n\ndef _get_dtype(arr_or_dtype) -> DtypeObj:\n """\n Get the dtype instance associated with an array\n or dtype object.\n\n Parameters\n ----------\n arr_or_dtype : array-like or dtype\n The array-like or dtype object whose dtype we want to extract.\n\n Returns\n -------\n obj_dtype : The extract dtype instance from the\n passed in array or dtype object.\n\n Raises\n ------\n TypeError : The passed in object is None.\n """\n if arr_or_dtype is None:\n raise TypeError("Cannot deduce dtype from null object")\n\n # fastpath\n if isinstance(arr_or_dtype, np.dtype):\n return arr_or_dtype\n elif isinstance(arr_or_dtype, type):\n return np.dtype(arr_or_dtype)\n\n # if we have an array-like\n elif hasattr(arr_or_dtype, "dtype"):\n arr_or_dtype = arr_or_dtype.dtype\n\n return pandas_dtype(arr_or_dtype)\n\n\ndef _is_dtype_type(arr_or_dtype, condition) -> bool:\n """\n Return true if the condition is satisfied for the arr_or_dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like or dtype\n The array-like or dtype object whose dtype we want to extract.\n condition : callable[Union[np.dtype, ExtensionDtypeType]]\n\n Returns\n -------\n bool : if the condition is satisfied for the arr_or_dtype\n """\n if arr_or_dtype is None:\n return condition(type(None))\n\n # fastpath\n if isinstance(arr_or_dtype, np.dtype):\n return condition(arr_or_dtype.type)\n elif isinstance(arr_or_dtype, 
type):\n if issubclass(arr_or_dtype, ExtensionDtype):\n arr_or_dtype = arr_or_dtype.type\n return condition(np.dtype(arr_or_dtype).type)\n\n # if we have an array-like\n if hasattr(arr_or_dtype, "dtype"):\n arr_or_dtype = arr_or_dtype.dtype\n\n # we are not possibly a dtype\n elif is_list_like(arr_or_dtype):\n return condition(type(None))\n\n try:\n tipo = pandas_dtype(arr_or_dtype).type\n except (TypeError, ValueError):\n if is_scalar(arr_or_dtype):\n return condition(type(None))\n\n return False\n\n return condition(tipo)\n\n\ndef infer_dtype_from_object(dtype) -> type:\n """\n Get a numpy dtype.type-style object for a dtype object.\n\n This methods also includes handling of the datetime64[ns] and\n datetime64[ns, TZ] objects.\n\n If no dtype can be found, we return ``object``.\n\n Parameters\n ----------\n dtype : dtype, type\n The dtype object whose numpy dtype.type-style\n object we want to extract.\n\n Returns\n -------\n type\n """\n if isinstance(dtype, type) and issubclass(dtype, np.generic):\n # Type object from a dtype\n\n return dtype\n elif isinstance(dtype, (np.dtype, ExtensionDtype)):\n # dtype object\n try:\n _validate_date_like_dtype(dtype)\n except TypeError:\n # Should still pass if we don't have a date-like\n pass\n if hasattr(dtype, "numpy_dtype"):\n # TODO: Implement this properly\n # https://github.com/pandas-dev/pandas/issues/52576\n return dtype.numpy_dtype.type\n return dtype.type\n\n try:\n dtype = pandas_dtype(dtype)\n except TypeError:\n pass\n\n if isinstance(dtype, ExtensionDtype):\n return dtype.type\n elif isinstance(dtype, str):\n # TODO(jreback)\n # should deprecate these\n if dtype in ["datetimetz", "datetime64tz"]:\n return DatetimeTZDtype.type\n elif dtype in ["period"]:\n raise NotImplementedError\n\n if dtype in ["datetime", "timedelta"]:\n dtype += "64"\n try:\n return infer_dtype_from_object(getattr(np, dtype))\n except (AttributeError, TypeError):\n # Handles cases like _get_dtype(int) i.e.,\n # Python objects that are 
valid dtypes\n # (unlike user-defined types, in general)\n #\n # TypeError handles the float16 type code of 'e'\n # further handle internal types\n pass\n\n return infer_dtype_from_object(np.dtype(dtype))\n\n\ndef _validate_date_like_dtype(dtype) -> None:\n """\n Check whether the dtype is a date-like dtype. Raises an error if invalid.\n\n Parameters\n ----------\n dtype : dtype, type\n The dtype to check.\n\n Raises\n ------\n TypeError : The dtype could not be casted to a date-like dtype.\n ValueError : The dtype is an illegal date-like dtype (e.g. the\n frequency provided is too specific)\n """\n try:\n typ = np.datetime_data(dtype)[0]\n except ValueError as e:\n raise TypeError(e) from e\n if typ not in ["generic", "ns"]:\n raise ValueError(\n f"{repr(dtype.name)} is too specific of a frequency, "\n f"try passing {repr(dtype.type.__name__)}"\n )\n\n\ndef validate_all_hashable(*args, error_name: str | None = None) -> None:\n """\n Return None if all args are hashable, else raise a TypeError.\n\n Parameters\n ----------\n *args\n Arguments to validate.\n error_name : str, optional\n The name to use if error\n\n Raises\n ------\n TypeError : If an argument is not hashable\n\n Returns\n -------\n None\n """\n if not all(is_hashable(arg) for arg in args):\n if error_name:\n raise TypeError(f"{error_name} must be a hashable type")\n raise TypeError("All elements must be hashable")\n\n\ndef pandas_dtype(dtype) -> DtypeObj:\n """\n Convert input into a pandas only dtype object or a numpy dtype object.\n\n Parameters\n ----------\n dtype : object to be converted\n\n Returns\n -------\n np.dtype or a pandas dtype\n\n Raises\n ------\n TypeError if not a dtype\n\n Examples\n --------\n >>> pd.api.types.pandas_dtype(int)\n dtype('int64')\n """\n # short-circuit\n if isinstance(dtype, np.ndarray):\n return dtype.dtype\n elif isinstance(dtype, (np.dtype, ExtensionDtype)):\n return dtype\n\n # builtin aliases\n if dtype is str and using_string_dtype():\n from 
pandas.core.arrays.string_ import StringDtype\n\n return StringDtype(na_value=np.nan)\n\n # registered extension types\n result = registry.find(dtype)\n if result is not None:\n if isinstance(result, type):\n # GH 31356, GH 54592\n warnings.warn(\n f"Instantiating {result.__name__} without any arguments."\n f"Pass a {result.__name__} instance to silence this warning.",\n UserWarning,\n stacklevel=find_stack_level(),\n )\n result = result()\n return result\n\n # try a numpy dtype\n # raise a consistent TypeError if failed\n try:\n with warnings.catch_warnings():\n # TODO: warnings.catch_warnings can be removed when numpy>2.3.0\n # is the minimum version\n # GH#51523 - Series.astype(np.integer) doesn't show\n # numpy deprecation warning of np.integer\n # Hence enabling DeprecationWarning\n warnings.simplefilter("always", DeprecationWarning)\n npdtype = np.dtype(dtype)\n except SyntaxError as err:\n # np.dtype uses `eval` which can raise SyntaxError\n raise TypeError(f"data type '{dtype}' not understood") from err\n\n # Any invalid dtype (such as pd.Timestamp) should raise an error.\n # np.dtype(invalid_type).kind = 0 for such objects. 
However, this will\n # also catch some valid dtypes such as object, np.object_ and 'object'\n # which we safeguard against by catching them earlier and returning\n # np.dtype(valid_dtype) before this condition is evaluated.\n if is_hashable(dtype) and dtype in [\n object,\n np.object_,\n "object",\n "O",\n "object_",\n ]:\n # check hashability to avoid errors/DeprecationWarning when we get\n # here and `dtype` is an array\n return npdtype\n elif npdtype.kind == "O":\n raise TypeError(f"dtype '{dtype}' not understood")\n\n return npdtype\n\n\ndef is_all_strings(value: ArrayLike) -> bool:\n """\n Check if this is an array of strings that we should try parsing.\n\n Includes object-dtype ndarray containing all-strings, StringArray,\n and Categorical with all-string categories.\n Does not include numpy string dtypes.\n """\n dtype = value.dtype\n\n if isinstance(dtype, np.dtype):\n if len(value) == 0:\n return dtype == np.dtype("object")\n else:\n return dtype == np.dtype("object") and lib.is_string_array(\n np.asarray(value), skipna=False\n )\n elif isinstance(dtype, CategoricalDtype):\n return dtype.categories.inferred_type == "string"\n return dtype == "string"\n\n\n__all__ = [\n "classes",\n "DT64NS_DTYPE",\n "ensure_float64",\n "ensure_python_int",\n "ensure_str",\n "infer_dtype_from_object",\n "INT64_DTYPE",\n "is_1d_only_ea_dtype",\n "is_all_strings",\n "is_any_real_numeric_dtype",\n "is_array_like",\n "is_bool",\n "is_bool_dtype",\n "is_categorical_dtype",\n "is_complex",\n "is_complex_dtype",\n "is_dataclass",\n "is_datetime64_any_dtype",\n "is_datetime64_dtype",\n "is_datetime64_ns_dtype",\n "is_datetime64tz_dtype",\n "is_decimal",\n "is_dict_like",\n "is_dtype_equal",\n "is_ea_or_datetimelike_dtype",\n "is_extension_array_dtype",\n "is_file_like",\n "is_float_dtype",\n "is_int64_dtype",\n "is_integer_dtype",\n "is_interval",\n "is_interval_dtype",\n "is_iterator",\n "is_named_tuple",\n "is_nested_list_like",\n "is_number",\n "is_numeric_dtype",\n 
"is_object_dtype",\n "is_period_dtype",\n "is_re",\n "is_re_compilable",\n "is_scipy_sparse",\n "is_sequence",\n "is_signed_integer_dtype",\n "is_sparse",\n "is_string_dtype",\n "is_string_or_object_np_dtype",\n "is_timedelta64_dtype",\n "is_timedelta64_ns_dtype",\n "is_unsigned_integer_dtype",\n "needs_i8_conversion",\n "pandas_dtype",\n "TD64NS_DTYPE",\n "validate_all_hashable",\n]\n
.venv\Lib\site-packages\pandas\core\dtypes\common.py
common.py
Python
48,001
0.95
0.09513
0.044726
awesome-app
697
2024-02-20T23:59:08.932143
MIT
false
2d7cd4fb1b32d9b52f57cb7f4f53da67
"""\nUtility functions related to concat.\n"""\nfrom __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n cast,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import lib\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.astype import astype_array\nfrom pandas.core.dtypes.cast import (\n common_dtype_categorical_compat,\n find_common_type,\n np_find_common_type,\n)\nfrom pandas.core.dtypes.dtypes import CategoricalDtype\nfrom pandas.core.dtypes.generic import (\n ABCCategoricalIndex,\n ABCSeries,\n)\n\nif TYPE_CHECKING:\n from collections.abc import Sequence\n\n from pandas._typing import (\n ArrayLike,\n AxisInt,\n DtypeObj,\n )\n\n from pandas.core.arrays import (\n Categorical,\n ExtensionArray,\n )\n\n\ndef _is_nonempty(x, axis) -> bool:\n # filter empty arrays\n # 1-d dtypes always are included here\n if x.ndim <= axis:\n return True\n return x.shape[axis] > 0\n\n\ndef concat_compat(\n to_concat: Sequence[ArrayLike], axis: AxisInt = 0, ea_compat_axis: bool = False\n) -> ArrayLike:\n """\n provide concatenation of an array of arrays each of which is a single\n 'normalized' dtypes (in that for example, if it's object, then it is a\n non-datetimelike and provide a combined dtype for the resulting array that\n preserves the overall dtype if possible)\n\n Parameters\n ----------\n to_concat : sequence of arrays\n axis : axis to provide concatenation\n ea_compat_axis : bool, default False\n For ExtensionArray compat, behave as if axis == 1 when determining\n whether to drop empty arrays.\n\n Returns\n -------\n a single array, preserving the combined dtypes\n """\n if len(to_concat) and lib.dtypes_all_equal([obj.dtype for obj in to_concat]):\n # fastpath!\n obj = to_concat[0]\n if isinstance(obj, np.ndarray):\n to_concat_arrs = cast("Sequence[np.ndarray]", to_concat)\n return np.concatenate(to_concat_arrs, axis=axis)\n\n to_concat_eas = cast("Sequence[ExtensionArray]", to_concat)\n if 
ea_compat_axis:\n # We have 1D objects, that don't support axis keyword\n return obj._concat_same_type(to_concat_eas)\n elif axis == 0:\n return obj._concat_same_type(to_concat_eas)\n else:\n # e.g. DatetimeArray\n # NB: We are assuming here that ensure_wrapped_if_arraylike has\n # been called where relevant.\n return obj._concat_same_type(\n # error: Unexpected keyword argument "axis" for "_concat_same_type"\n # of "ExtensionArray"\n to_concat_eas,\n axis=axis, # type: ignore[call-arg]\n )\n\n # If all arrays are empty, there's nothing to convert, just short-cut to\n # the concatenation, #3121.\n #\n # Creating an empty array directly is tempting, but the winnings would be\n # marginal given that it would still require shape & dtype calculation and\n # np.concatenate which has them both implemented is compiled.\n orig = to_concat\n non_empties = [x for x in to_concat if _is_nonempty(x, axis)]\n if non_empties and axis == 0 and not ea_compat_axis:\n # ea_compat_axis see GH#39574\n to_concat = non_empties\n\n any_ea, kinds, target_dtype = _get_result_dtype(to_concat, non_empties)\n\n if len(to_concat) < len(orig):\n _, _, alt_dtype = _get_result_dtype(orig, non_empties)\n if alt_dtype != target_dtype:\n # GH#39122\n warnings.warn(\n "The behavior of array concatenation with empty entries is "\n "deprecated. In a future version, this will no longer exclude "\n "empty items when determining the result dtype. "\n "To retain the old behavior, exclude the empty entries before "\n "the concat operation.",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n\n if target_dtype is not None:\n to_concat = [astype_array(arr, target_dtype, copy=False) for arr in to_concat]\n\n if not isinstance(to_concat[0], np.ndarray):\n # i.e. isinstance(to_concat[0], ExtensionArray)\n to_concat_eas = cast("Sequence[ExtensionArray]", to_concat)\n cls = type(to_concat[0])\n # GH#53640: eg. 
for datetime array, axis=1 but 0 is default\n # However, class method `_concat_same_type()` for some classes\n # may not support the `axis` keyword\n if ea_compat_axis or axis == 0:\n return cls._concat_same_type(to_concat_eas)\n else:\n return cls._concat_same_type(\n to_concat_eas,\n axis=axis, # type: ignore[call-arg]\n )\n else:\n to_concat_arrs = cast("Sequence[np.ndarray]", to_concat)\n result = np.concatenate(to_concat_arrs, axis=axis)\n\n if not any_ea and "b" in kinds and result.dtype.kind in "iuf":\n # GH#39817 cast to object instead of casting bools to numeric\n result = result.astype(object, copy=False)\n return result\n\n\ndef _get_result_dtype(\n to_concat: Sequence[ArrayLike], non_empties: Sequence[ArrayLike]\n) -> tuple[bool, set[str], DtypeObj | None]:\n target_dtype = None\n\n dtypes = {obj.dtype for obj in to_concat}\n kinds = {obj.dtype.kind for obj in to_concat}\n\n any_ea = any(not isinstance(x, np.ndarray) for x in to_concat)\n if any_ea:\n # i.e. any ExtensionArrays\n\n # we ignore axis here, as internally concatting with EAs is always\n # for axis=0\n if len(dtypes) != 1:\n target_dtype = find_common_type([x.dtype for x in to_concat])\n target_dtype = common_dtype_categorical_compat(to_concat, target_dtype)\n\n elif not len(non_empties):\n # we have all empties, but may need to coerce the result dtype to\n # object if we have non-numeric type operands (numpy would otherwise\n # cast this to float)\n if len(kinds) != 1:\n if not len(kinds - {"i", "u", "f"}) or not len(kinds - {"b", "i", "u"}):\n # let numpy coerce\n pass\n else:\n # coerce to object\n target_dtype = np.dtype(object)\n kinds = {"o"}\n else:\n # error: Argument 1 to "np_find_common_type" has incompatible type\n # "*Set[Union[ExtensionDtype, Any]]"; expected "dtype[Any]"\n target_dtype = np_find_common_type(*dtypes) # type: ignore[arg-type]\n\n return any_ea, kinds, target_dtype\n\n\ndef union_categoricals(\n to_union, sort_categories: bool = False, ignore_order: bool = 
False\n) -> Categorical:\n """\n Combine list-like of Categorical-like, unioning categories.\n\n All categories must have the same dtype.\n\n Parameters\n ----------\n to_union : list-like\n Categorical, CategoricalIndex, or Series with dtype='category'.\n sort_categories : bool, default False\n If true, resulting categories will be lexsorted, otherwise\n they will be ordered as they appear in the data.\n ignore_order : bool, default False\n If true, the ordered attribute of the Categoricals will be ignored.\n Results in an unordered categorical.\n\n Returns\n -------\n Categorical\n\n Raises\n ------\n TypeError\n - all inputs do not have the same dtype\n - all inputs do not have the same ordered property\n - all inputs are ordered and their categories are not identical\n - sort_categories=True and Categoricals are ordered\n ValueError\n Empty list of categoricals passed\n\n Notes\n -----\n To learn more about categories, see `link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html#unioning>`__\n\n Examples\n --------\n If you want to combine categoricals that do not necessarily have\n the same categories, `union_categoricals` will combine a list-like\n of categoricals. The new categories will be the union of the\n categories being combined.\n\n >>> a = pd.Categorical(["b", "c"])\n >>> b = pd.Categorical(["a", "b"])\n >>> pd.api.types.union_categoricals([a, b])\n ['b', 'c', 'a', 'b']\n Categories (3, object): ['b', 'c', 'a']\n\n By default, the resulting categories will be ordered as they appear\n in the `categories` of the data. If you want the categories to be\n lexsorted, use `sort_categories=True` argument.\n\n >>> pd.api.types.union_categoricals([a, b], sort_categories=True)\n ['b', 'c', 'a', 'b']\n Categories (3, object): ['a', 'b', 'c']\n\n `union_categoricals` also works with the case of combining two\n categoricals of the same categories and order information (e.g. 
what\n you could also `append` for).\n\n >>> a = pd.Categorical(["a", "b"], ordered=True)\n >>> b = pd.Categorical(["a", "b", "a"], ordered=True)\n >>> pd.api.types.union_categoricals([a, b])\n ['a', 'b', 'a', 'b', 'a']\n Categories (2, object): ['a' < 'b']\n\n Raises `TypeError` because the categories are ordered and not identical.\n\n >>> a = pd.Categorical(["a", "b"], ordered=True)\n >>> b = pd.Categorical(["a", "b", "c"], ordered=True)\n >>> pd.api.types.union_categoricals([a, b])\n Traceback (most recent call last):\n ...\n TypeError: to union ordered Categoricals, all categories must be the same\n\n Ordered categoricals with different categories or orderings can be\n combined by using the `ignore_ordered=True` argument.\n\n >>> a = pd.Categorical(["a", "b", "c"], ordered=True)\n >>> b = pd.Categorical(["c", "b", "a"], ordered=True)\n >>> pd.api.types.union_categoricals([a, b], ignore_order=True)\n ['a', 'b', 'c', 'c', 'b', 'a']\n Categories (3, object): ['a', 'b', 'c']\n\n `union_categoricals` also works with a `CategoricalIndex`, or `Series`\n containing categorical data, but note that the resulting array will\n always be a plain `Categorical`\n\n >>> a = pd.Series(["b", "c"], dtype='category')\n >>> b = pd.Series(["a", "b"], dtype='category')\n >>> pd.api.types.union_categoricals([a, b])\n ['b', 'c', 'a', 'b']\n Categories (3, object): ['b', 'c', 'a']\n """\n from pandas import Categorical\n from pandas.core.arrays.categorical import recode_for_categories\n\n if len(to_union) == 0:\n raise ValueError("No Categoricals to union")\n\n def _maybe_unwrap(x):\n if isinstance(x, (ABCCategoricalIndex, ABCSeries)):\n return x._values\n elif isinstance(x, Categorical):\n return x\n else:\n raise TypeError("all components to combine must be Categorical")\n\n to_union = [_maybe_unwrap(x) for x in to_union]\n first = to_union[0]\n\n if not lib.dtypes_all_equal([obj.categories.dtype for obj in to_union]):\n raise TypeError("dtype of categories must be the same")\n\n 
ordered = False\n if all(first._categories_match_up_to_permutation(other) for other in to_union[1:]):\n # identical categories - fastpath\n categories = first.categories\n ordered = first.ordered\n\n all_codes = [first._encode_with_my_categories(x)._codes for x in to_union]\n new_codes = np.concatenate(all_codes)\n\n if sort_categories and not ignore_order and ordered:\n raise TypeError("Cannot use sort_categories=True with ordered Categoricals")\n\n if sort_categories and not categories.is_monotonic_increasing:\n categories = categories.sort_values()\n indexer = categories.get_indexer(first.categories)\n\n from pandas.core.algorithms import take_nd\n\n new_codes = take_nd(indexer, new_codes, fill_value=-1)\n elif ignore_order or all(not c.ordered for c in to_union):\n # different categories - union and recode\n cats = first.categories.append([c.categories for c in to_union[1:]])\n categories = cats.unique()\n if sort_categories:\n categories = categories.sort_values()\n\n new_codes = [\n recode_for_categories(c.codes, c.categories, categories) for c in to_union\n ]\n new_codes = np.concatenate(new_codes)\n else:\n # ordered - to show a proper error message\n if all(c.ordered for c in to_union):\n msg = "to union ordered Categoricals, all categories must be the same"\n raise TypeError(msg)\n raise TypeError("Categorical.ordered must be the same")\n\n if ignore_order:\n ordered = False\n\n dtype = CategoricalDtype(categories=categories, ordered=ordered)\n return Categorical._simple_new(new_codes, dtype=dtype)\n
.venv\Lib\site-packages\pandas\core\dtypes\concat.py
concat.py
Python
12,579
0.95
0.166667
0.121107
awesome-app
152
2023-12-28T12:18:12.054521
MIT
false
a47bf59ffbf748266d09e42c1c23acae
"""\nDefine extension dtypes.\n"""\nfrom __future__ import annotations\n\nfrom datetime import (\n date,\n datetime,\n time,\n timedelta,\n)\nfrom decimal import Decimal\nimport re\nfrom typing import (\n TYPE_CHECKING,\n Any,\n cast,\n)\nimport warnings\n\nimport numpy as np\nimport pytz\n\nfrom pandas._libs import (\n lib,\n missing as libmissing,\n)\nfrom pandas._libs.interval import Interval\nfrom pandas._libs.properties import cache_readonly\nfrom pandas._libs.tslibs import (\n BaseOffset,\n NaT,\n NaTType,\n Period,\n Timedelta,\n Timestamp,\n timezones,\n to_offset,\n tz_compare,\n)\nfrom pandas._libs.tslibs.dtypes import (\n PeriodDtypeBase,\n abbrev_to_npy_unit,\n)\nfrom pandas._libs.tslibs.offsets import BDay\nfrom pandas.compat import pa_version_under10p1\nfrom pandas.errors import PerformanceWarning\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.base import (\n ExtensionDtype,\n StorageExtensionDtype,\n register_extension_dtype,\n)\nfrom pandas.core.dtypes.generic import (\n ABCCategoricalIndex,\n ABCIndex,\n ABCRangeIndex,\n)\nfrom pandas.core.dtypes.inference import (\n is_bool,\n is_list_like,\n)\n\nfrom pandas.util import capitalize_first_letter\n\nif not pa_version_under10p1:\n import pyarrow as pa\n\nif TYPE_CHECKING:\n from collections.abc import MutableMapping\n from datetime import tzinfo\n\n import pyarrow as pa # noqa: TCH004\n\n from pandas._typing import (\n Dtype,\n DtypeObj,\n IntervalClosedType,\n Ordered,\n Self,\n npt,\n type_t,\n )\n\n from pandas import (\n Categorical,\n CategoricalIndex,\n DatetimeIndex,\n Index,\n IntervalIndex,\n PeriodIndex,\n )\n from pandas.core.arrays import (\n BaseMaskedArray,\n DatetimeArray,\n IntervalArray,\n NumpyExtensionArray,\n PeriodArray,\n SparseArray,\n )\n from pandas.core.arrays.arrow import ArrowExtensionArray\n\nstr_type = str\n\n\nclass PandasExtensionDtype(ExtensionDtype):\n """\n A np.dtype duck-typed class, suitable for holding a custom dtype.\n\n THIS IS 
NOT A REAL NUMPY DTYPE\n """\n\n type: Any\n kind: Any\n # The Any type annotations above are here only because mypy seems to have a\n # problem dealing with multiple inheritance from PandasExtensionDtype\n # and ExtensionDtype's @properties in the subclasses below. The kind and\n # type variables in those subclasses are explicitly typed below.\n subdtype = None\n str: str_type\n num = 100\n shape: tuple[int, ...] = ()\n itemsize = 8\n base: DtypeObj | None = None\n isbuiltin = 0\n isnative = 0\n _cache_dtypes: dict[str_type, PandasExtensionDtype] = {}\n\n def __repr__(self) -> str_type:\n """\n Return a string representation for a particular object.\n """\n return str(self)\n\n def __hash__(self) -> int:\n raise NotImplementedError("sub-classes should implement an __hash__ method")\n\n def __getstate__(self) -> dict[str_type, Any]:\n # pickle support; we don't want to pickle the cache\n return {k: getattr(self, k, None) for k in self._metadata}\n\n @classmethod\n def reset_cache(cls) -> None:\n """clear the cache"""\n cls._cache_dtypes = {}\n\n\nclass CategoricalDtypeType(type):\n """\n the type of CategoricalDtype, this metaclass determines subclass ability\n """\n\n\n@register_extension_dtype\nclass CategoricalDtype(PandasExtensionDtype, ExtensionDtype):\n """\n Type for categorical data with the categories and orderedness.\n\n Parameters\n ----------\n categories : sequence, optional\n Must be unique, and must not contain any nulls.\n The categories are stored in an Index,\n and if an index is provided the dtype of that index will be used.\n ordered : bool or None, default False\n Whether or not this categorical is treated as a ordered categorical.\n None can be used to maintain the ordered value of existing categoricals when\n used in operations that combine categoricals, e.g. 
astype, and will resolve to\n False if there is no existing ordered to maintain.\n\n Attributes\n ----------\n categories\n ordered\n\n Methods\n -------\n None\n\n See Also\n --------\n Categorical : Represent a categorical variable in classic R / S-plus fashion.\n\n Notes\n -----\n This class is useful for specifying the type of a ``Categorical``\n independent of the values. See :ref:`categorical.categoricaldtype`\n for more.\n\n Examples\n --------\n >>> t = pd.CategoricalDtype(categories=['b', 'a'], ordered=True)\n >>> pd.Series(['a', 'b', 'a', 'c'], dtype=t)\n 0 a\n 1 b\n 2 a\n 3 NaN\n dtype: category\n Categories (2, object): ['b' < 'a']\n\n An empty CategoricalDtype with a specific dtype can be created\n by providing an empty index. As follows,\n\n >>> pd.CategoricalDtype(pd.DatetimeIndex([])).categories.dtype\n dtype('<M8[ns]')\n """\n\n # TODO: Document public vs. private API\n name = "category"\n type: type[CategoricalDtypeType] = CategoricalDtypeType\n kind: str_type = "O"\n str = "|O08"\n base = np.dtype("O")\n _metadata = ("categories", "ordered")\n _cache_dtypes: dict[str_type, PandasExtensionDtype] = {}\n _supports_2d = False\n _can_fast_transpose = False\n\n def __init__(self, categories=None, ordered: Ordered = False) -> None:\n self._finalize(categories, ordered, fastpath=False)\n\n @classmethod\n def _from_fastpath(\n cls, categories=None, ordered: bool | None = None\n ) -> CategoricalDtype:\n self = cls.__new__(cls)\n self._finalize(categories, ordered, fastpath=True)\n return self\n\n @classmethod\n def _from_categorical_dtype(\n cls, dtype: CategoricalDtype, categories=None, ordered: Ordered | None = None\n ) -> CategoricalDtype:\n if categories is ordered is None:\n return dtype\n if categories is None:\n categories = dtype.categories\n if ordered is None:\n ordered = dtype.ordered\n return cls(categories, ordered)\n\n @classmethod\n def _from_values_or_dtype(\n cls,\n values=None,\n categories=None,\n ordered: bool | None = None,\n dtype: 
Dtype | None = None,\n ) -> CategoricalDtype:\n """\n Construct dtype from the input parameters used in :class:`Categorical`.\n\n This constructor method specifically does not do the factorization\n step, if that is needed to find the categories. This constructor may\n therefore return ``CategoricalDtype(categories=None, ordered=None)``,\n which may not be useful. Additional steps may therefore have to be\n taken to create the final dtype.\n\n The return dtype is specified from the inputs in this prioritized\n order:\n 1. if dtype is a CategoricalDtype, return dtype\n 2. if dtype is the string 'category', create a CategoricalDtype from\n the supplied categories and ordered parameters, and return that.\n 3. if values is a categorical, use value.dtype, but override it with\n categories and ordered if either/both of those are not None.\n 4. if dtype is None and values is not a categorical, construct the\n dtype from categories and ordered, even if either of those is None.\n\n Parameters\n ----------\n values : list-like, optional\n The list-like must be 1-dimensional.\n categories : list-like, optional\n Categories for the CategoricalDtype.\n ordered : bool, optional\n Designating if the categories are ordered.\n dtype : CategoricalDtype or the string "category", optional\n If ``CategoricalDtype``, cannot be used together with\n `categories` or `ordered`.\n\n Returns\n -------\n CategoricalDtype\n\n Examples\n --------\n >>> pd.CategoricalDtype._from_values_or_dtype()\n CategoricalDtype(categories=None, ordered=None, categories_dtype=None)\n >>> pd.CategoricalDtype._from_values_or_dtype(\n ... categories=['a', 'b'], ordered=True\n ... )\n CategoricalDtype(categories=['a', 'b'], ordered=True, categories_dtype=object)\n >>> dtype1 = pd.CategoricalDtype(['a', 'b'], ordered=True)\n >>> dtype2 = pd.CategoricalDtype(['x', 'y'], ordered=False)\n >>> c = pd.Categorical([0, 1], dtype=dtype1)\n >>> pd.CategoricalDtype._from_values_or_dtype(\n ... 
c, ['x', 'y'], ordered=True, dtype=dtype2\n ... )\n Traceback (most recent call last):\n ...\n ValueError: Cannot specify `categories` or `ordered` together with\n `dtype`.\n\n The supplied dtype takes precedence over values' dtype:\n\n >>> pd.CategoricalDtype._from_values_or_dtype(c, dtype=dtype2)\n CategoricalDtype(categories=['x', 'y'], ordered=False, categories_dtype=object)\n """\n\n if dtype is not None:\n # The dtype argument takes precedence over values.dtype (if any)\n if isinstance(dtype, str):\n if dtype == "category":\n if ordered is None and cls.is_dtype(values):\n # GH#49309 preserve orderedness\n ordered = values.dtype.ordered\n\n dtype = CategoricalDtype(categories, ordered)\n else:\n raise ValueError(f"Unknown dtype {repr(dtype)}")\n elif categories is not None or ordered is not None:\n raise ValueError(\n "Cannot specify `categories` or `ordered` together with `dtype`."\n )\n elif not isinstance(dtype, CategoricalDtype):\n raise ValueError(f"Cannot not construct CategoricalDtype from {dtype}")\n elif cls.is_dtype(values):\n # If no "dtype" was passed, use the one from "values", but honor\n # the "ordered" and "categories" arguments\n dtype = values.dtype._from_categorical_dtype(\n values.dtype, categories, ordered\n )\n else:\n # If dtype=None and values is not categorical, create a new dtype.\n # Note: This could potentially have categories=None and\n # ordered=None.\n dtype = CategoricalDtype(categories, ordered)\n\n return cast(CategoricalDtype, dtype)\n\n @classmethod\n def construct_from_string(cls, string: str_type) -> CategoricalDtype:\n """\n Construct a CategoricalDtype from a string.\n\n Parameters\n ----------\n string : str\n Must be the string "category" in order to be successfully constructed.\n\n Returns\n -------\n CategoricalDtype\n Instance of the dtype.\n\n Raises\n ------\n TypeError\n If a CategoricalDtype cannot be constructed from the input.\n """\n if not isinstance(string, str):\n raise TypeError(\n 
f"'construct_from_string' expects a string, got {type(string)}"\n )\n if string != cls.name:\n raise TypeError(f"Cannot construct a 'CategoricalDtype' from '{string}'")\n\n # need ordered=None to ensure that operations specifying dtype="category" don't\n # override the ordered value for existing categoricals\n return cls(ordered=None)\n\n def _finalize(self, categories, ordered: Ordered, fastpath: bool = False) -> None:\n if ordered is not None:\n self.validate_ordered(ordered)\n\n if categories is not None:\n categories = self.validate_categories(categories, fastpath=fastpath)\n\n self._categories = categories\n self._ordered = ordered\n\n def __setstate__(self, state: MutableMapping[str_type, Any]) -> None:\n # for pickle compat. __get_state__ is defined in the\n # PandasExtensionDtype superclass and uses the public properties to\n # pickle -> need to set the settable private ones here (see GH26067)\n self._categories = state.pop("categories", None)\n self._ordered = state.pop("ordered", False)\n\n def __hash__(self) -> int:\n # _hash_categories returns a uint64, so use the negative\n # space for when we have unknown categories to avoid a conflict\n if self.categories is None:\n if self.ordered:\n return -1\n else:\n return -2\n # We *do* want to include the real self.ordered here\n return int(self._hash_categories)\n\n def __eq__(self, other: object) -> bool:\n """\n Rules for CDT equality:\n 1) Any CDT is equal to the string 'category'\n 2) Any CDT is equal to itself\n 3) Any CDT is equal to a CDT with categories=None regardless of ordered\n 4) A CDT with ordered=True is only equal to another CDT with\n ordered=True and identical categories in the same order\n 5) A CDT with ordered={False, None} is only equal to another CDT with\n ordered={False, None} and identical categories, but same order is\n not required. 
There is no distinction between False/None.\n 6) Any other comparison returns False\n """\n if isinstance(other, str):\n return other == self.name\n elif other is self:\n return True\n elif not (hasattr(other, "ordered") and hasattr(other, "categories")):\n return False\n elif self.categories is None or other.categories is None:\n # For non-fully-initialized dtypes, these are only equal to\n # - the string "category" (handled above)\n # - other CategoricalDtype with categories=None\n return self.categories is other.categories\n elif self.ordered or other.ordered:\n # At least one has ordered=True; equal if both have ordered=True\n # and the same values for categories in the same order.\n return (self.ordered == other.ordered) and self.categories.equals(\n other.categories\n )\n else:\n # Neither has ordered=True; equal if both have the same categories,\n # but same order is not necessary. There is no distinction between\n # ordered=False and ordered=None: CDT(., False) and CDT(., None)\n # will be equal if they have the same categories.\n left = self.categories\n right = other.categories\n\n # GH#36280 the ordering of checks here is for performance\n if not left.dtype == right.dtype:\n return False\n\n if len(left) != len(right):\n return False\n\n if self.categories.equals(other.categories):\n # Check and see if they happen to be identical categories\n return True\n\n if left.dtype != object:\n # Faster than calculating hash\n indexer = left.get_indexer(right)\n # Because left and right have the same length and are unique,\n # `indexer` not having any -1s implies that there is a\n # bijection between `left` and `right`.\n return bool((indexer != -1).all())\n\n # With object-dtype we need a comparison that identifies\n # e.g. 
int(2) as distinct from float(2)\n return set(left) == set(right)\n\n def __repr__(self) -> str_type:\n if self.categories is None:\n data = "None"\n dtype = "None"\n else:\n data = self.categories._format_data(name=type(self).__name__)\n if isinstance(self.categories, ABCRangeIndex):\n data = str(self.categories._range)\n data = data.rstrip(", ")\n dtype = self.categories.dtype\n\n return (\n f"CategoricalDtype(categories={data}, ordered={self.ordered}, "\n f"categories_dtype={dtype})"\n )\n\n @cache_readonly\n def _hash_categories(self) -> int:\n from pandas.core.util.hashing import (\n combine_hash_arrays,\n hash_array,\n hash_tuples,\n )\n\n categories = self.categories\n ordered = self.ordered\n\n if len(categories) and isinstance(categories[0], tuple):\n # assumes if any individual category is a tuple, then all our. ATM\n # I don't really want to support just some of the categories being\n # tuples.\n cat_list = list(categories) # breaks if a np.array of categories\n cat_array = hash_tuples(cat_list)\n else:\n if categories.dtype == "O" and len({type(x) for x in categories}) != 1:\n # TODO: hash_array doesn't handle mixed types. 
It casts\n # everything to a str first, which means we treat\n # {'1', '2'} the same as {'1', 2}\n # find a better solution\n hashed = hash((tuple(categories), ordered))\n return hashed\n\n if DatetimeTZDtype.is_dtype(categories.dtype):\n # Avoid future warning.\n categories = categories.view("datetime64[ns]")\n\n cat_array = hash_array(np.asarray(categories), categorize=False)\n if ordered:\n cat_array = np.vstack(\n [cat_array, np.arange(len(cat_array), dtype=cat_array.dtype)]\n )\n else:\n cat_array = np.array([cat_array])\n combined_hashed = combine_hash_arrays(iter(cat_array), num_items=len(cat_array))\n return np.bitwise_xor.reduce(combined_hashed)\n\n @classmethod\n def construct_array_type(cls) -> type_t[Categorical]:\n """\n Return the array type associated with this dtype.\n\n Returns\n -------\n type\n """\n from pandas import Categorical\n\n return Categorical\n\n @staticmethod\n def validate_ordered(ordered: Ordered) -> None:\n """\n Validates that we have a valid ordered parameter. 
If\n it is not a boolean, a TypeError will be raised.\n\n Parameters\n ----------\n ordered : object\n The parameter to be verified.\n\n Raises\n ------\n TypeError\n If 'ordered' is not a boolean.\n """\n if not is_bool(ordered):\n raise TypeError("'ordered' must either be 'True' or 'False'")\n\n @staticmethod\n def validate_categories(categories, fastpath: bool = False) -> Index:\n """\n Validates that we have good categories\n\n Parameters\n ----------\n categories : array-like\n fastpath : bool\n Whether to skip nan and uniqueness checks\n\n Returns\n -------\n categories : Index\n """\n from pandas.core.indexes.base import Index\n\n if not fastpath and not is_list_like(categories):\n raise TypeError(\n f"Parameter 'categories' must be list-like, was {repr(categories)}"\n )\n if not isinstance(categories, ABCIndex):\n categories = Index._with_infer(categories, tupleize_cols=False)\n\n if not fastpath:\n if categories.hasnans:\n raise ValueError("Categorical categories cannot be null")\n\n if not categories.is_unique:\n raise ValueError("Categorical categories must be unique")\n\n if isinstance(categories, ABCCategoricalIndex):\n categories = categories.categories\n\n return categories\n\n def update_dtype(self, dtype: str_type | CategoricalDtype) -> CategoricalDtype:\n """\n Returns a CategoricalDtype with categories and ordered taken from dtype\n if specified, otherwise falling back to self if unspecified\n\n Parameters\n ----------\n dtype : CategoricalDtype\n\n Returns\n -------\n new_dtype : CategoricalDtype\n """\n if isinstance(dtype, str) and dtype == "category":\n # dtype='category' should not change anything\n return self\n elif not self.is_dtype(dtype):\n raise ValueError(\n f"a CategoricalDtype must be passed to perform an update, "\n f"got {repr(dtype)}"\n )\n else:\n # from here on, dtype is a CategoricalDtype\n dtype = cast(CategoricalDtype, dtype)\n\n # update categories/ordered unless they've been explicitly passed as None\n new_categories = (\n 
dtype.categories if dtype.categories is not None else self.categories\n )\n new_ordered = dtype.ordered if dtype.ordered is not None else self.ordered\n\n return CategoricalDtype(new_categories, new_ordered)\n\n @property\n def categories(self) -> Index:\n """\n An ``Index`` containing the unique categories allowed.\n\n Examples\n --------\n >>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=True)\n >>> cat_type.categories\n Index(['a', 'b'], dtype='object')\n """\n return self._categories\n\n @property\n def ordered(self) -> Ordered:\n """\n Whether the categories have an ordered relationship.\n\n Examples\n --------\n >>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=True)\n >>> cat_type.ordered\n True\n\n >>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=False)\n >>> cat_type.ordered\n False\n """\n return self._ordered\n\n @property\n def _is_boolean(self) -> bool:\n from pandas.core.dtypes.common import is_bool_dtype\n\n return is_bool_dtype(self.categories)\n\n def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:\n # check if we have all categorical dtype with identical categories\n if all(isinstance(x, CategoricalDtype) for x in dtypes):\n first = dtypes[0]\n if all(first == other for other in dtypes[1:]):\n return first\n\n # special case non-initialized categorical\n # TODO we should figure out the expected return value in general\n non_init_cats = [\n isinstance(x, CategoricalDtype) and x.categories is None for x in dtypes\n ]\n if all(non_init_cats):\n return self\n elif any(non_init_cats):\n return None\n\n # categorical is aware of Sparse -> extract sparse subdtypes\n dtypes = [x.subtype if isinstance(x, SparseDtype) else x for x in dtypes]\n # extract the categories' dtype\n non_cat_dtypes = [\n x.categories.dtype if isinstance(x, CategoricalDtype) else x for x in dtypes\n ]\n # TODO should categorical always give an answer?\n from pandas.core.dtypes.cast import find_common_type\n\n 
return find_common_type(non_cat_dtypes)\n\n @cache_readonly\n def index_class(self) -> type_t[CategoricalIndex]:\n from pandas import CategoricalIndex\n\n return CategoricalIndex\n\n\n@register_extension_dtype\nclass DatetimeTZDtype(PandasExtensionDtype):\n """\n An ExtensionDtype for timezone-aware datetime data.\n\n **This is not an actual numpy dtype**, but a duck type.\n\n Parameters\n ----------\n unit : str, default "ns"\n The precision of the datetime data. Currently limited\n to ``"ns"``.\n tz : str, int, or datetime.tzinfo\n The timezone.\n\n Attributes\n ----------\n unit\n tz\n\n Methods\n -------\n None\n\n Raises\n ------\n ZoneInfoNotFoundError\n When the requested timezone cannot be found.\n\n Examples\n --------\n >>> from zoneinfo import ZoneInfo\n >>> pd.DatetimeTZDtype(tz=ZoneInfo('UTC'))\n datetime64[ns, UTC]\n\n >>> pd.DatetimeTZDtype(tz=ZoneInfo('Europe/Paris'))\n datetime64[ns, Europe/Paris]\n """\n\n type: type[Timestamp] = Timestamp\n kind: str_type = "M"\n num = 101\n _metadata = ("unit", "tz")\n _match = re.compile(r"(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]")\n _cache_dtypes: dict[str_type, PandasExtensionDtype] = {}\n _supports_2d = True\n _can_fast_transpose = True\n\n @property\n def na_value(self) -> NaTType:\n return NaT\n\n @cache_readonly\n def base(self) -> DtypeObj: # type: ignore[override]\n return np.dtype(f"M8[{self.unit}]")\n\n # error: Signature of "str" incompatible with supertype "PandasExtensionDtype"\n @cache_readonly\n def str(self) -> str: # type: ignore[override]\n return f"|M8[{self.unit}]"\n\n def __init__(self, unit: str_type | DatetimeTZDtype = "ns", tz=None) -> None:\n if isinstance(unit, DatetimeTZDtype):\n # error: "str" has no attribute "tz"\n unit, tz = unit.unit, unit.tz # type: ignore[attr-defined]\n\n if unit != "ns":\n if isinstance(unit, str) and tz is None:\n # maybe a string like datetime64[ns, tz], which we support for\n # now.\n result = type(self).construct_from_string(unit)\n unit = 
result.unit\n tz = result.tz\n msg = (\n f"Passing a dtype alias like 'datetime64[ns, {tz}]' "\n "to DatetimeTZDtype is no longer supported. Use "\n "'DatetimeTZDtype.construct_from_string()' instead."\n )\n raise ValueError(msg)\n if unit not in ["s", "ms", "us", "ns"]:\n raise ValueError("DatetimeTZDtype only supports s, ms, us, ns units")\n\n if tz:\n tz = timezones.maybe_get_tz(tz)\n tz = timezones.tz_standardize(tz)\n elif tz is not None:\n raise pytz.UnknownTimeZoneError(tz)\n if tz is None:\n raise TypeError("A 'tz' is required.")\n\n self._unit = unit\n self._tz = tz\n\n @cache_readonly\n def _creso(self) -> int:\n """\n The NPY_DATETIMEUNIT corresponding to this dtype's resolution.\n """\n return abbrev_to_npy_unit(self.unit)\n\n @property\n def unit(self) -> str_type:\n """\n The precision of the datetime data.\n\n Examples\n --------\n >>> from zoneinfo import ZoneInfo\n >>> dtype = pd.DatetimeTZDtype(tz=ZoneInfo('America/Los_Angeles'))\n >>> dtype.unit\n 'ns'\n """\n return self._unit\n\n @property\n def tz(self) -> tzinfo:\n """\n The timezone.\n\n Examples\n --------\n >>> from zoneinfo import ZoneInfo\n >>> dtype = pd.DatetimeTZDtype(tz=ZoneInfo('America/Los_Angeles'))\n >>> dtype.tz\n zoneinfo.ZoneInfo(key='America/Los_Angeles')\n """\n return self._tz\n\n @classmethod\n def construct_array_type(cls) -> type_t[DatetimeArray]:\n """\n Return the array type associated with this dtype.\n\n Returns\n -------\n type\n """\n from pandas.core.arrays import DatetimeArray\n\n return DatetimeArray\n\n @classmethod\n def construct_from_string(cls, string: str_type) -> DatetimeTZDtype:\n """\n Construct a DatetimeTZDtype from a string.\n\n Parameters\n ----------\n string : str\n The string alias for this DatetimeTZDtype.\n Should be formatted like ``datetime64[ns, <tz>]``,\n where ``<tz>`` is the timezone name.\n\n Examples\n --------\n >>> DatetimeTZDtype.construct_from_string('datetime64[ns, UTC]')\n datetime64[ns, UTC]\n """\n if not isinstance(string, 
str):\n raise TypeError(\n f"'construct_from_string' expects a string, got {type(string)}"\n )\n\n msg = f"Cannot construct a 'DatetimeTZDtype' from '{string}'"\n match = cls._match.match(string)\n if match:\n d = match.groupdict()\n try:\n return cls(unit=d["unit"], tz=d["tz"])\n except (KeyError, TypeError, ValueError) as err:\n # KeyError if maybe_get_tz tries and fails to get a\n # pytz timezone (actually pytz.UnknownTimeZoneError).\n # TypeError if we pass a nonsense tz;\n # ValueError if we pass a unit other than "ns"\n raise TypeError(msg) from err\n raise TypeError(msg)\n\n def __str__(self) -> str_type:\n return f"datetime64[{self.unit}, {self.tz}]"\n\n @property\n def name(self) -> str_type:\n """A string representation of the dtype."""\n return str(self)\n\n def __hash__(self) -> int:\n # make myself hashable\n # TODO: update this.\n return hash(str(self))\n\n def __eq__(self, other: object) -> bool:\n if isinstance(other, str):\n if other.startswith("M8["):\n other = f"datetime64[{other[3:]}"\n return other == self.name\n\n return (\n isinstance(other, DatetimeTZDtype)\n and self.unit == other.unit\n and tz_compare(self.tz, other.tz)\n )\n\n def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> DatetimeArray:\n """\n Construct DatetimeArray from pyarrow Array/ChunkedArray.\n\n Note: If the units in the pyarrow Array are the same as this\n DatetimeDtype, then values corresponding to the integer representation\n of ``NaT`` (e.g. 
one nanosecond before :attr:`pandas.Timestamp.min`)\n are converted to ``NaT``, regardless of the null indicator in the\n pyarrow array.\n\n Parameters\n ----------\n array : pyarrow.Array or pyarrow.ChunkedArray\n The Arrow array to convert to DatetimeArray.\n\n Returns\n -------\n extension array : DatetimeArray\n """\n import pyarrow\n\n from pandas.core.arrays import DatetimeArray\n\n array = array.cast(pyarrow.timestamp(unit=self._unit), safe=True)\n\n if isinstance(array, pyarrow.Array):\n np_arr = array.to_numpy(zero_copy_only=False)\n else:\n np_arr = array.to_numpy()\n\n return DatetimeArray._simple_new(np_arr, dtype=self)\n\n def __setstate__(self, state) -> None:\n # for pickle compat. __get_state__ is defined in the\n # PandasExtensionDtype superclass and uses the public properties to\n # pickle -> need to set the settable private ones here (see GH26067)\n self._tz = state["tz"]\n self._unit = state["unit"]\n\n def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:\n if all(isinstance(t, DatetimeTZDtype) and t.tz == self.tz for t in dtypes):\n np_dtype = np.max([cast(DatetimeTZDtype, t).base for t in [self, *dtypes]])\n unit = np.datetime_data(np_dtype)[0]\n return type(self)(unit=unit, tz=self.tz)\n return super()._get_common_dtype(dtypes)\n\n @cache_readonly\n def index_class(self) -> type_t[DatetimeIndex]:\n from pandas import DatetimeIndex\n\n return DatetimeIndex\n\n\n@register_extension_dtype\nclass PeriodDtype(PeriodDtypeBase, PandasExtensionDtype):\n """\n An ExtensionDtype for Period data.\n\n **This is not an actual numpy dtype**, but a duck type.\n\n Parameters\n ----------\n freq : str or DateOffset\n The frequency of this PeriodDtype.\n\n Attributes\n ----------\n freq\n\n Methods\n -------\n None\n\n Examples\n --------\n >>> pd.PeriodDtype(freq='D')\n period[D]\n\n >>> pd.PeriodDtype(freq=pd.offsets.MonthEnd())\n period[M]\n """\n\n type: type[Period] = Period\n kind: str_type = "O"\n str = "|O08"\n base = np.dtype("O")\n 
num = 102
    _metadata = ("freq",)
    _match = re.compile(r"(P|p)eriod\[(?P<freq>.+)\]")
    # freq -> period dtype-code cache, shared across instances (populated in __new__)
    # error: Incompatible types in assignment (expression has type
    # "Dict[int, PandasExtensionDtype]", base class "PandasExtensionDtype"
    # defined the type as "Dict[str, PandasExtensionDtype]") [assignment]
    _cache_dtypes: dict[BaseOffset, int] = {}  # type: ignore[assignment]
    __hash__ = PeriodDtypeBase.__hash__
    _freq: BaseOffset
    _supports_2d = True
    _can_fast_transpose = True

    def __new__(cls, freq) -> PeriodDtype:  # noqa: PYI034
        """
        Parameters
        ----------
        freq : PeriodDtype, BaseOffset, or string
        """
        # idempotent: an existing PeriodDtype is returned unchanged
        if isinstance(freq, PeriodDtype):
            return freq

        # normalize strings/aliases (e.g. "period[D]" or "D") to a BaseOffset
        if not isinstance(freq, BaseOffset):
            freq = cls._parse_dtype_strict(freq)

        if isinstance(freq, BDay):
            # GH#53446
            # TODO(3.0): enforcing this will close GH#10575
            warnings.warn(
                "PeriodDtype[B] is deprecated and will be removed in a future "
                "version. Use a DatetimeIndex with freq='B' instead",
                FutureWarning,
                stacklevel=find_stack_level(),
            )

        # look up the period dtype code for this offset, caching on first use
        try:
            dtype_code = cls._cache_dtypes[freq]
        except KeyError:
            dtype_code = freq._period_dtype_code
            cls._cache_dtypes[freq] = dtype_code
        u = PeriodDtypeBase.__new__(cls, dtype_code, freq.n)
        u._freq = freq
        return u

    def __reduce__(self) -> tuple[type_t[Self], tuple[str_type]]:
        # pickle via the "period[<freq>]" name; __new__ re-parses it on load
        return type(self), (self.name,)

    @property
    def freq(self) -> BaseOffset:
        """
        The frequency object of this PeriodDtype.

        Examples
        --------
        >>> dtype = pd.PeriodDtype(freq='D')
        >>> dtype.freq
        <Day>
        """
        return self._freq

    @classmethod
    def _parse_dtype_strict(cls, freq: str_type) -> BaseOffset:
        """
        Parse a frequency string, optionally wrapped as "period[...]" or
        "Period[...]", into a BaseOffset; raise TypeError on non-string
        input or when no offset can be resolved.
        """
        if isinstance(freq, str):  # note: freq is already of type str!
            if freq.startswith(("Period[", "period[")):
                # strip the "period[...]" wrapper to get the bare freq string
                m = cls._match.search(freq)
                if m is not None:
                    freq = m.group("freq")

            freq_offset = to_offset(freq, is_period=True)
            if freq_offset is not None:
                return freq_offset

        raise TypeError(
            "PeriodDtype argument should be string or BaseOffset, "
            f"got {type(freq).__name__}"
        )

    @classmethod
    def construct_from_string(cls, string: str_type) -> PeriodDtype:
        """
        Strict construction from a string, raise a TypeError if not
        possible
        """
        if (
            isinstance(string, str)
            and (string.startswith(("period[", "Period[")))
            or isinstance(string, BaseOffset)
        ):
            # do not parse string like U as period[U]
            # avoid tuple to be regarded as freq
            try:
                return cls(freq=string)
            except ValueError:
                pass
        if isinstance(string, str):
            msg = f"Cannot construct a 'PeriodDtype' from '{string}'"
        else:
            msg = f"'construct_from_string' expects a string, got {type(string)}"
        raise TypeError(msg)

    def __str__(self) -> str_type:
        return self.name

    @property
    def name(self) -> str_type:
        # e.g. "period[D]" for daily frequency
        return f"period[{self._freqstr}]"

    @property
    def na_value(self) -> NaTType:
        return NaT

    def __eq__(self, other: object) -> bool:
        if isinstance(other, str):
            # accept both "period[D]" and "Period[D]" spellings
            return other in [self.name, capitalize_first_letter(self.name)]

        return super().__eq__(other)

    def __ne__(self, other: object) -> bool:
        return not self.__eq__(other)

    @classmethod
    def is_dtype(cls, dtype: object) -> bool:
        """
        Return a boolean if the passed type is an actual dtype that we
        can match (via string or type)
        """
        if isinstance(dtype, str):
            # PeriodDtype can be instantiated from freq string like "U",
            # but doesn't regard freq str like "U" as dtype.
            if dtype.startswith(("period[", "Period[")):
                try:
                    return cls._parse_dtype_strict(dtype) is not None
                except ValueError:
                    return False
            else:
                return False
        return super().is_dtype(dtype)

    @classmethod
    def construct_array_type(cls) -> type_t[PeriodArray]:
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        from pandas.core.arrays import PeriodArray

        return PeriodArray

    def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> PeriodArray:
        """
        Construct PeriodArray from 
pyarrow Array/ChunkedArray.\n """\n import pyarrow\n\n from pandas.core.arrays import PeriodArray\n from pandas.core.arrays.arrow._arrow_utils import (\n pyarrow_array_to_numpy_and_mask,\n )\n\n if isinstance(array, pyarrow.Array):\n chunks = [array]\n else:\n chunks = array.chunks\n\n results = []\n for arr in chunks:\n data, mask = pyarrow_array_to_numpy_and_mask(arr, dtype=np.dtype(np.int64))\n parr = PeriodArray(data.copy(), dtype=self, copy=False)\n # error: Invalid index type "ndarray[Any, dtype[bool_]]" for "PeriodArray";\n # expected type "Union[int, Sequence[int], Sequence[bool], slice]"\n parr[~mask] = NaT # type: ignore[index]\n results.append(parr)\n\n if not results:\n return PeriodArray(np.array([], dtype="int64"), dtype=self, copy=False)\n return PeriodArray._concat_same_type(results)\n\n @cache_readonly\n def index_class(self) -> type_t[PeriodIndex]:\n from pandas import PeriodIndex\n\n return PeriodIndex\n\n\n@register_extension_dtype\nclass IntervalDtype(PandasExtensionDtype):\n """\n An ExtensionDtype for Interval data.\n\n **This is not an actual numpy dtype**, but a duck type.\n\n Parameters\n ----------\n subtype : str, np.dtype\n The dtype of the Interval bounds.\n\n Attributes\n ----------\n subtype\n\n Methods\n -------\n None\n\n Examples\n --------\n >>> pd.IntervalDtype(subtype='int64', closed='both')\n interval[int64, both]\n """\n\n name = "interval"\n kind: str_type = "O"\n str = "|O08"\n base = np.dtype("O")\n num = 103\n _metadata = (\n "subtype",\n "closed",\n )\n\n _match = re.compile(\n r"(I|i)nterval\[(?P<subtype>[^,]+(\[.+\])?)"\n r"(, (?P<closed>(right|left|both|neither)))?\]"\n )\n\n _cache_dtypes: dict[str_type, PandasExtensionDtype] = {}\n _subtype: None | np.dtype\n _closed: IntervalClosedType | None\n\n def __init__(self, subtype=None, closed: IntervalClosedType | None = None) -> None:\n from pandas.core.dtypes.common import (\n is_string_dtype,\n pandas_dtype,\n )\n\n if closed is not None and closed not in {"right", 
"left", "both", "neither"}:
            raise ValueError("closed must be one of 'right', 'left', 'both', 'neither'")

        # copy constructor: reuse the existing dtype's parsed state
        if isinstance(subtype, IntervalDtype):
            if closed is not None and closed != subtype.closed:
                raise ValueError(
                    "dtype.closed and 'closed' do not match. "
                    "Try IntervalDtype(dtype.subtype, closed) instead."
                )
            self._subtype = subtype._subtype
            self._closed = subtype._closed
        elif subtype is None:
            # we are called as an empty constructor
            # generally for pickle compat
            self._subtype = None
            self._closed = closed
        elif isinstance(subtype, str) and subtype.lower() == "interval":
            self._subtype = None
            self._closed = closed
        else:
            if isinstance(subtype, str):
                # parse "interval[subtype, closed]" strings via the class regex
                m = IntervalDtype._match.search(subtype)
                if m is not None:
                    gd = m.groupdict()
                    subtype = gd["subtype"]
                    if gd.get("closed", None) is not None:
                        if closed is not None:
                            if closed != gd["closed"]:
                                raise ValueError(
                                    "'closed' keyword does not match value "
                                    "specified in dtype string"
                                )
                        closed = gd["closed"]  # type: ignore[assignment]

            try:
                subtype = pandas_dtype(subtype)
            except TypeError as err:
                raise TypeError("could not construct IntervalDtype") from err
            if CategoricalDtype.is_dtype(subtype) or is_string_dtype(subtype):
                # GH 19016
                msg = (
                    "category, object, and string subtypes are not supported "
                    "for IntervalDtype"
                )
                raise TypeError(msg)
            self._subtype = subtype
            self._closed = closed

    @cache_readonly
    def _can_hold_na(self) -> bool:
        subtype = self._subtype
        if subtype is None:
            # partially-initialized
            raise NotImplementedError(
                "_can_hold_na is not defined for partially-initialized IntervalDtype"
            )
        # signed/unsigned integer bounds cannot represent NA
        if subtype.kind in "iu":
            return False
        return True

    @property
    def closed(self) -> IntervalClosedType:
        return self._closed  # type: ignore[return-value]

    @property
    def subtype(self):
        """
        The dtype of the Interval bounds.

        Examples
        --------
        >>> dtype = pd.IntervalDtype(subtype='int64', closed='both')
        >>> dtype.subtype
        dtype('int64')
        """
        return self._subtype

    @classmethod
    def construct_array_type(cls) -> type[IntervalArray]:
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        from pandas.core.arrays import IntervalArray

        return IntervalArray

    @classmethod
    def construct_from_string(cls, string: str_type) -> IntervalDtype:
        """
        attempt to construct this type from a string, raise a TypeError
        if it's not possible
        """
        if not isinstance(string, str):
            raise TypeError(
                f"'construct_from_string' expects a string, got {type(string)}"
            )

        if string.lower() == "interval" or cls._match.search(string) is not None:
            return cls(string)

        msg = (
            f"Cannot construct a 'IntervalDtype' from '{string}'.\n\n"
            "Incorrectly formatted string passed to constructor. "
            "Valid formats include Interval or Interval[dtype] "
            "where dtype is numeric, datetime, or timedelta"
        )
        raise TypeError(msg)

    @property
    def type(self) -> type[Interval]:
        return Interval

    def __str__(self) -> str_type:
        if self.subtype is None:
            return "interval"
        if self.closed is None:
            # Only partially initialized GH#38394
            return f"interval[{self.subtype}]"
        return f"interval[{self.subtype}, {self.closed}]"

    def __hash__(self) -> int:
        # make myself hashable
        return hash(str(self))

    def __eq__(self, other: object) -> bool:
        if isinstance(other, str):
            return other.lower() in (self.name.lower(), str(self).lower())
        elif not isinstance(other, IntervalDtype):
            return False
        elif self.subtype is None or other.subtype is None:
            # None should match any subtype
            return True
        elif self.closed != other.closed:
            return False
        else:
            return self.subtype == other.subtype

    def __setstate__(self, state) -> None:
        # for pickle compat. __get_state__ is defined in the
        # PandasExtensionDtype superclass and uses the public properties to
        # pickle -> need to set the settable private ones here (see GH26067)
        self._subtype = state["subtype"]

        # backward-compat older pickles won't have "closed" key
        self._closed = state.pop("closed", None)

    @classmethod
    def is_dtype(cls, dtype: object) -> bool:
        """
        Return a boolean if the passed type is an actual dtype that we
        can match (via string or type)
        """
        if isinstance(dtype, str):
            if dtype.lower().startswith("interval"):
                try:
                    return cls.construct_from_string(dtype) is not None
                except (ValueError, TypeError):
                    return False
            else:
                return False
        return super().is_dtype(dtype)

    def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> IntervalArray:
        """
        Construct IntervalArray from pyarrow Array/ChunkedArray.
        """
        import pyarrow

        from pandas.core.arrays import IntervalArray

        if isinstance(array, pyarrow.Array):
            chunks = [array]
        else:
            chunks = array.chunks

        results = []
        for arr in chunks:
            # interval data is stored as a struct with "left"/"right" fields;
            # unwrap extension storage first
            if isinstance(arr, pyarrow.ExtensionArray):
                arr = arr.storage
            left = np.asarray(arr.field("left"), dtype=self.subtype)
            right = np.asarray(arr.field("right"), dtype=self.subtype)
            iarr = IntervalArray.from_arrays(left, right, closed=self.closed)
            results.append(iarr)

        if not results:
            return IntervalArray.from_arrays(
                np.array([], dtype=self.subtype),
                np.array([], dtype=self.subtype),
                closed=self.closed,
            )
        return IntervalArray._concat_same_type(results)

    def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
        if not all(isinstance(x, IntervalDtype) for x in dtypes):
            return None

        # mixed closed sides cannot be unified -> fall back to object dtype
        closed = cast("IntervalDtype", dtypes[0]).closed
        if not all(cast("IntervalDtype", x).closed == closed for x in dtypes):
            return np.dtype(object)

        from pandas.core.dtypes.cast import find_common_type

        common = find_common_type([cast("IntervalDtype", x).subtype for x in dtypes])
        
if common == object:\n return np.dtype(object)\n return IntervalDtype(common, closed=closed)\n\n @cache_readonly\n def index_class(self) -> type_t[IntervalIndex]:\n from pandas import IntervalIndex\n\n return IntervalIndex\n\n\nclass NumpyEADtype(ExtensionDtype):\n """\n A Pandas ExtensionDtype for NumPy dtypes.\n\n This is mostly for internal compatibility, and is not especially\n useful on its own.\n\n Parameters\n ----------\n dtype : object\n Object to be converted to a NumPy data type object.\n\n See Also\n --------\n numpy.dtype\n """\n\n _metadata = ("_dtype",)\n _supports_2d = False\n _can_fast_transpose = False\n\n def __init__(self, dtype: npt.DTypeLike | NumpyEADtype | None) -> None:\n if isinstance(dtype, NumpyEADtype):\n # make constructor idempotent\n dtype = dtype.numpy_dtype\n self._dtype = np.dtype(dtype)\n\n def __repr__(self) -> str:\n return f"NumpyEADtype({repr(self.name)})"\n\n @property\n def numpy_dtype(self) -> np.dtype:\n """\n The NumPy dtype this NumpyEADtype wraps.\n """\n return self._dtype\n\n @property\n def name(self) -> str:\n """\n A bit-width name for this data-type.\n """\n return self._dtype.name\n\n @property\n def type(self) -> type[np.generic]:\n """\n The type object used to instantiate a scalar of this NumPy data-type.\n """\n return self._dtype.type\n\n @property\n def _is_numeric(self) -> bool:\n # exclude object, str, unicode, void.\n return self.kind in set("biufc")\n\n @property\n def _is_boolean(self) -> bool:\n return self.kind == "b"\n\n @classmethod\n def construct_from_string(cls, string: str) -> NumpyEADtype:\n try:\n dtype = np.dtype(string)\n except TypeError as err:\n if not isinstance(string, str):\n msg = f"'construct_from_string' expects a string, got {type(string)}"\n else:\n msg = f"Cannot construct a 'NumpyEADtype' from '{string}'"\n raise TypeError(msg) from err\n return cls(dtype)\n\n @classmethod\n def construct_array_type(cls) -> type_t[NumpyExtensionArray]:\n """\n Return the array type associated 
with this dtype.

        Returns
        -------
        type
        """
        from pandas.core.arrays import NumpyExtensionArray

        return NumpyExtensionArray

    @property
    def kind(self) -> str:
        """
        A character code (one of 'biufcmMOSUV') identifying the general kind of data.
        """
        return self._dtype.kind

    @property
    def itemsize(self) -> int:
        """
        The element size of this data-type object.
        """
        return self._dtype.itemsize


class BaseMaskedDtype(ExtensionDtype):
    """
    Base class for dtypes for BaseMaskedArray subclasses.
    """

    base = None
    # numpy scalar type backing this dtype (used by numpy_dtype below)
    type: type

    @property
    def na_value(self) -> libmissing.NAType:
        return libmissing.NA

    @cache_readonly
    def numpy_dtype(self) -> np.dtype:
        """Return an instance of our numpy dtype"""
        return np.dtype(self.type)

    @cache_readonly
    def kind(self) -> str:
        # kind character of the underlying numpy dtype
        return self.numpy_dtype.kind

    @cache_readonly
    def itemsize(self) -> int:
        """Return the number of bytes in this dtype"""
        return self.numpy_dtype.itemsize

    @classmethod
    def construct_array_type(cls) -> type_t[BaseMaskedArray]:
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        # implemented by concrete subclasses
        raise NotImplementedError

    @classmethod
    def from_numpy_dtype(cls, dtype: np.dtype) -> BaseMaskedDtype:
        """
        Construct the MaskedDtype corresponding to the given numpy dtype.
        """
        # dispatch on numpy kind code: b -> boolean, i/u -> integer, f -> float
        if dtype.kind == "b":
            from pandas.core.arrays.boolean import BooleanDtype

            return BooleanDtype()
        elif dtype.kind in "iu":
            from pandas.core.arrays.integer import NUMPY_INT_TO_DTYPE

            return NUMPY_INT_TO_DTYPE[dtype]
        elif dtype.kind == "f":
            from pandas.core.arrays.floating import NUMPY_FLOAT_TO_DTYPE

            return NUMPY_FLOAT_TO_DTYPE[dtype]
        else:
            raise NotImplementedError(dtype)

    def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
        # We unwrap any masked dtypes, find the common dtype we would use
        # for that, then re-mask the result.
        from pandas.core.dtypes.cast import find_common_type

        new_dtype = 
find_common_type(\n [\n dtype.numpy_dtype if isinstance(dtype, BaseMaskedDtype) else dtype\n for dtype in dtypes\n ]\n )\n if not isinstance(new_dtype, np.dtype):\n # If we ever support e.g. Masked[DatetimeArray] then this will change\n return None\n try:\n return type(self).from_numpy_dtype(new_dtype)\n except (KeyError, NotImplementedError):\n return None\n\n\n@register_extension_dtype\nclass SparseDtype(ExtensionDtype):\n """\n Dtype for data stored in :class:`SparseArray`.\n\n This dtype implements the pandas ExtensionDtype interface.\n\n Parameters\n ----------\n dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64\n The dtype of the underlying array storing the non-fill value values.\n fill_value : scalar, optional\n The scalar value not stored in the SparseArray. By default, this\n depends on `dtype`.\n\n =========== ==========\n dtype na_value\n =========== ==========\n float ``np.nan``\n int ``0``\n bool ``False``\n datetime64 ``pd.NaT``\n timedelta64 ``pd.NaT``\n =========== ==========\n\n The default value may be overridden by specifying a `fill_value`.\n\n Attributes\n ----------\n None\n\n Methods\n -------\n None\n\n Examples\n --------\n >>> ser = pd.Series([1, 0, 0], dtype=pd.SparseDtype(dtype=int, fill_value=0))\n >>> ser\n 0 1\n 1 0\n 2 0\n dtype: Sparse[int64, 0]\n >>> ser.sparse.density\n 0.3333333333333333\n """\n\n _is_immutable = True\n\n # We include `_is_na_fill_value` in the metadata to avoid hash collisions\n # between SparseDtype(float, 0.0) and SparseDtype(float, nan).\n # Without is_na_fill_value in the comparison, those would be equal since\n # hash(nan) is (sometimes?) 
0.
    _metadata = ("_dtype", "_fill_value", "_is_na_fill_value")

    def __init__(self, dtype: Dtype = np.float64, fill_value: Any = None) -> None:
        # copy constructor: unwrap an existing SparseDtype, keeping its
        # fill_value unless one was explicitly passed
        if isinstance(dtype, type(self)):
            if fill_value is None:
                fill_value = dtype.fill_value
            dtype = dtype.subtype

        from pandas.core.dtypes.common import (
            is_string_dtype,
            pandas_dtype,
        )
        from pandas.core.dtypes.missing import na_value_for_dtype

        dtype = pandas_dtype(dtype)
        # string data is stored with object dtype
        if is_string_dtype(dtype):
            dtype = np.dtype("object")
        if not isinstance(dtype, np.dtype):
            # GH#53160
            raise TypeError("SparseDtype subtype must be a numpy dtype")

        # default fill_value depends on the subtype (nan/0/False/NaT)
        if fill_value is None:
            fill_value = na_value_for_dtype(dtype)

        self._dtype = dtype
        self._fill_value = fill_value
        # validate fill_value against the subtype
        self._check_fill_value()

    def __hash__(self) -> int:
        # Python3 doesn't inherit __hash__ when a base class overrides
        # __eq__, so we explicitly do it here.
        return super().__hash__()

    def __eq__(self, other: object) -> bool:
        # We have to override __eq__ to handle NA values in _metadata.
        # The base class does simple == checks, which fail for NA.
        if isinstance(other, str):
            try:
                other = self.construct_from_string(other)
            except TypeError:
                return False

        if isinstance(other, type(self)):
            subtype = self.subtype == other.subtype
            if self._is_na_fill_value:
                # this case is complicated by two things:
                # SparseDtype(float, float(nan)) == SparseDtype(float, np.nan)
                # SparseDtype(float, np.nan) != SparseDtype(float, pd.NaT)
                # i.e. 
we want to treat any floating-point NaN as equal, but\n # not a floating-point NaN and a datetime NaT.\n fill_value = (\n other._is_na_fill_value\n and isinstance(self.fill_value, type(other.fill_value))\n or isinstance(other.fill_value, type(self.fill_value))\n )\n else:\n with warnings.catch_warnings():\n # Ignore spurious numpy warning\n warnings.filterwarnings(\n "ignore",\n "elementwise comparison failed",\n category=DeprecationWarning,\n )\n\n fill_value = self.fill_value == other.fill_value\n\n return subtype and fill_value\n return False\n\n @property\n def fill_value(self):\n """\n The fill value of the array.\n\n Converting the SparseArray to a dense ndarray will fill the\n array with this value.\n\n .. warning::\n\n It's possible to end up with a SparseArray that has ``fill_value``\n values in ``sp_values``. This can occur, for example, when setting\n ``SparseArray.fill_value`` directly.\n """\n return self._fill_value\n\n def _check_fill_value(self) -> None:\n if not lib.is_scalar(self._fill_value):\n raise ValueError(\n f"fill_value must be a scalar. Got {self._fill_value} instead"\n )\n\n from pandas.core.dtypes.cast import can_hold_element\n from pandas.core.dtypes.missing import (\n is_valid_na_for_dtype,\n isna,\n )\n\n from pandas.core.construction import ensure_wrapped_if_datetimelike\n\n # GH#23124 require fill_value and subtype to match\n val = self._fill_value\n if isna(val):\n if not is_valid_na_for_dtype(val, self.subtype):\n warnings.warn(\n "Allowing arbitrary scalar fill_value in SparseDtype is "\n "deprecated. In a future version, the fill_value must be "\n "a valid value for the SparseDtype.subtype.",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n else:\n dummy = np.empty(0, dtype=self.subtype)\n dummy = ensure_wrapped_if_datetimelike(dummy)\n\n if not can_hold_element(dummy, val):\n warnings.warn(\n "Allowing arbitrary scalar fill_value in SparseDtype is "\n "deprecated. 
In a future version, the fill_value must be "\n "a valid value for the SparseDtype.subtype.",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n\n @property\n def _is_na_fill_value(self) -> bool:\n from pandas import isna\n\n return isna(self.fill_value)\n\n @property\n def _is_numeric(self) -> bool:\n return self.subtype != object\n\n @property\n def _is_boolean(self) -> bool:\n return self.subtype.kind == "b"\n\n @property\n def kind(self) -> str:\n """\n The sparse kind. Either 'integer', or 'block'.\n """\n return self.subtype.kind\n\n @property\n def type(self):\n return self.subtype.type\n\n @property\n def subtype(self):\n return self._dtype\n\n @property\n def name(self) -> str:\n return f"Sparse[{self.subtype.name}, {repr(self.fill_value)}]"\n\n def __repr__(self) -> str:\n return self.name\n\n @classmethod\n def construct_array_type(cls) -> type_t[SparseArray]:\n """\n Return the array type associated with this dtype.\n\n Returns\n -------\n type\n """\n from pandas.core.arrays.sparse.array import SparseArray\n\n return SparseArray\n\n @classmethod\n def construct_from_string(cls, string: str) -> SparseDtype:\n """\n Construct a SparseDtype from a string form.\n\n Parameters\n ----------\n string : str\n Can take the following forms.\n\n string dtype\n ================ ============================\n 'int' SparseDtype[np.int64, 0]\n 'Sparse' SparseDtype[np.float64, nan]\n 'Sparse[int]' SparseDtype[np.int64, 0]\n 'Sparse[int, 0]' SparseDtype[np.int64, 0]\n ================ ============================\n\n It is not possible to specify non-default fill values\n with a string. 
An argument like ``'Sparse[int, 1]'``\n will raise a ``TypeError`` because the default fill value\n for integers is 0.\n\n Returns\n -------\n SparseDtype\n """\n if not isinstance(string, str):\n raise TypeError(\n f"'construct_from_string' expects a string, got {type(string)}"\n )\n msg = f"Cannot construct a 'SparseDtype' from '{string}'"\n if string.startswith("Sparse"):\n try:\n sub_type, has_fill_value = cls._parse_subtype(string)\n except ValueError as err:\n raise TypeError(msg) from err\n else:\n result = SparseDtype(sub_type)\n msg = (\n f"Cannot construct a 'SparseDtype' from '{string}'.\n\nIt "\n "looks like the fill_value in the string is not "\n "the default for the dtype. Non-default fill_values "\n "are not supported. Use the 'SparseDtype()' "\n "constructor instead."\n )\n if has_fill_value and str(result) != string:\n raise TypeError(msg)\n return result\n else:\n raise TypeError(msg)\n\n @staticmethod\n def _parse_subtype(dtype: str) -> tuple[str, bool]:\n """\n Parse a string to get the subtype\n\n Parameters\n ----------\n dtype : str\n A string like\n\n * Sparse[subtype]\n * Sparse[subtype, fill_value]\n\n Returns\n -------\n subtype : str\n\n Raises\n ------\n ValueError\n When the subtype cannot be extracted.\n """\n xpr = re.compile(r"Sparse\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\]$")\n m = xpr.match(dtype)\n has_fill_value = False\n if m:\n subtype = m.groupdict()["subtype"]\n has_fill_value = bool(m.groupdict()["fill_value"])\n elif dtype == "Sparse":\n subtype = "float64"\n else:\n raise ValueError(f"Cannot parse {dtype}")\n return subtype, has_fill_value\n\n @classmethod\n def is_dtype(cls, dtype: object) -> bool:\n dtype = getattr(dtype, "dtype", dtype)\n if isinstance(dtype, str) and dtype.startswith("Sparse"):\n sub_type, _ = cls._parse_subtype(dtype)\n dtype = np.dtype(sub_type)\n elif isinstance(dtype, cls):\n return True\n return isinstance(dtype, np.dtype) or dtype == "Sparse"\n\n def update_dtype(self, dtype) -> 
SparseDtype:\n """\n Convert the SparseDtype to a new dtype.\n\n This takes care of converting the ``fill_value``.\n\n Parameters\n ----------\n dtype : Union[str, numpy.dtype, SparseDtype]\n The new dtype to use.\n\n * For a SparseDtype, it is simply returned\n * For a NumPy dtype (or str), the current fill value\n is converted to the new dtype, and a SparseDtype\n with `dtype` and the new fill value is returned.\n\n Returns\n -------\n SparseDtype\n A new SparseDtype with the correct `dtype` and fill value\n for that `dtype`.\n\n Raises\n ------\n ValueError\n When the current fill value cannot be converted to the\n new `dtype` (e.g. trying to convert ``np.nan`` to an\n integer dtype).\n\n\n Examples\n --------\n >>> SparseDtype(int, 0).update_dtype(float)\n Sparse[float64, 0.0]\n\n >>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan))\n Sparse[float64, nan]\n """\n from pandas.core.dtypes.astype import astype_array\n from pandas.core.dtypes.common import pandas_dtype\n\n cls = type(self)\n dtype = pandas_dtype(dtype)\n\n if not isinstance(dtype, cls):\n if not isinstance(dtype, np.dtype):\n raise TypeError("sparse arrays of extension dtypes not supported")\n\n fv_asarray = np.atleast_1d(np.array(self.fill_value))\n fvarr = astype_array(fv_asarray, dtype)\n # NB: not fv_0d.item(), as that casts dt64->int\n fill_value = fvarr[0]\n dtype = cls(dtype, fill_value=fill_value)\n\n return dtype\n\n @property\n def _subtype_with_str(self):\n """\n Whether the SparseDtype's subtype should be considered ``str``.\n\n Typically, pandas will store string data in an object-dtype array.\n When converting values to a dtype, e.g. 
in ``.astype``, we need to\n be more specific, we need the actual underlying type.\n\n Returns\n -------\n >>> SparseDtype(int, 1)._subtype_with_str\n dtype('int64')\n\n >>> SparseDtype(object, 1)._subtype_with_str\n dtype('O')\n\n >>> dtype = SparseDtype(str, '')\n >>> dtype.subtype\n dtype('O')\n\n >>> dtype._subtype_with_str\n <class 'str'>\n """\n if isinstance(self.fill_value, str):\n return type(self.fill_value)\n return self.subtype\n\n def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:\n # TODO for now only handle SparseDtypes and numpy dtypes => extend\n # with other compatible extension dtypes\n from pandas.core.dtypes.cast import np_find_common_type\n\n if any(\n isinstance(x, ExtensionDtype) and not isinstance(x, SparseDtype)\n for x in dtypes\n ):\n return None\n\n fill_values = [x.fill_value for x in dtypes if isinstance(x, SparseDtype)]\n fill_value = fill_values[0]\n\n from pandas import isna\n\n # np.nan isn't a singleton, so we may end up with multiple\n # NaNs here, so we ignore the all NA case too.\n if not (len(set(fill_values)) == 1 or isna(fill_values).all()):\n warnings.warn(\n "Concatenating sparse arrays with multiple fill "\n f"values: '{fill_values}'. Picking the first and "\n "converting the rest.",\n PerformanceWarning,\n stacklevel=find_stack_level(),\n )\n\n np_dtypes = (x.subtype if isinstance(x, SparseDtype) else x for x in dtypes)\n return SparseDtype(np_find_common_type(*np_dtypes), fill_value=fill_value)\n\n\n@register_extension_dtype\nclass ArrowDtype(StorageExtensionDtype):\n """\n An ExtensionDtype for PyArrow data types.\n\n .. warning::\n\n ArrowDtype is considered experimental. The implementation and\n parts of the API may change without warning.\n\n While most ``dtype`` arguments can accept the "string"\n constructor, e.g. 
``"int64[pyarrow]"``, ArrowDtype is useful\n if the data type contains parameters like ``pyarrow.timestamp``.\n\n Parameters\n ----------\n pyarrow_dtype : pa.DataType\n An instance of a `pyarrow.DataType <https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions>`__.\n\n Attributes\n ----------\n pyarrow_dtype\n\n Methods\n -------\n None\n\n Returns\n -------\n ArrowDtype\n\n Examples\n --------\n >>> import pyarrow as pa\n >>> pd.ArrowDtype(pa.int64())\n int64[pyarrow]\n\n Types with parameters must be constructed with ArrowDtype.\n\n >>> pd.ArrowDtype(pa.timestamp("s", tz="America/New_York"))\n timestamp[s, tz=America/New_York][pyarrow]\n >>> pd.ArrowDtype(pa.list_(pa.int64()))\n list<item: int64>[pyarrow]\n """\n\n _metadata = ("storage", "pyarrow_dtype") # type: ignore[assignment]\n\n def __init__(self, pyarrow_dtype: pa.DataType) -> None:\n super().__init__("pyarrow")\n if pa_version_under10p1:\n raise ImportError("pyarrow>=10.0.1 is required for ArrowDtype")\n if not isinstance(pyarrow_dtype, pa.DataType):\n raise ValueError(\n f"pyarrow_dtype ({pyarrow_dtype}) must be an instance "\n f"of a pyarrow.DataType. 
Got {type(pyarrow_dtype)} instead."\n )\n self.pyarrow_dtype = pyarrow_dtype\n\n def __repr__(self) -> str:\n return self.name\n\n def __hash__(self) -> int:\n # make myself hashable\n return hash(str(self))\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, type(self)):\n return super().__eq__(other)\n return self.pyarrow_dtype == other.pyarrow_dtype\n\n @property\n def type(self):\n """\n Returns associated scalar type.\n """\n pa_type = self.pyarrow_dtype\n if pa.types.is_integer(pa_type):\n return int\n elif pa.types.is_floating(pa_type):\n return float\n elif pa.types.is_string(pa_type) or pa.types.is_large_string(pa_type):\n return str\n elif (\n pa.types.is_binary(pa_type)\n or pa.types.is_fixed_size_binary(pa_type)\n or pa.types.is_large_binary(pa_type)\n ):\n return bytes\n elif pa.types.is_boolean(pa_type):\n return bool\n elif pa.types.is_duration(pa_type):\n if pa_type.unit == "ns":\n return Timedelta\n else:\n return timedelta\n elif pa.types.is_timestamp(pa_type):\n if pa_type.unit == "ns":\n return Timestamp\n else:\n return datetime\n elif pa.types.is_date(pa_type):\n return date\n elif pa.types.is_time(pa_type):\n return time\n elif pa.types.is_decimal(pa_type):\n return Decimal\n elif pa.types.is_dictionary(pa_type):\n # TODO: Potentially change this & CategoricalDtype.type to\n # something more representative of the scalar\n return CategoricalDtypeType\n elif pa.types.is_list(pa_type) or pa.types.is_large_list(pa_type):\n return list\n elif pa.types.is_fixed_size_list(pa_type):\n return list\n elif pa.types.is_map(pa_type):\n return list\n elif pa.types.is_struct(pa_type):\n return dict\n elif pa.types.is_null(pa_type):\n # TODO: None? pd.NA? 
pa.null?\n return type(pa_type)\n elif isinstance(pa_type, pa.ExtensionType):\n return type(self)(pa_type.storage_type).type\n raise NotImplementedError(pa_type)\n\n @property\n def name(self) -> str: # type: ignore[override]\n """\n A string identifying the data type.\n """\n return f"{str(self.pyarrow_dtype)}[{self.storage}]"\n\n @cache_readonly\n def numpy_dtype(self) -> np.dtype:\n """Return an instance of the related numpy dtype"""\n if pa.types.is_timestamp(self.pyarrow_dtype):\n # pa.timestamp(unit).to_pandas_dtype() returns ns units\n # regardless of the pyarrow timestamp units.\n # This can be removed if/when pyarrow addresses it:\n # https://github.com/apache/arrow/issues/34462\n return np.dtype(f"datetime64[{self.pyarrow_dtype.unit}]")\n if pa.types.is_duration(self.pyarrow_dtype):\n # pa.duration(unit).to_pandas_dtype() returns ns units\n # regardless of the pyarrow duration units\n # This can be removed if/when pyarrow addresses it:\n # https://github.com/apache/arrow/issues/34462\n return np.dtype(f"timedelta64[{self.pyarrow_dtype.unit}]")\n if pa.types.is_string(self.pyarrow_dtype) or pa.types.is_large_string(\n self.pyarrow_dtype\n ):\n # pa.string().to_pandas_dtype() = object which we don't want\n return np.dtype(str)\n try:\n return np.dtype(self.pyarrow_dtype.to_pandas_dtype())\n except (NotImplementedError, TypeError):\n return np.dtype(object)\n\n @cache_readonly\n def kind(self) -> str:\n if pa.types.is_timestamp(self.pyarrow_dtype):\n # To mirror DatetimeTZDtype\n return "M"\n return self.numpy_dtype.kind\n\n @cache_readonly\n def itemsize(self) -> int:\n """Return the number of bytes in this dtype"""\n return self.numpy_dtype.itemsize\n\n @classmethod\n def construct_array_type(cls) -> type_t[ArrowExtensionArray]:\n """\n Return the array type associated with this dtype.\n\n Returns\n -------\n type\n """\n from pandas.core.arrays.arrow import ArrowExtensionArray\n\n return ArrowExtensionArray\n\n @classmethod\n def 
construct_from_string(cls, string: str) -> ArrowDtype:\n """\n Construct this type from a string.\n\n Parameters\n ----------\n string : str\n string should follow the format f"{pyarrow_type}[pyarrow]"\n e.g. int64[pyarrow]\n """\n if not isinstance(string, str):\n raise TypeError(\n f"'construct_from_string' expects a string, got {type(string)}"\n )\n if not string.endswith("[pyarrow]"):\n raise TypeError(f"'{string}' must end with '[pyarrow]'")\n if string in ("string[pyarrow]", "str[pyarrow]"):\n # Ensure Registry.find skips ArrowDtype to use StringDtype instead\n raise TypeError("string[pyarrow] should be constructed by StringDtype")\n\n base_type = string[:-9] # get rid of "[pyarrow]"\n try:\n pa_dtype = pa.type_for_alias(base_type)\n except ValueError as err:\n has_parameters = re.search(r"[\[\(].*[\]\)]", base_type)\n if has_parameters:\n # Fallback to try common temporal types\n try:\n return cls._parse_temporal_dtype_string(base_type)\n except (NotImplementedError, ValueError):\n # Fall through to raise with nice exception message below\n pass\n\n raise NotImplementedError(\n "Passing pyarrow type specific parameters "\n f"({has_parameters.group()}) in the string is not supported. 
"\n "Please construct an ArrowDtype object with a pyarrow_dtype "\n "instance with specific parameters."\n ) from err\n raise TypeError(f"'{base_type}' is not a valid pyarrow data type.") from err\n return cls(pa_dtype)\n\n # TODO(arrow#33642): This can be removed once supported by pyarrow\n @classmethod\n def _parse_temporal_dtype_string(cls, string: str) -> ArrowDtype:\n """\n Construct a temporal ArrowDtype from string.\n """\n # we assume\n # 1) "[pyarrow]" has already been stripped from the end of our string.\n # 2) we know "[" is present\n head, tail = string.split("[", 1)\n\n if not tail.endswith("]"):\n raise ValueError\n tail = tail[:-1]\n\n if head == "timestamp":\n assert "," in tail # otherwise type_for_alias should work\n unit, tz = tail.split(",", 1)\n unit = unit.strip()\n tz = tz.strip()\n if tz.startswith("tz="):\n tz = tz[3:]\n\n pa_type = pa.timestamp(unit, tz=tz)\n dtype = cls(pa_type)\n return dtype\n\n raise NotImplementedError(string)\n\n @property\n def _is_numeric(self) -> bool:\n """\n Whether columns with this dtype should be considered numeric.\n """\n # TODO: pa.types.is_boolean?\n return (\n pa.types.is_integer(self.pyarrow_dtype)\n or pa.types.is_floating(self.pyarrow_dtype)\n or pa.types.is_decimal(self.pyarrow_dtype)\n )\n\n @property\n def _is_boolean(self) -> bool:\n """\n Whether this dtype should be considered boolean.\n """\n return pa.types.is_boolean(self.pyarrow_dtype)\n\n def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:\n # We unwrap any masked dtypes, find the common dtype we would use\n # for that, then re-mask the result.\n # Mirrors BaseMaskedDtype\n from pandas.core.dtypes.cast import find_common_type\n\n null_dtype = type(self)(pa.null())\n\n new_dtype = find_common_type(\n [\n dtype.numpy_dtype if isinstance(dtype, ArrowDtype) else dtype\n for dtype in dtypes\n if dtype != null_dtype\n ]\n )\n if not isinstance(new_dtype, np.dtype):\n return None\n try:\n pa_dtype = 
pa.from_numpy_dtype(new_dtype)\n return type(self)(pa_dtype)\n except NotImplementedError:\n return None\n\n def __from_arrow__(self, array: pa.Array | pa.ChunkedArray):\n """\n Construct IntegerArray/FloatingArray from pyarrow Array/ChunkedArray.\n """\n array_class = self.construct_array_type()\n arr = array.cast(self.pyarrow_dtype, safe=True)\n return array_class(arr)\n
.venv\Lib\site-packages\pandas\core\dtypes\dtypes.py
dtypes.py
Python
76,055
0.75
0.166099
0.075127
react-lib
362
2024-12-05T07:27:45.238136
BSD-3-Clause
false
58f863011136c5889656aa182433628c
""" define generic base classes for pandas objects """\nfrom __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n Type,\n cast,\n)\n\nif TYPE_CHECKING:\n from pandas import (\n Categorical,\n CategoricalIndex,\n DataFrame,\n DatetimeIndex,\n Index,\n IntervalIndex,\n MultiIndex,\n PeriodIndex,\n RangeIndex,\n Series,\n TimedeltaIndex,\n )\n from pandas.core.arrays import (\n DatetimeArray,\n ExtensionArray,\n NumpyExtensionArray,\n PeriodArray,\n TimedeltaArray,\n )\n from pandas.core.generic import NDFrame\n\n\n# define abstract base classes to enable isinstance type checking on our\n# objects\ndef create_pandas_abc_type(name, attr, comp):\n def _check(inst) -> bool:\n return getattr(inst, attr, "_typ") in comp\n\n # https://github.com/python/mypy/issues/1006\n # error: 'classmethod' used with a non-method\n @classmethod # type: ignore[misc]\n def _instancecheck(cls, inst) -> bool:\n return _check(inst) and not isinstance(inst, type)\n\n @classmethod # type: ignore[misc]\n def _subclasscheck(cls, inst) -> bool:\n # Raise instead of returning False\n # This is consistent with default __subclasscheck__ behavior\n if not isinstance(inst, type):\n raise TypeError("issubclass() arg 1 must be a class")\n\n return _check(inst)\n\n dct = {"__instancecheck__": _instancecheck, "__subclasscheck__": _subclasscheck}\n meta = type("ABCBase", (type,), dct)\n return meta(name, (), dct)\n\n\nABCRangeIndex = cast(\n "Type[RangeIndex]",\n create_pandas_abc_type("ABCRangeIndex", "_typ", ("rangeindex",)),\n)\nABCMultiIndex = cast(\n "Type[MultiIndex]",\n create_pandas_abc_type("ABCMultiIndex", "_typ", ("multiindex",)),\n)\nABCDatetimeIndex = cast(\n "Type[DatetimeIndex]",\n create_pandas_abc_type("ABCDatetimeIndex", "_typ", ("datetimeindex",)),\n)\nABCTimedeltaIndex = cast(\n "Type[TimedeltaIndex]",\n create_pandas_abc_type("ABCTimedeltaIndex", "_typ", ("timedeltaindex",)),\n)\nABCPeriodIndex = cast(\n "Type[PeriodIndex]",\n create_pandas_abc_type("ABCPeriodIndex", 
"_typ", ("periodindex",)),\n)\nABCCategoricalIndex = cast(\n "Type[CategoricalIndex]",\n create_pandas_abc_type("ABCCategoricalIndex", "_typ", ("categoricalindex",)),\n)\nABCIntervalIndex = cast(\n "Type[IntervalIndex]",\n create_pandas_abc_type("ABCIntervalIndex", "_typ", ("intervalindex",)),\n)\nABCIndex = cast(\n "Type[Index]",\n create_pandas_abc_type(\n "ABCIndex",\n "_typ",\n {\n "index",\n "rangeindex",\n "multiindex",\n "datetimeindex",\n "timedeltaindex",\n "periodindex",\n "categoricalindex",\n "intervalindex",\n },\n ),\n)\n\n\nABCNDFrame = cast(\n "Type[NDFrame]",\n create_pandas_abc_type("ABCNDFrame", "_typ", ("series", "dataframe")),\n)\nABCSeries = cast(\n "Type[Series]",\n create_pandas_abc_type("ABCSeries", "_typ", ("series",)),\n)\nABCDataFrame = cast(\n "Type[DataFrame]", create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",))\n)\n\nABCCategorical = cast(\n "Type[Categorical]",\n create_pandas_abc_type("ABCCategorical", "_typ", ("categorical")),\n)\nABCDatetimeArray = cast(\n "Type[DatetimeArray]",\n create_pandas_abc_type("ABCDatetimeArray", "_typ", ("datetimearray")),\n)\nABCTimedeltaArray = cast(\n "Type[TimedeltaArray]",\n create_pandas_abc_type("ABCTimedeltaArray", "_typ", ("timedeltaarray")),\n)\nABCPeriodArray = cast(\n "Type[PeriodArray]",\n create_pandas_abc_type("ABCPeriodArray", "_typ", ("periodarray",)),\n)\nABCExtensionArray = cast(\n "Type[ExtensionArray]",\n create_pandas_abc_type(\n "ABCExtensionArray",\n "_typ",\n # Note: IntervalArray and SparseArray are included bc they have _typ="extension"\n {"extension", "categorical", "periodarray", "datetimearray", "timedeltaarray"},\n ),\n)\nABCNumpyExtensionArray = cast(\n "Type[NumpyExtensionArray]",\n create_pandas_abc_type("ABCNumpyExtensionArray", "_typ", ("npy_extension",)),\n)\n
.venv\Lib\site-packages\pandas\core\dtypes\generic.py
generic.py
Python
4,122
0.95
0.054422
0.052239
awesome-app
325
2023-12-30T01:10:12.878130
BSD-3-Clause
false
21349f87173bd0a1a8bf88365cb469e5
""" basic inference routines """\n\nfrom __future__ import annotations\n\nfrom collections import abc\nfrom numbers import Number\nimport re\nfrom re import Pattern\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\n\nfrom pandas._libs import lib\n\nif TYPE_CHECKING:\n from collections.abc import Hashable\n\n from pandas._typing import TypeGuard\n\nis_bool = lib.is_bool\n\nis_integer = lib.is_integer\n\nis_float = lib.is_float\n\nis_complex = lib.is_complex\n\nis_scalar = lib.is_scalar\n\nis_decimal = lib.is_decimal\n\nis_interval = lib.is_interval\n\nis_list_like = lib.is_list_like\n\nis_iterator = lib.is_iterator\n\n\ndef is_number(obj) -> TypeGuard[Number | np.number]:\n """\n Check if the object is a number.\n\n Returns True when the object is a number, and False if is not.\n\n Parameters\n ----------\n obj : any type\n The object to check if is a number.\n\n Returns\n -------\n bool\n Whether `obj` is a number or not.\n\n See Also\n --------\n api.types.is_integer: Checks a subgroup of numbers.\n\n Examples\n --------\n >>> from pandas.api.types import is_number\n >>> is_number(1)\n True\n >>> is_number(7.15)\n True\n\n Booleans are valid because they are int subclass.\n\n >>> is_number(False)\n True\n\n >>> is_number("foo")\n False\n >>> is_number("5")\n False\n """\n return isinstance(obj, (Number, np.number))\n\n\ndef iterable_not_string(obj) -> bool:\n """\n Check if the object is an iterable but not a string.\n\n Parameters\n ----------\n obj : The object to check.\n\n Returns\n -------\n is_iter_not_string : bool\n Whether `obj` is a non-string iterable.\n\n Examples\n --------\n >>> iterable_not_string([1, 2, 3])\n True\n >>> iterable_not_string("foo")\n False\n >>> iterable_not_string(1)\n False\n """\n return isinstance(obj, abc.Iterable) and not isinstance(obj, str)\n\n\ndef is_file_like(obj) -> bool:\n """\n Check if the object is a file-like object.\n\n For objects to be considered file-like, they must\n be an iterator AND have either a 
`read` and/or `write`\n method as an attribute.\n\n Note: file-like objects must be iterable, but\n iterable objects need not be file-like.\n\n Parameters\n ----------\n obj : The object to check\n\n Returns\n -------\n bool\n Whether `obj` has file-like properties.\n\n Examples\n --------\n >>> import io\n >>> from pandas.api.types import is_file_like\n >>> buffer = io.StringIO("data")\n >>> is_file_like(buffer)\n True\n >>> is_file_like([1, 2, 3])\n False\n """\n if not (hasattr(obj, "read") or hasattr(obj, "write")):\n return False\n\n return bool(hasattr(obj, "__iter__"))\n\n\ndef is_re(obj) -> TypeGuard[Pattern]:\n """\n Check if the object is a regex pattern instance.\n\n Parameters\n ----------\n obj : The object to check\n\n Returns\n -------\n bool\n Whether `obj` is a regex pattern.\n\n Examples\n --------\n >>> from pandas.api.types import is_re\n >>> import re\n >>> is_re(re.compile(".*"))\n True\n >>> is_re("foo")\n False\n """\n return isinstance(obj, Pattern)\n\n\ndef is_re_compilable(obj) -> bool:\n """\n Check if the object can be compiled into a regex pattern instance.\n\n Parameters\n ----------\n obj : The object to check\n\n Returns\n -------\n bool\n Whether `obj` can be compiled as a regex pattern.\n\n Examples\n --------\n >>> from pandas.api.types import is_re_compilable\n >>> is_re_compilable(".*")\n True\n >>> is_re_compilable(1)\n False\n """\n try:\n re.compile(obj)\n except TypeError:\n return False\n else:\n return True\n\n\ndef is_array_like(obj) -> bool:\n """\n Check if the object is array-like.\n\n For an object to be considered array-like, it must be list-like and\n have a `dtype` attribute.\n\n Parameters\n ----------\n obj : The object to check\n\n Returns\n -------\n is_array_like : bool\n Whether `obj` has array-like properties.\n\n Examples\n --------\n >>> is_array_like(np.array([1, 2, 3]))\n True\n >>> is_array_like(pd.Series(["a", "b"]))\n True\n >>> is_array_like(pd.Index(["2016-01-01"]))\n True\n >>> is_array_like([1, 
2, 3])\n False\n >>> is_array_like(("a", "b"))\n False\n """\n return is_list_like(obj) and hasattr(obj, "dtype")\n\n\ndef is_nested_list_like(obj) -> bool:\n """\n Check if the object is list-like, and that all of its elements\n are also list-like.\n\n Parameters\n ----------\n obj : The object to check\n\n Returns\n -------\n is_list_like : bool\n Whether `obj` has list-like properties.\n\n Examples\n --------\n >>> is_nested_list_like([[1, 2, 3]])\n True\n >>> is_nested_list_like([{1, 2, 3}, {1, 2, 3}])\n True\n >>> is_nested_list_like(["foo"])\n False\n >>> is_nested_list_like([])\n False\n >>> is_nested_list_like([[1, 2, 3], 1])\n False\n\n Notes\n -----\n This won't reliably detect whether a consumable iterator (e. g.\n a generator) is a nested-list-like without consuming the iterator.\n To avoid consuming it, we always return False if the outer container\n doesn't define `__len__`.\n\n See Also\n --------\n is_list_like\n """\n return (\n is_list_like(obj)\n and hasattr(obj, "__len__")\n and len(obj) > 0\n and all(is_list_like(item) for item in obj)\n )\n\n\ndef is_dict_like(obj) -> bool:\n """\n Check if the object is dict-like.\n\n Parameters\n ----------\n obj : The object to check\n\n Returns\n -------\n bool\n Whether `obj` has dict-like properties.\n\n Examples\n --------\n >>> from pandas.api.types import is_dict_like\n >>> is_dict_like({1: 2})\n True\n >>> is_dict_like([1, 2, 3])\n False\n >>> is_dict_like(dict)\n False\n >>> is_dict_like(dict())\n True\n """\n dict_like_attrs = ("__getitem__", "keys", "__contains__")\n return (\n all(hasattr(obj, attr) for attr in dict_like_attrs)\n # [GH 25196] exclude classes\n and not isinstance(obj, type)\n )\n\n\ndef is_named_tuple(obj) -> bool:\n """\n Check if the object is a named tuple.\n\n Parameters\n ----------\n obj : The object to check\n\n Returns\n -------\n bool\n Whether `obj` is a named tuple.\n\n Examples\n --------\n >>> from collections import namedtuple\n >>> from pandas.api.types import 
is_named_tuple\n >>> Point = namedtuple("Point", ["x", "y"])\n >>> p = Point(1, 2)\n >>>\n >>> is_named_tuple(p)\n True\n >>> is_named_tuple((1, 2))\n False\n """\n return isinstance(obj, abc.Sequence) and hasattr(obj, "_fields")\n\n\ndef is_hashable(obj) -> TypeGuard[Hashable]:\n """\n Return True if hash(obj) will succeed, False otherwise.\n\n Some types will pass a test against collections.abc.Hashable but fail when\n they are actually hashed with hash().\n\n Distinguish between these and other types by trying the call to hash() and\n seeing if they raise TypeError.\n\n Returns\n -------\n bool\n\n Examples\n --------\n >>> import collections\n >>> from pandas.api.types import is_hashable\n >>> a = ([],)\n >>> isinstance(a, collections.abc.Hashable)\n True\n >>> is_hashable(a)\n False\n """\n # Unfortunately, we can't use isinstance(obj, collections.abc.Hashable),\n # which can be faster than calling hash. That is because numpy scalars\n # fail this test.\n\n # Reconsider this decision once this numpy bug is fixed:\n # https://github.com/numpy/numpy/issues/5562\n\n try:\n hash(obj)\n except TypeError:\n return False\n else:\n return True\n\n\ndef is_sequence(obj) -> bool:\n """\n Check if the object is a sequence of objects.\n String types are not included as sequences here.\n\n Parameters\n ----------\n obj : The object to check\n\n Returns\n -------\n is_sequence : bool\n Whether `obj` is a sequence of objects.\n\n Examples\n --------\n >>> l = [1, 2, 3]\n >>>\n >>> is_sequence(l)\n True\n >>> is_sequence(iter(l))\n False\n """\n try:\n iter(obj) # Can iterate over it.\n len(obj) # Has a length associated with it.\n return not isinstance(obj, (str, bytes))\n except (TypeError, AttributeError):\n return False\n\n\ndef is_dataclass(item) -> bool:\n """\n Checks if the object is a data-class instance\n\n Parameters\n ----------\n item : object\n\n Returns\n --------\n is_dataclass : bool\n True if the item is an instance of a data-class,\n will return false if 
you pass the data class itself\n\n Examples\n --------\n >>> from dataclasses import dataclass\n >>> @dataclass\n ... class Point:\n ... x: int\n ... y: int\n\n >>> is_dataclass(Point)\n False\n >>> is_dataclass(Point(0,2))\n True\n\n """\n try:\n import dataclasses\n\n return dataclasses.is_dataclass(item) and not isinstance(item, type)\n except ImportError:\n return False\n
.venv\Lib\site-packages\pandas\core\dtypes\inference.py
inference.py
Python
9,012
0.95
0.09611
0.017391
vue-tools
812
2025-04-07T11:43:20.074777
MIT
false
3f0bbd2ca0aa704e679c38332818beba
"""\nmissing types & inference\n"""\nfrom __future__ import annotations\n\nfrom decimal import Decimal\nfrom functools import partial\nfrom typing import (\n TYPE_CHECKING,\n overload,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._config import get_option\n\nfrom pandas._libs import lib\nimport pandas._libs.missing as libmissing\nfrom pandas._libs.tslibs import (\n NaT,\n iNaT,\n)\n\nfrom pandas.core.dtypes.common import (\n DT64NS_DTYPE,\n TD64NS_DTYPE,\n ensure_object,\n is_scalar,\n is_string_or_object_np_dtype,\n)\nfrom pandas.core.dtypes.dtypes import (\n CategoricalDtype,\n DatetimeTZDtype,\n ExtensionDtype,\n IntervalDtype,\n PeriodDtype,\n)\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCExtensionArray,\n ABCIndex,\n ABCMultiIndex,\n ABCSeries,\n)\nfrom pandas.core.dtypes.inference import is_list_like\n\nif TYPE_CHECKING:\n from re import Pattern\n\n from pandas._typing import (\n ArrayLike,\n DtypeObj,\n NDFrame,\n NDFrameT,\n Scalar,\n npt,\n )\n\n from pandas import Series\n from pandas.core.indexes.base import Index\n\n\nisposinf_scalar = libmissing.isposinf_scalar\nisneginf_scalar = libmissing.isneginf_scalar\n\nnan_checker = np.isnan\nINF_AS_NA = False\n_dtype_object = np.dtype("object")\n_dtype_str = np.dtype(str)\n\n\n@overload\ndef isna(obj: Scalar | Pattern) -> bool:\n ...\n\n\n@overload\ndef isna(\n obj: ArrayLike | Index | list,\n) -> npt.NDArray[np.bool_]:\n ...\n\n\n@overload\ndef isna(obj: NDFrameT) -> NDFrameT:\n ...\n\n\n# handle unions\n@overload\ndef isna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]:\n ...\n\n\n@overload\ndef isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:\n ...\n\n\ndef isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:\n """\n Detect missing values for an array-like object.\n\n This function takes a scalar or array-like object and indicates\n whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN``\n in object arrays, 
``NaT`` in datetimelike).\n\n Parameters\n ----------\n obj : scalar or array-like\n Object to check for null or missing values.\n\n Returns\n -------\n bool or array-like of bool\n For scalar input, returns a scalar boolean.\n For array input, returns an array of boolean indicating whether each\n corresponding element is missing.\n\n See Also\n --------\n notna : Boolean inverse of pandas.isna.\n Series.isna : Detect missing values in a Series.\n DataFrame.isna : Detect missing values in a DataFrame.\n Index.isna : Detect missing values in an Index.\n\n Examples\n --------\n Scalar arguments (including strings) result in a scalar boolean.\n\n >>> pd.isna('dog')\n False\n\n >>> pd.isna(pd.NA)\n True\n\n >>> pd.isna(np.nan)\n True\n\n ndarrays result in an ndarray of booleans.\n\n >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])\n >>> array\n array([[ 1., nan, 3.],\n [ 4., 5., nan]])\n >>> pd.isna(array)\n array([[False, True, False],\n [False, False, True]])\n\n For indexes, an ndarray of booleans is returned.\n\n >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None,\n ... "2017-07-08"])\n >>> index\n DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],\n dtype='datetime64[ns]', freq=None)\n >>> pd.isna(index)\n array([False, False, True, False])\n\n For Series and DataFrame, the same type is returned, containing booleans.\n\n >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])\n >>> df\n 0 1 2\n 0 ant bee cat\n 1 dog None fly\n >>> pd.isna(df)\n 0 1 2\n 0 False False False\n 1 False True False\n\n >>> pd.isna(df[1])\n 0 False\n 1 True\n Name: 1, dtype: bool\n """\n return _isna(obj)\n\n\nisnull = isna\n\n\ndef _isna(obj, inf_as_na: bool = False):\n """\n Detect missing values, treating None, NaN or NA as null. 
Infinite\n values will also be treated as null if inf_as_na is True.\n\n Parameters\n ----------\n obj: ndarray or object value\n Input array or scalar value.\n inf_as_na: bool\n Whether to treat infinity as null.\n\n Returns\n -------\n boolean ndarray or boolean\n """\n if is_scalar(obj):\n return libmissing.checknull(obj, inf_as_na=inf_as_na)\n elif isinstance(obj, ABCMultiIndex):\n raise NotImplementedError("isna is not defined for MultiIndex")\n elif isinstance(obj, type):\n return False\n elif isinstance(obj, (np.ndarray, ABCExtensionArray)):\n return _isna_array(obj, inf_as_na=inf_as_na)\n elif isinstance(obj, ABCIndex):\n # Try to use cached isna, which also short-circuits for integer dtypes\n # and avoids materializing RangeIndex._values\n if not obj._can_hold_na:\n return obj.isna()\n return _isna_array(obj._values, inf_as_na=inf_as_na)\n\n elif isinstance(obj, ABCSeries):\n result = _isna_array(obj._values, inf_as_na=inf_as_na)\n # box\n result = obj._constructor(result, index=obj.index, name=obj.name, copy=False)\n return result\n elif isinstance(obj, ABCDataFrame):\n return obj.isna()\n elif isinstance(obj, list):\n return _isna_array(np.asarray(obj, dtype=object), inf_as_na=inf_as_na)\n elif hasattr(obj, "__array__"):\n return _isna_array(np.asarray(obj), inf_as_na=inf_as_na)\n else:\n return False\n\n\ndef _use_inf_as_na(key) -> None:\n """\n Option change callback for na/inf behaviour.\n\n Choose which replacement for numpy.isnan / -numpy.isfinite is used.\n\n Parameters\n ----------\n flag: bool\n True means treat None, NaN, INF, -INF as null (old way),\n False means None and NaN are null, but INF, -INF are not null\n (new way).\n\n Notes\n -----\n This approach to setting global module values is discussed and\n approved here:\n\n * https://stackoverflow.com/questions/4859217/\n programmatically-creating-variables-in-python/4859312#4859312\n """\n inf_as_na = get_option(key)\n globals()["_isna"] = partial(_isna, inf_as_na=inf_as_na)\n if 
inf_as_na:\n globals()["nan_checker"] = lambda x: ~np.isfinite(x)\n globals()["INF_AS_NA"] = True\n else:\n globals()["nan_checker"] = np.isnan\n globals()["INF_AS_NA"] = False\n\n\ndef _isna_array(values: ArrayLike, inf_as_na: bool = False):\n """\n Return an array indicating which values of the input array are NaN / NA.\n\n Parameters\n ----------\n obj: ndarray or ExtensionArray\n The input array whose elements are to be checked.\n inf_as_na: bool\n Whether or not to treat infinite values as NA.\n\n Returns\n -------\n array-like\n Array of boolean values denoting the NA status of each element.\n """\n dtype = values.dtype\n\n if not isinstance(values, np.ndarray):\n # i.e. ExtensionArray\n if inf_as_na and isinstance(dtype, CategoricalDtype):\n result = libmissing.isnaobj(values.to_numpy(), inf_as_na=inf_as_na)\n else:\n # error: Incompatible types in assignment (expression has type\n # "Union[ndarray[Any, Any], ExtensionArraySupportsAnyAll]", variable has\n # type "ndarray[Any, dtype[bool_]]")\n result = values.isna() # type: ignore[assignment]\n elif isinstance(values, np.rec.recarray):\n # GH 48526\n result = _isna_recarray_dtype(values, inf_as_na=inf_as_na)\n elif is_string_or_object_np_dtype(values.dtype):\n result = _isna_string_dtype(values, inf_as_na=inf_as_na)\n elif dtype.kind in "mM":\n # this is the NaT pattern\n result = values.view("i8") == iNaT\n else:\n if inf_as_na:\n result = ~np.isfinite(values)\n else:\n result = np.isnan(values)\n\n return result\n\n\ndef _isna_string_dtype(values: np.ndarray, inf_as_na: bool) -> npt.NDArray[np.bool_]:\n # Working around NumPy ticket 1542\n dtype = values.dtype\n\n if dtype.kind in ("S", "U"):\n result = np.zeros(values.shape, dtype=bool)\n else:\n if values.ndim in {1, 2}:\n result = libmissing.isnaobj(values, inf_as_na=inf_as_na)\n else:\n # 0-D, reached via e.g. 
mask_missing\n result = libmissing.isnaobj(values.ravel(), inf_as_na=inf_as_na)\n result = result.reshape(values.shape)\n\n return result\n\n\ndef _has_record_inf_value(record_as_array: np.ndarray) -> np.bool_:\n is_inf_in_record = np.zeros(len(record_as_array), dtype=bool)\n for i, value in enumerate(record_as_array):\n is_element_inf = False\n try:\n is_element_inf = np.isinf(value)\n except TypeError:\n is_element_inf = False\n is_inf_in_record[i] = is_element_inf\n\n return np.any(is_inf_in_record)\n\n\ndef _isna_recarray_dtype(\n values: np.rec.recarray, inf_as_na: bool\n) -> npt.NDArray[np.bool_]:\n result = np.zeros(values.shape, dtype=bool)\n for i, record in enumerate(values):\n record_as_array = np.array(record.tolist())\n does_record_contain_nan = isna_all(record_as_array)\n does_record_contain_inf = False\n if inf_as_na:\n does_record_contain_inf = bool(_has_record_inf_value(record_as_array))\n result[i] = np.any(\n np.logical_or(does_record_contain_nan, does_record_contain_inf)\n )\n\n return result\n\n\n@overload\ndef notna(obj: Scalar) -> bool:\n ...\n\n\n@overload\ndef notna(\n obj: ArrayLike | Index | list,\n) -> npt.NDArray[np.bool_]:\n ...\n\n\n@overload\ndef notna(obj: NDFrameT) -> NDFrameT:\n ...\n\n\n# handle unions\n@overload\ndef notna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]:\n ...\n\n\n@overload\ndef notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:\n ...\n\n\ndef notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:\n """\n Detect non-missing values for an array-like object.\n\n This function takes a scalar or array-like object and indicates\n whether values are valid (not missing, which is ``NaN`` in numeric\n arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike).\n\n Parameters\n ----------\n obj : array-like or object value\n Object to check for *not* null or *non*-missing values.\n\n Returns\n -------\n bool or array-like of bool\n For scalar input, returns a 
scalar boolean.\n For array input, returns an array of boolean indicating whether each\n corresponding element is valid.\n\n See Also\n --------\n isna : Boolean inverse of pandas.notna.\n Series.notna : Detect valid values in a Series.\n DataFrame.notna : Detect valid values in a DataFrame.\n Index.notna : Detect valid values in an Index.\n\n Examples\n --------\n Scalar arguments (including strings) result in a scalar boolean.\n\n >>> pd.notna('dog')\n True\n\n >>> pd.notna(pd.NA)\n False\n\n >>> pd.notna(np.nan)\n False\n\n ndarrays result in an ndarray of booleans.\n\n >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])\n >>> array\n array([[ 1., nan, 3.],\n [ 4., 5., nan]])\n >>> pd.notna(array)\n array([[ True, False, True],\n [ True, True, False]])\n\n For indexes, an ndarray of booleans is returned.\n\n >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None,\n ... "2017-07-08"])\n >>> index\n DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],\n dtype='datetime64[ns]', freq=None)\n >>> pd.notna(index)\n array([ True, True, False, True])\n\n For Series and DataFrame, the same type is returned, containing booleans.\n\n >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])\n >>> df\n 0 1 2\n 0 ant bee cat\n 1 dog None fly\n >>> pd.notna(df)\n 0 1 2\n 0 True True True\n 1 True False True\n\n >>> pd.notna(df[1])\n 0 True\n 1 False\n Name: 1, dtype: bool\n """\n res = isna(obj)\n if isinstance(res, bool):\n return not res\n return ~res\n\n\nnotnull = notna\n\n\ndef array_equivalent(\n left,\n right,\n strict_nan: bool = False,\n dtype_equal: bool = False,\n) -> bool:\n """\n True if two arrays, left and right, have equal non-NaN elements, and NaNs\n in corresponding locations. False otherwise. It is assumed that left and\n right are NumPy arrays of the same dtype. 
The behavior of this function\n (particularly with respect to NaNs) is not defined if the dtypes are\n different.\n\n Parameters\n ----------\n left, right : ndarrays\n strict_nan : bool, default False\n If True, consider NaN and None to be different.\n dtype_equal : bool, default False\n Whether `left` and `right` are known to have the same dtype\n according to `is_dtype_equal`. Some methods like `BlockManager.equals`.\n require that the dtypes match. Setting this to ``True`` can improve\n performance, but will give different results for arrays that are\n equal but different dtypes.\n\n Returns\n -------\n b : bool\n Returns True if the arrays are equivalent.\n\n Examples\n --------\n >>> array_equivalent(\n ... np.array([1, 2, np.nan]),\n ... np.array([1, 2, np.nan]))\n True\n >>> array_equivalent(\n ... np.array([1, np.nan, 2]),\n ... np.array([1, 2, np.nan]))\n False\n """\n left, right = np.asarray(left), np.asarray(right)\n\n # shape compat\n if left.shape != right.shape:\n return False\n\n if dtype_equal:\n # fastpath when we require that the dtypes match (Block.equals)\n if left.dtype.kind in "fc":\n return _array_equivalent_float(left, right)\n elif left.dtype.kind in "mM":\n return _array_equivalent_datetimelike(left, right)\n elif is_string_or_object_np_dtype(left.dtype):\n # TODO: fastpath for pandas' StringDtype\n return _array_equivalent_object(left, right, strict_nan)\n else:\n return np.array_equal(left, right)\n\n # Slow path when we allow comparing different dtypes.\n # Object arrays can contain None, NaN and NaT.\n # string dtypes must be come to this path for NumPy 1.7.1 compat\n if left.dtype.kind in "OSU" or right.dtype.kind in "OSU":\n # Note: `in "OSU"` is non-trivially faster than `in ["O", "S", "U"]`\n # or `in ("O", "S", "U")`\n return _array_equivalent_object(left, right, strict_nan)\n\n # NaNs can occur in float and complex arrays.\n if left.dtype.kind in "fc":\n if not (left.size and right.size):\n return True\n return ((left == right) 
| (isna(left) & isna(right))).all()\n\n elif left.dtype.kind in "mM" or right.dtype.kind in "mM":\n # datetime64, timedelta64, Period\n if left.dtype != right.dtype:\n return False\n\n left = left.view("i8")\n right = right.view("i8")\n\n # if we have structured dtypes, compare first\n if (\n left.dtype.type is np.void or right.dtype.type is np.void\n ) and left.dtype != right.dtype:\n return False\n\n return np.array_equal(left, right)\n\n\ndef _array_equivalent_float(left: np.ndarray, right: np.ndarray) -> bool:\n return bool(((left == right) | (np.isnan(left) & np.isnan(right))).all())\n\n\ndef _array_equivalent_datetimelike(left: np.ndarray, right: np.ndarray):\n return np.array_equal(left.view("i8"), right.view("i8"))\n\n\ndef _array_equivalent_object(left: np.ndarray, right: np.ndarray, strict_nan: bool):\n left = ensure_object(left)\n right = ensure_object(right)\n\n mask: npt.NDArray[np.bool_] | None = None\n if strict_nan:\n mask = isna(left) & isna(right)\n if not mask.any():\n mask = None\n\n try:\n if mask is None:\n return lib.array_equivalent_object(left, right)\n if not lib.array_equivalent_object(left[~mask], right[~mask]):\n return False\n left_remaining = left[mask]\n right_remaining = right[mask]\n except ValueError:\n # can raise a ValueError if left and right cannot be\n # compared (e.g. 
nested arrays)\n left_remaining = left\n right_remaining = right\n\n for left_value, right_value in zip(left_remaining, right_remaining):\n if left_value is NaT and right_value is not NaT:\n return False\n\n elif left_value is libmissing.NA and right_value is not libmissing.NA:\n return False\n\n elif isinstance(left_value, float) and np.isnan(left_value):\n if not isinstance(right_value, float) or not np.isnan(right_value):\n return False\n else:\n with warnings.catch_warnings():\n # suppress numpy's "elementwise comparison failed"\n warnings.simplefilter("ignore", DeprecationWarning)\n try:\n if np.any(np.asarray(left_value != right_value)):\n return False\n except TypeError as err:\n if "boolean value of NA is ambiguous" in str(err):\n return False\n raise\n except ValueError:\n # numpy can raise a ValueError if left and right cannot be\n # compared (e.g. nested arrays)\n return False\n return True\n\n\ndef array_equals(left: ArrayLike, right: ArrayLike) -> bool:\n """\n ExtensionArray-compatible implementation of array_equivalent.\n """\n if left.dtype != right.dtype:\n return False\n elif isinstance(left, ABCExtensionArray):\n return left.equals(right)\n else:\n return array_equivalent(left, right, dtype_equal=True)\n\n\ndef infer_fill_value(val):\n """\n infer the fill value for the nan/NaT from the provided\n scalar/ndarray/list-like if we are a NaT, return the correct dtyped\n element to provide proper block construction\n """\n if not is_list_like(val):\n val = [val]\n val = np.asarray(val)\n if val.dtype.kind in "mM":\n return np.array("NaT", dtype=val.dtype)\n elif val.dtype == object:\n dtype = lib.infer_dtype(ensure_object(val), skipna=False)\n if dtype in ["datetime", "datetime64"]:\n return np.array("NaT", dtype=DT64NS_DTYPE)\n elif dtype in ["timedelta", "timedelta64"]:\n return np.array("NaT", dtype=TD64NS_DTYPE)\n return np.array(np.nan, dtype=object)\n elif val.dtype.kind == "U":\n return np.array(np.nan, dtype=val.dtype)\n return np.nan\n\n\ndef 
construct_1d_array_from_inferred_fill_value(\n value: object, length: int\n) -> ArrayLike:\n # Find our empty_value dtype by constructing an array\n # from our value and doing a .take on it\n from pandas.core.algorithms import take_nd\n from pandas.core.construction import sanitize_array\n from pandas.core.indexes.base import Index\n\n arr = sanitize_array(value, Index(range(1)), copy=False)\n taker = -1 * np.ones(length, dtype=np.intp)\n return take_nd(arr, taker)\n\n\ndef maybe_fill(arr: np.ndarray) -> np.ndarray:\n """\n Fill numpy.ndarray with NaN, unless we have a integer or boolean dtype.\n """\n if arr.dtype.kind not in "iub":\n arr.fill(np.nan)\n return arr\n\n\ndef na_value_for_dtype(dtype: DtypeObj, compat: bool = True):\n """\n Return a dtype compat na value\n\n Parameters\n ----------\n dtype : string / dtype\n compat : bool, default True\n\n Returns\n -------\n np.dtype or a pandas dtype\n\n Examples\n --------\n >>> na_value_for_dtype(np.dtype('int64'))\n 0\n >>> na_value_for_dtype(np.dtype('int64'), compat=False)\n nan\n >>> na_value_for_dtype(np.dtype('float64'))\n nan\n >>> na_value_for_dtype(np.dtype('bool'))\n False\n >>> na_value_for_dtype(np.dtype('datetime64[ns]'))\n numpy.datetime64('NaT')\n """\n\n if isinstance(dtype, ExtensionDtype):\n return dtype.na_value\n elif dtype.kind in "mM":\n unit = np.datetime_data(dtype)[0]\n return dtype.type("NaT", unit)\n elif dtype.kind == "f":\n return np.nan\n elif dtype.kind in "iu":\n if compat:\n return 0\n return np.nan\n elif dtype.kind == "b":\n if compat:\n return False\n return np.nan\n return np.nan\n\n\ndef remove_na_arraylike(arr: Series | Index | np.ndarray):\n """\n Return array-like containing only true/non-NaN values, possibly empty.\n """\n if isinstance(arr.dtype, ExtensionDtype):\n return arr[notna(arr)]\n else:\n return arr[notna(np.asarray(arr))]\n\n\ndef is_valid_na_for_dtype(obj, dtype: DtypeObj) -> bool:\n """\n isna check that excludes incompatible dtypes\n\n Parameters\n 
----------\n obj : object\n dtype : np.datetime64, np.timedelta64, DatetimeTZDtype, or PeriodDtype\n\n Returns\n -------\n bool\n """\n if not lib.is_scalar(obj) or not isna(obj):\n return False\n elif dtype.kind == "M":\n if isinstance(dtype, np.dtype):\n # i.e. not tzaware\n return not isinstance(obj, (np.timedelta64, Decimal))\n # we have to rule out tznaive dt64("NaT")\n return not isinstance(obj, (np.timedelta64, np.datetime64, Decimal))\n elif dtype.kind == "m":\n return not isinstance(obj, (np.datetime64, Decimal))\n elif dtype.kind in "iufc":\n # Numeric\n return obj is not NaT and not isinstance(obj, (np.datetime64, np.timedelta64))\n elif dtype.kind == "b":\n # We allow pd.NA, None, np.nan in BooleanArray (same as IntervalDtype)\n return lib.is_float(obj) or obj is None or obj is libmissing.NA\n\n elif dtype == _dtype_str:\n # numpy string dtypes to avoid float np.nan\n return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal, float))\n\n elif dtype == _dtype_object:\n # This is needed for Categorical, but is kind of weird\n return True\n\n elif isinstance(dtype, PeriodDtype):\n return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal))\n\n elif isinstance(dtype, IntervalDtype):\n return lib.is_float(obj) or obj is None or obj is libmissing.NA\n\n elif isinstance(dtype, CategoricalDtype):\n return is_valid_na_for_dtype(obj, dtype.categories.dtype)\n\n # fallback, default to allowing NaN, None, NA, NaT\n return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal))\n\n\ndef isna_all(arr: ArrayLike) -> bool:\n """\n Optimized equivalent to isna(arr).all()\n """\n total_len = len(arr)\n\n # Usually it's enough to check but a small fraction of values to see if\n # a block is NOT null, chunks should help in such cases.\n # parameters 1000 and 40 were chosen arbitrarily\n chunk_len = max(total_len // 40, 1000)\n\n dtype = arr.dtype\n if lib.is_np_dtype(dtype, "f"):\n checker = nan_checker\n\n elif (lib.is_np_dtype(dtype, "mM")) or 
isinstance(\n dtype, (DatetimeTZDtype, PeriodDtype)\n ):\n # error: Incompatible types in assignment (expression has type\n # "Callable[[Any], Any]", variable has type "ufunc")\n checker = lambda x: np.asarray(x.view("i8")) == iNaT # type: ignore[assignment]\n\n else:\n # error: Incompatible types in assignment (expression has type "Callable[[Any],\n # Any]", variable has type "ufunc")\n checker = lambda x: _isna_array( # type: ignore[assignment]\n x, inf_as_na=INF_AS_NA\n )\n\n return all(\n checker(arr[i : i + chunk_len]).all() for i in range(0, total_len, chunk_len)\n )\n
.venv\Lib\site-packages\pandas\core\dtypes\missing.py
missing.py
Python
23,632
0.95
0.124691
0.070122
react-lib
611
2024-01-18T08:58:12.637211
MIT
false
ec4850f512712876e036888054fd81c3
\n\n
.venv\Lib\site-packages\pandas\core\dtypes\__pycache__\api.cpython-313.pyc
api.cpython-313.pyc
Other
1,348
0.8
0
0
python-kit
638
2024-02-28T01:35:31.577948
MIT
false
621fcb00707a290d067abee228d5306f
\n\n
.venv\Lib\site-packages\pandas\core\dtypes\__pycache__\astype.cpython-313.pyc
astype.cpython-313.pyc
Other
10,321
0.95
0.056962
0.006897
vue-tools
307
2024-01-04T03:44:40.435407
BSD-3-Clause
false
a27c7a086f36c944f1aa7f7f08dbadd4
\n\n
.venv\Lib\site-packages\pandas\core\dtypes\__pycache__\base.cpython-313.pyc
base.cpython-313.pyc
Other
19,740
0.95
0.121372
0.050157
node-utils
451
2025-02-06T16:52:21.182220
Apache-2.0
false
f61acaa95b1bf61a66ac61ea9ce297be
\n\n
.venv\Lib\site-packages\pandas\core\dtypes\__pycache__\cast.cpython-313.pyc
cast.cpython-313.pyc
Other
65,952
0.75
0.040369
0.018277
vue-tools
831
2025-01-09T08:25:52.541656
MIT
false
8fa7fc9b50d6a48af1d59a583765eb6e
\n\n
.venv\Lib\site-packages\pandas\core\dtypes\__pycache__\common.cpython-313.pyc
common.cpython-313.pyc
Other
49,798
0.95
0.032095
0.008646
awesome-app
222
2025-05-03T15:01:03.844752
Apache-2.0
false
7a648d351798da8526007e85faf7274b
\n\n
.venv\Lib\site-packages\pandas\core\dtypes\__pycache__\concat.cpython-313.pyc
concat.cpython-313.pyc
Other
13,183
0.95
0.030303
0
vue-tools
310
2024-04-04T20:30:12.190697
GPL-3.0
false
6de9d00953fcffb4a329cd66154b408a
\n\n
.venv\Lib\site-packages\pandas\core\dtypes\__pycache__\dtypes.cpython-313.pyc
dtypes.cpython-313.pyc
Other
85,534
0.75
0.042132
0.015429
vue-tools
877
2024-10-20T12:28:34.460010
MIT
false
99f7b4142070fb5197331e950bdbaee4
\n\n
.venv\Lib\site-packages\pandas\core\dtypes\__pycache__\generic.cpython-313.pyc
generic.cpython-313.pyc
Other
4,261
0.8
0.037037
0
vue-tools
364
2024-05-11T08:40:47.970508
MIT
false
9a237784d48855b5420bc5f7d21805c8
\n\n
.venv\Lib\site-packages\pandas\core\dtypes\__pycache__\inference.cpython-313.pyc
inference.cpython-313.pyc
Other
10,138
0.95
0.065476
0
python-kit
389
2023-12-20T08:35:34.739005
Apache-2.0
false
63ab13d649bb89b103991f1d2d701016
\n\n
.venv\Lib\site-packages\pandas\core\dtypes\__pycache__\missing.cpython-313.pyc
missing.cpython-313.pyc
Other
28,604
0.95
0.038031
0.015306
react-lib
319
2025-06-01T09:47:19.747708
BSD-3-Clause
false
b4ef074f747a04b0b3790773aea44d7c
\n\n
.venv\Lib\site-packages\pandas\core\dtypes\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
193
0.7
0
0
react-lib
93
2023-07-20T03:15:26.325574
BSD-3-Clause
false
36ecb2ce25eb20abb19ea22242ada5fb
"""\nProvide basic components for groupby.\n"""\nfrom __future__ import annotations\n\nimport dataclasses\nfrom typing import TYPE_CHECKING\n\nif TYPE_CHECKING:\n from collections.abc import Hashable\n\n\n@dataclasses.dataclass(order=True, frozen=True)\nclass OutputKey:\n label: Hashable\n position: int\n\n\n# special case to prevent duplicate plots when catching exceptions when\n# forwarding methods from NDFrames\nplotting_methods = frozenset(["plot", "hist"])\n\n# cythonized transformations or canned "agg+broadcast", which do not\n# require postprocessing of the result by transform.\ncythonized_kernels = frozenset(["cumprod", "cumsum", "shift", "cummin", "cummax"])\n\n# List of aggregation/reduction functions.\n# These map each group to a single numeric value\nreduction_kernels = frozenset(\n [\n "all",\n "any",\n "corrwith",\n "count",\n "first",\n "idxmax",\n "idxmin",\n "last",\n "max",\n "mean",\n "median",\n "min",\n "nunique",\n "prod",\n # as long as `quantile`'s signature accepts only\n # a single quantile value, it's a reduction.\n # GH#27526 might change that.\n "quantile",\n "sem",\n "size",\n "skew",\n "std",\n "sum",\n "var",\n ]\n)\n\n# List of transformation functions.\n# a transformation is a function that, for each group,\n# produces a result that has the same shape as the group.\n\n\ntransformation_kernels = frozenset(\n [\n "bfill",\n "cumcount",\n "cummax",\n "cummin",\n "cumprod",\n "cumsum",\n "diff",\n "ffill",\n "fillna",\n "ngroup",\n "pct_change",\n "rank",\n "shift",\n ]\n)\n\n# these are all the public methods on Grouper which don't belong\n# in either of the above lists\ngroupby_other_methods = frozenset(\n [\n "agg",\n "aggregate",\n "apply",\n "boxplot",\n # corr and cov return ngroups*ncolumns rows, so they\n # are neither a transformation nor a reduction\n "corr",\n "cov",\n "describe",\n "dtypes",\n "expanding",\n "ewm",\n "filter",\n "get_group",\n "groups",\n "head",\n "hist",\n "indices",\n "ndim",\n "ngroups",\n "nth",\n 
"ohlc",\n "pipe",\n "plot",\n "resample",\n "rolling",\n "tail",\n "take",\n "transform",\n "sample",\n "value_counts",\n ]\n)\n# Valid values of `name` for `groupby.transform(name)`\n# NOTE: do NOT edit this directly. New additions should be inserted\n# into the appropriate list above.\ntransform_kernel_allowlist = reduction_kernels | transformation_kernels\n
.venv\Lib\site-packages\pandas\core\groupby\base.py
base.py
Python
2,740
0.95
0.049587
0.174312
python-kit
731
2024-02-06T17:50:42.996198
BSD-3-Clause
false
56b24096ec5580c1a455bae64a5fb14d
from __future__ import annotations\n\nimport numpy as np\n\nfrom pandas.core.algorithms import unique1d\nfrom pandas.core.arrays.categorical import (\n Categorical,\n CategoricalDtype,\n recode_for_categories,\n)\n\n\ndef recode_for_groupby(\n c: Categorical, sort: bool, observed: bool\n) -> tuple[Categorical, Categorical | None]:\n """\n Code the categories to ensure we can groupby for categoricals.\n\n If observed=True, we return a new Categorical with the observed\n categories only.\n\n If sort=False, return a copy of self, coded with categories as\n returned by .unique(), followed by any categories not appearing in\n the data. If sort=True, return self.\n\n This method is needed solely to ensure the categorical index of the\n GroupBy result has categories in the order of appearance in the data\n (GH-8868).\n\n Parameters\n ----------\n c : Categorical\n sort : bool\n The value of the sort parameter groupby was called with.\n observed : bool\n Account only for the observed values\n\n Returns\n -------\n Categorical\n If sort=False, the new categories are set to the order of\n appearance in codes (unless ordered=True, in which case the\n original order is preserved), followed by any unrepresented\n categories in the original order.\n Categorical or None\n If we are observed, return the original categorical, otherwise None\n """\n # we only care about observed values\n if observed:\n # In cases with c.ordered, this is equivalent to\n # return c.remove_unused_categories(), c\n\n unique_codes = unique1d(c.codes)\n\n take_codes = unique_codes[unique_codes != -1]\n if sort:\n take_codes = np.sort(take_codes)\n\n # we recode according to the uniques\n categories = c.categories.take(take_codes)\n codes = recode_for_categories(c.codes, c.categories, categories)\n\n # return a new categorical that maps our new codes\n # and categories\n dtype = CategoricalDtype(categories, ordered=c.ordered)\n return Categorical._simple_new(codes, dtype=dtype), c\n\n # Already sorted 
according to c.categories; all is fine\n if sort:\n return c, None\n\n # sort=False should order groups in as-encountered order (GH-8868)\n\n # xref GH:46909: Re-ordering codes faster than using (set|add|reorder)_categories\n all_codes = np.arange(c.categories.nunique())\n # GH 38140: exclude nan from indexer for categories\n unique_notnan_codes = unique1d(c.codes[c.codes != -1])\n if sort:\n unique_notnan_codes = np.sort(unique_notnan_codes)\n if len(all_codes) > len(unique_notnan_codes):\n # GH 13179: All categories need to be present, even if missing from the data\n missing_codes = np.setdiff1d(all_codes, unique_notnan_codes, assume_unique=True)\n take_codes = np.concatenate((unique_notnan_codes, missing_codes))\n else:\n take_codes = unique_notnan_codes\n\n return Categorical(c, c.unique().categories.take(take_codes)), None\n
.venv\Lib\site-packages\pandas\core\groupby\categorical.py
categorical.py
Python
3,047
0.95
0.114943
0.157143
awesome-app
474
2024-02-05T16:27:48.902325
BSD-3-Clause
false
1ac899e3c49927d1a5ac2cbf8da30082
"""\nDefine the SeriesGroupBy and DataFrameGroupBy\nclasses that hold the groupby interfaces (and some implementations).\n\nThese are user facing as the result of the ``df.groupby(...)`` operations,\nwhich here returns a DataFrameGroupBy object.\n"""\nfrom __future__ import annotations\n\nfrom collections import abc\nfrom functools import partial\nfrom textwrap import dedent\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Literal,\n NamedTuple,\n TypeVar,\n Union,\n cast,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import (\n Interval,\n lib,\n)\nfrom pandas._libs.hashtable import duplicated\nfrom pandas.errors import SpecificationError\nfrom pandas.util._decorators import (\n Appender,\n Substitution,\n doc,\n)\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.common import (\n ensure_int64,\n is_bool,\n is_dict_like,\n is_integer_dtype,\n is_list_like,\n is_numeric_dtype,\n is_scalar,\n)\nfrom pandas.core.dtypes.dtypes import (\n CategoricalDtype,\n IntervalDtype,\n)\nfrom pandas.core.dtypes.inference import is_hashable\nfrom pandas.core.dtypes.missing import (\n isna,\n notna,\n)\n\nfrom pandas.core import algorithms\nfrom pandas.core.apply import (\n GroupByApply,\n maybe_mangle_lambdas,\n reconstruct_func,\n validate_func_kwargs,\n warn_alias_replacement,\n)\nimport pandas.core.common as com\nfrom pandas.core.frame import DataFrame\nfrom pandas.core.groupby import (\n base,\n ops,\n)\nfrom pandas.core.groupby.groupby import (\n GroupBy,\n GroupByPlot,\n _agg_template_frame,\n _agg_template_series,\n _apply_docs,\n _transform_template,\n)\nfrom pandas.core.indexes.api import (\n Index,\n MultiIndex,\n all_indexes_same,\n default_index,\n)\nfrom pandas.core.series import Series\nfrom pandas.core.sorting import get_group_index\nfrom pandas.core.util.numba_ import maybe_use_numba\n\nfrom pandas.plotting import boxplot_frame_groupby\n\nif TYPE_CHECKING:\n from collections.abc import (\n Hashable,\n 
Mapping,\n Sequence,\n )\n\n from pandas._typing import (\n ArrayLike,\n Axis,\n AxisInt,\n CorrelationMethod,\n FillnaOptions,\n IndexLabel,\n Manager,\n Manager2D,\n SingleManager,\n TakeIndexer,\n )\n\n from pandas import Categorical\n from pandas.core.generic import NDFrame\n\n# TODO(typing) the return value on this callable should be any *scalar*.\nAggScalar = Union[str, Callable[..., Any]]\n# TODO: validate types on ScalarResult and move to _typing\n# Blocked from using by https://github.com/python/mypy/issues/1484\n# See note at _mangle_lambda_list\nScalarResult = TypeVar("ScalarResult")\n\n\nclass NamedAgg(NamedTuple):\n """\n Helper for column specific aggregation with control over output column names.\n\n Subclass of typing.NamedTuple.\n\n Parameters\n ----------\n column : Hashable\n Column label in the DataFrame to apply aggfunc.\n aggfunc : function or str\n Function to apply to the provided column. If string, the name of a built-in\n pandas function.\n\n Examples\n --------\n >>> df = pd.DataFrame({"key": [1, 1, 2], "a": [-1, 0, 1], 1: [10, 11, 12]})\n >>> agg_a = pd.NamedAgg(column="a", aggfunc="min")\n >>> agg_1 = pd.NamedAgg(column=1, aggfunc=lambda x: np.mean(x))\n >>> df.groupby("key").agg(result_a=agg_a, result_1=agg_1)\n result_a result_1\n key\n 1 -1 10.5\n 2 1 12.0\n """\n\n column: Hashable\n aggfunc: AggScalar\n\n\nclass SeriesGroupBy(GroupBy[Series]):\n def _wrap_agged_manager(self, mgr: Manager) -> Series:\n out = self.obj._constructor_from_mgr(mgr, axes=mgr.axes)\n out._name = self.obj.name\n return out\n\n def _get_data_to_aggregate(\n self, *, numeric_only: bool = False, name: str | None = None\n ) -> SingleManager:\n ser = self._obj_with_exclusions\n single = ser._mgr\n if numeric_only and not is_numeric_dtype(ser.dtype):\n # GH#41291 match Series behavior\n kwd_name = "numeric_only"\n raise TypeError(\n f"Cannot use {kwd_name}=True with "\n f"{type(self).__name__}.{name} and non-numeric dtypes."\n )\n return single\n\n 
_agg_examples_doc = dedent(\n """\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n >>> s.groupby([1, 1, 2, 2]).min()\n 1 1\n 2 3\n dtype: int64\n\n >>> s.groupby([1, 1, 2, 2]).agg('min')\n 1 1\n 2 3\n dtype: int64\n\n >>> s.groupby([1, 1, 2, 2]).agg(['min', 'max'])\n min max\n 1 1 2\n 2 3 4\n\n The output column names can be controlled by passing\n the desired column names and aggregations as keyword arguments.\n\n >>> s.groupby([1, 1, 2, 2]).agg(\n ... minimum='min',\n ... maximum='max',\n ... )\n minimum maximum\n 1 1 2\n 2 3 4\n\n .. versionchanged:: 1.3.0\n\n The resulting dtype will reflect the return value of the aggregating function.\n\n >>> s.groupby([1, 1, 2, 2]).agg(lambda x: x.astype(float).min())\n 1 1.0\n 2 3.0\n dtype: float64\n """\n )\n\n @Appender(\n _apply_docs["template"].format(\n input="series", examples=_apply_docs["series_examples"]\n )\n )\n def apply(self, func, *args, **kwargs) -> Series:\n return super().apply(func, *args, **kwargs)\n\n @doc(_agg_template_series, examples=_agg_examples_doc, klass="Series")\n def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):\n relabeling = func is None\n columns = None\n if relabeling:\n columns, func = validate_func_kwargs(kwargs)\n kwargs = {}\n\n if isinstance(func, str):\n if maybe_use_numba(engine) and engine is not None:\n # Not all agg functions support numba, only propagate numba kwargs\n # if user asks for numba, and engine is not None\n # (if engine is None, the called function will handle the case where\n # numba is requested via the global option)\n kwargs["engine"] = engine\n if engine_kwargs is not None:\n kwargs["engine_kwargs"] = engine_kwargs\n return getattr(self, func)(*args, **kwargs)\n\n elif isinstance(func, abc.Iterable):\n # Catch instances of lists / tuples\n # but not the class list / tuple itself.\n func = maybe_mangle_lambdas(func)\n kwargs["engine"] = engine\n kwargs["engine_kwargs"] = 
engine_kwargs\n ret = self._aggregate_multiple_funcs(func, *args, **kwargs)\n if relabeling:\n # columns is not narrowed by mypy from relabeling flag\n assert columns is not None # for mypy\n ret.columns = columns\n if not self.as_index:\n ret = ret.reset_index()\n return ret\n\n else:\n cyfunc = com.get_cython_func(func)\n if cyfunc and not args and not kwargs:\n warn_alias_replacement(self, func, cyfunc)\n return getattr(self, cyfunc)()\n\n if maybe_use_numba(engine):\n return self._aggregate_with_numba(\n func, *args, engine_kwargs=engine_kwargs, **kwargs\n )\n\n if self.ngroups == 0:\n # e.g. test_evaluate_with_empty_groups without any groups to\n # iterate over, we have no output on which to do dtype\n # inference. We default to using the existing dtype.\n # xref GH#51445\n obj = self._obj_with_exclusions\n return self.obj._constructor(\n [],\n name=self.obj.name,\n index=self._grouper.result_index,\n dtype=obj.dtype,\n )\n\n if self._grouper.nkeys > 1:\n return self._python_agg_general(func, *args, **kwargs)\n\n try:\n return self._python_agg_general(func, *args, **kwargs)\n except KeyError:\n # KeyError raised in test_groupby.test_basic is bc the func does\n # a dictionary lookup on group.name, but group name is not\n # pinned in _python_agg_general, only in _aggregate_named\n result = self._aggregate_named(func, *args, **kwargs)\n\n warnings.warn(\n "Pinning the groupby key to each group in "\n f"{type(self).__name__}.agg is deprecated, and cases that "\n "relied on it will raise in a future version. 
"\n "If your operation requires utilizing the groupby keys, "\n "iterate over the groupby object instead.",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n\n # result is a dict whose keys are the elements of result_index\n result = Series(result, index=self._grouper.result_index)\n result = self._wrap_aggregated_output(result)\n return result\n\n agg = aggregate\n\n def _python_agg_general(self, func, *args, **kwargs):\n orig_func = func\n func = com.is_builtin_func(func)\n if orig_func != func:\n alias = com._builtin_table_alias[func]\n warn_alias_replacement(self, orig_func, alias)\n f = lambda x: func(x, *args, **kwargs)\n\n obj = self._obj_with_exclusions\n result = self._grouper.agg_series(obj, f)\n res = obj._constructor(result, name=obj.name)\n return self._wrap_aggregated_output(res)\n\n def _aggregate_multiple_funcs(self, arg, *args, **kwargs) -> DataFrame:\n if isinstance(arg, dict):\n if self.as_index:\n # GH 15931\n raise SpecificationError("nested renamer is not supported")\n else:\n # GH#50684 - This accidentally worked in 1.x\n msg = (\n "Passing a dictionary to SeriesGroupBy.agg is deprecated "\n "and will raise in a future version of pandas. 
Pass a list "\n "of aggregations instead."\n )\n warnings.warn(\n message=msg,\n category=FutureWarning,\n stacklevel=find_stack_level(),\n )\n arg = list(arg.items())\n elif any(isinstance(x, (tuple, list)) for x in arg):\n arg = [(x, x) if not isinstance(x, (tuple, list)) else x for x in arg]\n else:\n # list of functions / function names\n columns = (com.get_callable_name(f) or f for f in arg)\n arg = zip(columns, arg)\n\n results: dict[base.OutputKey, DataFrame | Series] = {}\n with com.temp_setattr(self, "as_index", True):\n # Combine results using the index, need to adjust index after\n # if as_index=False (GH#50724)\n for idx, (name, func) in enumerate(arg):\n key = base.OutputKey(label=name, position=idx)\n results[key] = self.aggregate(func, *args, **kwargs)\n\n if any(isinstance(x, DataFrame) for x in results.values()):\n from pandas import concat\n\n res_df = concat(\n results.values(), axis=1, keys=[key.label for key in results]\n )\n return res_df\n\n indexed_output = {key.position: val for key, val in results.items()}\n output = self.obj._constructor_expanddim(indexed_output, index=None)\n output.columns = Index(key.label for key in results)\n\n return output\n\n def _wrap_applied_output(\n self,\n data: Series,\n values: list[Any],\n not_indexed_same: bool = False,\n is_transform: bool = False,\n ) -> DataFrame | Series:\n """\n Wrap the output of SeriesGroupBy.apply into the expected result.\n\n Parameters\n ----------\n data : Series\n Input data for groupby operation.\n values : List[Any]\n Applied output for each group.\n not_indexed_same : bool, default False\n Whether the applied outputs are not indexed the same as the group axes.\n\n Returns\n -------\n DataFrame or Series\n """\n if len(values) == 0:\n # GH #6265\n if is_transform:\n # GH#47787 see test_group_on_empty_multiindex\n res_index = data.index\n else:\n res_index = self._grouper.result_index\n\n return self.obj._constructor(\n [],\n name=self.obj.name,\n index=res_index,\n 
dtype=data.dtype,\n )\n assert values is not None\n\n if isinstance(values[0], dict):\n # GH #823 #24880\n index = self._grouper.result_index\n res_df = self.obj._constructor_expanddim(values, index=index)\n res_df = self._reindex_output(res_df)\n # if self.observed is False,\n # keep all-NaN rows created while re-indexing\n res_ser = res_df.stack(future_stack=True)\n res_ser.name = self.obj.name\n return res_ser\n elif isinstance(values[0], (Series, DataFrame)):\n result = self._concat_objects(\n values,\n not_indexed_same=not_indexed_same,\n is_transform=is_transform,\n )\n if isinstance(result, Series):\n result.name = self.obj.name\n if not self.as_index and not_indexed_same:\n result = self._insert_inaxis_grouper(result)\n result.index = default_index(len(result))\n return result\n else:\n # GH #6265 #24880\n result = self.obj._constructor(\n data=values, index=self._grouper.result_index, name=self.obj.name\n )\n if not self.as_index:\n result = self._insert_inaxis_grouper(result)\n result.index = default_index(len(result))\n return self._reindex_output(result)\n\n def _aggregate_named(self, func, *args, **kwargs):\n # Note: this is very similar to _aggregate_series_pure_python,\n # but that does not pin group.name\n result = {}\n initialized = False\n\n for name, group in self._grouper.get_iterator(\n self._obj_with_exclusions, axis=self.axis\n ):\n # needed for pandas/tests/groupby/test_groupby.py::test_basic_aggregations\n object.__setattr__(group, "name", name)\n\n output = func(group, *args, **kwargs)\n output = ops.extract_result(output)\n if not initialized:\n # We only do this validation on the first iteration\n ops.check_result_array(output, group.dtype)\n initialized = True\n result[name] = output\n\n return result\n\n __examples_series_doc = dedent(\n """\n >>> ser = pd.Series([390.0, 350.0, 30.0, 20.0],\n ... index=["Falcon", "Falcon", "Parrot", "Parrot"],\n ... 
name="Max Speed")\n >>> grouped = ser.groupby([1, 1, 2, 2])\n >>> grouped.transform(lambda x: (x - x.mean()) / x.std())\n Falcon 0.707107\n Falcon -0.707107\n Parrot 0.707107\n Parrot -0.707107\n Name: Max Speed, dtype: float64\n\n Broadcast result of the transformation\n\n >>> grouped.transform(lambda x: x.max() - x.min())\n Falcon 40.0\n Falcon 40.0\n Parrot 10.0\n Parrot 10.0\n Name: Max Speed, dtype: float64\n\n >>> grouped.transform("mean")\n Falcon 370.0\n Falcon 370.0\n Parrot 25.0\n Parrot 25.0\n Name: Max Speed, dtype: float64\n\n .. versionchanged:: 1.3.0\n\n The resulting dtype will reflect the return value of the passed ``func``,\n for example:\n\n >>> grouped.transform(lambda x: x.astype(int).max())\n Falcon 390\n Falcon 390\n Parrot 30\n Parrot 30\n Name: Max Speed, dtype: int64\n """\n )\n\n @Substitution(klass="Series", example=__examples_series_doc)\n @Appender(_transform_template)\n def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):\n return self._transform(\n func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs\n )\n\n def _cython_transform(\n self, how: str, numeric_only: bool = False, axis: AxisInt = 0, **kwargs\n ):\n assert axis == 0 # handled by caller\n\n obj = self._obj_with_exclusions\n\n try:\n result = self._grouper._cython_operation(\n "transform", obj._values, how, axis, **kwargs\n )\n except NotImplementedError as err:\n # e.g. 
test_groupby_raises_string\n raise TypeError(f"{how} is not supported for {obj.dtype} dtype") from err\n\n return obj._constructor(result, index=self.obj.index, name=obj.name)\n\n def _transform_general(\n self, func: Callable, engine, engine_kwargs, *args, **kwargs\n ) -> Series:\n """\n Transform with a callable `func`.\n """\n if maybe_use_numba(engine):\n return self._transform_with_numba(\n func, *args, engine_kwargs=engine_kwargs, **kwargs\n )\n assert callable(func)\n klass = type(self.obj)\n\n results = []\n for name, group in self._grouper.get_iterator(\n self._obj_with_exclusions, axis=self.axis\n ):\n # this setattr is needed for test_transform_lambda_with_datetimetz\n object.__setattr__(group, "name", name)\n res = func(group, *args, **kwargs)\n\n results.append(klass(res, index=group.index))\n\n # check for empty "results" to avoid concat ValueError\n if results:\n from pandas.core.reshape.concat import concat\n\n concatenated = concat(results)\n result = self._set_result_index_ordered(concatenated)\n else:\n result = self.obj._constructor(dtype=np.float64)\n\n result.name = self.obj.name\n return result\n\n def filter(self, func, dropna: bool = True, *args, **kwargs):\n """\n Filter elements from groups that don't satisfy a criterion.\n\n Elements from groups are filtered if they do not satisfy the\n boolean criterion specified by func.\n\n Parameters\n ----------\n func : function\n Criterion to apply to each group. Should return True or False.\n dropna : bool\n Drop groups that do not pass the filter. True by default; if False,\n groups that evaluate False are filled with NaNs.\n\n Returns\n -------\n Series\n\n Notes\n -----\n Functions that mutate the passed object can produce unexpected\n behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`\n for more details.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',\n ... 'foo', 'bar'],\n ... 'B' : [1, 2, 3, 4, 5, 6],\n ... 
'C' : [2.0, 5., 8., 1., 2., 9.]})\n >>> grouped = df.groupby('A')\n >>> df.groupby('A').B.filter(lambda x: x.mean() > 3.)\n 1 2\n 3 4\n 5 6\n Name: B, dtype: int64\n """\n if isinstance(func, str):\n wrapper = lambda x: getattr(x, func)(*args, **kwargs)\n else:\n wrapper = lambda x: func(x, *args, **kwargs)\n\n # Interpret np.nan as False.\n def true_and_notna(x) -> bool:\n b = wrapper(x)\n return notna(b) and b\n\n try:\n indices = [\n self._get_index(name)\n for name, group in self._grouper.get_iterator(\n self._obj_with_exclusions, axis=self.axis\n )\n if true_and_notna(group)\n ]\n except (ValueError, TypeError) as err:\n raise TypeError("the filter must return a boolean result") from err\n\n filtered = self._apply_filter(indices, dropna)\n return filtered\n\n def nunique(self, dropna: bool = True) -> Series | DataFrame:\n """\n Return number of unique elements in the group.\n\n Returns\n -------\n Series\n Number of unique values within each group.\n\n Examples\n --------\n For SeriesGroupby:\n\n >>> lst = ['a', 'a', 'b', 'b']\n >>> ser = pd.Series([1, 2, 3, 3], index=lst)\n >>> ser\n a 1\n a 2\n b 3\n b 3\n dtype: int64\n >>> ser.groupby(level=0).nunique()\n a 2\n b 1\n dtype: int64\n\n For Resampler:\n\n >>> ser = pd.Series([1, 2, 3, 3], index=pd.DatetimeIndex(\n ... 
['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))\n >>> ser\n 2023-01-01 1\n 2023-01-15 2\n 2023-02-01 3\n 2023-02-15 3\n dtype: int64\n >>> ser.resample('MS').nunique()\n 2023-01-01 2\n 2023-02-01 1\n Freq: MS, dtype: int64\n """\n ids, _, ngroups = self._grouper.group_info\n val = self.obj._values\n codes, uniques = algorithms.factorize(val, use_na_sentinel=dropna, sort=False)\n\n if self._grouper.has_dropped_na:\n mask = ids >= 0\n ids = ids[mask]\n codes = codes[mask]\n\n group_index = get_group_index(\n labels=[ids, codes],\n shape=(ngroups, len(uniques)),\n sort=False,\n xnull=dropna,\n )\n\n if dropna:\n mask = group_index >= 0\n if (~mask).any():\n ids = ids[mask]\n group_index = group_index[mask]\n\n mask = duplicated(group_index, "first")\n res = np.bincount(ids[~mask], minlength=ngroups)\n res = ensure_int64(res)\n\n ri = self._grouper.result_index\n result: Series | DataFrame = self.obj._constructor(\n res, index=ri, name=self.obj.name\n )\n if not self.as_index:\n result = self._insert_inaxis_grouper(result)\n result.index = default_index(len(result))\n return self._reindex_output(result, fill_value=0)\n\n @doc(Series.describe)\n def describe(self, percentiles=None, include=None, exclude=None) -> Series:\n return super().describe(\n percentiles=percentiles, include=include, exclude=exclude\n )\n\n def value_counts(\n self,\n normalize: bool = False,\n sort: bool = True,\n ascending: bool = False,\n bins=None,\n dropna: bool = True,\n ) -> Series | DataFrame:\n name = "proportion" if normalize else "count"\n\n if bins is None:\n result = self._value_counts(\n normalize=normalize, sort=sort, ascending=ascending, dropna=dropna\n )\n result.name = name\n return result\n\n from pandas.core.reshape.merge import get_join_indexers\n from pandas.core.reshape.tile import cut\n\n ids, _, _ = self._grouper.group_info\n val = self.obj._values\n\n index_names = self._grouper.names + [self.obj.name]\n\n if isinstance(val.dtype, CategoricalDtype) or (\n bins 
is not None and not np.iterable(bins)\n ):\n # scalar bins cannot be done at top level\n # in a backward compatible way\n # GH38672 relates to categorical dtype\n ser = self.apply(\n Series.value_counts,\n normalize=normalize,\n sort=sort,\n ascending=ascending,\n bins=bins,\n )\n ser.name = name\n ser.index.names = index_names\n return ser\n\n # groupby removes null keys from groupings\n mask = ids != -1\n ids, val = ids[mask], val[mask]\n\n lab: Index | np.ndarray\n if bins is None:\n lab, lev = algorithms.factorize(val, sort=True)\n llab = lambda lab, inc: lab[inc]\n else:\n # lab is a Categorical with categories an IntervalIndex\n cat_ser = cut(Series(val, copy=False), bins, include_lowest=True)\n cat_obj = cast("Categorical", cat_ser._values)\n lev = cat_obj.categories\n lab = lev.take(\n cat_obj.codes,\n allow_fill=True,\n fill_value=lev._na_value,\n )\n llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]\n\n if isinstance(lab.dtype, IntervalDtype):\n # TODO: should we do this inside II?\n lab_interval = cast(Interval, lab)\n\n sorter = np.lexsort((lab_interval.left, lab_interval.right, ids))\n else:\n sorter = np.lexsort((lab, ids))\n\n ids, lab = ids[sorter], lab[sorter]\n\n # group boundaries are where group ids change\n idchanges = 1 + np.nonzero(ids[1:] != ids[:-1])[0]\n idx = np.r_[0, idchanges]\n if not len(ids):\n idx = idchanges\n\n # new values are where sorted labels change\n lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))\n inc = np.r_[True, lchanges]\n if not len(val):\n inc = lchanges\n inc[idx] = True # group boundaries are also new values\n out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts\n\n # num. 
of times each group should be repeated
        # rep repeats per-group components: repeat counts come from summing the
        # "new value" indicator over each group's slice (add.reduceat at idx).
        rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))

        # multi-index components
        codes = self._grouper.reconstructed_codes
        codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)]
        levels = [ping._group_index for ping in self._grouper.groupings] + [lev]

        if dropna:
            mask = codes[-1] != -1
            if mask.all():
                # nothing to drop; clear the flag so later branches skip masking
                dropna = False
            else:
                out, codes = out[mask], [level_codes[mask] for level_codes in codes]

        if normalize:
            out = out.astype("float")
            # d = size of each group (diff of group-boundary positions)
            d = np.diff(np.r_[idx, len(ids)])
            if dropna:
                # subtract the NA observations (lab == -1) from group sizes
                m = ids[lab == -1]
                np.add.at(d, m, -1)
                acc = rep(d)[mask]
            else:
                acc = rep(d)
            out /= acc

        if sort and bins is None:
            cat = ids[inc][mask] if dropna else ids[inc]
            sorter = np.lexsort((out if ascending else -out, cat))
            out, codes[-1] = out[sorter], codes[-1][sorter]

        if bins is not None:
            # for compat. with libgroupby.value_counts need to ensure every
            # bin is present at every index level, null filled with zeros
            diff = np.zeros(len(out), dtype="bool")
            for level_codes in codes[:-1]:
                diff |= np.r_[True, level_codes[1:] != level_codes[:-1]]

            ncat, nbin = diff.sum(), len(levels[-1])

            left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)]

            right = [diff.cumsum() - 1, codes[-1]]

            # error: Argument 1 to "get_join_indexers" has incompatible type
            # "List[ndarray[Any, Any]]"; expected "List[Union[Union[ExtensionArray,
            # ndarray[Any, Any]], Index, Series]]
            _, idx = get_join_indexers(
                left, right, sort=False, how="left"  # type: ignore[arg-type]
            )
            if idx is not None:
                # left join against the full (category x bin) grid; missing
                # combinations get a zero count
                out = np.where(idx != -1, out[idx], 0)

            if sort:
                sorter = np.lexsort((out if ascending else -out, left[0]))
                out, left[-1] = out[sorter], left[-1][sorter]

            # build the multi-index w/ full levels
            def build_codes(lev_codes: np.ndarray) -> np.ndarray:
                return np.repeat(lev_codes[diff], nbin)

            codes = [build_codes(lev_codes) for lev_codes in codes[:-1]]
            
codes.append(left[-1])\n\n mi = MultiIndex(\n levels=levels, codes=codes, names=index_names, verify_integrity=False\n )\n\n if is_integer_dtype(out.dtype):\n out = ensure_int64(out)\n result = self.obj._constructor(out, index=mi, name=name)\n if not self.as_index:\n result = result.reset_index()\n return result\n\n def fillna(\n self,\n value: object | ArrayLike | None = None,\n method: FillnaOptions | None = None,\n axis: Axis | None | lib.NoDefault = lib.no_default,\n inplace: bool = False,\n limit: int | None = None,\n downcast: dict | None | lib.NoDefault = lib.no_default,\n ) -> Series | None:\n """\n Fill NA/NaN values using the specified method within groups.\n\n .. deprecated:: 2.2.0\n This method is deprecated and will be removed in a future version.\n Use the :meth:`.SeriesGroupBy.ffill` or :meth:`.SeriesGroupBy.bfill`\n for forward or backward filling instead. If you want to fill with a\n single value, use :meth:`Series.fillna` instead.\n\n Parameters\n ----------\n value : scalar, dict, Series, or DataFrame\n Value to use to fill holes (e.g. 0), alternately a\n dict/Series/DataFrame of values specifying which value to use for\n each index (for a Series) or column (for a DataFrame). Values not\n in the dict/Series/DataFrame will not be filled. This value cannot\n be a list. Users wanting to use the ``value`` argument and not ``method``\n should prefer :meth:`.Series.fillna` as this\n will produce the same result and be more performant.\n method : {{'bfill', 'ffill', None}}, default None\n Method to use for filling holes. ``'ffill'`` will propagate\n the last valid observation forward within a group.\n ``'bfill'`` will use next valid observation to fill the gap.\n axis : {0 or 'index', 1 or 'columns'}\n Unused, only for compatibility with :meth:`DataFrameGroupBy.fillna`.\n inplace : bool, default False\n Broken. 
Do not set to True.\n limit : int, default None\n If method is specified, this is the maximum number of consecutive\n NaN values to forward/backward fill within a group. In other words,\n if there is a gap with more than this number of consecutive NaNs,\n it will only be partially filled. If method is not specified, this is the\n maximum number of entries along the entire axis where NaNs will be\n filled. Must be greater than 0 if not None.\n downcast : dict, default is None\n A dict of item->dtype of what to downcast if possible,\n or the string 'infer' which will try to downcast to an appropriate\n equal type (e.g. float64 to int64 if possible).\n\n Returns\n -------\n Series\n Object with missing values filled within groups.\n\n See Also\n --------\n ffill : Forward fill values within a group.\n bfill : Backward fill values within a group.\n\n Examples\n --------\n For SeriesGroupBy:\n\n >>> lst = ['cat', 'cat', 'cat', 'mouse', 'mouse']\n >>> ser = pd.Series([1, None, None, 2, None], index=lst)\n >>> ser\n cat 1.0\n cat NaN\n cat NaN\n mouse 2.0\n mouse NaN\n dtype: float64\n >>> ser.groupby(level=0).fillna(0, limit=1)\n cat 1.0\n cat 0.0\n cat NaN\n mouse 2.0\n mouse 0.0\n dtype: float64\n """\n warnings.warn(\n f"{type(self).__name__}.fillna is deprecated and "\n "will be removed in a future version. Use obj.ffill() or obj.bfill() "\n "for forward or backward filling instead. 
If you want to fill with a "\n f"single value, use {type(self.obj).__name__}.fillna instead",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n result = self._op_via_apply(\n "fillna",\n value=value,\n method=method,\n axis=axis,\n inplace=inplace,\n limit=limit,\n downcast=downcast,\n )\n return result\n\n def take(\n self,\n indices: TakeIndexer,\n axis: Axis | lib.NoDefault = lib.no_default,\n **kwargs,\n ) -> Series:\n """\n Return the elements in the given *positional* indices in each group.\n\n This means that we are not indexing according to actual values in\n the index attribute of the object. We are indexing according to the\n actual position of the element in the object.\n\n If a requested index does not exist for some group, this method will raise.\n To get similar behavior that ignores indices that don't exist, see\n :meth:`.SeriesGroupBy.nth`.\n\n Parameters\n ----------\n indices : array-like\n An array of ints indicating which positions to take in each group.\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n The axis on which to select elements. ``0`` means that we are\n selecting rows, ``1`` means that we are selecting columns.\n For `SeriesGroupBy` this parameter is unused and defaults to 0.\n\n .. deprecated:: 2.1.0\n For axis=1, operate on the underlying object instead. Otherwise\n the axis keyword is not necessary.\n\n **kwargs\n For compatibility with :meth:`numpy.take`. Has no effect on the\n output.\n\n Returns\n -------\n Series\n A Series containing the elements taken from each group.\n\n See Also\n --------\n Series.take : Take elements from a Series along an axis.\n Series.loc : Select a subset of a DataFrame by labels.\n Series.iloc : Select a subset of a DataFrame by positions.\n numpy.take : Take elements from an array along an axis.\n SeriesGroupBy.nth : Similar to take, won't raise if indices don't exist.\n\n Examples\n --------\n >>> df = pd.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... 
('lion', 'mammal', 80.5),\n ... ('monkey', 'mammal', np.nan),\n ... ('rabbit', 'mammal', 15.0)],\n ... columns=['name', 'class', 'max_speed'],\n ... index=[4, 3, 2, 1, 0])\n >>> df\n name class max_speed\n 4 falcon bird 389.0\n 3 parrot bird 24.0\n 2 lion mammal 80.5\n 1 monkey mammal NaN\n 0 rabbit mammal 15.0\n >>> gb = df["name"].groupby([1, 1, 2, 2, 2])\n\n Take elements at positions 0 and 1 along the axis 0 in each group (default).\n\n >>> gb.take([0, 1])\n 1 4 falcon\n 3 parrot\n 2 2 lion\n 1 monkey\n Name: name, dtype: object\n\n We may take elements using negative integers for positive indices,\n starting from the end of the object, just like with Python lists.\n\n >>> gb.take([-1, -2])\n 1 3 parrot\n 4 falcon\n 2 0 rabbit\n 1 monkey\n Name: name, dtype: object\n """\n result = self._op_via_apply("take", indices=indices, axis=axis, **kwargs)\n return result\n\n def skew(\n self,\n axis: Axis | lib.NoDefault = lib.no_default,\n skipna: bool = True,\n numeric_only: bool = False,\n **kwargs,\n ) -> Series:\n """\n Return unbiased skew within groups.\n\n Normalized by N-1.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n Axis for the function to be applied on.\n This parameter is only for compatibility with DataFrame and is unused.\n\n .. deprecated:: 2.1.0\n For axis=1, operate on the underlying object instead. Otherwise\n the axis keyword is not necessary.\n\n skipna : bool, default True\n Exclude NA/null values when computing the result.\n\n numeric_only : bool, default False\n Include only float, int, boolean columns. Not implemented for Series.\n\n **kwargs\n Additional keyword arguments to be passed to the function.\n\n Returns\n -------\n Series\n\n See Also\n --------\n Series.skew : Return unbiased skew over requested axis.\n\n Examples\n --------\n >>> ser = pd.Series([390., 350., 357., np.nan, 22., 20., 30.],\n ... index=['Falcon', 'Falcon', 'Falcon', 'Falcon',\n ... 'Parrot', 'Parrot', 'Parrot'],\n ... 
name="Max Speed")\n >>> ser\n Falcon 390.0\n Falcon 350.0\n Falcon 357.0\n Falcon NaN\n Parrot 22.0\n Parrot 20.0\n Parrot 30.0\n Name: Max Speed, dtype: float64\n >>> ser.groupby(level=0).skew()\n Falcon 1.525174\n Parrot 1.457863\n Name: Max Speed, dtype: float64\n >>> ser.groupby(level=0).skew(skipna=False)\n Falcon NaN\n Parrot 1.457863\n Name: Max Speed, dtype: float64\n """\n if axis is lib.no_default:\n axis = 0\n\n if axis != 0:\n result = self._op_via_apply(\n "skew",\n axis=axis,\n skipna=skipna,\n numeric_only=numeric_only,\n **kwargs,\n )\n return result\n\n def alt(obj):\n # This should not be reached since the cython path should raise\n # TypeError and not NotImplementedError.\n raise TypeError(f"'skew' is not supported for dtype={obj.dtype}")\n\n return self._cython_agg_general(\n "skew", alt=alt, skipna=skipna, numeric_only=numeric_only, **kwargs\n )\n\n @property\n @doc(Series.plot.__doc__)\n def plot(self) -> GroupByPlot:\n result = GroupByPlot(self)\n return result\n\n @doc(Series.nlargest.__doc__)\n def nlargest(\n self, n: int = 5, keep: Literal["first", "last", "all"] = "first"\n ) -> Series:\n f = partial(Series.nlargest, n=n, keep=keep)\n data = self._obj_with_exclusions\n # Don't change behavior if result index happens to be the same, i.e.\n # already ordered and n >= all group sizes.\n result = self._python_apply_general(f, data, not_indexed_same=True)\n return result\n\n @doc(Series.nsmallest.__doc__)\n def nsmallest(\n self, n: int = 5, keep: Literal["first", "last", "all"] = "first"\n ) -> Series:\n f = partial(Series.nsmallest, n=n, keep=keep)\n data = self._obj_with_exclusions\n # Don't change behavior if result index happens to be the same, i.e.\n # already ordered and n >= all group sizes.\n result = self._python_apply_general(f, data, not_indexed_same=True)\n return result\n\n @doc(Series.idxmin.__doc__)\n def idxmin(\n self, axis: Axis | lib.NoDefault = lib.no_default, skipna: bool = True\n ) -> Series:\n return 
self._idxmax_idxmin("idxmin", axis=axis, skipna=skipna)\n\n @doc(Series.idxmax.__doc__)\n def idxmax(\n self, axis: Axis | lib.NoDefault = lib.no_default, skipna: bool = True\n ) -> Series:\n return self._idxmax_idxmin("idxmax", axis=axis, skipna=skipna)\n\n @doc(Series.corr.__doc__)\n def corr(\n self,\n other: Series,\n method: CorrelationMethod = "pearson",\n min_periods: int | None = None,\n ) -> Series:\n result = self._op_via_apply(\n "corr", other=other, method=method, min_periods=min_periods\n )\n return result\n\n @doc(Series.cov.__doc__)\n def cov(\n self, other: Series, min_periods: int | None = None, ddof: int | None = 1\n ) -> Series:\n result = self._op_via_apply(\n "cov", other=other, min_periods=min_periods, ddof=ddof\n )\n return result\n\n @property\n def is_monotonic_increasing(self) -> Series:\n """\n Return whether each group's values are monotonically increasing.\n\n Returns\n -------\n Series\n\n Examples\n --------\n >>> s = pd.Series([2, 1, 3, 4], index=['Falcon', 'Falcon', 'Parrot', 'Parrot'])\n >>> s.groupby(level=0).is_monotonic_increasing\n Falcon False\n Parrot True\n dtype: bool\n """\n return self.apply(lambda ser: ser.is_monotonic_increasing)\n\n @property\n def is_monotonic_decreasing(self) -> Series:\n """\n Return whether each group's values are monotonically decreasing.\n\n Returns\n -------\n Series\n\n Examples\n --------\n >>> s = pd.Series([2, 1, 3, 4], index=['Falcon', 'Falcon', 'Parrot', 'Parrot'])\n >>> s.groupby(level=0).is_monotonic_decreasing\n Falcon True\n Parrot False\n dtype: bool\n """\n return self.apply(lambda ser: ser.is_monotonic_decreasing)\n\n @doc(Series.hist.__doc__)\n def hist(\n self,\n by=None,\n ax=None,\n grid: bool = True,\n xlabelsize: int | None = None,\n xrot: float | None = None,\n ylabelsize: int | None = None,\n yrot: float | None = None,\n figsize: tuple[int, int] | None = None,\n bins: int | Sequence[int] = 10,\n backend: str | None = None,\n legend: bool = False,\n **kwargs,\n ):\n result = 
self._op_via_apply(\n "hist",\n by=by,\n ax=ax,\n grid=grid,\n xlabelsize=xlabelsize,\n xrot=xrot,\n ylabelsize=ylabelsize,\n yrot=yrot,\n figsize=figsize,\n bins=bins,\n backend=backend,\n legend=legend,\n **kwargs,\n )\n return result\n\n @property\n @doc(Series.dtype.__doc__)\n def dtype(self) -> Series:\n return self.apply(lambda ser: ser.dtype)\n\n def unique(self) -> Series:\n """\n Return unique values for each group.\n\n It returns unique values for each of the grouped values. Returned in\n order of appearance. Hash table-based unique, therefore does NOT sort.\n\n Returns\n -------\n Series\n Unique values for each of the grouped values.\n\n See Also\n --------\n Series.unique : Return unique values of Series object.\n\n Examples\n --------\n >>> df = pd.DataFrame([('Chihuahua', 'dog', 6.1),\n ... ('Beagle', 'dog', 15.2),\n ... ('Chihuahua', 'dog', 6.9),\n ... ('Persian', 'cat', 9.2),\n ... ('Chihuahua', 'dog', 7),\n ... ('Persian', 'cat', 8.8)],\n ... columns=['breed', 'animal', 'height_in'])\n >>> df\n breed animal height_in\n 0 Chihuahua dog 6.1\n 1 Beagle dog 15.2\n 2 Chihuahua dog 6.9\n 3 Persian cat 9.2\n 4 Chihuahua dog 7.0\n 5 Persian cat 8.8\n >>> ser = df.groupby('animal')['breed'].unique()\n >>> ser\n animal\n cat [Persian]\n dog [Chihuahua, Beagle]\n Name: breed, dtype: object\n """\n result = self._op_via_apply("unique")\n return result\n\n\nclass DataFrameGroupBy(GroupBy[DataFrame]):\n _agg_examples_doc = dedent(\n """\n Examples\n --------\n >>> data = {"A": [1, 1, 2, 2],\n ... "B": [1, 2, 3, 4],\n ... 
"C": [0.362838, 0.227877, 1.267767, -0.562860]}\n >>> df = pd.DataFrame(data)\n >>> df\n A B C\n 0 1 1 0.362838\n 1 1 2 0.227877\n 2 2 3 1.267767\n 3 2 4 -0.562860\n\n The aggregation is for each column.\n\n >>> df.groupby('A').agg('min')\n B C\n A\n 1 1 0.227877\n 2 3 -0.562860\n\n Multiple aggregations\n\n >>> df.groupby('A').agg(['min', 'max'])\n B C\n min max min max\n A\n 1 1 2 0.227877 0.362838\n 2 3 4 -0.562860 1.267767\n\n Select a column for aggregation\n\n >>> df.groupby('A').B.agg(['min', 'max'])\n min max\n A\n 1 1 2\n 2 3 4\n\n User-defined function for aggregation\n\n >>> df.groupby('A').agg(lambda x: sum(x) + 2)\n B C\n A\n 1 5 2.590715\n 2 9 2.704907\n\n Different aggregations per column\n\n >>> df.groupby('A').agg({'B': ['min', 'max'], 'C': 'sum'})\n B C\n min max sum\n A\n 1 1 2 0.590715\n 2 3 4 0.704907\n\n To control the output names with different aggregations per column,\n pandas supports "named aggregation"\n\n >>> df.groupby("A").agg(\n ... b_min=pd.NamedAgg(column="B", aggfunc="min"),\n ... c_sum=pd.NamedAgg(column="C", aggfunc="sum")\n ... )\n b_min c_sum\n A\n 1 1 0.590715\n 2 3 0.704907\n\n - The keywords are the *output* column names\n - The values are tuples whose first element is the column to select\n and the second element is the aggregation to apply to that column.\n Pandas provides the ``pandas.NamedAgg`` namedtuple with the fields\n ``['column', 'aggfunc']`` to make it clearer what the arguments are.\n As usual, the aggregation can be a callable or a string alias.\n\n See :ref:`groupby.aggregate.named` for more.\n\n .. 
versionchanged:: 1.3.0\n\n The resulting dtype will reflect the return value of the aggregating function.\n\n >>> df.groupby("A")[["B"]].agg(lambda x: x.astype(float).min())\n B\n A\n 1 1.0\n 2 3.0\n """\n )\n\n @doc(_agg_template_frame, examples=_agg_examples_doc, klass="DataFrame")\n def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):\n relabeling, func, columns, order = reconstruct_func(func, **kwargs)\n func = maybe_mangle_lambdas(func)\n\n if maybe_use_numba(engine):\n # Not all agg functions support numba, only propagate numba kwargs\n # if user asks for numba\n kwargs["engine"] = engine\n kwargs["engine_kwargs"] = engine_kwargs\n\n op = GroupByApply(self, func, args=args, kwargs=kwargs)\n result = op.agg()\n if not is_dict_like(func) and result is not None:\n # GH #52849\n if not self.as_index and is_list_like(func):\n return result.reset_index()\n else:\n return result\n elif relabeling:\n # this should be the only (non-raising) case with relabeling\n # used reordered index of columns\n result = cast(DataFrame, result)\n result = result.iloc[:, order]\n result = cast(DataFrame, result)\n # error: Incompatible types in assignment (expression has type\n # "Optional[List[str]]", variable has type\n # "Union[Union[Union[ExtensionArray, ndarray[Any, Any]],\n # Index, Series], Sequence[Any]]")\n result.columns = columns # type: ignore[assignment]\n\n if result is None:\n # Remove the kwargs we inserted\n # (already stored in engine, engine_kwargs arguments)\n if "engine" in kwargs:\n del kwargs["engine"]\n del kwargs["engine_kwargs"]\n # at this point func is not a str, list-like, dict-like,\n # or a known callable(e.g. 
sum)\n if maybe_use_numba(engine):\n return self._aggregate_with_numba(\n func, *args, engine_kwargs=engine_kwargs, **kwargs\n )\n # grouper specific aggregations\n if self._grouper.nkeys > 1:\n # test_groupby_as_index_series_scalar gets here with 'not self.as_index'\n return self._python_agg_general(func, *args, **kwargs)\n elif args or kwargs:\n # test_pass_args_kwargs gets here (with and without as_index)\n # can't return early\n result = self._aggregate_frame(func, *args, **kwargs)\n\n elif self.axis == 1:\n # _aggregate_multiple_funcs does not allow self.axis == 1\n # Note: axis == 1 precludes 'not self.as_index', see __init__\n result = self._aggregate_frame(func)\n return result\n\n else:\n # try to treat as if we are passing a list\n gba = GroupByApply(self, [func], args=(), kwargs={})\n try:\n result = gba.agg()\n\n except ValueError as err:\n if "No objects to concatenate" not in str(err):\n raise\n # _aggregate_frame can fail with e.g. func=Series.mode,\n # where it expects 1D values but would be getting 2D values\n # In other tests, using aggregate_frame instead of GroupByApply\n # would give correct values but incorrect dtypes\n # object vs float64 in test_cython_agg_empty_buckets\n # float64 vs int64 in test_category_order_apply\n result = self._aggregate_frame(func)\n\n else:\n # GH#32040, GH#35246\n # e.g. test_groupby_as_index_select_column_sum_empty_df\n result = cast(DataFrame, result)\n result.columns = self._obj_with_exclusions.columns.copy()\n\n if not self.as_index:\n result = self._insert_inaxis_grouper(result)\n result.index = default_index(len(result))\n\n return result\n\n agg = aggregate\n\n def _python_agg_general(self, func, *args, **kwargs):\n orig_func = func\n func = com.is_builtin_func(func)\n if orig_func != func:\n alias = com._builtin_table_alias[func]\n warn_alias_replacement(self, orig_func, alias)\n f = lambda x: func(x, *args, **kwargs)\n\n if self.ngroups == 0:\n # e.g. 
test_evaluate_with_empty_groups different path gets different
            # result dtype in empty case.
            return self._python_apply_general(f, self._selected_obj, is_agg=True)

        obj = self._obj_with_exclusions
        if self.axis == 1:
            # axis=1 grouping aggregates over rows; operate on the transpose
            obj = obj.T

        if not len(obj.columns):
            # e.g. test_margins_no_values_no_cols
            return self._python_apply_general(f, self._selected_obj)

        # Aggregate column-by-column; keys are positional to tolerate
        # duplicate column labels, real labels restored below.
        output: dict[int, ArrayLike] = {}
        for idx, (name, ser) in enumerate(obj.items()):
            result = self._grouper.agg_series(ser, f)
            output[idx] = result

        res = self.obj._constructor(output)
        res.columns = obj.columns.copy(deep=False)
        return self._wrap_aggregated_output(res)

    def _aggregate_frame(self, func, *args, **kwargs) -> DataFrame:
        """Apply ``func`` to each group's sub-frame; only valid for a single key."""
        if self._grouper.nkeys != 1:
            raise AssertionError("Number of keys must be 1")

        obj = self._obj_with_exclusions

        result: dict[Hashable, NDFrame | np.ndarray] = {}
        for name, grp_df in self._grouper.get_iterator(obj, self.axis):
            fres = func(grp_df, *args, **kwargs)
            result[name] = fres

        result_index = self._grouper.result_index
        other_ax = obj.axes[1 - self.axis]
        out = self.obj._constructor(result, index=other_ax, columns=result_index)
        if self.axis == 0:
            # constructor placed groups on columns; transpose so groups are rows
            out = out.T

        return out

    def _wrap_applied_output(
        self,
        data: DataFrame,
        values: list,
        not_indexed_same: bool = False,
        is_transform: bool = False,
    ):
        # Assemble the per-group results of an ``apply`` into a DataFrame/Series,
        # dispatching on the type of the first non-None result.
        if len(values) == 0:
            if is_transform:
                # GH#47787 see test_group_on_empty_multiindex
                res_index = data.index
            else:
                res_index = self._grouper.result_index

            result = self.obj._constructor(index=res_index, columns=data.columns)
            result = result.astype(data.dtypes, copy=False)
            return result

        # GH12824
        # using values[0] here breaks test_groupby_apply_none_first
        first_not_none = next(com.not_none(*values), None)

        if first_not_none is None:
            # GH9684 - All values are None, return an empty frame.
            return self.obj._constructor()
        elif isinstance(first_not_none, DataFrame):
            return self._concat_objects(
                
values,
                not_indexed_same=not_indexed_same,
                is_transform=is_transform,
            )

        key_index = self._grouper.result_index if self.as_index else None

        if isinstance(first_not_none, (np.ndarray, Index)):
            # GH#1738: values is list of arrays of unequal lengths
            # fall through to the outer else clause
            # TODO: sure this is right? we used to do this
            # after raising AttributeError above
            # GH 18930
            if not is_hashable(self._selection):
                # error: Need type annotation for "name"
                name = tuple(self._selection)  # type: ignore[var-annotated, arg-type]
            else:
                # error: Incompatible types in assignment
                # (expression has type "Hashable", variable
                # has type "Tuple[Any, ...]")
                name = self._selection  # type: ignore[assignment]
            return self.obj._constructor_sliced(values, index=key_index, name=name)
        elif not isinstance(first_not_none, Series):
            # values are not series or array-like but scalars
            # self._selection not passed through to Series as the
            # result should not take the name of original selection
            # of columns
            if self.as_index:
                return self.obj._constructor_sliced(values, index=key_index)
            else:
                result = self.obj._constructor(values, columns=[self._selection])
                result = self._insert_inaxis_grouper(result)
                return result
        else:
            # values are Series
            return self._wrap_applied_output_series(
                values,
                not_indexed_same,
                first_not_none,
                key_index,
                is_transform,
            )

    def _wrap_applied_output_series(
        self,
        values: list[Series],
        not_indexed_same: bool,
        first_not_none,
        key_index: Index | None,
        is_transform: bool,
    ) -> DataFrame | Series:
        # Replace None results with an empty Series carrying the same axes as the
        # first non-None result, so every element stacks uniformly below.
        kwargs = first_not_none._construct_axes_dict()
        backup = Series(**kwargs)
        values = [x if (x is not None) else backup for x in values]

        all_indexed_same = all_indexes_same(x.index for x in values)

        if not all_indexed_same:
            # GH 8467
            return self._concat_objects(
                values,
                not_indexed_same=True,
                is_transform=is_transform,
            )

        # Combine values
        # vstack+constructor is 
faster than concat and handles MI-columns
        stacked_values = np.vstack([np.asarray(v) for v in values])

        if self.axis == 0:
            index = key_index
            columns = first_not_none.index.copy()
            if columns.name is None:
                # GH6124 - propagate name of Series when it's consistent
                names = {v.name for v in values}
                if len(names) == 1:
                    columns.name = next(iter(names))
        else:
            # axis=1: group labels become the columns, so transpose the stack
            index = first_not_none.index
            columns = key_index
            stacked_values = stacked_values.T

        if stacked_values.dtype == object:
            # We'll have the DataFrame constructor do inference
            stacked_values = stacked_values.tolist()
        result = self.obj._constructor(stacked_values, index=index, columns=columns)

        if not self.as_index:
            result = self._insert_inaxis_grouper(result)

        return self._reindex_output(result)

    def _cython_transform(
        self,
        how: str,
        numeric_only: bool = False,
        axis: AxisInt = 0,
        **kwargs,
    ) -> DataFrame:
        # Dispatch a named transform kernel (``how``) to the cython layer,
        # operating block-wise on the internal manager.
        assert axis == 0  # handled by caller

        # With self.axis == 0, we have multi-block tests
        # e.g. 
test_rank_min_int, test_cython_transform_frame
        # test_transform_numeric_ret
        # With self.axis == 1, _get_data_to_aggregate does a transpose
        # so we always have a single block.
        mgr: Manager2D = self._get_data_to_aggregate(
            numeric_only=numeric_only, name=how
        )

        def arr_func(bvalues: ArrayLike) -> ArrayLike:
            # per-block kernel: run the cython "transform" operation on raw values
            return self._grouper._cython_operation(
                "transform", bvalues, how, 1, **kwargs
            )

        # We could use `mgr.apply` here and not have to set_axis, but
        # we would have to do shape gymnastics for ArrayManager compat
        res_mgr = mgr.grouped_reduce(arr_func)
        res_mgr.set_axis(1, mgr.axes[1])

        res_df = self.obj._constructor_from_mgr(res_mgr, axes=res_mgr.axes)
        res_df = self._maybe_transpose_result(res_df)
        return res_df

    def _transform_general(self, func, engine, engine_kwargs, *args, **kwargs):
        # General (non-cython) transform: apply ``func`` group-by-group, then
        # reassemble in the original row order.
        if maybe_use_numba(engine):
            return self._transform_with_numba(
                func, *args, engine_kwargs=engine_kwargs, **kwargs
            )
        from pandas.core.reshape.concat import concat

        applied = []
        obj = self._obj_with_exclusions
        gen = self._grouper.get_iterator(obj, axis=self.axis)
        fast_path, slow_path = self._define_paths(func, *args, **kwargs)

        # Determine whether to use slow or fast path by evaluating on the first group.
        # Need to handle the case of an empty generator and process the result so that
        # it does not need to be computed again.
        try:
            name, group = next(gen)
        except StopIteration:
            pass
        else:
            # 2023-02-27 No tests broken by disabling this pinning
            object.__setattr__(group, "name", name)
            try:
                path, res = self._choose_path(fast_path, slow_path, group)
            except ValueError as err:
                # e.g. 
test_transform_with_non_scalar_group\n msg = "transform must return a scalar value for each group"\n raise ValueError(msg) from err\n if group.size > 0:\n res = _wrap_transform_general_frame(self.obj, group, res)\n applied.append(res)\n\n # Compute and process with the remaining groups\n for name, group in gen:\n if group.size == 0:\n continue\n # 2023-02-27 No tests broken by disabling this pinning\n object.__setattr__(group, "name", name)\n res = path(group)\n\n res = _wrap_transform_general_frame(self.obj, group, res)\n applied.append(res)\n\n concat_index = obj.columns if self.axis == 0 else obj.index\n other_axis = 1 if self.axis == 0 else 0 # switches between 0 & 1\n concatenated = concat(applied, axis=self.axis, verify_integrity=False)\n concatenated = concatenated.reindex(concat_index, axis=other_axis, copy=False)\n return self._set_result_index_ordered(concatenated)\n\n __examples_dataframe_doc = dedent(\n """\n >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',\n ... 'foo', 'bar'],\n ... 'B' : ['one', 'one', 'two', 'three',\n ... 'two', 'two'],\n ... 'C' : [1, 5, 5, 2, 5, 5],\n ... 'D' : [2.0, 5., 8., 1., 2., 9.]})\n >>> grouped = df.groupby('A')[['C', 'D']]\n >>> grouped.transform(lambda x: (x - x.mean()) / x.std())\n C D\n 0 -1.154701 -0.577350\n 1 0.577350 0.000000\n 2 0.577350 1.154701\n 3 -1.154701 -1.000000\n 4 0.577350 -0.577350\n 5 0.577350 1.000000\n\n Broadcast result of the transformation\n\n >>> grouped.transform(lambda x: x.max() - x.min())\n C D\n 0 4.0 6.0\n 1 3.0 8.0\n 2 4.0 6.0\n 3 3.0 8.0\n 4 4.0 6.0\n 5 3.0 8.0\n\n >>> grouped.transform("mean")\n C D\n 0 3.666667 4.0\n 1 4.000000 5.0\n 2 3.666667 4.0\n 3 4.000000 5.0\n 4 3.666667 4.0\n 5 4.000000 5.0\n\n .. 
versionchanged:: 1.3.0\n\n The resulting dtype will reflect the return value of the passed ``func``,\n for example:\n\n >>> grouped.transform(lambda x: x.astype(int).max())\n C D\n 0 5 8\n 1 5 9\n 2 5 8\n 3 5 9\n 4 5 8\n 5 5 9\n """\n )\n\n @Substitution(klass="DataFrame", example=__examples_dataframe_doc)\n @Appender(_transform_template)\n def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):\n return self._transform(\n func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs\n )\n\n def _define_paths(self, func, *args, **kwargs):\n if isinstance(func, str):\n fast_path = lambda group: getattr(group, func)(*args, **kwargs)\n slow_path = lambda group: group.apply(\n lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis\n )\n else:\n fast_path = lambda group: func(group, *args, **kwargs)\n slow_path = lambda group: group.apply(\n lambda x: func(x, *args, **kwargs), axis=self.axis\n )\n return fast_path, slow_path\n\n def _choose_path(self, fast_path: Callable, slow_path: Callable, group: DataFrame):\n path = slow_path\n res = slow_path(group)\n\n if self.ngroups == 1:\n # no need to evaluate multiple paths when only\n # a single group exists\n return path, res\n\n # if we make it here, test if we can use the fast path\n try:\n res_fast = fast_path(group)\n except AssertionError:\n raise # pragma: no cover\n except Exception:\n # GH#29631 For user-defined function, we can't predict what may be\n # raised; see test_transform.test_transform_fastpath_raises\n return path, res\n\n # verify fast path returns either:\n # a DataFrame with columns equal to group.columns\n # OR a Series with index equal to group.columns\n if isinstance(res_fast, DataFrame):\n if not res_fast.columns.equals(group.columns):\n return path, res\n elif isinstance(res_fast, Series):\n if not res_fast.index.equals(group.columns):\n return path, res\n else:\n return path, res\n\n if res_fast.equals(res):\n path = fast_path\n\n return path, res\n\n def 
filter(self, func, dropna: bool = True, *args, **kwargs):\n """\n Filter elements from groups that don't satisfy a criterion.\n\n Elements from groups are filtered if they do not satisfy the\n boolean criterion specified by func.\n\n Parameters\n ----------\n func : function\n Criterion to apply to each group. Should return True or False.\n dropna : bool\n Drop groups that do not pass the filter. True by default; if False,\n groups that evaluate False are filled with NaNs.\n\n Returns\n -------\n DataFrame\n\n Notes\n -----\n Each subframe is endowed the attribute 'name' in case you need to know\n which group you are working on.\n\n Functions that mutate the passed object can produce unexpected\n behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`\n for more details.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',\n ... 'foo', 'bar'],\n ... 'B' : [1, 2, 3, 4, 5, 6],\n ... 'C' : [2.0, 5., 8., 1., 2., 9.]})\n >>> grouped = df.groupby('A')\n >>> grouped.filter(lambda x: x['B'].mean() > 3.)\n A B C\n 1 bar 2 5.0\n 3 bar 4 1.0\n 5 bar 6 9.0\n """\n indices = []\n\n obj = self._selected_obj\n gen = self._grouper.get_iterator(obj, axis=self.axis)\n\n for name, group in gen:\n # 2023-02-27 no tests are broken this pinning, but it is documented in the\n # docstring above.\n object.__setattr__(group, "name", name)\n\n res = func(group, *args, **kwargs)\n\n try:\n res = res.squeeze()\n except AttributeError: # allow e.g., scalars and frames to pass\n pass\n\n # interpret the result of the filter\n if is_bool(res) or (is_scalar(res) and isna(res)):\n if notna(res) and res:\n indices.append(self._get_index(name))\n else:\n # non scalars aren't allowed\n raise TypeError(\n f"filter function returned a {type(res).__name__}, "\n "but expected a scalar bool"\n )\n\n return self._apply_filter(indices, dropna)\n\n def __getitem__(self, key) -> DataFrameGroupBy | SeriesGroupBy:\n if self.axis == 1:\n # GH 37725\n raise 
ValueError("Cannot subset columns when using axis=1")\n # per GH 23566\n if isinstance(key, tuple) and len(key) > 1:\n # if len == 1, then it becomes a SeriesGroupBy and this is actually\n # valid syntax, so don't raise\n raise ValueError(\n "Cannot subset columns with a tuple with more than one element. "\n "Use a list instead."\n )\n return super().__getitem__(key)\n\n def _gotitem(self, key, ndim: int, subset=None):\n """\n sub-classes to define\n return a sliced object\n\n Parameters\n ----------\n key : string / list of selections\n ndim : {1, 2}\n requested ndim of result\n subset : object, default None\n subset to act on\n """\n if ndim == 2:\n if subset is None:\n subset = self.obj\n return DataFrameGroupBy(\n subset,\n self.keys,\n axis=self.axis,\n level=self.level,\n grouper=self._grouper,\n exclusions=self.exclusions,\n selection=key,\n as_index=self.as_index,\n sort=self.sort,\n group_keys=self.group_keys,\n observed=self.observed,\n dropna=self.dropna,\n )\n elif ndim == 1:\n if subset is None:\n subset = self.obj[key]\n return SeriesGroupBy(\n subset,\n self.keys,\n level=self.level,\n grouper=self._grouper,\n exclusions=self.exclusions,\n selection=key,\n as_index=self.as_index,\n sort=self.sort,\n group_keys=self.group_keys,\n observed=self.observed,\n dropna=self.dropna,\n )\n\n raise AssertionError("invalid ndim for _gotitem")\n\n def _get_data_to_aggregate(\n self, *, numeric_only: bool = False, name: str | None = None\n ) -> Manager2D:\n obj = self._obj_with_exclusions\n if self.axis == 1:\n mgr = obj.T._mgr\n else:\n mgr = obj._mgr\n\n if numeric_only:\n mgr = mgr.get_numeric_data()\n return mgr\n\n def _wrap_agged_manager(self, mgr: Manager2D) -> DataFrame:\n return self.obj._constructor_from_mgr(mgr, axes=mgr.axes)\n\n def _apply_to_column_groupbys(self, func) -> DataFrame:\n from pandas.core.reshape.concat import concat\n\n obj = self._obj_with_exclusions\n columns = obj.columns\n sgbs = [\n SeriesGroupBy(\n obj.iloc[:, i],\n 
selection=colname,\n grouper=self._grouper,\n exclusions=self.exclusions,\n observed=self.observed,\n )\n for i, colname in enumerate(obj.columns)\n ]\n results = [func(sgb) for sgb in sgbs]\n\n if not len(results):\n # concat would raise\n res_df = DataFrame([], columns=columns, index=self._grouper.result_index)\n else:\n res_df = concat(results, keys=columns, axis=1)\n\n if not self.as_index:\n res_df.index = default_index(len(res_df))\n res_df = self._insert_inaxis_grouper(res_df)\n return res_df\n\n def nunique(self, dropna: bool = True) -> DataFrame:\n """\n Return DataFrame with counts of unique elements in each position.\n\n Parameters\n ----------\n dropna : bool, default True\n Don't include NaN in the counts.\n\n Returns\n -------\n nunique: DataFrame\n\n Examples\n --------\n >>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',\n ... 'ham', 'ham'],\n ... 'value1': [1, 5, 5, 2, 5, 5],\n ... 'value2': list('abbaxy')})\n >>> df\n id value1 value2\n 0 spam 1 a\n 1 egg 5 b\n 2 egg 5 b\n 3 spam 2 a\n 4 ham 5 x\n 5 ham 5 y\n\n >>> df.groupby('id').nunique()\n value1 value2\n id\n egg 1 1\n ham 1 2\n spam 2 1\n\n Check for rows with the same id but conflicting values:\n\n >>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any())\n id value1 value2\n 0 spam 1 a\n 3 spam 2 a\n 4 ham 5 x\n 5 ham 5 y\n """\n\n if self.axis != 0:\n # see test_groupby_crash_on_nunique\n return self._python_apply_general(\n lambda sgb: sgb.nunique(dropna), self._obj_with_exclusions, is_agg=True\n )\n\n return self._apply_to_column_groupbys(lambda sgb: sgb.nunique(dropna))\n\n def idxmax(\n self,\n axis: Axis | None | lib.NoDefault = lib.no_default,\n skipna: bool = True,\n numeric_only: bool = False,\n ) -> DataFrame:\n """\n Return index of first occurrence of maximum over requested axis.\n\n NA/null values are excluded.\n\n Parameters\n ----------\n axis : {{0 or 'index', 1 or 'columns'}}, default None\n The axis to use. 
0 or 'index' for row-wise, 1 or 'columns' for column-wise.\n If axis is not provided, grouper's axis is used.\n\n .. versionchanged:: 2.0.0\n\n .. deprecated:: 2.1.0\n For axis=1, operate on the underlying object instead. Otherwise\n the axis keyword is not necessary.\n\n skipna : bool, default True\n Exclude NA/null values. If an entire row/column is NA, the result\n will be NA.\n numeric_only : bool, default False\n Include only `float`, `int` or `boolean` data.\n\n .. versionadded:: 1.5.0\n\n Returns\n -------\n Series\n Indexes of maxima along the specified axis.\n\n Raises\n ------\n ValueError\n * If the row/column is empty\n\n See Also\n --------\n Series.idxmax : Return index of the maximum element.\n\n Notes\n -----\n This method is the DataFrame version of ``ndarray.argmax``.\n\n Examples\n --------\n Consider a dataset containing food consumption in Argentina.\n\n >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],\n ... 'co2_emissions': [37.2, 19.66, 1712]},\n ... index=['Pork', 'Wheat Products', 'Beef'])\n\n >>> df\n consumption co2_emissions\n Pork 10.51 37.20\n Wheat Products 103.11 19.66\n Beef 55.48 1712.00\n\n By default, it returns the index for the maximum value in each column.\n\n >>> df.idxmax()\n consumption Wheat Products\n co2_emissions Beef\n dtype: object\n\n To return the index for the maximum value in each row, use ``axis="columns"``.\n\n >>> df.idxmax(axis="columns")\n Pork co2_emissions\n Wheat Products consumption\n Beef co2_emissions\n dtype: object\n """\n return self._idxmax_idxmin(\n "idxmax", axis=axis, numeric_only=numeric_only, skipna=skipna\n )\n\n def idxmin(\n self,\n axis: Axis | None | lib.NoDefault = lib.no_default,\n skipna: bool = True,\n numeric_only: bool = False,\n ) -> DataFrame:\n """\n Return index of first occurrence of minimum over requested axis.\n\n NA/null values are excluded.\n\n Parameters\n ----------\n axis : {{0 or 'index', 1 or 'columns'}}, default None\n The axis to use. 
0 or 'index' for row-wise, 1 or 'columns' for column-wise.\n If axis is not provided, grouper's axis is used.\n\n .. versionchanged:: 2.0.0\n\n .. deprecated:: 2.1.0\n For axis=1, operate on the underlying object instead. Otherwise\n the axis keyword is not necessary.\n\n skipna : bool, default True\n Exclude NA/null values. If an entire row/column is NA, the result\n will be NA.\n numeric_only : bool, default False\n Include only `float`, `int` or `boolean` data.\n\n .. versionadded:: 1.5.0\n\n Returns\n -------\n Series\n Indexes of minima along the specified axis.\n\n Raises\n ------\n ValueError\n * If the row/column is empty\n\n See Also\n --------\n Series.idxmin : Return index of the minimum element.\n\n Notes\n -----\n This method is the DataFrame version of ``ndarray.argmin``.\n\n Examples\n --------\n Consider a dataset containing food consumption in Argentina.\n\n >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],\n ... 'co2_emissions': [37.2, 19.66, 1712]},\n ... index=['Pork', 'Wheat Products', 'Beef'])\n\n >>> df\n consumption co2_emissions\n Pork 10.51 37.20\n Wheat Products 103.11 19.66\n Beef 55.48 1712.00\n\n By default, it returns the index for the minimum value in each column.\n\n >>> df.idxmin()\n consumption Pork\n co2_emissions Wheat Products\n dtype: object\n\n To return the index for the minimum value in each row, use ``axis="columns"``.\n\n >>> df.idxmin(axis="columns")\n Pork consumption\n Wheat Products co2_emissions\n Beef consumption\n dtype: object\n """\n return self._idxmax_idxmin(\n "idxmin", axis=axis, numeric_only=numeric_only, skipna=skipna\n )\n\n boxplot = boxplot_frame_groupby\n\n def value_counts(\n self,\n subset: Sequence[Hashable] | None = None,\n normalize: bool = False,\n sort: bool = True,\n ascending: bool = False,\n dropna: bool = True,\n ) -> DataFrame | Series:\n """\n Return a Series or DataFrame containing counts of unique rows.\n\n .. 
versionadded:: 1.4.0\n\n Parameters\n ----------\n subset : list-like, optional\n Columns to use when counting unique combinations.\n normalize : bool, default False\n Return proportions rather than frequencies.\n sort : bool, default True\n Sort by frequencies.\n ascending : bool, default False\n Sort in ascending order.\n dropna : bool, default True\n Don't include counts of rows that contain NA values.\n\n Returns\n -------\n Series or DataFrame\n Series if the groupby as_index is True, otherwise DataFrame.\n\n See Also\n --------\n Series.value_counts: Equivalent method on Series.\n DataFrame.value_counts: Equivalent method on DataFrame.\n SeriesGroupBy.value_counts: Equivalent method on SeriesGroupBy.\n\n Notes\n -----\n - If the groupby as_index is True then the returned Series will have a\n MultiIndex with one level per input column.\n - If the groupby as_index is False then the returned DataFrame will have an\n additional column with the value_counts. The column is labelled 'count' or\n 'proportion', depending on the ``normalize`` parameter.\n\n By default, rows that contain any NA values are omitted from\n the result.\n\n By default, the result will be in descending order so that the\n first element of each group is the most frequently-occurring row.\n\n Examples\n --------\n >>> df = pd.DataFrame({\n ... 'gender': ['male', 'male', 'female', 'male', 'female', 'male'],\n ... 'education': ['low', 'medium', 'high', 'low', 'high', 'low'],\n ... 'country': ['US', 'FR', 'US', 'FR', 'FR', 'FR']\n ... 
})\n\n >>> df\n gender education country\n 0 male low US\n 1 male medium FR\n 2 female high US\n 3 male low FR\n 4 female high FR\n 5 male low FR\n\n >>> df.groupby('gender').value_counts()\n gender education country\n female high FR 1\n US 1\n male low FR 2\n US 1\n medium FR 1\n Name: count, dtype: int64\n\n >>> df.groupby('gender').value_counts(ascending=True)\n gender education country\n female high FR 1\n US 1\n male low US 1\n medium FR 1\n low FR 2\n Name: count, dtype: int64\n\n >>> df.groupby('gender').value_counts(normalize=True)\n gender education country\n female high FR 0.50\n US 0.50\n male low FR 0.50\n US 0.25\n medium FR 0.25\n Name: proportion, dtype: float64\n\n >>> df.groupby('gender', as_index=False).value_counts()\n gender education country count\n 0 female high FR 1\n 1 female high US 1\n 2 male low FR 2\n 3 male low US 1\n 4 male medium FR 1\n\n >>> df.groupby('gender', as_index=False).value_counts(normalize=True)\n gender education country proportion\n 0 female high FR 0.50\n 1 female high US 0.50\n 2 male low FR 0.50\n 3 male low US 0.25\n 4 male medium FR 0.25\n """\n return self._value_counts(subset, normalize, sort, ascending, dropna)\n\n def fillna(\n self,\n value: Hashable | Mapping | Series | DataFrame | None = None,\n method: FillnaOptions | None = None,\n axis: Axis | None | lib.NoDefault = lib.no_default,\n inplace: bool = False,\n limit: int | None = None,\n downcast=lib.no_default,\n ) -> DataFrame | None:\n """\n Fill NA/NaN values using the specified method within groups.\n\n .. deprecated:: 2.2.0\n This method is deprecated and will be removed in a future version.\n Use the :meth:`.DataFrameGroupBy.ffill` or :meth:`.DataFrameGroupBy.bfill`\n for forward or backward filling instead. If you want to fill with a\n single value, use :meth:`DataFrame.fillna` instead.\n\n Parameters\n ----------\n value : scalar, dict, Series, or DataFrame\n Value to use to fill holes (e.g. 
0), alternately a\n dict/Series/DataFrame of values specifying which value to use for\n each index (for a Series) or column (for a DataFrame). Values not\n in the dict/Series/DataFrame will not be filled. This value cannot\n be a list. Users wanting to use the ``value`` argument and not ``method``\n should prefer :meth:`.DataFrame.fillna` as this\n will produce the same result and be more performant.\n method : {{'bfill', 'ffill', None}}, default None\n Method to use for filling holes. ``'ffill'`` will propagate\n the last valid observation forward within a group.\n ``'bfill'`` will use next valid observation to fill the gap.\n axis : {0 or 'index', 1 or 'columns'}\n Axis along which to fill missing values. When the :class:`DataFrameGroupBy`\n ``axis`` argument is ``0``, using ``axis=1`` here will produce\n the same results as :meth:`.DataFrame.fillna`. When the\n :class:`DataFrameGroupBy` ``axis`` argument is ``1``, using ``axis=0``\n or ``axis=1`` here will produce the same results.\n inplace : bool, default False\n Broken. Do not set to True.\n limit : int, default None\n If method is specified, this is the maximum number of consecutive\n NaN values to forward/backward fill within a group. In other words,\n if there is a gap with more than this number of consecutive NaNs,\n it will only be partially filled. If method is not specified, this is the\n maximum number of entries along the entire axis where NaNs will be\n filled. Must be greater than 0 if not None.\n downcast : dict, default is None\n A dict of item->dtype of what to downcast if possible,\n or the string 'infer' which will try to downcast to an appropriate\n equal type (e.g. float64 to int64 if possible).\n\n Returns\n -------\n DataFrame\n Object with missing values filled.\n\n See Also\n --------\n ffill : Forward fill values within a group.\n bfill : Backward fill values within a group.\n\n Examples\n --------\n >>> df = pd.DataFrame(\n ... {\n ... "key": [0, 0, 1, 1, 1],\n ... 
"A": [np.nan, 2, np.nan, 3, np.nan],\n ... "B": [2, 3, np.nan, np.nan, np.nan],\n ... "C": [np.nan, np.nan, 2, np.nan, np.nan],\n ... }\n ... )\n >>> df\n key A B C\n 0 0 NaN 2.0 NaN\n 1 0 2.0 3.0 NaN\n 2 1 NaN NaN 2.0\n 3 1 3.0 NaN NaN\n 4 1 NaN NaN NaN\n\n Propagate non-null values forward or backward within each group along columns.\n\n >>> df.groupby("key").fillna(method="ffill")\n A B C\n 0 NaN 2.0 NaN\n 1 2.0 3.0 NaN\n 2 NaN NaN 2.0\n 3 3.0 NaN 2.0\n 4 3.0 NaN 2.0\n\n >>> df.groupby("key").fillna(method="bfill")\n A B C\n 0 2.0 2.0 NaN\n 1 2.0 3.0 NaN\n 2 3.0 NaN 2.0\n 3 3.0 NaN NaN\n 4 NaN NaN NaN\n\n Propagate non-null values forward or backward within each group along rows.\n\n >>> df.T.groupby(np.array([0, 0, 1, 1])).fillna(method="ffill").T\n key A B C\n 0 0.0 0.0 2.0 2.0\n 1 0.0 2.0 3.0 3.0\n 2 1.0 1.0 NaN 2.0\n 3 1.0 3.0 NaN NaN\n 4 1.0 1.0 NaN NaN\n\n >>> df.T.groupby(np.array([0, 0, 1, 1])).fillna(method="bfill").T\n key A B C\n 0 0.0 NaN 2.0 NaN\n 1 0.0 2.0 3.0 NaN\n 2 1.0 NaN 2.0 2.0\n 3 1.0 3.0 NaN NaN\n 4 1.0 NaN NaN NaN\n\n Only replace the first NaN element within a group along rows.\n\n >>> df.groupby("key").fillna(method="ffill", limit=1)\n A B C\n 0 NaN 2.0 NaN\n 1 2.0 3.0 NaN\n 2 NaN NaN 2.0\n 3 3.0 NaN 2.0\n 4 3.0 NaN NaN\n """\n warnings.warn(\n f"{type(self).__name__}.fillna is deprecated and "\n "will be removed in a future version. Use obj.ffill() or obj.bfill() "\n "for forward or backward filling instead. 
If you want to fill with a "\n f"single value, use {type(self.obj).__name__}.fillna instead",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n\n result = self._op_via_apply(\n "fillna",\n value=value,\n method=method,\n axis=axis,\n inplace=inplace,\n limit=limit,\n downcast=downcast,\n )\n return result\n\n def take(\n self,\n indices: TakeIndexer,\n axis: Axis | None | lib.NoDefault = lib.no_default,\n **kwargs,\n ) -> DataFrame:\n """\n Return the elements in the given *positional* indices in each group.\n\n This means that we are not indexing according to actual values in\n the index attribute of the object. We are indexing according to the\n actual position of the element in the object.\n\n If a requested index does not exist for some group, this method will raise.\n To get similar behavior that ignores indices that don't exist, see\n :meth:`.DataFrameGroupBy.nth`.\n\n Parameters\n ----------\n indices : array-like\n An array of ints indicating which positions to take.\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n The axis on which to select elements. ``0`` means that we are\n selecting rows, ``1`` means that we are selecting columns.\n\n .. deprecated:: 2.1.0\n For axis=1, operate on the underlying object instead. Otherwise\n the axis keyword is not necessary.\n\n **kwargs\n For compatibility with :meth:`numpy.take`. Has no effect on the\n output.\n\n Returns\n -------\n DataFrame\n An DataFrame containing the elements taken from each group.\n\n See Also\n --------\n DataFrame.take : Take elements from a Series along an axis.\n DataFrame.loc : Select a subset of a DataFrame by labels.\n DataFrame.iloc : Select a subset of a DataFrame by positions.\n numpy.take : Take elements from an array along an axis.\n\n Examples\n --------\n >>> df = pd.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey', 'mammal', np.nan),\n ... ('rabbit', 'mammal', 15.0)],\n ... 
columns=['name', 'class', 'max_speed'],\n ... index=[4, 3, 2, 1, 0])\n >>> df\n name class max_speed\n 4 falcon bird 389.0\n 3 parrot bird 24.0\n 2 lion mammal 80.5\n 1 monkey mammal NaN\n 0 rabbit mammal 15.0\n >>> gb = df.groupby([1, 1, 2, 2, 2])\n\n Take elements at positions 0 and 1 along the axis 0 (default).\n\n Note how the indices selected in the result do not correspond to\n our input indices 0 and 1. That's because we are selecting the 0th\n and 1st rows, not rows whose indices equal 0 and 1.\n\n >>> gb.take([0, 1])\n name class max_speed\n 1 4 falcon bird 389.0\n 3 parrot bird 24.0\n 2 2 lion mammal 80.5\n 1 monkey mammal NaN\n\n The order of the specified indices influences the order in the result.\n Here, the order is swapped from the previous example.\n\n >>> gb.take([1, 0])\n name class max_speed\n 1 3 parrot bird 24.0\n 4 falcon bird 389.0\n 2 1 monkey mammal NaN\n 2 lion mammal 80.5\n\n Take elements at indices 1 and 2 along the axis 1 (column selection).\n\n We may take elements using negative integers for positive indices,\n starting from the end of the object, just like with Python lists.\n\n >>> gb.take([-1, -2])\n name class max_speed\n 1 3 parrot bird 24.0\n 4 falcon bird 389.0\n 2 0 rabbit mammal 15.0\n 1 monkey mammal NaN\n """\n result = self._op_via_apply("take", indices=indices, axis=axis, **kwargs)\n return result\n\n def skew(\n self,\n axis: Axis | None | lib.NoDefault = lib.no_default,\n skipna: bool = True,\n numeric_only: bool = False,\n **kwargs,\n ) -> DataFrame:\n """\n Return unbiased skew within groups.\n\n Normalized by N-1.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n Axis for the function to be applied on.\n\n Specifying ``axis=None`` will apply the aggregation across both axes.\n\n .. versionadded:: 2.0.0\n\n .. deprecated:: 2.1.0\n For axis=1, operate on the underlying object instead. 
Otherwise\n the axis keyword is not necessary.\n\n skipna : bool, default True\n Exclude NA/null values when computing the result.\n\n numeric_only : bool, default False\n Include only float, int, boolean columns.\n\n **kwargs\n Additional keyword arguments to be passed to the function.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.skew : Return unbiased skew over requested axis.\n\n Examples\n --------\n >>> arrays = [['falcon', 'parrot', 'cockatoo', 'kiwi',\n ... 'lion', 'monkey', 'rabbit'],\n ... ['bird', 'bird', 'bird', 'bird',\n ... 'mammal', 'mammal', 'mammal']]\n >>> index = pd.MultiIndex.from_arrays(arrays, names=('name', 'class'))\n >>> df = pd.DataFrame({'max_speed': [389.0, 24.0, 70.0, np.nan,\n ... 80.5, 21.5, 15.0]},\n ... index=index)\n >>> df\n max_speed\n name class\n falcon bird 389.0\n parrot bird 24.0\n cockatoo bird 70.0\n kiwi bird NaN\n lion mammal 80.5\n monkey mammal 21.5\n rabbit mammal 15.0\n >>> gb = df.groupby(["class"])\n >>> gb.skew()\n max_speed\n class\n bird 1.628296\n mammal 1.669046\n >>> gb.skew(skipna=False)\n max_speed\n class\n bird NaN\n mammal 1.669046\n """\n if axis is lib.no_default:\n axis = 0\n\n if axis != 0:\n result = self._op_via_apply(\n "skew",\n axis=axis,\n skipna=skipna,\n numeric_only=numeric_only,\n **kwargs,\n )\n return result\n\n def alt(obj):\n # This should not be reached since the cython path should raise\n # TypeError and not NotImplementedError.\n raise TypeError(f"'skew' is not supported for dtype={obj.dtype}")\n\n return self._cython_agg_general(\n "skew", alt=alt, skipna=skipna, numeric_only=numeric_only, **kwargs\n )\n\n @property\n @doc(DataFrame.plot.__doc__)\n def plot(self) -> GroupByPlot:\n result = GroupByPlot(self)\n return result\n\n @doc(DataFrame.corr.__doc__)\n def corr(\n self,\n method: str | Callable[[np.ndarray, np.ndarray], float] = "pearson",\n min_periods: int = 1,\n numeric_only: bool = False,\n ) -> DataFrame:\n result = self._op_via_apply(\n "corr", 
method=method, min_periods=min_periods, numeric_only=numeric_only\n )\n return result\n\n @doc(DataFrame.cov.__doc__)\n def cov(\n self,\n min_periods: int | None = None,\n ddof: int | None = 1,\n numeric_only: bool = False,\n ) -> DataFrame:\n result = self._op_via_apply(\n "cov", min_periods=min_periods, ddof=ddof, numeric_only=numeric_only\n )\n return result\n\n @doc(DataFrame.hist.__doc__)\n def hist(\n self,\n column: IndexLabel | None = None,\n by=None,\n grid: bool = True,\n xlabelsize: int | None = None,\n xrot: float | None = None,\n ylabelsize: int | None = None,\n yrot: float | None = None,\n ax=None,\n sharex: bool = False,\n sharey: bool = False,\n figsize: tuple[int, int] | None = None,\n layout: tuple[int, int] | None = None,\n bins: int | Sequence[int] = 10,\n backend: str | None = None,\n legend: bool = False,\n **kwargs,\n ):\n result = self._op_via_apply(\n "hist",\n column=column,\n by=by,\n grid=grid,\n xlabelsize=xlabelsize,\n xrot=xrot,\n ylabelsize=ylabelsize,\n yrot=yrot,\n ax=ax,\n sharex=sharex,\n sharey=sharey,\n figsize=figsize,\n layout=layout,\n bins=bins,\n backend=backend,\n legend=legend,\n **kwargs,\n )\n return result\n\n @property\n @doc(DataFrame.dtypes.__doc__)\n def dtypes(self) -> Series:\n # GH#51045\n warnings.warn(\n f"{type(self).__name__}.dtypes is deprecated and will be removed in "\n "a future version. 
Check the dtypes on the base object instead",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n\n # error: Incompatible return value type (got "DataFrame", expected "Series")\n return self._python_apply_general( # type: ignore[return-value]\n lambda df: df.dtypes, self._selected_obj\n )\n\n @doc(DataFrame.corrwith.__doc__)\n def corrwith(\n self,\n other: DataFrame | Series,\n axis: Axis | lib.NoDefault = lib.no_default,\n drop: bool = False,\n method: CorrelationMethod = "pearson",\n numeric_only: bool = False,\n ) -> DataFrame:\n result = self._op_via_apply(\n "corrwith",\n other=other,\n axis=axis,\n drop=drop,\n method=method,\n numeric_only=numeric_only,\n )\n return result\n\n\ndef _wrap_transform_general_frame(\n obj: DataFrame, group: DataFrame, res: DataFrame | Series\n) -> DataFrame:\n from pandas import concat\n\n if isinstance(res, Series):\n # we need to broadcast across the\n # other dimension; this will preserve dtypes\n # GH14457\n if res.index.is_(obj.index):\n res_frame = concat([res] * len(group.columns), axis=1)\n res_frame.columns = group.columns\n res_frame.index = group.index\n else:\n res_frame = obj._constructor(\n np.tile(res.values, (len(group.index), 1)),\n columns=group.columns,\n index=group.index,\n )\n assert isinstance(res_frame, DataFrame)\n return res_frame\n elif isinstance(res, DataFrame) and not res.index.is_(group.index):\n return res._align_frame(group)[0]\n else:\n return res\n
.venv\Lib\site-packages\pandas\core\groupby\generic.py
generic.py
Python
96,885
0.75
0.115358
0.069796
vue-tools
331
2024-05-11T20:08:32.119039
Apache-2.0
false
ccb369c8b8944f886e6f74794f648d59
"""\nProvide user facing operators for doing the split part of the\nsplit-apply-combine paradigm.\n"""\nfrom __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n final,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._config import (\n using_copy_on_write,\n warn_copy_on_write,\n)\n\nfrom pandas._libs import lib\nfrom pandas._libs.tslibs import OutOfBoundsDatetime\nfrom pandas.errors import InvalidIndexError\nfrom pandas.util._decorators import cache_readonly\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.common import (\n is_list_like,\n is_scalar,\n)\nfrom pandas.core.dtypes.dtypes import CategoricalDtype\n\nfrom pandas.core import algorithms\nfrom pandas.core.arrays import (\n Categorical,\n ExtensionArray,\n)\nimport pandas.core.common as com\nfrom pandas.core.frame import DataFrame\nfrom pandas.core.groupby import ops\nfrom pandas.core.groupby.categorical import recode_for_groupby\nfrom pandas.core.indexes.api import (\n CategoricalIndex,\n Index,\n MultiIndex,\n)\nfrom pandas.core.series import Series\n\nfrom pandas.io.formats.printing import pprint_thing\n\nif TYPE_CHECKING:\n from collections.abc import (\n Hashable,\n Iterator,\n )\n\n from pandas._typing import (\n ArrayLike,\n Axis,\n NDFrameT,\n npt,\n )\n\n from pandas.core.generic import NDFrame\n\n\nclass Grouper:\n """\n A Grouper allows the user to specify a groupby instruction for an object.\n\n This specification will select a column via the key parameter, or if the\n level and/or axis parameters are given, a level of the index of the target\n object.\n\n If `axis` and/or `level` are passed as keywords to both `Grouper` and\n `groupby`, the values passed to `Grouper` take precedence.\n\n Parameters\n ----------\n key : str, defaults to None\n Groupby key, which selects the grouping column of the target.\n level : name/number, defaults to None\n The level for the target index.\n freq : str / frequency object, defaults to None\n This 
will groupby the specified frequency if the target selection\n (via key or level) is a datetime-like object. For full specification\n of available frequencies, please see `here\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_.\n axis : str, int, defaults to 0\n Number/name of the axis.\n sort : bool, default to False\n Whether to sort the resulting labels.\n closed : {'left' or 'right'}\n Closed end of interval. Only when `freq` parameter is passed.\n label : {'left' or 'right'}\n Interval boundary to use for labeling.\n Only when `freq` parameter is passed.\n convention : {'start', 'end', 'e', 's'}\n If grouper is PeriodIndex and `freq` parameter is passed.\n\n origin : Timestamp or str, default 'start_day'\n The timestamp on which to adjust the grouping. The timezone of origin must\n match the timezone of the index.\n If string, must be one of the following:\n\n - 'epoch': `origin` is 1970-01-01\n - 'start': `origin` is the first value of the timeseries\n - 'start_day': `origin` is the first day at midnight of the timeseries\n\n - 'end': `origin` is the last value of the timeseries\n - 'end_day': `origin` is the ceiling midnight of the last day\n\n .. versionadded:: 1.3.0\n\n offset : Timedelta or str, default is None\n An offset timedelta added to the origin.\n\n dropna : bool, default True\n If True, and if group keys contain NA values, NA values together with\n row/column will be dropped. If False, NA values will also be treated as\n the key in groups.\n\n Returns\n -------\n Grouper or pandas.api.typing.TimeGrouper\n A TimeGrouper is returned if ``freq`` is not ``None``. Otherwise, a Grouper\n is returned.\n\n Examples\n --------\n ``df.groupby(pd.Grouper(key="Animal"))`` is equivalent to ``df.groupby('Animal')``\n\n >>> df = pd.DataFrame(\n ... {\n ... "Animal": ["Falcon", "Parrot", "Falcon", "Falcon", "Parrot"],\n ... "Speed": [100, 5, 200, 300, 15],\n ... }\n ... 
)\n >>> df\n Animal Speed\n 0 Falcon 100\n 1 Parrot 5\n 2 Falcon 200\n 3 Falcon 300\n 4 Parrot 15\n >>> df.groupby(pd.Grouper(key="Animal")).mean()\n Speed\n Animal\n Falcon 200.0\n Parrot 10.0\n\n Specify a resample operation on the column 'Publish date'\n\n >>> df = pd.DataFrame(\n ... {\n ... "Publish date": [\n ... pd.Timestamp("2000-01-02"),\n ... pd.Timestamp("2000-01-02"),\n ... pd.Timestamp("2000-01-09"),\n ... pd.Timestamp("2000-01-16")\n ... ],\n ... "ID": [0, 1, 2, 3],\n ... "Price": [10, 20, 30, 40]\n ... }\n ... )\n >>> df\n Publish date ID Price\n 0 2000-01-02 0 10\n 1 2000-01-02 1 20\n 2 2000-01-09 2 30\n 3 2000-01-16 3 40\n >>> df.groupby(pd.Grouper(key="Publish date", freq="1W")).mean()\n ID Price\n Publish date\n 2000-01-02 0.5 15.0\n 2000-01-09 2.0 30.0\n 2000-01-16 3.0 40.0\n\n If you want to adjust the start of the bins based on a fixed timestamp:\n\n >>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00'\n >>> rng = pd.date_range(start, end, freq='7min')\n >>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng)\n >>> ts\n 2000-10-01 23:30:00 0\n 2000-10-01 23:37:00 3\n 2000-10-01 23:44:00 6\n 2000-10-01 23:51:00 9\n 2000-10-01 23:58:00 12\n 2000-10-02 00:05:00 15\n 2000-10-02 00:12:00 18\n 2000-10-02 00:19:00 21\n 2000-10-02 00:26:00 24\n Freq: 7min, dtype: int64\n\n >>> ts.groupby(pd.Grouper(freq='17min')).sum()\n 2000-10-01 23:14:00 0\n 2000-10-01 23:31:00 9\n 2000-10-01 23:48:00 21\n 2000-10-02 00:05:00 54\n 2000-10-02 00:22:00 24\n Freq: 17min, dtype: int64\n\n >>> ts.groupby(pd.Grouper(freq='17min', origin='epoch')).sum()\n 2000-10-01 23:18:00 0\n 2000-10-01 23:35:00 18\n 2000-10-01 23:52:00 27\n 2000-10-02 00:09:00 39\n 2000-10-02 00:26:00 24\n Freq: 17min, dtype: int64\n\n >>> ts.groupby(pd.Grouper(freq='17min', origin='2000-01-01')).sum()\n 2000-10-01 23:24:00 3\n 2000-10-01 23:41:00 15\n 2000-10-01 23:58:00 45\n 2000-10-02 00:15:00 45\n Freq: 17min, dtype: int64\n\n If you want to adjust the start of the bins with an `offset` 
Timedelta, the two\n following lines are equivalent:\n\n >>> ts.groupby(pd.Grouper(freq='17min', origin='start')).sum()\n 2000-10-01 23:30:00 9\n 2000-10-01 23:47:00 21\n 2000-10-02 00:04:00 54\n 2000-10-02 00:21:00 24\n Freq: 17min, dtype: int64\n\n >>> ts.groupby(pd.Grouper(freq='17min', offset='23h30min')).sum()\n 2000-10-01 23:30:00 9\n 2000-10-01 23:47:00 21\n 2000-10-02 00:04:00 54\n 2000-10-02 00:21:00 24\n Freq: 17min, dtype: int64\n\n To replace the use of the deprecated `base` argument, you can now use `offset`,\n in this example it is equivalent to have `base=2`:\n\n >>> ts.groupby(pd.Grouper(freq='17min', offset='2min')).sum()\n 2000-10-01 23:16:00 0\n 2000-10-01 23:33:00 9\n 2000-10-01 23:50:00 36\n 2000-10-02 00:07:00 39\n 2000-10-02 00:24:00 24\n Freq: 17min, dtype: int64\n """\n\n sort: bool\n dropna: bool\n _gpr_index: Index | None\n _grouper: Index | None\n\n _attributes: tuple[str, ...] = ("key", "level", "freq", "axis", "sort", "dropna")\n\n def __new__(cls, *args, **kwargs):\n if kwargs.get("freq") is not None:\n from pandas.core.resample import TimeGrouper\n\n cls = TimeGrouper\n return super().__new__(cls)\n\n def __init__(\n self,\n key=None,\n level=None,\n freq=None,\n axis: Axis | lib.NoDefault = lib.no_default,\n sort: bool = False,\n dropna: bool = True,\n ) -> None:\n if type(self) is Grouper:\n # i.e. not TimeGrouper\n if axis is not lib.no_default:\n warnings.warn(\n "Grouper axis keyword is deprecated and will be removed in a "\n "future version. To group on axis=1, use obj.T.groupby(...) 
"\n "instead",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n else:\n axis = 0\n if axis is lib.no_default:\n axis = 0\n\n self.key = key\n self.level = level\n self.freq = freq\n self.axis = axis\n self.sort = sort\n self.dropna = dropna\n\n self._grouper_deprecated = None\n self._indexer_deprecated: npt.NDArray[np.intp] | None = None\n self._obj_deprecated = None\n self._gpr_index = None\n self.binner = None\n self._grouper = None\n self._indexer: npt.NDArray[np.intp] | None = None\n\n def _get_grouper(\n self, obj: NDFrameT, validate: bool = True\n ) -> tuple[ops.BaseGrouper, NDFrameT]:\n """\n Parameters\n ----------\n obj : Series or DataFrame\n validate : bool, default True\n if True, validate the grouper\n\n Returns\n -------\n a tuple of grouper, obj (possibly sorted)\n """\n obj, _, _ = self._set_grouper(obj)\n grouper, _, obj = get_grouper(\n obj,\n [self.key],\n axis=self.axis,\n level=self.level,\n sort=self.sort,\n validate=validate,\n dropna=self.dropna,\n )\n # Without setting this, subsequent lookups to .groups raise\n # error: Incompatible types in assignment (expression has type "BaseGrouper",\n # variable has type "None")\n self._grouper_deprecated = grouper # type: ignore[assignment]\n\n return grouper, obj\n\n def _set_grouper(\n self, obj: NDFrameT, sort: bool = False, *, gpr_index: Index | None = None\n ) -> tuple[NDFrameT, Index, npt.NDArray[np.intp] | None]:\n """\n given an object and the specifications, setup the internal grouper\n for this particular specification\n\n Parameters\n ----------\n obj : Series or DataFrame\n sort : bool, default False\n whether the resulting grouper should be sorted\n gpr_index : Index or None, default None\n\n Returns\n -------\n NDFrame\n Index\n np.ndarray[np.intp] | None\n """\n assert obj is not None\n\n if self.key is not None and self.level is not None:\n raise ValueError("The Grouper cannot specify both a key and a level!")\n\n # Keep self._grouper value before overriding\n if self._grouper 
is None:\n # TODO: What are we assuming about subsequent calls?\n self._grouper = gpr_index\n self._indexer = self._indexer_deprecated\n\n # the key must be a valid info item\n if self.key is not None:\n key = self.key\n # The 'on' is already defined\n if getattr(gpr_index, "name", None) == key and isinstance(obj, Series):\n # Sometimes self._grouper will have been resorted while\n # obj has not. In this case there is a mismatch when we\n # call self._grouper.take(obj.index) so we need to undo the sorting\n # before we call _grouper.take.\n assert self._grouper is not None\n if self._indexer is not None:\n reverse_indexer = self._indexer.argsort()\n unsorted_ax = self._grouper.take(reverse_indexer)\n ax = unsorted_ax.take(obj.index)\n else:\n ax = self._grouper.take(obj.index)\n else:\n if key not in obj._info_axis:\n raise KeyError(f"The grouper name {key} is not found")\n ax = Index(obj[key], name=key)\n\n else:\n ax = obj._get_axis(self.axis)\n if self.level is not None:\n level = self.level\n\n # if a level is given it must be a mi level or\n # equivalent to the axis name\n if isinstance(ax, MultiIndex):\n level = ax._get_level_number(level)\n ax = Index(ax._get_level_values(level), name=ax.names[level])\n\n else:\n if level not in (0, ax.name):\n raise ValueError(f"The level {level} is not valid")\n\n # possibly sort\n indexer: npt.NDArray[np.intp] | None = None\n if (self.sort or sort) and not ax.is_monotonic_increasing:\n # use stable sort to support first, last, nth\n # TODO: why does putting na_position="first" fix datetimelike cases?\n indexer = self._indexer_deprecated = ax.array.argsort(\n kind="mergesort", na_position="first"\n )\n ax = ax.take(indexer)\n obj = obj.take(indexer, axis=self.axis)\n\n # error: Incompatible types in assignment (expression has type\n # "NDFrameT", variable has type "None")\n self._obj_deprecated = obj # type: ignore[assignment]\n self._gpr_index = ax\n return obj, ax, indexer\n\n @final\n @property\n def ax(self) -> 
@final
@property
def indexer(self):
    """
    DEPRECATED: the sort indexer computed by ``_set_grouper``.

    Use ``Resampler.indexer`` instead.
    """
    warnings.warn(
        f"{type(self).__name__}.indexer is deprecated and will be removed "
        "in a future version. Use Resampler.indexer instead.",
        FutureWarning,
        stacklevel=find_stack_level(),
    )
    return self._indexer_deprecated

@final
@property
def obj(self):
    """
    DEPRECATED: the (possibly sorted) object last passed to ``_set_grouper``.

    Use ``GroupBy.obj`` instead.
    """
    # TODO(3.0): enforcing these deprecations on Grouper should close
    # GH#25564, GH#41930
    warnings.warn(
        f"{type(self).__name__}.obj is deprecated and will be removed "
        # BUG FIX: message previously pointed callers at "GroupBy.indexer",
        # a copy-paste from the sibling `indexer` property; the replacement
        # for `obj` is GroupBy.obj (cf. grouper -> GroupBy.grouper,
        # groups -> GroupBy.groups below).
        "in a future version. Use GroupBy.obj instead.",
        FutureWarning,
        stacklevel=find_stack_level(),
    )
    return self._obj_deprecated

@final
@property
def grouper(self):
    """
    DEPRECATED: the BaseGrouper created by the last ``_get_grouper`` call.

    Use ``GroupBy.grouper`` instead.
    """
    warnings.warn(
        f"{type(self).__name__}.grouper is deprecated and will be removed "
        "in a future version. Use GroupBy.grouper instead.",
        FutureWarning,
        stacklevel=find_stack_level(),
    )
    return self._grouper_deprecated
Use GroupBy.groups instead.",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n # error: "None" has no attribute "groups"\n return self._grouper_deprecated.groups # type: ignore[attr-defined]\n\n @final\n def __repr__(self) -> str:\n attrs_list = (\n f"{attr_name}={repr(getattr(self, attr_name))}"\n for attr_name in self._attributes\n if getattr(self, attr_name) is not None\n )\n attrs = ", ".join(attrs_list)\n cls_name = type(self).__name__\n return f"{cls_name}({attrs})"\n\n\n@final\nclass Grouping:\n """\n Holds the grouping information for a single key\n\n Parameters\n ----------\n index : Index\n grouper :\n obj : DataFrame or Series\n name : Label\n level :\n observed : bool, default False\n If we are a Categorical, use the observed values\n in_axis : if the Grouping is a column in self.obj and hence among\n Groupby.exclusions list\n dropna : bool, default True\n Whether to drop NA groups.\n uniques : Array-like, optional\n When specified, will be used for unique values. Enables including empty groups\n in the result for a BinGrouper. 
Must not contain duplicates.\n\n Attributes\n -------\n indices : dict\n Mapping of {group -> index_list}\n codes : ndarray\n Group codes\n group_index : Index or None\n unique groups\n groups : dict\n Mapping of {group -> label_list}\n """\n\n _codes: npt.NDArray[np.signedinteger] | None = None\n _all_grouper: Categorical | None\n _orig_cats: Index | None\n _index: Index\n\n def __init__(\n self,\n index: Index,\n grouper=None,\n obj: NDFrame | None = None,\n level=None,\n sort: bool = True,\n observed: bool = False,\n in_axis: bool = False,\n dropna: bool = True,\n uniques: ArrayLike | None = None,\n ) -> None:\n self.level = level\n self._orig_grouper = grouper\n grouping_vector = _convert_grouper(index, grouper)\n self._all_grouper = None\n self._orig_cats = None\n self._index = index\n self._sort = sort\n self.obj = obj\n self._observed = observed\n self.in_axis = in_axis\n self._dropna = dropna\n self._uniques = uniques\n\n # we have a single grouper which may be a myriad of things,\n # some of which are dependent on the passing in level\n\n ilevel = self._ilevel\n if ilevel is not None:\n # In extant tests, the new self.grouping_vector matches\n # `index.get_level_values(ilevel)` whenever\n # mapper is None and isinstance(index, MultiIndex)\n if isinstance(index, MultiIndex):\n index_level = index.get_level_values(ilevel)\n else:\n index_level = index\n\n if grouping_vector is None:\n grouping_vector = index_level\n else:\n mapper = grouping_vector\n grouping_vector = index_level.map(mapper)\n\n # a passed Grouper like, directly get the grouper in the same way\n # as single grouper groupby, use the group_info to get codes\n elif isinstance(grouping_vector, Grouper):\n # get the new grouper; we already have disambiguated\n # what key/level refer to exactly, don't need to\n # check again as we have by this point converted these\n # to an actual value (rather than a pd.Grouper)\n assert self.obj is not None # for mypy\n newgrouper, newobj = 
grouping_vector._get_grouper(self.obj, validate=False)\n self.obj = newobj\n\n if isinstance(newgrouper, ops.BinGrouper):\n # TODO: can we unwrap this and get a tighter typing\n # for self.grouping_vector?\n grouping_vector = newgrouper\n else:\n # ops.BaseGrouper\n # TODO: 2023-02-03 no test cases with len(newgrouper.groupings) > 1.\n # If that were to occur, would we be throwing out information?\n # error: Cannot determine type of "grouping_vector" [has-type]\n ng = newgrouper.groupings[0].grouping_vector # type: ignore[has-type]\n # use Index instead of ndarray so we can recover the name\n grouping_vector = Index(ng, name=newgrouper.result_index.name)\n\n elif not isinstance(\n grouping_vector, (Series, Index, ExtensionArray, np.ndarray)\n ):\n # no level passed\n if getattr(grouping_vector, "ndim", 1) != 1:\n t = str(type(grouping_vector))\n raise ValueError(f"Grouper for '{t}' not 1-dimensional")\n\n grouping_vector = index.map(grouping_vector)\n\n if not (\n hasattr(grouping_vector, "__len__")\n and len(grouping_vector) == len(index)\n ):\n grper = pprint_thing(grouping_vector)\n errmsg = (\n "Grouper result violates len(labels) == "\n f"len(data)\nresult: {grper}"\n )\n raise AssertionError(errmsg)\n\n if isinstance(grouping_vector, np.ndarray):\n if grouping_vector.dtype.kind in "mM":\n # if we have a date/time-like grouper, make sure that we have\n # Timestamps like\n # TODO 2022-10-08 we only have one test that gets here and\n # values are already in nanoseconds in that case.\n grouping_vector = Series(grouping_vector).to_numpy()\n elif isinstance(getattr(grouping_vector, "dtype", None), CategoricalDtype):\n # a passed Categorical\n self._orig_cats = grouping_vector.categories\n grouping_vector, self._all_grouper = recode_for_groupby(\n grouping_vector, sort, observed\n )\n\n self.grouping_vector = grouping_vector\n\n def __repr__(self) -> str:\n return f"Grouping({self.name})"\n\n def __iter__(self) -> Iterator:\n return iter(self.indices)\n\n 
@cache_readonly\n def _passed_categorical(self) -> bool:\n dtype = getattr(self.grouping_vector, "dtype", None)\n return isinstance(dtype, CategoricalDtype)\n\n @cache_readonly\n def name(self) -> Hashable:\n ilevel = self._ilevel\n if ilevel is not None:\n return self._index.names[ilevel]\n\n if isinstance(self._orig_grouper, (Index, Series)):\n return self._orig_grouper.name\n\n elif isinstance(self.grouping_vector, ops.BaseGrouper):\n return self.grouping_vector.result_index.name\n\n elif isinstance(self.grouping_vector, Index):\n return self.grouping_vector.name\n\n # otherwise we have ndarray or ExtensionArray -> no name\n return None\n\n @cache_readonly\n def _ilevel(self) -> int | None:\n """\n If necessary, converted index level name to index level position.\n """\n level = self.level\n if level is None:\n return None\n if not isinstance(level, int):\n index = self._index\n if level not in index.names:\n raise AssertionError(f"Level {level} not in index")\n return index.names.index(level)\n return level\n\n @property\n def ngroups(self) -> int:\n return len(self._group_index)\n\n @cache_readonly\n def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]:\n # we have a list of groupers\n if isinstance(self.grouping_vector, ops.BaseGrouper):\n return self.grouping_vector.indices\n\n values = Categorical(self.grouping_vector)\n return values._reverse_indexer()\n\n @property\n def codes(self) -> npt.NDArray[np.signedinteger]:\n return self._codes_and_uniques[0]\n\n @cache_readonly\n def _group_arraylike(self) -> ArrayLike:\n """\n Analogous to result_index, but holding an ArrayLike to ensure\n we can retain ExtensionDtypes.\n """\n if self._all_grouper is not None:\n # retain dtype for categories, including unobserved ones\n return self._result_index._values\n\n elif self._passed_categorical:\n return self._group_index._values\n\n return self._codes_and_uniques[1]\n\n @property\n def group_arraylike(self) -> ArrayLike:\n """\n Analogous to result_index, but 
    @cache_readonly
    def _result_index(self) -> Index:
        """
        Index of group labels, restored to the original (possibly unobserved)
        categories when the grouper was categorical; otherwise identical to
        ``_group_index``.
        """
        # result_index retains dtype for categories, including unobserved ones,
        # which group_index does not
        if self._all_grouper is not None:
            group_idx = self._group_index
            assert isinstance(group_idx, CategoricalIndex)
            cats = self._orig_cats
            # set_categories is dynamically added
            return group_idx.set_categories(cats)  # type: ignore[attr-defined]
        return self._group_index

    @property
    def result_index(self) -> Index:
        """DEPRECATED public accessor for ``_result_index``."""
        warnings.warn(
            "result_index is deprecated and will be removed in a future "
            "version of pandas",
            category=FutureWarning,
            stacklevel=find_stack_level(),
        )
        return self._result_index

    @cache_readonly
    def _group_index(self) -> Index:
        """
        Index of the unique group values.  When ``dropna=False`` with a
        categorical grouper, an NA entry (code -1) is spliced into the
        position the codes imply: appended at the end when sorting,
        otherwise at the NA's first-appearance position.
        """
        codes, uniques = self._codes_and_uniques
        if not self._dropna and self._passed_categorical:
            assert isinstance(uniques, Categorical)
            # `codes == len(uniques)` marks the sentinel NA code produced by
            # _codes_and_uniques in the sorted dropna=False path.
            if self._sort and (codes == len(uniques)).any():
                # Add NA value on the end when sorting
                uniques = Categorical.from_codes(
                    np.append(uniques.codes, [-1]), uniques.categories, validate=False
                )
            elif len(codes) > 0:
                # Need to determine proper placement of NA value when not sorting
                cat = self.grouping_vector
                na_idx = (cat.codes < 0).argmax()
                if cat.codes[na_idx] < 0:
                    # count number of unique codes that comes before the nan value
                    na_unique_idx = algorithms.nunique_ints(cat.codes[:na_idx])
                    new_codes = np.insert(uniques.codes, na_unique_idx, -1)
                    uniques = Categorical.from_codes(
                        new_codes, uniques.categories, validate=False
                    )
        return Index._with_infer(uniques, name=self.name)
    @cache_readonly
    def _codes_and_uniques(self) -> tuple[npt.NDArray[np.signedinteger], ArrayLike]:
        """
        Factorize the grouping vector.

        Returns the integer codes (one per row, -1 or a dedicated NA code for
        missing values depending on ``dropna``/``sort``) and the array of
        unique group values.
        """
        uniques: ArrayLike
        if self._passed_categorical:
            # we make a CategoricalIndex out of the cat grouper
            # preserving the categories / ordered attributes;
            # doesn't (yet - GH#46909) handle dropna=False
            cat = self.grouping_vector
            categories = cat.categories

            if self._observed:
                # only categories actually present (drop the -1 NA code)
                ucodes = algorithms.unique1d(cat.codes)
                ucodes = ucodes[ucodes != -1]
                if self._sort:
                    ucodes = np.sort(ucodes)
            else:
                # all declared categories, observed or not
                ucodes = np.arange(len(categories))

            uniques = Categorical.from_codes(
                codes=ucodes, categories=categories, ordered=cat.ordered, validate=False
            )

            codes = cat.codes
            if not self._dropna:
                na_mask = codes < 0
                if np.any(na_mask):
                    if self._sort:
                        # Replace NA codes with `largest code + 1`
                        na_code = len(categories)
                        codes = np.where(na_mask, na_code, codes)
                    else:
                        # Insert NA code into the codes based on first appearance
                        # A negative code must exist, no need to check codes[na_idx] < 0
                        na_idx = na_mask.argmax()
                        # count number of unique codes that comes before the nan value
                        na_code = algorithms.nunique_ints(codes[:na_idx])
                        # shift codes at/above the NA slot up by one, then
                        # write the NA code into the masked positions
                        codes = np.where(codes >= na_code, codes + 1, codes)
                        codes = np.where(na_mask, na_code, codes)

            if not self._observed:
                uniques = uniques.reorder_categories(self._orig_cats)

            return codes, uniques

        elif isinstance(self.grouping_vector, ops.BaseGrouper):
            # we have a list of groupers
            codes = self.grouping_vector.codes_info
            uniques = self.grouping_vector.result_index._values
        elif self._uniques is not None:
            # GH#50486 Code grouping_vector using _uniques; allows
            # including uniques that are not present in grouping_vector.
            cat = Categorical(self.grouping_vector, categories=self._uniques)
            codes = cat.codes
            uniques = self._uniques
        else:
            # GH35667, replace dropna=False with use_na_sentinel=False
            # error: Incompatible types in assignment (expression has type "Union[
            # ndarray[Any, Any], Index]", variable has type "Categorical")
            codes, uniques = algorithms.factorize(  # type: ignore[assignment]
                self.grouping_vector, sort=self._sort, use_na_sentinel=self._dropna
            )
        return codes, uniques

    @cache_readonly
    def groups(self) -> dict[Hashable, np.ndarray]:
        """Group the index by the computed codes (via ``Index.groupby``)."""
        cats = Categorical.from_codes(self.codes, self._group_index, validate=False)
        return self._index.groupby(cats)
Even if these are applicable,\n # we need to check if it makes no side effect to subsequent processes\n # on the outside of this condition.\n # (GH 17621)\n if isinstance(group_axis, MultiIndex):\n if is_list_like(level) and len(level) == 1:\n level = level[0]\n\n if key is None and is_scalar(level):\n # Get the level values from group_axis\n key = group_axis.get_level_values(level)\n level = None\n\n else:\n # allow level to be a length-one list-like object\n # (e.g., level=[0])\n # GH 13901\n if is_list_like(level):\n nlevels = len(level)\n if nlevels == 1:\n level = level[0]\n elif nlevels == 0:\n raise ValueError("No group keys passed!")\n else:\n raise ValueError("multiple levels only valid with MultiIndex")\n\n if isinstance(level, str):\n if obj._get_axis(axis).name != level:\n raise ValueError(\n f"level name {level} is not the name "\n f"of the {obj._get_axis_name(axis)}"\n )\n elif level > 0 or level < -1:\n raise ValueError("level > 0 or level < -1 only valid with MultiIndex")\n\n # NOTE: `group_axis` and `group_axis.get_level_values(level)`\n # are same in this section.\n level = None\n key = group_axis\n\n # a passed-in Grouper, directly convert\n if isinstance(key, Grouper):\n grouper, obj = key._get_grouper(obj, validate=False)\n if key.key is None:\n return grouper, frozenset(), obj\n else:\n return grouper, frozenset({key.key}), obj\n\n # already have a BaseGrouper, just return it\n elif isinstance(key, ops.BaseGrouper):\n return key, frozenset(), obj\n\n if not isinstance(key, list):\n keys = [key]\n match_axis_length = False\n else:\n keys = key\n match_axis_length = len(keys) == len(group_axis)\n\n # what are we after, exactly?\n any_callable = any(callable(g) or isinstance(g, dict) for g in keys)\n any_groupers = any(isinstance(g, (Grouper, Grouping)) for g in keys)\n any_arraylike = any(\n isinstance(g, (list, tuple, Series, Index, np.ndarray)) for g in keys\n )\n\n # is this an index replacement?\n if (\n not any_callable\n and not 
any_arraylike\n and not any_groupers\n and match_axis_length\n and level is None\n ):\n if isinstance(obj, DataFrame):\n all_in_columns_index = all(\n g in obj.columns or g in obj.index.names for g in keys\n )\n else:\n assert isinstance(obj, Series)\n all_in_columns_index = all(g in obj.index.names for g in keys)\n\n if not all_in_columns_index:\n keys = [com.asarray_tuplesafe(keys)]\n\n if isinstance(level, (tuple, list)):\n if key is None:\n keys = [None] * len(level)\n levels = level\n else:\n levels = [level] * len(keys)\n\n groupings: list[Grouping] = []\n exclusions: set[Hashable] = set()\n\n # if the actual grouper should be obj[key]\n def is_in_axis(key) -> bool:\n if not _is_label_like(key):\n if obj.ndim == 1:\n return False\n\n # items -> .columns for DataFrame, .index for Series\n items = obj.axes[-1]\n try:\n items.get_loc(key)\n except (KeyError, TypeError, InvalidIndexError):\n # TypeError shows up here if we pass e.g. an Index\n return False\n\n return True\n\n # if the grouper is obj[name]\n def is_in_obj(gpr) -> bool:\n if not hasattr(gpr, "name"):\n return False\n if using_copy_on_write() or warn_copy_on_write():\n # For the CoW case, we check the references to determine if the\n # series is part of the object\n try:\n obj_gpr_column = obj[gpr.name]\n except (KeyError, IndexError, InvalidIndexError, OutOfBoundsDatetime):\n return False\n if isinstance(gpr, Series) and isinstance(obj_gpr_column, Series):\n return gpr._mgr.references_same_values( # type: ignore[union-attr]\n obj_gpr_column._mgr, 0 # type: ignore[arg-type]\n )\n return False\n try:\n return gpr is obj[gpr.name]\n except (KeyError, IndexError, InvalidIndexError, OutOfBoundsDatetime):\n # IndexError reached in e.g. test_skip_group_keys when we pass\n # lambda here\n # InvalidIndexError raised on key-types inappropriate for index,\n # e.g. 
DatetimeIndex.get_loc(tuple())\n # OutOfBoundsDatetime raised when obj is a Series with DatetimeIndex\n # and gpr.name is month str\n return False\n\n for gpr, level in zip(keys, levels):\n if isinstance(obj, DataFrame) and is_in_obj(gpr): # df.groupby(df['name'])\n in_axis = True\n exclusions.add(gpr.name)\n\n elif is_in_axis(gpr): # df.groupby('name')\n if obj.ndim != 1 and gpr in obj:\n if validate:\n obj._check_label_or_level_ambiguity(gpr, axis=axis)\n in_axis, name, gpr = True, gpr, obj[gpr]\n if gpr.ndim != 1:\n # non-unique columns; raise here to get the name in the\n # exception message\n raise ValueError(f"Grouper for '{name}' not 1-dimensional")\n exclusions.add(name)\n elif obj._is_level_reference(gpr, axis=axis):\n in_axis, level, gpr = False, gpr, None\n else:\n raise KeyError(gpr)\n elif isinstance(gpr, Grouper) and gpr.key is not None:\n # Add key to exclusions\n exclusions.add(gpr.key)\n in_axis = True\n else:\n in_axis = False\n\n # create the Grouping\n # allow us to passing the actual Grouping as the gpr\n ping = (\n Grouping(\n group_axis,\n gpr,\n obj=obj,\n level=level,\n sort=sort,\n observed=observed,\n in_axis=in_axis,\n dropna=dropna,\n )\n if not isinstance(gpr, Grouping)\n else gpr\n )\n\n groupings.append(ping)\n\n if len(groupings) == 0 and len(obj):\n raise ValueError("No group keys passed!")\n if len(groupings) == 0:\n groupings.append(Grouping(Index([], dtype="int"), np.array([], dtype=np.intp)))\n\n # create the internals grouper\n grouper = ops.BaseGrouper(group_axis, groupings, sort=sort, dropna=dropna)\n return grouper, frozenset(exclusions), obj\n\n\ndef _is_label_like(val) -> bool:\n return isinstance(val, (str, tuple)) or (val is not None and is_scalar(val))\n\n\ndef _convert_grouper(axis: Index, grouper):\n if isinstance(grouper, dict):\n return grouper.get\n elif isinstance(grouper, Series):\n if grouper.index.equals(axis):\n return grouper._values\n else:\n return grouper.reindex(axis)._values\n elif isinstance(grouper, 
MultiIndex):\n return grouper._values\n elif isinstance(grouper, (list, tuple, Index, Categorical, np.ndarray)):\n if len(grouper) != len(axis):\n raise ValueError("Grouper and axis must be same length")\n\n if isinstance(grouper, (list, tuple)):\n grouper = com.asarray_tuplesafe(grouper)\n return grouper\n else:\n return grouper\n
.venv\Lib\site-packages\pandas\core\groupby\grouper.py
grouper.py
Python
38,703
0.95
0.142468
0.110647
python-kit
501
2024-04-28T07:08:52.406649
Apache-2.0
false
929a09395e7b79488beeea55dfb52a5b