content
stringlengths
1
103k
path
stringlengths
8
216
filename
stringlengths
2
179
language
stringclasses
15 values
size_bytes
int64
2
189k
quality_score
float64
0.5
0.95
complexity
float64
0
1
documentation_ratio
float64
0
1
repository
stringclasses
5 values
stars
int64
0
1k
created_date
stringdate
2023-07-10 19:21:08
2025-07-09 19:11:45
license
stringclasses
4 values
is_test
bool
2 classes
file_hash
stringlengths
32
32
from __future__ import annotations\n\nimport itertools\nfrom typing import (\n TYPE_CHECKING,\n cast,\n)\nimport warnings\n\nimport numpy as np\n\nimport pandas._libs.reshape as libreshape\nfrom pandas.errors import PerformanceWarning\nfrom pandas.util._decorators import cache_readonly\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.cast import (\n find_common_type,\n maybe_promote,\n)\nfrom pandas.core.dtypes.common import (\n ensure_platform_int,\n is_1d_only_ea_dtype,\n is_integer,\n needs_i8_conversion,\n)\nfrom pandas.core.dtypes.dtypes import ExtensionDtype\nfrom pandas.core.dtypes.missing import notna\n\nimport pandas.core.algorithms as algos\nfrom pandas.core.algorithms import (\n factorize,\n unique,\n)\nfrom pandas.core.arrays.categorical import factorize_from_iterable\nfrom pandas.core.construction import ensure_wrapped_if_datetimelike\nfrom pandas.core.frame import DataFrame\nfrom pandas.core.indexes.api import (\n Index,\n MultiIndex,\n RangeIndex,\n)\nfrom pandas.core.reshape.concat import concat\nfrom pandas.core.series import Series\nfrom pandas.core.sorting import (\n compress_group_index,\n decons_obs_group_ids,\n get_compressed_ids,\n get_group_index,\n get_group_index_sorter,\n)\n\nif TYPE_CHECKING:\n from pandas._typing import (\n ArrayLike,\n Level,\n npt,\n )\n\n from pandas.core.arrays import ExtensionArray\n from pandas.core.indexes.frozen import FrozenList\n\n\nclass _Unstacker:\n """\n Helper class to unstack data / pivot with multi-level index\n\n Parameters\n ----------\n index : MultiIndex\n level : int or str, default last level\n Level to "unstack". Accepts a name for the level.\n fill_value : scalar, optional\n Default value to fill in missing values if subgroups do not have the\n same set of labels. By default, missing values will be replaced with\n the default fill value for that data type, NaN for float, NaT for\n datetimelike, etc. 
For integer types, by default data will converted to\n float and missing values will be set to NaN.\n constructor : object\n Pandas ``DataFrame`` or subclass used to create unstacked\n response. If None, DataFrame will be used.\n\n Examples\n --------\n >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),\n ... ('two', 'a'), ('two', 'b')])\n >>> s = pd.Series(np.arange(1, 5, dtype=np.int64), index=index)\n >>> s\n one a 1\n b 2\n two a 3\n b 4\n dtype: int64\n\n >>> s.unstack(level=-1)\n a b\n one 1 2\n two 3 4\n\n >>> s.unstack(level=0)\n one two\n a 1 3\n b 2 4\n\n Returns\n -------\n unstacked : DataFrame\n """\n\n def __init__(\n self, index: MultiIndex, level: Level, constructor, sort: bool = True\n ) -> None:\n self.constructor = constructor\n self.sort = sort\n\n self.index = index.remove_unused_levels()\n\n self.level = self.index._get_level_number(level)\n\n # when index includes `nan`, need to lift levels/strides by 1\n self.lift = 1 if -1 in self.index.codes[self.level] else 0\n\n # Note: the "pop" below alters these in-place.\n self.new_index_levels = list(self.index.levels)\n self.new_index_names = list(self.index.names)\n\n self.removed_name = self.new_index_names.pop(self.level)\n self.removed_level = self.new_index_levels.pop(self.level)\n self.removed_level_full = index.levels[self.level]\n if not self.sort:\n unique_codes = unique(self.index.codes[self.level])\n self.removed_level = self.removed_level.take(unique_codes)\n self.removed_level_full = self.removed_level_full.take(unique_codes)\n\n # Bug fix GH 20601\n # If the data frame is too big, the number of unique index combination\n # will cause int32 overflow on windows environments.\n # We want to check and raise an warning before this happens\n num_rows = np.max([index_level.size for index_level in self.new_index_levels])\n num_columns = self.removed_level.size\n\n # GH20601: This forces an overflow if the number of cells is too high.\n num_cells = num_rows * num_columns\n\n # 
GH 26314: Previous ValueError raised was too restrictive for many users.\n if num_cells > np.iinfo(np.int32).max:\n warnings.warn(\n f"The following operation may generate {num_cells} cells "\n f"in the resulting pandas object.",\n PerformanceWarning,\n stacklevel=find_stack_level(),\n )\n\n self._make_selectors()\n\n @cache_readonly\n def _indexer_and_to_sort(\n self,\n ) -> tuple[\n npt.NDArray[np.intp],\n list[np.ndarray], # each has _some_ signed integer dtype\n ]:\n v = self.level\n\n codes = list(self.index.codes)\n levs = list(self.index.levels)\n to_sort = codes[:v] + codes[v + 1 :] + [codes[v]]\n sizes = tuple(len(x) for x in levs[:v] + levs[v + 1 :] + [levs[v]])\n\n comp_index, obs_ids = get_compressed_ids(to_sort, sizes)\n ngroups = len(obs_ids)\n\n indexer = get_group_index_sorter(comp_index, ngroups)\n return indexer, to_sort\n\n @cache_readonly\n def sorted_labels(self) -> list[np.ndarray]:\n indexer, to_sort = self._indexer_and_to_sort\n if self.sort:\n return [line.take(indexer) for line in to_sort]\n return to_sort\n\n def _make_sorted_values(self, values: np.ndarray) -> np.ndarray:\n if self.sort:\n indexer, _ = self._indexer_and_to_sort\n\n sorted_values = algos.take_nd(values, indexer, axis=0)\n return sorted_values\n return values\n\n def _make_selectors(self):\n new_levels = self.new_index_levels\n\n # make the mask\n remaining_labels = self.sorted_labels[:-1]\n level_sizes = tuple(len(x) for x in new_levels)\n\n comp_index, obs_ids = get_compressed_ids(remaining_labels, level_sizes)\n ngroups = len(obs_ids)\n\n comp_index = ensure_platform_int(comp_index)\n stride = self.index.levshape[self.level] + self.lift\n self.full_shape = ngroups, stride\n\n selector = self.sorted_labels[-1] + stride * comp_index + self.lift\n mask = np.zeros(np.prod(self.full_shape), dtype=bool)\n mask.put(selector, True)\n\n if mask.sum() < len(self.index):\n raise ValueError("Index contains duplicate entries, cannot reshape")\n\n self.group_index = comp_index\n 
self.mask = mask\n if self.sort:\n self.compressor = comp_index.searchsorted(np.arange(ngroups))\n else:\n self.compressor = np.sort(np.unique(comp_index, return_index=True)[1])\n\n @cache_readonly\n def mask_all(self) -> bool:\n return bool(self.mask.all())\n\n @cache_readonly\n def arange_result(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.bool_]]:\n # We cache this for reuse in ExtensionBlock._unstack\n dummy_arr = np.arange(len(self.index), dtype=np.intp)\n new_values, mask = self.get_new_values(dummy_arr, fill_value=-1)\n return new_values, mask.any(0)\n # TODO: in all tests we have mask.any(0).all(); can we rely on that?\n\n def get_result(self, values, value_columns, fill_value) -> DataFrame:\n if values.ndim == 1:\n values = values[:, np.newaxis]\n\n if value_columns is None and values.shape[1] != 1: # pragma: no cover\n raise ValueError("must pass column labels for multi-column data")\n\n values, _ = self.get_new_values(values, fill_value)\n columns = self.get_new_columns(value_columns)\n index = self.new_index\n\n return self.constructor(\n values, index=index, columns=columns, dtype=values.dtype\n )\n\n def get_new_values(self, values, fill_value=None):\n if values.ndim == 1:\n values = values[:, np.newaxis]\n\n sorted_values = self._make_sorted_values(values)\n\n # place the values\n length, width = self.full_shape\n stride = values.shape[1]\n result_width = width * stride\n result_shape = (length, result_width)\n mask = self.mask\n mask_all = self.mask_all\n\n # we can simply reshape if we don't have a mask\n if mask_all and len(values):\n # TODO: Under what circumstances can we rely on sorted_values\n # matching values? 
When that holds, we can slice instead\n # of take (in particular for EAs)\n new_values = (\n sorted_values.reshape(length, width, stride)\n .swapaxes(1, 2)\n .reshape(result_shape)\n )\n new_mask = np.ones(result_shape, dtype=bool)\n return new_values, new_mask\n\n dtype = values.dtype\n\n # if our mask is all True, then we can use our existing dtype\n if mask_all:\n dtype = values.dtype\n new_values = np.empty(result_shape, dtype=dtype)\n else:\n if isinstance(dtype, ExtensionDtype):\n # GH#41875\n # We are assuming that fill_value can be held by this dtype,\n # unlike the non-EA case that promotes.\n cls = dtype.construct_array_type()\n new_values = cls._empty(result_shape, dtype=dtype)\n new_values[:] = fill_value\n else:\n dtype, fill_value = maybe_promote(dtype, fill_value)\n new_values = np.empty(result_shape, dtype=dtype)\n new_values.fill(fill_value)\n\n name = dtype.name\n new_mask = np.zeros(result_shape, dtype=bool)\n\n # we need to convert to a basic dtype\n # and possibly coerce an input to our output dtype\n # e.g. 
ints -> floats\n if needs_i8_conversion(values.dtype):\n sorted_values = sorted_values.view("i8")\n new_values = new_values.view("i8")\n else:\n sorted_values = sorted_values.astype(name, copy=False)\n\n # fill in our values & mask\n libreshape.unstack(\n sorted_values,\n mask.view("u1"),\n stride,\n length,\n width,\n new_values,\n new_mask.view("u1"),\n )\n\n # reconstruct dtype if needed\n if needs_i8_conversion(values.dtype):\n # view as datetime64 so we can wrap in DatetimeArray and use\n # DTA's view method\n new_values = new_values.view("M8[ns]")\n new_values = ensure_wrapped_if_datetimelike(new_values)\n new_values = new_values.view(values.dtype)\n\n return new_values, new_mask\n\n def get_new_columns(self, value_columns: Index | None):\n if value_columns is None:\n if self.lift == 0:\n return self.removed_level._rename(name=self.removed_name)\n\n lev = self.removed_level.insert(0, item=self.removed_level._na_value)\n return lev.rename(self.removed_name)\n\n stride = len(self.removed_level) + self.lift\n width = len(value_columns)\n propagator = np.repeat(np.arange(width), stride)\n\n new_levels: FrozenList | list[Index]\n\n if isinstance(value_columns, MultiIndex):\n # error: Cannot determine type of "__add__" [has-type]\n new_levels = value_columns.levels + ( # type: ignore[has-type]\n self.removed_level_full,\n )\n new_names = value_columns.names + (self.removed_name,)\n\n new_codes = [lab.take(propagator) for lab in value_columns.codes]\n else:\n new_levels = [\n value_columns,\n self.removed_level_full,\n ]\n new_names = [value_columns.name, self.removed_name]\n new_codes = [propagator]\n\n repeater = self._repeater\n\n # The entire level is then just a repetition of the single chunk:\n new_codes.append(np.tile(repeater, width))\n return MultiIndex(\n levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False\n )\n\n @cache_readonly\n def _repeater(self) -> np.ndarray:\n # The two indices differ only if the unstacked level had unused 
items:\n if len(self.removed_level_full) != len(self.removed_level):\n # In this case, we remap the new codes to the original level:\n repeater = self.removed_level_full.get_indexer(self.removed_level)\n if self.lift:\n repeater = np.insert(repeater, 0, -1)\n else:\n # Otherwise, we just use each level item exactly once:\n stride = len(self.removed_level) + self.lift\n repeater = np.arange(stride) - self.lift\n\n return repeater\n\n @cache_readonly\n def new_index(self) -> MultiIndex:\n # Does not depend on values or value_columns\n result_codes = [lab.take(self.compressor) for lab in self.sorted_labels[:-1]]\n\n # construct the new index\n if len(self.new_index_levels) == 1:\n level, level_codes = self.new_index_levels[0], result_codes[0]\n if (level_codes == -1).any():\n level = level.insert(len(level), level._na_value)\n return level.take(level_codes).rename(self.new_index_names[0])\n\n return MultiIndex(\n levels=self.new_index_levels,\n codes=result_codes,\n names=self.new_index_names,\n verify_integrity=False,\n )\n\n\ndef _unstack_multiple(\n data: Series | DataFrame, clocs, fill_value=None, sort: bool = True\n):\n if len(clocs) == 0:\n return data\n\n # NOTE: This doesn't deal with hierarchical columns yet\n\n index = data.index\n index = cast(MultiIndex, index) # caller is responsible for checking\n\n # GH 19966 Make sure if MultiIndexed index has tuple name, they will be\n # recognised as a whole\n if clocs in index.names:\n clocs = [clocs]\n clocs = [index._get_level_number(i) for i in clocs]\n\n rlocs = [i for i in range(index.nlevels) if i not in clocs]\n\n clevels = [index.levels[i] for i in clocs]\n ccodes = [index.codes[i] for i in clocs]\n cnames = [index.names[i] for i in clocs]\n rlevels = [index.levels[i] for i in rlocs]\n rcodes = [index.codes[i] for i in rlocs]\n rnames = [index.names[i] for i in rlocs]\n\n shape = tuple(len(x) for x in clevels)\n group_index = get_group_index(ccodes, shape, sort=False, xnull=False)\n\n comp_ids, obs_ids = 
compress_group_index(group_index, sort=False)\n recons_codes = decons_obs_group_ids(comp_ids, obs_ids, shape, ccodes, xnull=False)\n\n if not rlocs:\n # Everything is in clocs, so the dummy df has a regular index\n dummy_index = Index(obs_ids, name="__placeholder__")\n else:\n dummy_index = MultiIndex(\n levels=rlevels + [obs_ids],\n codes=rcodes + [comp_ids],\n names=rnames + ["__placeholder__"],\n verify_integrity=False,\n )\n\n if isinstance(data, Series):\n dummy = data.copy()\n dummy.index = dummy_index\n\n unstacked = dummy.unstack("__placeholder__", fill_value=fill_value, sort=sort)\n new_levels = clevels\n new_names = cnames\n new_codes = recons_codes\n else:\n if isinstance(data.columns, MultiIndex):\n result = data\n while clocs:\n val = clocs.pop(0)\n result = result.unstack(val, fill_value=fill_value, sort=sort)\n clocs = [v if v < val else v - 1 for v in clocs]\n\n return result\n\n # GH#42579 deep=False to avoid consolidating\n dummy_df = data.copy(deep=False)\n dummy_df.index = dummy_index\n\n unstacked = dummy_df.unstack(\n "__placeholder__", fill_value=fill_value, sort=sort\n )\n if isinstance(unstacked, Series):\n unstcols = unstacked.index\n else:\n unstcols = unstacked.columns\n assert isinstance(unstcols, MultiIndex) # for mypy\n new_levels = [unstcols.levels[0]] + clevels\n new_names = [data.columns.name] + cnames\n\n new_codes = [unstcols.codes[0]]\n new_codes.extend(rec.take(unstcols.codes[-1]) for rec in recons_codes)\n\n new_columns = MultiIndex(\n levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False\n )\n\n if isinstance(unstacked, Series):\n unstacked.index = new_columns\n else:\n unstacked.columns = new_columns\n\n return unstacked\n\n\ndef unstack(obj: Series | DataFrame, level, fill_value=None, sort: bool = True):\n if isinstance(level, (tuple, list)):\n if len(level) != 1:\n # _unstack_multiple only handles MultiIndexes,\n # and isn't needed for a single level\n return _unstack_multiple(obj, level, 
fill_value=fill_value, sort=sort)\n else:\n level = level[0]\n\n if not is_integer(level) and not level == "__placeholder__":\n # check if level is valid in case of regular index\n obj.index._get_level_number(level)\n\n if isinstance(obj, DataFrame):\n if isinstance(obj.index, MultiIndex):\n return _unstack_frame(obj, level, fill_value=fill_value, sort=sort)\n else:\n return obj.T.stack(future_stack=True)\n elif not isinstance(obj.index, MultiIndex):\n # GH 36113\n # Give nicer error messages when unstack a Series whose\n # Index is not a MultiIndex.\n raise ValueError(\n f"index must be a MultiIndex to unstack, {type(obj.index)} was passed"\n )\n else:\n if is_1d_only_ea_dtype(obj.dtype):\n return _unstack_extension_series(obj, level, fill_value, sort=sort)\n unstacker = _Unstacker(\n obj.index, level=level, constructor=obj._constructor_expanddim, sort=sort\n )\n return unstacker.get_result(\n obj._values, value_columns=None, fill_value=fill_value\n )\n\n\ndef _unstack_frame(\n obj: DataFrame, level, fill_value=None, sort: bool = True\n) -> DataFrame:\n assert isinstance(obj.index, MultiIndex) # checked by caller\n unstacker = _Unstacker(\n obj.index, level=level, constructor=obj._constructor, sort=sort\n )\n\n if not obj._can_fast_transpose:\n mgr = obj._mgr.unstack(unstacker, fill_value=fill_value)\n return obj._constructor_from_mgr(mgr, axes=mgr.axes)\n else:\n return unstacker.get_result(\n obj._values, value_columns=obj.columns, fill_value=fill_value\n )\n\n\ndef _unstack_extension_series(\n series: Series, level, fill_value, sort: bool\n) -> DataFrame:\n """\n Unstack an ExtensionArray-backed Series.\n\n The ExtensionDtype is preserved.\n\n Parameters\n ----------\n series : Series\n A Series with an ExtensionArray for values\n level : Any\n The level name or number.\n fill_value : Any\n The user-level (not physical storage) fill value to use for\n missing values introduced by the reshape. 
Passed to\n ``series.values.take``.\n sort : bool\n Whether to sort the resulting MuliIndex levels\n\n Returns\n -------\n DataFrame\n Each column of the DataFrame will have the same dtype as\n the input Series.\n """\n # Defer to the logic in ExtensionBlock._unstack\n df = series.to_frame()\n result = df.unstack(level=level, fill_value=fill_value, sort=sort)\n\n # equiv: result.droplevel(level=0, axis=1)\n # but this avoids an extra copy\n result.columns = result.columns._drop_level_numbers([0])\n return result\n\n\ndef stack(frame: DataFrame, level=-1, dropna: bool = True, sort: bool = True):\n """\n Convert DataFrame to Series with multi-level Index. Columns become the\n second level of the resulting hierarchical index\n\n Returns\n -------\n stacked : Series or DataFrame\n """\n\n def stack_factorize(index):\n if index.is_unique:\n return index, np.arange(len(index))\n codes, categories = factorize_from_iterable(index)\n return categories, codes\n\n N, K = frame.shape\n\n # Will also convert negative level numbers and check if out of bounds.\n level_num = frame.columns._get_level_number(level)\n\n if isinstance(frame.columns, MultiIndex):\n return _stack_multi_columns(\n frame, level_num=level_num, dropna=dropna, sort=sort\n )\n elif isinstance(frame.index, MultiIndex):\n new_levels = list(frame.index.levels)\n new_codes = [lab.repeat(K) for lab in frame.index.codes]\n\n clev, clab = stack_factorize(frame.columns)\n new_levels.append(clev)\n new_codes.append(np.tile(clab, N).ravel())\n\n new_names = list(frame.index.names)\n new_names.append(frame.columns.name)\n new_index = MultiIndex(\n levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False\n )\n else:\n levels, (ilab, clab) = zip(*map(stack_factorize, (frame.index, frame.columns)))\n codes = ilab.repeat(K), np.tile(clab, N).ravel()\n new_index = MultiIndex(\n levels=levels,\n codes=codes,\n names=[frame.index.name, frame.columns.name],\n verify_integrity=False,\n )\n\n new_values: 
ArrayLike\n if not frame.empty and frame._is_homogeneous_type:\n # For homogeneous EAs, frame._values will coerce to object. So\n # we concatenate instead.\n dtypes = list(frame.dtypes._values)\n dtype = dtypes[0]\n\n if isinstance(dtype, ExtensionDtype):\n arr = dtype.construct_array_type()\n new_values = arr._concat_same_type(\n [col._values for _, col in frame.items()]\n )\n new_values = _reorder_for_extension_array_stack(new_values, N, K)\n else:\n # homogeneous, non-EA\n new_values = frame._values.ravel()\n\n else:\n # non-homogeneous\n new_values = frame._values.ravel()\n\n if dropna:\n mask = notna(new_values)\n new_values = new_values[mask]\n new_index = new_index[mask]\n\n return frame._constructor_sliced(new_values, index=new_index)\n\n\ndef stack_multiple(frame: DataFrame, level, dropna: bool = True, sort: bool = True):\n # If all passed levels match up to column names, no\n # ambiguity about what to do\n if all(lev in frame.columns.names for lev in level):\n result = frame\n for lev in level:\n result = stack(result, lev, dropna=dropna, sort=sort)\n\n # Otherwise, level numbers may change as each successive level is stacked\n elif all(isinstance(lev, int) for lev in level):\n # As each stack is done, the level numbers decrease, so we need\n # to account for that when level is a sequence of ints\n result = frame\n # _get_level_number() checks level numbers are in range and converts\n # negative numbers to positive\n level = [frame.columns._get_level_number(lev) for lev in level]\n\n while level:\n lev = level.pop(0)\n result = stack(result, lev, dropna=dropna, sort=sort)\n # Decrement all level numbers greater than current, as these\n # have now shifted down by one\n level = [v if v <= lev else v - 1 for v in level]\n\n else:\n raise ValueError(\n "level should contain all level names or all level "\n "numbers, not a mixture of the two."\n )\n\n return result\n\n\ndef _stack_multi_column_index(columns: MultiIndex) -> MultiIndex:\n """Creates a MultiIndex 
from the first N-1 levels of this MultiIndex."""\n if len(columns.levels) <= 2:\n return columns.levels[0]._rename(name=columns.names[0])\n\n levs = [\n [lev[c] if c >= 0 else None for c in codes]\n for lev, codes in zip(columns.levels[:-1], columns.codes[:-1])\n ]\n\n # Remove duplicate tuples in the MultiIndex.\n tuples = zip(*levs)\n unique_tuples = (key for key, _ in itertools.groupby(tuples))\n new_levs = zip(*unique_tuples)\n\n # The dtype of each level must be explicitly set to avoid inferring the wrong type.\n # See GH-36991.\n return MultiIndex.from_arrays(\n [\n # Not all indices can accept None values.\n Index(new_lev, dtype=lev.dtype) if None not in new_lev else new_lev\n for new_lev, lev in zip(new_levs, columns.levels)\n ],\n names=columns.names[:-1],\n )\n\n\ndef _stack_multi_columns(\n frame: DataFrame, level_num: int = -1, dropna: bool = True, sort: bool = True\n) -> DataFrame:\n def _convert_level_number(level_num: int, columns: Index):\n """\n Logic for converting the level number to something we can safely pass\n to swaplevel.\n\n If `level_num` matches a column name return the name from\n position `level_num`, otherwise return `level_num`.\n """\n if level_num in columns.names:\n return columns.names[level_num]\n\n return level_num\n\n this = frame.copy(deep=False)\n mi_cols = this.columns # cast(MultiIndex, this.columns)\n assert isinstance(mi_cols, MultiIndex) # caller is responsible\n\n # this makes life much simpler\n if level_num != mi_cols.nlevels - 1:\n # roll levels to put selected level at end\n roll_columns = mi_cols\n for i in range(level_num, mi_cols.nlevels - 1):\n # Need to check if the ints conflict with level names\n lev1 = _convert_level_number(i, roll_columns)\n lev2 = _convert_level_number(i + 1, roll_columns)\n roll_columns = roll_columns.swaplevel(lev1, lev2)\n this.columns = mi_cols = roll_columns\n\n if not mi_cols._is_lexsorted() and sort:\n # Workaround the edge case where 0 is one of the column names,\n # which 
interferes with trying to sort based on the first\n # level\n level_to_sort = _convert_level_number(0, mi_cols)\n this = this.sort_index(level=level_to_sort, axis=1)\n mi_cols = this.columns\n\n mi_cols = cast(MultiIndex, mi_cols)\n new_columns = _stack_multi_column_index(mi_cols)\n\n # time to ravel the values\n new_data = {}\n level_vals = mi_cols.levels[-1]\n level_codes = unique(mi_cols.codes[-1])\n if sort:\n level_codes = np.sort(level_codes)\n level_vals_nan = level_vals.insert(len(level_vals), None)\n\n level_vals_used = np.take(level_vals_nan, level_codes)\n levsize = len(level_codes)\n drop_cols = []\n for key in new_columns:\n try:\n loc = this.columns.get_loc(key)\n except KeyError:\n drop_cols.append(key)\n continue\n\n # can make more efficient?\n # we almost always return a slice\n # but if unsorted can get a boolean\n # indexer\n if not isinstance(loc, slice):\n slice_len = len(loc)\n else:\n slice_len = loc.stop - loc.start\n\n if slice_len != levsize:\n chunk = this.loc[:, this.columns[loc]]\n chunk.columns = level_vals_nan.take(chunk.columns.codes[-1])\n value_slice = chunk.reindex(columns=level_vals_used).values\n else:\n subset = this.iloc[:, loc]\n dtype = find_common_type(subset.dtypes.tolist())\n if isinstance(dtype, ExtensionDtype):\n # TODO(EA2D): won't need special case, can go through .values\n # paths below (might change to ._values)\n value_slice = dtype.construct_array_type()._concat_same_type(\n [x._values.astype(dtype, copy=False) for _, x in subset.items()]\n )\n N, K = subset.shape\n idx = np.arange(N * K).reshape(K, N).T.ravel()\n value_slice = value_slice.take(idx)\n else:\n value_slice = subset.values\n\n if value_slice.ndim > 1:\n # i.e. 
not extension\n value_slice = value_slice.ravel()\n\n new_data[key] = value_slice\n\n if len(drop_cols) > 0:\n new_columns = new_columns.difference(drop_cols)\n\n N = len(this)\n\n if isinstance(this.index, MultiIndex):\n new_levels = list(this.index.levels)\n new_names = list(this.index.names)\n new_codes = [lab.repeat(levsize) for lab in this.index.codes]\n else:\n old_codes, old_levels = factorize_from_iterable(this.index)\n new_levels = [old_levels]\n new_codes = [old_codes.repeat(levsize)]\n new_names = [this.index.name] # something better?\n\n new_levels.append(level_vals)\n new_codes.append(np.tile(level_codes, N))\n new_names.append(frame.columns.names[level_num])\n\n new_index = MultiIndex(\n levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False\n )\n\n result = frame._constructor(new_data, index=new_index, columns=new_columns)\n\n if frame.columns.nlevels > 1:\n desired_columns = frame.columns._drop_level_numbers([level_num]).unique()\n if not result.columns.equals(desired_columns):\n result = result[desired_columns]\n\n # more efficient way to go about this? 
can do the whole masking biz but\n # will only save a small amount of time...\n if dropna:\n result = result.dropna(axis=0, how="all")\n\n return result\n\n\ndef _reorder_for_extension_array_stack(\n arr: ExtensionArray, n_rows: int, n_columns: int\n) -> ExtensionArray:\n """\n Re-orders the values when stacking multiple extension-arrays.\n\n The indirect stacking method used for EAs requires a followup\n take to get the order correct.\n\n Parameters\n ----------\n arr : ExtensionArray\n n_rows, n_columns : int\n The number of rows and columns in the original DataFrame.\n\n Returns\n -------\n taken : ExtensionArray\n The original `arr` with elements re-ordered appropriately\n\n Examples\n --------\n >>> arr = np.array(['a', 'b', 'c', 'd', 'e', 'f'])\n >>> _reorder_for_extension_array_stack(arr, 2, 3)\n array(['a', 'c', 'e', 'b', 'd', 'f'], dtype='<U1')\n\n >>> _reorder_for_extension_array_stack(arr, 3, 2)\n array(['a', 'd', 'b', 'e', 'c', 'f'], dtype='<U1')\n """\n # final take to get the order correct.\n # idx is an indexer like\n # [c0r0, c1r0, c2r0, ...,\n # c0r1, c1r1, c2r1, ...]\n idx = np.arange(n_rows * n_columns).reshape(n_columns, n_rows).T.ravel()\n return arr.take(idx)\n\n\ndef stack_v3(frame: DataFrame, level: list[int]) -> Series | DataFrame:\n if frame.columns.nunique() != len(frame.columns):\n raise ValueError("Columns with duplicate values are not supported in stack")\n\n # If we need to drop `level` from columns, it needs to be in descending order\n drop_levnums = sorted(level, reverse=True)\n stack_cols = frame.columns._drop_level_numbers(\n [k for k in range(frame.columns.nlevels) if k not in level][::-1]\n )\n if len(level) > 1:\n # Arrange columns in the order we want to take them, e.g. 
level=[2, 0, 1]\n sorter = np.argsort(level)\n ordered_stack_cols = stack_cols._reorder_ilevels(sorter)\n else:\n ordered_stack_cols = stack_cols\n\n stack_cols_unique = stack_cols.unique()\n ordered_stack_cols_unique = ordered_stack_cols.unique()\n\n # Grab data for each unique index to be stacked\n buf = []\n for idx in stack_cols_unique:\n if len(frame.columns) == 1:\n data = frame.copy()\n else:\n # Take the data from frame corresponding to this idx value\n if len(level) == 1:\n idx = (idx,)\n gen = iter(idx)\n column_indexer = tuple(\n next(gen) if k in level else slice(None)\n for k in range(frame.columns.nlevels)\n )\n data = frame.loc[:, column_indexer]\n\n if len(level) < frame.columns.nlevels:\n data.columns = data.columns._drop_level_numbers(drop_levnums)\n elif stack_cols.nlevels == 1:\n if data.ndim == 1:\n data.name = 0\n else:\n data.columns = RangeIndex(len(data.columns))\n buf.append(data)\n\n result: Series | DataFrame\n if len(buf) > 0 and not frame.empty:\n result = concat(buf)\n ratio = len(result) // len(frame)\n else:\n # input is empty\n if len(level) < frame.columns.nlevels:\n # concat column order may be different from dropping the levels\n new_columns = frame.columns._drop_level_numbers(drop_levnums).unique()\n else:\n new_columns = [0]\n result = DataFrame(columns=new_columns, dtype=frame._values.dtype)\n ratio = 0\n\n if len(level) < frame.columns.nlevels:\n # concat column order may be different from dropping the levels\n desired_columns = frame.columns._drop_level_numbers(drop_levnums).unique()\n if not result.columns.equals(desired_columns):\n result = result[desired_columns]\n\n # Construct the correct MultiIndex by combining the frame's index and\n # stacked columns.\n index_levels: list | FrozenList\n if isinstance(frame.index, MultiIndex):\n index_levels = frame.index.levels\n index_codes = list(np.tile(frame.index.codes, (1, ratio)))\n else:\n codes, uniques = factorize(frame.index, use_na_sentinel=False)\n index_levels = 
[uniques]\n index_codes = list(np.tile(codes, (1, ratio)))\n if isinstance(stack_cols, MultiIndex):\n column_levels = ordered_stack_cols.levels\n column_codes = ordered_stack_cols.drop_duplicates().codes\n else:\n column_levels = [ordered_stack_cols.unique()]\n column_codes = [factorize(ordered_stack_cols_unique, use_na_sentinel=False)[0]]\n column_codes = [np.repeat(codes, len(frame)) for codes in column_codes]\n result.index = MultiIndex(\n levels=index_levels + column_levels,\n codes=index_codes + column_codes,\n names=frame.index.names + list(ordered_stack_cols.names),\n verify_integrity=False,\n )\n\n # sort result, but faster than calling sort_index since we know the order we need\n len_df = len(frame)\n n_uniques = len(ordered_stack_cols_unique)\n indexer = np.arange(n_uniques)\n idxs = np.tile(len_df * indexer, len_df) + np.repeat(np.arange(len_df), n_uniques)\n result = result.take(idxs)\n\n # Reshape/rename if needed and dropna\n if result.ndim == 2 and frame.columns.nlevels == len(level):\n if len(result.columns) == 0:\n result = Series(index=result.index)\n else:\n result = result.iloc[:, 0]\n if result.ndim == 1:\n result.name = None\n\n return result\n
.venv\Lib\site-packages\pandas\core\reshape\reshape.py
reshape.py
Python
34,661
0.95
0.174924
0.118005
react-lib
107
2024-09-25T11:09:54.548200
MIT
false
7e32ae339d62ac716626b93b7d1abbc5
"""\nQuantilization functions and related stuff\n"""\nfrom __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Literal,\n)\n\nimport numpy as np\n\nfrom pandas._libs import (\n Timedelta,\n Timestamp,\n lib,\n)\n\nfrom pandas.core.dtypes.common import (\n ensure_platform_int,\n is_bool_dtype,\n is_integer,\n is_list_like,\n is_numeric_dtype,\n is_scalar,\n)\nfrom pandas.core.dtypes.dtypes import (\n CategoricalDtype,\n DatetimeTZDtype,\n ExtensionDtype,\n)\nfrom pandas.core.dtypes.generic import ABCSeries\nfrom pandas.core.dtypes.missing import isna\n\nfrom pandas import (\n Categorical,\n Index,\n IntervalIndex,\n)\nimport pandas.core.algorithms as algos\nfrom pandas.core.arrays.datetimelike import dtype_to_unit\n\nif TYPE_CHECKING:\n from pandas._typing import (\n DtypeObj,\n IntervalLeftRight,\n )\n\n\ndef cut(\n x,\n bins,\n right: bool = True,\n labels=None,\n retbins: bool = False,\n precision: int = 3,\n include_lowest: bool = False,\n duplicates: str = "raise",\n ordered: bool = True,\n):\n """\n Bin values into discrete intervals.\n\n Use `cut` when you need to segment and sort data values into bins. This\n function is also useful for going from a continuous variable to a\n categorical variable. For example, `cut` could convert ages to groups of\n age ranges. Supports binning into an equal number of bins, or a\n pre-specified array of bins.\n\n Parameters\n ----------\n x : array-like\n The input array to be binned. Must be 1-dimensional.\n bins : int, sequence of scalars, or IntervalIndex\n The criteria to bin by.\n\n * int : Defines the number of equal-width bins in the range of `x`. The\n range of `x` is extended by .1% on each side to include the minimum\n and maximum values of `x`.\n * sequence of scalars : Defines the bin edges allowing for non-uniform\n width. No extension of the range of `x` is done.\n * IntervalIndex : Defines the exact bins to be used. 
Note that\n IntervalIndex for `bins` must be non-overlapping.\n\n right : bool, default True\n Indicates whether `bins` includes the rightmost edge or not. If\n ``right == True`` (the default), then the `bins` ``[1, 2, 3, 4]``\n indicate (1,2], (2,3], (3,4]. This argument is ignored when\n `bins` is an IntervalIndex.\n labels : array or False, default None\n Specifies the labels for the returned bins. Must be the same length as\n the resulting bins. If False, returns only integer indicators of the\n bins. This affects the type of the output container (see below).\n This argument is ignored when `bins` is an IntervalIndex. If True,\n raises an error. When `ordered=False`, labels must be provided.\n retbins : bool, default False\n Whether to return the bins or not. Useful when bins is provided\n as a scalar.\n precision : int, default 3\n The precision at which to store and display the bins labels.\n include_lowest : bool, default False\n Whether the first interval should be left-inclusive or not.\n duplicates : {default 'raise', 'drop'}, optional\n If bin edges are not unique, raise ValueError or drop non-uniques.\n ordered : bool, default True\n Whether the labels are ordered or not. Applies to returned types\n Categorical and Series (with Categorical dtype). If True,\n the resulting categorical will be ordered. If False, the resulting\n categorical will be unordered (labels must be provided).\n\n Returns\n -------\n out : Categorical, Series, or ndarray\n An array-like object representing the respective bin for each value\n of `x`. The type depends on the value of `labels`.\n\n * None (default) : returns a Series for Series `x` or a\n Categorical for all other inputs. The values stored within\n are Interval dtype.\n\n * sequence of scalars : returns a Series for Series `x` or a\n Categorical for all other inputs. 
The values stored within\n are whatever the type in the sequence is.\n\n * False : returns an ndarray of integers.\n\n bins : numpy.ndarray or IntervalIndex.\n The computed or specified bins. Only returned when `retbins=True`.\n For scalar or sequence `bins`, this is an ndarray with the computed\n bins. If set `duplicates=drop`, `bins` will drop non-unique bin. For\n an IntervalIndex `bins`, this is equal to `bins`.\n\n See Also\n --------\n qcut : Discretize variable into equal-sized buckets based on rank\n or based on sample quantiles.\n Categorical : Array type for storing data that come from a\n fixed set of values.\n Series : One-dimensional array with axis labels (including time series).\n IntervalIndex : Immutable Index implementing an ordered, sliceable set.\n\n Notes\n -----\n Any NA values will be NA in the result. Out of bounds values will be NA in\n the resulting Series or Categorical object.\n\n Reference :ref:`the user guide <reshaping.tile.cut>` for more examples.\n\n Examples\n --------\n Discretize into three equal-sized bins.\n\n >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3)\n ... # doctest: +ELLIPSIS\n [(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ...\n Categories (3, interval[float64, right]): [(0.994, 3.0] < (3.0, 5.0] ...\n\n >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3, retbins=True)\n ... # doctest: +ELLIPSIS\n ([(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ...\n Categories (3, interval[float64, right]): [(0.994, 3.0] < (3.0, 5.0] ...\n array([0.994, 3. , 5. , 7. ]))\n\n Discovers the same bins, but assign them specific labels. Notice that\n the returned Categorical's categories are `labels` and is ordered.\n\n >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]),\n ... 
3, labels=["bad", "medium", "good"])\n ['bad', 'good', 'medium', 'medium', 'good', 'bad']\n Categories (3, object): ['bad' < 'medium' < 'good']\n\n ``ordered=False`` will result in unordered categories when labels are passed.\n This parameter can be used to allow non-unique labels:\n\n >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3,\n ... labels=["B", "A", "B"], ordered=False)\n ['B', 'B', 'A', 'A', 'B', 'B']\n Categories (2, object): ['A', 'B']\n\n ``labels=False`` implies you just want the bins back.\n\n >>> pd.cut([0, 1, 1, 2], bins=4, labels=False)\n array([0, 1, 1, 3])\n\n Passing a Series as an input returns a Series with categorical dtype:\n\n >>> s = pd.Series(np.array([2, 4, 6, 8, 10]),\n ... index=['a', 'b', 'c', 'd', 'e'])\n >>> pd.cut(s, 3)\n ... # doctest: +ELLIPSIS\n a (1.992, 4.667]\n b (1.992, 4.667]\n c (4.667, 7.333]\n d (7.333, 10.0]\n e (7.333, 10.0]\n dtype: category\n Categories (3, interval[float64, right]): [(1.992, 4.667] < (4.667, ...\n\n Passing a Series as an input returns a Series with mapping value.\n It is used to map numerically to intervals based on bins.\n\n >>> s = pd.Series(np.array([2, 4, 6, 8, 10]),\n ... index=['a', 'b', 'c', 'd', 'e'])\n >>> pd.cut(s, [0, 2, 4, 6, 8, 10], labels=False, retbins=True, right=False)\n ... # doctest: +ELLIPSIS\n (a 1.0\n b 2.0\n c 3.0\n d 4.0\n e NaN\n dtype: float64,\n array([ 0, 2, 4, 6, 8, 10]))\n\n Use `drop` optional when bins is not unique\n\n >>> pd.cut(s, [0, 2, 4, 6, 10, 10], labels=False, retbins=True,\n ... right=False, duplicates='drop')\n ... # doctest: +ELLIPSIS\n (a 1.0\n b 2.0\n c 3.0\n d 3.0\n e NaN\n dtype: float64,\n array([ 0, 2, 4, 6, 10]))\n\n Passing an IntervalIndex for `bins` results in those categories exactly.\n Notice that values not covered by the IntervalIndex are set to NaN. 
0\n is to the left of the first bin (which is closed on the right), and 1.5\n falls between two bins.\n\n >>> bins = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)])\n >>> pd.cut([0, 0.5, 1.5, 2.5, 4.5], bins)\n [NaN, (0.0, 1.0], NaN, (2.0, 3.0], (4.0, 5.0]]\n Categories (3, interval[int64, right]): [(0, 1] < (2, 3] < (4, 5]]\n """\n # NOTE: this binning code is changed a bit from histogram for var(x) == 0\n\n original = x\n x_idx = _preprocess_for_cut(x)\n x_idx, _ = _coerce_to_type(x_idx)\n\n if not np.iterable(bins):\n bins = _nbins_to_bins(x_idx, bins, right)\n\n elif isinstance(bins, IntervalIndex):\n if bins.is_overlapping:\n raise ValueError("Overlapping IntervalIndex is not accepted.")\n\n else:\n bins = Index(bins)\n if not bins.is_monotonic_increasing:\n raise ValueError("bins must increase monotonically.")\n\n fac, bins = _bins_to_cuts(\n x_idx,\n bins,\n right=right,\n labels=labels,\n precision=precision,\n include_lowest=include_lowest,\n duplicates=duplicates,\n ordered=ordered,\n )\n\n return _postprocess_for_cut(fac, bins, retbins, original)\n\n\ndef qcut(\n x,\n q,\n labels=None,\n retbins: bool = False,\n precision: int = 3,\n duplicates: str = "raise",\n):\n """\n Quantile-based discretization function.\n\n Discretize variable into equal-sized buckets based on rank or based\n on sample quantiles. For example 1000 values for 10 quantiles would\n produce a Categorical object indicating quantile membership for each data point.\n\n Parameters\n ----------\n x : 1d ndarray or Series\n q : int or list-like of float\n Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately\n array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles.\n labels : array or False, default None\n Used as labels for the resulting bins. Must be of the same length as\n the resulting bins. If False, return only integer indicators of the\n bins. If True, raises an error.\n retbins : bool, optional\n Whether to return the (bins, labels) or not. 
Can be useful if bins\n is given as a scalar.\n precision : int, optional\n The precision at which to store and display the bins labels.\n duplicates : {default 'raise', 'drop'}, optional\n If bin edges are not unique, raise ValueError or drop non-uniques.\n\n Returns\n -------\n out : Categorical or Series or array of integers if labels is False\n The return type (Categorical or Series) depends on the input: a Series\n of type category if input is a Series else Categorical. Bins are\n represented as categories when categorical data is returned.\n bins : ndarray of floats\n Returned only if `retbins` is True.\n\n Notes\n -----\n Out of bounds values will be NA in the resulting Categorical object\n\n Examples\n --------\n >>> pd.qcut(range(5), 4)\n ... # doctest: +ELLIPSIS\n [(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]]\n Categories (4, interval[float64, right]): [(-0.001, 1.0] < (1.0, 2.0] ...\n\n >>> pd.qcut(range(5), 3, labels=["good", "medium", "bad"])\n ... # doctest: +SKIP\n [good, good, medium, bad, bad]\n Categories (3, object): [good < medium < bad]\n\n >>> pd.qcut(range(5), 4, labels=False)\n array([0, 0, 1, 2, 3])\n """\n original = x\n x_idx = _preprocess_for_cut(x)\n x_idx, _ = _coerce_to_type(x_idx)\n\n quantiles = np.linspace(0, 1, q + 1) if is_integer(q) else q\n\n bins = x_idx.to_series().dropna().quantile(quantiles)\n\n fac, bins = _bins_to_cuts(\n x_idx,\n Index(bins),\n labels=labels,\n precision=precision,\n include_lowest=True,\n duplicates=duplicates,\n )\n\n return _postprocess_for_cut(fac, bins, retbins, original)\n\n\ndef _nbins_to_bins(x_idx: Index, nbins: int, right: bool) -> Index:\n """\n If a user passed an integer N for bins, convert this to a sequence of N\n equal(ish)-sized bins.\n """\n if is_scalar(nbins) and nbins < 1:\n raise ValueError("`bins` should be a positive integer.")\n\n if x_idx.size == 0:\n raise ValueError("Cannot cut empty array")\n\n rng = (x_idx.min(), x_idx.max())\n mn, mx = rng\n\n if 
is_numeric_dtype(x_idx.dtype) and (np.isinf(mn) or np.isinf(mx)):\n # GH#24314\n raise ValueError(\n "cannot specify integer `bins` when input data contains infinity"\n )\n\n if mn == mx: # adjust end points before binning\n if _is_dt_or_td(x_idx.dtype):\n # using seconds=1 is pretty arbitrary here\n # error: Argument 1 to "dtype_to_unit" has incompatible type\n # "dtype[Any] | ExtensionDtype"; expected "DatetimeTZDtype | dtype[Any]"\n unit = dtype_to_unit(x_idx.dtype) # type: ignore[arg-type]\n td = Timedelta(seconds=1).as_unit(unit)\n # Use DatetimeArray/TimedeltaArray method instead of linspace\n # error: Item "ExtensionArray" of "ExtensionArray | ndarray[Any, Any]"\n # has no attribute "_generate_range"\n bins = x_idx._values._generate_range( # type: ignore[union-attr]\n start=mn - td, end=mx + td, periods=nbins + 1, freq=None, unit=unit\n )\n else:\n mn -= 0.001 * abs(mn) if mn != 0 else 0.001\n mx += 0.001 * abs(mx) if mx != 0 else 0.001\n\n bins = np.linspace(mn, mx, nbins + 1, endpoint=True)\n else: # adjust end points after binning\n if _is_dt_or_td(x_idx.dtype):\n # Use DatetimeArray/TimedeltaArray method instead of linspace\n\n # error: Argument 1 to "dtype_to_unit" has incompatible type\n # "dtype[Any] | ExtensionDtype"; expected "DatetimeTZDtype | dtype[Any]"\n unit = dtype_to_unit(x_idx.dtype) # type: ignore[arg-type]\n # error: Item "ExtensionArray" of "ExtensionArray | ndarray[Any, Any]"\n # has no attribute "_generate_range"\n bins = x_idx._values._generate_range( # type: ignore[union-attr]\n start=mn, end=mx, periods=nbins + 1, freq=None, unit=unit\n )\n else:\n bins = np.linspace(mn, mx, nbins + 1, endpoint=True)\n adj = (mx - mn) * 0.001 # 0.1% of the range\n if right:\n bins[0] -= adj\n else:\n bins[-1] += adj\n\n return Index(bins)\n\n\ndef _bins_to_cuts(\n x_idx: Index,\n bins: Index,\n right: bool = True,\n labels=None,\n precision: int = 3,\n include_lowest: bool = False,\n duplicates: str = "raise",\n ordered: bool = True,\n):\n if not 
ordered and labels is None:\n raise ValueError("'labels' must be provided if 'ordered = False'")\n\n if duplicates not in ["raise", "drop"]:\n raise ValueError(\n "invalid value for 'duplicates' parameter, valid options are: raise, drop"\n )\n\n result: Categorical | np.ndarray\n\n if isinstance(bins, IntervalIndex):\n # we have a fast-path here\n ids = bins.get_indexer(x_idx)\n cat_dtype = CategoricalDtype(bins, ordered=True)\n result = Categorical.from_codes(ids, dtype=cat_dtype, validate=False)\n return result, bins\n\n unique_bins = algos.unique(bins)\n if len(unique_bins) < len(bins) and len(bins) != 2:\n if duplicates == "raise":\n raise ValueError(\n f"Bin edges must be unique: {repr(bins)}.\n"\n f"You can drop duplicate edges by setting the 'duplicates' kwarg"\n )\n bins = unique_bins\n\n side: Literal["left", "right"] = "left" if right else "right"\n\n try:\n ids = bins.searchsorted(x_idx, side=side)\n except TypeError as err:\n # e.g. test_datetime_nan_error if bins are DatetimeArray and x_idx\n # is integers\n if x_idx.dtype.kind == "m":\n raise ValueError("bins must be of timedelta64 dtype") from err\n elif x_idx.dtype.kind == bins.dtype.kind == "M":\n raise ValueError(\n "Cannot use timezone-naive bins with timezone-aware values, "\n "or vice-versa"\n ) from err\n elif x_idx.dtype.kind == "M":\n raise ValueError("bins must be of datetime64 dtype") from err\n else:\n raise\n ids = ensure_platform_int(ids)\n\n if include_lowest:\n ids[x_idx == bins[0]] = 1\n\n na_mask = isna(x_idx) | (ids == len(bins)) | (ids == 0)\n has_nas = na_mask.any()\n\n if labels is not False:\n if not (labels is None or is_list_like(labels)):\n raise ValueError(\n "Bin labels must either be False, None or passed in as a "\n "list-like argument"\n )\n\n if labels is None:\n labels = _format_labels(\n bins, precision, right=right, include_lowest=include_lowest\n )\n elif ordered and len(set(labels)) != len(labels):\n raise ValueError(\n "labels must be unique if ordered=True; pass 
ordered=False "\n "for duplicate labels"\n )\n else:\n if len(labels) != len(bins) - 1:\n raise ValueError(\n "Bin labels must be one fewer than the number of bin edges"\n )\n\n if not isinstance(getattr(labels, "dtype", None), CategoricalDtype):\n labels = Categorical(\n labels,\n categories=labels if len(set(labels)) == len(labels) else None,\n ordered=ordered,\n )\n # TODO: handle mismatch between categorical label order and pandas.cut order.\n np.putmask(ids, na_mask, 0)\n result = algos.take_nd(labels, ids - 1)\n\n else:\n result = ids - 1\n if has_nas:\n result = result.astype(np.float64)\n np.putmask(result, na_mask, np.nan)\n\n return result, bins\n\n\ndef _coerce_to_type(x: Index) -> tuple[Index, DtypeObj | None]:\n """\n if the passed data is of datetime/timedelta, bool or nullable int type,\n this method converts it to numeric so that cut or qcut method can\n handle it\n """\n dtype: DtypeObj | None = None\n\n if _is_dt_or_td(x.dtype):\n dtype = x.dtype\n elif is_bool_dtype(x.dtype):\n # GH 20303\n x = x.astype(np.int64)\n # To support cut and qcut for IntegerArray we convert to float dtype.\n # Will properly support in the future.\n # https://github.com/pandas-dev/pandas/pull/31290\n # https://github.com/pandas-dev/pandas/issues/31389\n elif isinstance(x.dtype, ExtensionDtype) and is_numeric_dtype(x.dtype):\n x_arr = x.to_numpy(dtype=np.float64, na_value=np.nan)\n x = Index(x_arr)\n\n return Index(x), dtype\n\n\ndef _is_dt_or_td(dtype: DtypeObj) -> bool:\n # Note: the dtype here comes from an Index.dtype, so we know that that any\n # dt64/td64 dtype is of a supported unit.\n return isinstance(dtype, DatetimeTZDtype) or lib.is_np_dtype(dtype, "mM")\n\n\ndef _format_labels(\n bins: Index,\n precision: int,\n right: bool = True,\n include_lowest: bool = False,\n):\n """based on the dtype, return our labels"""\n closed: IntervalLeftRight = "right" if right else "left"\n\n formatter: Callable[[Any], Timestamp] | Callable[[Any], Timedelta]\n\n if 
_is_dt_or_td(bins.dtype):\n # error: Argument 1 to "dtype_to_unit" has incompatible type\n # "dtype[Any] | ExtensionDtype"; expected "DatetimeTZDtype | dtype[Any]"\n unit = dtype_to_unit(bins.dtype) # type: ignore[arg-type]\n formatter = lambda x: x\n adjust = lambda x: x - Timedelta(1, unit=unit).as_unit(unit)\n else:\n precision = _infer_precision(precision, bins)\n formatter = lambda x: _round_frac(x, precision)\n adjust = lambda x: x - 10 ** (-precision)\n\n breaks = [formatter(b) for b in bins]\n if right and include_lowest:\n # adjust lhs of first interval by precision to account for being right closed\n breaks[0] = adjust(breaks[0])\n\n if _is_dt_or_td(bins.dtype):\n # error: "Index" has no attribute "as_unit"\n breaks = type(bins)(breaks).as_unit(unit) # type: ignore[attr-defined]\n\n return IntervalIndex.from_breaks(breaks, closed=closed)\n\n\ndef _preprocess_for_cut(x) -> Index:\n """\n handles preprocessing for cut where we convert passed\n input to array, strip the index information and store it\n separately\n """\n # Check that the passed array is a Pandas or Numpy object\n # We don't want to strip away a Pandas data-type here (e.g. 
datetimetz)\n ndim = getattr(x, "ndim", None)\n if ndim is None:\n x = np.asarray(x)\n if x.ndim != 1:\n raise ValueError("Input array must be 1 dimensional")\n\n return Index(x)\n\n\ndef _postprocess_for_cut(fac, bins, retbins: bool, original):\n """\n handles post processing for the cut method where\n we combine the index information if the originally passed\n datatype was a series\n """\n if isinstance(original, ABCSeries):\n fac = original._constructor(fac, index=original.index, name=original.name)\n\n if not retbins:\n return fac\n\n if isinstance(bins, Index) and is_numeric_dtype(bins.dtype):\n bins = bins._values\n\n return fac, bins\n\n\ndef _round_frac(x, precision: int):\n """\n Round the fractional part of the given number\n """\n if not np.isfinite(x) or x == 0:\n return x\n else:\n frac, whole = np.modf(x)\n if whole == 0:\n digits = -int(np.floor(np.log10(abs(frac)))) - 1 + precision\n else:\n digits = precision\n return np.around(x, digits)\n\n\ndef _infer_precision(base_precision: int, bins: Index) -> int:\n """\n Infer an appropriate precision for _round_frac\n """\n for precision in range(base_precision, 20):\n levels = np.asarray([_round_frac(b, precision) for b in bins])\n if algos.unique(levels).size == bins.size:\n return precision\n return base_precision # default\n
.venv\Lib\site-packages\pandas\core\reshape\tile.py
tile.py
Python
21,947
0.95
0.148903
0.067542
react-lib
886
2023-07-12T19:35:45.440141
MIT
false
87be6d23ea0f3ae2c65cb629558fb979
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\n\nfrom pandas.core.dtypes.common import is_list_like\n\nif TYPE_CHECKING:\n from pandas._typing import NumpyIndexT\n\n\ndef cartesian_product(X) -> list[np.ndarray]:\n """\n Numpy version of itertools.product.\n Sometimes faster (for large inputs)...\n\n Parameters\n ----------\n X : list-like of list-likes\n\n Returns\n -------\n product : list of ndarrays\n\n Examples\n --------\n >>> cartesian_product([list('ABC'), [1, 2]])\n [array(['A', 'A', 'B', 'B', 'C', 'C'], dtype='<U1'), array([1, 2, 1, 2, 1, 2])]\n\n See Also\n --------\n itertools.product : Cartesian product of input iterables. Equivalent to\n nested for-loops.\n """\n msg = "Input must be a list-like of list-likes"\n if not is_list_like(X):\n raise TypeError(msg)\n for x in X:\n if not is_list_like(x):\n raise TypeError(msg)\n\n if len(X) == 0:\n return []\n\n lenX = np.fromiter((len(x) for x in X), dtype=np.intp)\n cumprodX = np.cumprod(lenX)\n\n if np.any(cumprodX < 0):\n raise ValueError("Product space too large to allocate arrays!")\n\n a = np.roll(cumprodX, 1)\n a[0] = 1\n\n if cumprodX[-1] != 0:\n b = cumprodX[-1] / cumprodX\n else:\n # if any factor is empty, the cartesian product is empty\n b = np.zeros_like(cumprodX)\n\n # error: Argument of type "int_" cannot be assigned to parameter "num" of\n # type "int" in function "tile_compat"\n return [\n tile_compat(\n np.repeat(x, b[i]),\n np.prod(a[i]),\n )\n for i, x in enumerate(X)\n ]\n\n\ndef tile_compat(arr: NumpyIndexT, num: int) -> NumpyIndexT:\n """\n Index compat for np.tile.\n\n Notes\n -----\n Does not support multi-dimensional `num`.\n """\n if isinstance(arr, np.ndarray):\n return np.tile(arr, num)\n\n # Otherwise we have an Index\n taker = np.tile(np.arange(len(arr)), num)\n return arr.take(taker)\n
.venv\Lib\site-packages\pandas\core\reshape\util.py
util.py
Python
2,014
0.95
0.2
0.061538
awesome-app
253
2024-11-22T09:43:59.995812
GPL-3.0
false
da9b2ae888182a4f89d84d24190bc5ae
\n\n
.venv\Lib\site-packages\pandas\core\reshape\__pycache__\api.cpython-313.pyc
api.cpython-313.pyc
Other
820
0.7
0
0
react-lib
514
2025-02-20T18:43:15.638496
Apache-2.0
false
955e82e9a84a6f082ff72ede6407918f
\n\n
.venv\Lib\site-packages\pandas\core\reshape\__pycache__\concat.cpython-313.pyc
concat.cpython-313.pyc
Other
30,416
0.8
0.016949
0
vue-tools
785
2024-03-13T01:36:19.160872
Apache-2.0
false
be271095091138f50896d2e4e16f6855
\n\n
.venv\Lib\site-packages\pandas\core\reshape\__pycache__\encoding.cpython-313.pyc
encoding.cpython-313.pyc
Other
19,340
0.8
0.035912
0.030769
awesome-app
718
2024-04-19T05:37:37.117198
MIT
false
ca4d5ae4510032425f76e2967cffb07d
\n\n
.venv\Lib\site-packages\pandas\core\reshape\__pycache__\melt.cpython-313.pyc
melt.cpython-313.pyc
Other
19,958
0.95
0.025907
0
python-kit
568
2025-05-16T19:25:21.653574
BSD-3-Clause
false
d8f9f39e35b1ff61e49135e610eb7dcd
\n\n
.venv\Lib\site-packages\pandas\core\reshape\__pycache__\merge.cpython-313.pyc
merge.cpython-313.pyc
Other
99,957
0.75
0.014471
0.012438
awesome-app
723
2024-07-11T01:55:18.181104
BSD-3-Clause
false
1875cbdf7a88201bd88afc3510efac98
\n\n
.venv\Lib\site-packages\pandas\core\reshape\__pycache__\pivot.cpython-313.pyc
pivot.cpython-313.pyc
Other
31,136
0.95
0.016949
0.010417
react-lib
719
2025-06-10T23:10:10.535512
MIT
false
c956a1683ca64cfa81ac453e3deb6ebb
\n\n
.venv\Lib\site-packages\pandas\core\reshape\__pycache__\reshape.cpython-313.pyc
reshape.cpython-313.pyc
Other
42,233
0.95
0.025822
0.002519
node-utils
669
2025-04-11T18:50:02.645740
BSD-3-Clause
false
0633c33740767ff2d6102ff502b99acc
\n\n
.venv\Lib\site-packages\pandas\core\reshape\__pycache__\tile.cpython-313.pyc
tile.cpython-313.pyc
Other
23,198
0.95
0.087855
0.017341
node-utils
637
2023-09-29T16:59:35.431704
GPL-3.0
false
84b27e48551b3bc9dd42eaf1194841ac
\n\n
.venv\Lib\site-packages\pandas\core\reshape\__pycache__\util.cpython-313.pyc
util.cpython-313.pyc
Other
3,083
0.8
0.06383
0
vue-tools
229
2023-07-11T07:33:21.018670
BSD-3-Clause
false
1a19911968f5367e7256cd689ef88e1b
\n\n
.venv\Lib\site-packages\pandas\core\reshape\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
194
0.7
0
0
python-kit
230
2024-01-21T18:14:44.866599
BSD-3-Clause
false
70ea9b78ec4198a050c7003150a854c9
from pandas.core.dtypes.dtypes import SparseDtype\n\nfrom pandas.core.arrays.sparse import SparseArray\n\n__all__ = ["SparseArray", "SparseDtype"]\n
.venv\Lib\site-packages\pandas\core\sparse\api.py
api.py
Python
143
0.85
0
0
python-kit
122
2025-02-28T19:40:05.247005
Apache-2.0
false
ad11526d6da1ab7343d1fd84344ad374
\n\n
.venv\Lib\site-packages\pandas\core\sparse\__pycache__\api.cpython-313.pyc
api.cpython-313.pyc
Other
351
0.7
0
0
vue-tools
371
2024-10-03T13:45:02.539705
BSD-3-Clause
false
af8c7e0a70ffdf20462b0bd97868ab47
\n\n
.venv\Lib\site-packages\pandas\core\sparse\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
193
0.7
0
0
vue-tools
439
2024-06-10T04:40:38.787529
BSD-3-Clause
false
852ee82cb48e52a5072cadfcb5a6fead
from __future__ import annotations\n\nimport abc\nfrom typing import (\n TYPE_CHECKING,\n Callable,\n Literal,\n)\n\nfrom pandas._libs import lib\n\nif TYPE_CHECKING:\n from collections.abc import Sequence\n import re\n\n from pandas._typing import Scalar\n\n from pandas import Series\n\n\nclass BaseStringArrayMethods(abc.ABC):\n """\n Base class for extension arrays implementing string methods.\n\n This is where our ExtensionArrays can override the implementation of\n Series.str.<method>. We don't expect this to work with\n 3rd-party extension arrays.\n\n * User calls Series.str.<method>\n * pandas extracts the extension array from the Series\n * pandas calls ``extension_array._str_<method>(*args, **kwargs)``\n * pandas wraps the result, to return to the user.\n\n See :ref:`Series.str` for the docstring of each method.\n """\n\n def _str_getitem(self, key):\n if isinstance(key, slice):\n return self._str_slice(start=key.start, stop=key.stop, step=key.step)\n else:\n return self._str_get(key)\n\n @abc.abstractmethod\n def _str_count(self, pat, flags: int = 0):\n pass\n\n @abc.abstractmethod\n def _str_pad(\n self,\n width: int,\n side: Literal["left", "right", "both"] = "left",\n fillchar: str = " ",\n ):\n pass\n\n @abc.abstractmethod\n def _str_contains(\n self, pat, case: bool = True, flags: int = 0, na=None, regex: bool = True\n ):\n pass\n\n @abc.abstractmethod\n def _str_startswith(self, pat, na=None):\n pass\n\n @abc.abstractmethod\n def _str_endswith(self, pat, na=None):\n pass\n\n @abc.abstractmethod\n def _str_replace(\n self,\n pat: str | re.Pattern,\n repl: str | Callable,\n n: int = -1,\n case: bool = True,\n flags: int = 0,\n regex: bool = True,\n ):\n pass\n\n @abc.abstractmethod\n def _str_repeat(self, repeats: int | Sequence[int]):\n pass\n\n @abc.abstractmethod\n def _str_match(\n self,\n pat: str,\n case: bool = True,\n flags: int = 0,\n na: Scalar | lib.NoDefault = lib.no_default,\n ):\n pass\n\n @abc.abstractmethod\n def _str_fullmatch(\n 
self,\n pat: str | re.Pattern,\n case: bool = True,\n flags: int = 0,\n na: Scalar | lib.NoDefault = lib.no_default,\n ):\n pass\n\n @abc.abstractmethod\n def _str_encode(self, encoding, errors: str = "strict"):\n pass\n\n @abc.abstractmethod\n def _str_find(self, sub, start: int = 0, end=None):\n pass\n\n @abc.abstractmethod\n def _str_rfind(self, sub, start: int = 0, end=None):\n pass\n\n @abc.abstractmethod\n def _str_findall(self, pat, flags: int = 0):\n pass\n\n @abc.abstractmethod\n def _str_get(self, i):\n pass\n\n @abc.abstractmethod\n def _str_index(self, sub, start: int = 0, end=None):\n pass\n\n @abc.abstractmethod\n def _str_rindex(self, sub, start: int = 0, end=None):\n pass\n\n @abc.abstractmethod\n def _str_join(self, sep: str):\n pass\n\n @abc.abstractmethod\n def _str_partition(self, sep: str, expand):\n pass\n\n @abc.abstractmethod\n def _str_rpartition(self, sep: str, expand):\n pass\n\n @abc.abstractmethod\n def _str_len(self):\n pass\n\n @abc.abstractmethod\n def _str_slice(self, start=None, stop=None, step=None):\n pass\n\n @abc.abstractmethod\n def _str_slice_replace(self, start=None, stop=None, repl=None):\n pass\n\n @abc.abstractmethod\n def _str_translate(self, table):\n pass\n\n @abc.abstractmethod\n def _str_wrap(self, width: int, **kwargs):\n pass\n\n @abc.abstractmethod\n def _str_get_dummies(self, sep: str = "|"):\n pass\n\n @abc.abstractmethod\n def _str_isalnum(self):\n pass\n\n @abc.abstractmethod\n def _str_isalpha(self):\n pass\n\n @abc.abstractmethod\n def _str_isdecimal(self):\n pass\n\n @abc.abstractmethod\n def _str_isdigit(self):\n pass\n\n @abc.abstractmethod\n def _str_islower(self):\n pass\n\n @abc.abstractmethod\n def _str_isnumeric(self):\n pass\n\n @abc.abstractmethod\n def _str_isspace(self):\n pass\n\n @abc.abstractmethod\n def _str_istitle(self):\n pass\n\n @abc.abstractmethod\n def _str_isupper(self):\n pass\n\n @abc.abstractmethod\n def _str_capitalize(self):\n pass\n\n @abc.abstractmethod\n def 
_str_casefold(self):\n pass\n\n @abc.abstractmethod\n def _str_title(self):\n pass\n\n @abc.abstractmethod\n def _str_swapcase(self):\n pass\n\n @abc.abstractmethod\n def _str_lower(self):\n pass\n\n @abc.abstractmethod\n def _str_upper(self):\n pass\n\n @abc.abstractmethod\n def _str_normalize(self, form):\n pass\n\n @abc.abstractmethod\n def _str_strip(self, to_strip=None):\n pass\n\n @abc.abstractmethod\n def _str_lstrip(self, to_strip=None):\n pass\n\n @abc.abstractmethod\n def _str_rstrip(self, to_strip=None):\n pass\n\n @abc.abstractmethod\n def _str_removeprefix(self, prefix: str) -> Series:\n pass\n\n @abc.abstractmethod\n def _str_removesuffix(self, suffix: str) -> Series:\n pass\n\n @abc.abstractmethod\n def _str_split(\n self, pat=None, n=-1, expand: bool = False, regex: bool | None = None\n ):\n pass\n\n @abc.abstractmethod\n def _str_rsplit(self, pat=None, n=-1):\n pass\n\n @abc.abstractmethod\n def _str_extract(self, pat: str, flags: int = 0, expand: bool = True):\n pass\n
.venv\Lib\site-packages\pandas\core\strings\base.py
base.py
Python
5,619
0.85
0.210526
0.019417
node-utils
41
2023-09-11T21:45:30.626143
Apache-2.0
false
3da99b4a1a2b005c9aca0c06a6096219
from __future__ import annotations\n\nimport functools\nimport re\nimport textwrap\nfrom typing import (\n TYPE_CHECKING,\n Callable,\n Literal,\n cast,\n)\nimport unicodedata\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import lib\nimport pandas._libs.missing as libmissing\nimport pandas._libs.ops as libops\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.missing import isna\n\nfrom pandas.core.strings.base import BaseStringArrayMethods\n\nif TYPE_CHECKING:\n from collections.abc import Sequence\n\n from pandas._typing import (\n NpDtype,\n Scalar,\n )\n\n from pandas import Series\n\n\nclass ObjectStringArrayMixin(BaseStringArrayMethods):\n """\n String Methods operating on object-dtype ndarrays.\n """\n\n def __len__(self) -> int:\n # For typing, _str_map relies on the object being sized.\n raise NotImplementedError\n\n def _str_map(\n self,\n f,\n na_value=lib.no_default,\n dtype: NpDtype | None = None,\n convert: bool = True,\n ):\n """\n Map a callable over valid elements of the array.\n\n Parameters\n ----------\n f : Callable\n A function to call on each non-NA element.\n na_value : Scalar, optional\n The value to set for NA values. 
Might also be used for the\n fill value if the callable `f` raises an exception.\n This defaults to ``self.dtype.na_value`` which is ``np.nan``\n for object-dtype and Categorical and ``pd.NA`` for StringArray.\n dtype : Dtype, optional\n The dtype of the result array.\n convert : bool, default True\n Whether to call `maybe_convert_objects` on the resulting ndarray\n """\n if dtype is None:\n dtype = np.dtype("object")\n if na_value is lib.no_default:\n na_value = self.dtype.na_value # type: ignore[attr-defined]\n\n if not len(self):\n return np.array([], dtype=dtype)\n\n arr = np.asarray(self, dtype=object)\n mask = isna(arr)\n map_convert = convert and not np.all(mask)\n try:\n result = lib.map_infer_mask(arr, f, mask.view(np.uint8), map_convert)\n except (TypeError, AttributeError) as err:\n # Reraise the exception if callable `f` got wrong number of args.\n # The user may want to be warned by this, instead of getting NaN\n p_err = (\n r"((takes)|(missing)) (?(2)from \d+ to )?\d+ "\n r"(?(3)required )positional arguments?"\n )\n\n if len(err.args) >= 1 and re.search(p_err, err.args[0]):\n # FIXME: this should be totally avoidable\n raise err\n\n def g(x):\n # This type of fallback behavior can be removed once\n # we remove object-dtype .str accessor.\n try:\n return f(x)\n except (TypeError, AttributeError):\n return na_value\n\n return self._str_map(g, na_value=na_value, dtype=dtype)\n if not isinstance(result, np.ndarray):\n return result\n if na_value is not np.nan:\n np.putmask(result, mask, na_value)\n if convert and result.dtype == object:\n result = lib.maybe_convert_objects(result)\n return result\n\n def _str_count(self, pat, flags: int = 0):\n regex = re.compile(pat, flags=flags)\n f = lambda x: len(regex.findall(x))\n return self._str_map(f, dtype="int64")\n\n def _str_pad(\n self,\n width: int,\n side: Literal["left", "right", "both"] = "left",\n fillchar: str = " ",\n ):\n if side == "left":\n f = lambda x: x.rjust(width, fillchar)\n elif side == 
"right":\n f = lambda x: x.ljust(width, fillchar)\n elif side == "both":\n f = lambda x: x.center(width, fillchar)\n else: # pragma: no cover\n raise ValueError("Invalid side")\n return self._str_map(f)\n\n def _str_contains(\n self,\n pat,\n case: bool = True,\n flags: int = 0,\n na=lib.no_default,\n regex: bool = True,\n ):\n if regex:\n if not case:\n flags |= re.IGNORECASE\n\n pat = re.compile(pat, flags=flags)\n\n f = lambda x: pat.search(x) is not None\n else:\n if case:\n f = lambda x: pat in x\n else:\n upper_pat = pat.upper()\n f = lambda x: upper_pat in x.upper()\n if na is not lib.no_default and not isna(na) and not isinstance(na, bool):\n # GH#59561\n warnings.warn(\n "Allowing a non-bool 'na' in obj.str.contains is deprecated "\n "and will raise in a future version.",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n return self._str_map(f, na, dtype=np.dtype("bool"))\n\n def _str_startswith(self, pat, na=lib.no_default):\n f = lambda x: x.startswith(pat)\n if na is not lib.no_default and not isna(na) and not isinstance(na, bool):\n # GH#59561\n warnings.warn(\n "Allowing a non-bool 'na' in obj.str.startswith is deprecated "\n "and will raise in a future version.",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n return self._str_map(f, na_value=na, dtype=np.dtype(bool))\n\n def _str_endswith(self, pat, na=lib.no_default):\n f = lambda x: x.endswith(pat)\n if na is not lib.no_default and not isna(na) and not isinstance(na, bool):\n # GH#59561\n warnings.warn(\n "Allowing a non-bool 'na' in obj.str.endswith is deprecated "\n "and will raise in a future version.",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n return self._str_map(f, na_value=na, dtype=np.dtype(bool))\n\n def _str_replace(\n self,\n pat: str | re.Pattern,\n repl: str | Callable,\n n: int = -1,\n case: bool = True,\n flags: int = 0,\n regex: bool = True,\n ):\n if case is False:\n # add case flag, if provided\n flags |= re.IGNORECASE\n\n if regex or flags or 
callable(repl):\n if not isinstance(pat, re.Pattern):\n if regex is False:\n pat = re.escape(pat)\n pat = re.compile(pat, flags=flags)\n\n n = n if n >= 0 else 0\n f = lambda x: pat.sub(repl=repl, string=x, count=n)\n else:\n f = lambda x: x.replace(pat, repl, n)\n\n return self._str_map(f, dtype=str)\n\n def _str_repeat(self, repeats: int | Sequence[int]):\n if lib.is_integer(repeats):\n rint = cast(int, repeats)\n\n def scalar_rep(x):\n try:\n return bytes.__mul__(x, rint)\n except TypeError:\n return str.__mul__(x, rint)\n\n return self._str_map(scalar_rep, dtype=str)\n else:\n from pandas.core.arrays.string_ import BaseStringArray\n\n def rep(x, r):\n if x is libmissing.NA:\n return x\n try:\n return bytes.__mul__(x, r)\n except TypeError:\n return str.__mul__(x, r)\n\n result = libops.vec_binop(\n np.asarray(self),\n np.asarray(repeats, dtype=object),\n rep,\n )\n if isinstance(self, BaseStringArray):\n # Not going through map, so we have to do this here.\n result = type(self)._from_sequence(result, dtype=self.dtype)\n return result\n\n def _str_match(\n self,\n pat: str,\n case: bool = True,\n flags: int = 0,\n na: Scalar | lib.NoDefault = lib.no_default,\n ):\n if not case:\n flags |= re.IGNORECASE\n\n regex = re.compile(pat, flags=flags)\n\n f = lambda x: regex.match(x) is not None\n return self._str_map(f, na_value=na, dtype=np.dtype(bool))\n\n def _str_fullmatch(\n self,\n pat: str | re.Pattern,\n case: bool = True,\n flags: int = 0,\n na: Scalar | lib.NoDefault = lib.no_default,\n ):\n if not case:\n flags |= re.IGNORECASE\n\n regex = re.compile(pat, flags=flags)\n\n f = lambda x: regex.fullmatch(x) is not None\n return self._str_map(f, na_value=na, dtype=np.dtype(bool))\n\n def _str_encode(self, encoding, errors: str = "strict"):\n f = lambda x: x.encode(encoding, errors=errors)\n return self._str_map(f, dtype=object)\n\n def _str_find(self, sub, start: int = 0, end=None):\n return self._str_find_(sub, start, end, side="left")\n\n def _str_rfind(self, 
sub, start: int = 0, end=None):\n return self._str_find_(sub, start, end, side="right")\n\n def _str_find_(self, sub, start, end, side):\n if side == "left":\n method = "find"\n elif side == "right":\n method = "rfind"\n else: # pragma: no cover\n raise ValueError("Invalid side")\n\n if end is None:\n f = lambda x: getattr(x, method)(sub, start)\n else:\n f = lambda x: getattr(x, method)(sub, start, end)\n return self._str_map(f, dtype="int64")\n\n def _str_findall(self, pat, flags: int = 0):\n regex = re.compile(pat, flags=flags)\n return self._str_map(regex.findall, dtype="object")\n\n def _str_get(self, i):\n def f(x):\n if isinstance(x, dict):\n return x.get(i)\n elif len(x) > i >= -len(x):\n return x[i]\n return self.dtype.na_value # type: ignore[attr-defined]\n\n return self._str_map(f)\n\n def _str_index(self, sub, start: int = 0, end=None):\n if end:\n f = lambda x: x.index(sub, start, end)\n else:\n f = lambda x: x.index(sub, start, end)\n return self._str_map(f, dtype="int64")\n\n def _str_rindex(self, sub, start: int = 0, end=None):\n if end:\n f = lambda x: x.rindex(sub, start, end)\n else:\n f = lambda x: x.rindex(sub, start, end)\n return self._str_map(f, dtype="int64")\n\n def _str_join(self, sep: str):\n return self._str_map(sep.join)\n\n def _str_partition(self, sep: str, expand):\n result = self._str_map(lambda x: x.partition(sep), dtype="object")\n return result\n\n def _str_rpartition(self, sep: str, expand):\n return self._str_map(lambda x: x.rpartition(sep), dtype="object")\n\n def _str_len(self):\n return self._str_map(len, dtype="int64")\n\n def _str_slice(self, start=None, stop=None, step=None):\n obj = slice(start, stop, step)\n return self._str_map(lambda x: x[obj])\n\n def _str_slice_replace(self, start=None, stop=None, repl=None):\n if repl is None:\n repl = ""\n\n def f(x):\n if x[start:stop] == "":\n local_stop = start\n else:\n local_stop = stop\n y = ""\n if start is not None:\n y += x[:start]\n y += repl\n if stop is not None:\n y 
+= x[local_stop:]\n return y\n\n return self._str_map(f)\n\n def _str_split(\n self,\n pat: str | re.Pattern | None = None,\n n=-1,\n expand: bool = False,\n regex: bool | None = None,\n ):\n if pat is None:\n if n is None or n == 0:\n n = -1\n f = lambda x: x.split(pat, n)\n else:\n new_pat: str | re.Pattern\n if regex is True or isinstance(pat, re.Pattern):\n new_pat = re.compile(pat)\n elif regex is False:\n new_pat = pat\n # regex is None so link to old behavior #43563\n else:\n if len(pat) == 1:\n new_pat = pat\n else:\n new_pat = re.compile(pat)\n\n if isinstance(new_pat, re.Pattern):\n if n is None or n == -1:\n n = 0\n f = lambda x: new_pat.split(x, maxsplit=n)\n else:\n if n is None or n == 0:\n n = -1\n f = lambda x: x.split(pat, n)\n return self._str_map(f, dtype=object)\n\n def _str_rsplit(self, pat=None, n=-1):\n if n is None or n == 0:\n n = -1\n f = lambda x: x.rsplit(pat, n)\n return self._str_map(f, dtype="object")\n\n def _str_translate(self, table):\n return self._str_map(lambda x: x.translate(table))\n\n def _str_wrap(self, width: int, **kwargs):\n kwargs["width"] = width\n tw = textwrap.TextWrapper(**kwargs)\n return self._str_map(lambda s: "\n".join(tw.wrap(s)))\n\n def _str_get_dummies(self, sep: str = "|"):\n from pandas import Series\n\n arr = Series(self).fillna("")\n try:\n arr = sep + arr + sep\n except (TypeError, NotImplementedError):\n arr = sep + arr.astype(str) + sep\n\n tags: set[str] = set()\n for ts in Series(arr, copy=False).str.split(sep):\n tags.update(ts)\n tags2 = sorted(tags - {""})\n\n dummies = np.empty((len(arr), len(tags2)), dtype=np.int64)\n\n def _isin(test_elements: str, element: str) -> bool:\n return element in test_elements\n\n for i, t in enumerate(tags2):\n pat = sep + t + sep\n dummies[:, i] = lib.map_infer(\n arr.to_numpy(), functools.partial(_isin, element=pat)\n )\n return dummies, tags2\n\n def _str_upper(self):\n return self._str_map(lambda x: x.upper())\n\n def _str_isalnum(self):\n return 
self._str_map(str.isalnum, dtype="bool")\n\n def _str_isalpha(self):\n return self._str_map(str.isalpha, dtype="bool")\n\n def _str_isdecimal(self):\n return self._str_map(str.isdecimal, dtype="bool")\n\n def _str_isdigit(self):\n return self._str_map(str.isdigit, dtype="bool")\n\n def _str_islower(self):\n return self._str_map(str.islower, dtype="bool")\n\n def _str_isnumeric(self):\n return self._str_map(str.isnumeric, dtype="bool")\n\n def _str_isspace(self):\n return self._str_map(str.isspace, dtype="bool")\n\n def _str_istitle(self):\n return self._str_map(str.istitle, dtype="bool")\n\n def _str_isupper(self):\n return self._str_map(str.isupper, dtype="bool")\n\n def _str_capitalize(self):\n return self._str_map(str.capitalize)\n\n def _str_casefold(self):\n return self._str_map(str.casefold)\n\n def _str_title(self):\n return self._str_map(str.title)\n\n def _str_swapcase(self):\n return self._str_map(str.swapcase)\n\n def _str_lower(self):\n return self._str_map(str.lower)\n\n def _str_normalize(self, form):\n f = lambda x: unicodedata.normalize(form, x)\n return self._str_map(f)\n\n def _str_strip(self, to_strip=None):\n return self._str_map(lambda x: x.strip(to_strip))\n\n def _str_lstrip(self, to_strip=None):\n return self._str_map(lambda x: x.lstrip(to_strip))\n\n def _str_rstrip(self, to_strip=None):\n return self._str_map(lambda x: x.rstrip(to_strip))\n\n def _str_removeprefix(self, prefix: str) -> Series:\n # outstanding question on whether to use native methods for users on Python 3.9+\n # https://github.com/pandas-dev/pandas/pull/39226#issuecomment-836719770,\n # in which case we could do return self._str_map(str.removeprefix)\n\n def removeprefix(text: str) -> str:\n if text.startswith(prefix):\n return text[len(prefix) :]\n return text\n\n return self._str_map(removeprefix)\n\n def _str_removesuffix(self, suffix: str) -> Series:\n return self._str_map(lambda x: x.removesuffix(suffix))\n\n def _str_extract(self, pat: str, flags: int = 0, expand: 
bool = True):\n regex = re.compile(pat, flags=flags)\n na_value = self.dtype.na_value # type: ignore[attr-defined]\n\n if not expand:\n\n def g(x):\n m = regex.search(x)\n return m.groups()[0] if m else na_value\n\n return self._str_map(g, convert=False)\n\n empty_row = [na_value] * regex.groups\n\n def f(x):\n if not isinstance(x, str):\n return empty_row\n m = regex.search(x)\n if m:\n return [na_value if item is None else item for item in m.groups()]\n else:\n return empty_row\n\n return [f(val) for val in np.asarray(self)]\n
.venv\Lib\site-packages\pandas\core\strings\object_array.py
object_array.py
Python
16,842
0.95
0.2397
0.034483
awesome-app
558
2024-05-27T01:40:14.188209
BSD-3-Clause
false
9b12a0d7ef46f85bfb18f1849f245a2d
"""\nImplementation of pandas.Series.str and its interface.\n\n* strings.accessor.StringMethods : Accessor for Series.str\n* strings.base.BaseStringArrayMethods: Mixin ABC for EAs to implement str methods\n\nMost methods on the StringMethods accessor follow the pattern:\n\n 1. extract the array from the series (or index)\n 2. Call that array's implementation of the string method\n 3. Wrap the result (in a Series, index, or DataFrame)\n\nPandas extension arrays implementing string methods should inherit from\npandas.core.strings.base.BaseStringArrayMethods. This is an ABC defining\nthe various string methods. To avoid namespace clashes and pollution,\nthese are prefixed with `_str_`. So ``Series.str.upper()`` calls\n``Series.array._str_upper()``. The interface isn't currently public\nto other string extension arrays.\n"""\n# Pandas current implementation is in ObjectStringArrayMixin. This is designed\n# to work on object-dtype ndarrays.\n#\n# BaseStringArrayMethods\n# - ObjectStringArrayMixin\n# - StringArray\n# - NumpyExtensionArray\n# - Categorical\n# - ArrowStringArray\n
.venv\Lib\site-packages\pandas\core\strings\__init__.py
__init__.py
Python
1,087
0.8
0.071429
0.458333
python-kit
943
2024-03-27T15:31:52.654362
GPL-3.0
false
af03bcd733620ec2c860d03932df6a88
\n\n
.venv\Lib\site-packages\pandas\core\strings\__pycache__\base.cpython-313.pyc
base.cpython-313.pyc
Other
12,648
0.95
0.016216
0.033708
awesome-app
880
2024-09-29T23:40:22.906062
MIT
false
cb4e6794cef6181f74293831447bc1c6
\n\n
.venv\Lib\site-packages\pandas\core\strings\__pycache__\object_array.cpython-313.pyc
object_array.cpython-313.pyc
Other
32,131
0.95
0.033149
0.011905
vue-tools
48
2025-05-25T22:44:21.827031
BSD-3-Clause
false
71849b2dd6f039ddeb9bd5f0538484e3
\n\n
.venv\Lib\site-packages\pandas\core\strings\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
1,031
0.7
0.095238
0.125
react-lib
634
2023-08-10T12:26:56.212911
Apache-2.0
false
1d7c6c47e0e2c2a916a088b0e91a94b4
from __future__ import annotations\n\nfrom collections import abc\nfrom datetime import date\nfrom functools import partial\nfrom itertools import islice\nfrom typing import (\n TYPE_CHECKING,\n Callable,\n TypedDict,\n Union,\n cast,\n overload,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._config import using_string_dtype\n\nfrom pandas._libs import (\n lib,\n tslib,\n)\nfrom pandas._libs.tslibs import (\n OutOfBoundsDatetime,\n Timedelta,\n Timestamp,\n astype_overflowsafe,\n is_supported_dtype,\n timezones as libtimezones,\n)\nfrom pandas._libs.tslibs.conversion import cast_from_unit_vectorized\nfrom pandas._libs.tslibs.parsing import (\n DateParseError,\n guess_datetime_format,\n)\nfrom pandas._libs.tslibs.strptime import array_strptime\nfrom pandas._typing import (\n AnyArrayLike,\n ArrayLike,\n DateTimeErrorChoices,\n)\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.common import (\n ensure_object,\n is_float,\n is_integer,\n is_integer_dtype,\n is_list_like,\n is_numeric_dtype,\n)\nfrom pandas.core.dtypes.dtypes import (\n ArrowDtype,\n DatetimeTZDtype,\n)\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCSeries,\n)\n\nfrom pandas.arrays import (\n DatetimeArray,\n IntegerArray,\n NumpyExtensionArray,\n)\nfrom pandas.core.algorithms import unique\nfrom pandas.core.arrays import ArrowExtensionArray\nfrom pandas.core.arrays.base import ExtensionArray\nfrom pandas.core.arrays.datetimes import (\n maybe_convert_dtype,\n objects_to_datetime64,\n tz_to_dtype,\n)\nfrom pandas.core.construction import extract_array\nfrom pandas.core.indexes.base import Index\nfrom pandas.core.indexes.datetimes import DatetimeIndex\n\nif TYPE_CHECKING:\n from collections.abc import Hashable\n\n from pandas._libs.tslibs.nattype import NaTType\n from pandas._libs.tslibs.timedeltas import UnitChoices\n\n from pandas import (\n DataFrame,\n Series,\n )\n\n# ---------------------------------------------------------------------\n# 
types used in annotations\n\nArrayConvertible = Union[list, tuple, AnyArrayLike]\nScalar = Union[float, str]\nDatetimeScalar = Union[Scalar, date, np.datetime64]\n\nDatetimeScalarOrArrayConvertible = Union[DatetimeScalar, ArrayConvertible]\n\nDatetimeDictArg = Union[list[Scalar], tuple[Scalar, ...], AnyArrayLike]\n\n\nclass YearMonthDayDict(TypedDict, total=True):\n year: DatetimeDictArg\n month: DatetimeDictArg\n day: DatetimeDictArg\n\n\nclass FulldatetimeDict(YearMonthDayDict, total=False):\n hour: DatetimeDictArg\n hours: DatetimeDictArg\n minute: DatetimeDictArg\n minutes: DatetimeDictArg\n second: DatetimeDictArg\n seconds: DatetimeDictArg\n ms: DatetimeDictArg\n us: DatetimeDictArg\n ns: DatetimeDictArg\n\n\nDictConvertible = Union[FulldatetimeDict, "DataFrame"]\nstart_caching_at = 50\n\n\n# ---------------------------------------------------------------------\n\n\ndef _guess_datetime_format_for_array(arr, dayfirst: bool | None = False) -> str | None:\n # Try to guess the format based on the first non-NaN element, return None if can't\n if (first_non_null := tslib.first_non_null(arr)) != -1:\n if type(first_non_nan_element := arr[first_non_null]) is str: # noqa: E721\n # GH#32264 np.str_ object\n guessed_format = guess_datetime_format(\n first_non_nan_element, dayfirst=dayfirst\n )\n if guessed_format is not None:\n return guessed_format\n # If there are multiple non-null elements, warn about\n # how parsing might not be consistent\n if tslib.first_non_null(arr[first_non_null + 1 :]) != -1:\n warnings.warn(\n "Could not infer format, so each element will be parsed "\n "individually, falling back to `dateutil`. 
To ensure parsing is "\n "consistent and as-expected, please specify a format.",\n UserWarning,\n stacklevel=find_stack_level(),\n )\n return None\n\n\ndef should_cache(\n arg: ArrayConvertible, unique_share: float = 0.7, check_count: int | None = None\n) -> bool:\n """\n Decides whether to do caching.\n\n If the percent of unique elements among `check_count` elements less\n than `unique_share * 100` then we can do caching.\n\n Parameters\n ----------\n arg: listlike, tuple, 1-d array, Series\n unique_share: float, default=0.7, optional\n 0 < unique_share < 1\n check_count: int, optional\n 0 <= check_count <= len(arg)\n\n Returns\n -------\n do_caching: bool\n\n Notes\n -----\n By default for a sequence of less than 50 items in size, we don't do\n caching; for the number of elements less than 5000, we take ten percent of\n all elements to check for a uniqueness share; if the sequence size is more\n than 5000, then we check only the first 500 elements.\n All constants were chosen empirically by.\n """\n do_caching = True\n\n # default realization\n if check_count is None:\n # in this case, the gain from caching is negligible\n if len(arg) <= start_caching_at:\n return False\n\n if len(arg) <= 5000:\n check_count = len(arg) // 10\n else:\n check_count = 500\n else:\n assert (\n 0 <= check_count <= len(arg)\n ), "check_count must be in next bounds: [0; len(arg)]"\n if check_count == 0:\n return False\n\n assert 0 < unique_share < 1, "unique_share must be in next bounds: (0; 1)"\n\n try:\n # We can't cache if the items are not hashable.\n unique_elements = set(islice(arg, check_count))\n except TypeError:\n return False\n if len(unique_elements) > check_count * unique_share:\n do_caching = False\n return do_caching\n\n\ndef _maybe_cache(\n arg: ArrayConvertible,\n format: str | None,\n cache: bool,\n convert_listlike: Callable,\n) -> Series:\n """\n Create a cache of unique dates from an array of dates\n\n Parameters\n ----------\n arg : listlike, tuple, 1-d array, 
Series\n format : string\n Strftime format to parse time\n cache : bool\n True attempts to create a cache of converted values\n convert_listlike : function\n Conversion function to apply on dates\n\n Returns\n -------\n cache_array : Series\n Cache of converted, unique dates. Can be empty\n """\n from pandas import Series\n\n cache_array = Series(dtype=object)\n\n if cache:\n # Perform a quicker unique check\n if not should_cache(arg):\n return cache_array\n\n if not isinstance(arg, (np.ndarray, ExtensionArray, Index, ABCSeries)):\n arg = np.array(arg)\n\n unique_dates = unique(arg)\n if len(unique_dates) < len(arg):\n cache_dates = convert_listlike(unique_dates, format)\n # GH#45319\n try:\n cache_array = Series(cache_dates, index=unique_dates, copy=False)\n except OutOfBoundsDatetime:\n return cache_array\n # GH#39882 and GH#35888 in case of None and NaT we get duplicates\n if not cache_array.index.is_unique:\n cache_array = cache_array[~cache_array.index.duplicated()]\n return cache_array\n\n\ndef _box_as_indexlike(\n dt_array: ArrayLike, utc: bool = False, name: Hashable | None = None\n) -> Index:\n """\n Properly boxes the ndarray of datetimes to DatetimeIndex\n if it is possible or to generic Index instead\n\n Parameters\n ----------\n dt_array: 1-d array\n Array of datetimes to be wrapped in an Index.\n utc : bool\n Whether to convert/localize timestamps to UTC.\n name : string, default None\n Name for a resulting index\n\n Returns\n -------\n result : datetime of converted dates\n - DatetimeIndex if convertible to sole datetime64 type\n - general Index otherwise\n """\n\n if lib.is_np_dtype(dt_array.dtype, "M"):\n tz = "utc" if utc else None\n return DatetimeIndex(dt_array, tz=tz, name=name)\n return Index(dt_array, name=name, dtype=dt_array.dtype)\n\n\ndef _convert_and_box_cache(\n arg: DatetimeScalarOrArrayConvertible,\n cache_array: Series,\n name: Hashable | None = None,\n) -> Index:\n """\n Convert array of dates with a cache and wrap the result in an 
Index.\n\n Parameters\n ----------\n arg : integer, float, string, datetime, list, tuple, 1-d array, Series\n cache_array : Series\n Cache of converted, unique dates\n name : string, default None\n Name for a DatetimeIndex\n\n Returns\n -------\n result : Index-like of converted dates\n """\n from pandas import Series\n\n result = Series(arg, dtype=cache_array.index.dtype).map(cache_array)\n return _box_as_indexlike(result._values, utc=False, name=name)\n\n\ndef _convert_listlike_datetimes(\n arg,\n format: str | None,\n name: Hashable | None = None,\n utc: bool = False,\n unit: str | None = None,\n errors: DateTimeErrorChoices = "raise",\n dayfirst: bool | None = None,\n yearfirst: bool | None = None,\n exact: bool = True,\n):\n """\n Helper function for to_datetime. Performs the conversions of 1D listlike\n of dates\n\n Parameters\n ----------\n arg : list, tuple, ndarray, Series, Index\n date to be parsed\n name : object\n None or string for the Index name\n utc : bool\n Whether to convert/localize timestamps to UTC.\n unit : str\n None or string of the frequency of the passed data\n errors : str\n error handing behaviors from to_datetime, 'raise', 'coerce', 'ignore'\n dayfirst : bool\n dayfirst parsing behavior from to_datetime\n yearfirst : bool\n yearfirst parsing behavior from to_datetime\n exact : bool, default True\n exact format matching behavior from to_datetime\n\n Returns\n -------\n Index-like of parsed dates\n """\n if isinstance(arg, (list, tuple)):\n arg = np.array(arg, dtype="O")\n elif isinstance(arg, NumpyExtensionArray):\n arg = np.array(arg)\n\n arg_dtype = getattr(arg, "dtype", None)\n # these are shortcutable\n tz = "utc" if utc else None\n if isinstance(arg_dtype, DatetimeTZDtype):\n if not isinstance(arg, (DatetimeArray, DatetimeIndex)):\n return DatetimeIndex(arg, tz=tz, name=name)\n if utc:\n arg = arg.tz_convert(None).tz_localize("utc")\n return arg\n\n elif isinstance(arg_dtype, ArrowDtype) and arg_dtype.type is Timestamp:\n # TODO: 
Combine with above if DTI/DTA supports Arrow timestamps\n if utc:\n # pyarrow uses UTC, not lowercase utc\n if isinstance(arg, Index):\n arg_array = cast(ArrowExtensionArray, arg.array)\n if arg_dtype.pyarrow_dtype.tz is not None:\n arg_array = arg_array._dt_tz_convert("UTC")\n else:\n arg_array = arg_array._dt_tz_localize("UTC")\n arg = Index(arg_array)\n else:\n # ArrowExtensionArray\n if arg_dtype.pyarrow_dtype.tz is not None:\n arg = arg._dt_tz_convert("UTC")\n else:\n arg = arg._dt_tz_localize("UTC")\n return arg\n\n elif lib.is_np_dtype(arg_dtype, "M"):\n if not is_supported_dtype(arg_dtype):\n # We go to closest supported reso, i.e. "s"\n arg = astype_overflowsafe(\n # TODO: looks like we incorrectly raise with errors=="ignore"\n np.asarray(arg),\n np.dtype("M8[s]"),\n is_coerce=errors == "coerce",\n )\n\n if not isinstance(arg, (DatetimeArray, DatetimeIndex)):\n return DatetimeIndex(arg, tz=tz, name=name)\n elif utc:\n # DatetimeArray, DatetimeIndex\n return arg.tz_localize("utc")\n\n return arg\n\n elif unit is not None:\n if format is not None:\n raise ValueError("cannot specify both format and unit")\n return _to_datetime_with_unit(arg, unit, name, utc, errors)\n elif getattr(arg, "ndim", 1) > 1:\n raise TypeError(\n "arg must be a string, datetime, list, tuple, 1-d array, or Series"\n )\n\n # warn if passing timedelta64, raise for PeriodDtype\n # NB: this must come after unit transformation\n try:\n arg, _ = maybe_convert_dtype(arg, copy=False, tz=libtimezones.maybe_get_tz(tz))\n except TypeError:\n if errors == "coerce":\n npvalues = np.array(["NaT"], dtype="datetime64[ns]").repeat(len(arg))\n return DatetimeIndex(npvalues, name=name)\n elif errors == "ignore":\n idx = Index(arg, name=name)\n return idx\n raise\n\n arg = ensure_object(arg)\n\n if format is None:\n format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst)\n\n # `format` could be inferred, or user didn't ask for mixed-format parsing.\n if format is not None and format != 
"mixed":\n return _array_strptime_with_fallback(arg, name, utc, format, exact, errors)\n\n result, tz_parsed = objects_to_datetime64(\n arg,\n dayfirst=dayfirst,\n yearfirst=yearfirst,\n utc=utc,\n errors=errors,\n allow_object=True,\n )\n\n if tz_parsed is not None:\n # We can take a shortcut since the datetime64 numpy array\n # is in UTC\n out_unit = np.datetime_data(result.dtype)[0]\n dtype = cast(DatetimeTZDtype, tz_to_dtype(tz_parsed, out_unit))\n dt64_values = result.view(f"M8[{dtype.unit}]")\n dta = DatetimeArray._simple_new(dt64_values, dtype=dtype)\n return DatetimeIndex._simple_new(dta, name=name)\n\n return _box_as_indexlike(result, utc=utc, name=name)\n\n\ndef _array_strptime_with_fallback(\n arg,\n name,\n utc: bool,\n fmt: str,\n exact: bool,\n errors: str,\n) -> Index:\n """\n Call array_strptime, with fallback behavior depending on 'errors'.\n """\n result, tz_out = array_strptime(arg, fmt, exact=exact, errors=errors, utc=utc)\n if tz_out is not None:\n unit = np.datetime_data(result.dtype)[0]\n dtype = DatetimeTZDtype(tz=tz_out, unit=unit)\n dta = DatetimeArray._simple_new(result, dtype=dtype)\n if utc:\n dta = dta.tz_convert("UTC")\n return Index(dta, name=name)\n elif result.dtype != object and utc:\n unit = np.datetime_data(result.dtype)[0]\n res = Index(result, dtype=f"M8[{unit}, UTC]", name=name)\n return res\n elif using_string_dtype() and result.dtype == object:\n if lib.is_string_array(result):\n return Index(result, dtype="str", name=name)\n return Index(result, dtype=result.dtype, name=name)\n\n\ndef _to_datetime_with_unit(arg, unit, name, utc: bool, errors: str) -> Index:\n """\n to_datetime specalized to the case where a 'unit' is passed.\n """\n arg = extract_array(arg, extract_numpy=True)\n\n # GH#30050 pass an ndarray to tslib.array_with_unit_to_datetime\n # because it expects an ndarray argument\n if isinstance(arg, IntegerArray):\n arr = arg.astype(f"datetime64[{unit}]")\n tz_parsed = None\n else:\n arg = np.asarray(arg)\n\n if 
arg.dtype.kind in "iu":\n # Note we can't do "f" here because that could induce unwanted\n # rounding GH#14156, GH#20445\n arr = arg.astype(f"datetime64[{unit}]", copy=False)\n try:\n arr = astype_overflowsafe(arr, np.dtype("M8[ns]"), copy=False)\n except OutOfBoundsDatetime:\n if errors == "raise":\n raise\n arg = arg.astype(object)\n return _to_datetime_with_unit(arg, unit, name, utc, errors)\n tz_parsed = None\n\n elif arg.dtype.kind == "f":\n with np.errstate(over="raise"):\n try:\n arr = cast_from_unit_vectorized(arg, unit=unit)\n except OutOfBoundsDatetime:\n if errors != "raise":\n return _to_datetime_with_unit(\n arg.astype(object), unit, name, utc, errors\n )\n raise OutOfBoundsDatetime(\n f"cannot convert input with unit '{unit}'"\n )\n\n arr = arr.view("M8[ns]")\n tz_parsed = None\n else:\n arg = arg.astype(object, copy=False)\n arr, tz_parsed = tslib.array_with_unit_to_datetime(arg, unit, errors=errors)\n\n if errors == "ignore":\n # Index constructor _may_ infer to DatetimeIndex\n result = Index._with_infer(arr, name=name)\n else:\n result = DatetimeIndex(arr, name=name)\n\n if not isinstance(result, DatetimeIndex):\n return result\n\n # GH#23758: We may still need to localize the result with tz\n # GH#25546: Apply tz_parsed first (from arg), then tz (from caller)\n # result will be naive but in UTC\n result = result.tz_localize("UTC").tz_convert(tz_parsed)\n\n if utc:\n if result.tz is None:\n result = result.tz_localize("utc")\n else:\n result = result.tz_convert("utc")\n return result\n\n\ndef _adjust_to_origin(arg, origin, unit):\n """\n Helper function for to_datetime.\n Adjust input argument to the specified origin\n\n Parameters\n ----------\n arg : list, tuple, ndarray, Series, Index\n date to be adjusted\n origin : 'julian' or Timestamp\n origin offset for the arg\n unit : str\n passed unit from to_datetime, must be 'D'\n\n Returns\n -------\n ndarray or scalar of adjusted date(s)\n """\n if origin == "julian":\n original = arg\n j0 = 
Timestamp(0).to_julian_date()\n if unit != "D":\n raise ValueError("unit must be 'D' for origin='julian'")\n try:\n arg = arg - j0\n except TypeError as err:\n raise ValueError(\n "incompatible 'arg' type for given 'origin'='julian'"\n ) from err\n\n # preemptively check this for a nice range\n j_max = Timestamp.max.to_julian_date() - j0\n j_min = Timestamp.min.to_julian_date() - j0\n if np.any(arg > j_max) or np.any(arg < j_min):\n raise OutOfBoundsDatetime(\n f"{original} is Out of Bounds for origin='julian'"\n )\n else:\n # arg must be numeric\n if not (\n (is_integer(arg) or is_float(arg)) or is_numeric_dtype(np.asarray(arg))\n ):\n raise ValueError(\n f"'{arg}' is not compatible with origin='{origin}'; "\n "it must be numeric with a unit specified"\n )\n\n # we are going to offset back to unix / epoch time\n try:\n offset = Timestamp(origin, unit=unit)\n except OutOfBoundsDatetime as err:\n raise OutOfBoundsDatetime(f"origin {origin} is Out of Bounds") from err\n except ValueError as err:\n raise ValueError(\n f"origin {origin} cannot be converted to a Timestamp"\n ) from err\n\n if offset.tz is not None:\n raise ValueError(f"origin offset {offset} must be tz-naive")\n td_offset = offset - Timestamp(0)\n\n # convert the offset to the unit of the arg\n # this should be lossless in terms of precision\n ioffset = td_offset // Timedelta(1, unit=unit)\n\n # scalars & ndarray-like can handle the addition\n if is_list_like(arg) and not isinstance(arg, (ABCSeries, Index, np.ndarray)):\n arg = np.asarray(arg)\n arg = arg + ioffset\n return arg\n\n\n@overload\ndef to_datetime(\n arg: DatetimeScalar,\n errors: DateTimeErrorChoices = ...,\n dayfirst: bool = ...,\n yearfirst: bool = ...,\n utc: bool = ...,\n format: str | None = ...,\n exact: bool = ...,\n unit: str | None = ...,\n infer_datetime_format: bool = ...,\n origin=...,\n cache: bool = ...,\n) -> Timestamp:\n ...\n\n\n@overload\ndef to_datetime(\n arg: Series | DictConvertible,\n errors: DateTimeErrorChoices = 
...,\n dayfirst: bool = ...,\n yearfirst: bool = ...,\n utc: bool = ...,\n format: str | None = ...,\n exact: bool = ...,\n unit: str | None = ...,\n infer_datetime_format: bool = ...,\n origin=...,\n cache: bool = ...,\n) -> Series:\n ...\n\n\n@overload\ndef to_datetime(\n arg: list | tuple | Index | ArrayLike,\n errors: DateTimeErrorChoices = ...,\n dayfirst: bool = ...,\n yearfirst: bool = ...,\n utc: bool = ...,\n format: str | None = ...,\n exact: bool = ...,\n unit: str | None = ...,\n infer_datetime_format: bool = ...,\n origin=...,\n cache: bool = ...,\n) -> DatetimeIndex:\n ...\n\n\ndef to_datetime(\n arg: DatetimeScalarOrArrayConvertible | DictConvertible,\n errors: DateTimeErrorChoices = "raise",\n dayfirst: bool = False,\n yearfirst: bool = False,\n utc: bool = False,\n format: str | None = None,\n exact: bool | lib.NoDefault = lib.no_default,\n unit: str | None = None,\n infer_datetime_format: lib.NoDefault | bool = lib.no_default,\n origin: str = "unix",\n cache: bool = True,\n) -> DatetimeIndex | Series | DatetimeScalar | NaTType | None:\n """\n Convert argument to datetime.\n\n This function converts a scalar, array-like, :class:`Series` or\n :class:`DataFrame`/dict-like to a pandas datetime object.\n\n Parameters\n ----------\n arg : int, float, str, datetime, list, tuple, 1-d array, Series, DataFrame/dict-like\n The object to convert to a datetime. If a :class:`DataFrame` is provided, the\n method expects minimally the following columns: :const:`"year"`,\n :const:`"month"`, :const:`"day"`. 
The column "year"\n must be specified in 4-digit format.\n errors : {'ignore', 'raise', 'coerce'}, default 'raise'\n - If :const:`'raise'`, then invalid parsing will raise an exception.\n - If :const:`'coerce'`, then invalid parsing will be set as :const:`NaT`.\n - If :const:`'ignore'`, then invalid parsing will return the input.\n dayfirst : bool, default False\n Specify a date parse order if `arg` is str or is list-like.\n If :const:`True`, parses dates with the day first, e.g. :const:`"10/11/12"`\n is parsed as :const:`2012-11-10`.\n\n .. warning::\n\n ``dayfirst=True`` is not strict, but will prefer to parse\n with day first.\n\n yearfirst : bool, default False\n Specify a date parse order if `arg` is str or is list-like.\n\n - If :const:`True` parses dates with the year first, e.g.\n :const:`"10/11/12"` is parsed as :const:`2010-11-12`.\n - If both `dayfirst` and `yearfirst` are :const:`True`, `yearfirst` is\n preceded (same as :mod:`dateutil`).\n\n .. warning::\n\n ``yearfirst=True`` is not strict, but will prefer to parse\n with year first.\n\n utc : bool, default False\n Control timezone-related parsing, localization and conversion.\n\n - If :const:`True`, the function *always* returns a timezone-aware\n UTC-localized :class:`Timestamp`, :class:`Series` or\n :class:`DatetimeIndex`. To do this, timezone-naive inputs are\n *localized* as UTC, while timezone-aware inputs are *converted* to UTC.\n\n - If :const:`False` (default), inputs will not be coerced to UTC.\n Timezone-naive inputs will remain naive, while timezone-aware ones\n will keep their time offsets. Limitations exist for mixed\n offsets (typically, daylight savings), see :ref:`Examples\n <to_datetime_tz_examples>` section for details.\n\n .. warning::\n\n In a future version of pandas, parsing datetimes with mixed time\n zones will raise an error unless `utc=True`.\n Please specify `utc=True` to opt in to the new behaviour\n and silence this warning. 
To create a `Series` with mixed offsets and\n `object` dtype, please use `apply` and `datetime.datetime.strptime`.\n\n See also: pandas general documentation about `timezone conversion and\n localization\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html\n #time-zone-handling>`_.\n\n format : str, default None\n The strftime to parse time, e.g. :const:`"%d/%m/%Y"`. See\n `strftime documentation\n <https://docs.python.org/3/library/datetime.html\n #strftime-and-strptime-behavior>`_ for more information on choices, though\n note that :const:`"%f"` will parse all the way up to nanoseconds.\n You can also pass:\n\n - "ISO8601", to parse any `ISO8601 <https://en.wikipedia.org/wiki/ISO_8601>`_\n time string (not necessarily in exactly the same format);\n - "mixed", to infer the format for each element individually. This is risky,\n and you should probably use it along with `dayfirst`.\n\n .. note::\n\n If a :class:`DataFrame` is passed, then `format` has no effect.\n\n exact : bool, default True\n Control how `format` is used:\n\n - If :const:`True`, require an exact `format` match.\n - If :const:`False`, allow the `format` to match anywhere in the target\n string.\n\n Cannot be used alongside ``format='ISO8601'`` or ``format='mixed'``.\n unit : str, default 'ns'\n The unit of the arg (D,s,ms,us,ns) denote the unit, which is an\n integer or float number. This will be based off the origin.\n Example, with ``unit='ms'`` and ``origin='unix'``, this would calculate\n the number of milliseconds to the unix epoch start.\n infer_datetime_format : bool, default False\n If :const:`True` and no `format` is given, attempt to infer the format\n of the datetime strings based on the first non-NaN element,\n and if it can be inferred, switch to a faster method of parsing them.\n In some cases this can increase the parsing speed by ~5-10x.\n\n .. 
deprecated:: 2.0.0\n A strict version of this argument is now the default, passing it has\n no effect.\n\n origin : scalar, default 'unix'\n Define the reference date. The numeric values would be parsed as number\n of units (defined by `unit`) since this reference date.\n\n - If :const:`'unix'` (or POSIX) time; origin is set to 1970-01-01.\n - If :const:`'julian'`, unit must be :const:`'D'`, and origin is set to\n beginning of Julian Calendar. Julian day number :const:`0` is assigned\n to the day starting at noon on January 1, 4713 BC.\n - If Timestamp convertible (Timestamp, dt.datetime, np.datetimt64 or date\n string), origin is set to Timestamp identified by origin.\n - If a float or integer, origin is the difference\n (in units determined by the ``unit`` argument) relative to 1970-01-01.\n cache : bool, default True\n If :const:`True`, use a cache of unique, converted dates to apply the\n datetime conversion. May produce significant speed-up when parsing\n duplicate date strings, especially ones with timezone offsets. The cache\n is only used when there are at least 50 values. 
The presence of\n out-of-bounds values will render the cache unusable and may slow down\n parsing.\n\n Returns\n -------\n datetime\n If parsing succeeded.\n Return type depends on input (types in parenthesis correspond to\n fallback in case of unsuccessful timezone or out-of-range timestamp\n parsing):\n\n - scalar: :class:`Timestamp` (or :class:`datetime.datetime`)\n - array-like: :class:`DatetimeIndex` (or :class:`Series` with\n :class:`object` dtype containing :class:`datetime.datetime`)\n - Series: :class:`Series` of :class:`datetime64` dtype (or\n :class:`Series` of :class:`object` dtype containing\n :class:`datetime.datetime`)\n - DataFrame: :class:`Series` of :class:`datetime64` dtype (or\n :class:`Series` of :class:`object` dtype containing\n :class:`datetime.datetime`)\n\n Raises\n ------\n ParserError\n When parsing a date from string fails.\n ValueError\n When another datetime conversion error happens. For example when one\n of 'year', 'month', day' columns is missing in a :class:`DataFrame`, or\n when a Timezone-aware :class:`datetime.datetime` is found in an array-like\n of mixed time offsets, and ``utc=False``.\n\n See Also\n --------\n DataFrame.astype : Cast argument to a specified dtype.\n to_timedelta : Convert argument to timedelta.\n convert_dtypes : Convert dtypes.\n\n Notes\n -----\n\n Many input types are supported, and lead to different output types:\n\n - **scalars** can be int, float, str, datetime object (from stdlib :mod:`datetime`\n module or :mod:`numpy`). They are converted to :class:`Timestamp` when\n possible, otherwise they are converted to :class:`datetime.datetime`.\n None/NaN/null scalars are converted to :const:`NaT`.\n\n - **array-like** can contain int, float, str, datetime objects. They are\n converted to :class:`DatetimeIndex` when possible, otherwise they are\n converted to :class:`Index` with :class:`object` dtype, containing\n :class:`datetime.datetime`. 
None/NaN/null entries are converted to\n :const:`NaT` in both cases.\n\n - **Series** are converted to :class:`Series` with :class:`datetime64`\n dtype when possible, otherwise they are converted to :class:`Series` with\n :class:`object` dtype, containing :class:`datetime.datetime`. None/NaN/null\n entries are converted to :const:`NaT` in both cases.\n\n - **DataFrame/dict-like** are converted to :class:`Series` with\n :class:`datetime64` dtype. For each row a datetime is created from assembling\n the various dataframe columns. Column keys can be common abbreviations\n like ['year', 'month', 'day', 'minute', 'second', 'ms', 'us', 'ns']) or\n plurals of the same.\n\n The following causes are responsible for :class:`datetime.datetime` objects\n being returned (possibly inside an :class:`Index` or a :class:`Series` with\n :class:`object` dtype) instead of a proper pandas designated type\n (:class:`Timestamp`, :class:`DatetimeIndex` or :class:`Series`\n with :class:`datetime64` dtype):\n\n - when any input element is before :const:`Timestamp.min` or after\n :const:`Timestamp.max`, see `timestamp limitations\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html\n #timeseries-timestamp-limits>`_.\n\n - when ``utc=False`` (default) and the input is an array-like or\n :class:`Series` containing mixed naive/aware datetime, or aware with mixed\n time offsets. Note that this happens in the (quite frequent) situation when\n the timezone has a daylight savings policy. In that case you may wish to\n use ``utc=True``.\n\n Examples\n --------\n\n **Handling various input formats**\n\n Assembling a datetime from multiple columns of a :class:`DataFrame`. The keys\n can be common abbreviations like ['year', 'month', 'day', 'minute', 'second',\n 'ms', 'us', 'ns']) or plurals of the same\n\n >>> df = pd.DataFrame({'year': [2015, 2016],\n ... 'month': [2, 3],\n ... 
'day': [4, 5]})\n >>> pd.to_datetime(df)\n 0 2015-02-04\n 1 2016-03-05\n dtype: datetime64[ns]\n\n Using a unix epoch time\n\n >>> pd.to_datetime(1490195805, unit='s')\n Timestamp('2017-03-22 15:16:45')\n >>> pd.to_datetime(1490195805433502912, unit='ns')\n Timestamp('2017-03-22 15:16:45.433502912')\n\n .. warning:: For float arg, precision rounding might happen. To prevent\n unexpected behavior use a fixed-width exact type.\n\n Using a non-unix epoch origin\n\n >>> pd.to_datetime([1, 2, 3], unit='D',\n ... origin=pd.Timestamp('1960-01-01'))\n DatetimeIndex(['1960-01-02', '1960-01-03', '1960-01-04'],\n dtype='datetime64[ns]', freq=None)\n\n **Differences with strptime behavior**\n\n :const:`"%f"` will parse all the way up to nanoseconds.\n\n >>> pd.to_datetime('2018-10-26 12:00:00.0000000011',\n ... format='%Y-%m-%d %H:%M:%S.%f')\n Timestamp('2018-10-26 12:00:00.000000001')\n\n **Non-convertible date/times**\n\n Passing ``errors='coerce'`` will force an out-of-bounds date to :const:`NaT`,\n in addition to forcing non-dates (or non-parseable dates) to :const:`NaT`.\n\n >>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce')\n NaT\n\n .. 
_to_datetime_tz_examples:\n\n **Timezones and time offsets**\n\n The default behaviour (``utc=False``) is as follows:\n\n - Timezone-naive inputs are converted to timezone-naive :class:`DatetimeIndex`:\n\n >>> pd.to_datetime(['2018-10-26 12:00:00', '2018-10-26 13:00:15'])\n DatetimeIndex(['2018-10-26 12:00:00', '2018-10-26 13:00:15'],\n dtype='datetime64[ns]', freq=None)\n\n - Timezone-aware inputs *with constant time offset* are converted to\n timezone-aware :class:`DatetimeIndex`:\n\n >>> pd.to_datetime(['2018-10-26 12:00 -0500', '2018-10-26 13:00 -0500'])\n DatetimeIndex(['2018-10-26 12:00:00-05:00', '2018-10-26 13:00:00-05:00'],\n dtype='datetime64[ns, UTC-05:00]', freq=None)\n\n - However, timezone-aware inputs *with mixed time offsets* (for example\n issued from a timezone with daylight savings, such as Europe/Paris)\n are **not successfully converted** to a :class:`DatetimeIndex`.\n Parsing datetimes with mixed time zones will show a warning unless\n `utc=True`. If you specify `utc=False` the warning below will be shown\n and a simple :class:`Index` containing :class:`datetime.datetime`\n objects will be returned:\n\n >>> pd.to_datetime(['2020-10-25 02:00 +0200',\n ... '2020-10-25 04:00 +0100']) # doctest: +SKIP\n FutureWarning: In a future version of pandas, parsing datetimes with mixed\n time zones will raise an error unless `utc=True`. Please specify `utc=True`\n to opt in to the new behaviour and silence this warning. To create a `Series`\n with mixed offsets and `object` dtype, please use `apply` and\n `datetime.datetime.strptime`.\n Index([2020-10-25 02:00:00+02:00, 2020-10-25 04:00:00+01:00],\n dtype='object')\n\n - A mix of timezone-aware and timezone-naive inputs is also converted to\n a simple :class:`Index` containing :class:`datetime.datetime` objects:\n\n >>> from datetime import datetime\n >>> pd.to_datetime(["2020-01-01 01:00:00-01:00",\n ... 
datetime(2020, 1, 1, 3, 0)]) # doctest: +SKIP\n FutureWarning: In a future version of pandas, parsing datetimes with mixed\n time zones will raise an error unless `utc=True`. Please specify `utc=True`\n to opt in to the new behaviour and silence this warning. To create a `Series`\n with mixed offsets and `object` dtype, please use `apply` and\n `datetime.datetime.strptime`.\n Index([2020-01-01 01:00:00-01:00, 2020-01-01 03:00:00], dtype='object')\n\n |\n\n Setting ``utc=True`` solves most of the above issues:\n\n - Timezone-naive inputs are *localized* as UTC\n\n >>> pd.to_datetime(['2018-10-26 12:00', '2018-10-26 13:00'], utc=True)\n DatetimeIndex(['2018-10-26 12:00:00+00:00', '2018-10-26 13:00:00+00:00'],\n dtype='datetime64[ns, UTC]', freq=None)\n\n - Timezone-aware inputs are *converted* to UTC (the output represents the\n exact same datetime, but viewed from the UTC time offset `+00:00`).\n\n >>> pd.to_datetime(['2018-10-26 12:00 -0530', '2018-10-26 12:00 -0500'],\n ... utc=True)\n DatetimeIndex(['2018-10-26 17:30:00+00:00', '2018-10-26 17:00:00+00:00'],\n dtype='datetime64[ns, UTC]', freq=None)\n\n - Inputs can contain both string or datetime, the above\n rules still apply\n\n >>> pd.to_datetime(['2018-10-26 12:00', datetime(2020, 1, 1, 18)], utc=True)\n DatetimeIndex(['2018-10-26 12:00:00+00:00', '2020-01-01 18:00:00+00:00'],\n dtype='datetime64[ns, UTC]', freq=None)\n """\n if exact is not lib.no_default and format in {"mixed", "ISO8601"}:\n raise ValueError("Cannot use 'exact' when 'format' is 'mixed' or 'ISO8601'")\n if infer_datetime_format is not lib.no_default:\n warnings.warn(\n "The argument 'infer_datetime_format' is deprecated and will "\n "be removed in a future version. "\n "A strict version of it is now the default, see "\n "https://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html. 
"\n "You can safely remove this argument.",\n stacklevel=find_stack_level(),\n )\n if errors == "ignore":\n # GH#54467\n warnings.warn(\n "errors='ignore' is deprecated and will raise in a future version. "\n "Use to_datetime without passing `errors` and catch exceptions "\n "explicitly instead",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n\n if arg is None:\n return None\n\n if origin != "unix":\n arg = _adjust_to_origin(arg, origin, unit)\n\n convert_listlike = partial(\n _convert_listlike_datetimes,\n utc=utc,\n unit=unit,\n dayfirst=dayfirst,\n yearfirst=yearfirst,\n errors=errors,\n exact=exact,\n )\n # pylint: disable-next=used-before-assignment\n result: Timestamp | NaTType | Series | Index\n\n if isinstance(arg, Timestamp):\n result = arg\n if utc:\n if arg.tz is not None:\n result = arg.tz_convert("utc")\n else:\n result = arg.tz_localize("utc")\n elif isinstance(arg, ABCSeries):\n cache_array = _maybe_cache(arg, format, cache, convert_listlike)\n if not cache_array.empty:\n result = arg.map(cache_array)\n else:\n values = convert_listlike(arg._values, format)\n result = arg._constructor(values, index=arg.index, name=arg.name)\n elif isinstance(arg, (ABCDataFrame, abc.MutableMapping)):\n result = _assemble_from_unit_mappings(arg, errors, utc)\n elif isinstance(arg, Index):\n cache_array = _maybe_cache(arg, format, cache, convert_listlike)\n if not cache_array.empty:\n result = _convert_and_box_cache(arg, cache_array, name=arg.name)\n else:\n result = convert_listlike(arg, format, name=arg.name)\n elif is_list_like(arg):\n try:\n # error: Argument 1 to "_maybe_cache" has incompatible type\n # "Union[float, str, datetime, List[Any], Tuple[Any, ...], ExtensionArray,\n # ndarray[Any, Any], Series]"; expected "Union[List[Any], Tuple[Any, ...],\n # Union[Union[ExtensionArray, ndarray[Any, Any]], Index, Series], Series]"\n argc = cast(\n Union[list, tuple, ExtensionArray, np.ndarray, "Series", Index], arg\n )\n cache_array = _maybe_cache(argc, format, 
cache, convert_listlike)\n except OutOfBoundsDatetime:\n # caching attempts to create a DatetimeIndex, which may raise\n # an OOB. If that's the desired behavior, then just reraise...\n if errors == "raise":\n raise\n # ... otherwise, continue without the cache.\n from pandas import Series\n\n cache_array = Series([], dtype=object) # just an empty array\n if not cache_array.empty:\n result = _convert_and_box_cache(argc, cache_array)\n else:\n result = convert_listlike(argc, format)\n else:\n result = convert_listlike(np.array([arg]), format)[0]\n if isinstance(arg, bool) and isinstance(result, np.bool_):\n result = bool(result) # TODO: avoid this kludge.\n\n # error: Incompatible return value type (got "Union[Timestamp, NaTType,\n # Series, Index]", expected "Union[DatetimeIndex, Series, float, str,\n # NaTType, None]")\n return result # type: ignore[return-value]\n\n\n# mappings for assembling units\n_unit_map = {\n "year": "year",\n "years": "year",\n "month": "month",\n "months": "month",\n "day": "day",\n "days": "day",\n "hour": "h",\n "hours": "h",\n "minute": "m",\n "minutes": "m",\n "second": "s",\n "seconds": "s",\n "ms": "ms",\n "millisecond": "ms",\n "milliseconds": "ms",\n "us": "us",\n "microsecond": "us",\n "microseconds": "us",\n "ns": "ns",\n "nanosecond": "ns",\n "nanoseconds": "ns",\n}\n\n\ndef _assemble_from_unit_mappings(arg, errors: DateTimeErrorChoices, utc: bool):\n """\n assemble the unit specified fields from the arg (DataFrame)\n Return a Series for actual parsing\n\n Parameters\n ----------\n arg : DataFrame\n errors : {'ignore', 'raise', 'coerce'}, default 'raise'\n\n - If :const:`'raise'`, then invalid parsing will raise an exception\n - If :const:`'coerce'`, then invalid parsing will be set as :const:`NaT`\n - If :const:`'ignore'`, then invalid parsing will return the input\n utc : bool\n Whether to convert/localize timestamps to UTC.\n\n Returns\n -------\n Series\n """\n from pandas import (\n DataFrame,\n to_numeric,\n 
to_timedelta,\n )\n\n arg = DataFrame(arg)\n if not arg.columns.is_unique:\n raise ValueError("cannot assemble with duplicate keys")\n\n # replace passed unit with _unit_map\n def f(value):\n if value in _unit_map:\n return _unit_map[value]\n\n # m is case significant\n if value.lower() in _unit_map:\n return _unit_map[value.lower()]\n\n return value\n\n unit = {k: f(k) for k in arg.keys()}\n unit_rev = {v: k for k, v in unit.items()}\n\n # we require at least Ymd\n required = ["year", "month", "day"]\n req = sorted(set(required) - set(unit_rev.keys()))\n if len(req):\n _required = ",".join(req)\n raise ValueError(\n "to assemble mappings requires at least that "\n f"[year, month, day] be specified: [{_required}] is missing"\n )\n\n # keys we don't recognize\n excess = sorted(set(unit_rev.keys()) - set(_unit_map.values()))\n if len(excess):\n _excess = ",".join(excess)\n raise ValueError(\n f"extra keys have been passed to the datetime assemblage: [{_excess}]"\n )\n\n def coerce(values):\n # we allow coercion to if errors allows\n values = to_numeric(values, errors=errors)\n\n # prevent overflow in case of int8 or int16\n if is_integer_dtype(values.dtype):\n values = values.astype("int64", copy=False)\n return values\n\n values = (\n coerce(arg[unit_rev["year"]]) * 10000\n + coerce(arg[unit_rev["month"]]) * 100\n + coerce(arg[unit_rev["day"]])\n )\n try:\n values = to_datetime(values, format="%Y%m%d", errors=errors, utc=utc)\n except (TypeError, ValueError) as err:\n raise ValueError(f"cannot assemble the datetimes: {err}") from err\n\n units: list[UnitChoices] = ["h", "m", "s", "ms", "us", "ns"]\n for u in units:\n value = unit_rev.get(u)\n if value is not None and value in arg:\n try:\n values += to_timedelta(coerce(arg[value]), unit=u, errors=errors)\n except (TypeError, ValueError) as err:\n raise ValueError(\n f"cannot assemble the datetimes [{value}]: {err}"\n ) from err\n return values\n\n\n__all__ = [\n "DateParseError",\n "should_cache",\n 
"to_datetime",\n]\n
.venv\Lib\site-packages\pandas\core\tools\datetimes.py
datetimes.py
Python
43,606
0.95
0.16129
0.063098
awesome-app
19
2023-07-13T21:25:37.349336
Apache-2.0
false
f9404f5508950a950853a13c672b6d6d
from __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n Literal,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import (\n lib,\n missing as libmissing,\n)\nfrom pandas.util._exceptions import find_stack_level\nfrom pandas.util._validators import check_dtype_backend\n\nfrom pandas.core.dtypes.cast import maybe_downcast_numeric\nfrom pandas.core.dtypes.common import (\n ensure_object,\n is_bool_dtype,\n is_decimal,\n is_integer_dtype,\n is_number,\n is_numeric_dtype,\n is_scalar,\n is_string_dtype,\n needs_i8_conversion,\n)\nfrom pandas.core.dtypes.dtypes import ArrowDtype\nfrom pandas.core.dtypes.generic import (\n ABCIndex,\n ABCSeries,\n)\n\nfrom pandas.core.arrays import BaseMaskedArray\nfrom pandas.core.arrays.string_ import StringDtype\n\nif TYPE_CHECKING:\n from pandas._typing import (\n DateTimeErrorChoices,\n DtypeBackend,\n npt,\n )\n\n\ndef to_numeric(\n arg,\n errors: DateTimeErrorChoices = "raise",\n downcast: Literal["integer", "signed", "unsigned", "float"] | None = None,\n dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,\n):\n """\n Convert argument to a numeric type.\n\n The default return dtype is `float64` or `int64`\n depending on the data supplied. Use the `downcast` parameter\n to obtain other dtypes.\n\n Please note that precision loss may occur if really large numbers\n are passed in. Due to the internal limitations of `ndarray`, if\n numbers smaller than `-9223372036854775808` (np.iinfo(np.int64).min)\n or larger than `18446744073709551615` (np.iinfo(np.uint64).max) are\n passed in, it is very likely they will be converted to float so that\n they can be stored in an `ndarray`. 
These warnings apply similarly to\n `Series` since it internally leverages `ndarray`.\n\n Parameters\n ----------\n arg : scalar, list, tuple, 1-d array, or Series\n Argument to be converted.\n errors : {'ignore', 'raise', 'coerce'}, default 'raise'\n - If 'raise', then invalid parsing will raise an exception.\n - If 'coerce', then invalid parsing will be set as NaN.\n - If 'ignore', then invalid parsing will return the input.\n\n .. versionchanged:: 2.2\n\n "ignore" is deprecated. Catch exceptions explicitly instead.\n\n downcast : str, default None\n Can be 'integer', 'signed', 'unsigned', or 'float'.\n If not None, and if the data has been successfully cast to a\n numerical dtype (or if the data was numeric to begin with),\n downcast that resulting data to the smallest numerical dtype\n possible according to the following rules:\n\n - 'integer' or 'signed': smallest signed int dtype (min.: np.int8)\n - 'unsigned': smallest unsigned int dtype (min.: np.uint8)\n - 'float': smallest float dtype (min.: np.float32)\n\n As this behaviour is separate from the core conversion to\n numeric values, any errors raised during the downcasting\n will be surfaced regardless of the value of the 'errors' input.\n\n In addition, downcasting will only occur if the size\n of the resulting data's dtype is strictly larger than\n the dtype it is to be cast to, so if none of the dtypes\n checked satisfy that specification, no downcasting will be\n performed on the data.\n dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'\n Back-end data type applied to the resultant :class:`DataFrame`\n (still experimental). Behaviour is as follows:\n\n * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`\n (default).\n * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`\n DataFrame.\n\n .. versionadded:: 2.0\n\n Returns\n -------\n ret\n Numeric if parsing succeeded.\n Return type depends on input. 
Series if Series, otherwise ndarray.\n\n See Also\n --------\n DataFrame.astype : Cast argument to a specified dtype.\n to_datetime : Convert argument to datetime.\n to_timedelta : Convert argument to timedelta.\n numpy.ndarray.astype : Cast a numpy array to a specified type.\n DataFrame.convert_dtypes : Convert dtypes.\n\n Examples\n --------\n Take separate series and convert to numeric, coercing when told to\n\n >>> s = pd.Series(['1.0', '2', -3])\n >>> pd.to_numeric(s)\n 0 1.0\n 1 2.0\n 2 -3.0\n dtype: float64\n >>> pd.to_numeric(s, downcast='float')\n 0 1.0\n 1 2.0\n 2 -3.0\n dtype: float32\n >>> pd.to_numeric(s, downcast='signed')\n 0 1\n 1 2\n 2 -3\n dtype: int8\n >>> s = pd.Series(['apple', '1.0', '2', -3])\n >>> pd.to_numeric(s, errors='coerce')\n 0 NaN\n 1 1.0\n 2 2.0\n 3 -3.0\n dtype: float64\n\n Downcasting of nullable integer and floating dtypes is supported:\n\n >>> s = pd.Series([1, 2, 3], dtype="Int64")\n >>> pd.to_numeric(s, downcast="integer")\n 0 1\n 1 2\n 2 3\n dtype: Int8\n >>> s = pd.Series([1.0, 2.1, 3.0], dtype="Float64")\n >>> pd.to_numeric(s, downcast="float")\n 0 1.0\n 1 2.1\n 2 3.0\n dtype: Float32\n """\n if downcast not in (None, "integer", "signed", "unsigned", "float"):\n raise ValueError("invalid downcasting method provided")\n\n if errors not in ("ignore", "raise", "coerce"):\n raise ValueError("invalid error value specified")\n if errors == "ignore":\n # GH#54467\n warnings.warn(\n "errors='ignore' is deprecated and will raise in a future version. 
"\n "Use to_numeric without passing `errors` and catch exceptions "\n "explicitly instead",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n\n check_dtype_backend(dtype_backend)\n\n is_series = False\n is_index = False\n is_scalars = False\n\n if isinstance(arg, ABCSeries):\n is_series = True\n values = arg.values\n elif isinstance(arg, ABCIndex):\n is_index = True\n if needs_i8_conversion(arg.dtype):\n values = arg.view("i8")\n else:\n values = arg.values\n elif isinstance(arg, (list, tuple)):\n values = np.array(arg, dtype="O")\n elif is_scalar(arg):\n if is_decimal(arg):\n return float(arg)\n if is_number(arg):\n return arg\n is_scalars = True\n values = np.array([arg], dtype="O")\n elif getattr(arg, "ndim", 1) > 1:\n raise TypeError("arg must be a list, tuple, 1-d array, or Series")\n else:\n values = arg\n\n orig_values = values\n\n # GH33013: for IntegerArray & FloatingArray extract non-null values for casting\n # save mask to reconstruct the full array after casting\n mask: npt.NDArray[np.bool_] | None = None\n if isinstance(values, BaseMaskedArray):\n mask = values._mask\n values = values._data[~mask]\n\n values_dtype = getattr(values, "dtype", None)\n if isinstance(values_dtype, ArrowDtype):\n mask = values.isna()\n values = values.dropna().to_numpy()\n new_mask: np.ndarray | None = None\n if is_numeric_dtype(values_dtype):\n pass\n elif lib.is_np_dtype(values_dtype, "mM"):\n values = values.view(np.int64)\n else:\n values = ensure_object(values)\n coerce_numeric = errors not in ("ignore", "raise")\n try:\n values, new_mask = lib.maybe_convert_numeric( # type: ignore[call-overload]\n values,\n set(),\n coerce_numeric=coerce_numeric,\n convert_to_masked_nullable=dtype_backend is not lib.no_default\n or isinstance(values_dtype, StringDtype)\n and values_dtype.na_value is libmissing.NA,\n )\n except (ValueError, TypeError):\n if errors == "raise":\n raise\n values = orig_values\n\n if new_mask is not None:\n # Remove unnecessary values, is expected 
later anyway and enables\n # downcasting\n values = values[~new_mask]\n elif (\n dtype_backend is not lib.no_default\n and new_mask is None\n or isinstance(values_dtype, StringDtype)\n and values_dtype.na_value is libmissing.NA\n ):\n new_mask = np.zeros(values.shape, dtype=np.bool_)\n\n # attempt downcast only if the data has been successfully converted\n # to a numerical dtype and if a downcast method has been specified\n if downcast is not None and is_numeric_dtype(values.dtype):\n typecodes: str | None = None\n\n if downcast in ("integer", "signed"):\n typecodes = np.typecodes["Integer"]\n elif downcast == "unsigned" and (not len(values) or np.min(values) >= 0):\n typecodes = np.typecodes["UnsignedInteger"]\n elif downcast == "float":\n typecodes = np.typecodes["Float"]\n\n # pandas support goes only to np.float32,\n # as float dtypes smaller than that are\n # extremely rare and not well supported\n float_32_char = np.dtype(np.float32).char\n float_32_ind = typecodes.index(float_32_char)\n typecodes = typecodes[float_32_ind:]\n\n if typecodes is not None:\n # from smallest to largest\n for typecode in typecodes:\n dtype = np.dtype(typecode)\n if dtype.itemsize <= values.dtype.itemsize:\n values = maybe_downcast_numeric(values, dtype)\n\n # successful conversion\n if values.dtype == dtype:\n break\n\n # GH33013: for IntegerArray, BooleanArray & FloatingArray need to reconstruct\n # masked array\n if (mask is not None or new_mask is not None) and not is_string_dtype(values.dtype):\n if mask is None or (new_mask is not None and new_mask.shape == mask.shape):\n # GH 52588\n mask = new_mask\n else:\n mask = mask.copy()\n assert isinstance(mask, np.ndarray)\n data = np.zeros(mask.shape, dtype=values.dtype)\n data[~mask] = values\n\n from pandas.core.arrays import (\n ArrowExtensionArray,\n BooleanArray,\n FloatingArray,\n IntegerArray,\n )\n\n klass: type[IntegerArray | BooleanArray | FloatingArray]\n if is_integer_dtype(data.dtype):\n klass = IntegerArray\n elif 
is_bool_dtype(data.dtype):\n klass = BooleanArray\n else:\n klass = FloatingArray\n values = klass(data, mask)\n\n if dtype_backend == "pyarrow" or isinstance(values_dtype, ArrowDtype):\n values = ArrowExtensionArray(values.__arrow_array__())\n\n if is_series:\n return arg._constructor(values, index=arg.index, name=arg.name)\n elif is_index:\n # because we want to coerce to numeric if possible,\n # do not use _shallow_copy\n from pandas import Index\n\n return Index(values, name=arg.name)\n elif is_scalars:\n return values[0]\n else:\n return values\n
.venv\Lib\site-packages\pandas\core\tools\numeric.py
numeric.py
Python
11,051
0.95
0.13253
0.065972
python-kit
330
2024-11-27T13:19:15.462454
MIT
false
40dcb6d1cf1bc01846d7ebc4812c3026
"""\ntimedelta support tools\n"""\nfrom __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n overload,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import lib\nfrom pandas._libs.tslibs import (\n NaT,\n NaTType,\n)\nfrom pandas._libs.tslibs.timedeltas import (\n Timedelta,\n disallow_ambiguous_unit,\n parse_timedelta_unit,\n)\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.common import is_list_like\nfrom pandas.core.dtypes.dtypes import ArrowDtype\nfrom pandas.core.dtypes.generic import (\n ABCIndex,\n ABCSeries,\n)\n\nfrom pandas.core.arrays.timedeltas import sequence_to_td64ns\n\nif TYPE_CHECKING:\n from collections.abc import Hashable\n from datetime import timedelta\n\n from pandas._libs.tslibs.timedeltas import UnitChoices\n from pandas._typing import (\n ArrayLike,\n DateTimeErrorChoices,\n )\n\n from pandas import (\n Index,\n Series,\n TimedeltaIndex,\n )\n\n\n@overload\ndef to_timedelta(\n arg: str | float | timedelta,\n unit: UnitChoices | None = ...,\n errors: DateTimeErrorChoices = ...,\n) -> Timedelta:\n ...\n\n\n@overload\ndef to_timedelta(\n arg: Series,\n unit: UnitChoices | None = ...,\n errors: DateTimeErrorChoices = ...,\n) -> Series:\n ...\n\n\n@overload\ndef to_timedelta(\n arg: list | tuple | range | ArrayLike | Index,\n unit: UnitChoices | None = ...,\n errors: DateTimeErrorChoices = ...,\n) -> TimedeltaIndex:\n ...\n\n\ndef to_timedelta(\n arg: str\n | int\n | float\n | timedelta\n | list\n | tuple\n | range\n | ArrayLike\n | Index\n | Series,\n unit: UnitChoices | None = None,\n errors: DateTimeErrorChoices = "raise",\n) -> Timedelta | TimedeltaIndex | Series:\n """\n Convert argument to timedelta.\n\n Timedeltas are absolute differences in times, expressed in difference\n units (e.g. days, hours, minutes, seconds). 
This method converts\n an argument from a recognized timedelta format / value into\n a Timedelta type.\n\n Parameters\n ----------\n arg : str, timedelta, list-like or Series\n The data to be converted to timedelta.\n\n .. versionchanged:: 2.0\n Strings with units 'M', 'Y' and 'y' do not represent\n unambiguous timedelta values and will raise an exception.\n\n unit : str, optional\n Denotes the unit of the arg for numeric `arg`. Defaults to ``"ns"``.\n\n Possible values:\n\n * 'W'\n * 'D' / 'days' / 'day'\n * 'hours' / 'hour' / 'hr' / 'h' / 'H'\n * 'm' / 'minute' / 'min' / 'minutes' / 'T'\n * 's' / 'seconds' / 'sec' / 'second' / 'S'\n * 'ms' / 'milliseconds' / 'millisecond' / 'milli' / 'millis' / 'L'\n * 'us' / 'microseconds' / 'microsecond' / 'micro' / 'micros' / 'U'\n * 'ns' / 'nanoseconds' / 'nano' / 'nanos' / 'nanosecond' / 'N'\n\n Must not be specified when `arg` contains strings and ``errors="raise"``.\n\n .. deprecated:: 2.2.0\n Units 'H', 'T', 'S', 'L', 'U' and 'N' are deprecated and will be removed\n in a future version. 
Please use 'h', 'min', 's', 'ms', 'us', and 'ns'\n instead of 'H', 'T', 'S', 'L', 'U' and 'N'.\n\n errors : {'ignore', 'raise', 'coerce'}, default 'raise'\n - If 'raise', then invalid parsing will raise an exception.\n - If 'coerce', then invalid parsing will be set as NaT.\n - If 'ignore', then invalid parsing will return the input.\n\n Returns\n -------\n timedelta\n If parsing succeeded.\n Return type depends on input:\n\n - list-like: TimedeltaIndex of timedelta64 dtype\n - Series: Series of timedelta64 dtype\n - scalar: Timedelta\n\n See Also\n --------\n DataFrame.astype : Cast argument to a specified dtype.\n to_datetime : Convert argument to datetime.\n convert_dtypes : Convert dtypes.\n\n Notes\n -----\n If the precision is higher than nanoseconds, the precision of the duration is\n truncated to nanoseconds for string inputs.\n\n Examples\n --------\n Parsing a single string to a Timedelta:\n\n >>> pd.to_timedelta('1 days 06:05:01.00003')\n Timedelta('1 days 06:05:01.000030')\n >>> pd.to_timedelta('15.5us')\n Timedelta('0 days 00:00:00.000015500')\n\n Parsing a list or array of strings:\n\n >>> pd.to_timedelta(['1 days 06:05:01.00003', '15.5us', 'nan'])\n TimedeltaIndex(['1 days 06:05:01.000030', '0 days 00:00:00.000015500', NaT],\n dtype='timedelta64[ns]', freq=None)\n\n Converting numbers by specifying the `unit` keyword argument:\n\n >>> pd.to_timedelta(np.arange(5), unit='s')\n TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:01', '0 days 00:00:02',\n '0 days 00:00:03', '0 days 00:00:04'],\n dtype='timedelta64[ns]', freq=None)\n >>> pd.to_timedelta(np.arange(5), unit='d')\n TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],\n dtype='timedelta64[ns]', freq=None)\n """\n if unit is not None:\n unit = parse_timedelta_unit(unit)\n disallow_ambiguous_unit(unit)\n\n if errors not in ("ignore", "raise", "coerce"):\n raise ValueError("errors must be one of 'ignore', 'raise', or 'coerce'.")\n if errors == "ignore":\n # GH#54467\n 
warnings.warn(\n "errors='ignore' is deprecated and will raise in a future version. "\n "Use to_timedelta without passing `errors` and catch exceptions "\n "explicitly instead",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n\n if arg is None:\n return arg\n elif isinstance(arg, ABCSeries):\n values = _convert_listlike(arg._values, unit=unit, errors=errors)\n return arg._constructor(values, index=arg.index, name=arg.name)\n elif isinstance(arg, ABCIndex):\n return _convert_listlike(arg, unit=unit, errors=errors, name=arg.name)\n elif isinstance(arg, np.ndarray) and arg.ndim == 0:\n # extract array scalar and process below\n # error: Incompatible types in assignment (expression has type "object",\n # variable has type "Union[str, int, float, timedelta, List[Any],\n # Tuple[Any, ...], Union[Union[ExtensionArray, ndarray[Any, Any]], Index,\n # Series]]") [assignment]\n arg = lib.item_from_zerodim(arg) # type: ignore[assignment]\n elif is_list_like(arg) and getattr(arg, "ndim", 1) == 1:\n return _convert_listlike(arg, unit=unit, errors=errors)\n elif getattr(arg, "ndim", 1) > 1:\n raise TypeError(\n "arg must be a string, timedelta, list, tuple, 1-d array, or Series"\n )\n\n if isinstance(arg, str) and unit is not None:\n raise ValueError("unit must not be specified if the input is/contains a str")\n\n # ...so it must be a scalar value. 
Return scalar.\n return _coerce_scalar_to_timedelta_type(arg, unit=unit, errors=errors)\n\n\ndef _coerce_scalar_to_timedelta_type(\n r, unit: UnitChoices | None = "ns", errors: DateTimeErrorChoices = "raise"\n):\n """Convert string 'r' to a timedelta object."""\n result: Timedelta | NaTType\n\n try:\n result = Timedelta(r, unit)\n except ValueError:\n if errors == "raise":\n raise\n if errors == "ignore":\n return r\n\n # coerce\n result = NaT\n\n return result\n\n\ndef _convert_listlike(\n arg,\n unit: UnitChoices | None = None,\n errors: DateTimeErrorChoices = "raise",\n name: Hashable | None = None,\n):\n """Convert a list of objects to a timedelta index object."""\n arg_dtype = getattr(arg, "dtype", None)\n if isinstance(arg, (list, tuple)) or arg_dtype is None:\n # This is needed only to ensure that in the case where we end up\n # returning arg (errors == "ignore"), and where the input is a\n # generator, we return a useful list-like instead of a\n # used-up generator\n if not hasattr(arg, "__array__"):\n arg = list(arg)\n arg = np.array(arg, dtype=object)\n elif isinstance(arg_dtype, ArrowDtype) and arg_dtype.kind == "m":\n return arg\n\n try:\n td64arr = sequence_to_td64ns(arg, unit=unit, errors=errors, copy=False)[0]\n except ValueError:\n if errors == "ignore":\n return arg\n else:\n # This else-block accounts for the cases when errors='raise'\n # and errors='coerce'. If errors == 'raise', these errors\n # should be raised. If errors == 'coerce', we shouldn't\n # expect any errors to be raised, since all parsing errors\n # cause coercion to pd.NaT. However, if an error / bug is\n # introduced that causes an Exception to be raised, we would\n # like to surface it.\n raise\n\n from pandas import TimedeltaIndex\n\n value = TimedeltaIndex(td64arr, name=name)\n return value\n
.venv\Lib\site-packages\pandas\core\tools\timedeltas.py
timedeltas.py
Python
8,858
0.95
0.088339
0.115385
vue-tools
640
2024-03-31T16:43:29.548430
BSD-3-Clause
false
977fa2b3a58924241f2729230ac0c01d
from __future__ import annotations\n\nfrom datetime import (\n datetime,\n time,\n)\nfrom typing import TYPE_CHECKING\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs.lib import is_list_like\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.generic import (\n ABCIndex,\n ABCSeries,\n)\nfrom pandas.core.dtypes.missing import notna\n\nif TYPE_CHECKING:\n from pandas._typing import DateTimeErrorChoices\n\n\ndef to_time(\n arg,\n format: str | None = None,\n infer_time_format: bool = False,\n errors: DateTimeErrorChoices = "raise",\n):\n """\n Parse time strings to time objects using fixed strptime formats ("%H:%M",\n "%H%M", "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p",\n "%I%M%S%p")\n\n Use infer_time_format if all the strings are in the same format to speed\n up conversion.\n\n Parameters\n ----------\n arg : string in time format, datetime.time, list, tuple, 1-d array, Series\n format : str, default None\n Format used to convert arg into a time object. If None, fixed formats\n are used.\n infer_time_format: bool, default False\n Infer the time format based on the first non-NaN element. If all\n strings are in the same format, this will speed up conversion.\n errors : {'ignore', 'raise', 'coerce'}, default 'raise'\n - If 'raise', then invalid parsing will raise an exception\n - If 'coerce', then invalid parsing will be set as None\n - If 'ignore', then invalid parsing will return the input\n\n Returns\n -------\n datetime.time\n """\n if errors == "ignore":\n # GH#54467\n warnings.warn(\n "errors='ignore' is deprecated and will raise in a future version. 
"\n "Use to_time without passing `errors` and catch exceptions "\n "explicitly instead",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n\n def _convert_listlike(arg, format):\n if isinstance(arg, (list, tuple)):\n arg = np.array(arg, dtype="O")\n\n elif getattr(arg, "ndim", 1) > 1:\n raise TypeError(\n "arg must be a string, datetime, list, tuple, 1-d array, or Series"\n )\n\n arg = np.asarray(arg, dtype="O")\n\n if infer_time_format and format is None:\n format = _guess_time_format_for_array(arg)\n\n times: list[time | None] = []\n if format is not None:\n for element in arg:\n try:\n times.append(datetime.strptime(element, format).time())\n except (ValueError, TypeError) as err:\n if errors == "raise":\n msg = (\n f"Cannot convert {element} to a time with given "\n f"format {format}"\n )\n raise ValueError(msg) from err\n if errors == "ignore":\n return arg\n else:\n times.append(None)\n else:\n formats = _time_formats[:]\n format_found = False\n for element in arg:\n time_object = None\n try:\n time_object = time.fromisoformat(element)\n except (ValueError, TypeError):\n for time_format in formats:\n try:\n time_object = datetime.strptime(element, time_format).time()\n if not format_found:\n # Put the found format in front\n fmt = formats.pop(formats.index(time_format))\n formats.insert(0, fmt)\n format_found = True\n break\n except (ValueError, TypeError):\n continue\n\n if time_object is not None:\n times.append(time_object)\n elif errors == "raise":\n raise ValueError(f"Cannot convert arg {arg} to a time")\n elif errors == "ignore":\n return arg\n else:\n times.append(None)\n\n return times\n\n if arg is None:\n return arg\n elif isinstance(arg, time):\n return arg\n elif isinstance(arg, ABCSeries):\n values = _convert_listlike(arg._values, format)\n return arg._constructor(values, index=arg.index, name=arg.name)\n elif isinstance(arg, ABCIndex):\n return _convert_listlike(arg, format)\n elif is_list_like(arg):\n return _convert_listlike(arg, 
format)\n\n return _convert_listlike(np.array([arg]), format)[0]\n\n\n# Fixed time formats for time parsing\n_time_formats = [\n "%H:%M",\n "%H%M",\n "%I:%M%p",\n "%I%M%p",\n "%H:%M:%S",\n "%H%M%S",\n "%I:%M:%S%p",\n "%I%M%S%p",\n]\n\n\ndef _guess_time_format_for_array(arr):\n # Try to guess the format based on the first non-NaN element\n non_nan_elements = notna(arr).nonzero()[0]\n if len(non_nan_elements):\n element = arr[non_nan_elements[0]]\n for time_format in _time_formats:\n try:\n datetime.strptime(element, time_format)\n return time_format\n except ValueError:\n pass\n\n return None\n
.venv\Lib\site-packages\pandas\core\tools\times.py
times.py
Python
5,373
0.95
0.14881
0.027778
vue-tools
393
2024-08-22T12:22:55.783288
Apache-2.0
false
76a72bbdf37faf5723ede72a315ca585
\n\n
.venv\Lib\site-packages\pandas\core\tools\__pycache__\datetimes.cpython-313.pyc
datetimes.cpython-313.pyc
Other
44,490
0.95
0.110429
0.016925
awesome-app
430
2024-09-05T04:25:08.466375
MIT
false
faf61c614524ce223b1c881af15b1c3b
\n\n
.venv\Lib\site-packages\pandas\core\tools\__pycache__\numeric.cpython-313.pyc
numeric.cpython-313.pyc
Other
11,004
0.8
0.058824
0.016393
vue-tools
837
2024-08-24T23:19:27.254216
GPL-3.0
false
405a3bb532e5d71dffee779b9ecfb840
\n\n
.venv\Lib\site-packages\pandas\core\tools\__pycache__\timedeltas.cpython-313.pyc
timedeltas.cpython-313.pyc
Other
8,731
0.8
0.025974
0.060606
awesome-app
885
2024-05-05T18:52:33.037537
MIT
false
8b25178ed546721f1b7152e8a21650c7
\n\n
.venv\Lib\site-packages\pandas\core\tools\__pycache__\times.cpython-313.pyc
times.cpython-313.pyc
Other
5,786
0.8
0.025974
0
python-kit
585
2023-11-11T10:26:39.615310
GPL-3.0
false
41d32279cd31bde6524c431398187157
\n\n
.venv\Lib\site-packages\pandas\core\tools\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
192
0.7
0
0
awesome-app
20
2024-12-13T19:41:49.504941
MIT
false
83ad22c1a37c496ea3c4a401479710e6
"""\ndata hash pandas / numpy objects\n"""\nfrom __future__ import annotations\n\nimport itertools\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\n\nfrom pandas._libs.hashing import hash_object_array\n\nfrom pandas.core.dtypes.common import is_list_like\nfrom pandas.core.dtypes.dtypes import CategoricalDtype\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCExtensionArray,\n ABCIndex,\n ABCMultiIndex,\n ABCSeries,\n)\n\nif TYPE_CHECKING:\n from collections.abc import (\n Hashable,\n Iterable,\n Iterator,\n )\n\n from pandas._typing import (\n ArrayLike,\n npt,\n )\n\n from pandas import (\n DataFrame,\n Index,\n MultiIndex,\n Series,\n )\n\n\n# 16 byte long hashing key\n_default_hash_key = "0123456789123456"\n\n\ndef combine_hash_arrays(\n arrays: Iterator[np.ndarray], num_items: int\n) -> npt.NDArray[np.uint64]:\n """\n Parameters\n ----------\n arrays : Iterator[np.ndarray]\n num_items : int\n\n Returns\n -------\n np.ndarray[uint64]\n\n Should be the same as CPython's tupleobject.c\n """\n try:\n first = next(arrays)\n except StopIteration:\n return np.array([], dtype=np.uint64)\n\n arrays = itertools.chain([first], arrays)\n\n mult = np.uint64(1000003)\n out = np.zeros_like(first) + np.uint64(0x345678)\n last_i = 0\n for i, a in enumerate(arrays):\n inverse_i = num_items - i\n out ^= a\n out *= mult\n mult += np.uint64(82520 + inverse_i + inverse_i)\n last_i = i\n assert last_i + 1 == num_items, "Fed in wrong num_items"\n out += np.uint64(97531)\n return out\n\n\ndef hash_pandas_object(\n obj: Index | DataFrame | Series,\n index: bool = True,\n encoding: str = "utf8",\n hash_key: str | None = _default_hash_key,\n categorize: bool = True,\n) -> Series:\n """\n Return a data hash of the Index/Series/DataFrame.\n\n Parameters\n ----------\n obj : Index, Series, or DataFrame\n index : bool, default True\n Include the index in the hash (if Series/DataFrame).\n encoding : str, default 'utf8'\n Encoding for data & key when strings.\n hash_key : 
str, default _default_hash_key\n Hash_key for string key to encode.\n categorize : bool, default True\n Whether to first categorize object arrays before hashing. This is more\n efficient when the array contains duplicate values.\n\n Returns\n -------\n Series of uint64, same length as the object\n\n Examples\n --------\n >>> pd.util.hash_pandas_object(pd.Series([1, 2, 3]))\n 0 14639053686158035780\n 1 3869563279212530728\n 2 393322362522515241\n dtype: uint64\n """\n from pandas import Series\n\n if hash_key is None:\n hash_key = _default_hash_key\n\n if isinstance(obj, ABCMultiIndex):\n return Series(hash_tuples(obj, encoding, hash_key), dtype="uint64", copy=False)\n\n elif isinstance(obj, ABCIndex):\n h = hash_array(obj._values, encoding, hash_key, categorize).astype(\n "uint64", copy=False\n )\n ser = Series(h, index=obj, dtype="uint64", copy=False)\n\n elif isinstance(obj, ABCSeries):\n h = hash_array(obj._values, encoding, hash_key, categorize).astype(\n "uint64", copy=False\n )\n if index:\n index_iter = (\n hash_pandas_object(\n obj.index,\n index=False,\n encoding=encoding,\n hash_key=hash_key,\n categorize=categorize,\n )._values\n for _ in [None]\n )\n arrays = itertools.chain([h], index_iter)\n h = combine_hash_arrays(arrays, 2)\n\n ser = Series(h, index=obj.index, dtype="uint64", copy=False)\n\n elif isinstance(obj, ABCDataFrame):\n hashes = (\n hash_array(series._values, encoding, hash_key, categorize)\n for _, series in obj.items()\n )\n num_items = len(obj.columns)\n if index:\n index_hash_generator = (\n hash_pandas_object(\n obj.index,\n index=False,\n encoding=encoding,\n hash_key=hash_key,\n categorize=categorize,\n )._values\n for _ in [None]\n )\n num_items += 1\n\n # keep `hashes` specifically a generator to keep mypy happy\n _hashes = itertools.chain(hashes, index_hash_generator)\n hashes = (x for x in _hashes)\n h = combine_hash_arrays(hashes, num_items)\n\n ser = Series(h, index=obj.index, dtype="uint64", copy=False)\n else:\n raise 
TypeError(f"Unexpected type for hashing {type(obj)}")\n\n return ser\n\n\ndef hash_tuples(\n vals: MultiIndex | Iterable[tuple[Hashable, ...]],\n encoding: str = "utf8",\n hash_key: str = _default_hash_key,\n) -> npt.NDArray[np.uint64]:\n """\n Hash an MultiIndex / listlike-of-tuples efficiently.\n\n Parameters\n ----------\n vals : MultiIndex or listlike-of-tuples\n encoding : str, default 'utf8'\n hash_key : str, default _default_hash_key\n\n Returns\n -------\n ndarray[np.uint64] of hashed values\n """\n if not is_list_like(vals):\n raise TypeError("must be convertible to a list-of-tuples")\n\n from pandas import (\n Categorical,\n MultiIndex,\n )\n\n if not isinstance(vals, ABCMultiIndex):\n mi = MultiIndex.from_tuples(vals)\n else:\n mi = vals\n\n # create a list-of-Categoricals\n cat_vals = [\n Categorical._simple_new(\n mi.codes[level],\n CategoricalDtype(categories=mi.levels[level], ordered=False),\n )\n for level in range(mi.nlevels)\n ]\n\n # hash the list-of-ndarrays\n hashes = (\n cat._hash_pandas_object(encoding=encoding, hash_key=hash_key, categorize=False)\n for cat in cat_vals\n )\n h = combine_hash_arrays(hashes, len(cat_vals))\n\n return h\n\n\ndef hash_array(\n vals: ArrayLike,\n encoding: str = "utf8",\n hash_key: str = _default_hash_key,\n categorize: bool = True,\n) -> npt.NDArray[np.uint64]:\n """\n Given a 1d array, return an array of deterministic integers.\n\n Parameters\n ----------\n vals : ndarray or ExtensionArray\n encoding : str, default 'utf8'\n Encoding for data & key when strings.\n hash_key : str, default _default_hash_key\n Hash_key for string key to encode.\n categorize : bool, default True\n Whether to first categorize object arrays before hashing. 
This is more\n efficient when the array contains duplicate values.\n\n Returns\n -------\n ndarray[np.uint64, ndim=1]\n Hashed values, same length as the vals.\n\n Examples\n --------\n >>> pd.util.hash_array(np.array([1, 2, 3]))\n array([ 6238072747940578789, 15839785061582574730, 2185194620014831856],\n dtype=uint64)\n """\n if not hasattr(vals, "dtype"):\n raise TypeError("must pass a ndarray-like")\n\n if isinstance(vals, ABCExtensionArray):\n return vals._hash_pandas_object(\n encoding=encoding, hash_key=hash_key, categorize=categorize\n )\n\n if not isinstance(vals, np.ndarray):\n # GH#42003\n raise TypeError(\n "hash_array requires np.ndarray or ExtensionArray, not "\n f"{type(vals).__name__}. Use hash_pandas_object instead."\n )\n\n return _hash_ndarray(vals, encoding, hash_key, categorize)\n\n\ndef _hash_ndarray(\n vals: np.ndarray,\n encoding: str = "utf8",\n hash_key: str = _default_hash_key,\n categorize: bool = True,\n) -> npt.NDArray[np.uint64]:\n """\n See hash_array.__doc__.\n """\n dtype = vals.dtype\n\n # _hash_ndarray only takes 64-bit values, so handle 128-bit by parts\n if np.issubdtype(dtype, np.complex128):\n hash_real = _hash_ndarray(vals.real, encoding, hash_key, categorize)\n hash_imag = _hash_ndarray(vals.imag, encoding, hash_key, categorize)\n return hash_real + 23 * hash_imag\n\n # First, turn whatever array this is into unsigned 64-bit ints, if we can\n # manage it.\n if dtype == bool:\n vals = vals.astype("u8")\n elif issubclass(dtype.type, (np.datetime64, np.timedelta64)):\n vals = vals.view("i8").astype("u8", copy=False)\n elif issubclass(dtype.type, np.number) and dtype.itemsize <= 8:\n vals = vals.view(f"u{vals.dtype.itemsize}").astype("u8")\n else:\n # With repeated values, its MUCH faster to categorize object dtypes,\n # then hash and rename categories. 
We allow skipping the categorization\n # when the values are known/likely to be unique.\n if categorize:\n from pandas import (\n Categorical,\n Index,\n factorize,\n )\n\n codes, categories = factorize(vals, sort=False)\n dtype = CategoricalDtype(categories=Index(categories), ordered=False)\n cat = Categorical._simple_new(codes, dtype)\n return cat._hash_pandas_object(\n encoding=encoding, hash_key=hash_key, categorize=False\n )\n\n try:\n vals = hash_object_array(vals, hash_key, encoding)\n except TypeError:\n # we have mixed types\n vals = hash_object_array(\n vals.astype(str).astype(object), hash_key, encoding\n )\n\n # Then, redistribute these 64-bit ints within the space of 64-bit ints\n vals ^= vals >> 30\n vals *= np.uint64(0xBF58476D1CE4E5B9)\n vals ^= vals >> 27\n vals *= np.uint64(0x94D049BB133111EB)\n vals ^= vals >> 31\n return vals\n
.venv\Lib\site-packages\pandas\core\util\hashing.py
hashing.py
Python
9,657
0.95
0.100295
0.045455
vue-tools
310
2024-05-25T04:05:39.798156
Apache-2.0
false
79420d567d5f2d10495f95aacf702a20
"""Common utilities for Numba operations"""\nfrom __future__ import annotations\n\nimport types\nfrom typing import (\n TYPE_CHECKING,\n Callable,\n)\n\nimport numpy as np\n\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.errors import NumbaUtilError\n\nGLOBAL_USE_NUMBA: bool = False\n\n\ndef maybe_use_numba(engine: str | None) -> bool:\n """Signal whether to use numba routines."""\n return engine == "numba" or (engine is None and GLOBAL_USE_NUMBA)\n\n\ndef set_use_numba(enable: bool = False) -> None:\n global GLOBAL_USE_NUMBA\n if enable:\n import_optional_dependency("numba")\n GLOBAL_USE_NUMBA = enable\n\n\ndef get_jit_arguments(\n engine_kwargs: dict[str, bool] | None = None, kwargs: dict | None = None\n) -> dict[str, bool]:\n """\n Return arguments to pass to numba.JIT, falling back on pandas default JIT settings.\n\n Parameters\n ----------\n engine_kwargs : dict, default None\n user passed keyword arguments for numba.JIT\n kwargs : dict, default None\n user passed keyword arguments to pass into the JITed function\n\n Returns\n -------\n dict[str, bool]\n nopython, nogil, parallel\n\n Raises\n ------\n NumbaUtilError\n """\n if engine_kwargs is None:\n engine_kwargs = {}\n\n nopython = engine_kwargs.get("nopython", True)\n if kwargs and nopython:\n raise NumbaUtilError(\n "numba does not support kwargs with nopython=True: "\n "https://github.com/numba/numba/issues/2916"\n )\n nogil = engine_kwargs.get("nogil", False)\n parallel = engine_kwargs.get("parallel", False)\n return {"nopython": nopython, "nogil": nogil, "parallel": parallel}\n\n\ndef jit_user_function(func: Callable) -> Callable:\n """\n If user function is not jitted already, mark the user's function\n as jitable.\n\n Parameters\n ----------\n func : function\n user defined function\n\n Returns\n -------\n function\n Numba JITed function, or function marked as JITable by numba\n """\n if TYPE_CHECKING:\n import numba\n else:\n numba = 
import_optional_dependency("numba")\n\n if numba.extending.is_jitted(func):\n # Don't jit a user passed jitted function\n numba_func = func\n elif getattr(np, func.__name__, False) is func or isinstance(\n func, types.BuiltinFunctionType\n ):\n # Not necessary to jit builtins or np functions\n # This will mess up register_jitable\n numba_func = func\n else:\n numba_func = numba.extending.register_jitable(func)\n\n return numba_func\n
.venv\Lib\site-packages\pandas\core\util\numba_.py
numba_.py
Python
2,582
0.95
0.204082
0.038462
vue-tools
338
2024-09-07T19:17:05.593089
GPL-3.0
false
6ecce918374ef282b5c8257ee55ee4f8
\n\n
.venv\Lib\site-packages\pandas\core\util\__pycache__\hashing.cpython-313.pyc
hashing.cpython-313.pyc
Other
11,699
0.95
0.032787
0.011696
python-kit
576
2024-11-30T12:25:36.707242
BSD-3-Clause
false
14796727b837db1efd73388c25c90cd2
\n\n
.venv\Lib\site-packages\pandas\core\util\__pycache__\numba_.cpython-313.pyc
numba_.cpython-313.pyc
Other
3,159
0.95
0.153846
0
react-lib
758
2024-05-01T11:17:36.112882
Apache-2.0
false
40988926078b1ce51b9eaea2eab9757c
\n\n
.venv\Lib\site-packages\pandas\core\util\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
191
0.7
0
0
node-utils
864
2025-02-16T08:24:14.704712
BSD-3-Clause
false
916af8ac8271e07d27d2f1579843ff80
"""Common utility functions for rolling operations"""\nfrom __future__ import annotations\n\nfrom collections import defaultdict\nfrom typing import cast\n\nimport numpy as np\n\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCSeries,\n)\n\nfrom pandas.core.indexes.api import MultiIndex\n\n\ndef flex_binary_moment(arg1, arg2, f, pairwise: bool = False):\n if isinstance(arg1, ABCSeries) and isinstance(arg2, ABCSeries):\n X, Y = prep_binary(arg1, arg2)\n return f(X, Y)\n\n elif isinstance(arg1, ABCDataFrame):\n from pandas import DataFrame\n\n def dataframe_from_int_dict(data, frame_template) -> DataFrame:\n result = DataFrame(data, index=frame_template.index)\n if len(result.columns) > 0:\n result.columns = frame_template.columns[result.columns]\n else:\n result.columns = frame_template.columns.copy()\n return result\n\n results = {}\n if isinstance(arg2, ABCDataFrame):\n if pairwise is False:\n if arg1 is arg2:\n # special case in order to handle duplicate column names\n for i in range(len(arg1.columns)):\n results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i])\n return dataframe_from_int_dict(results, arg1)\n else:\n if not arg1.columns.is_unique:\n raise ValueError("'arg1' columns are not unique")\n if not arg2.columns.is_unique:\n raise ValueError("'arg2' columns are not unique")\n X, Y = arg1.align(arg2, join="outer")\n X, Y = prep_binary(X, Y)\n res_columns = arg1.columns.union(arg2.columns)\n for col in res_columns:\n if col in X and col in Y:\n results[col] = f(X[col], Y[col])\n return DataFrame(results, index=X.index, columns=res_columns)\n elif pairwise is True:\n results = defaultdict(dict)\n for i in range(len(arg1.columns)):\n for j in range(len(arg2.columns)):\n if j < i and arg2 is arg1:\n # Symmetric case\n results[i][j] = results[j][i]\n else:\n results[i][j] = f(\n *prep_binary(arg1.iloc[:, i], arg2.iloc[:, j])\n )\n\n from pandas import concat\n\n result_index = arg1.index.union(arg2.index)\n if len(result_index):\n # construct result 
frame\n result = concat(\n [\n concat(\n [results[i][j] for j in range(len(arg2.columns))],\n ignore_index=True,\n )\n for i in range(len(arg1.columns))\n ],\n ignore_index=True,\n axis=1,\n )\n result.columns = arg1.columns\n\n # set the index and reorder\n if arg2.columns.nlevels > 1:\n # mypy needs to know columns is a MultiIndex, Index doesn't\n # have levels attribute\n arg2.columns = cast(MultiIndex, arg2.columns)\n # GH 21157: Equivalent to MultiIndex.from_product(\n # [result_index], <unique combinations of arg2.columns.levels>,\n # )\n # A normal MultiIndex.from_product will produce too many\n # combinations.\n result_level = np.tile(\n result_index, len(result) // len(result_index)\n )\n arg2_levels = (\n np.repeat(\n arg2.columns.get_level_values(i),\n len(result) // len(arg2.columns),\n )\n for i in range(arg2.columns.nlevels)\n )\n result_names = list(arg2.columns.names) + [result_index.name]\n result.index = MultiIndex.from_arrays(\n [*arg2_levels, result_level], names=result_names\n )\n # GH 34440\n num_levels = len(result.index.levels)\n new_order = [num_levels - 1] + list(range(num_levels - 1))\n result = result.reorder_levels(new_order).sort_index()\n else:\n result.index = MultiIndex.from_product(\n [range(len(arg2.columns)), range(len(result_index))]\n )\n result = result.swaplevel(1, 0).sort_index()\n result.index = MultiIndex.from_product(\n [result_index] + [arg2.columns]\n )\n else:\n # empty result\n result = DataFrame(\n index=MultiIndex(\n levels=[arg1.index, arg2.columns], codes=[[], []]\n ),\n columns=arg2.columns,\n dtype="float64",\n )\n\n # reset our index names to arg1 names\n # reset our column names to arg2 names\n # careful not to mutate the original names\n result.columns = result.columns.set_names(arg1.columns.names)\n result.index = result.index.set_names(\n result_index.names + arg2.columns.names\n )\n\n return result\n else:\n results = {\n i: f(*prep_binary(arg1.iloc[:, i], arg2))\n for i in range(len(arg1.columns))\n }\n 
return dataframe_from_int_dict(results, arg1)\n\n else:\n return flex_binary_moment(arg2, arg1, f)\n\n\ndef zsqrt(x):\n with np.errstate(all="ignore"):\n result = np.sqrt(x)\n mask = x < 0\n\n if isinstance(x, ABCDataFrame):\n if mask._values.any():\n result[mask] = 0\n else:\n if mask.any():\n result[mask] = 0\n\n return result\n\n\ndef prep_binary(arg1, arg2):\n # mask out values, this also makes a common index...\n X = arg1 + 0 * arg2\n Y = arg2 + 0 * arg1\n\n return X, Y\n
.venv\Lib\site-packages\pandas\core\window\common.py
common.py
Python
6,714
0.95
0.159763
0.122449
python-kit
700
2024-07-04T22:28:53.269838
GPL-3.0
false
fcd2bc1efd3ba85efde2bf45c5bd17d7
"""Any shareable docstring components for rolling/expanding/ewm"""\nfrom __future__ import annotations\n\nfrom textwrap import dedent\n\nfrom pandas.core.shared_docs import _shared_docs\n\n_shared_docs = dict(**_shared_docs)\n\n\ndef create_section_header(header: str) -> str:\n """Create numpydoc section header"""\n return f"{header}\n{'-' * len(header)}\n"\n\n\ntemplate_header = "\nCalculate the {window_method} {aggregation_description}.\n\n"\n\ntemplate_returns = dedent(\n """\n Series or DataFrame\n Return type is the same as the original object with ``np.float64`` dtype.\n\n """\n).replace("\n", "", 1)\n\ntemplate_see_also = dedent(\n """\n pandas.Series.{window_method} : Calling {window_method} with Series data.\n pandas.DataFrame.{window_method} : Calling {window_method} with DataFrames.\n pandas.Series.{agg_method} : Aggregating {agg_method} for Series.\n pandas.DataFrame.{agg_method} : Aggregating {agg_method} for DataFrame.\n\n """\n).replace("\n", "", 1)\n\nkwargs_numeric_only = dedent(\n """\n numeric_only : bool, default False\n Include only float, int, boolean columns.\n\n .. versionadded:: 1.5.0\n\n """\n).replace("\n", "", 1)\n\nkwargs_scipy = dedent(\n """\n **kwargs\n Keyword arguments to configure the ``SciPy`` weighted window type.\n\n """\n).replace("\n", "", 1)\n\nwindow_apply_parameters = dedent(\n """\n func : function\n Must produce a single value from an ndarray input if ``raw=True``\n or a single value from a Series if ``raw=False``. 
Can also accept a\n Numba JIT function with ``engine='numba'`` specified.\n\n raw : bool, default False\n * ``False`` : passes each row or column as a Series to the\n function.\n * ``True`` : the passed function will receive ndarray\n objects instead.\n If you are just applying a NumPy reduction function this will\n achieve much better performance.\n\n engine : str, default None\n * ``'cython'`` : Runs rolling apply through C-extensions from cython.\n * ``'numba'`` : Runs rolling apply through JIT compiled code from numba.\n Only available when ``raw`` is set to ``True``.\n * ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba``\n\n engine_kwargs : dict, default None\n * For ``'cython'`` engine, there are no accepted ``engine_kwargs``\n * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``\n and ``parallel`` dictionary keys. The values must either be ``True`` or\n ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is\n ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be\n applied to both the ``func`` and the ``apply`` rolling aggregation.\n\n args : tuple, default None\n Positional arguments to be passed into func.\n\n kwargs : dict, default None\n Keyword arguments to be passed into func.\n\n """\n).replace("\n", "", 1)\n\nnumba_notes = (\n "See :ref:`window.numba_engine` and :ref:`enhancingperf.numba` for "\n "extended documentation and performance considerations for the Numba engine.\n\n"\n)\n\n\ndef window_agg_numba_parameters(version: str = "1.3") -> str:\n return (\n dedent(\n """\n engine : str, default None\n * ``'cython'`` : Runs the operation through C-extensions from cython.\n * ``'numba'`` : Runs the operation through JIT compiled code from numba.\n * ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba``\n\n .. 
versionadded:: {version}.0\n\n engine_kwargs : dict, default None\n * For ``'cython'`` engine, there are no accepted ``engine_kwargs``\n * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``\n and ``parallel`` dictionary keys. The values must either be ``True`` or\n ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is\n ``{{'nopython': True, 'nogil': False, 'parallel': False}}``\n\n .. versionadded:: {version}.0\n\n """\n )\n .replace("\n", "", 1)\n .replace("{version}", version)\n )\n
.venv\Lib\site-packages\pandas\core\window\doc.py
doc.py
Python
4,158
0.85
0.137931
0.141304
react-lib
415
2024-09-02T13:43:54.570508
GPL-3.0
false
23a811f1264e790c049c4bbcc18bba06
from __future__ import annotations\n\nimport datetime\nfrom functools import partial\nfrom textwrap import dedent\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\n\nfrom pandas._libs.tslibs import Timedelta\nimport pandas._libs.window.aggregations as window_aggregations\nfrom pandas.util._decorators import doc\n\nfrom pandas.core.dtypes.common import (\n is_datetime64_dtype,\n is_numeric_dtype,\n)\nfrom pandas.core.dtypes.dtypes import DatetimeTZDtype\nfrom pandas.core.dtypes.generic import ABCSeries\nfrom pandas.core.dtypes.missing import isna\n\nfrom pandas.core import common\nfrom pandas.core.arrays.datetimelike import dtype_to_unit\nfrom pandas.core.indexers.objects import (\n BaseIndexer,\n ExponentialMovingWindowIndexer,\n GroupbyIndexer,\n)\nfrom pandas.core.util.numba_ import (\n get_jit_arguments,\n maybe_use_numba,\n)\nfrom pandas.core.window.common import zsqrt\nfrom pandas.core.window.doc import (\n _shared_docs,\n create_section_header,\n kwargs_numeric_only,\n numba_notes,\n template_header,\n template_returns,\n template_see_also,\n window_agg_numba_parameters,\n)\nfrom pandas.core.window.numba_ import (\n generate_numba_ewm_func,\n generate_numba_ewm_table_func,\n)\nfrom pandas.core.window.online import (\n EWMMeanState,\n generate_online_numba_ewma_func,\n)\nfrom pandas.core.window.rolling import (\n BaseWindow,\n BaseWindowGroupby,\n)\n\nif TYPE_CHECKING:\n from pandas._typing import (\n Axis,\n TimedeltaConvertibleTypes,\n npt,\n )\n\n from pandas import (\n DataFrame,\n Series,\n )\n from pandas.core.generic import NDFrame\n\n\ndef get_center_of_mass(\n comass: float | None,\n span: float | None,\n halflife: float | None,\n alpha: float | None,\n) -> float:\n valid_count = common.count_not_none(comass, span, halflife, alpha)\n if valid_count > 1:\n raise ValueError("comass, span, halflife, and alpha are mutually exclusive")\n\n # Convert to center of mass; domain checks ensure 0 < alpha <= 1\n if comass is not None:\n if comass < 0:\n 
raise ValueError("comass must satisfy: comass >= 0")\n elif span is not None:\n if span < 1:\n raise ValueError("span must satisfy: span >= 1")\n comass = (span - 1) / 2\n elif halflife is not None:\n if halflife <= 0:\n raise ValueError("halflife must satisfy: halflife > 0")\n decay = 1 - np.exp(np.log(0.5) / halflife)\n comass = 1 / decay - 1\n elif alpha is not None:\n if alpha <= 0 or alpha > 1:\n raise ValueError("alpha must satisfy: 0 < alpha <= 1")\n comass = (1 - alpha) / alpha\n else:\n raise ValueError("Must pass one of comass, span, halflife, or alpha")\n\n return float(comass)\n\n\ndef _calculate_deltas(\n times: np.ndarray | NDFrame,\n halflife: float | TimedeltaConvertibleTypes | None,\n) -> npt.NDArray[np.float64]:\n """\n Return the diff of the times divided by the half-life. These values are used in\n the calculation of the ewm mean.\n\n Parameters\n ----------\n times : np.ndarray, Series\n Times corresponding to the observations. Must be monotonically increasing\n and ``datetime64[ns]`` dtype.\n halflife : float, str, timedelta, optional\n Half-life specifying the decay\n\n Returns\n -------\n np.ndarray\n Diff of the times divided by the half-life\n """\n unit = dtype_to_unit(times.dtype)\n if isinstance(times, ABCSeries):\n times = times._values\n _times = np.asarray(times.view(np.int64), dtype=np.float64)\n _halflife = float(Timedelta(halflife).as_unit(unit)._value)\n return np.diff(_times) / _halflife\n\n\nclass ExponentialMovingWindow(BaseWindow):\n r"""\n Provide exponentially weighted (EW) calculations.\n\n Exactly one of ``com``, ``span``, ``halflife``, or ``alpha`` must be\n provided if ``times`` is not provided. 
If ``times`` is provided,\n ``halflife`` and one of ``com``, ``span`` or ``alpha`` may be provided.\n\n Parameters\n ----------\n com : float, optional\n Specify decay in terms of center of mass\n\n :math:`\alpha = 1 / (1 + com)`, for :math:`com \geq 0`.\n\n span : float, optional\n Specify decay in terms of span\n\n :math:`\alpha = 2 / (span + 1)`, for :math:`span \geq 1`.\n\n halflife : float, str, timedelta, optional\n Specify decay in terms of half-life\n\n :math:`\alpha = 1 - \exp\left(-\ln(2) / halflife\right)`, for\n :math:`halflife > 0`.\n\n If ``times`` is specified, a timedelta convertible unit over which an\n observation decays to half its value. Only applicable to ``mean()``,\n and halflife value will not apply to the other functions.\n\n alpha : float, optional\n Specify smoothing factor :math:`\alpha` directly\n\n :math:`0 < \alpha \leq 1`.\n\n min_periods : int, default 0\n Minimum number of observations in window required to have a value;\n otherwise, result is ``np.nan``.\n\n adjust : bool, default True\n Divide by decaying adjustment factor in beginning periods to account\n for imbalance in relative weightings (viewing EWMA as a moving average).\n\n - When ``adjust=True`` (default), the EW function is calculated using weights\n :math:`w_i = (1 - \alpha)^i`. For example, the EW moving average of the series\n [:math:`x_0, x_1, ..., x_t`] would be:\n\n .. math::\n y_t = \frac{x_t + (1 - \alpha)x_{t-1} + (1 - \alpha)^2 x_{t-2} + ... + (1 -\n \alpha)^t x_0}{1 + (1 - \alpha) + (1 - \alpha)^2 + ... + (1 - \alpha)^t}\n\n - When ``adjust=False``, the exponentially weighted function is calculated\n recursively:\n\n .. 
math::\n \begin{split}\n y_0 &= x_0\\\n y_t &= (1 - \alpha) y_{t-1} + \alpha x_t,\n \end{split}\n ignore_na : bool, default False\n Ignore missing values when calculating weights.\n\n - When ``ignore_na=False`` (default), weights are based on absolute positions.\n For example, the weights of :math:`x_0` and :math:`x_2` used in calculating\n the final weighted average of [:math:`x_0`, None, :math:`x_2`] are\n :math:`(1-\alpha)^2` and :math:`1` if ``adjust=True``, and\n :math:`(1-\alpha)^2` and :math:`\alpha` if ``adjust=False``.\n\n - When ``ignore_na=True``, weights are based\n on relative positions. For example, the weights of :math:`x_0` and :math:`x_2`\n used in calculating the final weighted average of\n [:math:`x_0`, None, :math:`x_2`] are :math:`1-\alpha` and :math:`1` if\n ``adjust=True``, and :math:`1-\alpha` and :math:`\alpha` if ``adjust=False``.\n\n axis : {0, 1}, default 0\n If ``0`` or ``'index'``, calculate across the rows.\n\n If ``1`` or ``'columns'``, calculate across the columns.\n\n For `Series` this parameter is unused and defaults to 0.\n\n times : np.ndarray, Series, default None\n\n Only applicable to ``mean()``.\n\n Times corresponding to the observations. Must be monotonically increasing and\n ``datetime64[ns]`` dtype.\n\n If 1-D array like, a sequence with the same shape as the observations.\n\n method : str {'single', 'table'}, default 'single'\n .. 
versionadded:: 1.4.0\n\n Execute the rolling operation per single column or row (``'single'``)\n or over the entire object (``'table'``).\n\n This argument is only implemented when specifying ``engine='numba'``\n in the method call.\n\n Only applicable to ``mean()``\n\n Returns\n -------\n pandas.api.typing.ExponentialMovingWindow\n\n See Also\n --------\n rolling : Provides rolling window calculations.\n expanding : Provides expanding transformations.\n\n Notes\n -----\n See :ref:`Windowing Operations <window.exponentially_weighted>`\n for further usage details and examples.\n\n Examples\n --------\n >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})\n >>> df\n B\n 0 0.0\n 1 1.0\n 2 2.0\n 3 NaN\n 4 4.0\n\n >>> df.ewm(com=0.5).mean()\n B\n 0 0.000000\n 1 0.750000\n 2 1.615385\n 3 1.615385\n 4 3.670213\n >>> df.ewm(alpha=2 / 3).mean()\n B\n 0 0.000000\n 1 0.750000\n 2 1.615385\n 3 1.615385\n 4 3.670213\n\n **adjust**\n\n >>> df.ewm(com=0.5, adjust=True).mean()\n B\n 0 0.000000\n 1 0.750000\n 2 1.615385\n 3 1.615385\n 4 3.670213\n >>> df.ewm(com=0.5, adjust=False).mean()\n B\n 0 0.000000\n 1 0.666667\n 2 1.555556\n 3 1.555556\n 4 3.650794\n\n **ignore_na**\n\n >>> df.ewm(com=0.5, ignore_na=True).mean()\n B\n 0 0.000000\n 1 0.750000\n 2 1.615385\n 3 1.615385\n 4 3.225000\n >>> df.ewm(com=0.5, ignore_na=False).mean()\n B\n 0 0.000000\n 1 0.750000\n 2 1.615385\n 3 1.615385\n 4 3.670213\n\n **times**\n\n Exponentially weighted mean with weights calculated with a timedelta ``halflife``\n relative to ``times``.\n\n >>> times = ['2020-01-01', '2020-01-03', '2020-01-10', '2020-01-15', '2020-01-17']\n >>> df.ewm(halflife='4 days', times=pd.DatetimeIndex(times)).mean()\n B\n 0 0.000000\n 1 0.585786\n 2 1.523889\n 3 1.523889\n 4 3.233686\n """\n\n _attributes = [\n "com",\n "span",\n "halflife",\n "alpha",\n "min_periods",\n "adjust",\n "ignore_na",\n "axis",\n "times",\n "method",\n ]\n\n def __init__(\n self,\n obj: NDFrame,\n com: float | None = None,\n span: float | None = 
None,\n halflife: float | TimedeltaConvertibleTypes | None = None,\n alpha: float | None = None,\n min_periods: int | None = 0,\n adjust: bool = True,\n ignore_na: bool = False,\n axis: Axis = 0,\n times: np.ndarray | NDFrame | None = None,\n method: str = "single",\n *,\n selection=None,\n ) -> None:\n super().__init__(\n obj=obj,\n min_periods=1 if min_periods is None else max(int(min_periods), 1),\n on=None,\n center=False,\n closed=None,\n method=method,\n axis=axis,\n selection=selection,\n )\n self.com = com\n self.span = span\n self.halflife = halflife\n self.alpha = alpha\n self.adjust = adjust\n self.ignore_na = ignore_na\n self.times = times\n if self.times is not None:\n if not self.adjust:\n raise NotImplementedError("times is not supported with adjust=False.")\n times_dtype = getattr(self.times, "dtype", None)\n if not (\n is_datetime64_dtype(times_dtype)\n or isinstance(times_dtype, DatetimeTZDtype)\n ):\n raise ValueError("times must be datetime64 dtype.")\n if len(self.times) != len(obj):\n raise ValueError("times must be the same length as the object.")\n if not isinstance(self.halflife, (str, datetime.timedelta, np.timedelta64)):\n raise ValueError("halflife must be a timedelta convertible object")\n if isna(self.times).any():\n raise ValueError("Cannot convert NaT values to integer")\n self._deltas = _calculate_deltas(self.times, self.halflife)\n # Halflife is no longer applicable when calculating COM\n # But allow COM to still be calculated if the user passes other decay args\n if common.count_not_none(self.com, self.span, self.alpha) > 0:\n self._com = get_center_of_mass(self.com, self.span, None, self.alpha)\n else:\n self._com = 1.0\n else:\n if self.halflife is not None and isinstance(\n self.halflife, (str, datetime.timedelta, np.timedelta64)\n ):\n raise ValueError(\n "halflife can only be a timedelta convertible argument if "\n "times is not None."\n )\n # Without times, points are equally spaced\n self._deltas = np.ones(\n 
max(self.obj.shape[self.axis] - 1, 0), dtype=np.float64\n )\n self._com = get_center_of_mass(\n # error: Argument 3 to "get_center_of_mass" has incompatible type\n # "Union[float, Any, None, timedelta64, signedinteger[_64Bit]]";\n # expected "Optional[float]"\n self.com,\n self.span,\n self.halflife, # type: ignore[arg-type]\n self.alpha,\n )\n\n def _check_window_bounds(\n self, start: np.ndarray, end: np.ndarray, num_vals: int\n ) -> None:\n # emw algorithms are iterative with each point\n # ExponentialMovingWindowIndexer "bounds" are the entire window\n pass\n\n def _get_window_indexer(self) -> BaseIndexer:\n """\n Return an indexer class that will compute the window start and end bounds\n """\n return ExponentialMovingWindowIndexer()\n\n def online(\n self, engine: str = "numba", engine_kwargs=None\n ) -> OnlineExponentialMovingWindow:\n """\n Return an ``OnlineExponentialMovingWindow`` object to calculate\n exponentially moving window aggregations in an online method.\n\n .. versionadded:: 1.3.0\n\n Parameters\n ----------\n engine: str, default ``'numba'``\n Execution engine to calculate online aggregations.\n Applies to all supported aggregation methods.\n\n engine_kwargs : dict, default None\n Applies to all supported aggregation methods.\n\n * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``\n and ``parallel`` dictionary keys. The values must either be ``True`` or\n ``False``. 
The default ``engine_kwargs`` for the ``'numba'`` engine is\n ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be\n applied to the function\n\n Returns\n -------\n OnlineExponentialMovingWindow\n """\n return OnlineExponentialMovingWindow(\n obj=self.obj,\n com=self.com,\n span=self.span,\n halflife=self.halflife,\n alpha=self.alpha,\n min_periods=self.min_periods,\n adjust=self.adjust,\n ignore_na=self.ignore_na,\n axis=self.axis,\n times=self.times,\n engine=engine,\n engine_kwargs=engine_kwargs,\n selection=self._selection,\n )\n\n @doc(\n _shared_docs["aggregate"],\n see_also=dedent(\n """\n See Also\n --------\n pandas.DataFrame.rolling.aggregate\n """\n ),\n examples=dedent(\n """\n Examples\n --------\n >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})\n >>> df\n A B C\n 0 1 4 7\n 1 2 5 8\n 2 3 6 9\n\n >>> df.ewm(alpha=0.5).mean()\n A B C\n 0 1.000000 4.000000 7.000000\n 1 1.666667 4.666667 7.666667\n 2 2.428571 5.428571 8.428571\n """\n ),\n klass="Series/Dataframe",\n axis="",\n )\n def aggregate(self, func, *args, **kwargs):\n return super().aggregate(func, *args, **kwargs)\n\n agg = aggregate\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n kwargs_numeric_only,\n window_agg_numba_parameters(),\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n create_section_header("Notes"),\n numba_notes,\n create_section_header("Examples"),\n dedent(\n """\\n >>> ser = pd.Series([1, 2, 3, 4])\n >>> ser.ewm(alpha=.2).mean()\n 0 1.000000\n 1 1.555556\n 2 2.147541\n 3 2.775068\n dtype: float64\n """\n ),\n window_method="ewm",\n aggregation_description="(exponential weighted moment) mean",\n agg_method="mean",\n )\n def mean(\n self,\n numeric_only: bool = False,\n engine=None,\n engine_kwargs=None,\n ):\n if maybe_use_numba(engine):\n if self.method == "single":\n func = generate_numba_ewm_func\n else:\n func = generate_numba_ewm_table_func\n 
ewm_func = func(\n **get_jit_arguments(engine_kwargs),\n com=self._com,\n adjust=self.adjust,\n ignore_na=self.ignore_na,\n deltas=tuple(self._deltas),\n normalize=True,\n )\n return self._apply(ewm_func, name="mean")\n elif engine in ("cython", None):\n if engine_kwargs is not None:\n raise ValueError("cython engine does not accept engine_kwargs")\n\n deltas = None if self.times is None else self._deltas\n window_func = partial(\n window_aggregations.ewm,\n com=self._com,\n adjust=self.adjust,\n ignore_na=self.ignore_na,\n deltas=deltas,\n normalize=True,\n )\n return self._apply(window_func, name="mean", numeric_only=numeric_only)\n else:\n raise ValueError("engine must be either 'numba' or 'cython'")\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n kwargs_numeric_only,\n window_agg_numba_parameters(),\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n create_section_header("Notes"),\n numba_notes,\n create_section_header("Examples"),\n dedent(\n """\\n >>> ser = pd.Series([1, 2, 3, 4])\n >>> ser.ewm(alpha=.2).sum()\n 0 1.000\n 1 2.800\n 2 5.240\n 3 8.192\n dtype: float64\n """\n ),\n window_method="ewm",\n aggregation_description="(exponential weighted moment) sum",\n agg_method="sum",\n )\n def sum(\n self,\n numeric_only: bool = False,\n engine=None,\n engine_kwargs=None,\n ):\n if not self.adjust:\n raise NotImplementedError("sum is not implemented with adjust=False")\n if maybe_use_numba(engine):\n if self.method == "single":\n func = generate_numba_ewm_func\n else:\n func = generate_numba_ewm_table_func\n ewm_func = func(\n **get_jit_arguments(engine_kwargs),\n com=self._com,\n adjust=self.adjust,\n ignore_na=self.ignore_na,\n deltas=tuple(self._deltas),\n normalize=False,\n )\n return self._apply(ewm_func, name="sum")\n elif engine in ("cython", None):\n if engine_kwargs is not None:\n raise ValueError("cython engine does not accept engine_kwargs")\n\n deltas = None if 
self.times is None else self._deltas\n window_func = partial(\n window_aggregations.ewm,\n com=self._com,\n adjust=self.adjust,\n ignore_na=self.ignore_na,\n deltas=deltas,\n normalize=False,\n )\n return self._apply(window_func, name="sum", numeric_only=numeric_only)\n else:\n raise ValueError("engine must be either 'numba' or 'cython'")\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n dedent(\n """\\n bias : bool, default False\n Use a standard estimation bias correction.\n """\n ),\n kwargs_numeric_only,\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n create_section_header("Examples"),\n dedent(\n """\\n >>> ser = pd.Series([1, 2, 3, 4])\n >>> ser.ewm(alpha=.2).std()\n 0 NaN\n 1 0.707107\n 2 0.995893\n 3 1.277320\n dtype: float64\n """\n ),\n window_method="ewm",\n aggregation_description="(exponential weighted moment) standard deviation",\n agg_method="std",\n )\n def std(self, bias: bool = False, numeric_only: bool = False):\n if (\n numeric_only\n and self._selected_obj.ndim == 1\n and not is_numeric_dtype(self._selected_obj.dtype)\n ):\n # Raise directly so error message says std instead of var\n raise NotImplementedError(\n f"{type(self).__name__}.std does not implement numeric_only"\n )\n return zsqrt(self.var(bias=bias, numeric_only=numeric_only))\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n dedent(\n """\\n bias : bool, default False\n Use a standard estimation bias correction.\n """\n ),\n kwargs_numeric_only,\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n create_section_header("Examples"),\n dedent(\n """\\n >>> ser = pd.Series([1, 2, 3, 4])\n >>> ser.ewm(alpha=.2).var()\n 0 NaN\n 1 0.500000\n 2 0.991803\n 3 1.631547\n dtype: float64\n """\n ),\n window_method="ewm",\n aggregation_description="(exponential weighted moment) variance",\n agg_method="var",\n )\n def var(self, 
bias: bool = False, numeric_only: bool = False):\n window_func = window_aggregations.ewmcov\n wfunc = partial(\n window_func,\n com=self._com,\n adjust=self.adjust,\n ignore_na=self.ignore_na,\n bias=bias,\n )\n\n def var_func(values, begin, end, min_periods):\n return wfunc(values, begin, end, min_periods, values)\n\n return self._apply(var_func, name="var", numeric_only=numeric_only)\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n dedent(\n """\\n other : Series or DataFrame , optional\n If not supplied then will default to self and produce pairwise\n output.\n pairwise : bool, default None\n If False then only matching columns between self and other will be\n used and the output will be a DataFrame.\n If True then all pairwise combinations will be calculated and the\n output will be a MultiIndex DataFrame in the case of DataFrame\n inputs. In the case of missing elements, only complete pairwise\n observations will be used.\n bias : bool, default False\n Use a standard estimation bias correction.\n """\n ),\n kwargs_numeric_only,\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n create_section_header("Examples"),\n dedent(\n """\\n >>> ser1 = pd.Series([1, 2, 3, 4])\n >>> ser2 = pd.Series([10, 11, 13, 16])\n >>> ser1.ewm(alpha=.2).cov(ser2)\n 0 NaN\n 1 0.500000\n 2 1.524590\n 3 3.408836\n dtype: float64\n """\n ),\n window_method="ewm",\n aggregation_description="(exponential weighted moment) sample covariance",\n agg_method="cov",\n )\n def cov(\n self,\n other: DataFrame | Series | None = None,\n pairwise: bool | None = None,\n bias: bool = False,\n numeric_only: bool = False,\n ):\n from pandas import Series\n\n self._validate_numeric_only("cov", numeric_only)\n\n def cov_func(x, y):\n x_array = self._prep_values(x)\n y_array = self._prep_values(y)\n window_indexer = self._get_window_indexer()\n min_periods = (\n self.min_periods\n if self.min_periods is not None\n else 
window_indexer.window_size\n )\n start, end = window_indexer.get_window_bounds(\n num_values=len(x_array),\n min_periods=min_periods,\n center=self.center,\n closed=self.closed,\n step=self.step,\n )\n result = window_aggregations.ewmcov(\n x_array,\n start,\n end,\n # error: Argument 4 to "ewmcov" has incompatible type\n # "Optional[int]"; expected "int"\n self.min_periods, # type: ignore[arg-type]\n y_array,\n self._com,\n self.adjust,\n self.ignore_na,\n bias,\n )\n return Series(result, index=x.index, name=x.name, copy=False)\n\n return self._apply_pairwise(\n self._selected_obj, other, pairwise, cov_func, numeric_only\n )\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n dedent(\n """\\n other : Series or DataFrame, optional\n If not supplied then will default to self and produce pairwise\n output.\n pairwise : bool, default None\n If False then only matching columns between self and other will be\n used and the output will be a DataFrame.\n If True then all pairwise combinations will be calculated and the\n output will be a MultiIndex DataFrame in the case of DataFrame\n inputs. 
In the case of missing elements, only complete pairwise\n observations will be used.\n """\n ),\n kwargs_numeric_only,\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n create_section_header("Examples"),\n dedent(\n """\\n >>> ser1 = pd.Series([1, 2, 3, 4])\n >>> ser2 = pd.Series([10, 11, 13, 16])\n >>> ser1.ewm(alpha=.2).corr(ser2)\n 0 NaN\n 1 1.000000\n 2 0.982821\n 3 0.977802\n dtype: float64\n """\n ),\n window_method="ewm",\n aggregation_description="(exponential weighted moment) sample correlation",\n agg_method="corr",\n )\n def corr(\n self,\n other: DataFrame | Series | None = None,\n pairwise: bool | None = None,\n numeric_only: bool = False,\n ):\n from pandas import Series\n\n self._validate_numeric_only("corr", numeric_only)\n\n def cov_func(x, y):\n x_array = self._prep_values(x)\n y_array = self._prep_values(y)\n window_indexer = self._get_window_indexer()\n min_periods = (\n self.min_periods\n if self.min_periods is not None\n else window_indexer.window_size\n )\n start, end = window_indexer.get_window_bounds(\n num_values=len(x_array),\n min_periods=min_periods,\n center=self.center,\n closed=self.closed,\n step=self.step,\n )\n\n def _cov(X, Y):\n return window_aggregations.ewmcov(\n X,\n start,\n end,\n min_periods,\n Y,\n self._com,\n self.adjust,\n self.ignore_na,\n True,\n )\n\n with np.errstate(all="ignore"):\n cov = _cov(x_array, y_array)\n x_var = _cov(x_array, x_array)\n y_var = _cov(y_array, y_array)\n result = cov / zsqrt(x_var * y_var)\n return Series(result, index=x.index, name=x.name, copy=False)\n\n return self._apply_pairwise(\n self._selected_obj, other, pairwise, cov_func, numeric_only\n )\n\n\nclass ExponentialMovingWindowGroupby(BaseWindowGroupby, ExponentialMovingWindow):\n """\n Provide an exponential moving window groupby implementation.\n """\n\n _attributes = ExponentialMovingWindow._attributes + BaseWindowGroupby._attributes\n\n def __init__(self, obj, 
*args, _grouper=None, **kwargs) -> None:\n super().__init__(obj, *args, _grouper=_grouper, **kwargs)\n\n if not obj.empty and self.times is not None:\n # sort the times and recalculate the deltas according to the groups\n groupby_order = np.concatenate(list(self._grouper.indices.values()))\n self._deltas = _calculate_deltas(\n self.times.take(groupby_order),\n self.halflife,\n )\n\n def _get_window_indexer(self) -> GroupbyIndexer:\n """\n Return an indexer class that will compute the window start and end bounds\n\n Returns\n -------\n GroupbyIndexer\n """\n window_indexer = GroupbyIndexer(\n groupby_indices=self._grouper.indices,\n window_indexer=ExponentialMovingWindowIndexer,\n )\n return window_indexer\n\n\nclass OnlineExponentialMovingWindow(ExponentialMovingWindow):\n def __init__(\n self,\n obj: NDFrame,\n com: float | None = None,\n span: float | None = None,\n halflife: float | TimedeltaConvertibleTypes | None = None,\n alpha: float | None = None,\n min_periods: int | None = 0,\n adjust: bool = True,\n ignore_na: bool = False,\n axis: Axis = 0,\n times: np.ndarray | NDFrame | None = None,\n engine: str = "numba",\n engine_kwargs: dict[str, bool] | None = None,\n *,\n selection=None,\n ) -> None:\n if times is not None:\n raise NotImplementedError(\n "times is not implemented with online operations."\n )\n super().__init__(\n obj=obj,\n com=com,\n span=span,\n halflife=halflife,\n alpha=alpha,\n min_periods=min_periods,\n adjust=adjust,\n ignore_na=ignore_na,\n axis=axis,\n times=times,\n selection=selection,\n )\n self._mean = EWMMeanState(\n self._com, self.adjust, self.ignore_na, self.axis, obj.shape\n )\n if maybe_use_numba(engine):\n self.engine = engine\n self.engine_kwargs = engine_kwargs\n else:\n raise ValueError("'numba' is the only supported engine")\n\n def reset(self) -> None:\n """\n Reset the state captured by `update` calls.\n """\n self._mean.reset()\n\n def aggregate(self, func, *args, **kwargs):\n raise NotImplementedError("aggregate is 
not implemented.")\n\n def std(self, bias: bool = False, *args, **kwargs):\n raise NotImplementedError("std is not implemented.")\n\n def corr(\n self,\n other: DataFrame | Series | None = None,\n pairwise: bool | None = None,\n numeric_only: bool = False,\n ):\n raise NotImplementedError("corr is not implemented.")\n\n def cov(\n self,\n other: DataFrame | Series | None = None,\n pairwise: bool | None = None,\n bias: bool = False,\n numeric_only: bool = False,\n ):\n raise NotImplementedError("cov is not implemented.")\n\n def var(self, bias: bool = False, numeric_only: bool = False):\n raise NotImplementedError("var is not implemented.")\n\n def mean(self, *args, update=None, update_times=None, **kwargs):\n """\n Calculate an online exponentially weighted mean.\n\n Parameters\n ----------\n update: DataFrame or Series, default None\n New values to continue calculating the\n exponentially weighted mean from the last values and weights.\n Values should be float64 dtype.\n\n ``update`` needs to be ``None`` the first time the\n exponentially weighted mean is calculated.\n\n update_times: Series or 1-D np.ndarray, default None\n New times to continue calculating the\n exponentially weighted mean from the last values and weights.\n If ``None``, values are assumed to be evenly spaced\n in time.\n This feature is currently unsupported.\n\n Returns\n -------\n DataFrame or Series\n\n Examples\n --------\n >>> df = pd.DataFrame({"a": range(5), "b": range(5, 10)})\n >>> online_ewm = df.head(2).ewm(0.5).online()\n >>> online_ewm.mean()\n a b\n 0 0.00 5.00\n 1 0.75 5.75\n >>> online_ewm.mean(update=df.tail(3))\n a b\n 2 1.615385 6.615385\n 3 2.550000 7.550000\n 4 3.520661 8.520661\n >>> online_ewm.reset()\n >>> online_ewm.mean()\n a b\n 0 0.00 5.00\n 1 0.75 5.75\n """\n result_kwargs = {}\n is_frame = self._selected_obj.ndim == 2\n if update_times is not None:\n raise NotImplementedError("update_times is not implemented.")\n update_deltas = np.ones(\n 
max(self._selected_obj.shape[self.axis - 1] - 1, 0), dtype=np.float64\n )\n if update is not None:\n if self._mean.last_ewm is None:\n raise ValueError(\n "Must call mean with update=None first before passing update"\n )\n result_from = 1\n result_kwargs["index"] = update.index\n if is_frame:\n last_value = self._mean.last_ewm[np.newaxis, :]\n result_kwargs["columns"] = update.columns\n else:\n last_value = self._mean.last_ewm\n result_kwargs["name"] = update.name\n np_array = np.concatenate((last_value, update.to_numpy()))\n else:\n result_from = 0\n result_kwargs["index"] = self._selected_obj.index\n if is_frame:\n result_kwargs["columns"] = self._selected_obj.columns\n else:\n result_kwargs["name"] = self._selected_obj.name\n np_array = self._selected_obj.astype(np.float64, copy=False).to_numpy()\n ewma_func = generate_online_numba_ewma_func(\n **get_jit_arguments(self.engine_kwargs)\n )\n result = self._mean.run_ewm(\n np_array if is_frame else np_array[:, np.newaxis],\n update_deltas,\n self.min_periods,\n ewma_func,\n )\n if not is_frame:\n result = result.squeeze()\n result = result[result_from:]\n result = self._selected_obj._constructor(result, **result_kwargs)\n return result\n
.venv\Lib\site-packages\pandas\core\window\ewm.py
ewm.py
Python
35,190
0.95
0.079452
0.022335
node-utils
483
2024-08-24T04:17:05.728458
BSD-3-Clause
false
84df520b175ac7d695fc8cf042ebed07
from __future__ import annotations\n\nfrom textwrap import dedent\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Literal,\n)\n\nfrom pandas.util._decorators import (\n deprecate_kwarg,\n doc,\n)\n\nfrom pandas.core.indexers.objects import (\n BaseIndexer,\n ExpandingIndexer,\n GroupbyIndexer,\n)\nfrom pandas.core.window.doc import (\n _shared_docs,\n create_section_header,\n kwargs_numeric_only,\n numba_notes,\n template_header,\n template_returns,\n template_see_also,\n window_agg_numba_parameters,\n window_apply_parameters,\n)\nfrom pandas.core.window.rolling import (\n BaseWindowGroupby,\n RollingAndExpandingMixin,\n)\n\nif TYPE_CHECKING:\n from pandas._typing import (\n Axis,\n QuantileInterpolation,\n WindowingRankType,\n )\n\n from pandas import (\n DataFrame,\n Series,\n )\n from pandas.core.generic import NDFrame\n\n\nclass Expanding(RollingAndExpandingMixin):\n """\n Provide expanding window calculations.\n\n Parameters\n ----------\n min_periods : int, default 1\n Minimum number of observations in window required to have a value;\n otherwise, result is ``np.nan``.\n\n axis : int or str, default 0\n If ``0`` or ``'index'``, roll across the rows.\n\n If ``1`` or ``'columns'``, roll across the columns.\n\n For `Series` this parameter is unused and defaults to 0.\n\n method : str {'single', 'table'}, default 'single'\n Execute the rolling operation per single column or row (``'single'``)\n or over the entire object (``'table'``).\n\n This argument is only implemented when specifying ``engine='numba'``\n in the method call.\n\n .. 
versionadded:: 1.3.0\n\n Returns\n -------\n pandas.api.typing.Expanding\n\n See Also\n --------\n rolling : Provides rolling window calculations.\n ewm : Provides exponential weighted functions.\n\n Notes\n -----\n See :ref:`Windowing Operations <window.expanding>` for further usage details\n and examples.\n\n Examples\n --------\n >>> df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})\n >>> df\n B\n 0 0.0\n 1 1.0\n 2 2.0\n 3 NaN\n 4 4.0\n\n **min_periods**\n\n Expanding sum with 1 vs 3 observations needed to calculate a value.\n\n >>> df.expanding(1).sum()\n B\n 0 0.0\n 1 1.0\n 2 3.0\n 3 3.0\n 4 7.0\n >>> df.expanding(3).sum()\n B\n 0 NaN\n 1 NaN\n 2 3.0\n 3 3.0\n 4 7.0\n """\n\n _attributes: list[str] = ["min_periods", "axis", "method"]\n\n def __init__(\n self,\n obj: NDFrame,\n min_periods: int = 1,\n axis: Axis = 0,\n method: str = "single",\n selection=None,\n ) -> None:\n super().__init__(\n obj=obj,\n min_periods=min_periods,\n axis=axis,\n method=method,\n selection=selection,\n )\n\n def _get_window_indexer(self) -> BaseIndexer:\n """\n Return an indexer class that will compute the window start and end bounds\n """\n return ExpandingIndexer()\n\n @doc(\n _shared_docs["aggregate"],\n see_also=dedent(\n """\n See Also\n --------\n pandas.DataFrame.aggregate : Similar DataFrame method.\n pandas.Series.aggregate : Similar Series method.\n """\n ),\n examples=dedent(\n """\n Examples\n --------\n >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})\n >>> df\n A B C\n 0 1 4 7\n 1 2 5 8\n 2 3 6 9\n\n >>> df.ewm(alpha=0.5).mean()\n A B C\n 0 1.000000 4.000000 7.000000\n 1 1.666667 4.666667 7.666667\n 2 2.428571 5.428571 8.428571\n """\n ),\n klass="Series/Dataframe",\n axis="",\n )\n def aggregate(self, func, *args, **kwargs):\n return super().aggregate(func, *args, **kwargs)\n\n agg = aggregate\n\n @doc(\n template_header,\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n 
create_section_header("Examples"),\n dedent(\n """\\n >>> ser = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])\n >>> ser.expanding().count()\n a 1.0\n b 2.0\n c 3.0\n d 4.0\n dtype: float64\n """\n ),\n window_method="expanding",\n aggregation_description="count of non NaN observations",\n agg_method="count",\n )\n def count(self, numeric_only: bool = False):\n return super().count(numeric_only=numeric_only)\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n window_apply_parameters,\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n create_section_header("Examples"),\n dedent(\n """\\n >>> ser = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])\n >>> ser.expanding().apply(lambda s: s.max() - 2 * s.min())\n a -1.0\n b 0.0\n c 1.0\n d 2.0\n dtype: float64\n """\n ),\n window_method="expanding",\n aggregation_description="custom aggregation function",\n agg_method="apply",\n )\n def apply(\n self,\n func: Callable[..., Any],\n raw: bool = False,\n engine: Literal["cython", "numba"] | None = None,\n engine_kwargs: dict[str, bool] | None = None,\n args: tuple[Any, ...] 
| None = None,\n kwargs: dict[str, Any] | None = None,\n ):\n return super().apply(\n func,\n raw=raw,\n engine=engine,\n engine_kwargs=engine_kwargs,\n args=args,\n kwargs=kwargs,\n )\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n kwargs_numeric_only,\n window_agg_numba_parameters(),\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n create_section_header("Notes"),\n numba_notes,\n create_section_header("Examples"),\n dedent(\n """\\n >>> ser = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])\n >>> ser.expanding().sum()\n a 1.0\n b 3.0\n c 6.0\n d 10.0\n dtype: float64\n """\n ),\n window_method="expanding",\n aggregation_description="sum",\n agg_method="sum",\n )\n def sum(\n self,\n numeric_only: bool = False,\n engine: Literal["cython", "numba"] | None = None,\n engine_kwargs: dict[str, bool] | None = None,\n ):\n return super().sum(\n numeric_only=numeric_only,\n engine=engine,\n engine_kwargs=engine_kwargs,\n )\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n kwargs_numeric_only,\n window_agg_numba_parameters(),\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n create_section_header("Notes"),\n numba_notes,\n create_section_header("Examples"),\n dedent(\n """\\n >>> ser = pd.Series([3, 2, 1, 4], index=['a', 'b', 'c', 'd'])\n >>> ser.expanding().max()\n a 3.0\n b 3.0\n c 3.0\n d 4.0\n dtype: float64\n """\n ),\n window_method="expanding",\n aggregation_description="maximum",\n agg_method="max",\n )\n def max(\n self,\n numeric_only: bool = False,\n engine: Literal["cython", "numba"] | None = None,\n engine_kwargs: dict[str, bool] | None = None,\n ):\n return super().max(\n numeric_only=numeric_only,\n engine=engine,\n engine_kwargs=engine_kwargs,\n )\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n kwargs_numeric_only,\n window_agg_numba_parameters(),\n 
create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n create_section_header("Notes"),\n numba_notes,\n create_section_header("Examples"),\n dedent(\n """\\n >>> ser = pd.Series([2, 3, 4, 1], index=['a', 'b', 'c', 'd'])\n >>> ser.expanding().min()\n a 2.0\n b 2.0\n c 2.0\n d 1.0\n dtype: float64\n """\n ),\n window_method="expanding",\n aggregation_description="minimum",\n agg_method="min",\n )\n def min(\n self,\n numeric_only: bool = False,\n engine: Literal["cython", "numba"] | None = None,\n engine_kwargs: dict[str, bool] | None = None,\n ):\n return super().min(\n numeric_only=numeric_only,\n engine=engine,\n engine_kwargs=engine_kwargs,\n )\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n kwargs_numeric_only,\n window_agg_numba_parameters(),\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n create_section_header("Notes"),\n numba_notes,\n create_section_header("Examples"),\n dedent(\n """\\n >>> ser = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])\n >>> ser.expanding().mean()\n a 1.0\n b 1.5\n c 2.0\n d 2.5\n dtype: float64\n """\n ),\n window_method="expanding",\n aggregation_description="mean",\n agg_method="mean",\n )\n def mean(\n self,\n numeric_only: bool = False,\n engine: Literal["cython", "numba"] | None = None,\n engine_kwargs: dict[str, bool] | None = None,\n ):\n return super().mean(\n numeric_only=numeric_only,\n engine=engine,\n engine_kwargs=engine_kwargs,\n )\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n kwargs_numeric_only,\n window_agg_numba_parameters(),\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n create_section_header("Notes"),\n numba_notes,\n create_section_header("Examples"),\n dedent(\n """\\n >>> ser = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])\n >>> ser.expanding().median()\n a 
1.0\n b 1.5\n c 2.0\n d 2.5\n dtype: float64\n """\n ),\n window_method="expanding",\n aggregation_description="median",\n agg_method="median",\n )\n def median(\n self,\n numeric_only: bool = False,\n engine: Literal["cython", "numba"] | None = None,\n engine_kwargs: dict[str, bool] | None = None,\n ):\n return super().median(\n numeric_only=numeric_only,\n engine=engine,\n engine_kwargs=engine_kwargs,\n )\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n dedent(\n """\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n\n """\n ).replace("\n", "", 1),\n kwargs_numeric_only,\n window_agg_numba_parameters("1.4"),\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n "numpy.std : Equivalent method for NumPy array.\n",\n template_see_also,\n create_section_header("Notes"),\n dedent(\n """\n The default ``ddof`` of 1 used in :meth:`Series.std` is different\n than the default ``ddof`` of 0 in :func:`numpy.std`.\n\n A minimum of one period is required for the rolling calculation.\n\n """\n ).replace("\n", "", 1),\n create_section_header("Examples"),\n dedent(\n """\n >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])\n\n >>> s.expanding(3).std()\n 0 NaN\n 1 NaN\n 2 0.577350\n 3 0.957427\n 4 0.894427\n 5 0.836660\n 6 0.786796\n dtype: float64\n """\n ).replace("\n", "", 1),\n window_method="expanding",\n aggregation_description="standard deviation",\n agg_method="std",\n )\n def std(\n self,\n ddof: int = 1,\n numeric_only: bool = False,\n engine: Literal["cython", "numba"] | None = None,\n engine_kwargs: dict[str, bool] | None = None,\n ):\n return super().std(\n ddof=ddof,\n numeric_only=numeric_only,\n engine=engine,\n engine_kwargs=engine_kwargs,\n )\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n dedent(\n """\n ddof : int, default 1\n Delta Degrees of Freedom. 
The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n\n """\n ).replace("\n", "", 1),\n kwargs_numeric_only,\n window_agg_numba_parameters("1.4"),\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n "numpy.var : Equivalent method for NumPy array.\n",\n template_see_also,\n create_section_header("Notes"),\n dedent(\n """\n The default ``ddof`` of 1 used in :meth:`Series.var` is different\n than the default ``ddof`` of 0 in :func:`numpy.var`.\n\n A minimum of one period is required for the rolling calculation.\n\n """\n ).replace("\n", "", 1),\n create_section_header("Examples"),\n dedent(\n """\n >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])\n\n >>> s.expanding(3).var()\n 0 NaN\n 1 NaN\n 2 0.333333\n 3 0.916667\n 4 0.800000\n 5 0.700000\n 6 0.619048\n dtype: float64\n """\n ).replace("\n", "", 1),\n window_method="expanding",\n aggregation_description="variance",\n agg_method="var",\n )\n def var(\n self,\n ddof: int = 1,\n numeric_only: bool = False,\n engine: Literal["cython", "numba"] | None = None,\n engine_kwargs: dict[str, bool] | None = None,\n ):\n return super().var(\n ddof=ddof,\n numeric_only=numeric_only,\n engine=engine,\n engine_kwargs=engine_kwargs,\n )\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n dedent(\n """\n ddof : int, default 1\n Delta Degrees of Freedom. 
The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n\n """\n ).replace("\n", "", 1),\n kwargs_numeric_only,\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n create_section_header("Notes"),\n "A minimum of one period is required for the calculation.\n\n",\n create_section_header("Examples"),\n dedent(\n """\n >>> s = pd.Series([0, 1, 2, 3])\n\n >>> s.expanding().sem()\n 0 NaN\n 1 0.707107\n 2 0.707107\n 3 0.745356\n dtype: float64\n """\n ).replace("\n", "", 1),\n window_method="expanding",\n aggregation_description="standard error of mean",\n agg_method="sem",\n )\n def sem(self, ddof: int = 1, numeric_only: bool = False):\n return super().sem(ddof=ddof, numeric_only=numeric_only)\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n kwargs_numeric_only,\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n "scipy.stats.skew : Third moment of a probability density.\n",\n template_see_also,\n create_section_header("Notes"),\n "A minimum of three periods is required for the rolling calculation.\n\n",\n create_section_header("Examples"),\n dedent(\n """\\n >>> ser = pd.Series([-1, 0, 2, -1, 2], index=['a', 'b', 'c', 'd', 'e'])\n >>> ser.expanding().skew()\n a NaN\n b NaN\n c 0.935220\n d 1.414214\n e 0.315356\n dtype: float64\n """\n ),\n window_method="expanding",\n aggregation_description="unbiased skewness",\n agg_method="skew",\n )\n def skew(self, numeric_only: bool = False):\n return super().skew(numeric_only=numeric_only)\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n kwargs_numeric_only,\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n "scipy.stats.kurtosis : Reference SciPy method.\n",\n template_see_also,\n create_section_header("Notes"),\n "A minimum of four periods is required for the calculation.\n\n",\n 
create_section_header("Examples"),\n dedent(\n """\n The example below will show a rolling calculation with a window size of\n four matching the equivalent function call using `scipy.stats`.\n\n >>> arr = [1, 2, 3, 4, 999]\n >>> import scipy.stats\n >>> print(f"{{scipy.stats.kurtosis(arr[:-1], bias=False):.6f}}")\n -1.200000\n >>> print(f"{{scipy.stats.kurtosis(arr, bias=False):.6f}}")\n 4.999874\n >>> s = pd.Series(arr)\n >>> s.expanding(4).kurt()\n 0 NaN\n 1 NaN\n 2 NaN\n 3 -1.200000\n 4 4.999874\n dtype: float64\n """\n ).replace("\n", "", 1),\n window_method="expanding",\n aggregation_description="Fisher's definition of kurtosis without bias",\n agg_method="kurt",\n )\n def kurt(self, numeric_only: bool = False):\n return super().kurt(numeric_only=numeric_only)\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n dedent(\n """\n quantile : float\n Quantile to compute. 0 <= quantile <= 1.\n\n .. deprecated:: 2.1.0\n This will be renamed to 'q' in a future version.\n interpolation : {{'linear', 'lower', 'higher', 'midpoint', 'nearest'}}\n This optional parameter specifies the interpolation method to use,\n when the desired quantile lies between two data points `i` and `j`:\n\n * linear: `i + (j - i) * fraction`, where `fraction` is the\n fractional part of the index surrounded by `i` and `j`.\n * lower: `i`.\n * higher: `j`.\n * nearest: `i` or `j` whichever is nearest.\n * midpoint: (`i` + `j`) / 2.\n """\n ).replace("\n", "", 1),\n kwargs_numeric_only,\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n create_section_header("Examples"),\n dedent(\n """\\n >>> ser = pd.Series([1, 2, 3, 4, 5, 6], index=['a', 'b', 'c', 'd', 'e', 'f'])\n >>> ser.expanding(min_periods=4).quantile(.25)\n a NaN\n b NaN\n c NaN\n d 1.75\n e 2.00\n f 2.25\n dtype: float64\n """\n ),\n window_method="expanding",\n aggregation_description="quantile",\n agg_method="quantile",\n )\n 
@deprecate_kwarg(old_arg_name="quantile", new_arg_name="q")\n def quantile(\n self,\n q: float,\n interpolation: QuantileInterpolation = "linear",\n numeric_only: bool = False,\n ):\n return super().quantile(\n q=q,\n interpolation=interpolation,\n numeric_only=numeric_only,\n )\n\n @doc(\n template_header,\n ".. versionadded:: 1.4.0 \n\n",\n create_section_header("Parameters"),\n dedent(\n """\n method : {{'average', 'min', 'max'}}, default 'average'\n How to rank the group of records that have the same value (i.e. ties):\n\n * average: average rank of the group\n * min: lowest rank in the group\n * max: highest rank in the group\n\n ascending : bool, default True\n Whether or not the elements should be ranked in ascending order.\n pct : bool, default False\n Whether or not to display the returned rankings in percentile\n form.\n """\n ).replace("\n", "", 1),\n kwargs_numeric_only,\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n create_section_header("Examples"),\n dedent(\n """\n >>> s = pd.Series([1, 4, 2, 3, 5, 3])\n >>> s.expanding().rank()\n 0 1.0\n 1 2.0\n 2 2.0\n 3 3.0\n 4 5.0\n 5 3.5\n dtype: float64\n\n >>> s.expanding().rank(method="max")\n 0 1.0\n 1 2.0\n 2 2.0\n 3 3.0\n 4 5.0\n 5 4.0\n dtype: float64\n\n >>> s.expanding().rank(method="min")\n 0 1.0\n 1 2.0\n 2 2.0\n 3 3.0\n 4 5.0\n 5 3.0\n dtype: float64\n """\n ).replace("\n", "", 1),\n window_method="expanding",\n aggregation_description="rank",\n agg_method="rank",\n )\n def rank(\n self,\n method: WindowingRankType = "average",\n ascending: bool = True,\n pct: bool = False,\n numeric_only: bool = False,\n ):\n return super().rank(\n method=method,\n ascending=ascending,\n pct=pct,\n numeric_only=numeric_only,\n )\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n dedent(\n """\n other : Series or DataFrame, optional\n If not supplied then will default to self and produce pairwise\n output.\n pairwise : bool, 
default None\n If False then only matching columns between self and other will be\n used and the output will be a DataFrame.\n If True then all pairwise combinations will be calculated and the\n output will be a MultiIndexed DataFrame in the case of DataFrame\n inputs. In the case of missing elements, only complete pairwise\n observations will be used.\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n """\n ).replace("\n", "", 1),\n kwargs_numeric_only,\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n create_section_header("Examples"),\n dedent(\n """\\n >>> ser1 = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])\n >>> ser2 = pd.Series([10, 11, 13, 16], index=['a', 'b', 'c', 'd'])\n >>> ser1.expanding().cov(ser2)\n a NaN\n b 0.500000\n c 1.500000\n d 3.333333\n dtype: float64\n """\n ),\n window_method="expanding",\n aggregation_description="sample covariance",\n agg_method="cov",\n )\n def cov(\n self,\n other: DataFrame | Series | None = None,\n pairwise: bool | None = None,\n ddof: int = 1,\n numeric_only: bool = False,\n ):\n return super().cov(\n other=other,\n pairwise=pairwise,\n ddof=ddof,\n numeric_only=numeric_only,\n )\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n dedent(\n """\n other : Series or DataFrame, optional\n If not supplied then will default to self and produce pairwise\n output.\n pairwise : bool, default None\n If False then only matching columns between self and other will be\n used and the output will be a DataFrame.\n If True then all pairwise combinations will be calculated and the\n output will be a MultiIndexed DataFrame in the case of DataFrame\n inputs. 
In the case of missing elements, only complete pairwise\n observations will be used.\n """\n ).replace("\n", "", 1),\n kwargs_numeric_only,\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n dedent(\n """\n cov : Similar method to calculate covariance.\n numpy.corrcoef : NumPy Pearson's correlation calculation.\n """\n ).replace("\n", "", 1),\n template_see_also,\n create_section_header("Notes"),\n dedent(\n """\n This function uses Pearson's definition of correlation\n (https://en.wikipedia.org/wiki/Pearson_correlation_coefficient).\n\n When `other` is not specified, the output will be self correlation (e.g.\n all 1's), except for :class:`~pandas.DataFrame` inputs with `pairwise`\n set to `True`.\n\n Function will return ``NaN`` for correlations of equal valued sequences;\n this is the result of a 0/0 division error.\n\n When `pairwise` is set to `False`, only matching columns between `self` and\n `other` will be used.\n\n When `pairwise` is set to `True`, the output will be a MultiIndex DataFrame\n with the original index on the first level, and the `other` DataFrame\n columns on the second level.\n\n In the case of missing elements, only complete pairwise observations\n will be used.\n\n """\n ),\n create_section_header("Examples"),\n dedent(\n """\\n >>> ser1 = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])\n >>> ser2 = pd.Series([10, 11, 13, 16], index=['a', 'b', 'c', 'd'])\n >>> ser1.expanding().corr(ser2)\n a NaN\n b 1.000000\n c 0.981981\n d 0.975900\n dtype: float64\n """\n ),\n window_method="expanding",\n aggregation_description="correlation",\n agg_method="corr",\n )\n def corr(\n self,\n other: DataFrame | Series | None = None,\n pairwise: bool | None = None,\n ddof: int = 1,\n numeric_only: bool = False,\n ):\n return super().corr(\n other=other,\n pairwise=pairwise,\n ddof=ddof,\n numeric_only=numeric_only,\n )\n\n\nclass ExpandingGroupby(BaseWindowGroupby, Expanding):\n """\n Provide a expanding 
groupby implementation.\n """\n\n _attributes = Expanding._attributes + BaseWindowGroupby._attributes\n\n def _get_window_indexer(self) -> GroupbyIndexer:\n """\n Return an indexer class that will compute the window start and end bounds\n\n Returns\n -------\n GroupbyIndexer\n """\n window_indexer = GroupbyIndexer(\n groupby_indices=self._grouper.indices,\n window_indexer=ExpandingIndexer,\n )\n return window_indexer\n
.venv\Lib\site-packages\pandas\core\window\expanding.py
expanding.py
Python
27,845
0.95
0.040456
0.010011
react-lib
487
2025-01-14T21:03:19.894450
GPL-3.0
false
eb3910b6599a18aebac6accc989bdbfc
from __future__ import annotations\n\nimport functools\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n)\n\nimport numpy as np\n\nfrom pandas.compat._optional import import_optional_dependency\n\nfrom pandas.core.util.numba_ import jit_user_function\n\nif TYPE_CHECKING:\n from pandas._typing import Scalar\n\n\n@functools.cache\ndef generate_numba_apply_func(\n func: Callable[..., Scalar],\n nopython: bool,\n nogil: bool,\n parallel: bool,\n):\n """\n Generate a numba jitted apply function specified by values from engine_kwargs.\n\n 1. jit the user's function\n 2. Return a rolling apply function with the jitted function inline\n\n Configurations specified in engine_kwargs apply to both the user's\n function _AND_ the rolling apply function.\n\n Parameters\n ----------\n func : function\n function to be applied to each window and will be JITed\n nopython : bool\n nopython to be passed into numba.jit\n nogil : bool\n nogil to be passed into numba.jit\n parallel : bool\n parallel to be passed into numba.jit\n\n Returns\n -------\n Numba function\n """\n numba_func = jit_user_function(func)\n if TYPE_CHECKING:\n import numba\n else:\n numba = import_optional_dependency("numba")\n\n @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)\n def roll_apply(\n values: np.ndarray,\n begin: np.ndarray,\n end: np.ndarray,\n minimum_periods: int,\n *args: Any,\n ) -> np.ndarray:\n result = np.empty(len(begin))\n for i in numba.prange(len(result)):\n start = begin[i]\n stop = end[i]\n window = values[start:stop]\n count_nan = np.sum(np.isnan(window))\n if len(window) - count_nan >= minimum_periods:\n result[i] = numba_func(window, *args)\n else:\n result[i] = np.nan\n return result\n\n return roll_apply\n\n\n@functools.cache\ndef generate_numba_ewm_func(\n nopython: bool,\n nogil: bool,\n parallel: bool,\n com: float,\n adjust: bool,\n ignore_na: bool,\n deltas: tuple,\n normalize: bool,\n):\n """\n Generate a numba jitted ewm mean or sum function specified by 
values\n from engine_kwargs.\n\n Parameters\n ----------\n nopython : bool\n nopython to be passed into numba.jit\n nogil : bool\n nogil to be passed into numba.jit\n parallel : bool\n parallel to be passed into numba.jit\n com : float\n adjust : bool\n ignore_na : bool\n deltas : tuple\n normalize : bool\n\n Returns\n -------\n Numba function\n """\n if TYPE_CHECKING:\n import numba\n else:\n numba = import_optional_dependency("numba")\n\n @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)\n def ewm(\n values: np.ndarray,\n begin: np.ndarray,\n end: np.ndarray,\n minimum_periods: int,\n ) -> np.ndarray:\n result = np.empty(len(values))\n alpha = 1.0 / (1.0 + com)\n old_wt_factor = 1.0 - alpha\n new_wt = 1.0 if adjust else alpha\n\n for i in numba.prange(len(begin)):\n start = begin[i]\n stop = end[i]\n window = values[start:stop]\n sub_result = np.empty(len(window))\n\n weighted = window[0]\n nobs = int(not np.isnan(weighted))\n sub_result[0] = weighted if nobs >= minimum_periods else np.nan\n old_wt = 1.0\n\n for j in range(1, len(window)):\n cur = window[j]\n is_observation = not np.isnan(cur)\n nobs += is_observation\n if not np.isnan(weighted):\n if is_observation or not ignore_na:\n if normalize:\n # note that len(deltas) = len(vals) - 1 and deltas[i]\n # is to be used in conjunction with vals[i+1]\n old_wt *= old_wt_factor ** deltas[start + j - 1]\n else:\n weighted = old_wt_factor * weighted\n if is_observation:\n if normalize:\n # avoid numerical errors on constant series\n if weighted != cur:\n weighted = old_wt * weighted + new_wt * cur\n if normalize:\n weighted = weighted / (old_wt + new_wt)\n if adjust:\n old_wt += new_wt\n else:\n old_wt = 1.0\n else:\n weighted += cur\n elif is_observation:\n weighted = cur\n\n sub_result[j] = weighted if nobs >= minimum_periods else np.nan\n\n result[start:stop] = sub_result\n\n return result\n\n return ewm\n\n\n@functools.cache\ndef generate_numba_table_func(\n func: Callable[..., np.ndarray],\n 
nopython: bool,\n nogil: bool,\n parallel: bool,\n):\n """\n Generate a numba jitted function to apply window calculations table-wise.\n\n Func will be passed a M window size x N number of columns array, and\n must return a 1 x N number of columns array. Func is intended to operate\n row-wise, but the result will be transposed for axis=1.\n\n 1. jit the user's function\n 2. Return a rolling apply function with the jitted function inline\n\n Parameters\n ----------\n func : function\n function to be applied to each window and will be JITed\n nopython : bool\n nopython to be passed into numba.jit\n nogil : bool\n nogil to be passed into numba.jit\n parallel : bool\n parallel to be passed into numba.jit\n\n Returns\n -------\n Numba function\n """\n numba_func = jit_user_function(func)\n if TYPE_CHECKING:\n import numba\n else:\n numba = import_optional_dependency("numba")\n\n @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)\n def roll_table(\n values: np.ndarray,\n begin: np.ndarray,\n end: np.ndarray,\n minimum_periods: int,\n *args: Any,\n ):\n result = np.empty((len(begin), values.shape[1]))\n min_periods_mask = np.empty(result.shape)\n for i in numba.prange(len(result)):\n start = begin[i]\n stop = end[i]\n window = values[start:stop]\n count_nan = np.sum(np.isnan(window), axis=0)\n sub_result = numba_func(window, *args)\n nan_mask = len(window) - count_nan >= minimum_periods\n min_periods_mask[i, :] = nan_mask\n result[i, :] = sub_result\n result = np.where(min_periods_mask, result, np.nan)\n return result\n\n return roll_table\n\n\n# This function will no longer be needed once numba supports\n# axis for all np.nan* agg functions\n# https://github.com/numba/numba/issues/1269\n@functools.cache\ndef generate_manual_numpy_nan_agg_with_axis(nan_func):\n if TYPE_CHECKING:\n import numba\n else:\n numba = import_optional_dependency("numba")\n\n @numba.jit(nopython=True, nogil=True, parallel=True)\n def nan_agg_with_axis(table):\n result = 
np.empty(table.shape[1])\n for i in numba.prange(table.shape[1]):\n partition = table[:, i]\n result[i] = nan_func(partition)\n return result\n\n return nan_agg_with_axis\n\n\n@functools.cache\ndef generate_numba_ewm_table_func(\n nopython: bool,\n nogil: bool,\n parallel: bool,\n com: float,\n adjust: bool,\n ignore_na: bool,\n deltas: tuple,\n normalize: bool,\n):\n """\n Generate a numba jitted ewm mean or sum function applied table wise specified\n by values from engine_kwargs.\n\n Parameters\n ----------\n nopython : bool\n nopython to be passed into numba.jit\n nogil : bool\n nogil to be passed into numba.jit\n parallel : bool\n parallel to be passed into numba.jit\n com : float\n adjust : bool\n ignore_na : bool\n deltas : tuple\n normalize: bool\n\n Returns\n -------\n Numba function\n """\n if TYPE_CHECKING:\n import numba\n else:\n numba = import_optional_dependency("numba")\n\n @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)\n def ewm_table(\n values: np.ndarray,\n begin: np.ndarray,\n end: np.ndarray,\n minimum_periods: int,\n ) -> np.ndarray:\n alpha = 1.0 / (1.0 + com)\n old_wt_factor = 1.0 - alpha\n new_wt = 1.0 if adjust else alpha\n old_wt = np.ones(values.shape[1])\n\n result = np.empty(values.shape)\n weighted = values[0].copy()\n nobs = (~np.isnan(weighted)).astype(np.int64)\n result[0] = np.where(nobs >= minimum_periods, weighted, np.nan)\n for i in range(1, len(values)):\n cur = values[i]\n is_observations = ~np.isnan(cur)\n nobs += is_observations.astype(np.int64)\n for j in numba.prange(len(cur)):\n if not np.isnan(weighted[j]):\n if is_observations[j] or not ignore_na:\n if normalize:\n # note that len(deltas) = len(vals) - 1 and deltas[i]\n # is to be used in conjunction with vals[i+1]\n old_wt[j] *= old_wt_factor ** deltas[i - 1]\n else:\n weighted[j] = old_wt_factor * weighted[j]\n if is_observations[j]:\n if normalize:\n # avoid numerical errors on constant series\n if weighted[j] != cur[j]:\n weighted[j] = (\n old_wt[j] * 
weighted[j] + new_wt * cur[j]\n )\n if normalize:\n weighted[j] = weighted[j] / (old_wt[j] + new_wt)\n if adjust:\n old_wt[j] += new_wt\n else:\n old_wt[j] = 1.0\n else:\n weighted[j] += cur[j]\n elif is_observations[j]:\n weighted[j] = cur[j]\n\n result[i] = np.where(nobs >= minimum_periods, weighted, np.nan)\n\n return result\n\n return ewm_table\n
.venv\Lib\site-packages\pandas\core\window\numba_.py
numba_.py
Python
10,606
0.95
0.190883
0.036066
awesome-app
849
2024-05-30T01:10:19.629187
MIT
false
48e4f474046b659193676fa583cf2764
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\n\nfrom pandas.compat._optional import import_optional_dependency\n\n\ndef generate_online_numba_ewma_func(\n nopython: bool,\n nogil: bool,\n parallel: bool,\n):\n """\n Generate a numba jitted groupby ewma function specified by values\n from engine_kwargs.\n\n Parameters\n ----------\n nopython : bool\n nopython to be passed into numba.jit\n nogil : bool\n nogil to be passed into numba.jit\n parallel : bool\n parallel to be passed into numba.jit\n\n Returns\n -------\n Numba function\n """\n if TYPE_CHECKING:\n import numba\n else:\n numba = import_optional_dependency("numba")\n\n @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)\n def online_ewma(\n values: np.ndarray,\n deltas: np.ndarray,\n minimum_periods: int,\n old_wt_factor: float,\n new_wt: float,\n old_wt: np.ndarray,\n adjust: bool,\n ignore_na: bool,\n ):\n """\n Compute online exponentially weighted mean per column over 2D values.\n\n Takes the first observation as is, then computes the subsequent\n exponentially weighted mean accounting minimum periods.\n """\n result = np.empty(values.shape)\n weighted_avg = values[0].copy()\n nobs = (~np.isnan(weighted_avg)).astype(np.int64)\n result[0] = np.where(nobs >= minimum_periods, weighted_avg, np.nan)\n\n for i in range(1, len(values)):\n cur = values[i]\n is_observations = ~np.isnan(cur)\n nobs += is_observations.astype(np.int64)\n for j in numba.prange(len(cur)):\n if not np.isnan(weighted_avg[j]):\n if is_observations[j] or not ignore_na:\n # note that len(deltas) = len(vals) - 1 and deltas[i] is to be\n # used in conjunction with vals[i+1]\n old_wt[j] *= old_wt_factor ** deltas[j - 1]\n if is_observations[j]:\n # avoid numerical errors on constant series\n if weighted_avg[j] != cur[j]:\n weighted_avg[j] = (\n (old_wt[j] * weighted_avg[j]) + (new_wt * cur[j])\n ) / (old_wt[j] + new_wt)\n if adjust:\n old_wt[j] += new_wt\n else:\n old_wt[j] = 1.0\n 
elif is_observations[j]:\n weighted_avg[j] = cur[j]\n\n result[i] = np.where(nobs >= minimum_periods, weighted_avg, np.nan)\n\n return result, old_wt\n\n return online_ewma\n\n\nclass EWMMeanState:\n def __init__(self, com, adjust, ignore_na, axis, shape) -> None:\n alpha = 1.0 / (1.0 + com)\n self.axis = axis\n self.shape = shape\n self.adjust = adjust\n self.ignore_na = ignore_na\n self.new_wt = 1.0 if adjust else alpha\n self.old_wt_factor = 1.0 - alpha\n self.old_wt = np.ones(self.shape[self.axis - 1])\n self.last_ewm = None\n\n def run_ewm(self, weighted_avg, deltas, min_periods, ewm_func):\n result, old_wt = ewm_func(\n weighted_avg,\n deltas,\n min_periods,\n self.old_wt_factor,\n self.new_wt,\n self.old_wt,\n self.adjust,\n self.ignore_na,\n )\n self.old_wt = old_wt\n self.last_ewm = result[-1]\n return result\n\n def reset(self) -> None:\n self.old_wt = np.ones(self.shape[self.axis - 1])\n self.last_ewm = None\n
.venv\Lib\site-packages\pandas\core\window\online.py
online.py
Python
3,735
0.95
0.144068
0.029703
react-lib
462
2024-10-30T05:09:29.439538
MIT
false
e7aa76f8513f8eee14cb92deacf27191
"""\nProvide a generic structure to support window functions,\nsimilar to how we have a Groupby object.\n"""\nfrom __future__ import annotations\n\nimport copy\nfrom datetime import timedelta\nfrom functools import partial\nimport inspect\nfrom textwrap import dedent\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Literal,\n)\n\nimport numpy as np\n\nfrom pandas._libs.tslibs import (\n BaseOffset,\n Timedelta,\n to_offset,\n)\nimport pandas._libs.window.aggregations as window_aggregations\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.errors import DataError\nfrom pandas.util._decorators import (\n deprecate_kwarg,\n doc,\n)\n\nfrom pandas.core.dtypes.common import (\n ensure_float64,\n is_bool,\n is_integer,\n is_numeric_dtype,\n needs_i8_conversion,\n)\nfrom pandas.core.dtypes.dtypes import ArrowDtype\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCSeries,\n)\nfrom pandas.core.dtypes.missing import notna\n\nfrom pandas.core._numba import executor\nfrom pandas.core.algorithms import factorize\nfrom pandas.core.apply import ResamplerWindowApply\nfrom pandas.core.arrays import ExtensionArray\nfrom pandas.core.base import SelectionMixin\nimport pandas.core.common as com\nfrom pandas.core.indexers.objects import (\n BaseIndexer,\n FixedWindowIndexer,\n GroupbyIndexer,\n VariableWindowIndexer,\n)\nfrom pandas.core.indexes.api import (\n DatetimeIndex,\n Index,\n MultiIndex,\n PeriodIndex,\n TimedeltaIndex,\n)\nfrom pandas.core.reshape.concat import concat\nfrom pandas.core.util.numba_ import (\n get_jit_arguments,\n maybe_use_numba,\n)\nfrom pandas.core.window.common import (\n flex_binary_moment,\n zsqrt,\n)\nfrom pandas.core.window.doc import (\n _shared_docs,\n create_section_header,\n kwargs_numeric_only,\n kwargs_scipy,\n numba_notes,\n template_header,\n template_returns,\n template_see_also,\n window_agg_numba_parameters,\n window_apply_parameters,\n)\nfrom pandas.core.window.numba_ import (\n 
generate_manual_numpy_nan_agg_with_axis,\n generate_numba_apply_func,\n generate_numba_table_func,\n)\n\nif TYPE_CHECKING:\n from collections.abc import (\n Hashable,\n Iterator,\n Sized,\n )\n\n from pandas._typing import (\n ArrayLike,\n Axis,\n NDFrameT,\n QuantileInterpolation,\n WindowingRankType,\n npt,\n )\n\n from pandas import (\n DataFrame,\n Series,\n )\n from pandas.core.generic import NDFrame\n from pandas.core.groupby.ops import BaseGrouper\n\nfrom pandas.core.arrays.datetimelike import dtype_to_unit\n\n\nclass BaseWindow(SelectionMixin):\n """Provides utilities for performing windowing operations."""\n\n _attributes: list[str] = []\n exclusions: frozenset[Hashable] = frozenset()\n _on: Index\n\n def __init__(\n self,\n obj: NDFrame,\n window=None,\n min_periods: int | None = None,\n center: bool | None = False,\n win_type: str | None = None,\n axis: Axis = 0,\n on: str | Index | None = None,\n closed: str | None = None,\n step: int | None = None,\n method: str = "single",\n *,\n selection=None,\n ) -> None:\n self.obj = obj\n self.on = on\n self.closed = closed\n self.step = step\n self.window = window\n self.min_periods = min_periods\n self.center = center\n self.win_type = win_type\n self.axis = obj._get_axis_number(axis) if axis is not None else None\n self.method = method\n self._win_freq_i8: int | None = None\n if self.on is None:\n if self.axis == 0:\n self._on = self.obj.index\n else:\n # i.e. 
self.axis == 1\n self._on = self.obj.columns\n elif isinstance(self.on, Index):\n self._on = self.on\n elif isinstance(self.obj, ABCDataFrame) and self.on in self.obj.columns:\n self._on = Index(self.obj[self.on])\n else:\n raise ValueError(\n f"invalid on specified as {self.on}, "\n "must be a column (of DataFrame), an Index or None"\n )\n\n self._selection = selection\n self._validate()\n\n def _validate(self) -> None:\n if self.center is not None and not is_bool(self.center):\n raise ValueError("center must be a boolean")\n if self.min_periods is not None:\n if not is_integer(self.min_periods):\n raise ValueError("min_periods must be an integer")\n if self.min_periods < 0:\n raise ValueError("min_periods must be >= 0")\n if is_integer(self.window) and self.min_periods > self.window:\n raise ValueError(\n f"min_periods {self.min_periods} must be <= window {self.window}"\n )\n if self.closed is not None and self.closed not in [\n "right",\n "both",\n "left",\n "neither",\n ]:\n raise ValueError("closed must be 'right', 'left', 'both' or 'neither'")\n if not isinstance(self.obj, (ABCSeries, ABCDataFrame)):\n raise TypeError(f"invalid type: {type(self)}")\n if isinstance(self.window, BaseIndexer):\n # Validate that the passed BaseIndexer subclass has\n # a get_window_bounds with the correct signature.\n get_window_bounds_signature = inspect.signature(\n self.window.get_window_bounds\n ).parameters.keys()\n expected_signature = inspect.signature(\n BaseIndexer().get_window_bounds\n ).parameters.keys()\n if get_window_bounds_signature != expected_signature:\n raise ValueError(\n f"{type(self.window).__name__} does not implement "\n f"the correct signature for get_window_bounds"\n )\n if self.method not in ["table", "single"]:\n raise ValueError("method must be 'table' or 'single")\n if self.step is not None:\n if not is_integer(self.step):\n raise ValueError("step must be an integer")\n if self.step < 0:\n raise ValueError("step must be >= 0")\n\n def 
_check_window_bounds(\n self, start: np.ndarray, end: np.ndarray, num_vals: int\n ) -> None:\n if len(start) != len(end):\n raise ValueError(\n f"start ({len(start)}) and end ({len(end)}) bounds must be the "\n f"same length"\n )\n if len(start) != (num_vals + (self.step or 1) - 1) // (self.step or 1):\n raise ValueError(\n f"start and end bounds ({len(start)}) must be the same length "\n f"as the object ({num_vals}) divided by the step ({self.step}) "\n f"if given and rounded up"\n )\n\n def _slice_axis_for_step(self, index: Index, result: Sized | None = None) -> Index:\n """\n Slices the index for a given result and the preset step.\n """\n return (\n index\n if result is None or len(result) == len(index)\n else index[:: self.step]\n )\n\n def _validate_numeric_only(self, name: str, numeric_only: bool) -> None:\n """\n Validate numeric_only argument, raising if invalid for the input.\n\n Parameters\n ----------\n name : str\n Name of the operator (kernel).\n numeric_only : bool\n Value passed by user.\n """\n if (\n self._selected_obj.ndim == 1\n and numeric_only\n and not is_numeric_dtype(self._selected_obj.dtype)\n ):\n raise NotImplementedError(\n f"{type(self).__name__}.{name} does not implement numeric_only"\n )\n\n def _make_numeric_only(self, obj: NDFrameT) -> NDFrameT:\n """Subset DataFrame to numeric columns.\n\n Parameters\n ----------\n obj : DataFrame\n\n Returns\n -------\n obj subset to numeric-only columns.\n """\n result = obj.select_dtypes(include=["number"], exclude=["timedelta"])\n return result\n\n def _create_data(self, obj: NDFrameT, numeric_only: bool = False) -> NDFrameT:\n """\n Split data into blocks & return conformed data.\n """\n # filter out the on from the object\n if self.on is not None and not isinstance(self.on, Index) and obj.ndim == 2:\n obj = obj.reindex(columns=obj.columns.difference([self.on]), copy=False)\n if obj.ndim > 1 and (numeric_only or self.axis == 1):\n # GH: 20649 in case of mixed dtype and axis=1 we have to 
convert everything\n # to float to calculate the complete row at once. We exclude all non-numeric\n # dtypes.\n obj = self._make_numeric_only(obj)\n if self.axis == 1:\n obj = obj.astype("float64", copy=False)\n obj._mgr = obj._mgr.consolidate()\n return obj\n\n def _gotitem(self, key, ndim, subset=None):\n """\n Sub-classes to define. Return a sliced object.\n\n Parameters\n ----------\n key : str / list of selections\n ndim : {1, 2}\n requested ndim of result\n subset : object, default None\n subset to act on\n """\n # create a new object to prevent aliasing\n if subset is None:\n subset = self.obj\n\n # we need to make a shallow copy of ourselves\n # with the same groupby\n kwargs = {attr: getattr(self, attr) for attr in self._attributes}\n\n selection = self._infer_selection(key, subset)\n new_win = type(self)(subset, selection=selection, **kwargs)\n return new_win\n\n def __getattr__(self, attr: str):\n if attr in self._internal_names_set:\n return object.__getattribute__(self, attr)\n if attr in self.obj:\n return self[attr]\n\n raise AttributeError(\n f"'{type(self).__name__}' object has no attribute '{attr}'"\n )\n\n def _dir_additions(self):\n return self.obj._dir_additions()\n\n def __repr__(self) -> str:\n """\n Provide a nice str repr of our rolling object.\n """\n attrs_list = (\n f"{attr_name}={getattr(self, attr_name)}"\n for attr_name in self._attributes\n if getattr(self, attr_name, None) is not None and attr_name[0] != "_"\n )\n attrs = ",".join(attrs_list)\n return f"{type(self).__name__} [{attrs}]"\n\n def __iter__(self) -> Iterator:\n obj = self._selected_obj.set_axis(self._on)\n obj = self._create_data(obj)\n indexer = self._get_window_indexer()\n\n start, end = indexer.get_window_bounds(\n num_values=len(obj),\n min_periods=self.min_periods,\n center=self.center,\n closed=self.closed,\n step=self.step,\n )\n self._check_window_bounds(start, end, len(obj))\n\n for s, e in zip(start, end):\n result = obj.iloc[slice(s, e)]\n yield result\n\n def 
_prep_values(self, values: ArrayLike) -> np.ndarray:\n """Convert input to numpy arrays for Cython routines"""\n if needs_i8_conversion(values.dtype):\n raise NotImplementedError(\n f"ops for {type(self).__name__} for this "\n f"dtype {values.dtype} are not implemented"\n )\n # GH #12373 : rolling functions error on float32 data\n # make sure the data is coerced to float64\n try:\n if isinstance(values, ExtensionArray):\n values = values.to_numpy(np.float64, na_value=np.nan)\n else:\n values = ensure_float64(values)\n except (ValueError, TypeError) as err:\n raise TypeError(f"cannot handle this type -> {values.dtype}") from err\n\n # Convert inf to nan for C funcs\n inf = np.isinf(values)\n if inf.any():\n values = np.where(inf, np.nan, values)\n\n return values\n\n def _insert_on_column(self, result: DataFrame, obj: DataFrame) -> None:\n # if we have an 'on' column we want to put it back into\n # the results in the same location\n from pandas import Series\n\n if self.on is not None and not self._on.equals(obj.index):\n name = self._on.name\n extra_col = Series(self._on, index=self.obj.index, name=name, copy=False)\n if name in result.columns:\n # TODO: sure we want to overwrite results?\n result[name] = extra_col\n elif name in result.index.names:\n pass\n elif name in self._selected_obj.columns:\n # insert in the same location as we had in _selected_obj\n old_cols = self._selected_obj.columns\n new_cols = result.columns\n old_loc = old_cols.get_loc(name)\n overlap = new_cols.intersection(old_cols[:old_loc])\n new_loc = len(overlap)\n result.insert(new_loc, name, extra_col)\n else:\n # insert at the end\n result[name] = extra_col\n\n @property\n def _index_array(self) -> npt.NDArray[np.int64] | None:\n # TODO: why do we get here with e.g. 
MultiIndex?\n if isinstance(self._on, (PeriodIndex, DatetimeIndex, TimedeltaIndex)):\n return self._on.asi8\n elif isinstance(self._on.dtype, ArrowDtype) and self._on.dtype.kind in "mM":\n return self._on.to_numpy(dtype=np.int64)\n return None\n\n def _resolve_output(self, out: DataFrame, obj: DataFrame) -> DataFrame:\n """Validate and finalize result."""\n if out.shape[1] == 0 and obj.shape[1] > 0:\n raise DataError("No numeric types to aggregate")\n if out.shape[1] == 0:\n return obj.astype("float64")\n\n self._insert_on_column(out, obj)\n return out\n\n def _get_window_indexer(self) -> BaseIndexer:\n """\n Return an indexer class that will compute the window start and end bounds\n """\n if isinstance(self.window, BaseIndexer):\n return self.window\n if self._win_freq_i8 is not None:\n return VariableWindowIndexer(\n index_array=self._index_array,\n window_size=self._win_freq_i8,\n center=self.center,\n )\n return FixedWindowIndexer(window_size=self.window)\n\n def _apply_series(\n self, homogeneous_func: Callable[..., ArrayLike], name: str | None = None\n ) -> Series:\n """\n Series version of _apply_columnwise\n """\n obj = self._create_data(self._selected_obj)\n\n if name == "count":\n # GH 12541: Special case for count where we support date-like types\n obj = notna(obj).astype(int)\n try:\n values = self._prep_values(obj._values)\n except (TypeError, NotImplementedError) as err:\n raise DataError("No numeric types to aggregate") from err\n\n result = homogeneous_func(values)\n index = self._slice_axis_for_step(obj.index, result)\n return obj._constructor(result, index=index, name=obj.name)\n\n def _apply_columnwise(\n self,\n homogeneous_func: Callable[..., ArrayLike],\n name: str,\n numeric_only: bool = False,\n ) -> DataFrame | Series:\n """\n Apply the given function to the DataFrame broken down into homogeneous\n sub-frames.\n """\n self._validate_numeric_only(name, numeric_only)\n if self._selected_obj.ndim == 1:\n return 
self._apply_series(homogeneous_func, name)\n\n obj = self._create_data(self._selected_obj, numeric_only)\n if name == "count":\n # GH 12541: Special case for count where we support date-like types\n obj = notna(obj).astype(int)\n obj._mgr = obj._mgr.consolidate()\n\n if self.axis == 1:\n obj = obj.T\n\n taker = []\n res_values = []\n for i, arr in enumerate(obj._iter_column_arrays()):\n # GH#42736 operate column-wise instead of block-wise\n # As of 2.0, hfunc will raise for nuisance columns\n try:\n arr = self._prep_values(arr)\n except (TypeError, NotImplementedError) as err:\n raise DataError(\n f"Cannot aggregate non-numeric type: {arr.dtype}"\n ) from err\n res = homogeneous_func(arr)\n res_values.append(res)\n taker.append(i)\n\n index = self._slice_axis_for_step(\n obj.index, res_values[0] if len(res_values) > 0 else None\n )\n df = type(obj)._from_arrays(\n res_values,\n index=index,\n columns=obj.columns.take(taker),\n verify_integrity=False,\n )\n\n if self.axis == 1:\n df = df.T\n\n return self._resolve_output(df, obj)\n\n def _apply_tablewise(\n self,\n homogeneous_func: Callable[..., ArrayLike],\n name: str | None = None,\n numeric_only: bool = False,\n ) -> DataFrame | Series:\n """\n Apply the given function to the DataFrame across the entire object\n """\n if self._selected_obj.ndim == 1:\n raise ValueError("method='table' not applicable for Series objects.")\n obj = self._create_data(self._selected_obj, numeric_only)\n values = self._prep_values(obj.to_numpy())\n values = values.T if self.axis == 1 else values\n result = homogeneous_func(values)\n result = result.T if self.axis == 1 else result\n index = self._slice_axis_for_step(obj.index, result)\n columns = (\n obj.columns\n if result.shape[1] == len(obj.columns)\n else obj.columns[:: self.step]\n )\n out = obj._constructor(result, index=index, columns=columns)\n\n return self._resolve_output(out, obj)\n\n def _apply_pairwise(\n self,\n target: DataFrame | Series,\n other: DataFrame | Series | 
None,\n pairwise: bool | None,\n func: Callable[[DataFrame | Series, DataFrame | Series], DataFrame | Series],\n numeric_only: bool,\n ) -> DataFrame | Series:\n """\n Apply the given pairwise function given 2 pandas objects (DataFrame/Series)\n """\n target = self._create_data(target, numeric_only)\n if other is None:\n other = target\n # only default unset\n pairwise = True if pairwise is None else pairwise\n elif not isinstance(other, (ABCDataFrame, ABCSeries)):\n raise ValueError("other must be a DataFrame or Series")\n elif other.ndim == 2 and numeric_only:\n other = self._make_numeric_only(other)\n\n return flex_binary_moment(target, other, func, pairwise=bool(pairwise))\n\n def _apply(\n self,\n func: Callable[..., Any],\n name: str,\n numeric_only: bool = False,\n numba_args: tuple[Any, ...] = (),\n **kwargs,\n ):\n """\n Rolling statistical measure using supplied function.\n\n Designed to be used with passed-in Cython array-based functions.\n\n Parameters\n ----------\n func : callable function to apply\n name : str,\n numba_args : tuple\n args to be passed when func is a numba func\n **kwargs\n additional arguments for rolling function and window function\n\n Returns\n -------\n y : type of input\n """\n window_indexer = self._get_window_indexer()\n min_periods = (\n self.min_periods\n if self.min_periods is not None\n else window_indexer.window_size\n )\n\n def homogeneous_func(values: np.ndarray):\n # calculation function\n\n if values.size == 0:\n return values.copy()\n\n def calc(x):\n start, end = window_indexer.get_window_bounds(\n num_values=len(x),\n min_periods=min_periods,\n center=self.center,\n closed=self.closed,\n step=self.step,\n )\n self._check_window_bounds(start, end, len(x))\n\n return func(x, start, end, min_periods, *numba_args)\n\n with np.errstate(all="ignore"):\n result = calc(values)\n\n return result\n\n if self.method == "single":\n return self._apply_columnwise(homogeneous_func, name, numeric_only)\n else:\n return 
self._apply_tablewise(homogeneous_func, name, numeric_only)\n\n def _numba_apply(\n self,\n func: Callable[..., Any],\n engine_kwargs: dict[str, bool] | None = None,\n **func_kwargs,\n ):\n window_indexer = self._get_window_indexer()\n min_periods = (\n self.min_periods\n if self.min_periods is not None\n else window_indexer.window_size\n )\n obj = self._create_data(self._selected_obj)\n if self.axis == 1:\n obj = obj.T\n values = self._prep_values(obj.to_numpy())\n if values.ndim == 1:\n values = values.reshape(-1, 1)\n start, end = window_indexer.get_window_bounds(\n num_values=len(values),\n min_periods=min_periods,\n center=self.center,\n closed=self.closed,\n step=self.step,\n )\n self._check_window_bounds(start, end, len(values))\n # For now, map everything to float to match the Cython impl\n # even though it is wrong\n # TODO: Could preserve correct dtypes in future\n # xref #53214\n dtype_mapping = executor.float_dtype_mapping\n aggregator = executor.generate_shared_aggregator(\n func,\n dtype_mapping,\n is_grouped_kernel=False,\n **get_jit_arguments(engine_kwargs),\n )\n result = aggregator(\n values.T, start=start, end=end, min_periods=min_periods, **func_kwargs\n ).T\n result = result.T if self.axis == 1 else result\n index = self._slice_axis_for_step(obj.index, result)\n if obj.ndim == 1:\n result = result.squeeze()\n out = obj._constructor(result, index=index, name=obj.name)\n return out\n else:\n columns = self._slice_axis_for_step(obj.columns, result.T)\n out = obj._constructor(result, index=index, columns=columns)\n return self._resolve_output(out, obj)\n\n def aggregate(self, func, *args, **kwargs):\n result = ResamplerWindowApply(self, func, args=args, kwargs=kwargs).agg()\n if result is None:\n return self.apply(func, raw=False, args=args, kwargs=kwargs)\n return result\n\n agg = aggregate\n\n\nclass BaseWindowGroupby(BaseWindow):\n """\n Provide the groupby windowing facilities.\n """\n\n _grouper: BaseGrouper\n _as_index: bool\n _attributes: 
list[str] = ["_grouper"]\n\n def __init__(\n self,\n obj: DataFrame | Series,\n *args,\n _grouper: BaseGrouper,\n _as_index: bool = True,\n **kwargs,\n ) -> None:\n from pandas.core.groupby.ops import BaseGrouper\n\n if not isinstance(_grouper, BaseGrouper):\n raise ValueError("Must pass a BaseGrouper object.")\n self._grouper = _grouper\n self._as_index = _as_index\n # GH 32262: It's convention to keep the grouping column in\n # groupby.<agg_func>, but unexpected to users in\n # groupby.rolling.<agg_func>\n obj = obj.drop(columns=self._grouper.names, errors="ignore")\n # GH 15354\n if kwargs.get("step") is not None:\n raise NotImplementedError("step not implemented for groupby")\n super().__init__(obj, *args, **kwargs)\n\n def _apply(\n self,\n func: Callable[..., Any],\n name: str,\n numeric_only: bool = False,\n numba_args: tuple[Any, ...] = (),\n **kwargs,\n ) -> DataFrame | Series:\n result = super()._apply(\n func,\n name,\n numeric_only,\n numba_args,\n **kwargs,\n )\n # Reconstruct the resulting MultiIndex\n # 1st set of levels = group by labels\n # 2nd set of levels = original DataFrame/Series index\n grouped_object_index = self.obj.index\n grouped_index_name = [*grouped_object_index.names]\n groupby_keys = copy.copy(self._grouper.names)\n result_index_names = groupby_keys + grouped_index_name\n\n drop_columns = [\n key\n for key in self._grouper.names\n if key not in self.obj.index.names or key is None\n ]\n\n if len(drop_columns) != len(groupby_keys):\n # Our result will have still kept the column in the result\n result = result.drop(columns=drop_columns, errors="ignore")\n\n codes = self._grouper.codes\n levels = copy.copy(self._grouper.levels)\n\n group_indices = self._grouper.indices.values()\n if group_indices:\n indexer = np.concatenate(list(group_indices))\n else:\n indexer = np.array([], dtype=np.intp)\n codes = [c.take(indexer) for c in codes]\n\n # if the index of the original dataframe needs to be preserved, append\n # this index (but 
reordered) to the codes/levels from the groupby\n if grouped_object_index is not None:\n idx = grouped_object_index.take(indexer)\n if not isinstance(idx, MultiIndex):\n idx = MultiIndex.from_arrays([idx])\n codes.extend(list(idx.codes))\n levels.extend(list(idx.levels))\n\n result_index = MultiIndex(\n levels, codes, names=result_index_names, verify_integrity=False\n )\n\n result.index = result_index\n if not self._as_index:\n result = result.reset_index(level=list(range(len(groupby_keys))))\n return result\n\n def _apply_pairwise(\n self,\n target: DataFrame | Series,\n other: DataFrame | Series | None,\n pairwise: bool | None,\n func: Callable[[DataFrame | Series, DataFrame | Series], DataFrame | Series],\n numeric_only: bool,\n ) -> DataFrame | Series:\n """\n Apply the given pairwise function given 2 pandas objects (DataFrame/Series)\n """\n # Manually drop the grouping column first\n target = target.drop(columns=self._grouper.names, errors="ignore")\n result = super()._apply_pairwise(target, other, pairwise, func, numeric_only)\n # 1) Determine the levels + codes of the groupby levels\n if other is not None and not all(\n len(group) == len(other) for group in self._grouper.indices.values()\n ):\n # GH 42915\n # len(other) != len(any group), so must reindex (expand) the result\n # from flex_binary_moment to a "transform"-like result\n # per groupby combination\n old_result_len = len(result)\n result = concat(\n [\n result.take(gb_indices).reindex(result.index)\n for gb_indices in self._grouper.indices.values()\n ]\n )\n\n gb_pairs = (\n com.maybe_make_list(pair) for pair in self._grouper.indices.keys()\n )\n groupby_codes = []\n groupby_levels = []\n # e.g. 
[[1, 2], [4, 5]] as [[1, 4], [2, 5]]\n for gb_level_pair in map(list, zip(*gb_pairs)):\n labels = np.repeat(np.array(gb_level_pair), old_result_len)\n codes, levels = factorize(labels)\n groupby_codes.append(codes)\n groupby_levels.append(levels)\n else:\n # pairwise=True or len(other) == len(each group), so repeat\n # the groupby labels by the number of columns in the original object\n groupby_codes = self._grouper.codes\n # error: Incompatible types in assignment (expression has type\n # "List[Index]", variable has type "List[Union[ndarray, Index]]")\n groupby_levels = self._grouper.levels # type: ignore[assignment]\n\n group_indices = self._grouper.indices.values()\n if group_indices:\n indexer = np.concatenate(list(group_indices))\n else:\n indexer = np.array([], dtype=np.intp)\n\n if target.ndim == 1:\n repeat_by = 1\n else:\n repeat_by = len(target.columns)\n groupby_codes = [\n np.repeat(c.take(indexer), repeat_by) for c in groupby_codes\n ]\n # 2) Determine the levels + codes of the result from super()._apply_pairwise\n if isinstance(result.index, MultiIndex):\n result_codes = list(result.index.codes)\n result_levels = list(result.index.levels)\n result_names = list(result.index.names)\n else:\n idx_codes, idx_levels = factorize(result.index)\n result_codes = [idx_codes]\n result_levels = [idx_levels]\n result_names = [result.index.name]\n\n # 3) Create the resulting index by combining 1) + 2)\n result_codes = groupby_codes + result_codes\n result_levels = groupby_levels + result_levels\n result_names = self._grouper.names + result_names\n\n result_index = MultiIndex(\n result_levels, result_codes, names=result_names, verify_integrity=False\n )\n result.index = result_index\n return result\n\n def _create_data(self, obj: NDFrameT, numeric_only: bool = False) -> NDFrameT:\n """\n Split data into blocks & return conformed data.\n """\n # Ensure the object we're rolling over is monotonically sorted relative\n # to the groups\n # GH 36197\n if not obj.empty:\n 
groupby_order = np.concatenate(list(self._grouper.indices.values())).astype(\n np.int64\n )\n obj = obj.take(groupby_order)\n return super()._create_data(obj, numeric_only)\n\n def _gotitem(self, key, ndim, subset=None):\n # we are setting the index on the actual object\n # here so our index is carried through to the selected obj\n # when we do the splitting for the groupby\n if self.on is not None:\n # GH 43355\n subset = self.obj.set_index(self._on)\n return super()._gotitem(key, ndim, subset=subset)\n\n\nclass Window(BaseWindow):\n """\n Provide rolling window calculations.\n\n Parameters\n ----------\n window : int, timedelta, str, offset, or BaseIndexer subclass\n Size of the moving window.\n\n If an integer, the fixed number of observations used for\n each window.\n\n If a timedelta, str, or offset, the time period of each window. Each\n window will be a variable sized based on the observations included in\n the time-period. This is only valid for datetimelike indexes.\n To learn more about the offsets & frequency strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n\n If a BaseIndexer subclass, the window boundaries\n based on the defined ``get_window_bounds`` method. 
Additional rolling\n keyword arguments, namely ``min_periods``, ``center``, ``closed`` and\n ``step`` will be passed to ``get_window_bounds``.\n\n min_periods : int, default None\n Minimum number of observations in window required to have a value;\n otherwise, result is ``np.nan``.\n\n For a window that is specified by an offset, ``min_periods`` will default to 1.\n\n For a window that is specified by an integer, ``min_periods`` will default\n to the size of the window.\n\n center : bool, default False\n If False, set the window labels as the right edge of the window index.\n\n If True, set the window labels as the center of the window index.\n\n win_type : str, default None\n If ``None``, all points are evenly weighted.\n\n If a string, it must be a valid `scipy.signal window function\n <https://docs.scipy.org/doc/scipy/reference/signal.windows.html#module-scipy.signal.windows>`__.\n\n Certain Scipy window types require additional parameters to be passed\n in the aggregation function. The additional parameters must match\n the keywords specified in the Scipy window type method signature.\n\n on : str, optional\n For a DataFrame, a column label or Index level on which\n to calculate the rolling window, rather than the DataFrame's index.\n\n Provided integer column is ignored and excluded from result since\n an integer index is not used to calculate the rolling window.\n\n axis : int or str, default 0\n If ``0`` or ``'index'``, roll across the rows.\n\n If ``1`` or ``'columns'``, roll across the columns.\n\n For `Series` this parameter is unused and defaults to 0.\n\n .. deprecated:: 2.1.0\n\n The axis keyword is deprecated. 
For ``axis=1``,\n transpose the DataFrame first instead.\n\n closed : str, default None\n If ``'right'``, the first point in the window is excluded from calculations.\n\n If ``'left'``, the last point in the window is excluded from calculations.\n\n If ``'both'``, the no points in the window are excluded from calculations.\n\n If ``'neither'``, the first and last points in the window are excluded\n from calculations.\n\n Default ``None`` (``'right'``).\n\n step : int, default None\n\n .. versionadded:: 1.5.0\n\n Evaluate the window at every ``step`` result, equivalent to slicing as\n ``[::step]``. ``window`` must be an integer. Using a step argument other\n than None or 1 will produce a result with a different shape than the input.\n\n method : str {'single', 'table'}, default 'single'\n\n .. versionadded:: 1.3.0\n\n Execute the rolling operation per single column or row (``'single'``)\n or over the entire object (``'table'``).\n\n This argument is only implemented when specifying ``engine='numba'``\n in the method call.\n\n Returns\n -------\n pandas.api.typing.Window or pandas.api.typing.Rolling\n An instance of Window is returned if ``win_type`` is passed. Otherwise,\n an instance of Rolling is returned.\n\n See Also\n --------\n expanding : Provides expanding transformations.\n ewm : Provides exponential weighted functions.\n\n Notes\n -----\n See :ref:`Windowing Operations <window.generic>` for further usage details\n and examples.\n\n Examples\n --------\n >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})\n >>> df\n B\n 0 0.0\n 1 1.0\n 2 2.0\n 3 NaN\n 4 4.0\n\n **window**\n\n Rolling sum with a window length of 2 observations.\n\n >>> df.rolling(2).sum()\n B\n 0 NaN\n 1 1.0\n 2 3.0\n 3 NaN\n 4 NaN\n\n Rolling sum with a window span of 2 seconds.\n\n >>> df_time = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},\n ... index=[pd.Timestamp('20130101 09:00:00'),\n ... pd.Timestamp('20130101 09:00:02'),\n ... pd.Timestamp('20130101 09:00:03'),\n ... 
pd.Timestamp('20130101 09:00:05'),\n ... pd.Timestamp('20130101 09:00:06')])\n\n >>> df_time\n B\n 2013-01-01 09:00:00 0.0\n 2013-01-01 09:00:02 1.0\n 2013-01-01 09:00:03 2.0\n 2013-01-01 09:00:05 NaN\n 2013-01-01 09:00:06 4.0\n\n >>> df_time.rolling('2s').sum()\n B\n 2013-01-01 09:00:00 0.0\n 2013-01-01 09:00:02 1.0\n 2013-01-01 09:00:03 3.0\n 2013-01-01 09:00:05 NaN\n 2013-01-01 09:00:06 4.0\n\n Rolling sum with forward looking windows with 2 observations.\n\n >>> indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=2)\n >>> df.rolling(window=indexer, min_periods=1).sum()\n B\n 0 1.0\n 1 3.0\n 2 2.0\n 3 4.0\n 4 4.0\n\n **min_periods**\n\n Rolling sum with a window length of 2 observations, but only needs a minimum of 1\n observation to calculate a value.\n\n >>> df.rolling(2, min_periods=1).sum()\n B\n 0 0.0\n 1 1.0\n 2 3.0\n 3 2.0\n 4 4.0\n\n **center**\n\n Rolling sum with the result assigned to the center of the window index.\n\n >>> df.rolling(3, min_periods=1, center=True).sum()\n B\n 0 1.0\n 1 3.0\n 2 3.0\n 3 6.0\n 4 4.0\n\n >>> df.rolling(3, min_periods=1, center=False).sum()\n B\n 0 0.0\n 1 1.0\n 2 3.0\n 3 3.0\n 4 6.0\n\n **step**\n\n Rolling sum with a window length of 2 observations, minimum of 1 observation to\n calculate a value, and a step of 2.\n\n >>> df.rolling(2, min_periods=1, step=2).sum()\n B\n 0 0.0\n 2 3.0\n 4 4.0\n\n **win_type**\n\n Rolling sum with a window length of 2, using the Scipy ``'gaussian'``\n window type. ``std`` is required in the aggregation function.\n\n >>> df.rolling(2, win_type='gaussian').sum(std=3)\n B\n 0 NaN\n 1 0.986207\n 2 2.958621\n 3 NaN\n 4 NaN\n\n **on**\n\n Rolling sum with a window length of 2 days.\n\n >>> df = pd.DataFrame({\n ... 'A': [pd.to_datetime('2020-01-01'),\n ... pd.to_datetime('2020-01-01'),\n ... pd.to_datetime('2020-01-02'),],\n ... 'B': [1, 2, 3], },\n ... 
index=pd.date_range('2020', periods=3))\n\n >>> df\n A B\n 2020-01-01 2020-01-01 1\n 2020-01-02 2020-01-01 2\n 2020-01-03 2020-01-02 3\n\n >>> df.rolling('2D', on='A').sum()\n A B\n 2020-01-01 2020-01-01 1.0\n 2020-01-02 2020-01-01 3.0\n 2020-01-03 2020-01-02 6.0\n """\n\n _attributes = [\n "window",\n "min_periods",\n "center",\n "win_type",\n "axis",\n "on",\n "closed",\n "step",\n "method",\n ]\n\n def _validate(self):\n super()._validate()\n\n if not isinstance(self.win_type, str):\n raise ValueError(f"Invalid win_type {self.win_type}")\n signal = import_optional_dependency(\n "scipy.signal.windows", extra="Scipy is required to generate window weight."\n )\n self._scipy_weight_generator = getattr(signal, self.win_type, None)\n if self._scipy_weight_generator is None:\n raise ValueError(f"Invalid win_type {self.win_type}")\n\n if isinstance(self.window, BaseIndexer):\n raise NotImplementedError(\n "BaseIndexer subclasses not implemented with win_types."\n )\n if not is_integer(self.window) or self.window < 0:\n raise ValueError("window must be an integer 0 or greater")\n\n if self.method != "single":\n raise NotImplementedError("'single' is the only supported method type.")\n\n def _center_window(self, result: np.ndarray, offset: int) -> np.ndarray:\n """\n Center the result in the window for weighted rolling aggregations.\n """\n if offset > 0:\n lead_indexer = [slice(offset, None)]\n result = np.copy(result[tuple(lead_indexer)])\n return result\n\n def _apply(\n self,\n func: Callable[[np.ndarray, int, int], np.ndarray],\n name: str,\n numeric_only: bool = False,\n numba_args: tuple[Any, ...] 
= (),\n **kwargs,\n ):\n """\n Rolling with weights statistical measure using supplied function.\n\n Designed to be used with passed-in Cython array-based functions.\n\n Parameters\n ----------\n func : callable function to apply\n name : str,\n numeric_only : bool, default False\n Whether to only operate on bool, int, and float columns\n numba_args : tuple\n unused\n **kwargs\n additional arguments for scipy windows if necessary\n\n Returns\n -------\n y : type of input\n """\n # "None" not callable [misc]\n window = self._scipy_weight_generator( # type: ignore[misc]\n self.window, **kwargs\n )\n offset = (len(window) - 1) // 2 if self.center else 0\n\n def homogeneous_func(values: np.ndarray):\n # calculation function\n\n if values.size == 0:\n return values.copy()\n\n def calc(x):\n additional_nans = np.array([np.nan] * offset)\n x = np.concatenate((x, additional_nans))\n return func(\n x,\n window,\n self.min_periods if self.min_periods is not None else len(window),\n )\n\n with np.errstate(all="ignore"):\n # Our weighted aggregations return memoryviews\n result = np.asarray(calc(values))\n\n if self.center:\n result = self._center_window(result, offset)\n\n return result\n\n return self._apply_columnwise(homogeneous_func, name, numeric_only)[\n :: self.step\n ]\n\n @doc(\n _shared_docs["aggregate"],\n see_also=dedent(\n """\n See Also\n --------\n pandas.DataFrame.aggregate : Similar DataFrame method.\n pandas.Series.aggregate : Similar Series method.\n """\n ),\n examples=dedent(\n """\n Examples\n --------\n >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})\n >>> df\n A B C\n 0 1 4 7\n 1 2 5 8\n 2 3 6 9\n\n >>> df.rolling(2, win_type="boxcar").agg("mean")\n A B C\n 0 NaN NaN NaN\n 1 1.5 4.5 7.5\n 2 2.5 5.5 8.5\n """\n ),\n klass="Series/DataFrame",\n axis="",\n )\n def aggregate(self, func, *args, **kwargs):\n result = ResamplerWindowApply(self, func, args=args, kwargs=kwargs).agg()\n if result is None:\n # these must apply directly\n 
result = func(self)\n\n return result\n\n agg = aggregate\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n kwargs_numeric_only,\n kwargs_scipy,\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n create_section_header("Examples"),\n dedent(\n """\\n >>> ser = pd.Series([0, 1, 5, 2, 8])\n\n To get an instance of :class:`~pandas.core.window.rolling.Window` we need\n to pass the parameter `win_type`.\n\n >>> type(ser.rolling(2, win_type='gaussian'))\n <class 'pandas.core.window.rolling.Window'>\n\n In order to use the `SciPy` Gaussian window we need to provide the parameters\n `M` and `std`. The parameter `M` corresponds to 2 in our example.\n We pass the second parameter `std` as a parameter of the following method\n (`sum` in this case):\n\n >>> ser.rolling(2, win_type='gaussian').sum(std=3)\n 0 NaN\n 1 0.986207\n 2 5.917243\n 3 6.903450\n 4 9.862071\n dtype: float64\n """\n ),\n window_method="rolling",\n aggregation_description="weighted window sum",\n agg_method="sum",\n )\n def sum(self, numeric_only: bool = False, **kwargs):\n window_func = window_aggregations.roll_weighted_sum\n # error: Argument 1 to "_apply" of "Window" has incompatible type\n # "Callable[[ndarray, ndarray, int], ndarray]"; expected\n # "Callable[[ndarray, int, int], ndarray]"\n return self._apply(\n window_func, # type: ignore[arg-type]\n name="sum",\n numeric_only=numeric_only,\n **kwargs,\n )\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n kwargs_numeric_only,\n kwargs_scipy,\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n create_section_header("Examples"),\n dedent(\n """\\n >>> ser = pd.Series([0, 1, 5, 2, 8])\n\n To get an instance of :class:`~pandas.core.window.rolling.Window` we need\n to pass the parameter `win_type`.\n\n >>> type(ser.rolling(2, win_type='gaussian'))\n <class 
'pandas.core.window.rolling.Window'>\n\n In order to use the `SciPy` Gaussian window we need to provide the parameters\n `M` and `std`. The parameter `M` corresponds to 2 in our example.\n We pass the second parameter `std` as a parameter of the following method:\n\n >>> ser.rolling(2, win_type='gaussian').mean(std=3)\n 0 NaN\n 1 0.5\n 2 3.0\n 3 3.5\n 4 5.0\n dtype: float64\n """\n ),\n window_method="rolling",\n aggregation_description="weighted window mean",\n agg_method="mean",\n )\n def mean(self, numeric_only: bool = False, **kwargs):\n window_func = window_aggregations.roll_weighted_mean\n # error: Argument 1 to "_apply" of "Window" has incompatible type\n # "Callable[[ndarray, ndarray, int], ndarray]"; expected\n # "Callable[[ndarray, int, int], ndarray]"\n return self._apply(\n window_func, # type: ignore[arg-type]\n name="mean",\n numeric_only=numeric_only,\n **kwargs,\n )\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n kwargs_numeric_only,\n kwargs_scipy,\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n create_section_header("Examples"),\n dedent(\n """\\n >>> ser = pd.Series([0, 1, 5, 2, 8])\n\n To get an instance of :class:`~pandas.core.window.rolling.Window` we need\n to pass the parameter `win_type`.\n\n >>> type(ser.rolling(2, win_type='gaussian'))\n <class 'pandas.core.window.rolling.Window'>\n\n In order to use the `SciPy` Gaussian window we need to provide the parameters\n `M` and `std`. 
The parameter `M` corresponds to 2 in our example.\n We pass the second parameter `std` as a parameter of the following method:\n\n >>> ser.rolling(2, win_type='gaussian').var(std=3)\n 0 NaN\n 1 0.5\n 2 8.0\n 3 4.5\n 4 18.0\n dtype: float64\n """\n ),\n window_method="rolling",\n aggregation_description="weighted window variance",\n agg_method="var",\n )\n def var(self, ddof: int = 1, numeric_only: bool = False, **kwargs):\n window_func = partial(window_aggregations.roll_weighted_var, ddof=ddof)\n kwargs.pop("name", None)\n return self._apply(window_func, name="var", numeric_only=numeric_only, **kwargs)\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n kwargs_numeric_only,\n kwargs_scipy,\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n create_section_header("Examples"),\n dedent(\n """\\n >>> ser = pd.Series([0, 1, 5, 2, 8])\n\n To get an instance of :class:`~pandas.core.window.rolling.Window` we need\n to pass the parameter `win_type`.\n\n >>> type(ser.rolling(2, win_type='gaussian'))\n <class 'pandas.core.window.rolling.Window'>\n\n In order to use the `SciPy` Gaussian window we need to provide the parameters\n `M` and `std`. 
The parameter `M` corresponds to 2 in our example.\n We pass the second parameter `std` as a parameter of the following method:\n\n >>> ser.rolling(2, win_type='gaussian').std(std=3)\n 0 NaN\n 1 0.707107\n 2 2.828427\n 3 2.121320\n 4 4.242641\n dtype: float64\n """\n ),\n window_method="rolling",\n aggregation_description="weighted window standard deviation",\n agg_method="std",\n )\n def std(self, ddof: int = 1, numeric_only: bool = False, **kwargs):\n return zsqrt(\n self.var(ddof=ddof, name="std", numeric_only=numeric_only, **kwargs)\n )\n\n\nclass RollingAndExpandingMixin(BaseWindow):\n def count(self, numeric_only: bool = False):\n window_func = window_aggregations.roll_sum\n return self._apply(window_func, name="count", numeric_only=numeric_only)\n\n def apply(\n self,\n func: Callable[..., Any],\n raw: bool = False,\n engine: Literal["cython", "numba"] | None = None,\n engine_kwargs: dict[str, bool] | None = None,\n args: tuple[Any, ...] | None = None,\n kwargs: dict[str, Any] | None = None,\n ):\n if args is None:\n args = ()\n if kwargs is None:\n kwargs = {}\n\n if not is_bool(raw):\n raise ValueError("raw parameter must be `True` or `False`")\n\n numba_args: tuple[Any, ...] 
= ()\n if maybe_use_numba(engine):\n if raw is False:\n raise ValueError("raw must be `True` when using the numba engine")\n numba_args = args\n if self.method == "single":\n apply_func = generate_numba_apply_func(\n func, **get_jit_arguments(engine_kwargs, kwargs)\n )\n else:\n apply_func = generate_numba_table_func(\n func, **get_jit_arguments(engine_kwargs, kwargs)\n )\n elif engine in ("cython", None):\n if engine_kwargs is not None:\n raise ValueError("cython engine does not accept engine_kwargs")\n apply_func = self._generate_cython_apply_func(args, kwargs, raw, func)\n else:\n raise ValueError("engine must be either 'numba' or 'cython'")\n\n return self._apply(\n apply_func,\n name="apply",\n numba_args=numba_args,\n )\n\n def _generate_cython_apply_func(\n self,\n args: tuple[Any, ...],\n kwargs: dict[str, Any],\n raw: bool | np.bool_,\n function: Callable[..., Any],\n ) -> Callable[[np.ndarray, np.ndarray, np.ndarray, int], np.ndarray]:\n from pandas import Series\n\n window_func = partial(\n window_aggregations.roll_apply,\n args=args,\n kwargs=kwargs,\n raw=raw,\n function=function,\n )\n\n def apply_func(values, begin, end, min_periods, raw=raw):\n if not raw:\n # GH 45912\n values = Series(values, index=self._on, copy=False)\n return window_func(values, begin, end, min_periods)\n\n return apply_func\n\n def sum(\n self,\n numeric_only: bool = False,\n engine: Literal["cython", "numba"] | None = None,\n engine_kwargs: dict[str, bool] | None = None,\n ):\n if maybe_use_numba(engine):\n if self.method == "table":\n func = generate_manual_numpy_nan_agg_with_axis(np.nansum)\n return self.apply(\n func,\n raw=True,\n engine=engine,\n engine_kwargs=engine_kwargs,\n )\n else:\n from pandas.core._numba.kernels import sliding_sum\n\n return self._numba_apply(sliding_sum, engine_kwargs)\n window_func = window_aggregations.roll_sum\n return self._apply(window_func, name="sum", numeric_only=numeric_only)\n\n def max(\n self,\n numeric_only: bool = False,\n engine: 
Literal["cython", "numba"] | None = None,
        engine_kwargs: dict[str, bool] | None = None,
    ):
        if maybe_use_numba(engine):
            if self.method == "table":
                # table method: no dedicated numba kernel, so fall back to a
                # nan-aware numpy reduction routed through apply().
                func = generate_manual_numpy_nan_agg_with_axis(np.nanmax)
                return self.apply(
                    func,
                    raw=True,
                    engine=engine,
                    engine_kwargs=engine_kwargs,
                )
            else:
                from pandas.core._numba.kernels import sliding_min_max

                # shared kernel computes either extreme; is_max selects max
                return self._numba_apply(sliding_min_max, engine_kwargs, is_max=True)
        # default cython path
        window_func = window_aggregations.roll_max
        return self._apply(window_func, name="max", numeric_only=numeric_only)

    def min(
        self,
        numeric_only: bool = False,
        engine: Literal["cython", "numba"] | None = None,
        engine_kwargs: dict[str, bool] | None = None,
    ):
        # Rolling minimum; mirrors max() with is_max=False / roll_min.
        if maybe_use_numba(engine):
            if self.method == "table":
                func = generate_manual_numpy_nan_agg_with_axis(np.nanmin)
                return self.apply(
                    func,
                    raw=True,
                    engine=engine,
                    engine_kwargs=engine_kwargs,
                )
            else:
                from pandas.core._numba.kernels import sliding_min_max

                return self._numba_apply(sliding_min_max, engine_kwargs, is_max=False)
        window_func = window_aggregations.roll_min
        return self._apply(window_func, name="min", numeric_only=numeric_only)

    def mean(
        self,
        numeric_only: bool = False,
        engine: Literal["cython", "numba"] | None = None,
        engine_kwargs: dict[str, bool] | None = None,
    ):
        # Rolling mean: numba sliding_mean kernel, numpy nanmean for
        # method="table", or the cython roll_mean aggregation by default.
        if maybe_use_numba(engine):
            if self.method == "table":
                func = generate_manual_numpy_nan_agg_with_axis(np.nanmean)
                return self.apply(
                    func,
                    raw=True,
                    engine=engine,
                    engine_kwargs=engine_kwargs,
                )
            else:
                from pandas.core._numba.kernels import sliding_mean

                return self._numba_apply(sliding_mean, engine_kwargs)
        window_func = window_aggregations.roll_mean
        return self._apply(window_func, name="mean", numeric_only=numeric_only)

    def median(
        self,
        numeric_only: bool = False,
        engine: Literal["cython", "numba"] | None = None,
        engine_kwargs: dict[str, bool] | None = None,
    ):
        # Rolling median; there is no numba median kernel, so both numba
        # branches go through apply() with np.nanmedian.
        if maybe_use_numba(engine):
            if 
self.method == "table":
                func = generate_manual_numpy_nan_agg_with_axis(np.nanmedian)
            else:
                func = np.nanmedian

            return self.apply(
                func,
                raw=True,
                engine=engine,
                engine_kwargs=engine_kwargs,
            )
        window_func = window_aggregations.roll_median_c
        return self._apply(window_func, name="median", numeric_only=numeric_only)

    def std(
        self,
        ddof: int = 1,
        numeric_only: bool = False,
        engine: Literal["cython", "numba"] | None = None,
        engine_kwargs: dict[str, bool] | None = None,
    ):
        # Rolling standard deviation, computed as zsqrt(var) so the two
        # aggregations stay numerically consistent.
        if maybe_use_numba(engine):
            if self.method == "table":
                raise NotImplementedError("std not supported with method='table'")
            from pandas.core._numba.kernels import sliding_var

            return zsqrt(self._numba_apply(sliding_var, engine_kwargs, ddof=ddof))
        window_func = window_aggregations.roll_var

        def zsqrt_func(values, begin, end, min_periods):
            # wrap the variance kernel so that std = zsqrt(var) per window
            return zsqrt(window_func(values, begin, end, min_periods, ddof=ddof))

        return self._apply(
            zsqrt_func,
            name="std",
            numeric_only=numeric_only,
        )

    def var(
        self,
        ddof: int = 1,
        numeric_only: bool = False,
        engine: Literal["cython", "numba"] | None = None,
        engine_kwargs: dict[str, bool] | None = None,
    ):
        # Rolling variance with ddof delta degrees of freedom.
        if maybe_use_numba(engine):
            if self.method == "table":
                raise NotImplementedError("var not supported with method='table'")
            from pandas.core._numba.kernels import sliding_var

            return self._numba_apply(sliding_var, engine_kwargs, ddof=ddof)
        window_func = partial(window_aggregations.roll_var, ddof=ddof)
        return self._apply(
            window_func,
            name="var",
            numeric_only=numeric_only,
        )

    def skew(self, numeric_only: bool = False):
        # Unbiased rolling skewness; cython kernel only (no numba path).
        window_func = window_aggregations.roll_skew
        return self._apply(
            window_func,
            name="skew",
            numeric_only=numeric_only,
        )

    def sem(self, ddof: int = 1, numeric_only: bool = False):
        # Standard error of the mean: std / (count - ddof) ** 0.5.
        # Raise here so error message says sem instead of std
        self._validate_numeric_only("sem", numeric_only)
        return self.std(numeric_only=numeric_only) / (
self.count(numeric_only=numeric_only) - ddof
        ).pow(0.5)

    def kurt(self, numeric_only: bool = False):
        # Rolling kurtosis (fourth moment); cython kernel only.
        window_func = window_aggregations.roll_kurt
        return self._apply(
            window_func,
            name="kurt",
            numeric_only=numeric_only,
        )

    def quantile(
        self,
        q: float,
        interpolation: QuantileInterpolation = "linear",
        numeric_only: bool = False,
    ):
        # q == 1.0 / 0.0 short-circuit to the max/min kernels, which need
        # no interpolation; otherwise use the generic quantile kernel.
        if q == 1.0:
            window_func = window_aggregations.roll_max
        elif q == 0.0:
            window_func = window_aggregations.roll_min
        else:
            window_func = partial(
                window_aggregations.roll_quantile,
                quantile=q,
                interpolation=interpolation,
            )

        return self._apply(window_func, name="quantile", numeric_only=numeric_only)

    def rank(
        self,
        method: WindowingRankType = "average",
        ascending: bool = True,
        pct: bool = False,
        numeric_only: bool = False,
    ):
        # Rank of each value within its window; pct=True yields percentiles.
        window_func = partial(
            window_aggregations.roll_rank,
            method=method,
            ascending=ascending,
            percentile=pct,
        )

        return self._apply(window_func, name="rank", numeric_only=numeric_only)

    def cov(
        self,
        other: DataFrame | Series | None = None,
        pairwise: bool | None = None,
        ddof: int = 1,
        numeric_only: bool = False,
    ):
        # Rolling sample covariance built from windowed moments:
        # cov = (E[xy] - E[x]E[y]) * n / (n - ddof).
        if self.step is not None:
            raise NotImplementedError("step not implemented for cov")
        self._validate_numeric_only("cov", numeric_only)

        from pandas import Series

        def cov_func(x, y):
            x_array = self._prep_values(x)
            y_array = self._prep_values(y)
            window_indexer = self._get_window_indexer()
            min_periods = (
                self.min_periods
                if self.min_periods is not None
                else window_indexer.window_size
            )
            start, end = window_indexer.get_window_bounds(
                num_values=len(x_array),
                min_periods=min_periods,
                center=self.center,
                closed=self.closed,
                step=self.step,
            )
            self._check_window_bounds(start, end, len(x_array))

            with np.errstate(all="ignore"):
                mean_x_y = window_aggregations.roll_mean(
                    x_array * y_array, start, end, min_periods
                )
                mean_x = window_aggregations.roll_mean(x_array, start, end, 
min_periods)
                mean_y = window_aggregations.roll_mean(y_array, start, end, min_periods)
                # count of positions where both x and y are non-missing
                count_x_y = window_aggregations.roll_sum(
                    notna(x_array + y_array).astype(np.float64), start, end, 0
                )
                result = (mean_x_y - mean_x * mean_y) * (count_x_y / (count_x_y - ddof))
            return Series(result, index=x.index, name=x.name, copy=False)

        return self._apply_pairwise(
            self._selected_obj, other, pairwise, cov_func, numeric_only
        )

    def corr(
        self,
        other: DataFrame | Series | None = None,
        pairwise: bool | None = None,
        ddof: int = 1,
        numeric_only: bool = False,
    ):
        # Rolling Pearson correlation: cov(x, y) / sqrt(var(x) * var(y)),
        # with every moment computed over the same window bounds.
        if self.step is not None:
            raise NotImplementedError("step not implemented for corr")
        self._validate_numeric_only("corr", numeric_only)

        from pandas import Series

        def corr_func(x, y):
            x_array = self._prep_values(x)
            y_array = self._prep_values(y)
            window_indexer = self._get_window_indexer()
            min_periods = (
                self.min_periods
                if self.min_periods is not None
                else window_indexer.window_size
            )
            start, end = window_indexer.get_window_bounds(
                num_values=len(x_array),
                min_periods=min_periods,
                center=self.center,
                closed=self.closed,
                step=self.step,
            )
            self._check_window_bounds(start, end, len(x_array))

            with np.errstate(all="ignore"):
                mean_x_y = window_aggregations.roll_mean(
                    x_array * y_array, start, end, min_periods
                )
                mean_x = window_aggregations.roll_mean(x_array, start, end, min_periods)
                mean_y = window_aggregations.roll_mean(y_array, start, end, min_periods)
                count_x_y = window_aggregations.roll_sum(
                    notna(x_array + y_array).astype(np.float64), start, end, 0
                )
                x_var = window_aggregations.roll_var(
                    x_array, start, end, min_periods, ddof
                )
                y_var = window_aggregations.roll_var(
                    y_array, start, end, min_periods, ddof
                )
                numerator = (mean_x_y - mean_x * mean_y) * (
                    count_x_y / (count_x_y - ddof)
                )
                denominator = (x_var * y_var) ** 0.5
                result = numerator / denominator
            return Series(result, index=x.index, name=x.name, 
copy=False)

        return self._apply_pairwise(
            self._selected_obj, other, pairwise, corr_func, numeric_only
        )


class Rolling(RollingAndExpandingMixin):
    # attributes copied when re-constructing the window object
    _attributes: list[str] = [
        "window",
        "min_periods",
        "center",
        "win_type",
        "axis",
        "on",
        "closed",
        "step",
        "method",
    ]

    def _validate(self):
        """
        Validate the rolling arguments; for frequency (offset-like) windows,
        also resolve the window length into integer index units.
        """
        super()._validate()

        # we allow rolling on a datetimelike index
        if (
            self.obj.empty
            or isinstance(self._on, (DatetimeIndex, TimedeltaIndex, PeriodIndex))
            or (isinstance(self._on.dtype, ArrowDtype) and self._on.dtype.kind in "mM")
        ) and isinstance(self.window, (str, BaseOffset, timedelta)):
            self._validate_datetimelike_monotonic()

            # this will raise ValueError on non-fixed freqs
            try:
                freq = to_offset(self.window)
            except (TypeError, ValueError) as err:
                raise ValueError(
                    f"passed window {self.window} is not "
                    "compatible with a datetimelike index"
                ) from err
            if isinstance(self._on, PeriodIndex):
                # error: Incompatible types in assignment (expression has type
                # "float", variable has type "Optional[int]")
                self._win_freq_i8 = freq.nanos / (  # type: ignore[assignment]
                    self._on.freq.nanos / self._on.freq.n
                )
            else:
                try:
                    unit = dtype_to_unit(self._on.dtype)  # type: ignore[arg-type]
                except TypeError:
                    # if not a datetime dtype, eg for empty dataframes
                    unit = "ns"
                # window length expressed in the index's integer (i8) unit
                self._win_freq_i8 = Timedelta(freq.nanos).as_unit(unit)._value

            # min_periods must be an integer
            if self.min_periods is None:
                self.min_periods = 1

            if self.step is not None:
                raise NotImplementedError(
                    "step is not supported with frequency windows"
                )

        elif isinstance(self.window, BaseIndexer):
            # Passed BaseIndexer subclass should handle all other rolling kwargs
            pass
        elif not is_integer(self.window) or self.window < 0:
            raise ValueError("window must be an integer 0 or greater")

    def _validate_datetimelike_monotonic(self) -> None:
        """
        Validate self._on is monotonic (increasing or decreasing) and has
        no NaT values 
for frequency windows.\n """\n if self._on.hasnans:\n self._raise_monotonic_error("values must not have NaT")\n if not (self._on.is_monotonic_increasing or self._on.is_monotonic_decreasing):\n self._raise_monotonic_error("values must be monotonic")\n\n def _raise_monotonic_error(self, msg: str):\n on = self.on\n if on is None:\n if self.axis == 0:\n on = "index"\n else:\n on = "column"\n raise ValueError(f"{on} {msg}")\n\n @doc(\n _shared_docs["aggregate"],\n see_also=dedent(\n """\n See Also\n --------\n pandas.Series.rolling : Calling object with Series data.\n pandas.DataFrame.rolling : Calling object with DataFrame data.\n """\n ),\n examples=dedent(\n """\n Examples\n --------\n >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})\n >>> df\n A B C\n 0 1 4 7\n 1 2 5 8\n 2 3 6 9\n\n >>> df.rolling(2).sum()\n A B C\n 0 NaN NaN NaN\n 1 3.0 9.0 15.0\n 2 5.0 11.0 17.0\n\n >>> df.rolling(2).agg({"A": "sum", "B": "min"})\n A B\n 0 NaN NaN\n 1 3.0 4.0\n 2 5.0 5.0\n """\n ),\n klass="Series/Dataframe",\n axis="",\n )\n def aggregate(self, func, *args, **kwargs):\n return super().aggregate(func, *args, **kwargs)\n\n agg = aggregate\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n kwargs_numeric_only,\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n create_section_header("Examples"),\n dedent(\n """\n >>> s = pd.Series([2, 3, np.nan, 10])\n >>> s.rolling(2).count()\n 0 NaN\n 1 2.0\n 2 1.0\n 3 1.0\n dtype: float64\n >>> s.rolling(3).count()\n 0 NaN\n 1 NaN\n 2 2.0\n 3 2.0\n dtype: float64\n >>> s.rolling(4).count()\n 0 NaN\n 1 NaN\n 2 NaN\n 3 3.0\n dtype: float64\n """\n ).replace("\n", "", 1),\n window_method="rolling",\n aggregation_description="count of non NaN observations",\n agg_method="count",\n )\n def count(self, numeric_only: bool = False):\n return super().count(numeric_only)\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n 
window_apply_parameters,\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n create_section_header("Examples"),\n dedent(\n """\\n >>> ser = pd.Series([1, 6, 5, 4])\n >>> ser.rolling(2).apply(lambda s: s.sum() - s.min())\n 0 NaN\n 1 6.0\n 2 6.0\n 3 5.0\n dtype: float64\n """\n ),\n window_method="rolling",\n aggregation_description="custom aggregation function",\n agg_method="apply",\n )\n def apply(\n self,\n func: Callable[..., Any],\n raw: bool = False,\n engine: Literal["cython", "numba"] | None = None,\n engine_kwargs: dict[str, bool] | None = None,\n args: tuple[Any, ...] | None = None,\n kwargs: dict[str, Any] | None = None,\n ):\n return super().apply(\n func,\n raw=raw,\n engine=engine,\n engine_kwargs=engine_kwargs,\n args=args,\n kwargs=kwargs,\n )\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n kwargs_numeric_only,\n window_agg_numba_parameters(),\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n create_section_header("Notes"),\n numba_notes,\n create_section_header("Examples"),\n dedent(\n """\n >>> s = pd.Series([1, 2, 3, 4, 5])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n 4 5\n dtype: int64\n\n >>> s.rolling(3).sum()\n 0 NaN\n 1 NaN\n 2 6.0\n 3 9.0\n 4 12.0\n dtype: float64\n\n >>> s.rolling(3, center=True).sum()\n 0 NaN\n 1 6.0\n 2 9.0\n 3 12.0\n 4 NaN\n dtype: float64\n\n For DataFrame, each sum is computed column-wise.\n\n >>> df = pd.DataFrame({{"A": s, "B": s ** 2}})\n >>> df\n A B\n 0 1 1\n 1 2 4\n 2 3 9\n 3 4 16\n 4 5 25\n\n >>> df.rolling(3).sum()\n A B\n 0 NaN NaN\n 1 NaN NaN\n 2 6.0 14.0\n 3 9.0 29.0\n 4 12.0 50.0\n """\n ).replace("\n", "", 1),\n window_method="rolling",\n aggregation_description="sum",\n agg_method="sum",\n )\n def sum(\n self,\n numeric_only: bool = False,\n engine: Literal["cython", "numba"] | None = None,\n engine_kwargs: dict[str, bool] | None = None,\n ):\n return super().sum(\n 
numeric_only=numeric_only,\n engine=engine,\n engine_kwargs=engine_kwargs,\n )\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n kwargs_numeric_only,\n window_agg_numba_parameters(),\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n create_section_header("Notes"),\n numba_notes,\n create_section_header("Examples"),\n dedent(\n """\\n >>> ser = pd.Series([1, 2, 3, 4])\n >>> ser.rolling(2).max()\n 0 NaN\n 1 2.0\n 2 3.0\n 3 4.0\n dtype: float64\n """\n ),\n window_method="rolling",\n aggregation_description="maximum",\n agg_method="max",\n )\n def max(\n self,\n numeric_only: bool = False,\n *args,\n engine: Literal["cython", "numba"] | None = None,\n engine_kwargs: dict[str, bool] | None = None,\n **kwargs,\n ):\n return super().max(\n numeric_only=numeric_only,\n engine=engine,\n engine_kwargs=engine_kwargs,\n )\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n kwargs_numeric_only,\n window_agg_numba_parameters(),\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n create_section_header("Notes"),\n numba_notes,\n create_section_header("Examples"),\n dedent(\n """\n Performing a rolling minimum with a window size of 3.\n\n >>> s = pd.Series([4, 3, 5, 2, 6])\n >>> s.rolling(3).min()\n 0 NaN\n 1 NaN\n 2 3.0\n 3 2.0\n 4 2.0\n dtype: float64\n """\n ).replace("\n", "", 1),\n window_method="rolling",\n aggregation_description="minimum",\n agg_method="min",\n )\n def min(\n self,\n numeric_only: bool = False,\n engine: Literal["cython", "numba"] | None = None,\n engine_kwargs: dict[str, bool] | None = None,\n ):\n return super().min(\n numeric_only=numeric_only,\n engine=engine,\n engine_kwargs=engine_kwargs,\n )\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n kwargs_numeric_only,\n window_agg_numba_parameters(),\n create_section_header("Returns"),\n template_returns,\n 
create_section_header("See Also"),\n template_see_also,\n create_section_header("Notes"),\n numba_notes,\n create_section_header("Examples"),\n dedent(\n """\n The below examples will show rolling mean calculations with window sizes of\n two and three, respectively.\n\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s.rolling(2).mean()\n 0 NaN\n 1 1.5\n 2 2.5\n 3 3.5\n dtype: float64\n\n >>> s.rolling(3).mean()\n 0 NaN\n 1 NaN\n 2 2.0\n 3 3.0\n dtype: float64\n """\n ).replace("\n", "", 1),\n window_method="rolling",\n aggregation_description="mean",\n agg_method="mean",\n )\n def mean(\n self,\n numeric_only: bool = False,\n engine: Literal["cython", "numba"] | None = None,\n engine_kwargs: dict[str, bool] | None = None,\n ):\n return super().mean(\n numeric_only=numeric_only,\n engine=engine,\n engine_kwargs=engine_kwargs,\n )\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n kwargs_numeric_only,\n window_agg_numba_parameters(),\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n create_section_header("Notes"),\n numba_notes,\n create_section_header("Examples"),\n dedent(\n """\n Compute the rolling median of a series with a window size of 3.\n\n >>> s = pd.Series([0, 1, 2, 3, 4])\n >>> s.rolling(3).median()\n 0 NaN\n 1 NaN\n 2 1.0\n 3 2.0\n 4 3.0\n dtype: float64\n """\n ).replace("\n", "", 1),\n window_method="rolling",\n aggregation_description="median",\n agg_method="median",\n )\n def median(\n self,\n numeric_only: bool = False,\n engine: Literal["cython", "numba"] | None = None,\n engine_kwargs: dict[str, bool] | None = None,\n ):\n return super().median(\n numeric_only=numeric_only,\n engine=engine,\n engine_kwargs=engine_kwargs,\n )\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n dedent(\n """\n ddof : int, default 1\n Delta Degrees of Freedom. 
The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n """\n ).replace("\n", "", 1),\n kwargs_numeric_only,\n window_agg_numba_parameters("1.4"),\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n "numpy.std : Equivalent method for NumPy array.\n",\n template_see_also,\n create_section_header("Notes"),\n dedent(\n """\n The default ``ddof`` of 1 used in :meth:`Series.std` is different\n than the default ``ddof`` of 0 in :func:`numpy.std`.\n\n A minimum of one period is required for the rolling calculation.\n\n """\n ).replace("\n", "", 1),\n create_section_header("Examples"),\n dedent(\n """\n >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])\n >>> s.rolling(3).std()\n 0 NaN\n 1 NaN\n 2 0.577350\n 3 1.000000\n 4 1.000000\n 5 1.154701\n 6 0.000000\n dtype: float64\n """\n ).replace("\n", "", 1),\n window_method="rolling",\n aggregation_description="standard deviation",\n agg_method="std",\n )\n def std(\n self,\n ddof: int = 1,\n numeric_only: bool = False,\n engine: Literal["cython", "numba"] | None = None,\n engine_kwargs: dict[str, bool] | None = None,\n ):\n return super().std(\n ddof=ddof,\n numeric_only=numeric_only,\n engine=engine,\n engine_kwargs=engine_kwargs,\n )\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n dedent(\n """\n ddof : int, default 1\n Delta Degrees of Freedom. 
The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n """\n ).replace("\n", "", 1),\n kwargs_numeric_only,\n window_agg_numba_parameters("1.4"),\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n "numpy.var : Equivalent method for NumPy array.\n",\n template_see_also,\n create_section_header("Notes"),\n dedent(\n """\n The default ``ddof`` of 1 used in :meth:`Series.var` is different\n than the default ``ddof`` of 0 in :func:`numpy.var`.\n\n A minimum of one period is required for the rolling calculation.\n\n """\n ).replace("\n", "", 1),\n create_section_header("Examples"),\n dedent(\n """\n >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])\n >>> s.rolling(3).var()\n 0 NaN\n 1 NaN\n 2 0.333333\n 3 1.000000\n 4 1.000000\n 5 1.333333\n 6 0.000000\n dtype: float64\n """\n ).replace("\n", "", 1),\n window_method="rolling",\n aggregation_description="variance",\n agg_method="var",\n )\n def var(\n self,\n ddof: int = 1,\n numeric_only: bool = False,\n engine: Literal["cython", "numba"] | None = None,\n engine_kwargs: dict[str, bool] | None = None,\n ):\n return super().var(\n ddof=ddof,\n numeric_only=numeric_only,\n engine=engine,\n engine_kwargs=engine_kwargs,\n )\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n kwargs_numeric_only,\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n "scipy.stats.skew : Third moment of a probability density.\n",\n template_see_also,\n create_section_header("Notes"),\n dedent(\n """\n A minimum of three periods is required for the rolling calculation.\n\n """\n ),\n create_section_header("Examples"),\n dedent(\n """\\n >>> ser = pd.Series([1, 5, 2, 7, 15, 6])\n >>> ser.rolling(3).skew().round(6)\n 0 NaN\n 1 NaN\n 2 1.293343\n 3 -0.585583\n 4 0.670284\n 5 1.652317\n dtype: float64\n """\n ),\n window_method="rolling",\n aggregation_description="unbiased skewness",\n agg_method="skew",\n )\n 
def skew(self, numeric_only: bool = False):\n return super().skew(numeric_only=numeric_only)\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n dedent(\n """\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n """\n ).replace("\n", "", 1),\n kwargs_numeric_only,\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n create_section_header("Notes"),\n "A minimum of one period is required for the calculation.\n\n",\n create_section_header("Examples"),\n dedent(\n """\n >>> s = pd.Series([0, 1, 2, 3])\n >>> s.rolling(2, min_periods=1).sem()\n 0 NaN\n 1 0.707107\n 2 0.707107\n 3 0.707107\n dtype: float64\n """\n ).replace("\n", "", 1),\n window_method="rolling",\n aggregation_description="standard error of mean",\n agg_method="sem",\n )\n def sem(self, ddof: int = 1, numeric_only: bool = False):\n # Raise here so error message says sem instead of std\n self._validate_numeric_only("sem", numeric_only)\n return self.std(numeric_only=numeric_only) / (\n self.count(numeric_only) - ddof\n ).pow(0.5)\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n kwargs_numeric_only,\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n "scipy.stats.kurtosis : Reference SciPy method.\n",\n template_see_also,\n create_section_header("Notes"),\n "A minimum of four periods is required for the calculation.\n\n",\n create_section_header("Examples"),\n dedent(\n """\n The example below will show a rolling calculation with a window size of\n four matching the equivalent function call using `scipy.stats`.\n\n >>> arr = [1, 2, 3, 4, 999]\n >>> import scipy.stats\n >>> print(f"{{scipy.stats.kurtosis(arr[:-1], bias=False):.6f}}")\n -1.200000\n >>> print(f"{{scipy.stats.kurtosis(arr[1:], bias=False):.6f}}")\n 3.999946\n >>> s = pd.Series(arr)\n >>> 
s.rolling(4).kurt()\n 0 NaN\n 1 NaN\n 2 NaN\n 3 -1.200000\n 4 3.999946\n dtype: float64\n """\n ).replace("\n", "", 1),\n window_method="rolling",\n aggregation_description="Fisher's definition of kurtosis without bias",\n agg_method="kurt",\n )\n def kurt(self, numeric_only: bool = False):\n return super().kurt(numeric_only=numeric_only)\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n dedent(\n """\n quantile : float\n Quantile to compute. 0 <= quantile <= 1.\n\n .. deprecated:: 2.1.0\n This will be renamed to 'q' in a future version.\n interpolation : {{'linear', 'lower', 'higher', 'midpoint', 'nearest'}}\n This optional parameter specifies the interpolation method to use,\n when the desired quantile lies between two data points `i` and `j`:\n\n * linear: `i + (j - i) * fraction`, where `fraction` is the\n fractional part of the index surrounded by `i` and `j`.\n * lower: `i`.\n * higher: `j`.\n * nearest: `i` or `j` whichever is nearest.\n * midpoint: (`i` + `j`) / 2.\n """\n ).replace("\n", "", 1),\n kwargs_numeric_only,\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n create_section_header("Examples"),\n dedent(\n """\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s.rolling(2).quantile(.4, interpolation='lower')\n 0 NaN\n 1 1.0\n 2 2.0\n 3 3.0\n dtype: float64\n\n >>> s.rolling(2).quantile(.4, interpolation='midpoint')\n 0 NaN\n 1 1.5\n 2 2.5\n 3 3.5\n dtype: float64\n """\n ).replace("\n", "", 1),\n window_method="rolling",\n aggregation_description="quantile",\n agg_method="quantile",\n )\n @deprecate_kwarg(old_arg_name="quantile", new_arg_name="q")\n def quantile(\n self,\n q: float,\n interpolation: QuantileInterpolation = "linear",\n numeric_only: bool = False,\n ):\n return super().quantile(\n q=q,\n interpolation=interpolation,\n numeric_only=numeric_only,\n )\n\n @doc(\n template_header,\n ".. 
versionadded:: 1.4.0 \n\n",\n create_section_header("Parameters"),\n dedent(\n """\n method : {{'average', 'min', 'max'}}, default 'average'\n How to rank the group of records that have the same value (i.e. ties):\n\n * average: average rank of the group\n * min: lowest rank in the group\n * max: highest rank in the group\n\n ascending : bool, default True\n Whether or not the elements should be ranked in ascending order.\n pct : bool, default False\n Whether or not to display the returned rankings in percentile\n form.\n """\n ).replace("\n", "", 1),\n kwargs_numeric_only,\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n create_section_header("Examples"),\n dedent(\n """\n >>> s = pd.Series([1, 4, 2, 3, 5, 3])\n >>> s.rolling(3).rank()\n 0 NaN\n 1 NaN\n 2 2.0\n 3 2.0\n 4 3.0\n 5 1.5\n dtype: float64\n\n >>> s.rolling(3).rank(method="max")\n 0 NaN\n 1 NaN\n 2 2.0\n 3 2.0\n 4 3.0\n 5 2.0\n dtype: float64\n\n >>> s.rolling(3).rank(method="min")\n 0 NaN\n 1 NaN\n 2 2.0\n 3 2.0\n 4 3.0\n 5 1.0\n dtype: float64\n """\n ).replace("\n", "", 1),\n window_method="rolling",\n aggregation_description="rank",\n agg_method="rank",\n )\n def rank(\n self,\n method: WindowingRankType = "average",\n ascending: bool = True,\n pct: bool = False,\n numeric_only: bool = False,\n ):\n return super().rank(\n method=method,\n ascending=ascending,\n pct=pct,\n numeric_only=numeric_only,\n )\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n dedent(\n """\n other : Series or DataFrame, optional\n If not supplied then will default to self and produce pairwise\n output.\n pairwise : bool, default None\n If False then only matching columns between self and other will be\n used and the output will be a DataFrame.\n If True then all pairwise combinations will be calculated and the\n output will be a MultiIndexed DataFrame in the case of DataFrame\n inputs. 
In the case of missing elements, only complete pairwise\n observations will be used.\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n """\n ).replace("\n", "", 1),\n kwargs_numeric_only,\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n template_see_also,\n create_section_header("Examples"),\n dedent(\n """\\n >>> ser1 = pd.Series([1, 2, 3, 4])\n >>> ser2 = pd.Series([1, 4, 5, 8])\n >>> ser1.rolling(2).cov(ser2)\n 0 NaN\n 1 1.5\n 2 0.5\n 3 1.5\n dtype: float64\n """\n ),\n window_method="rolling",\n aggregation_description="sample covariance",\n agg_method="cov",\n )\n def cov(\n self,\n other: DataFrame | Series | None = None,\n pairwise: bool | None = None,\n ddof: int = 1,\n numeric_only: bool = False,\n ):\n return super().cov(\n other=other,\n pairwise=pairwise,\n ddof=ddof,\n numeric_only=numeric_only,\n )\n\n @doc(\n template_header,\n create_section_header("Parameters"),\n dedent(\n """\n other : Series or DataFrame, optional\n If not supplied then will default to self and produce pairwise\n output.\n pairwise : bool, default None\n If False then only matching columns between self and other will be\n used and the output will be a DataFrame.\n If True then all pairwise combinations will be calculated and the\n output will be a MultiIndexed DataFrame in the case of DataFrame\n inputs. In the case of missing elements, only complete pairwise\n observations will be used.\n ddof : int, default 1\n Delta Degrees of Freedom. 
The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n """\n ).replace("\n", "", 1),\n kwargs_numeric_only,\n create_section_header("Returns"),\n template_returns,\n create_section_header("See Also"),\n dedent(\n """\n cov : Similar method to calculate covariance.\n numpy.corrcoef : NumPy Pearson's correlation calculation.\n """\n ).replace("\n", "", 1),\n template_see_also,\n create_section_header("Notes"),\n dedent(\n """\n This function uses Pearson's definition of correlation\n (https://en.wikipedia.org/wiki/Pearson_correlation_coefficient).\n\n When `other` is not specified, the output will be self correlation (e.g.\n all 1's), except for :class:`~pandas.DataFrame` inputs with `pairwise`\n set to `True`.\n\n Function will return ``NaN`` for correlations of equal valued sequences;\n this is the result of a 0/0 division error.\n\n When `pairwise` is set to `False`, only matching columns between `self` and\n `other` will be used.\n\n When `pairwise` is set to `True`, the output will be a MultiIndex DataFrame\n with the original index on the first level, and the `other` DataFrame\n columns on the second level.\n\n In the case of missing elements, only complete pairwise observations\n will be used.\n\n """\n ).replace("\n", "", 1),\n create_section_header("Examples"),\n dedent(\n """\n The below example shows a rolling calculation with a window size of\n four matching the equivalent function call using :meth:`numpy.corrcoef`.\n\n >>> v1 = [3, 3, 3, 5, 8]\n >>> v2 = [3, 4, 4, 4, 8]\n >>> np.corrcoef(v1[:-1], v2[:-1])\n array([[1. , 0.33333333],\n [0.33333333, 1. ]])\n >>> np.corrcoef(v1[1:], v2[1:])\n array([[1. , 0.9169493],\n [0.9169493, 1. 
]])\n >>> s1 = pd.Series(v1)\n >>> s2 = pd.Series(v2)\n >>> s1.rolling(4).corr(s2)\n 0 NaN\n 1 NaN\n 2 NaN\n 3 0.333333\n 4 0.916949\n dtype: float64\n\n The below example shows a similar rolling calculation on a\n DataFrame using the pairwise option.\n\n >>> matrix = np.array([[51., 35.],\n ... [49., 30.],\n ... [47., 32.],\n ... [46., 31.],\n ... [50., 36.]])\n >>> np.corrcoef(matrix[:-1, 0], matrix[:-1, 1])\n array([[1. , 0.6263001],\n [0.6263001, 1. ]])\n >>> np.corrcoef(matrix[1:, 0], matrix[1:, 1])\n array([[1. , 0.55536811],\n [0.55536811, 1. ]])\n >>> df = pd.DataFrame(matrix, columns=['X', 'Y'])\n >>> df\n X Y\n 0 51.0 35.0\n 1 49.0 30.0\n 2 47.0 32.0\n 3 46.0 31.0\n 4 50.0 36.0\n >>> df.rolling(4).corr(pairwise=True)\n X Y\n 0 X NaN NaN\n Y NaN NaN\n 1 X NaN NaN\n Y NaN NaN\n 2 X NaN NaN\n Y NaN NaN\n 3 X 1.000000 0.626300\n Y 0.626300 1.000000\n 4 X 1.000000 0.555368\n Y 0.555368 1.000000\n """\n ).replace("\n", "", 1),\n window_method="rolling",\n aggregation_description="correlation",\n agg_method="corr",\n )\n def corr(\n self,\n other: DataFrame | Series | None = None,\n pairwise: bool | None = None,\n ddof: int = 1,\n numeric_only: bool = False,\n ):\n return super().corr(\n other=other,\n pairwise=pairwise,\n ddof=ddof,\n numeric_only=numeric_only,\n )\n\n\nRolling.__doc__ = Window.__doc__\n\n\nclass RollingGroupby(BaseWindowGroupby, Rolling):\n """\n Provide a rolling groupby implementation.\n """\n\n _attributes = Rolling._attributes + BaseWindowGroupby._attributes\n\n def _get_window_indexer(self) -> GroupbyIndexer:\n """\n Return an indexer class that will compute the window start and end bounds\n\n Returns\n -------\n GroupbyIndexer\n """\n rolling_indexer: type[BaseIndexer]\n indexer_kwargs: dict[str, Any] | None = None\n index_array = self._index_array\n if isinstance(self.window, BaseIndexer):\n rolling_indexer = type(self.window)\n indexer_kwargs = self.window.__dict__.copy()\n assert isinstance(indexer_kwargs, dict) # for mypy\n # We'll 
be using the index of each group later, so drop any index baked into
            # the user-supplied indexer's kwargs before re-instantiating it per group
            indexer_kwargs.pop("index_array", None)
            window = self.window
        elif self._win_freq_i8 is not None:
            # Offset-based window: variable-sized windows over the i8 frequency
            rolling_indexer = VariableWindowIndexer
            # error: Incompatible types in assignment (expression has type
            # "int", variable has type "BaseIndexer")
            window = self._win_freq_i8  # type: ignore[assignment]
        else:
            # Plain integer window size
            rolling_indexer = FixedWindowIndexer
            window = self.window
        window_indexer = GroupbyIndexer(
            index_array=index_array,
            window_size=window,
            groupby_indices=self._grouper.indices,
            window_indexer=rolling_indexer,
            indexer_kwargs=indexer_kwargs,
        )
        return window_indexer

    def _validate_datetimelike_monotonic(self):
        """
        Validate that each group in self._on is monotonic.

        Unlike the non-grouped check, monotonicity is only required *within*
        each group, so every group's slice of ``self._on`` is checked
        independently (either increasing or decreasing is acceptable).

        Raises
        ------
        ValueError
            If ``self._on`` contains NaT, or if any single group's values
            are not monotonic.
        """
        # GH 46061
        if self._on.hasnans:
            # NaT makes the monotonicity of a datetimelike axis undefined
            self._raise_monotonic_error("values must not have NaT")
        for group_indices in self._grouper.indices.values():
            group_on = self._on.take(group_indices)
            if not (
                group_on.is_monotonic_increasing or group_on.is_monotonic_decreasing
            ):
                # Report against the user-visible column name when "on" was
                # given explicitly; otherwise blame the index.
                on = "index" if self.on is None else self.on
                raise ValueError(
                    f"Each group within {on} must be monotonic. "
                    f"Sort the values in {on} first."
                )
.venv\Lib\site-packages\pandas\core\window\rolling.py
rolling.py
Python
95,504
0.75
0.103754
0.042667
node-utils
390
2024-11-16T08:51:43.980426
BSD-3-Clause
false
bcaa3730f7c9890a49ac71fdd00965ee
"""
Public entry points for pandas window operations.

Re-exports the user-facing window classes (exponentially weighted,
expanding, and rolling windows, plus their groupby variants) so they can
be imported from ``pandas.core.window`` directly.
"""
from pandas.core.window.ewm import (
    ExponentialMovingWindow,
    ExponentialMovingWindowGroupby,
)
from pandas.core.window.expanding import (
    Expanding,
    ExpandingGroupby,
)
from pandas.core.window.rolling import (
    Rolling,
    RollingGroupby,
    Window,
)

__all__ = [
    "Expanding",
    "ExpandingGroupby",
    "ExponentialMovingWindow",
    "ExponentialMovingWindowGroupby",
    "Rolling",
    "RollingGroupby",
    "Window",
]
.venv\Lib\site-packages\pandas\core\window\__init__.py
__init__.py
Python
450
0.85
0
0
node-utils
499
2025-06-11T19:05:48.840924
Apache-2.0
false
b91be2221c051ce1846c9be3d1380172
\n\n
.venv\Lib\site-packages\pandas\core\window\__pycache__\common.cpython-313.pyc
common.cpython-313.pyc
Other
7,528
0.95
0.013158
0.027027
node-utils
983
2024-09-10T15:56:06.184456
Apache-2.0
false
d86212c86b71a6decf6cce0c44275ae4
\n\n
.venv\Lib\site-packages\pandas\core\window\__pycache__\doc.cpython-313.pyc
doc.cpython-313.pyc
Other
4,892
0.95
0.135922
0.152941
node-utils
547
2024-06-01T08:27:31.085409
Apache-2.0
false
8ec1dc701ddf231b70d31b8a8f05eff9
\n\n
.venv\Lib\site-packages\pandas\core\window\__pycache__\ewm.cpython-313.pyc
ewm.cpython-313.pyc
Other
36,612
0.95
0.026856
0.007156
vue-tools
68
2023-08-01T08:41:49.799529
BSD-3-Clause
false
97f04e6c83987741c865609ad1dbeab3
\n\n
.venv\Lib\site-packages\pandas\core\window\__pycache__\expanding.cpython-313.pyc
expanding.cpython-313.pyc
Other
24,542
0.95
0.02439
0.015152
node-utils
834
2023-07-29T04:45:38.242220
BSD-3-Clause
false
030945a9e65d662bdccd9abf488b9d9a
\n\n
.venv\Lib\site-packages\pandas\core\window\__pycache__\numba_.cpython-313.pyc
numba_.cpython-313.pyc
Other
11,783
0.95
0.10396
0.005291
python-kit
513
2023-09-03T03:25:01.325289
MIT
false
a9697eb0ccc3c8bd3b29548b0e7fb9c0
\n\n
.venv\Lib\site-packages\pandas\core\window\__pycache__\online.cpython-313.pyc
online.cpython-313.pyc
Other
4,879
0.95
0.036364
0
vue-tools
41
2023-08-28T06:40:12.783124
GPL-3.0
false
11e8900e2d23a023d4837c829619a4e0
\n\n
.venv\Lib\site-packages\pandas\core\window\__pycache__\rolling.cpython-313.pyc
rolling.cpython-313.pyc
Other
98,036
0.75
0.035692
0.014716
awesome-app
708
2023-09-05T15:58:18.711179
MIT
false
6aaf4ec52f3c3a34cfec5d1b42fb8c23
\n\n
.venv\Lib\site-packages\pandas\core\window\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
570
0.7
0
0
node-utils
810
2025-04-30T21:42:47.508086
BSD-3-Clause
false
c99af35eab518fd457fcda3a0259b4f9
from __future__ import annotations

import functools
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
)

if TYPE_CHECKING:
    from pandas._typing import Scalar

import numpy as np

from pandas.compat._optional import import_optional_dependency


@functools.cache
def generate_apply_looper(func, nopython=True, nogil=True, parallel=False):
    """
    JIT-compile a looper that applies ``func`` to every row or column of a
    2D ndarray.

    The returned function has signature ``(values, axis)``: with ``axis == 0``
    ``func`` is applied to each column, otherwise to each row. The result of
    the first application determines the output shape. Results are cached by
    ``functools.cache``, keyed on ``(func, nopython, nogil, parallel)``, so
    repeated calls do not re-trigger numba compilation.
    """
    if TYPE_CHECKING:
        import numba
    else:
        numba = import_optional_dependency("numba")
    nb_compat_func = numba.extending.register_jitable(func)

    @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
    def nb_looper(values, axis):
        # Operate on the first row/col in order to get
        # the output shape
        if axis == 0:
            first_elem = values[:, 0]
            dim0 = values.shape[1]
        else:
            first_elem = values[0]
            dim0 = values.shape[0]
        res0 = nb_compat_func(first_elem)
        # Use np.asarray to get shape for
        # https://github.com/numba/numba/issues/4202#issuecomment-1185981507
        buf_shape = (dim0,) + np.atleast_1d(np.asarray(res0)).shape
        if axis == 0:
            buf_shape = buf_shape[::-1]
        buff = np.empty(buf_shape)

        # The first row/col was already computed above; start the (possibly
        # parallel) loop at 1 and write res0 into slot 0 directly.
        if axis == 1:
            buff[0] = res0
            for i in numba.prange(1, values.shape[0]):
                buff[i] = nb_compat_func(values[i])
        else:
            buff[:, 0] = res0
            for j in numba.prange(1, values.shape[1]):
                buff[:, j] = nb_compat_func(values[:, j])
        return buff

    return nb_looper


@functools.cache
def make_looper(func, result_dtype, is_grouped_kernel, nopython, nogil, parallel):
    """
    JIT-compile a column looper around a 1D window/groupby kernel ``func``.

    Two loopers exist, selected by ``is_grouped_kernel``: the grouped variant
    takes ``(values, labels, ngroups, min_periods, *args)``, the window
    variant ``(values, start, end, min_periods, *args)``. Both apply ``func``
    to each row of ``values`` and return ``(result, na_positions)`` where
    ``na_positions`` maps a row index to the positions that did not satisfy
    ``min_periods``. Cached via ``functools.cache`` to avoid recompilation.
    """
    if TYPE_CHECKING:
        import numba
    else:
        numba = import_optional_dependency("numba")

    if is_grouped_kernel:

        @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
        def column_looper(
            values: np.ndarray,
            labels: np.ndarray,
            ngroups: int,
            min_periods: int,
            *args,
        ):
            result = np.empty((values.shape[0], ngroups), dtype=result_dtype)
            na_positions = {}
            for i in numba.prange(values.shape[0]):
                output, na_pos = func(
                    values[i], result_dtype, labels, ngroups, min_periods, *args
                )
                result[i] = output
                if len(na_pos) > 0:
                    na_positions[i] = np.array(na_pos)
            return result, na_positions

    else:

        @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
        def column_looper(
            values: np.ndarray,
            start: np.ndarray,
            end: np.ndarray,
            min_periods: int,
            *args,
        ):
            result = np.empty((values.shape[0], len(start)), dtype=result_dtype)
            na_positions = {}
            for i in numba.prange(values.shape[0]):
                output, na_pos = func(
                    values[i], result_dtype, start, end, min_periods, *args
                )
                result[i] = output
                if len(na_pos) > 0:
                    na_positions[i] = np.array(na_pos)
            return result, na_positions

    return column_looper


# Maps an input dtype to the dtype the numba kernels compute in:
# integers are widened to 64-bit, floats/complex to double precision.
default_dtype_mapping: dict[np.dtype, Any] = {
    np.dtype("int8"): np.int64,
    np.dtype("int16"): np.int64,
    np.dtype("int32"): np.int64,
    np.dtype("int64"): np.int64,
    np.dtype("uint8"): np.uint64,
    np.dtype("uint16"): np.uint64,
    np.dtype("uint32"): np.uint64,
    np.dtype("uint64"): np.uint64,
    np.dtype("float32"): np.float64,
    np.dtype("float64"): np.float64,
    np.dtype("complex64"): np.complex128,
    np.dtype("complex128"): np.complex128,
}


# TODO: Preserve complex dtypes

# Mapping for kernels whose result is inherently floating point
# (e.g. mean/var); note complex inputs currently degrade to float64.
float_dtype_mapping: dict[np.dtype, Any] = {
    np.dtype("int8"): np.float64,
    np.dtype("int16"): np.float64,
    np.dtype("int32"): np.float64,
    np.dtype("int64"): np.float64,
    np.dtype("uint8"): np.float64,
    np.dtype("uint16"): np.float64,
    np.dtype("uint32"): np.float64,
    np.dtype("uint64"): np.float64,
    np.dtype("float32"): np.float64,
    np.dtype("float64"): np.float64,
    np.dtype("complex64"): np.float64,
    np.dtype("complex128"): np.float64,
}

# Mapping for kernels that preserve the input dtype exactly (e.g. min/max).
identity_dtype_mapping: dict[np.dtype, Any] = {
    np.dtype("int8"): np.int8,
    np.dtype("int16"): np.int16,
    np.dtype("int32"): np.int32,
    np.dtype("int64"): np.int64,
    np.dtype("uint8"): np.uint8,
    np.dtype("uint16"): np.uint16,
    np.dtype("uint32"): np.uint32,
    np.dtype("uint64"): np.uint64,
    np.dtype("float32"): np.float32,
    np.dtype("float64"): np.float64,
    np.dtype("complex64"): np.complex64,
    np.dtype("complex128"): np.complex128,
}


def generate_shared_aggregator(
    func: Callable[..., Scalar],
    dtype_mapping: dict[np.dtype, np.dtype],
    is_grouped_kernel: bool,
    nopython: bool,
    nogil: bool,
    parallel: bool,
):
    """
    Generate a Numba function that loops over the columns 2D object and applies
    a 1D numba kernel over each column.

    Parameters
    ----------
    func : function
        aggregation function to be applied to each column
    dtype_mapping : dict
        Maps the dtype of the input values to the result dtype; the lookup
        is unconditional, so every expected input dtype must be a key
        (see the module-level ``*_dtype_mapping`` dicts).
    is_grouped_kernel : bool
        Whether func operates using the group labels (True)
        or using starts/ends arrays

        If true, you also need to pass the number of groups to this function
    nopython : bool
        nopython to be passed into numba.jit
    nogil : bool
        nogil to be passed into numba.jit
    parallel : bool
        parallel to be passed into numba.jit

    Returns
    -------
    Numba function
    """

    # A wrapper around the looper function,
    # to dispatch based on dtype since numba is unable to do that in nopython mode

    # It also post-processes the values by inserting nans where number of observations
    # is less than min_periods
    # Cannot do this in numba nopython mode
    # (you'll run into type-unification error when you cast int -> float)
    def looper_wrapper(
        values,
        start=None,
        end=None,
        labels=None,
        ngroups=None,
        min_periods: int = 0,
        **kwargs,
    ):
        result_dtype = dtype_mapping[values.dtype]
        # make_looper is cached, so the jit compile only happens once per
        # (func, dtype, ...) combination
        column_looper = make_looper(
            func, result_dtype, is_grouped_kernel, nopython, nogil, parallel
        )
        # Need to unpack kwargs since numba only supports *args
        if is_grouped_kernel:
            result, na_positions = column_looper(
                values, labels, ngroups, min_periods, *kwargs.values()
            )
        else:
            result, na_positions = column_looper(
                values, start, end, min_periods, *kwargs.values()
            )
        if result.dtype.kind == "i":
            # Look if na_positions is not empty
            # If so, convert the whole block
            # This is OK since int dtype cannot hold nan,
            # so if min_periods not satisfied for 1 col, it is not satisfied for
            # all columns at that index
            for na_pos in na_positions.values():
                if len(na_pos) > 0:
                    result = result.astype("float64")
                    break
        # TODO: Optimize this
        for i, na_pos in na_positions.items():
            if len(na_pos) > 0:
                result[i, na_pos] = np.nan
        return result

    return looper_wrapper
.venv\Lib\site-packages\pandas\core\_numba\executor.py
executor.py
Python
7,530
0.95
0.154812
0.100478
node-utils
282
2025-06-23T03:56:01.397558
BSD-3-Clause
false
c9c1d956307b7a8b404a016d5300e0a6
# Disable type checking for this module since numba's internals\n# are not typed, and we use numba's internals via its extension API\n# mypy: ignore-errors\n"""\nUtility classes/functions to let numba recognize\npandas Index/Series/DataFrame\n\nMostly vendored from https://github.com/numba/numba/blob/main/numba/tests/pdlike_usecase.py\n"""\n\nfrom __future__ import annotations\n\nfrom contextlib import contextmanager\nimport operator\n\nimport numba\nfrom numba import types\nfrom numba.core import cgutils\nfrom numba.core.datamodel import models\nfrom numba.core.extending import (\n NativeValue,\n box,\n lower_builtin,\n make_attribute_wrapper,\n overload,\n overload_attribute,\n overload_method,\n register_model,\n type_callable,\n typeof_impl,\n unbox,\n)\nfrom numba.core.imputils import impl_ret_borrowed\nimport numpy as np\n\nfrom pandas._libs import lib\n\nfrom pandas.core.indexes.base import Index\nfrom pandas.core.indexing import _iLocIndexer\nfrom pandas.core.internals import SingleBlockManager\nfrom pandas.core.series import Series\n\n\n# Helper function to hack around fact that Index casts numpy string dtype to object\n#\n# Idea is to set an attribute on a Index called _numba_data\n# that is the original data, or the object data casted to numpy string dtype,\n# with a context manager that is unset afterwards\n@contextmanager\ndef set_numba_data(index: Index):\n numba_data = index._data\n if numba_data.dtype in (object, "string"):\n numba_data = np.asarray(numba_data)\n if not lib.is_string_array(numba_data):\n raise ValueError(\n "The numba engine only supports using string or numeric column names"\n )\n numba_data = numba_data.astype("U")\n try:\n index._numba_data = numba_data\n yield index\n finally:\n del index._numba_data\n\n\n# TODO: Range index support\n# (this currently lowers OK, but does not round-trip)\nclass IndexType(types.Type):\n """\n The type class for Index objects.\n """\n\n def __init__(self, dtype, layout, pyclass: any) -> None:\n 
self.pyclass = pyclass\n name = f"index({dtype}, {layout})"\n self.dtype = dtype\n self.layout = layout\n super().__init__(name)\n\n @property\n def key(self):\n return self.pyclass, self.dtype, self.layout\n\n @property\n def as_array(self):\n return types.Array(self.dtype, 1, self.layout)\n\n def copy(self, dtype=None, ndim: int = 1, layout=None):\n assert ndim == 1\n if dtype is None:\n dtype = self.dtype\n layout = layout or self.layout\n return type(self)(dtype, layout, self.pyclass)\n\n\nclass SeriesType(types.Type):\n """\n The type class for Series objects.\n """\n\n def __init__(self, dtype, index, namety) -> None:\n assert isinstance(index, IndexType)\n self.dtype = dtype\n self.index = index\n self.values = types.Array(self.dtype, 1, "C")\n self.namety = namety\n name = f"series({dtype}, {index}, {namety})"\n super().__init__(name)\n\n @property\n def key(self):\n return self.dtype, self.index, self.namety\n\n @property\n def as_array(self):\n return self.values\n\n def copy(self, dtype=None, ndim: int = 1, layout: str = "C"):\n assert ndim == 1\n assert layout == "C"\n if dtype is None:\n dtype = self.dtype\n return type(self)(dtype, self.index, self.namety)\n\n\n@typeof_impl.register(Index)\ndef typeof_index(val, c):\n """\n This will assume that only strings are in object dtype\n index.\n (you should check this before this gets lowered down to numba)\n """\n # arrty = typeof_impl(val._data, c)\n arrty = typeof_impl(val._numba_data, c)\n assert arrty.ndim == 1\n return IndexType(arrty.dtype, arrty.layout, type(val))\n\n\n@typeof_impl.register(Series)\ndef typeof_series(val, c):\n index = typeof_impl(val.index, c)\n arrty = typeof_impl(val.values, c)\n namety = typeof_impl(val.name, c)\n assert arrty.ndim == 1\n assert arrty.layout == "C"\n return SeriesType(arrty.dtype, index, namety)\n\n\n@type_callable(Series)\ndef type_series_constructor(context):\n def typer(data, index, name=None):\n if isinstance(index, IndexType) and isinstance(data, 
types.Array):\n assert data.ndim == 1\n if name is None:\n name = types.intp\n return SeriesType(data.dtype, index, name)\n\n return typer\n\n\n@type_callable(Index)\ndef type_index_constructor(context):\n def typer(data, hashmap=None):\n if isinstance(data, types.Array):\n assert data.layout == "C"\n assert data.ndim == 1\n assert hashmap is None or isinstance(hashmap, types.DictType)\n return IndexType(data.dtype, layout=data.layout, pyclass=Index)\n\n return typer\n\n\n# Backend extensions for Index and Series and Frame\n@register_model(IndexType)\nclass IndexModel(models.StructModel):\n def __init__(self, dmm, fe_type) -> None:\n # We don't want the numpy string scalar type in our hashmap\n members = [\n ("data", fe_type.as_array),\n # This is an attempt to emulate our hashtable code with a numba\n # typed dict\n # It maps from values in the index to their integer positions in the array\n ("hashmap", types.DictType(fe_type.dtype, types.intp)),\n # Pointer to the Index object this was created from, or that it\n # boxes to\n # https://numba.discourse.group/t/qst-how-to-cache-the-boxing-of-an-object/2128/2?u=lithomas1\n ("parent", types.pyobject),\n ]\n models.StructModel.__init__(self, dmm, fe_type, members)\n\n\n@register_model(SeriesType)\nclass SeriesModel(models.StructModel):\n def __init__(self, dmm, fe_type) -> None:\n members = [\n ("index", fe_type.index),\n ("values", fe_type.as_array),\n ("name", fe_type.namety),\n ]\n models.StructModel.__init__(self, dmm, fe_type, members)\n\n\nmake_attribute_wrapper(IndexType, "data", "_data")\nmake_attribute_wrapper(IndexType, "hashmap", "hashmap")\n\nmake_attribute_wrapper(SeriesType, "index", "index")\nmake_attribute_wrapper(SeriesType, "values", "values")\nmake_attribute_wrapper(SeriesType, "name", "name")\n\n\n@lower_builtin(Series, types.Array, IndexType)\ndef pdseries_constructor(context, builder, sig, args):\n data, index = args\n series = cgutils.create_struct_proxy(sig.return_type)(context, builder)\n 
series.index = index\n series.values = data\n series.name = context.get_constant(types.intp, 0)\n return impl_ret_borrowed(context, builder, sig.return_type, series._getvalue())\n\n\n@lower_builtin(Series, types.Array, IndexType, types.intp)\n@lower_builtin(Series, types.Array, IndexType, types.float64)\n@lower_builtin(Series, types.Array, IndexType, types.unicode_type)\ndef pdseries_constructor_with_name(context, builder, sig, args):\n data, index, name = args\n series = cgutils.create_struct_proxy(sig.return_type)(context, builder)\n series.index = index\n series.values = data\n series.name = name\n return impl_ret_borrowed(context, builder, sig.return_type, series._getvalue())\n\n\n@lower_builtin(Index, types.Array, types.DictType, types.pyobject)\ndef index_constructor_2arg(context, builder, sig, args):\n (data, hashmap, parent) = args\n index = cgutils.create_struct_proxy(sig.return_type)(context, builder)\n\n index.data = data\n index.hashmap = hashmap\n index.parent = parent\n return impl_ret_borrowed(context, builder, sig.return_type, index._getvalue())\n\n\n@lower_builtin(Index, types.Array, types.DictType)\ndef index_constructor_2arg_parent(context, builder, sig, args):\n # Basically same as index_constructor_1arg, but also lets you specify the\n # parent object\n (data, hashmap) = args\n index = cgutils.create_struct_proxy(sig.return_type)(context, builder)\n\n index.data = data\n index.hashmap = hashmap\n return impl_ret_borrowed(context, builder, sig.return_type, index._getvalue())\n\n\n@lower_builtin(Index, types.Array)\ndef index_constructor_1arg(context, builder, sig, args):\n from numba.typed import Dict\n\n key_type = sig.return_type.dtype\n value_type = types.intp\n\n def index_impl(data):\n return Index(data, Dict.empty(key_type, value_type))\n\n return context.compile_internal(builder, index_impl, sig, args)\n\n\n# Helper to convert the unicodecharseq (numpy string scalar) into a unicode_type\n# (regular string)\ndef maybe_cast_str(x):\n # 
Dummy function that numba can overload\n pass\n\n\n@overload(maybe_cast_str)\ndef maybe_cast_str_impl(x):\n """Converts numba UnicodeCharSeq (numpy string scalar) -> unicode type (string).\n Is a no-op for other types."""\n if isinstance(x, types.UnicodeCharSeq):\n return lambda x: str(x)\n else:\n return lambda x: x\n\n\n@unbox(IndexType)\ndef unbox_index(typ, obj, c):\n """\n Convert a Index object to a native structure.\n\n Note: Object dtype is not allowed here\n """\n data_obj = c.pyapi.object_getattr_string(obj, "_numba_data")\n index = cgutils.create_struct_proxy(typ)(c.context, c.builder)\n # If we see an object array, assume its been validated as only containing strings\n # We still need to do the conversion though\n index.data = c.unbox(typ.as_array, data_obj).value\n typed_dict_obj = c.pyapi.unserialize(c.pyapi.serialize_object(numba.typed.Dict))\n # Create an empty typed dict in numba for the hashmap for indexing\n # equiv of numba.typed.Dict.empty(typ.dtype, types.intp)\n arr_type_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ.dtype))\n intp_type_obj = c.pyapi.unserialize(c.pyapi.serialize_object(types.intp))\n hashmap_obj = c.pyapi.call_method(\n typed_dict_obj, "empty", (arr_type_obj, intp_type_obj)\n )\n index.hashmap = c.unbox(types.DictType(typ.dtype, types.intp), hashmap_obj).value\n # Set the parent for speedy boxing.\n index.parent = obj\n\n # Decrefs\n c.pyapi.decref(data_obj)\n c.pyapi.decref(arr_type_obj)\n c.pyapi.decref(intp_type_obj)\n c.pyapi.decref(typed_dict_obj)\n\n return NativeValue(index._getvalue())\n\n\n@unbox(SeriesType)\ndef unbox_series(typ, obj, c):\n """\n Convert a Series object to a native structure.\n """\n index_obj = c.pyapi.object_getattr_string(obj, "index")\n values_obj = c.pyapi.object_getattr_string(obj, "values")\n name_obj = c.pyapi.object_getattr_string(obj, "name")\n\n series = cgutils.create_struct_proxy(typ)(c.context, c.builder)\n series.index = c.unbox(typ.index, index_obj).value\n series.values = 
c.unbox(typ.values, values_obj).value\n series.name = c.unbox(typ.namety, name_obj).value\n\n # Decrefs\n c.pyapi.decref(index_obj)\n c.pyapi.decref(values_obj)\n c.pyapi.decref(name_obj)\n\n return NativeValue(series._getvalue())\n\n\n@box(IndexType)\ndef box_index(typ, val, c):\n """\n Convert a native index structure to a Index object.\n\n If our native index is of a numpy string dtype, we'll cast it to\n object.\n """\n # First build a Numpy array object, then wrap it in a Index\n index = cgutils.create_struct_proxy(typ)(c.context, c.builder, value=val)\n\n res = cgutils.alloca_once_value(c.builder, index.parent)\n\n # Does parent exist?\n # (it means already boxed once, or Index same as original df.index or df.columns)\n # xref https://github.com/numba/numba/blob/596e8a55334cc46854e3192766e643767bd7c934/numba/core/boxing.py#L593C17-L593C17\n with c.builder.if_else(cgutils.is_not_null(c.builder, index.parent)) as (\n has_parent,\n otherwise,\n ):\n with has_parent:\n c.pyapi.incref(index.parent)\n with otherwise:\n # TODO: preserve the original class for the index\n # Also need preserve the name of the Index\n # class_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ.pyclass))\n class_obj = c.pyapi.unserialize(c.pyapi.serialize_object(Index))\n array_obj = c.box(typ.as_array, index.data)\n if isinstance(typ.dtype, types.UnicodeCharSeq):\n # We converted to numpy string dtype, convert back\n # to object since _simple_new won't do that for uss\n object_str_obj = c.pyapi.unserialize(c.pyapi.serialize_object("object"))\n array_obj = c.pyapi.call_method(array_obj, "astype", (object_str_obj,))\n c.pyapi.decref(object_str_obj)\n # this is basically Index._simple_new(array_obj, name_obj) in python\n index_obj = c.pyapi.call_method(class_obj, "_simple_new", (array_obj,))\n index.parent = index_obj\n c.builder.store(index_obj, res)\n\n # Decrefs\n c.pyapi.decref(class_obj)\n c.pyapi.decref(array_obj)\n return c.builder.load(res)\n\n\n@box(SeriesType)\ndef 
box_series(typ, val, c):\n """\n Convert a native series structure to a Series object.\n """\n series = cgutils.create_struct_proxy(typ)(c.context, c.builder, value=val)\n series_const_obj = c.pyapi.unserialize(c.pyapi.serialize_object(Series._from_mgr))\n mgr_const_obj = c.pyapi.unserialize(\n c.pyapi.serialize_object(SingleBlockManager.from_array)\n )\n index_obj = c.box(typ.index, series.index)\n array_obj = c.box(typ.as_array, series.values)\n name_obj = c.box(typ.namety, series.name)\n # This is basically equivalent of\n # pd.Series(data=array_obj, index=index_obj)\n # To improve perf, we will construct the Series from a manager\n # object to avoid checks.\n # We'll also set the name attribute manually to avoid validation\n mgr_obj = c.pyapi.call_function_objargs(\n mgr_const_obj,\n (\n array_obj,\n index_obj,\n ),\n )\n mgr_axes_obj = c.pyapi.object_getattr_string(mgr_obj, "axes")\n # Series._constructor_from_mgr(mgr, axes)\n series_obj = c.pyapi.call_function_objargs(\n series_const_obj, (mgr_obj, mgr_axes_obj)\n )\n c.pyapi.object_setattr_string(series_obj, "_name", name_obj)\n\n # Decrefs\n c.pyapi.decref(series_const_obj)\n c.pyapi.decref(mgr_axes_obj)\n c.pyapi.decref(mgr_obj)\n c.pyapi.decref(mgr_const_obj)\n c.pyapi.decref(index_obj)\n c.pyapi.decref(array_obj)\n c.pyapi.decref(name_obj)\n\n return series_obj\n\n\n# Add common series reductions (e.g. mean, sum),\n# and also add common binops (e.g. 
add, sub, mul, div)\ndef generate_series_reduction(ser_reduction, ser_method):\n @overload_method(SeriesType, ser_reduction)\n def series_reduction(series):\n def series_reduction_impl(series):\n return ser_method(series.values)\n\n return series_reduction_impl\n\n return series_reduction\n\n\ndef generate_series_binop(binop):\n @overload(binop)\n def series_binop(series1, value):\n if isinstance(series1, SeriesType):\n if isinstance(value, SeriesType):\n\n def series_binop_impl(series1, series2):\n # TODO: Check index matching?\n return Series(\n binop(series1.values, series2.values),\n series1.index,\n series1.name,\n )\n\n return series_binop_impl\n else:\n\n def series_binop_impl(series1, value):\n return Series(\n binop(series1.values, value), series1.index, series1.name\n )\n\n return series_binop_impl\n\n return series_binop\n\n\nseries_reductions = [\n ("sum", np.sum),\n ("mean", np.mean),\n # Disabled due to discrepancies between numba std. dev\n # and pandas std. dev (no way to specify dof)\n # ("std", np.std),\n # ("var", np.var),\n ("min", np.min),\n ("max", np.max),\n]\nfor reduction, reduction_method in series_reductions:\n generate_series_reduction(reduction, reduction_method)\n\nseries_binops = [operator.add, operator.sub, operator.mul, operator.truediv]\n\nfor ser_binop in series_binops:\n generate_series_binop(ser_binop)\n\n\n# get_loc on Index\n@overload_method(IndexType, "get_loc")\ndef index_get_loc(index, item):\n def index_get_loc_impl(index, item):\n # Initialize the hash table if not initialized\n if len(index.hashmap) == 0:\n for i, val in enumerate(index._data):\n index.hashmap[val] = i\n return index.hashmap[item]\n\n return index_get_loc_impl\n\n\n# Indexing for Series/Index\n@overload(operator.getitem)\ndef series_indexing(series, item):\n if isinstance(series, SeriesType):\n\n def series_getitem(series, item):\n loc = series.index.get_loc(item)\n return series.iloc[loc]\n\n return series_getitem\n\n\n@overload(operator.getitem)\ndef 
index_indexing(index, idx):\n if isinstance(index, IndexType):\n\n def index_getitem(index, idx):\n return index._data[idx]\n\n return index_getitem\n\n\nclass IlocType(types.Type):\n def __init__(self, obj_type) -> None:\n self.obj_type = obj_type\n name = f"iLocIndexer({obj_type})"\n super().__init__(name=name)\n\n @property\n def key(self):\n return self.obj_type\n\n\n@typeof_impl.register(_iLocIndexer)\ndef typeof_iloc(val, c):\n objtype = typeof_impl(val.obj, c)\n return IlocType(objtype)\n\n\n@type_callable(_iLocIndexer)\ndef type_iloc_constructor(context):\n def typer(obj):\n if isinstance(obj, SeriesType):\n return IlocType(obj)\n\n return typer\n\n\n@lower_builtin(_iLocIndexer, SeriesType)\ndef iloc_constructor(context, builder, sig, args):\n (obj,) = args\n iloc_indexer = cgutils.create_struct_proxy(sig.return_type)(context, builder)\n iloc_indexer.obj = obj\n return impl_ret_borrowed(\n context, builder, sig.return_type, iloc_indexer._getvalue()\n )\n\n\n@register_model(IlocType)\nclass ILocModel(models.StructModel):\n def __init__(self, dmm, fe_type) -> None:\n members = [("obj", fe_type.obj_type)]\n models.StructModel.__init__(self, dmm, fe_type, members)\n\n\nmake_attribute_wrapper(IlocType, "obj", "obj")\n\n\n@overload_attribute(SeriesType, "iloc")\ndef series_iloc(series):\n def get(series):\n return _iLocIndexer(series)\n\n return get\n\n\n@overload(operator.getitem)\ndef iloc_getitem(iloc_indexer, i):\n if isinstance(iloc_indexer, IlocType):\n\n def getitem_impl(iloc_indexer, i):\n return iloc_indexer.obj.values[i]\n\n return getitem_impl\n
.venv\Lib\site-packages\pandas\core\_numba\extensions.py
extensions.py
Python
18,430
0.95
0.164103
0.127706
python-kit
219
2024-02-14T04:05:37.057528
Apache-2.0
false
c04dc038e5623f792aef62f43ac87234
"""\nNumba 1D mean kernels that can be shared by\n* Dataframe / Series\n* groupby\n* rolling / expanding\n\nMirrors pandas/_libs/window/aggregation.pyx\n"""\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport numba\nimport numpy as np\n\nfrom pandas.core._numba.kernels.shared import is_monotonic_increasing\nfrom pandas.core._numba.kernels.sum_ import grouped_kahan_sum\n\nif TYPE_CHECKING:\n from pandas._typing import npt\n\n\n@numba.jit(nopython=True, nogil=True, parallel=False)\ndef add_mean(\n val: float,\n nobs: int,\n sum_x: float,\n neg_ct: int,\n compensation: float,\n num_consecutive_same_value: int,\n prev_value: float,\n) -> tuple[int, float, int, float, int, float]:\n if not np.isnan(val):\n nobs += 1\n y = val - compensation\n t = sum_x + y\n compensation = t - sum_x - y\n sum_x = t\n if val < 0:\n neg_ct += 1\n\n if val == prev_value:\n num_consecutive_same_value += 1\n else:\n num_consecutive_same_value = 1\n prev_value = val\n\n return nobs, sum_x, neg_ct, compensation, num_consecutive_same_value, prev_value\n\n\n@numba.jit(nopython=True, nogil=True, parallel=False)\ndef remove_mean(\n val: float, nobs: int, sum_x: float, neg_ct: int, compensation: float\n) -> tuple[int, float, int, float]:\n if not np.isnan(val):\n nobs -= 1\n y = -val - compensation\n t = sum_x + y\n compensation = t - sum_x - y\n sum_x = t\n if val < 0:\n neg_ct -= 1\n return nobs, sum_x, neg_ct, compensation\n\n\n@numba.jit(nopython=True, nogil=True, parallel=False)\ndef sliding_mean(\n values: np.ndarray,\n result_dtype: np.dtype,\n start: np.ndarray,\n end: np.ndarray,\n min_periods: int,\n) -> tuple[np.ndarray, list[int]]:\n N = len(start)\n nobs = 0\n sum_x = 0.0\n neg_ct = 0\n compensation_add = 0.0\n compensation_remove = 0.0\n\n is_monotonic_increasing_bounds = is_monotonic_increasing(\n start\n ) and is_monotonic_increasing(end)\n\n output = np.empty(N, dtype=result_dtype)\n\n for i in range(N):\n s = start[i]\n e = end[i]\n if i == 0 or not 
is_monotonic_increasing_bounds:\n prev_value = values[s]\n num_consecutive_same_value = 0\n\n for j in range(s, e):\n val = values[j]\n (\n nobs,\n sum_x,\n neg_ct,\n compensation_add,\n num_consecutive_same_value,\n prev_value,\n ) = add_mean(\n val,\n nobs,\n sum_x,\n neg_ct,\n compensation_add,\n num_consecutive_same_value,\n prev_value, # pyright: ignore[reportGeneralTypeIssues]\n )\n else:\n for j in range(start[i - 1], s):\n val = values[j]\n nobs, sum_x, neg_ct, compensation_remove = remove_mean(\n val, nobs, sum_x, neg_ct, compensation_remove\n )\n\n for j in range(end[i - 1], e):\n val = values[j]\n (\n nobs,\n sum_x,\n neg_ct,\n compensation_add,\n num_consecutive_same_value,\n prev_value,\n ) = add_mean(\n val,\n nobs,\n sum_x,\n neg_ct,\n compensation_add,\n num_consecutive_same_value,\n prev_value, # pyright: ignore[reportGeneralTypeIssues]\n )\n\n if nobs >= min_periods and nobs > 0:\n result = sum_x / nobs\n if num_consecutive_same_value >= nobs:\n result = prev_value\n elif neg_ct == 0 and result < 0:\n result = 0\n elif neg_ct == nobs and result > 0:\n result = 0\n else:\n result = np.nan\n\n output[i] = result\n\n if not is_monotonic_increasing_bounds:\n nobs = 0\n sum_x = 0.0\n neg_ct = 0\n compensation_remove = 0.0\n\n # na_position is empty list since float64 can already hold nans\n # Do list comprehension, since numba cannot figure out that na_pos is\n # empty list of ints on its own\n na_pos = [0 for i in range(0)]\n return output, na_pos\n\n\n@numba.jit(nopython=True, nogil=True, parallel=False)\ndef grouped_mean(\n values: np.ndarray,\n result_dtype: np.dtype,\n labels: npt.NDArray[np.intp],\n ngroups: int,\n min_periods: int,\n) -> tuple[np.ndarray, list[int]]:\n output, nobs_arr, comp_arr, consecutive_counts, prev_vals = grouped_kahan_sum(\n values, result_dtype, labels, ngroups\n )\n\n # Post-processing, replace sums that don't satisfy min_periods\n for lab in range(ngroups):\n nobs = nobs_arr[lab]\n num_consecutive_same_value = 
consecutive_counts[lab]\n prev_value = prev_vals[lab]\n sum_x = output[lab]\n if nobs >= min_periods:\n if num_consecutive_same_value >= nobs:\n result = prev_value * nobs\n else:\n result = sum_x\n else:\n result = np.nan\n result /= nobs\n output[lab] = result\n\n # na_position is empty list since float64 can already hold nans\n # Do list comprehension, since numba cannot figure out that na_pos is\n # empty list of ints on its own\n na_pos = [0 for i in range(0)]\n return output, na_pos\n
.venv\Lib\site-packages\pandas\core\_numba\kernels\mean_.py
mean_.py
Python
5,646
0.95
0.117347
0.058824
awesome-app
103
2024-04-29T08:19:59.573032
BSD-3-Clause
false
0145abd2f3a1976a660a845615b37e26
"""\nNumba 1D min/max kernels that can be shared by\n* Dataframe / Series\n* groupby\n* rolling / expanding\n\nMirrors pandas/_libs/window/aggregation.pyx\n"""\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport numba\nimport numpy as np\n\nif TYPE_CHECKING:\n from pandas._typing import npt\n\n\n@numba.jit(nopython=True, nogil=True, parallel=False)\ndef sliding_min_max(\n values: np.ndarray,\n result_dtype: np.dtype,\n start: np.ndarray,\n end: np.ndarray,\n min_periods: int,\n is_max: bool,\n) -> tuple[np.ndarray, list[int]]:\n N = len(start)\n nobs = 0\n output = np.empty(N, dtype=result_dtype)\n na_pos = []\n # Use deque once numba supports it\n # https://github.com/numba/numba/issues/7417\n Q: list = []\n W: list = []\n for i in range(N):\n curr_win_size = end[i] - start[i]\n if i == 0:\n st = start[i]\n else:\n st = end[i - 1]\n\n for k in range(st, end[i]):\n ai = values[k]\n if not np.isnan(ai):\n nobs += 1\n elif is_max:\n ai = -np.inf\n else:\n ai = np.inf\n # Discard previous entries if we find new min or max\n if is_max:\n while Q and ((ai >= values[Q[-1]]) or values[Q[-1]] != values[Q[-1]]):\n Q.pop()\n else:\n while Q and ((ai <= values[Q[-1]]) or values[Q[-1]] != values[Q[-1]]):\n Q.pop()\n Q.append(k)\n W.append(k)\n\n # Discard entries outside and left of current window\n while Q and Q[0] <= start[i] - 1:\n Q.pop(0)\n while W and W[0] <= start[i] - 1:\n if not np.isnan(values[W[0]]):\n nobs -= 1\n W.pop(0)\n\n # Save output based on index in input value array\n if Q and curr_win_size > 0 and nobs >= min_periods:\n output[i] = values[Q[0]]\n else:\n if values.dtype.kind != "i":\n output[i] = np.nan\n else:\n na_pos.append(i)\n\n return output, na_pos\n\n\n@numba.jit(nopython=True, nogil=True, parallel=False)\ndef grouped_min_max(\n values: np.ndarray,\n result_dtype: np.dtype,\n labels: npt.NDArray[np.intp],\n ngroups: int,\n min_periods: int,\n is_max: bool,\n) -> tuple[np.ndarray, list[int]]:\n N = len(labels)\n nobs = 
np.zeros(ngroups, dtype=np.int64)\n na_pos = []\n output = np.empty(ngroups, dtype=result_dtype)\n\n for i in range(N):\n lab = labels[i]\n val = values[i]\n if lab < 0:\n continue\n\n if values.dtype.kind == "i" or not np.isnan(val):\n nobs[lab] += 1\n else:\n # NaN value cannot be a min/max value\n continue\n\n if nobs[lab] == 1:\n # First element in group, set output equal to this\n output[lab] = val\n continue\n\n if is_max:\n if val > output[lab]:\n output[lab] = val\n else:\n if val < output[lab]:\n output[lab] = val\n\n # Set labels that don't satisfy min_periods as np.nan\n for lab, count in enumerate(nobs):\n if count < min_periods:\n na_pos.append(lab)\n\n return output, na_pos\n
.venv\Lib\site-packages\pandas\core\_numba\kernels\min_max_.py
min_max_.py
Python
3,284
0.95
0.2
0.102804
vue-tools
348
2023-11-03T06:14:56.894531
BSD-3-Clause
false
bf114c58d3874004032b56cecf7c1372
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport numba\n\nif TYPE_CHECKING:\n import numpy as np\n\n\n@numba.jit(\n # error: Any? not callable\n numba.boolean(numba.int64[:]), # type: ignore[misc]\n nopython=True,\n nogil=True,\n parallel=False,\n)\ndef is_monotonic_increasing(bounds: np.ndarray) -> bool:\n """Check if int64 values are monotonically increasing."""\n n = len(bounds)\n if n < 2:\n return True\n prev = bounds[0]\n for i in range(1, n):\n cur = bounds[i]\n if cur < prev:\n return False\n prev = cur\n return True\n
.venv\Lib\site-packages\pandas\core\_numba\kernels\shared.py
shared.py
Python
611
0.95
0.206897
0.041667
python-kit
369
2024-04-05T01:57:34.414895
BSD-3-Clause
false
891528130f48ebae8f6ca9849bf8bc06
"""\nNumba 1D sum kernels that can be shared by\n* Dataframe / Series\n* groupby\n* rolling / expanding\n\nMirrors pandas/_libs/window/aggregation.pyx\n"""\nfrom __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n Any,\n)\n\nimport numba\nfrom numba.extending import register_jitable\nimport numpy as np\n\nif TYPE_CHECKING:\n from pandas._typing import npt\n\nfrom pandas.core._numba.kernels.shared import is_monotonic_increasing\n\n\n@numba.jit(nopython=True, nogil=True, parallel=False)\ndef add_sum(\n val: Any,\n nobs: int,\n sum_x: Any,\n compensation: Any,\n num_consecutive_same_value: int,\n prev_value: Any,\n) -> tuple[int, Any, Any, int, Any]:\n if not np.isnan(val):\n nobs += 1\n y = val - compensation\n t = sum_x + y\n compensation = t - sum_x - y\n sum_x = t\n\n if val == prev_value:\n num_consecutive_same_value += 1\n else:\n num_consecutive_same_value = 1\n prev_value = val\n\n return nobs, sum_x, compensation, num_consecutive_same_value, prev_value\n\n\n@numba.jit(nopython=True, nogil=True, parallel=False)\ndef remove_sum(\n val: Any, nobs: int, sum_x: Any, compensation: Any\n) -> tuple[int, Any, Any]:\n if not np.isnan(val):\n nobs -= 1\n y = -val - compensation\n t = sum_x + y\n compensation = t - sum_x - y\n sum_x = t\n return nobs, sum_x, compensation\n\n\n@numba.jit(nopython=True, nogil=True, parallel=False)\ndef sliding_sum(\n values: np.ndarray,\n result_dtype: np.dtype,\n start: np.ndarray,\n end: np.ndarray,\n min_periods: int,\n) -> tuple[np.ndarray, list[int]]:\n dtype = values.dtype\n\n na_val: object = np.nan\n if dtype.kind == "i":\n na_val = 0\n\n N = len(start)\n nobs = 0\n sum_x = 0\n compensation_add = 0\n compensation_remove = 0\n na_pos = []\n\n is_monotonic_increasing_bounds = is_monotonic_increasing(\n start\n ) and is_monotonic_increasing(end)\n\n output = np.empty(N, dtype=result_dtype)\n\n for i in range(N):\n s = start[i]\n e = end[i]\n if i == 0 or not is_monotonic_increasing_bounds:\n prev_value = 
values[s]\n num_consecutive_same_value = 0\n\n for j in range(s, e):\n val = values[j]\n (\n nobs,\n sum_x,\n compensation_add,\n num_consecutive_same_value,\n prev_value,\n ) = add_sum(\n val,\n nobs,\n sum_x,\n compensation_add,\n num_consecutive_same_value,\n prev_value,\n )\n else:\n for j in range(start[i - 1], s):\n val = values[j]\n nobs, sum_x, compensation_remove = remove_sum(\n val, nobs, sum_x, compensation_remove\n )\n\n for j in range(end[i - 1], e):\n val = values[j]\n (\n nobs,\n sum_x,\n compensation_add,\n num_consecutive_same_value,\n prev_value,\n ) = add_sum(\n val,\n nobs,\n sum_x,\n compensation_add,\n num_consecutive_same_value,\n prev_value,\n )\n\n if nobs == 0 == min_periods:\n result: object = 0\n elif nobs >= min_periods:\n if num_consecutive_same_value >= nobs:\n result = prev_value * nobs\n else:\n result = sum_x\n else:\n result = na_val\n if dtype.kind == "i":\n na_pos.append(i)\n\n output[i] = result\n\n if not is_monotonic_increasing_bounds:\n nobs = 0\n sum_x = 0\n compensation_remove = 0\n\n return output, na_pos\n\n\n# Mypy/pyright don't like the fact that the decorator is untyped\n@register_jitable # type: ignore[misc]\ndef grouped_kahan_sum(\n values: np.ndarray,\n result_dtype: np.dtype,\n labels: npt.NDArray[np.intp],\n ngroups: int,\n) -> tuple[\n np.ndarray, npt.NDArray[np.int64], np.ndarray, npt.NDArray[np.int64], np.ndarray\n]:\n N = len(labels)\n\n nobs_arr = np.zeros(ngroups, dtype=np.int64)\n comp_arr = np.zeros(ngroups, dtype=values.dtype)\n consecutive_counts = np.zeros(ngroups, dtype=np.int64)\n prev_vals = np.zeros(ngroups, dtype=values.dtype)\n output = np.zeros(ngroups, dtype=result_dtype)\n\n for i in range(N):\n lab = labels[i]\n val = values[i]\n\n if lab < 0:\n continue\n\n sum_x = output[lab]\n nobs = nobs_arr[lab]\n compensation_add = comp_arr[lab]\n num_consecutive_same_value = consecutive_counts[lab]\n prev_value = prev_vals[lab]\n\n (\n nobs,\n sum_x,\n compensation_add,\n num_consecutive_same_value,\n 
prev_value,\n ) = add_sum(\n val,\n nobs,\n sum_x,\n compensation_add,\n num_consecutive_same_value,\n prev_value,\n )\n\n output[lab] = sum_x\n consecutive_counts[lab] = num_consecutive_same_value\n prev_vals[lab] = prev_value\n comp_arr[lab] = compensation_add\n nobs_arr[lab] = nobs\n return output, nobs_arr, comp_arr, consecutive_counts, prev_vals\n\n\n@numba.jit(nopython=True, nogil=True, parallel=False)\ndef grouped_sum(\n values: np.ndarray,\n result_dtype: np.dtype,\n labels: npt.NDArray[np.intp],\n ngroups: int,\n min_periods: int,\n) -> tuple[np.ndarray, list[int]]:\n na_pos = []\n\n output, nobs_arr, comp_arr, consecutive_counts, prev_vals = grouped_kahan_sum(\n values, result_dtype, labels, ngroups\n )\n\n # Post-processing, replace sums that don't satisfy min_periods\n for lab in range(ngroups):\n nobs = nobs_arr[lab]\n num_consecutive_same_value = consecutive_counts[lab]\n prev_value = prev_vals[lab]\n sum_x = output[lab]\n if nobs >= min_periods:\n if num_consecutive_same_value >= nobs:\n result = prev_value * nobs\n else:\n result = sum_x\n else:\n result = sum_x # Don't change val, will be replaced by nan later\n na_pos.append(lab)\n output[lab] = result\n\n return output, na_pos\n
.venv\Lib\site-packages\pandas\core\_numba\kernels\sum_.py
sum_.py
Python
6,488
0.95
0.098361
0.024155
vue-tools
507
2023-08-28T18:16:09.780557
GPL-3.0
false
bde3c3e61333c470c727687dafa8be2c
"""\nNumba 1D var kernels that can be shared by\n* Dataframe / Series\n* groupby\n* rolling / expanding\n\nMirrors pandas/_libs/window/aggregation.pyx\n"""\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport numba\nimport numpy as np\n\nif TYPE_CHECKING:\n from pandas._typing import npt\n\nfrom pandas.core._numba.kernels.shared import is_monotonic_increasing\n\n\n@numba.jit(nopython=True, nogil=True, parallel=False)\ndef add_var(\n val: float,\n nobs: int,\n mean_x: float,\n ssqdm_x: float,\n compensation: float,\n num_consecutive_same_value: int,\n prev_value: float,\n) -> tuple[int, float, float, float, int, float]:\n if not np.isnan(val):\n if val == prev_value:\n num_consecutive_same_value += 1\n else:\n num_consecutive_same_value = 1\n prev_value = val\n\n nobs += 1\n prev_mean = mean_x - compensation\n y = val - compensation\n t = y - mean_x\n compensation = t + mean_x - y\n delta = t\n if nobs:\n mean_x += delta / nobs\n else:\n mean_x = 0\n ssqdm_x += (val - prev_mean) * (val - mean_x)\n return nobs, mean_x, ssqdm_x, compensation, num_consecutive_same_value, prev_value\n\n\n@numba.jit(nopython=True, nogil=True, parallel=False)\ndef remove_var(\n val: float, nobs: int, mean_x: float, ssqdm_x: float, compensation: float\n) -> tuple[int, float, float, float]:\n if not np.isnan(val):\n nobs -= 1\n if nobs:\n prev_mean = mean_x - compensation\n y = val - compensation\n t = y - mean_x\n compensation = t + mean_x - y\n delta = t\n mean_x -= delta / nobs\n ssqdm_x -= (val - prev_mean) * (val - mean_x)\n else:\n mean_x = 0\n ssqdm_x = 0\n return nobs, mean_x, ssqdm_x, compensation\n\n\n@numba.jit(nopython=True, nogil=True, parallel=False)\ndef sliding_var(\n values: np.ndarray,\n result_dtype: np.dtype,\n start: np.ndarray,\n end: np.ndarray,\n min_periods: int,\n ddof: int = 1,\n) -> tuple[np.ndarray, list[int]]:\n N = len(start)\n nobs = 0\n mean_x = 0.0\n ssqdm_x = 0.0\n compensation_add = 0.0\n compensation_remove = 0.0\n\n 
min_periods = max(min_periods, 1)\n is_monotonic_increasing_bounds = is_monotonic_increasing(\n start\n ) and is_monotonic_increasing(end)\n\n output = np.empty(N, dtype=result_dtype)\n\n for i in range(N):\n s = start[i]\n e = end[i]\n if i == 0 or not is_monotonic_increasing_bounds:\n prev_value = values[s]\n num_consecutive_same_value = 0\n\n for j in range(s, e):\n val = values[j]\n (\n nobs,\n mean_x,\n ssqdm_x,\n compensation_add,\n num_consecutive_same_value,\n prev_value,\n ) = add_var(\n val,\n nobs,\n mean_x,\n ssqdm_x,\n compensation_add,\n num_consecutive_same_value,\n prev_value,\n )\n else:\n for j in range(start[i - 1], s):\n val = values[j]\n nobs, mean_x, ssqdm_x, compensation_remove = remove_var(\n val, nobs, mean_x, ssqdm_x, compensation_remove\n )\n\n for j in range(end[i - 1], e):\n val = values[j]\n (\n nobs,\n mean_x,\n ssqdm_x,\n compensation_add,\n num_consecutive_same_value,\n prev_value,\n ) = add_var(\n val,\n nobs,\n mean_x,\n ssqdm_x,\n compensation_add,\n num_consecutive_same_value,\n prev_value,\n )\n\n if nobs >= min_periods and nobs > ddof:\n if nobs == 1 or num_consecutive_same_value >= nobs:\n result = 0.0\n else:\n result = ssqdm_x / (nobs - ddof)\n else:\n result = np.nan\n\n output[i] = result\n\n if not is_monotonic_increasing_bounds:\n nobs = 0\n mean_x = 0.0\n ssqdm_x = 0.0\n compensation_remove = 0.0\n\n # na_position is empty list since float64 can already hold nans\n # Do list comprehension, since numba cannot figure out that na_pos is\n # empty list of ints on its own\n na_pos = [0 for i in range(0)]\n return output, na_pos\n\n\n@numba.jit(nopython=True, nogil=True, parallel=False)\ndef grouped_var(\n values: np.ndarray,\n result_dtype: np.dtype,\n labels: npt.NDArray[np.intp],\n ngroups: int,\n min_periods: int,\n ddof: int = 1,\n) -> tuple[np.ndarray, list[int]]:\n N = len(labels)\n\n nobs_arr = np.zeros(ngroups, dtype=np.int64)\n comp_arr = np.zeros(ngroups, dtype=values.dtype)\n consecutive_counts = 
np.zeros(ngroups, dtype=np.int64)\n prev_vals = np.zeros(ngroups, dtype=values.dtype)\n output = np.zeros(ngroups, dtype=result_dtype)\n means = np.zeros(ngroups, dtype=result_dtype)\n\n for i in range(N):\n lab = labels[i]\n val = values[i]\n\n if lab < 0:\n continue\n\n mean_x = means[lab]\n ssqdm_x = output[lab]\n nobs = nobs_arr[lab]\n compensation_add = comp_arr[lab]\n num_consecutive_same_value = consecutive_counts[lab]\n prev_value = prev_vals[lab]\n\n (\n nobs,\n mean_x,\n ssqdm_x,\n compensation_add,\n num_consecutive_same_value,\n prev_value,\n ) = add_var(\n val,\n nobs,\n mean_x,\n ssqdm_x,\n compensation_add,\n num_consecutive_same_value,\n prev_value,\n )\n\n output[lab] = ssqdm_x\n means[lab] = mean_x\n consecutive_counts[lab] = num_consecutive_same_value\n prev_vals[lab] = prev_value\n comp_arr[lab] = compensation_add\n nobs_arr[lab] = nobs\n\n # Post-processing, replace vars that don't satisfy min_periods\n for lab in range(ngroups):\n nobs = nobs_arr[lab]\n num_consecutive_same_value = consecutive_counts[lab]\n ssqdm_x = output[lab]\n if nobs >= min_periods and nobs > ddof:\n if nobs == 1 or num_consecutive_same_value >= nobs:\n result = 0.0\n else:\n result = ssqdm_x / (nobs - ddof)\n else:\n result = np.nan\n output[lab] = result\n\n # Second pass to get the std.dev\n # na_position is empty list since float64 can already hold nans\n # Do list comprehension, since numba cannot figure out that na_pos is\n # empty list of ints on its own\n na_pos = [0 for i in range(0)]\n return output, na_pos\n
.venv\Lib\site-packages\pandas\core\_numba\kernels\var_.py
var_.py
Python
6,973
0.95
0.102041
0.051402
node-utils
802
2024-08-22T03:52:12.513428
MIT
false
6c552328be83bc11b123a076258c94a0
from pandas.core._numba.kernels.mean_ import (\n grouped_mean,\n sliding_mean,\n)\nfrom pandas.core._numba.kernels.min_max_ import (\n grouped_min_max,\n sliding_min_max,\n)\nfrom pandas.core._numba.kernels.sum_ import (\n grouped_sum,\n sliding_sum,\n)\nfrom pandas.core._numba.kernels.var_ import (\n grouped_var,\n sliding_var,\n)\n\n__all__ = [\n "sliding_mean",\n "grouped_mean",\n "sliding_sum",\n "grouped_sum",\n "sliding_var",\n "grouped_var",\n "sliding_min_max",\n "grouped_min_max",\n]\n
.venv\Lib\site-packages\pandas\core\_numba\kernels\__init__.py
__init__.py
Python
520
0.85
0
0
node-utils
592
2024-02-06T06:29:25.440825
Apache-2.0
false
af25740031f3c867e3b989b08d940a07
\n\n
.venv\Lib\site-packages\pandas\core\_numba\kernels\__pycache__\mean_.cpython-313.pyc
mean_.cpython-313.pyc
Other
5,004
0.8
0
0.043478
awesome-app
706
2024-03-27T07:30:54.267747
MIT
false
46bf8205c5922ea5f3713ca3b30f4cad
\n\n
.venv\Lib\site-packages\pandas\core\_numba\kernels\__pycache__\min_max_.cpython-313.pyc
min_max_.cpython-313.pyc
Other
4,427
0.8
0
0.045455
node-utils
670
2024-07-07T06:03:04.111865
GPL-3.0
false
497de12aec8addd7f3d8d348c307eb5a
\n\n
.venv\Lib\site-packages\pandas\core\_numba\kernels\__pycache__\shared.cpython-313.pyc
shared.cpython-313.pyc
Other
995
0.7
0.083333
0
python-kit
131
2025-02-14T22:04:44.152897
Apache-2.0
false
7f186f84c01486d8d3b8e546d46d8182
\n\n
.venv\Lib\site-packages\pandas\core\_numba\kernels\__pycache__\sum_.cpython-313.pyc
sum_.cpython-313.pyc
Other
6,059
0.8
0
0.031915
react-lib
376
2023-08-09T22:24:31.637286
BSD-3-Clause
false
11ba21c8dda2f56e7dd744e5f350a44d
\n\n
.venv\Lib\site-packages\pandas\core\_numba\kernels\__pycache__\var_.cpython-313.pyc
var_.cpython-313.pyc
Other
6,099
0.8
0
0.032609
react-lib
359
2024-06-26T01:03:52.440553
BSD-3-Clause
false
2e3971831a893b46a7d255c9c3344ff4
\n\n
.venv\Lib\site-packages\pandas\core\_numba\kernels\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
652
0.8
0
0
awesome-app
541
2025-03-19T17:33:00.238858
GPL-3.0
false
dad8ecd426cc7b7cd0d195af53b1a5a9
\n\n
.venv\Lib\site-packages\pandas\core\_numba\__pycache__\executor.cpython-313.pyc
executor.cpython-313.pyc
Other
9,554
0.95
0.02924
0
python-kit
52
2024-11-20T07:32:28.382695
Apache-2.0
false
298b2662fb19026ba60a883a5e7cd65d
\n\n
.venv\Lib\site-packages\pandas\core\_numba\__pycache__\extensions.cpython-313.pyc
extensions.cpython-313.pyc
Other
28,642
0.95
0.023585
0.009852
awesome-app
750
2023-11-13T04:59:16.511848
GPL-3.0
false
b8fea041515156018b83193316fd1e75
\n\n
.venv\Lib\site-packages\pandas\core\_numba\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
193
0.7
0
0
node-utils
247
2023-07-16T10:27:32.841965
GPL-3.0
false
9026ed3b206f31b73adae1cb4e8d1b84
\n\n
.venv\Lib\site-packages\pandas\core\__pycache__\accessor.cpython-313.pyc
accessor.cpython-313.pyc
Other
12,672
0.95
0.154206
0.016216
python-kit
377
2024-07-03T20:02:02.024015
BSD-3-Clause
false
f82bcc4027c1d643c01789b586dbbd7b
\n\n
.venv\Lib\site-packages\pandas\core\__pycache__\algorithms.cpython-313.pyc
algorithms.cpython-313.pyc
Other
54,177
0.75
0.047131
0.012896
react-lib
126
2023-07-14T21:54:33.008969
GPL-3.0
false
c0db5739e5833d959f9f1edeea2fb8ef
\n\n
.venv\Lib\site-packages\pandas\core\__pycache__\api.cpython-313.pyc
api.cpython-313.pyc
Other
2,728
0.8
0
0
awesome-app
803
2023-09-24T00:39:09.347267
MIT
false
860b5339905b7523d6068b7ef61baee5
\n\n
.venv\Lib\site-packages\pandas\core\__pycache__\apply.cpython-313.pyc
apply.cpython-313.pyc
Other
75,589
0.75
0.061176
0.00641
vue-tools
779
2023-12-04T18:57:04.717001
GPL-3.0
false
a4c7faf3f96e972bc168e605f3d62bf5
\n\n
.venv\Lib\site-packages\pandas\core\__pycache__\arraylike.cpython-313.pyc
arraylike.cpython-313.pyc
Other
20,784
0.8
0.033816
0.010989
vue-tools
374
2024-08-13T21:16:13.085333
MIT
false
bf2b71d9b377bb56ee36d170644cbf15
\n\n
.venv\Lib\site-packages\pandas\core\__pycache__\base.cpython-313.pyc
base.cpython-313.pyc
Other
43,686
0.95
0.050633
0.006158
node-utils
547
2024-05-14T14:51:12.967131
MIT
false
5addc3b34f1f0a94a6a6da9f2bbcd865
\n\n
.venv\Lib\site-packages\pandas\core\__pycache__\common.cpython-313.pyc
common.cpython-313.pyc
Other
23,090
0.95
0.079114
0.010526
vue-tools
940
2023-10-16T01:41:59.801343
MIT
false
a88702d35ea39fb9cb908d7f8cc1261f
\n\n
.venv\Lib\site-packages\pandas\core\__pycache__\config_init.cpython-313.pyc
config_init.cpython-313.pyc
Other
27,962
0.95
0.091633
0
vue-tools
830
2025-05-20T22:13:20.180776
MIT
false
cdcd91d08167389a17ab0348cf8bc8ee
\n\n
.venv\Lib\site-packages\pandas\core\__pycache__\construction.cpython-313.pyc
construction.cpython-313.pyc
Other
26,814
0.95
0.10515
0.009975
vue-tools
910
2024-02-24T23:08:29.082577
Apache-2.0
false
91bc22c395e02f119e22965d78a3727e
\n\n
.venv\Lib\site-packages\pandas\core\__pycache__\flags.cpython-313.pyc
flags.cpython-313.pyc
Other
4,819
0.8
0.047059
0
awesome-app
292
2025-06-25T07:19:30.848565
GPL-3.0
false
7c87e1dfc5b47473e774cc55b7722b3a
\n\n
.venv\Lib\site-packages\pandas\core\__pycache__\indexing.cpython-313.pyc
indexing.cpython-313.pyc
Other
101,760
0.75
0.057334
0.008688
awesome-app
474
2024-03-23T06:50:28.176764
Apache-2.0
false
50d57c909c50099d46a8c56290f2fe5c
\n\n
.venv\Lib\site-packages\pandas\core\__pycache__\missing.cpython-313.pyc
missing.cpython-313.pyc
Other
35,668
0.95
0.054945
0.011952
react-lib
763
2024-12-05T02:57:39.300291
GPL-3.0
false
e7dc3da7beb4693cb21839d56c20ee34
\n\n
.venv\Lib\site-packages\pandas\core\__pycache__\nanops.cpython-313.pyc
nanops.cpython-313.pyc
Other
57,428
0.75
0.050901
0.003529
react-lib
722
2024-03-17T08:56:00.851363
BSD-3-Clause
false
353d89d79610522af690d0037b2cd5c5
\n\n
.venv\Lib\site-packages\pandas\core\__pycache__\resample.cpython-313.pyc
resample.cpython-313.pyc
Other
95,430
0.75
0.026651
0.012295
awesome-app
105
2023-09-06T21:56:25.034274
Apache-2.0
false
ab20a5ad426c601cdfb03b70f473bf03
\n\n
.venv\Lib\site-packages\pandas\core\__pycache__\roperator.cpython-313.pyc
roperator.cpython-313.pyc
Other
2,175
0.8
0
0
node-utils
269
2023-10-18T15:30:36.867351
GPL-3.0
false
a579cf7c7cc86618704bbfc41256118e
\n\n
.venv\Lib\site-packages\pandas\core\__pycache__\sample.cpython-313.pyc
sample.cpython-313.pyc
Other
5,192
0.8
0.061728
0
vue-tools
933
2024-04-10T07:10:06.747178
Apache-2.0
false
73b6bae846496abada1d3b74f1639447
\n\n
.venv\Lib\site-packages\pandas\core\__pycache__\shared_docs.cpython-313.pyc
shared_docs.cpython-313.pyc
Other
30,383
0.95
0.07767
0.038462
node-utils
660
2025-02-01T08:03:37.593336
BSD-3-Clause
false
65d3134074d4d51d6d0557ec26d2e75d
\n\n
.venv\Lib\site-packages\pandas\core\__pycache__\sorting.cpython-313.pyc
sorting.cpython-313.pyc
Other
26,418
0.95
0.063882
0.007895
vue-tools
291
2025-03-02T13:42:47.471190
MIT
false
0f871ab61b456d34b7b417ec0d8c4f66
\n\n
.venv\Lib\site-packages\pandas\core\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
186
0.7
0
0
react-lib
169
2025-06-20T11:18:32.056046
BSD-3-Clause
false
73c246e3ee35ad1ec0a24e911746d4ae
"""\nExpose public exceptions & warnings\n"""\nfrom __future__ import annotations\n\nimport ctypes\n\nfrom pandas._config.config import OptionError\n\nfrom pandas._libs.tslibs import (\n OutOfBoundsDatetime,\n OutOfBoundsTimedelta,\n)\n\nfrom pandas.util.version import InvalidVersion\n\n\nclass IntCastingNaNError(ValueError):\n """\n Exception raised when converting (``astype``) an array with NaN to an integer type.\n\n Examples\n --------\n >>> pd.DataFrame(np.array([[1, np.nan], [2, 3]]), dtype="i8")\n Traceback (most recent call last):\n IntCastingNaNError: Cannot convert non-finite values (NA or inf) to integer\n """\n\n\nclass NullFrequencyError(ValueError):\n """\n Exception raised when a ``freq`` cannot be null.\n\n Particularly ``DatetimeIndex.shift``, ``TimedeltaIndex.shift``,\n ``PeriodIndex.shift``.\n\n Examples\n --------\n >>> df = pd.DatetimeIndex(["2011-01-01 10:00", "2011-01-01"], freq=None)\n >>> df.shift(2)\n Traceback (most recent call last):\n NullFrequencyError: Cannot shift with no freq\n """\n\n\nclass PerformanceWarning(Warning):\n """\n Warning raised when there is a possible performance impact.\n\n Examples\n --------\n >>> df = pd.DataFrame({"jim": [0, 0, 1, 1],\n ... "joe": ["x", "x", "z", "y"],\n ... "jolie": [1, 2, 3, 4]})\n >>> df = df.set_index(["jim", "joe"])\n >>> df\n jolie\n jim joe\n 0 x 1\n x 2\n 1 z 3\n y 4\n >>> df.loc[(1, 'z')] # doctest: +SKIP\n # PerformanceWarning: indexing past lexsort depth may impact performance.\n df.loc[(1, 'z')]\n jolie\n jim joe\n 1 z 3\n """\n\n\nclass UnsupportedFunctionCall(ValueError):\n """\n Exception raised when attempting to call a unsupported numpy function.\n\n For example, ``np.cumsum(groupby_object)``.\n\n Examples\n --------\n >>> df = pd.DataFrame({"A": [0, 0, 1, 1],\n ... "B": ["x", "x", "z", "y"],\n ... "C": [1, 2, 3, 4]}\n ... 
)\n >>> np.cumsum(df.groupby(["A"]))\n Traceback (most recent call last):\n UnsupportedFunctionCall: numpy operations are not valid with groupby.\n Use .groupby(...).cumsum() instead\n """\n\n\nclass UnsortedIndexError(KeyError):\n """\n Error raised when slicing a MultiIndex which has not been lexsorted.\n\n Subclass of `KeyError`.\n\n Examples\n --------\n >>> df = pd.DataFrame({"cat": [0, 0, 1, 1],\n ... "color": ["white", "white", "brown", "black"],\n ... "lives": [4, 4, 3, 7]},\n ... )\n >>> df = df.set_index(["cat", "color"])\n >>> df\n lives\n cat color\n 0 white 4\n white 4\n 1 brown 3\n black 7\n >>> df.loc[(0, "black"):(1, "white")]\n Traceback (most recent call last):\n UnsortedIndexError: 'Key length (2) was greater\n than MultiIndex lexsort depth (1)'\n """\n\n\nclass ParserError(ValueError):\n """\n Exception that is raised by an error encountered in parsing file contents.\n\n This is a generic error raised for errors encountered when functions like\n `read_csv` or `read_html` are parsing contents of a file.\n\n See Also\n --------\n read_csv : Read CSV (comma-separated) file into a DataFrame.\n read_html : Read HTML table into a DataFrame.\n\n Examples\n --------\n >>> data = '''a,b,c\n ... cat,foo,bar\n ... dog,foo,"baz'''\n >>> from io import StringIO\n >>> pd.read_csv(StringIO(data), skipfooter=1, engine='python')\n Traceback (most recent call last):\n ParserError: ',' expected after '"'. Error could possibly be due\n to parsing errors in the skipped footer rows\n """\n\n\nclass DtypeWarning(Warning):\n """\n Warning raised when reading different dtypes in a column from a file.\n\n Raised for a dtype incompatibility. 
This can happen whenever `read_csv`\n or `read_table` encounter non-uniform dtypes in a column(s) of a given\n CSV file.\n\n See Also\n --------\n read_csv : Read CSV (comma-separated) file into a DataFrame.\n read_table : Read general delimited file into a DataFrame.\n\n Notes\n -----\n This warning is issued when dealing with larger files because the dtype\n checking happens per chunk read.\n\n Despite the warning, the CSV file is read with mixed types in a single\n column which will be an object type. See the examples below to better\n understand this issue.\n\n Examples\n --------\n This example creates and reads a large CSV file with a column that contains\n `int` and `str`.\n\n >>> df = pd.DataFrame({'a': (['1'] * 100000 + ['X'] * 100000 +\n ... ['1'] * 100000),\n ... 'b': ['b'] * 300000}) # doctest: +SKIP\n >>> df.to_csv('test.csv', index=False) # doctest: +SKIP\n >>> df2 = pd.read_csv('test.csv') # doctest: +SKIP\n ... # DtypeWarning: Columns (0) have mixed types\n\n Important to notice that ``df2`` will contain both `str` and `int` for the\n same input, '1'.\n\n >>> df2.iloc[262140, 0] # doctest: +SKIP\n '1'\n >>> type(df2.iloc[262140, 0]) # doctest: +SKIP\n <class 'str'>\n >>> df2.iloc[262150, 0] # doctest: +SKIP\n 1\n >>> type(df2.iloc[262150, 0]) # doctest: +SKIP\n <class 'int'>\n\n One way to solve this issue is using the `dtype` parameter in the\n `read_csv` and `read_table` functions to explicit the conversion:\n\n >>> df2 = pd.read_csv('test.csv', sep=',', dtype={'a': str}) # doctest: +SKIP\n\n No warning was issued.\n """\n\n\nclass EmptyDataError(ValueError):\n """\n Exception raised in ``pd.read_csv`` when empty data or header is encountered.\n\n Examples\n --------\n >>> from io import StringIO\n >>> empty = StringIO()\n >>> pd.read_csv(empty)\n Traceback (most recent call last):\n EmptyDataError: No columns to parse from file\n """\n\n\nclass ParserWarning(Warning):\n """\n Warning raised when reading a file that doesn't use the default 'c' 
parser.\n\n Raised by `pd.read_csv` and `pd.read_table` when it is necessary to change\n parsers, generally from the default 'c' parser to 'python'.\n\n It happens due to a lack of support or functionality for parsing a\n particular attribute of a CSV file with the requested engine.\n\n Currently, 'c' unsupported options include the following parameters:\n\n 1. `sep` other than a single character (e.g. regex separators)\n 2. `skipfooter` higher than 0\n 3. `sep=None` with `delim_whitespace=False`\n\n The warning can be avoided by adding `engine='python'` as a parameter in\n `pd.read_csv` and `pd.read_table` methods.\n\n See Also\n --------\n pd.read_csv : Read CSV (comma-separated) file into DataFrame.\n pd.read_table : Read general delimited file into DataFrame.\n\n Examples\n --------\n Using a `sep` in `pd.read_csv` other than a single character:\n\n >>> import io\n >>> csv = '''a;b;c\n ... 1;1,8\n ... 1;2,1'''\n >>> df = pd.read_csv(io.StringIO(csv), sep='[;,]') # doctest: +SKIP\n ... # ParserWarning: Falling back to the 'python' engine...\n\n Adding `engine='python'` to `pd.read_csv` removes the Warning:\n\n >>> df = pd.read_csv(io.StringIO(csv), sep='[;,]', engine='python')\n """\n\n\nclass MergeError(ValueError):\n """\n Exception raised when merging data.\n\n Subclass of ``ValueError``.\n\n Examples\n --------\n >>> left = pd.DataFrame({"a": ["a", "b", "b", "d"],\n ... "b": ["cat", "dog", "weasel", "horse"]},\n ... index=range(4))\n >>> right = pd.DataFrame({"a": ["a", "b", "c", "d"],\n ... "c": ["meow", "bark", "chirp", "nay"]},\n ... index=range(4)).set_index("a")\n >>> left.join(right, on="a", validate="one_to_one",)\n Traceback (most recent call last):\n MergeError: Merge keys are not unique in left dataset; not a one-to-one merge\n """\n\n\nclass AbstractMethodError(NotImplementedError):\n """\n Raise this error instead of NotImplementedError for abstract methods.\n\n Examples\n --------\n >>> class Foo:\n ... @classmethod\n ... 
def classmethod(cls):\n ... raise pd.errors.AbstractMethodError(cls, methodtype="classmethod")\n ... def method(self):\n ... raise pd.errors.AbstractMethodError(self)\n >>> test = Foo.classmethod()\n Traceback (most recent call last):\n AbstractMethodError: This classmethod must be defined in the concrete class Foo\n\n >>> test2 = Foo().method()\n Traceback (most recent call last):\n AbstractMethodError: This classmethod must be defined in the concrete class Foo\n """\n\n def __init__(self, class_instance, methodtype: str = "method") -> None:\n types = {"method", "classmethod", "staticmethod", "property"}\n if methodtype not in types:\n raise ValueError(\n f"methodtype must be one of {methodtype}, got {types} instead."\n )\n self.methodtype = methodtype\n self.class_instance = class_instance\n\n def __str__(self) -> str:\n if self.methodtype == "classmethod":\n name = self.class_instance.__name__\n else:\n name = type(self.class_instance).__name__\n return f"This {self.methodtype} must be defined in the concrete class {name}"\n\n\nclass NumbaUtilError(Exception):\n """\n Error raised for unsupported Numba engine routines.\n\n Examples\n --------\n >>> df = pd.DataFrame({"key": ["a", "a", "b", "b"], "data": [1, 2, 3, 4]},\n ... columns=["key", "data"])\n >>> def incorrect_function(x):\n ... return sum(x) * 2.7\n >>> df.groupby("key").agg(incorrect_function, engine="numba")\n Traceback (most recent call last):\n NumbaUtilError: The first 2 arguments to incorrect_function\n must be ['values', 'index']\n """\n\n\nclass DuplicateLabelError(ValueError):\n """\n Error raised when an operation would introduce duplicate labels.\n\n Examples\n --------\n >>> s = pd.Series([0, 1, 2], index=['a', 'b', 'c']).set_flags(\n ... allows_duplicate_labels=False\n ... 
)\n >>> s.reindex(['a', 'a', 'b'])\n Traceback (most recent call last):\n ...\n DuplicateLabelError: Index has duplicates.\n positions\n label\n a [0, 1]\n """\n\n\nclass InvalidIndexError(Exception):\n """\n Exception raised when attempting to use an invalid index key.\n\n Examples\n --------\n >>> idx = pd.MultiIndex.from_product([["x", "y"], [0, 1]])\n >>> df = pd.DataFrame([[1, 1, 2, 2],\n ... [3, 3, 4, 4]], columns=idx)\n >>> df\n x y\n 0 1 0 1\n 0 1 1 2 2\n 1 3 3 4 4\n >>> df[:, 0]\n Traceback (most recent call last):\n InvalidIndexError: (slice(None, None, None), 0)\n """\n\n\nclass DataError(Exception):\n """\n Exceptionn raised when performing an operation on non-numerical data.\n\n For example, calling ``ohlc`` on a non-numerical column or a function\n on a rolling window.\n\n Examples\n --------\n >>> ser = pd.Series(['a', 'b', 'c'])\n >>> ser.rolling(2).sum()\n Traceback (most recent call last):\n DataError: No numeric types to aggregate\n """\n\n\nclass SpecificationError(Exception):\n """\n Exception raised by ``agg`` when the functions are ill-specified.\n\n The exception raised in two scenarios.\n\n The first way is calling ``agg`` on a\n Dataframe or Series using a nested renamer (dict-of-dict).\n\n The second way is calling ``agg`` on a Dataframe with duplicated functions\n names without assigning column name.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 1, 1, 2, 2],\n ... 'B': range(5),\n ... 'C': range(5)})\n >>> df.groupby('A').B.agg({'foo': 'count'}) # doctest: +SKIP\n ... # SpecificationError: nested renamer is not supported\n\n >>> df.groupby('A').agg({'B': {'foo': ['sum', 'max']}}) # doctest: +SKIP\n ... # SpecificationError: nested renamer is not supported\n\n >>> df.groupby('A').agg(['min', 'min']) # doctest: +SKIP\n ... 
# SpecificationError: nested renamer is not supported\n """\n\n\nclass SettingWithCopyError(ValueError):\n """\n Exception raised when trying to set on a copied slice from a ``DataFrame``.\n\n The ``mode.chained_assignment`` needs to be set to set to 'raise.' This can\n happen unintentionally when chained indexing.\n\n For more information on evaluation order,\n see :ref:`the user guide<indexing.evaluation_order>`.\n\n For more information on view vs. copy,\n see :ref:`the user guide<indexing.view_versus_copy>`.\n\n Examples\n --------\n >>> pd.options.mode.chained_assignment = 'raise'\n >>> df = pd.DataFrame({'A': [1, 1, 1, 2, 2]}, columns=['A'])\n >>> df.loc[0:3]['A'] = 'a' # doctest: +SKIP\n ... # SettingWithCopyError: A value is trying to be set on a copy of a...\n """\n\n\nclass SettingWithCopyWarning(Warning):\n """\n Warning raised when trying to set on a copied slice from a ``DataFrame``.\n\n The ``mode.chained_assignment`` needs to be set to set to 'warn.'\n 'Warn' is the default option. This can happen unintentionally when\n chained indexing.\n\n For more information on evaluation order,\n see :ref:`the user guide<indexing.evaluation_order>`.\n\n For more information on view vs. copy,\n see :ref:`the user guide<indexing.view_versus_copy>`.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 1, 1, 2, 2]}, columns=['A'])\n >>> df.loc[0:3]['A'] = 'a' # doctest: +SKIP\n ... # SettingWithCopyWarning: A value is trying to be set on a copy of a...\n """\n\n\nclass ChainedAssignmentError(Warning):\n """\n Warning raised when trying to set using chained assignment.\n\n When the ``mode.copy_on_write`` option is enabled, chained assignment can\n never work. In such a situation, we are always setting into a temporary\n object that is the result of an indexing operation (getitem), which under\n Copy-on-Write always behaves as a copy. Thus, assigning through a chain\n can never update the original Series or DataFrame.\n\n For more information on view vs. 
copy,\n see :ref:`the user guide<indexing.view_versus_copy>`.\n\n Examples\n --------\n >>> pd.options.mode.copy_on_write = True\n >>> df = pd.DataFrame({'A': [1, 1, 1, 2, 2]}, columns=['A'])\n >>> df["A"][0:3] = 10 # doctest: +SKIP\n ... # ChainedAssignmentError: ...\n >>> pd.options.mode.copy_on_write = False\n """\n\n\n_chained_assignment_msg = (\n "A value is trying to be set on a copy of a DataFrame or Series "\n "through chained assignment.\n"\n "When using the Copy-on-Write mode, such chained assignment never works "\n "to update the original DataFrame or Series, because the intermediate "\n "object on which we are setting values always behaves as a copy.\n\n"\n "Try using '.loc[row_indexer, col_indexer] = value' instead, to perform "\n "the assignment in a single step.\n\n"\n "See the caveats in the documentation: "\n "https://pandas.pydata.org/pandas-docs/stable/user_guide/"\n "indexing.html#returning-a-view-versus-a-copy"\n)\n\n\n_chained_assignment_method_msg = (\n "A value is trying to be set on a copy of a DataFrame or Series "\n "through chained assignment using an inplace method.\n"\n "When using the Copy-on-Write mode, such inplace method never works "\n "to update the original DataFrame or Series, because the intermediate "\n "object on which we are setting values always behaves as a copy.\n\n"\n "For example, when doing 'df[col].method(value, inplace=True)', try "\n "using 'df.method({col: value}, inplace=True)' instead, to perform "\n "the operation inplace on the original object.\n\n"\n)\n\n\n_chained_assignment_warning_msg = (\n "ChainedAssignmentError: behaviour will change in pandas 3.0!\n"\n "You are setting values through chained assignment. 
Currently this works "\n "in certain cases, but when using Copy-on-Write (which will become the "\n "default behaviour in pandas 3.0) this will never work to update the "\n "original DataFrame or Series, because the intermediate object on which "\n "we are setting values will behave as a copy.\n"\n "A typical example is when you are setting values in a column of a "\n "DataFrame, like:\n\n"\n 'df["col"][row_indexer] = value\n\n'\n 'Use `df.loc[row_indexer, "col"] = values` instead, to perform the '\n "assignment in a single step and ensure this keeps updating the original `df`.\n\n"\n "See the caveats in the documentation: "\n "https://pandas.pydata.org/pandas-docs/stable/user_guide/"\n "indexing.html#returning-a-view-versus-a-copy\n"\n)\n\n\n_chained_assignment_warning_method_msg = (\n "A value is trying to be set on a copy of a DataFrame or Series "\n "through chained assignment using an inplace method.\n"\n "The behavior will change in pandas 3.0. This inplace method will "\n "never work because the intermediate object on which we are setting "\n "values always behaves as a copy.\n\n"\n "For example, when doing 'df[col].method(value, inplace=True)', try "\n "using 'df.method({col: value}, inplace=True)' or "\n "df[col] = df[col].method(value) instead, to perform "\n "the operation inplace on the original object.\n\n"\n)\n\n\ndef _check_cacher(obj):\n # This is a mess, selection paths that return a view set the _cacher attribute\n # on the Series; most of them also set _item_cache which adds 1 to our relevant\n # reference count, but iloc does not, so we have to check if we are actually\n # in the item cache\n if hasattr(obj, "_cacher"):\n parent = obj._cacher[1]()\n # parent could be dead\n if parent is None:\n return False\n if hasattr(parent, "_item_cache"):\n if obj._cacher[0] in parent._item_cache:\n # Check if we are actually the item from item_cache, iloc creates a\n # new object\n return obj is parent._item_cache[obj._cacher[0]]\n return False\n\n\nclass 
NumExprClobberingError(NameError):\n """\n Exception raised when trying to use a built-in numexpr name as a variable name.\n\n ``eval`` or ``query`` will throw the error if the engine is set\n to 'numexpr'. 'numexpr' is the default engine value for these methods if the\n numexpr package is installed.\n\n Examples\n --------\n >>> df = pd.DataFrame({'abs': [1, 1, 1]})\n >>> df.query("abs > 2") # doctest: +SKIP\n ... # NumExprClobberingError: Variables in expression "(abs) > (2)" overlap...\n >>> sin, a = 1, 2\n >>> pd.eval("sin + a", engine='numexpr') # doctest: +SKIP\n ... # NumExprClobberingError: Variables in expression "(sin) + (a)" overlap...\n """\n\n\nclass UndefinedVariableError(NameError):\n """\n Exception raised by ``query`` or ``eval`` when using an undefined variable name.\n\n It will also specify whether the undefined variable is local or not.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 1, 1]})\n >>> df.query("A > x") # doctest: +SKIP\n ... # UndefinedVariableError: name 'x' is not defined\n >>> df.query("A > @y") # doctest: +SKIP\n ... # UndefinedVariableError: local variable 'y' is not defined\n >>> pd.eval('x + 1') # doctest: +SKIP\n ... # UndefinedVariableError: name 'x' is not defined\n """\n\n def __init__(self, name: str, is_local: bool | None = None) -> None:\n base_msg = f"{repr(name)} is not defined"\n if is_local:\n msg = f"local variable {base_msg}"\n else:\n msg = f"name {base_msg}"\n super().__init__(msg)\n\n\nclass IndexingError(Exception):\n """\n Exception is raised when trying to index and there is a mismatch in dimensions.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 1, 1]})\n >>> df.loc[..., ..., 'A'] # doctest: +SKIP\n ... # IndexingError: indexer may only contain one '...' entry\n >>> df = pd.DataFrame({'A': [1, 1, 1]})\n >>> df.loc[1, ..., ...] # doctest: +SKIP\n ... # IndexingError: Too many indexers\n >>> df[pd.Series([True], dtype=bool)] # doctest: +SKIP\n ... 
# IndexingError: Unalignable boolean Series provided as indexer...\n >>> s = pd.Series(range(2),\n ... index = pd.MultiIndex.from_product([["a", "b"], ["c"]]))\n >>> s.loc["a", "c", "d"] # doctest: +SKIP\n ... # IndexingError: Too many indexers\n """\n\n\nclass PyperclipException(RuntimeError):\n """\n Exception raised when clipboard functionality is unsupported.\n\n Raised by ``to_clipboard()`` and ``read_clipboard()``.\n """\n\n\nclass PyperclipWindowsException(PyperclipException):\n """\n Exception raised when clipboard functionality is unsupported by Windows.\n\n Access to the clipboard handle would be denied due to some other\n window process is accessing it.\n """\n\n def __init__(self, message: str) -> None:\n # attr only exists on Windows, so typing fails on other platforms\n message += f" ({ctypes.WinError()})" # type: ignore[attr-defined]\n super().__init__(message)\n\n\nclass CSSWarning(UserWarning):\n """\n Warning is raised when converting css styling fails.\n\n This can be due to the styling not having an equivalent value or because the\n styling isn't properly formatted.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 1, 1]})\n >>> df.style.applymap(\n ... lambda x: 'background-color: blueGreenRed;'\n ... ).to_excel('styled.xlsx') # doctest: +SKIP\n CSSWarning: Unhandled color format: 'blueGreenRed'\n >>> df.style.applymap(\n ... lambda x: 'border: 1px solid red red;'\n ... ).to_excel('styled.xlsx') # doctest: +SKIP\n CSSWarning: Unhandled color format: 'blueGreenRed'\n """\n\n\nclass PossibleDataLossError(Exception):\n """\n Exception raised when trying to open a HDFStore file when already opened.\n\n Examples\n --------\n >>> store = pd.HDFStore('my-store', 'a') # doctest: +SKIP\n >>> store.open("w") # doctest: +SKIP\n ... 
# PossibleDataLossError: Re-opening the file [my-store] with mode [a]...\n """\n\n\nclass ClosedFileError(Exception):\n """\n Exception is raised when trying to perform an operation on a closed HDFStore file.\n\n Examples\n --------\n >>> store = pd.HDFStore('my-store', 'a') # doctest: +SKIP\n >>> store.close() # doctest: +SKIP\n >>> store.keys() # doctest: +SKIP\n ... # ClosedFileError: my-store file is not open!\n """\n\n\nclass IncompatibilityWarning(Warning):\n """\n Warning raised when trying to use where criteria on an incompatible HDF5 file.\n """\n\n\nclass AttributeConflictWarning(Warning):\n """\n Warning raised when index attributes conflict when using HDFStore.\n\n Occurs when attempting to append an index with a different\n name than the existing index on an HDFStore or attempting to append an index with a\n different frequency than the existing index on an HDFStore.\n\n Examples\n --------\n >>> idx1 = pd.Index(['a', 'b'], name='name1')\n >>> df1 = pd.DataFrame([[1, 2], [3, 4]], index=idx1)\n >>> df1.to_hdf('file', 'data', 'w', append=True) # doctest: +SKIP\n >>> idx2 = pd.Index(['c', 'd'], name='name2')\n >>> df2 = pd.DataFrame([[5, 6], [7, 8]], index=idx2)\n >>> df2.to_hdf('file', 'data', 'a', append=True) # doctest: +SKIP\n AttributeConflictWarning: the [index_name] attribute of the existing index is\n [name1] which conflicts with the new [name2]...\n """\n\n\nclass DatabaseError(OSError):\n """\n Error is raised when executing sql with bad syntax or sql that throws an error.\n\n Examples\n --------\n >>> from sqlite3 import connect\n >>> conn = connect(':memory:')\n >>> pd.read_sql('select * test', conn) # doctest: +SKIP\n ... 
# DatabaseError: Execution failed on sql 'test': near "test": syntax error\n """\n\n\nclass PossiblePrecisionLoss(Warning):\n """\n Warning raised by to_stata on a column with a value outside or equal to int64.\n\n When the column value is outside or equal to the int64 value the column is\n converted to a float64 dtype.\n\n Examples\n --------\n >>> df = pd.DataFrame({"s": pd.Series([1, 2**53], dtype=np.int64)})\n >>> df.to_stata('test') # doctest: +SKIP\n ... # PossiblePrecisionLoss: Column converted from int64 to float64...\n """\n\n\nclass ValueLabelTypeMismatch(Warning):\n """\n Warning raised by to_stata on a category column that contains non-string values.\n\n Examples\n --------\n >>> df = pd.DataFrame({"categories": pd.Series(["a", 2], dtype="category")})\n >>> df.to_stata('test') # doctest: +SKIP\n ... # ValueLabelTypeMismatch: Stata value labels (pandas categories) must be str...\n """\n\n\nclass InvalidColumnName(Warning):\n """\n Warning raised by to_stata the column contains a non-valid stata name.\n\n Because the column name is an invalid Stata variable, the name needs to be\n converted.\n\n Examples\n --------\n >>> df = pd.DataFrame({"0categories": pd.Series([2, 2])})\n >>> df.to_stata('test') # doctest: +SKIP\n ... # InvalidColumnName: Not all pandas column names were valid Stata variable...\n """\n\n\nclass CategoricalConversionWarning(Warning):\n """\n Warning is raised when reading a partial labeled Stata file using a iterator.\n\n Examples\n --------\n >>> from pandas.io.stata import StataReader\n >>> with StataReader('dta_file', chunksize=2) as reader: # doctest: +SKIP\n ... for i, block in enumerate(reader):\n ... print(i, block)\n ... 
# CategoricalConversionWarning: One or more series with value labels...\n """\n\n\nclass LossySetitemError(Exception):\n """\n Raised when trying to do a __setitem__ on an np.ndarray that is not lossless.\n\n Notes\n -----\n This is an internal error.\n """\n\n\nclass NoBufferPresent(Exception):\n """\n Exception is raised in _get_data_buffer to signal that there is no requested buffer.\n """\n\n\nclass InvalidComparison(Exception):\n """\n Exception is raised by _validate_comparison_value to indicate an invalid comparison.\n\n Notes\n -----\n This is an internal error.\n """\n\n\n__all__ = [\n "AbstractMethodError",\n "AttributeConflictWarning",\n "CategoricalConversionWarning",\n "ClosedFileError",\n "CSSWarning",\n "DatabaseError",\n "DataError",\n "DtypeWarning",\n "DuplicateLabelError",\n "EmptyDataError",\n "IncompatibilityWarning",\n "IntCastingNaNError",\n "InvalidColumnName",\n "InvalidComparison",\n "InvalidIndexError",\n "InvalidVersion",\n "IndexingError",\n "LossySetitemError",\n "MergeError",\n "NoBufferPresent",\n "NullFrequencyError",\n "NumbaUtilError",\n "NumExprClobberingError",\n "OptionError",\n "OutOfBoundsDatetime",\n "OutOfBoundsTimedelta",\n "ParserError",\n "ParserWarning",\n "PerformanceWarning",\n "PossibleDataLossError",\n "PossiblePrecisionLoss",\n "PyperclipException",\n "PyperclipWindowsException",\n "SettingWithCopyError",\n "SettingWithCopyWarning",\n "SpecificationError",\n "UndefinedVariableError",\n "UnsortedIndexError",\n "UnsupportedFunctionCall",\n "ValueLabelTypeMismatch",\n]\n
.venv\Lib\site-packages\pandas\errors\__init__.py
__init__.py
Python
27,164
0.95
0.087059
0.013333
awesome-app
680
2023-12-10T03:39:21.726763
MIT
false
662fe0e75b397b799c8ceae642b56423
\n\n
.venv\Lib\site-packages\pandas\errors\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
29,896
0.95
0.037891
0.001938
awesome-app
697
2023-11-01T22:37:07.436962
MIT
false
2b2bf27ff1e087e63f5c8bae4f2ce82d
"""\nData IO api\n"""\n\nfrom pandas.io.clipboards import read_clipboard\nfrom pandas.io.excel import (\n ExcelFile,\n ExcelWriter,\n read_excel,\n)\nfrom pandas.io.feather_format import read_feather\nfrom pandas.io.gbq import read_gbq\nfrom pandas.io.html import read_html\nfrom pandas.io.json import read_json\nfrom pandas.io.orc import read_orc\nfrom pandas.io.parquet import read_parquet\nfrom pandas.io.parsers import (\n read_csv,\n read_fwf,\n read_table,\n)\nfrom pandas.io.pickle import (\n read_pickle,\n to_pickle,\n)\nfrom pandas.io.pytables import (\n HDFStore,\n read_hdf,\n)\nfrom pandas.io.sas import read_sas\nfrom pandas.io.spss import read_spss\nfrom pandas.io.sql import (\n read_sql,\n read_sql_query,\n read_sql_table,\n)\nfrom pandas.io.stata import read_stata\nfrom pandas.io.xml import read_xml\n\n__all__ = [\n "ExcelFile",\n "ExcelWriter",\n "HDFStore",\n "read_clipboard",\n "read_csv",\n "read_excel",\n "read_feather",\n "read_fwf",\n "read_gbq",\n "read_hdf",\n "read_html",\n "read_json",\n "read_orc",\n "read_parquet",\n "read_pickle",\n "read_sas",\n "read_spss",\n "read_sql",\n "read_sql_query",\n "read_sql_table",\n "read_stata",\n "read_table",\n "read_xml",\n "to_pickle",\n]\n
.venv\Lib\site-packages\pandas\io\api.py
api.py
Python
1,264
0.85
0
0
python-kit
776
2024-12-09T11:19:10.742491
Apache-2.0
false
7204b98d4db2dadf95ec0b5be03332a5
""" io on the clipboard """\nfrom __future__ import annotations\n\nfrom io import StringIO\nfrom typing import TYPE_CHECKING\nimport warnings\n\nfrom pandas._libs import lib\nfrom pandas.util._exceptions import find_stack_level\nfrom pandas.util._validators import check_dtype_backend\n\nfrom pandas.core.dtypes.generic import ABCDataFrame\n\nfrom pandas import (\n get_option,\n option_context,\n)\n\nif TYPE_CHECKING:\n from pandas._typing import DtypeBackend\n\n\ndef read_clipboard(\n sep: str = r"\s+",\n dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,\n **kwargs,\n): # pragma: no cover\n r"""\n Read text from clipboard and pass to :func:`~pandas.read_csv`.\n\n Parses clipboard contents similar to how CSV files are parsed\n using :func:`~pandas.read_csv`.\n\n Parameters\n ----------\n sep : str, default '\\s+'\n A string or regex delimiter. The default of ``'\\s+'`` denotes\n one or more whitespace characters.\n\n dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'\n Back-end data type applied to the resultant :class:`DataFrame`\n (still experimental). Behaviour is as follows:\n\n * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`\n (default).\n * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`\n DataFrame.\n\n .. 
versionadded:: 2.0\n\n **kwargs\n See :func:`~pandas.read_csv` for the full argument list.\n\n Returns\n -------\n DataFrame\n A parsed :class:`~pandas.DataFrame` object.\n\n See Also\n --------\n DataFrame.to_clipboard : Copy object to the system clipboard.\n read_csv : Read a comma-separated values (csv) file into DataFrame.\n read_fwf : Read a table of fixed-width formatted lines into DataFrame.\n\n Examples\n --------\n >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])\n >>> df.to_clipboard() # doctest: +SKIP\n >>> pd.read_clipboard() # doctest: +SKIP\n A B C\n 0 1 2 3\n 1 4 5 6\n """\n encoding = kwargs.pop("encoding", "utf-8")\n\n # only utf-8 is valid for passed value because that's what clipboard\n # supports\n if encoding is not None and encoding.lower().replace("-", "") != "utf8":\n raise NotImplementedError("reading from clipboard only supports utf-8 encoding")\n\n check_dtype_backend(dtype_backend)\n\n from pandas.io.clipboard import clipboard_get\n from pandas.io.parsers import read_csv\n\n text = clipboard_get()\n\n # Try to decode (if needed, as "text" might already be a string here).\n try:\n text = text.decode(kwargs.get("encoding") or get_option("display.encoding"))\n except AttributeError:\n pass\n\n # Excel copies into clipboard with \t separation\n # inspect no more then the 10 first lines, if they\n # all contain an equal number (>0) of tabs, infer\n # that this came from excel and set 'sep' accordingly\n lines = text[:10000].split("\n")[:-1][:10]\n\n # Need to remove leading white space, since read_csv\n # accepts:\n # a b\n # 0 1 2\n # 1 3 4\n\n counts = {x.lstrip(" ").count("\t") for x in lines}\n if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0:\n sep = "\t"\n # check the number of leading tabs in the first line\n # to account for index columns\n index_length = len(lines[0]) - len(lines[0].lstrip(" \t"))\n if index_length != 0:\n kwargs.setdefault("index_col", list(range(index_length)))\n\n # Edge case 
where sep is specified to be None, return to default\n if sep is None and kwargs.get("delim_whitespace") is None:\n sep = r"\s+"\n\n # Regex separator currently only works with python engine.\n # Default to python if separator is multi-character (regex)\n if len(sep) > 1 and kwargs.get("engine") is None:\n kwargs["engine"] = "python"\n elif len(sep) > 1 and kwargs.get("engine") == "c":\n warnings.warn(\n "read_clipboard with regex separator does not work properly with c engine.",\n stacklevel=find_stack_level(),\n )\n\n return read_csv(StringIO(text), sep=sep, dtype_backend=dtype_backend, **kwargs)\n\n\ndef to_clipboard(\n obj, excel: bool | None = True, sep: str | None = None, **kwargs\n) -> None: # pragma: no cover\n """\n Attempt to write text representation of object to the system clipboard\n The clipboard can be then pasted into Excel for example.\n\n Parameters\n ----------\n obj : the object to write to the clipboard\n excel : bool, defaults to True\n if True, use the provided separator, writing in a csv\n format for allowing easy pasting into excel.\n if False, write a string representation of the object\n to the clipboard\n sep : optional, defaults to tab\n other keywords are passed to to_csv\n\n Notes\n -----\n Requirements for your platform\n - Linux: xclip, or xsel (with PyQt4 modules)\n - Windows:\n - OS X:\n """\n encoding = kwargs.pop("encoding", "utf-8")\n\n # testing if an invalid encoding is passed to clipboard\n if encoding is not None and encoding.lower().replace("-", "") != "utf8":\n raise ValueError("clipboard only supports utf-8 encoding")\n\n from pandas.io.clipboard import clipboard_set\n\n if excel is None:\n excel = True\n\n if excel:\n try:\n if sep is None:\n sep = "\t"\n buf = StringIO()\n\n # clipboard_set (pyperclip) expects unicode\n obj.to_csv(buf, sep=sep, encoding="utf-8", **kwargs)\n text = buf.getvalue()\n\n clipboard_set(text)\n return\n except TypeError:\n warnings.warn(\n "to_clipboard in excel mode requires a single 
character separator.",\n stacklevel=find_stack_level(),\n )\n elif sep is not None:\n warnings.warn(\n "to_clipboard with excel=False ignores the sep argument.",\n stacklevel=find_stack_level(),\n )\n\n if isinstance(obj, ABCDataFrame):\n # str(df) has various unhelpful defaults, like truncation\n with option_context("display.max_colwidth", None):\n objstr = obj.to_string(**kwargs)\n else:\n objstr = str(obj)\n clipboard_set(objstr)\n
.venv\Lib\site-packages\pandas\io\clipboards.py
clipboards.py
Python
6,320
0.95
0.162437
0.150943
awesome-app
319
2024-01-08T05:05:20.407762
BSD-3-Clause
false
217c8afa496bfccf645b5f1f9230de6d
"""Common IO api utilities"""\nfrom __future__ import annotations\n\nfrom abc import (\n ABC,\n abstractmethod,\n)\nimport codecs\nfrom collections import defaultdict\nfrom collections.abc import (\n Hashable,\n Mapping,\n Sequence,\n)\nimport dataclasses\nimport functools\nimport gzip\nfrom io import (\n BufferedIOBase,\n BytesIO,\n RawIOBase,\n StringIO,\n TextIOBase,\n TextIOWrapper,\n)\nimport mmap\nimport os\nfrom pathlib import Path\nimport re\nimport tarfile\nfrom typing import (\n IO,\n TYPE_CHECKING,\n Any,\n AnyStr,\n DefaultDict,\n Generic,\n Literal,\n TypeVar,\n cast,\n overload,\n)\nfrom urllib.parse import (\n urljoin,\n urlparse as parse_url,\n uses_netloc,\n uses_params,\n uses_relative,\n)\nimport warnings\nimport zipfile\n\nfrom pandas._typing import (\n BaseBuffer,\n ReadCsvBuffer,\n)\nfrom pandas.compat import (\n get_bz2_file,\n get_lzma_file,\n)\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.util._decorators import doc\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.common import (\n is_bool,\n is_file_like,\n is_integer,\n is_list_like,\n)\nfrom pandas.core.dtypes.generic import ABCMultiIndex\n\nfrom pandas.core.shared_docs import _shared_docs\n\n_VALID_URLS = set(uses_relative + uses_netloc + uses_params)\n_VALID_URLS.discard("")\n_RFC_3986_PATTERN = re.compile(r"^[A-Za-z][A-Za-z0-9+\-+.]*://")\n\nBaseBufferT = TypeVar("BaseBufferT", bound=BaseBuffer)\n\n\nif TYPE_CHECKING:\n from types import TracebackType\n\n from pandas._typing import (\n CompressionDict,\n CompressionOptions,\n FilePath,\n ReadBuffer,\n StorageOptions,\n WriteBuffer,\n )\n\n from pandas import MultiIndex\n\n\n@dataclasses.dataclass\nclass IOArgs:\n """\n Return value of io/common.py:_get_filepath_or_buffer.\n """\n\n filepath_or_buffer: str | BaseBuffer\n encoding: str\n mode: str\n compression: CompressionDict\n should_close: bool = False\n\n\n@dataclasses.dataclass\nclass IOHandles(Generic[AnyStr]):\n 
"""\n Return value of io/common.py:get_handle\n\n Can be used as a context manager.\n\n This is used to easily close created buffers and to handle corner cases when\n TextIOWrapper is inserted.\n\n handle: The file handle to be used.\n created_handles: All file handles that are created by get_handle\n is_wrapped: Whether a TextIOWrapper needs to be detached.\n """\n\n # handle might not implement the IO-interface\n handle: IO[AnyStr]\n compression: CompressionDict\n created_handles: list[IO[bytes] | IO[str]] = dataclasses.field(default_factory=list)\n is_wrapped: bool = False\n\n def close(self) -> None:\n """\n Close all created buffers.\n\n Note: If a TextIOWrapper was inserted, it is flushed and detached to\n avoid closing the potentially user-created buffer.\n """\n if self.is_wrapped:\n assert isinstance(self.handle, TextIOWrapper)\n self.handle.flush()\n self.handle.detach()\n self.created_handles.remove(self.handle)\n for handle in self.created_handles:\n handle.close()\n self.created_handles = []\n self.is_wrapped = False\n\n def __enter__(self) -> IOHandles[AnyStr]:\n return self\n\n def __exit__(\n self,\n exc_type: type[BaseException] | None,\n exc_value: BaseException | None,\n traceback: TracebackType | None,\n ) -> None:\n self.close()\n\n\ndef is_url(url: object) -> bool:\n """\n Check to see if a URL has a valid protocol.\n\n Parameters\n ----------\n url : str or unicode\n\n Returns\n -------\n isurl : bool\n If `url` has a valid protocol return True otherwise False.\n """\n if not isinstance(url, str):\n return False\n return parse_url(url).scheme in _VALID_URLS\n\n\n@overload\ndef _expand_user(filepath_or_buffer: str) -> str:\n ...\n\n\n@overload\ndef _expand_user(filepath_or_buffer: BaseBufferT) -> BaseBufferT:\n ...\n\n\ndef _expand_user(filepath_or_buffer: str | BaseBufferT) -> str | BaseBufferT:\n """\n Return the argument with an initial component of ~ or ~user\n replaced by that user's home directory.\n\n Parameters\n ----------\n 
filepath_or_buffer : object to be converted if possible\n\n Returns\n -------\n expanded_filepath_or_buffer : an expanded filepath or the\n input if not expandable\n """\n if isinstance(filepath_or_buffer, str):\n return os.path.expanduser(filepath_or_buffer)\n return filepath_or_buffer\n\n\ndef validate_header_arg(header: object) -> None:\n if header is None:\n return\n if is_integer(header):\n header = cast(int, header)\n if header < 0:\n # GH 27779\n raise ValueError(\n "Passing negative integer to header is invalid. "\n "For no header, use header=None instead"\n )\n return\n if is_list_like(header, allow_sets=False):\n header = cast(Sequence, header)\n if not all(map(is_integer, header)):\n raise ValueError("header must be integer or list of integers")\n if any(i < 0 for i in header):\n raise ValueError("cannot specify multi-index header with negative integers")\n return\n if is_bool(header):\n raise TypeError(\n "Passing a bool to header is invalid. Use header=None for no header or "\n "header=int or list-like of ints to specify "\n "the row(s) making up the column names"\n )\n # GH 16338\n raise ValueError("header must be integer or list of integers")\n\n\n@overload\ndef stringify_path(filepath_or_buffer: FilePath, convert_file_like: bool = ...) 
-> str:\n ...\n\n\n@overload\ndef stringify_path(\n filepath_or_buffer: BaseBufferT, convert_file_like: bool = ...\n) -> BaseBufferT:\n ...\n\n\ndef stringify_path(\n filepath_or_buffer: FilePath | BaseBufferT,\n convert_file_like: bool = False,\n) -> str | BaseBufferT:\n """\n Attempt to convert a path-like object to a string.\n\n Parameters\n ----------\n filepath_or_buffer : object to be converted\n\n Returns\n -------\n str_filepath_or_buffer : maybe a string version of the object\n\n Notes\n -----\n Objects supporting the fspath protocol are coerced\n according to its __fspath__ method.\n\n Any other object is passed through unchanged, which includes bytes,\n strings, buffers, or anything else that's not even path-like.\n """\n if not convert_file_like and is_file_like(filepath_or_buffer):\n # GH 38125: some fsspec objects implement os.PathLike but have already opened a\n # file. This prevents opening the file a second time. infer_compression calls\n # this function with convert_file_like=True to infer the compression.\n return cast(BaseBufferT, filepath_or_buffer)\n\n if isinstance(filepath_or_buffer, os.PathLike):\n filepath_or_buffer = filepath_or_buffer.__fspath__()\n return _expand_user(filepath_or_buffer)\n\n\ndef urlopen(*args, **kwargs):\n """\n Lazy-import wrapper for stdlib urlopen, as that imports a big chunk of\n the stdlib.\n """\n import urllib.request\n\n return urllib.request.urlopen(*args, **kwargs)\n\n\ndef is_fsspec_url(url: FilePath | BaseBuffer) -> bool:\n """\n Returns true if the given URL looks like\n something fsspec can handle\n """\n return (\n isinstance(url, str)\n and bool(_RFC_3986_PATTERN.match(url))\n and not url.startswith(("http://", "https://"))\n )\n\n\n@doc(\n storage_options=_shared_docs["storage_options"],\n compression_options=_shared_docs["compression_options"] % "filepath_or_buffer",\n)\ndef _get_filepath_or_buffer(\n filepath_or_buffer: FilePath | BaseBuffer,\n encoding: str = "utf-8",\n compression: 
CompressionOptions | None = None,\n mode: str = "r",\n storage_options: StorageOptions | None = None,\n) -> IOArgs:\n """\n If the filepath_or_buffer is a url, translate and return the buffer.\n Otherwise passthrough.\n\n Parameters\n ----------\n filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path),\n or buffer\n {compression_options}\n\n .. versionchanged:: 1.4.0 Zstandard support.\n\n encoding : the encoding to use to decode bytes, default is 'utf-8'\n mode : str, optional\n\n {storage_options}\n\n\n Returns the dataclass IOArgs.\n """\n filepath_or_buffer = stringify_path(filepath_or_buffer)\n\n # handle compression dict\n compression_method, compression = get_compression_method(compression)\n compression_method = infer_compression(filepath_or_buffer, compression_method)\n\n # GH21227 internal compression is not used for non-binary handles.\n if compression_method and hasattr(filepath_or_buffer, "write") and "b" not in mode:\n warnings.warn(\n "compression has no effect when passing a non-binary object as input.",\n RuntimeWarning,\n stacklevel=find_stack_level(),\n )\n compression_method = None\n\n compression = dict(compression, method=compression_method)\n\n # bz2 and xz do not write the byte order mark for utf-16 and utf-32\n # print a warning when writing such files\n if (\n "w" in mode\n and compression_method in ["bz2", "xz"]\n and encoding in ["utf-16", "utf-32"]\n ):\n warnings.warn(\n f"{compression} will not write the byte order mark for {encoding}",\n UnicodeWarning,\n stacklevel=find_stack_level(),\n )\n\n # Use binary mode when converting path-like objects to file-like objects (fsspec)\n # except when text mode is explicitly requested. 
The original mode is returned if\n # fsspec is not used.\n fsspec_mode = mode\n if "t" not in fsspec_mode and "b" not in fsspec_mode:\n fsspec_mode += "b"\n\n if isinstance(filepath_or_buffer, str) and is_url(filepath_or_buffer):\n # TODO: fsspec can also handle HTTP via requests, but leaving this\n # unchanged. using fsspec appears to break the ability to infer if the\n # server responded with gzipped data\n storage_options = storage_options or {}\n\n # waiting until now for importing to match intended lazy logic of\n # urlopen function defined elsewhere in this module\n import urllib.request\n\n # assuming storage_options is to be interpreted as headers\n req_info = urllib.request.Request(filepath_or_buffer, headers=storage_options)\n with urlopen(req_info) as req:\n content_encoding = req.headers.get("Content-Encoding", None)\n if content_encoding == "gzip":\n # Override compression based on Content-Encoding header\n compression = {"method": "gzip"}\n reader = BytesIO(req.read())\n return IOArgs(\n filepath_or_buffer=reader,\n encoding=encoding,\n compression=compression,\n should_close=True,\n mode=fsspec_mode,\n )\n\n if is_fsspec_url(filepath_or_buffer):\n assert isinstance(\n filepath_or_buffer, str\n ) # just to appease mypy for this branch\n # two special-case s3-like protocols; these have special meaning in Hadoop,\n # but are equivalent to just "s3" from fsspec's point of view\n # cc #11071\n if filepath_or_buffer.startswith("s3a://"):\n filepath_or_buffer = filepath_or_buffer.replace("s3a://", "s3://")\n if filepath_or_buffer.startswith("s3n://"):\n filepath_or_buffer = filepath_or_buffer.replace("s3n://", "s3://")\n fsspec = import_optional_dependency("fsspec")\n\n # If botocore is installed we fallback to reading with anon=True\n # to allow reads from public buckets\n err_types_to_retry_with_anon: list[Any] = []\n try:\n import_optional_dependency("botocore")\n from botocore.exceptions import (\n ClientError,\n NoCredentialsError,\n )\n\n 
err_types_to_retry_with_anon = [\n ClientError,\n NoCredentialsError,\n PermissionError,\n ]\n except ImportError:\n pass\n\n try:\n file_obj = fsspec.open(\n filepath_or_buffer, mode=fsspec_mode, **(storage_options or {})\n ).open()\n # GH 34626 Reads from Public Buckets without Credentials needs anon=True\n except tuple(err_types_to_retry_with_anon):\n if storage_options is None:\n storage_options = {"anon": True}\n else:\n # don't mutate user input.\n storage_options = dict(storage_options)\n storage_options["anon"] = True\n file_obj = fsspec.open(\n filepath_or_buffer, mode=fsspec_mode, **(storage_options or {})\n ).open()\n\n return IOArgs(\n filepath_or_buffer=file_obj,\n encoding=encoding,\n compression=compression,\n should_close=True,\n mode=fsspec_mode,\n )\n elif storage_options:\n raise ValueError(\n "storage_options passed with file object or non-fsspec file path"\n )\n\n if isinstance(filepath_or_buffer, (str, bytes, mmap.mmap)):\n return IOArgs(\n filepath_or_buffer=_expand_user(filepath_or_buffer),\n encoding=encoding,\n compression=compression,\n should_close=False,\n mode=mode,\n )\n\n # is_file_like requires (read | write) & __iter__ but __iter__ is only\n # needed for read_csv(engine=python)\n if not (\n hasattr(filepath_or_buffer, "read") or hasattr(filepath_or_buffer, "write")\n ):\n msg = f"Invalid file path or buffer object type: {type(filepath_or_buffer)}"\n raise ValueError(msg)\n\n return IOArgs(\n filepath_or_buffer=filepath_or_buffer,\n encoding=encoding,\n compression=compression,\n should_close=False,\n mode=mode,\n )\n\n\ndef file_path_to_url(path: str) -> str:\n """\n converts an absolute native path to a FILE URL.\n\n Parameters\n ----------\n path : a path in native format\n\n Returns\n -------\n a valid FILE URL\n """\n # lazify expensive import (~30ms)\n from urllib.request import pathname2url\n\n return urljoin("file:", pathname2url(path))\n\n\nextension_to_compression = {\n ".tar": "tar",\n ".tar.gz": "tar",\n ".tar.bz2": 
"tar",\n ".tar.xz": "tar",\n ".gz": "gzip",\n ".bz2": "bz2",\n ".zip": "zip",\n ".xz": "xz",\n ".zst": "zstd",\n}\n_supported_compressions = set(extension_to_compression.values())\n\n\ndef get_compression_method(\n compression: CompressionOptions,\n) -> tuple[str | None, CompressionDict]:\n """\n Simplifies a compression argument to a compression method string and\n a mapping containing additional arguments.\n\n Parameters\n ----------\n compression : str or mapping\n If string, specifies the compression method. If mapping, value at key\n 'method' specifies compression method.\n\n Returns\n -------\n tuple of ({compression method}, Optional[str]\n {compression arguments}, Dict[str, Any])\n\n Raises\n ------\n ValueError on mapping missing 'method' key\n """\n compression_method: str | None\n if isinstance(compression, Mapping):\n compression_args = dict(compression)\n try:\n compression_method = compression_args.pop("method")\n except KeyError as err:\n raise ValueError("If mapping, compression must have key 'method'") from err\n else:\n compression_args = {}\n compression_method = compression\n return compression_method, compression_args\n\n\n@doc(compression_options=_shared_docs["compression_options"] % "filepath_or_buffer")\ndef infer_compression(\n filepath_or_buffer: FilePath | BaseBuffer, compression: str | None\n) -> str | None:\n """\n Get the compression method for filepath_or_buffer. If compression='infer',\n the inferred compression method is returned. Otherwise, the input\n compression method is returned unchanged, unless it's invalid, in which\n case an error is raised.\n\n Parameters\n ----------\n filepath_or_buffer : str or file handle\n File path or object.\n {compression_options}\n\n .. 
versionchanged:: 1.4.0 Zstandard support.\n\n Returns\n -------\n string or None\n\n Raises\n ------\n ValueError on invalid compression specified.\n """\n if compression is None:\n return None\n\n # Infer compression\n if compression == "infer":\n # Convert all path types (e.g. pathlib.Path) to strings\n filepath_or_buffer = stringify_path(filepath_or_buffer, convert_file_like=True)\n if not isinstance(filepath_or_buffer, str):\n # Cannot infer compression of a buffer, assume no compression\n return None\n\n # Infer compression from the filename/URL extension\n for extension, compression in extension_to_compression.items():\n if filepath_or_buffer.lower().endswith(extension):\n return compression\n return None\n\n # Compression has been specified. Check that it's valid\n if compression in _supported_compressions:\n return compression\n\n valid = ["infer", None] + sorted(_supported_compressions)\n msg = (\n f"Unrecognized compression type: {compression}\n"\n f"Valid compression types are {valid}"\n )\n raise ValueError(msg)\n\n\ndef check_parent_directory(path: Path | str) -> None:\n """\n Check if parent directory of a file exists, raise OSError if it does not\n\n Parameters\n ----------\n path: Path or str\n Path to check parent directory of\n """\n parent = Path(path).parent\n if not parent.is_dir():\n raise OSError(rf"Cannot save file into a non-existent directory: '{parent}'")\n\n\n@overload\ndef get_handle(\n path_or_buf: FilePath | BaseBuffer,\n mode: str,\n *,\n encoding: str | None = ...,\n compression: CompressionOptions = ...,\n memory_map: bool = ...,\n is_text: Literal[False],\n errors: str | None = ...,\n storage_options: StorageOptions = ...,\n) -> IOHandles[bytes]:\n ...\n\n\n@overload\ndef get_handle(\n path_or_buf: FilePath | BaseBuffer,\n mode: str,\n *,\n encoding: str | None = ...,\n compression: CompressionOptions = ...,\n memory_map: bool = ...,\n is_text: Literal[True] = ...,\n errors: str | None = ...,\n storage_options: StorageOptions = 
...,\n) -> IOHandles[str]:\n ...\n\n\n@overload\ndef get_handle(\n path_or_buf: FilePath | BaseBuffer,\n mode: str,\n *,\n encoding: str | None = ...,\n compression: CompressionOptions = ...,\n memory_map: bool = ...,\n is_text: bool = ...,\n errors: str | None = ...,\n storage_options: StorageOptions = ...,\n) -> IOHandles[str] | IOHandles[bytes]:\n ...\n\n\n@doc(compression_options=_shared_docs["compression_options"] % "path_or_buf")\ndef get_handle(\n path_or_buf: FilePath | BaseBuffer,\n mode: str,\n *,\n encoding: str | None = None,\n compression: CompressionOptions | None = None,\n memory_map: bool = False,\n is_text: bool = True,\n errors: str | None = None,\n storage_options: StorageOptions | None = None,\n) -> IOHandles[str] | IOHandles[bytes]:\n """\n Get file handle for given path/buffer and mode.\n\n Parameters\n ----------\n path_or_buf : str or file handle\n File path or object.\n mode : str\n Mode to open path_or_buf with.\n encoding : str or None\n Encoding to use.\n {compression_options}\n\n May be a dict with key 'method' as compression mode\n and other keys as compression options if compression\n mode is 'zip'.\n\n Passing compression options as keys in dict is\n supported for compression modes 'gzip', 'bz2', 'zstd' and 'zip'.\n\n .. versionchanged:: 1.4.0 Zstandard support.\n\n memory_map : bool, default False\n See parsers._parser_params for more information. Only used by read_csv.\n is_text : bool, default True\n Whether the type of the content passed to the file/buffer is string or\n bytes. This is not the same as `"b" not in mode`. If a string content is\n passed to a binary file/buffer, a wrapper is inserted.\n errors : str, default 'strict'\n Specifies how encoding and decoding errors are to be handled.\n See the errors argument for :func:`open` for a full list\n of options.\n storage_options: StorageOptions = None\n Passed to _get_filepath_or_buffer\n\n Returns the dataclass IOHandles\n """\n # Windows does not default to utf-8. 
Set to utf-8 for a consistent behavior\n encoding = encoding or "utf-8"\n\n errors = errors or "strict"\n\n # read_csv does not know whether the buffer is opened in binary/text mode\n if _is_binary_mode(path_or_buf, mode) and "b" not in mode:\n mode += "b"\n\n # validate encoding and errors\n codecs.lookup(encoding)\n if isinstance(errors, str):\n codecs.lookup_error(errors)\n\n # open URLs\n ioargs = _get_filepath_or_buffer(\n path_or_buf,\n encoding=encoding,\n compression=compression,\n mode=mode,\n storage_options=storage_options,\n )\n\n handle = ioargs.filepath_or_buffer\n handles: list[BaseBuffer]\n\n # memory mapping needs to be the first step\n # only used for read_csv\n handle, memory_map, handles = _maybe_memory_map(handle, memory_map)\n\n is_path = isinstance(handle, str)\n compression_args = dict(ioargs.compression)\n compression = compression_args.pop("method")\n\n # Only for write methods\n if "r" not in mode and is_path:\n check_parent_directory(str(handle))\n\n if compression:\n if compression != "zstd":\n # compression libraries do not like an explicit text-mode\n ioargs.mode = ioargs.mode.replace("t", "")\n elif compression == "zstd" and "b" not in ioargs.mode:\n # python-zstandard defaults to text mode, but we always expect\n # compression libraries to use binary mode.\n ioargs.mode += "b"\n\n # GZ Compression\n if compression == "gzip":\n if isinstance(handle, str):\n # error: Incompatible types in assignment (expression has type\n # "GzipFile", variable has type "Union[str, BaseBuffer]")\n handle = gzip.GzipFile( # type: ignore[assignment]\n filename=handle,\n mode=ioargs.mode,\n **compression_args,\n )\n else:\n handle = gzip.GzipFile(\n # No overload variant of "GzipFile" matches argument types\n # "Union[str, BaseBuffer]", "str", "Dict[str, Any]"\n fileobj=handle, # type: ignore[call-overload]\n mode=ioargs.mode,\n **compression_args,\n )\n\n # BZ Compression\n elif compression == "bz2":\n # Overload of "BZ2File" to handle pickle protocol 
5\n # "Union[str, BaseBuffer]", "str", "Dict[str, Any]"\n handle = get_bz2_file()( # type: ignore[call-overload]\n handle,\n mode=ioargs.mode,\n **compression_args,\n )\n\n # ZIP Compression\n elif compression == "zip":\n # error: Argument 1 to "_BytesZipFile" has incompatible type\n # "Union[str, BaseBuffer]"; expected "Union[Union[str, PathLike[str]],\n # ReadBuffer[bytes], WriteBuffer[bytes]]"\n handle = _BytesZipFile(\n handle, ioargs.mode, **compression_args # type: ignore[arg-type]\n )\n if handle.buffer.mode == "r":\n handles.append(handle)\n zip_names = handle.buffer.namelist()\n if len(zip_names) == 1:\n handle = handle.buffer.open(zip_names.pop())\n elif not zip_names:\n raise ValueError(f"Zero files found in ZIP file {path_or_buf}")\n else:\n raise ValueError(\n "Multiple files found in ZIP file. "\n f"Only one file per ZIP: {zip_names}"\n )\n\n # TAR Encoding\n elif compression == "tar":\n compression_args.setdefault("mode", ioargs.mode)\n if isinstance(handle, str):\n handle = _BytesTarFile(name=handle, **compression_args)\n else:\n # error: Argument "fileobj" to "_BytesTarFile" has incompatible\n # type "BaseBuffer"; expected "Union[ReadBuffer[bytes],\n # WriteBuffer[bytes], None]"\n handle = _BytesTarFile(\n fileobj=handle, **compression_args # type: ignore[arg-type]\n )\n assert isinstance(handle, _BytesTarFile)\n if "r" in handle.buffer.mode:\n handles.append(handle)\n files = handle.buffer.getnames()\n if len(files) == 1:\n file = handle.buffer.extractfile(files[0])\n assert file is not None\n handle = file\n elif not files:\n raise ValueError(f"Zero files found in TAR archive {path_or_buf}")\n else:\n raise ValueError(\n "Multiple files found in TAR archive. 
"\n f"Only one file per TAR archive: {files}"\n )\n\n # XZ Compression\n elif compression == "xz":\n # error: Argument 1 to "LZMAFile" has incompatible type "Union[str,\n # BaseBuffer]"; expected "Optional[Union[Union[str, bytes, PathLike[str],\n # PathLike[bytes]], IO[bytes]], None]"\n handle = get_lzma_file()(\n handle, ioargs.mode, **compression_args # type: ignore[arg-type]\n )\n\n # Zstd Compression\n elif compression == "zstd":\n zstd = import_optional_dependency("zstandard")\n if "r" in ioargs.mode:\n open_args = {"dctx": zstd.ZstdDecompressor(**compression_args)}\n else:\n open_args = {"cctx": zstd.ZstdCompressor(**compression_args)}\n handle = zstd.open(\n handle,\n mode=ioargs.mode,\n **open_args,\n )\n\n # Unrecognized Compression\n else:\n msg = f"Unrecognized compression type: {compression}"\n raise ValueError(msg)\n\n assert not isinstance(handle, str)\n handles.append(handle)\n\n elif isinstance(handle, str):\n # Check whether the filename is to be opened in binary mode.\n # Binary mode does not support 'encoding' and 'newline'.\n if ioargs.encoding and "b" not in ioargs.mode:\n # Encoding\n handle = open(\n handle,\n ioargs.mode,\n encoding=ioargs.encoding,\n errors=errors,\n newline="",\n )\n else:\n # Binary mode\n handle = open(handle, ioargs.mode)\n handles.append(handle)\n\n # Convert BytesIO or file objects passed with an encoding\n is_wrapped = False\n if not is_text and ioargs.mode == "rb" and isinstance(handle, TextIOBase):\n # not added to handles as it does not open/buffer resources\n handle = _BytesIOWrapper(\n handle,\n encoding=ioargs.encoding,\n )\n elif is_text and (\n compression or memory_map or _is_binary_mode(handle, ioargs.mode)\n ):\n if (\n not hasattr(handle, "readable")\n or not hasattr(handle, "writable")\n or not hasattr(handle, "seekable")\n ):\n handle = _IOWrapper(handle)\n # error: Argument 1 to "TextIOWrapper" has incompatible type\n # "_IOWrapper"; expected "IO[bytes]"\n handle = TextIOWrapper(\n handle, # type: 
ignore[arg-type]\n encoding=ioargs.encoding,\n errors=errors,\n newline="",\n )\n handles.append(handle)\n # only marked as wrapped when the caller provided a handle\n is_wrapped = not (\n isinstance(ioargs.filepath_or_buffer, str) or ioargs.should_close\n )\n\n if "r" in ioargs.mode and not hasattr(handle, "read"):\n raise TypeError(\n "Expected file path name or file-like object, "\n f"got {type(ioargs.filepath_or_buffer)} type"\n )\n\n handles.reverse() # close the most recently added buffer first\n if ioargs.should_close:\n assert not isinstance(ioargs.filepath_or_buffer, str)\n handles.append(ioargs.filepath_or_buffer)\n\n return IOHandles(\n # error: Argument "handle" to "IOHandles" has incompatible type\n # "Union[TextIOWrapper, GzipFile, BaseBuffer, typing.IO[bytes],\n # typing.IO[Any]]"; expected "pandas._typing.IO[Any]"\n handle=handle, # type: ignore[arg-type]\n # error: Argument "created_handles" to "IOHandles" has incompatible type\n # "List[BaseBuffer]"; expected "List[Union[IO[bytes], IO[str]]]"\n created_handles=handles, # type: ignore[arg-type]\n is_wrapped=is_wrapped,\n compression=ioargs.compression,\n )\n\n\n# error: Definition of "__enter__" in base class "IOBase" is incompatible\n# with definition in base class "BinaryIO"\nclass _BufferedWriter(BytesIO, ABC): # type: ignore[misc]\n """\n Some objects do not support multiple .write() calls (TarFile and ZipFile).\n This wrapper writes to the underlying buffer on close.\n """\n\n buffer = BytesIO()\n\n @abstractmethod\n def write_to_buffer(self) -> None:\n ...\n\n def close(self) -> None:\n if self.closed:\n # already closed\n return\n if self.getbuffer().nbytes:\n # write to buffer\n self.seek(0)\n with self.buffer:\n self.write_to_buffer()\n else:\n self.buffer.close()\n super().close()\n\n\nclass _BytesTarFile(_BufferedWriter):\n def __init__(\n self,\n name: str | None = None,\n mode: Literal["r", "a", "w", "x"] = "r",\n fileobj: ReadBuffer[bytes] | WriteBuffer[bytes] | None = None,\n 
archive_name: str | None = None,\n **kwargs,\n ) -> None:\n super().__init__()\n self.archive_name = archive_name\n self.name = name\n # error: Incompatible types in assignment (expression has type "TarFile",\n # base class "_BufferedWriter" defined the type as "BytesIO")\n self.buffer: tarfile.TarFile = tarfile.TarFile.open( # type: ignore[assignment]\n name=name,\n mode=self.extend_mode(mode),\n fileobj=fileobj,\n **kwargs,\n )\n\n def extend_mode(self, mode: str) -> str:\n mode = mode.replace("b", "")\n if mode != "w":\n return mode\n if self.name is not None:\n suffix = Path(self.name).suffix\n if suffix in (".gz", ".xz", ".bz2"):\n mode = f"{mode}:{suffix[1:]}"\n return mode\n\n def infer_filename(self) -> str | None:\n """\n If an explicit archive_name is not given, we still want the file inside the zip\n file not to be named something.tar, because that causes confusion (GH39465).\n """\n if self.name is None:\n return None\n\n filename = Path(self.name)\n if filename.suffix == ".tar":\n return filename.with_suffix("").name\n elif filename.suffix in (".tar.gz", ".tar.bz2", ".tar.xz"):\n return filename.with_suffix("").with_suffix("").name\n return filename.name\n\n def write_to_buffer(self) -> None:\n # TarFile needs a non-empty string\n archive_name = self.archive_name or self.infer_filename() or "tar"\n tarinfo = tarfile.TarInfo(name=archive_name)\n tarinfo.size = len(self.getvalue())\n self.buffer.addfile(tarinfo, self)\n\n\nclass _BytesZipFile(_BufferedWriter):\n def __init__(\n self,\n file: FilePath | ReadBuffer[bytes] | WriteBuffer[bytes],\n mode: str,\n archive_name: str | None = None,\n **kwargs,\n ) -> None:\n super().__init__()\n mode = mode.replace("b", "")\n self.archive_name = archive_name\n\n kwargs.setdefault("compression", zipfile.ZIP_DEFLATED)\n # error: Incompatible types in assignment (expression has type "ZipFile",\n # base class "_BufferedWriter" defined the type as "BytesIO")\n self.buffer: zipfile.ZipFile = zipfile.ZipFile( # type: 
ignore[assignment]\n file, mode, **kwargs\n )\n\n def infer_filename(self) -> str | None:\n """\n If an explicit archive_name is not given, we still want the file inside the zip\n file not to be named something.zip, because that causes confusion (GH39465).\n """\n if isinstance(self.buffer.filename, (os.PathLike, str)):\n filename = Path(self.buffer.filename)\n if filename.suffix == ".zip":\n return filename.with_suffix("").name\n return filename.name\n return None\n\n def write_to_buffer(self) -> None:\n # ZipFile needs a non-empty string\n archive_name = self.archive_name or self.infer_filename() or "zip"\n self.buffer.writestr(archive_name, self.getvalue())\n\n\nclass _IOWrapper:\n # TextIOWrapper is overly strict: it request that the buffer has seekable, readable,\n # and writable. If we have a read-only buffer, we shouldn't need writable and vice\n # versa. Some buffers, are seek/read/writ-able but they do not have the "-able"\n # methods, e.g., tempfile.SpooledTemporaryFile.\n # If a buffer does not have the above "-able" methods, we simple assume they are\n # seek/read/writ-able.\n def __init__(self, buffer: BaseBuffer) -> None:\n self.buffer = buffer\n\n def __getattr__(self, name: str):\n return getattr(self.buffer, name)\n\n def readable(self) -> bool:\n if hasattr(self.buffer, "readable"):\n return self.buffer.readable()\n return True\n\n def seekable(self) -> bool:\n if hasattr(self.buffer, "seekable"):\n return self.buffer.seekable()\n return True\n\n def writable(self) -> bool:\n if hasattr(self.buffer, "writable"):\n return self.buffer.writable()\n return True\n\n\nclass _BytesIOWrapper:\n # Wrapper that wraps a StringIO buffer and reads bytes from it\n # Created for compat with pyarrow read_csv\n def __init__(self, buffer: StringIO | TextIOBase, encoding: str = "utf-8") -> None:\n self.buffer = buffer\n self.encoding = encoding\n # Because a character can be represented by more than 1 byte,\n # it is possible that reading will produce more bytes 
than n\n # We store the extra bytes in this overflow variable, and append the\n # overflow to the front of the bytestring the next time reading is performed\n self.overflow = b""\n\n def __getattr__(self, attr: str):\n return getattr(self.buffer, attr)\n\n def read(self, n: int | None = -1) -> bytes:\n assert self.buffer is not None\n bytestring = self.buffer.read(n).encode(self.encoding)\n # When n=-1/n greater than remaining bytes: Read entire file/rest of file\n combined_bytestring = self.overflow + bytestring\n if n is None or n < 0 or n >= len(combined_bytestring):\n self.overflow = b""\n return combined_bytestring\n else:\n to_return = combined_bytestring[:n]\n self.overflow = combined_bytestring[n:]\n return to_return\n\n\ndef _maybe_memory_map(\n handle: str | BaseBuffer, memory_map: bool\n) -> tuple[str | BaseBuffer, bool, list[BaseBuffer]]:\n """Try to memory map file/buffer."""\n handles: list[BaseBuffer] = []\n memory_map &= hasattr(handle, "fileno") or isinstance(handle, str)\n if not memory_map:\n return handle, memory_map, handles\n\n # mmap used by only read_csv\n handle = cast(ReadCsvBuffer, handle)\n\n # need to open the file first\n if isinstance(handle, str):\n handle = open(handle, "rb")\n handles.append(handle)\n\n try:\n # open mmap and adds *-able\n # error: Argument 1 to "_IOWrapper" has incompatible type "mmap";\n # expected "BaseBuffer"\n wrapped = _IOWrapper(\n mmap.mmap(\n handle.fileno(), 0, access=mmap.ACCESS_READ # type: ignore[arg-type]\n )\n )\n finally:\n for handle in reversed(handles):\n # error: "BaseBuffer" has no attribute "close"\n handle.close() # type: ignore[attr-defined]\n\n return wrapped, memory_map, [wrapped]\n\n\ndef file_exists(filepath_or_buffer: FilePath | BaseBuffer) -> bool:\n """Test whether file exists."""\n exists = False\n filepath_or_buffer = stringify_path(filepath_or_buffer)\n if not isinstance(filepath_or_buffer, str):\n return exists\n try:\n exists = os.path.exists(filepath_or_buffer)\n # gh-5874: if 
the filepath is too long will raise here\n except (TypeError, ValueError):\n pass\n return exists\n\n\ndef _is_binary_mode(handle: FilePath | BaseBuffer, mode: str) -> bool:\n """Whether the handle is opened in binary mode"""\n # specified by user\n if "t" in mode or "b" in mode:\n return "b" in mode\n\n # exceptions\n text_classes = (\n # classes that expect string but have 'b' in mode\n codecs.StreamWriter,\n codecs.StreamReader,\n codecs.StreamReaderWriter,\n )\n if issubclass(type(handle), text_classes):\n return False\n\n return isinstance(handle, _get_binary_io_classes()) or "b" in getattr(\n handle, "mode", mode\n )\n\n\n@functools.lru_cache\ndef _get_binary_io_classes() -> tuple[type, ...]:\n """IO classes that that expect bytes"""\n binary_classes: tuple[type, ...] = (BufferedIOBase, RawIOBase)\n\n # python-zstandard doesn't use any of the builtin base classes; instead we\n # have to use the `zstd.ZstdDecompressionReader` class for isinstance checks.\n # Unfortunately `zstd.ZstdDecompressionReader` isn't exposed by python-zstandard\n # so we have to get it from a `zstd.ZstdDecompressor` instance.\n # See also https://github.com/indygreg/python-zstandard/pull/165.\n zstd = import_optional_dependency("zstandard", errors="ignore")\n if zstd is not None:\n with zstd.ZstdDecompressor().stream_reader(b"") as reader:\n binary_classes += (type(reader),)\n\n return binary_classes\n\n\ndef is_potential_multi_index(\n columns: Sequence[Hashable] | MultiIndex,\n index_col: bool | Sequence[int] | None = None,\n) -> bool:\n """\n Check whether or not the `columns` parameter\n could be converted into a MultiIndex.\n\n Parameters\n ----------\n columns : array-like\n Object which may or may not be convertible into a MultiIndex\n index_col : None, bool or list, optional\n Column or columns to use as the (possibly hierarchical) index\n\n Returns\n -------\n bool : Whether or not columns could become a MultiIndex\n """\n if index_col is None or isinstance(index_col, bool):\n 
index_col = []\n\n return bool(\n len(columns)\n and not isinstance(columns, ABCMultiIndex)\n and all(isinstance(c, tuple) for c in columns if c not in list(index_col))\n )\n\n\ndef dedup_names(\n names: Sequence[Hashable], is_potential_multiindex: bool\n) -> Sequence[Hashable]:\n """\n Rename column names if duplicates exist.\n\n Currently the renaming is done by appending a period and an autonumeric,\n but a custom pattern may be supported in the future.\n\n Examples\n --------\n >>> dedup_names(["x", "y", "x", "x"], is_potential_multiindex=False)\n ['x', 'y', 'x.1', 'x.2']\n """\n names = list(names) # so we can index\n counts: DefaultDict[Hashable, int] = defaultdict(int)\n\n for i, col in enumerate(names):\n cur_count = counts[col]\n\n while cur_count > 0:\n counts[col] = cur_count + 1\n\n if is_potential_multiindex:\n # for mypy\n assert isinstance(col, tuple)\n col = col[:-1] + (f"{col[-1]}.{cur_count}",)\n else:\n col = f"{col}.{cur_count}"\n cur_count = counts[col]\n\n names[i] = col\n counts[col] = cur_count + 1\n\n return names\n
.venv\Lib\site-packages\pandas\io\common.py
common.py
Python
40,615
0.95
0.136543
0.122088
awesome-app
60
2023-07-23T09:42:43.482498
GPL-3.0
false
49233a5ac56c238c23a8637dd8aaad2c
""" feather-format compat """\nfrom __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n Any,\n)\n\nfrom pandas._config import using_string_dtype\n\nfrom pandas._libs import lib\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.util._decorators import doc\nfrom pandas.util._validators import check_dtype_backend\n\nfrom pandas.core.api import DataFrame\nfrom pandas.core.shared_docs import _shared_docs\n\nfrom pandas.io._util import arrow_table_to_pandas\nfrom pandas.io.common import get_handle\n\nif TYPE_CHECKING:\n from collections.abc import (\n Hashable,\n Sequence,\n )\n\n from pandas._typing import (\n DtypeBackend,\n FilePath,\n ReadBuffer,\n StorageOptions,\n WriteBuffer,\n )\n\n\n@doc(storage_options=_shared_docs["storage_options"])\ndef to_feather(\n df: DataFrame,\n path: FilePath | WriteBuffer[bytes],\n storage_options: StorageOptions | None = None,\n **kwargs: Any,\n) -> None:\n """\n Write a DataFrame to the binary Feather format.\n\n Parameters\n ----------\n df : DataFrame\n path : str, path object, or file-like object\n {storage_options}\n **kwargs :\n Additional keywords passed to `pyarrow.feather.write_feather`.\n\n """\n import_optional_dependency("pyarrow")\n from pyarrow import feather\n\n if not isinstance(df, DataFrame):\n raise ValueError("feather only support IO with DataFrames")\n\n with get_handle(\n path, "wb", storage_options=storage_options, is_text=False\n ) as handles:\n feather.write_feather(df, handles.handle, **kwargs)\n\n\n@doc(storage_options=_shared_docs["storage_options"])\ndef read_feather(\n path: FilePath | ReadBuffer[bytes],\n columns: Sequence[Hashable] | None = None,\n use_threads: bool = True,\n storage_options: StorageOptions | None = None,\n dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,\n) -> DataFrame:\n """\n Load a feather-format object from the file path.\n\n Parameters\n ----------\n path : str, path object, or file-like object\n String, path object 
(implementing ``os.PathLike[str]``), or file-like\n object implementing a binary ``read()`` function. The string could be a URL.\n Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is\n expected. A local file could be: ``file://localhost/path/to/table.feather``.\n columns : sequence, default None\n If not provided, all columns are read.\n use_threads : bool, default True\n Whether to parallelize reading using multiple threads.\n {storage_options}\n\n dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable'\n Back-end data type applied to the resultant :class:`DataFrame`\n (still experimental). Behaviour is as follows:\n\n * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`\n (default).\n * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`\n DataFrame.\n\n .. versionadded:: 2.0\n\n Returns\n -------\n type of object stored in file\n\n Examples\n --------\n >>> df = pd.read_feather("path/to/file.feather") # doctest: +SKIP\n """\n import_optional_dependency("pyarrow")\n from pyarrow import feather\n\n # import utils to register the pyarrow extension types\n import pandas.core.arrays.arrow.extension_types # pyright: ignore[reportUnusedImport] # noqa: F401\n\n check_dtype_backend(dtype_backend)\n\n with get_handle(\n path, "rb", storage_options=storage_options, is_text=False\n ) as handles:\n if dtype_backend is lib.no_default and not using_string_dtype():\n return feather.read_feather(\n handles.handle, columns=columns, use_threads=bool(use_threads)\n )\n\n pa_table = feather.read_table(\n handles.handle, columns=columns, use_threads=bool(use_threads)\n )\n return arrow_table_to_pandas(pa_table, dtype_backend=dtype_backend)\n
.venv\Lib\site-packages\pandas\io\feather_format.py
feather_format.py
Python
4,007
0.95
0.069231
0.047619
node-utils
279
2023-12-20T13:46:14.392729
Apache-2.0
false
ffe22a025e6c01053e81ee197d4b9f6e
""" Google BigQuery support """\nfrom __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n Any,\n)\nimport warnings\n\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.util._exceptions import find_stack_level\n\nif TYPE_CHECKING:\n from google.auth.credentials import Credentials\n\n from pandas import DataFrame\n\n\ndef _try_import():\n # since pandas is a dependency of pandas-gbq\n # we need to import on first use\n msg = (\n "pandas-gbq is required to load data from Google BigQuery. "\n "See the docs: https://pandas-gbq.readthedocs.io."\n )\n pandas_gbq = import_optional_dependency("pandas_gbq", extra=msg)\n return pandas_gbq\n\n\ndef read_gbq(\n query: str,\n project_id: str | None = None,\n index_col: str | None = None,\n col_order: list[str] | None = None,\n reauth: bool = False,\n auth_local_webserver: bool = True,\n dialect: str | None = None,\n location: str | None = None,\n configuration: dict[str, Any] | None = None,\n credentials: Credentials | None = None,\n use_bqstorage_api: bool | None = None,\n max_results: int | None = None,\n progress_bar_type: str | None = None,\n) -> DataFrame:\n """\n Load data from Google BigQuery.\n\n .. deprecated:: 2.2.0\n\n Please use ``pandas_gbq.read_gbq`` instead.\n\n This function requires the `pandas-gbq package\n <https://pandas-gbq.readthedocs.io>`__.\n\n See the `How to authenticate with Google BigQuery\n <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__\n guide for authentication instructions.\n\n Parameters\n ----------\n query : str\n SQL-Like Query to return data values.\n project_id : str, optional\n Google BigQuery Account project ID. 
Optional when available from\n the environment.\n index_col : str, optional\n Name of result column to use for index in results DataFrame.\n col_order : list(str), optional\n List of BigQuery column names in the desired order for results\n DataFrame.\n reauth : bool, default False\n Force Google BigQuery to re-authenticate the user. This is useful\n if multiple accounts are used.\n auth_local_webserver : bool, default True\n Use the `local webserver flow`_ instead of the `console flow`_\n when getting user credentials.\n\n .. _local webserver flow:\n https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server\n .. _console flow:\n https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console\n\n *New in version 0.2.0 of pandas-gbq*.\n\n .. versionchanged:: 1.5.0\n Default value is changed to ``True``. Google has deprecated the\n ``auth_local_webserver = False`` `"out of band" (copy-paste)\n flow\n <https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html?m=1#disallowed-oob>`_.\n dialect : str, default 'legacy'\n Note: The default value is changing to 'standard' in a future version.\n\n SQL syntax dialect to use. Value can be one of:\n\n ``'legacy'``\n Use BigQuery's legacy SQL dialect. For more information see\n `BigQuery Legacy SQL Reference\n <https://cloud.google.com/bigquery/docs/reference/legacy-sql>`__.\n ``'standard'``\n Use BigQuery's standard SQL, which is\n compliant with the SQL 2011 standard. For more information\n see `BigQuery Standard SQL Reference\n <https://cloud.google.com/bigquery/docs/reference/standard-sql/>`__.\n location : str, optional\n Location where the query job should run. See the `BigQuery locations\n documentation\n <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a\n list of available locations. 
The location must match that of any\n datasets used in the query.\n\n *New in version 0.5.0 of pandas-gbq*.\n configuration : dict, optional\n Query config parameters for job processing.\n For example:\n\n configuration = {'query': {'useQueryCache': False}}\n\n For more information see `BigQuery REST API Reference\n <https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query>`__.\n credentials : google.auth.credentials.Credentials, optional\n Credentials for accessing Google APIs. Use this parameter to override\n default credentials, such as to use Compute Engine\n :class:`google.auth.compute_engine.Credentials` or Service Account\n :class:`google.oauth2.service_account.Credentials` directly.\n\n *New in version 0.8.0 of pandas-gbq*.\n use_bqstorage_api : bool, default False\n Use the `BigQuery Storage API\n <https://cloud.google.com/bigquery/docs/reference/storage/>`__ to\n download query results quickly, but at an increased cost. To use this\n API, first `enable it in the Cloud Console\n <https://console.cloud.google.com/apis/library/bigquerystorage.googleapis.com>`__.\n You must also have the `bigquery.readsessions.create\n <https://cloud.google.com/bigquery/docs/access-control#roles>`__\n permission on the project you are billing queries to.\n\n This feature requires version 0.10.0 or later of the ``pandas-gbq``\n package. It also requires the ``google-cloud-bigquery-storage`` and\n ``fastavro`` packages.\n\n max_results : int, optional\n If set, limit the maximum number of rows to fetch from the query\n results.\n\n progress_bar_type : Optional, str\n If set, use the `tqdm <https://tqdm.github.io/>`__ library to\n display a progress bar while the data downloads. 
Install the\n ``tqdm`` package to use this feature.\n\n Possible values of ``progress_bar_type`` include:\n\n ``None``\n No progress bar.\n ``'tqdm'``\n Use the :func:`tqdm.tqdm` function to print a progress bar\n to :data:`sys.stderr`.\n ``'tqdm_notebook'``\n Use the :func:`tqdm.tqdm_notebook` function to display a\n progress bar as a Jupyter notebook widget.\n ``'tqdm_gui'``\n Use the :func:`tqdm.tqdm_gui` function to display a\n progress bar as a graphical dialog box.\n\n Returns\n -------\n df: DataFrame\n DataFrame representing results of query.\n\n See Also\n --------\n pandas_gbq.read_gbq : This function in the pandas-gbq library.\n DataFrame.to_gbq : Write a DataFrame to Google BigQuery.\n\n Examples\n --------\n Example taken from `Google BigQuery documentation\n <https://cloud.google.com/bigquery/docs/pandas-gbq-migration>`_\n\n >>> sql = "SELECT name FROM table_name WHERE state = 'TX' LIMIT 100;"\n >>> df = pd.read_gbq(sql, dialect="standard") # doctest: +SKIP\n >>> project_id = "your-project-id" # doctest: +SKIP\n >>> df = pd.read_gbq(sql,\n ... project_id=project_id,\n ... dialect="standard"\n ... ) # doctest: +SKIP\n """\n warnings.warn(\n "read_gbq is deprecated and will be removed in a future version. "\n "Please use pandas_gbq.read_gbq instead: "\n "https://pandas-gbq.readthedocs.io/en/latest/api.html#pandas_gbq.read_gbq",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n pandas_gbq = _try_import()\n\n kwargs: dict[str, str | bool | int | None] = {}\n\n # START: new kwargs. 
Don't populate unless explicitly set.\n if use_bqstorage_api is not None:\n kwargs["use_bqstorage_api"] = use_bqstorage_api\n if max_results is not None:\n kwargs["max_results"] = max_results\n\n kwargs["progress_bar_type"] = progress_bar_type\n # END: new kwargs\n\n return pandas_gbq.read_gbq(\n query,\n project_id=project_id,\n index_col=index_col,\n col_order=col_order,\n reauth=reauth,\n auth_local_webserver=auth_local_webserver,\n dialect=dialect,\n location=location,\n configuration=configuration,\n credentials=credentials,\n **kwargs,\n )\n\n\ndef to_gbq(\n dataframe: DataFrame,\n destination_table: str,\n project_id: str | None = None,\n chunksize: int | None = None,\n reauth: bool = False,\n if_exists: str = "fail",\n auth_local_webserver: bool = True,\n table_schema: list[dict[str, str]] | None = None,\n location: str | None = None,\n progress_bar: bool = True,\n credentials: Credentials | None = None,\n) -> None:\n warnings.warn(\n "to_gbq is deprecated and will be removed in a future version. "\n "Please use pandas_gbq.to_gbq instead: "\n "https://pandas-gbq.readthedocs.io/en/latest/api.html#pandas_gbq.to_gbq",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n pandas_gbq = _try_import()\n pandas_gbq.to_gbq(\n dataframe,\n destination_table,\n project_id=project_id,\n chunksize=chunksize,\n reauth=reauth,\n if_exists=if_exists,\n auth_local_webserver=auth_local_webserver,\n table_schema=table_schema,\n location=location,\n progress_bar=progress_bar,\n credentials=credentials,\n )\n
.venv\Lib\site-packages\pandas\io\gbq.py
gbq.py
Python
9,372
0.95
0.082353
0.036697
awesome-app
753
2024-12-16T05:07:41.119406
GPL-3.0
false
1b430f8654fd3e4aaa814a79574188f0
"""\n:mod:`pandas.io.html` is a module containing functionality for dealing with\nHTML IO.\n\n"""\n\nfrom __future__ import annotations\n\nfrom collections import abc\nimport numbers\nimport re\nfrom re import Pattern\nfrom typing import (\n TYPE_CHECKING,\n Literal,\n cast,\n)\nimport warnings\n\nfrom pandas._libs import lib\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.errors import (\n AbstractMethodError,\n EmptyDataError,\n)\nfrom pandas.util._decorators import doc\nfrom pandas.util._exceptions import find_stack_level\nfrom pandas.util._validators import check_dtype_backend\n\nfrom pandas.core.dtypes.common import is_list_like\n\nfrom pandas import isna\nfrom pandas.core.indexes.base import Index\nfrom pandas.core.indexes.multi import MultiIndex\nfrom pandas.core.series import Series\nfrom pandas.core.shared_docs import _shared_docs\n\nfrom pandas.io.common import (\n file_exists,\n get_handle,\n is_file_like,\n is_fsspec_url,\n is_url,\n stringify_path,\n validate_header_arg,\n)\nfrom pandas.io.formats.printing import pprint_thing\nfrom pandas.io.parsers import TextParser\n\nif TYPE_CHECKING:\n from collections.abc import (\n Iterable,\n Sequence,\n )\n\n from pandas._typing import (\n BaseBuffer,\n DtypeBackend,\n FilePath,\n HTMLFlavors,\n ReadBuffer,\n StorageOptions,\n )\n\n from pandas import DataFrame\n\n#############\n# READ HTML #\n#############\n_RE_WHITESPACE = re.compile(r"[\r\n]+|\s{2,}")\n\n\ndef _remove_whitespace(s: str, regex: Pattern = _RE_WHITESPACE) -> str:\n """\n Replace extra whitespace inside of a string with a single space.\n\n Parameters\n ----------\n s : str or unicode\n The string from which to remove extra whitespace.\n regex : re.Pattern\n The regular expression to use to remove extra whitespace.\n\n Returns\n -------\n subd : str or unicode\n `s` with all extra whitespace replaced with a single space.\n """\n return regex.sub(" ", s.strip())\n\n\ndef _get_skiprows(skiprows: int | Sequence[int] | 
slice | None) -> int | Sequence[int]:\n """\n Get an iterator given an integer, slice or container.\n\n Parameters\n ----------\n skiprows : int, slice, container\n The iterator to use to skip rows; can also be a slice.\n\n Raises\n ------\n TypeError\n * If `skiprows` is not a slice, integer, or Container\n\n Returns\n -------\n it : iterable\n A proper iterator to use to skip rows of a DataFrame.\n """\n if isinstance(skiprows, slice):\n start, step = skiprows.start or 0, skiprows.step or 1\n return list(range(start, skiprows.stop, step))\n elif isinstance(skiprows, numbers.Integral) or is_list_like(skiprows):\n return cast("int | Sequence[int]", skiprows)\n elif skiprows is None:\n return 0\n raise TypeError(f"{type(skiprows).__name__} is not a valid type for skipping rows")\n\n\ndef _read(\n obj: FilePath | BaseBuffer,\n encoding: str | None,\n storage_options: StorageOptions | None,\n) -> str | bytes:\n """\n Try to read from a url, file or string.\n\n Parameters\n ----------\n obj : str, unicode, path object, or file-like object\n\n Returns\n -------\n raw_text : str\n """\n text: str | bytes\n if (\n is_url(obj)\n or hasattr(obj, "read")\n or (isinstance(obj, str) and file_exists(obj))\n ):\n with get_handle(\n obj, "r", encoding=encoding, storage_options=storage_options\n ) as handles:\n text = handles.handle.read()\n elif isinstance(obj, (str, bytes)):\n text = obj\n else:\n raise TypeError(f"Cannot read object of type '{type(obj).__name__}'")\n return text\n\n\nclass _HtmlFrameParser:\n """\n Base class for parsers that parse HTML into DataFrames.\n\n Parameters\n ----------\n io : str or file-like\n This can be either a string of raw HTML, a valid URL using the HTTP,\n FTP, or FILE protocols or a file-like object.\n\n match : str or regex\n The text to match in the document.\n\n attrs : dict\n List of HTML <table> element attributes to match.\n\n encoding : str\n Encoding to be used by parser\n\n displayed_only : bool\n Whether or not items with 
"display:none" should be ignored\n\n extract_links : {None, "all", "header", "body", "footer"}\n Table elements in the specified section(s) with <a> tags will have their\n href extracted.\n\n .. versionadded:: 1.5.0\n\n Attributes\n ----------\n io : str or file-like\n raw HTML, URL, or file-like object\n\n match : regex\n The text to match in the raw HTML\n\n attrs : dict-like\n A dictionary of valid table attributes to use to search for table\n elements.\n\n encoding : str\n Encoding to be used by parser\n\n displayed_only : bool\n Whether or not items with "display:none" should be ignored\n\n extract_links : {None, "all", "header", "body", "footer"}\n Table elements in the specified section(s) with <a> tags will have their\n href extracted.\n\n .. versionadded:: 1.5.0\n\n Notes\n -----\n To subclass this class effectively you must override the following methods:\n * :func:`_build_doc`\n * :func:`_attr_getter`\n * :func:`_href_getter`\n * :func:`_text_getter`\n * :func:`_parse_td`\n * :func:`_parse_thead_tr`\n * :func:`_parse_tbody_tr`\n * :func:`_parse_tfoot_tr`\n * :func:`_parse_tables`\n * :func:`_equals_tag`\n See each method's respective documentation for details on their\n functionality.\n """\n\n def __init__(\n self,\n io: FilePath | ReadBuffer[str] | ReadBuffer[bytes],\n match: str | Pattern,\n attrs: dict[str, str] | None,\n encoding: str,\n displayed_only: bool,\n extract_links: Literal[None, "header", "footer", "body", "all"],\n storage_options: StorageOptions = None,\n ) -> None:\n self.io = io\n self.match = match\n self.attrs = attrs\n self.encoding = encoding\n self.displayed_only = displayed_only\n self.extract_links = extract_links\n self.storage_options = storage_options\n\n def parse_tables(self):\n """\n Parse and return all tables from the DOM.\n\n Returns\n -------\n list of parsed (header, body, footer) tuples from tables.\n """\n tables = self._parse_tables(self._build_doc(), self.match, self.attrs)\n return 
(self._parse_thead_tbody_tfoot(table) for table in tables)\n\n def _attr_getter(self, obj, attr):\n """\n Return the attribute value of an individual DOM node.\n\n Parameters\n ----------\n obj : node-like\n A DOM node.\n\n attr : str or unicode\n The attribute, such as "colspan"\n\n Returns\n -------\n str or unicode\n The attribute value.\n """\n # Both lxml and BeautifulSoup have the same implementation:\n return obj.get(attr)\n\n def _href_getter(self, obj) -> str | None:\n """\n Return a href if the DOM node contains a child <a> or None.\n\n Parameters\n ----------\n obj : node-like\n A DOM node.\n\n Returns\n -------\n href : str or unicode\n The href from the <a> child of the DOM node.\n """\n raise AbstractMethodError(self)\n\n def _text_getter(self, obj):\n """\n Return the text of an individual DOM node.\n\n Parameters\n ----------\n obj : node-like\n A DOM node.\n\n Returns\n -------\n text : str or unicode\n The text from an individual DOM node.\n """\n raise AbstractMethodError(self)\n\n def _parse_td(self, obj):\n """\n Return the td elements from a row element.\n\n Parameters\n ----------\n obj : node-like\n A DOM <tr> node.\n\n Returns\n -------\n list of node-like\n These are the elements of each row, i.e., the columns.\n """\n raise AbstractMethodError(self)\n\n def _parse_thead_tr(self, table):\n """\n Return the list of thead row elements from the parsed table element.\n\n Parameters\n ----------\n table : a table element that contains zero or more thead elements.\n\n Returns\n -------\n list of node-like\n These are the <tr> row elements of a table.\n """\n raise AbstractMethodError(self)\n\n def _parse_tbody_tr(self, table):\n """\n Return the list of tbody row elements from the parsed table element.\n\n HTML5 table bodies consist of either 0 or more <tbody> elements (which\n only contain <tr> elements) or 0 or more <tr> elements. 
This method\n checks for both structures.\n\n Parameters\n ----------\n table : a table element that contains row elements.\n\n Returns\n -------\n list of node-like\n These are the <tr> row elements of a table.\n """\n raise AbstractMethodError(self)\n\n def _parse_tfoot_tr(self, table):\n """\n Return the list of tfoot row elements from the parsed table element.\n\n Parameters\n ----------\n table : a table element that contains row elements.\n\n Returns\n -------\n list of node-like\n These are the <tr> row elements of a table.\n """\n raise AbstractMethodError(self)\n\n def _parse_tables(self, document, match, attrs):\n """\n Return all tables from the parsed DOM.\n\n Parameters\n ----------\n document : the DOM from which to parse the table element.\n\n match : str or regular expression\n The text to search for in the DOM tree.\n\n attrs : dict\n A dictionary of table attributes that can be used to disambiguate\n multiple tables on a page.\n\n Raises\n ------\n ValueError : `match` does not match any text in the document.\n\n Returns\n -------\n list of node-like\n HTML <table> elements to be parsed into raw data.\n """\n raise AbstractMethodError(self)\n\n def _equals_tag(self, obj, tag) -> bool:\n """\n Return whether an individual DOM node matches a tag\n\n Parameters\n ----------\n obj : node-like\n A DOM node.\n\n tag : str\n Tag name to be checked for equality.\n\n Returns\n -------\n boolean\n Whether `obj`'s tag name is `tag`\n """\n raise AbstractMethodError(self)\n\n def _build_doc(self):\n """\n Return a tree-like object that can be used to iterate over the DOM.\n\n Returns\n -------\n node-like\n The DOM from which to parse the table element.\n """\n raise AbstractMethodError(self)\n\n def _parse_thead_tbody_tfoot(self, table_html):\n """\n Given a table, return parsed header, body, and foot.\n\n Parameters\n ----------\n table_html : node-like\n\n Returns\n -------\n tuple of (header, body, footer), each a list of list-of-text rows.\n\n Notes\n 
-----\n Header and body are lists-of-lists. Top level list is a list of\n rows. Each row is a list of str text.\n\n Logic: Use <thead>, <tbody>, <tfoot> elements to identify\n header, body, and footer, otherwise:\n - Put all rows into body\n - Move rows from top of body to header only if\n all elements inside row are <th>\n - Move rows from bottom of body to footer only if\n all elements inside row are <th>\n """\n header_rows = self._parse_thead_tr(table_html)\n body_rows = self._parse_tbody_tr(table_html)\n footer_rows = self._parse_tfoot_tr(table_html)\n\n def row_is_all_th(row):\n return all(self._equals_tag(t, "th") for t in self._parse_td(row))\n\n if not header_rows:\n # The table has no <thead>. Move the top all-<th> rows from\n # body_rows to header_rows. (This is a common case because many\n # tables in the wild have no <thead> or <tfoot>\n while body_rows and row_is_all_th(body_rows[0]):\n header_rows.append(body_rows.pop(0))\n\n header = self._expand_colspan_rowspan(header_rows, section="header")\n body = self._expand_colspan_rowspan(body_rows, section="body")\n footer = self._expand_colspan_rowspan(footer_rows, section="footer")\n\n return header, body, footer\n\n def _expand_colspan_rowspan(\n self, rows, section: Literal["header", "footer", "body"]\n ):\n """\n Given a list of <tr>s, return a list of text rows.\n\n Parameters\n ----------\n rows : list of node-like\n List of <tr>s\n section : the section that the rows belong to (header, body or footer).\n\n Returns\n -------\n list of list\n Each returned row is a list of str text, or tuple (text, link)\n if extract_links is not None.\n\n Notes\n -----\n Any cell with ``rowspan`` or ``colspan`` will have its contents copied\n to subsequent cells.\n """\n all_texts = [] # list of rows, each a list of str\n text: str | tuple\n remainder: list[\n tuple[int, str | tuple, int]\n ] = [] # list of (index, text, nrows)\n\n for tr in rows:\n texts = [] # the output for this row\n next_remainder = []\n\n index 
= 0\n tds = self._parse_td(tr)\n for td in tds:\n # Append texts from previous rows with rowspan>1 that come\n # before this <td>\n while remainder and remainder[0][0] <= index:\n prev_i, prev_text, prev_rowspan = remainder.pop(0)\n texts.append(prev_text)\n if prev_rowspan > 1:\n next_remainder.append((prev_i, prev_text, prev_rowspan - 1))\n index += 1\n\n # Append the text from this <td>, colspan times\n text = _remove_whitespace(self._text_getter(td))\n if self.extract_links in ("all", section):\n href = self._href_getter(td)\n text = (text, href)\n rowspan = int(self._attr_getter(td, "rowspan") or 1)\n colspan = int(self._attr_getter(td, "colspan") or 1)\n\n for _ in range(colspan):\n texts.append(text)\n if rowspan > 1:\n next_remainder.append((index, text, rowspan - 1))\n index += 1\n\n # Append texts from previous rows at the final position\n for prev_i, prev_text, prev_rowspan in remainder:\n texts.append(prev_text)\n if prev_rowspan > 1:\n next_remainder.append((prev_i, prev_text, prev_rowspan - 1))\n\n all_texts.append(texts)\n remainder = next_remainder\n\n # Append rows that only appear because the previous row had non-1\n # rowspan\n while remainder:\n next_remainder = []\n texts = []\n for prev_i, prev_text, prev_rowspan in remainder:\n texts.append(prev_text)\n if prev_rowspan > 1:\n next_remainder.append((prev_i, prev_text, prev_rowspan - 1))\n all_texts.append(texts)\n remainder = next_remainder\n\n return all_texts\n\n def _handle_hidden_tables(self, tbl_list, attr_name: str):\n """\n Return list of tables, potentially removing hidden elements\n\n Parameters\n ----------\n tbl_list : list of node-like\n Type of list elements will vary depending upon parser used\n attr_name : str\n Name of the accessor for retrieving HTML attributes\n\n Returns\n -------\n list of node-like\n Return type matches `tbl_list`\n """\n if not self.displayed_only:\n return tbl_list\n\n return [\n x\n for x in tbl_list\n if "display:none"\n not in getattr(x, 
attr_name).get("style", "").replace(" ", "")\n ]\n\n\nclass _BeautifulSoupHtml5LibFrameParser(_HtmlFrameParser):\n """\n HTML to DataFrame parser that uses BeautifulSoup under the hood.\n\n See Also\n --------\n pandas.io.html._HtmlFrameParser\n pandas.io.html._LxmlFrameParser\n\n Notes\n -----\n Documentation strings for this class are in the base class\n :class:`pandas.io.html._HtmlFrameParser`.\n """\n\n def _parse_tables(self, document, match, attrs):\n element_name = "table"\n tables = document.find_all(element_name, attrs=attrs)\n if not tables:\n raise ValueError("No tables found")\n\n result = []\n unique_tables = set()\n tables = self._handle_hidden_tables(tables, "attrs")\n\n for table in tables:\n if self.displayed_only:\n for elem in table.find_all("style"):\n elem.decompose()\n\n for elem in table.find_all(style=re.compile(r"display:\s*none")):\n elem.decompose()\n\n if table not in unique_tables and table.find(string=match) is not None:\n result.append(table)\n unique_tables.add(table)\n if not result:\n raise ValueError(f"No tables found matching pattern {repr(match.pattern)}")\n return result\n\n def _href_getter(self, obj) -> str | None:\n a = obj.find("a", href=True)\n return None if not a else a["href"]\n\n def _text_getter(self, obj):\n return obj.text\n\n def _equals_tag(self, obj, tag) -> bool:\n return obj.name == tag\n\n def _parse_td(self, row):\n return row.find_all(("td", "th"), recursive=False)\n\n def _parse_thead_tr(self, table):\n return table.select("thead tr")\n\n def _parse_tbody_tr(self, table):\n from_tbody = table.select("tbody tr")\n from_root = table.find_all("tr", recursive=False)\n # HTML spec: at most one of these lists has content\n return from_tbody + from_root\n\n def _parse_tfoot_tr(self, table):\n return table.select("tfoot tr")\n\n def _setup_build_doc(self):\n raw_text = _read(self.io, self.encoding, self.storage_options)\n if not raw_text:\n raise ValueError(f"No text parsed from document: {self.io}")\n return 
raw_text\n\n def _build_doc(self):\n from bs4 import BeautifulSoup\n\n bdoc = self._setup_build_doc()\n if isinstance(bdoc, bytes) and self.encoding is not None:\n udoc = bdoc.decode(self.encoding)\n from_encoding = None\n else:\n udoc = bdoc\n from_encoding = self.encoding\n\n soup = BeautifulSoup(udoc, features="html5lib", from_encoding=from_encoding)\n\n for br in soup.find_all("br"):\n br.replace_with("\n" + br.text)\n\n return soup\n\n\ndef _build_xpath_expr(attrs) -> str:\n """\n Build an xpath expression to simulate bs4's ability to pass in kwargs to\n search for attributes when using the lxml parser.\n\n Parameters\n ----------\n attrs : dict\n A dict of HTML attributes. These are NOT checked for validity.\n\n Returns\n -------\n expr : unicode\n An XPath expression that checks for the given HTML attributes.\n """\n # give class attribute as class_ because class is a python keyword\n if "class_" in attrs:\n attrs["class"] = attrs.pop("class_")\n\n s = " and ".join([f"@{k}={repr(v)}" for k, v in attrs.items()])\n return f"[{s}]"\n\n\n_re_namespace = {"re": "http://exslt.org/regular-expressions"}\n\n\nclass _LxmlFrameParser(_HtmlFrameParser):\n """\n HTML to DataFrame parser that uses lxml under the hood.\n\n Warning\n -------\n This parser can only handle HTTP, FTP, and FILE urls.\n\n See Also\n --------\n _HtmlFrameParser\n _BeautifulSoupLxmlFrameParser\n\n Notes\n -----\n Documentation strings for this class are in the base class\n :class:`_HtmlFrameParser`.\n """\n\n def _href_getter(self, obj) -> str | None:\n href = obj.xpath(".//a/@href")\n return None if not href else href[0]\n\n def _text_getter(self, obj):\n return obj.text_content()\n\n def _parse_td(self, row):\n # Look for direct children only: the "row" element here may be a\n # <thead> or <tfoot> (see _parse_thead_tr).\n return row.xpath("./td|./th")\n\n def _parse_tables(self, document, match, kwargs):\n pattern = match.pattern\n\n # 1. 
check all descendants for the given pattern and only search tables\n # GH 49929\n xpath_expr = f"//table[.//text()[re:test(., {repr(pattern)})]]"\n\n # if any table attributes were given build an xpath expression to\n # search for them\n if kwargs:\n xpath_expr += _build_xpath_expr(kwargs)\n\n tables = document.xpath(xpath_expr, namespaces=_re_namespace)\n\n tables = self._handle_hidden_tables(tables, "attrib")\n if self.displayed_only:\n for table in tables:\n # lxml utilizes XPATH 1.0 which does not have regex\n # support. As a result, we find all elements with a style\n # attribute and iterate them to check for display:none\n for elem in table.xpath(".//style"):\n elem.drop_tree()\n for elem in table.xpath(".//*[@style]"):\n if "display:none" in elem.attrib.get("style", "").replace(" ", ""):\n elem.drop_tree()\n if not tables:\n raise ValueError(f"No tables found matching regex {repr(pattern)}")\n return tables\n\n def _equals_tag(self, obj, tag) -> bool:\n return obj.tag == tag\n\n def _build_doc(self):\n """\n Raises\n ------\n ValueError\n * If a URL that lxml cannot parse is passed.\n\n Exception\n * Any other ``Exception`` thrown. 
For example, trying to parse a\n URL that is syntactically correct on a machine with no internet\n connection will fail.\n\n See Also\n --------\n pandas.io.html._HtmlFrameParser._build_doc\n """\n from lxml.etree import XMLSyntaxError\n from lxml.html import (\n HTMLParser,\n fromstring,\n parse,\n )\n\n parser = HTMLParser(recover=True, encoding=self.encoding)\n\n try:\n if is_url(self.io):\n with get_handle(\n self.io, "r", storage_options=self.storage_options\n ) as f:\n r = parse(f.handle, parser=parser)\n else:\n # try to parse the input in the simplest way\n r = parse(self.io, parser=parser)\n try:\n r = r.getroot()\n except AttributeError:\n pass\n except (UnicodeDecodeError, OSError) as e:\n # if the input is a blob of html goop\n if not is_url(self.io):\n r = fromstring(self.io, parser=parser)\n\n try:\n r = r.getroot()\n except AttributeError:\n pass\n else:\n raise e\n else:\n if not hasattr(r, "text_content"):\n raise XMLSyntaxError("no text parsed from document", 0, 0, 0)\n\n for br in r.xpath("*//br"):\n br.tail = "\n" + (br.tail or "")\n\n return r\n\n def _parse_thead_tr(self, table):\n rows = []\n\n for thead in table.xpath(".//thead"):\n rows.extend(thead.xpath("./tr"))\n\n # HACK: lxml does not clean up the clearly-erroneous\n # <thead><th>foo</th><th>bar</th></thead>. (Missing <tr>). 
Add\n # the <thead> and _pretend_ it's a <tr>; _parse_td() will find its\n # children as though it's a <tr>.\n #\n # Better solution would be to use html5lib.\n elements_at_root = thead.xpath("./td|./th")\n if elements_at_root:\n rows.append(thead)\n\n return rows\n\n def _parse_tbody_tr(self, table):\n from_tbody = table.xpath(".//tbody//tr")\n from_root = table.xpath("./tr")\n # HTML spec: at most one of these lists has content\n return from_tbody + from_root\n\n def _parse_tfoot_tr(self, table):\n return table.xpath(".//tfoot//tr")\n\n\ndef _expand_elements(body) -> None:\n data = [len(elem) for elem in body]\n lens = Series(data)\n lens_max = lens.max()\n not_max = lens[lens != lens_max]\n\n empty = [""]\n for ind, length in not_max.items():\n body[ind] += empty * (lens_max - length)\n\n\ndef _data_to_frame(**kwargs):\n head, body, foot = kwargs.pop("data")\n header = kwargs.pop("header")\n kwargs["skiprows"] = _get_skiprows(kwargs["skiprows"])\n if head:\n body = head + body\n\n # Infer header when there is a <thead> or top <th>-only rows\n if header is None:\n if len(head) == 1:\n header = 0\n else:\n # ignore all-empty-text rows\n header = [i for i, row in enumerate(head) if any(text for text in row)]\n\n if foot:\n body += foot\n\n # fill out elements of body that are "ragged"\n _expand_elements(body)\n with TextParser(body, header=header, **kwargs) as tp:\n return tp.read()\n\n\n_valid_parsers = {\n "lxml": _LxmlFrameParser,\n None: _LxmlFrameParser,\n "html5lib": _BeautifulSoupHtml5LibFrameParser,\n "bs4": _BeautifulSoupHtml5LibFrameParser,\n}\n\n\ndef _parser_dispatch(flavor: HTMLFlavors | None) -> type[_HtmlFrameParser]:\n """\n Choose the parser based on the input flavor.\n\n Parameters\n ----------\n flavor : {{"lxml", "html5lib", "bs4"}} or None\n The type of parser to use. 
This must be a valid backend.\n\n Returns\n -------\n cls : _HtmlFrameParser subclass\n The parser class based on the requested input flavor.\n\n Raises\n ------\n ValueError\n * If `flavor` is not a valid backend.\n ImportError\n * If you do not have the requested `flavor`\n """\n valid_parsers = list(_valid_parsers.keys())\n if flavor not in valid_parsers:\n raise ValueError(\n f"{repr(flavor)} is not a valid flavor, valid flavors are {valid_parsers}"\n )\n\n if flavor in ("bs4", "html5lib"):\n import_optional_dependency("html5lib")\n import_optional_dependency("bs4")\n else:\n import_optional_dependency("lxml.etree")\n return _valid_parsers[flavor]\n\n\ndef _print_as_set(s) -> str:\n arg = ", ".join([pprint_thing(el) for el in s])\n return f"{{{arg}}}"\n\n\ndef _validate_flavor(flavor):\n if flavor is None:\n flavor = "lxml", "bs4"\n elif isinstance(flavor, str):\n flavor = (flavor,)\n elif isinstance(flavor, abc.Iterable):\n if not all(isinstance(flav, str) for flav in flavor):\n raise TypeError(\n f"Object of type {repr(type(flavor).__name__)} "\n f"is not an iterable of strings"\n )\n else:\n msg = repr(flavor) if isinstance(flavor, str) else str(flavor)\n msg += " is not a valid flavor"\n raise ValueError(msg)\n\n flavor = tuple(flavor)\n valid_flavors = set(_valid_parsers)\n flavor_set = set(flavor)\n\n if not flavor_set & valid_flavors:\n raise ValueError(\n f"{_print_as_set(flavor_set)} is not a valid set of flavors, valid "\n f"flavors are {_print_as_set(valid_flavors)}"\n )\n return flavor\n\n\ndef _parse(\n flavor,\n io,\n match,\n attrs,\n encoding,\n displayed_only,\n extract_links,\n storage_options,\n **kwargs,\n):\n flavor = _validate_flavor(flavor)\n compiled_match = re.compile(match) # you can pass a compiled regex here\n\n retained = None\n for flav in flavor:\n parser = _parser_dispatch(flav)\n p = parser(\n io,\n compiled_match,\n attrs,\n encoding,\n displayed_only,\n extract_links,\n storage_options,\n )\n\n try:\n tables = 
p.parse_tables()\n except ValueError as caught:\n # if `io` is an io-like object, check if it's seekable\n # and try to rewind it before trying the next parser\n if hasattr(io, "seekable") and io.seekable():\n io.seek(0)\n elif hasattr(io, "seekable") and not io.seekable():\n # if we couldn't rewind it, let the user know\n raise ValueError(\n f"The flavor {flav} failed to parse your input. "\n "Since you passed a non-rewindable file "\n "object, we can't rewind it to try "\n "another parser. Try read_html() with a different flavor."\n ) from caught\n\n retained = caught\n else:\n break\n else:\n assert retained is not None # for mypy\n raise retained\n\n ret = []\n for table in tables:\n try:\n df = _data_to_frame(data=table, **kwargs)\n # Cast MultiIndex header to an Index of tuples when extracting header\n # links and replace nan with None (therefore can't use mi.to_flat_index()).\n # This maintains consistency of selection (e.g. df.columns.str[1])\n if extract_links in ("all", "header") and isinstance(\n df.columns, MultiIndex\n ):\n df.columns = Index(\n ((col[0], None if isna(col[1]) else col[1]) for col in df.columns),\n tupleize_cols=False,\n )\n\n ret.append(df)\n except EmptyDataError: # empty table\n continue\n return ret\n\n\n@doc(storage_options=_shared_docs["storage_options"])\ndef read_html(\n io: FilePath | ReadBuffer[str],\n *,\n match: str | Pattern = ".+",\n flavor: HTMLFlavors | Sequence[HTMLFlavors] | None = None,\n header: int | Sequence[int] | None = None,\n index_col: int | Sequence[int] | None = None,\n skiprows: int | Sequence[int] | slice | None = None,\n attrs: dict[str, str] | None = None,\n parse_dates: bool = False,\n thousands: str | None = ",",\n encoding: str | None = None,\n decimal: str = ".",\n converters: dict | None = None,\n na_values: Iterable[object] | None = None,\n keep_default_na: bool = True,\n displayed_only: bool = True,\n extract_links: Literal[None, "header", "footer", "body", "all"] = None,\n dtype_backend: 
DtypeBackend | lib.NoDefault = lib.no_default,\n storage_options: StorageOptions = None,\n) -> list[DataFrame]:\n r"""\n Read HTML tables into a ``list`` of ``DataFrame`` objects.\n\n Parameters\n ----------\n io : str, path object, or file-like object\n String, path object (implementing ``os.PathLike[str]``), or file-like\n object implementing a string ``read()`` function.\n The string can represent a URL or the HTML itself. Note that\n lxml only accepts the http, ftp and file url protocols. If you have a\n URL that starts with ``'https'`` you might try removing the ``'s'``.\n\n .. deprecated:: 2.1.0\n Passing html literal strings is deprecated.\n Wrap literal string/bytes input in ``io.StringIO``/``io.BytesIO`` instead.\n\n match : str or compiled regular expression, optional\n The set of tables containing text matching this regex or string will be\n returned. Unless the HTML is extremely simple you will probably need to\n pass a non-empty string here. Defaults to '.+' (match any non-empty\n string). The default value will return all tables contained on a page.\n This value is converted to a regular expression so that there is\n consistent behavior between Beautiful Soup and lxml.\n\n flavor : {{"lxml", "html5lib", "bs4"}} or list-like, optional\n The parsing engine (or list of parsing engines) to use. 'bs4' and\n 'html5lib' are synonymous with each other, they are both there for\n backwards compatibility. The default of ``None`` tries to use ``lxml``\n to parse and if that fails it falls back on ``bs4`` + ``html5lib``.\n\n header : int or list-like, optional\n The row (or list of rows for a :class:`~pandas.MultiIndex`) to use to\n make the columns headers.\n\n index_col : int or list-like, optional\n The column (or list of columns) to use to create the index.\n\n skiprows : int, list-like or slice, optional\n Number of rows to skip after parsing the column integer. 0-based. 
If a\n sequence of integers or a slice is given, will skip the rows indexed by\n that sequence. Note that a single element sequence means 'skip the nth\n row' whereas an integer means 'skip n rows'.\n\n attrs : dict, optional\n This is a dictionary of attributes that you can pass to use to identify\n the table in the HTML. These are not checked for validity before being\n passed to lxml or Beautiful Soup. However, these attributes must be\n valid HTML table attributes to work correctly. For example, ::\n\n attrs = {{'id': 'table'}}\n\n is a valid attribute dictionary because the 'id' HTML tag attribute is\n a valid HTML attribute for *any* HTML tag as per `this document\n <https://html.spec.whatwg.org/multipage/dom.html#global-attributes>`__. ::\n\n attrs = {{'asdf': 'table'}}\n\n is *not* a valid attribute dictionary because 'asdf' is not a valid\n HTML attribute even if it is a valid XML attribute. Valid HTML 4.01\n table attributes can be found `here\n <http://www.w3.org/TR/REC-html40/struct/tables.html#h-11.2>`__. A\n working draft of the HTML 5 spec can be found `here\n <https://html.spec.whatwg.org/multipage/tables.html>`__. It contains the\n latest information on table attributes for the modern web.\n\n parse_dates : bool, optional\n See :func:`~read_csv` for more details.\n\n thousands : str, optional\n Separator to use to parse thousands. Defaults to ``','``.\n\n encoding : str, optional\n The encoding used to decode the web page. Defaults to ``None``.``None``\n preserves the previous encoding behavior, which depends on the\n underlying parser library (e.g., the parser library will try to use\n the encoding provided by the document).\n\n decimal : str, default '.'\n Character to recognize as decimal point (e.g. use ',' for European\n data).\n\n converters : dict, default None\n Dict of functions for converting values in certain columns. 
Keys can\n either be integers or column labels, values are functions that take one\n input argument, the cell (not column) content, and return the\n transformed content.\n\n na_values : iterable, default None\n Custom NA values.\n\n keep_default_na : bool, default True\n If na_values are specified and keep_default_na is False the default NaN\n values are overridden, otherwise they're appended to.\n\n displayed_only : bool, default True\n Whether elements with "display: none" should be parsed.\n\n extract_links : {{None, "all", "header", "body", "footer"}}\n Table elements in the specified section(s) with <a> tags will have their\n href extracted.\n\n .. versionadded:: 1.5.0\n\n dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable'\n Back-end data type applied to the resultant :class:`DataFrame`\n (still experimental). Behaviour is as follows:\n\n * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`\n (default).\n * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`\n DataFrame.\n\n .. versionadded:: 2.0\n\n {storage_options}\n\n .. versionadded:: 2.1.0\n\n Returns\n -------\n dfs\n A list of DataFrames.\n\n See Also\n --------\n read_csv : Read a comma-separated values (csv) file into DataFrame.\n\n Notes\n -----\n Before using this function you should read the :ref:`gotchas about the\n HTML parsing libraries <io.html.gotchas>`.\n\n Expect to do some cleanup after you call this function. For example, you\n might need to manually assign column names if the column names are\n converted to NaN when you pass the `header=0` argument. We try to assume as\n little as possible about the structure of the table and push the\n idiosyncrasies of the HTML contained in the table to the user.\n\n This function searches for ``<table>`` elements and only for ``<tr>``\n and ``<th>`` rows and ``<td>`` elements within each ``<tr>`` or ``<th>``\n element in the table. ``<td>`` stands for "table data". 
This function\n attempts to properly handle ``colspan`` and ``rowspan`` attributes.\n If the function has a ``<thead>`` argument, it is used to construct\n the header, otherwise the function attempts to find the header within\n the body (by putting rows with only ``<th>`` elements into the header).\n\n Similar to :func:`~read_csv` the `header` argument is applied\n **after** `skiprows` is applied.\n\n This function will *always* return a list of :class:`DataFrame` *or*\n it will fail, e.g., it will *not* return an empty list.\n\n Examples\n --------\n See the :ref:`read_html documentation in the IO section of the docs\n <io.read_html>` for some examples of reading in HTML tables.\n """\n # Type check here. We don't want to parse only to fail because of an\n # invalid value of an integer skiprows.\n if isinstance(skiprows, numbers.Integral) and skiprows < 0:\n raise ValueError(\n "cannot skip rows starting from the end of the "\n "data (you passed a negative value)"\n )\n if extract_links not in [None, "header", "footer", "body", "all"]:\n raise ValueError(\n "`extract_links` must be one of "\n '{None, "header", "footer", "body", "all"}, got '\n f'"{extract_links}"'\n )\n\n validate_header_arg(header)\n check_dtype_backend(dtype_backend)\n\n io = stringify_path(io)\n\n if isinstance(io, str) and not any(\n [\n is_file_like(io),\n file_exists(io),\n is_url(io),\n is_fsspec_url(io),\n ]\n ):\n warnings.warn(\n "Passing literal html to 'read_html' is deprecated and "\n "will be removed in a future version. 
To read from a "\n "literal string, wrap it in a 'StringIO' object.",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n\n return _parse(\n flavor=flavor,\n io=io,\n match=match,\n header=header,\n index_col=index_col,\n skiprows=skiprows,\n parse_dates=parse_dates,\n thousands=thousands,\n attrs=attrs,\n encoding=encoding,\n decimal=decimal,\n converters=converters,\n na_values=na_values,\n keep_default_na=keep_default_na,\n displayed_only=displayed_only,\n extract_links=extract_links,\n dtype_backend=dtype_backend,\n storage_options=storage_options,\n )\n
.venv\Lib\site-packages\pandas\io\html.py
html.py
Python
39,546
0.95
0.162033
0.062257
react-lib
159
2023-09-01T02:38:31.409082
MIT
false
47f6a6c6ad5bbf582adb78afafc8323c
""" orc compat """\nfrom __future__ import annotations\n\nimport io\nfrom types import ModuleType\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Literal,\n)\n\nfrom pandas._libs import lib\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.util._validators import check_dtype_backend\n\nfrom pandas.core.indexes.api import default_index\n\nfrom pandas.io._util import arrow_table_to_pandas\nfrom pandas.io.common import (\n get_handle,\n is_fsspec_url,\n)\n\nif TYPE_CHECKING:\n import fsspec\n import pyarrow.fs\n\n from pandas._typing import (\n DtypeBackend,\n FilePath,\n ReadBuffer,\n WriteBuffer,\n )\n\n from pandas.core.frame import DataFrame\n\n\ndef read_orc(\n path: FilePath | ReadBuffer[bytes],\n columns: list[str] | None = None,\n dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,\n filesystem: pyarrow.fs.FileSystem | fsspec.spec.AbstractFileSystem | None = None,\n **kwargs: Any,\n) -> DataFrame:\n """\n Load an ORC object from the file path, returning a DataFrame.\n\n Parameters\n ----------\n path : str, path object, or file-like object\n String, path object (implementing ``os.PathLike[str]``), or file-like\n object implementing a binary ``read()`` function. The string could be a URL.\n Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is\n expected. A local file could be:\n ``file://localhost/path/to/table.orc``.\n columns : list, default None\n If not None, only these columns will be read from the file.\n Output always follows the ordering of the file and not the columns list.\n This mirrors the original behaviour of\n :external+pyarrow:py:meth:`pyarrow.orc.ORCFile.read`.\n dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'\n Back-end data type applied to the resultant :class:`DataFrame`\n (still experimental). 
Behaviour is as follows:\n\n * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`\n (default).\n * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`\n DataFrame.\n\n .. versionadded:: 2.0\n\n filesystem : fsspec or pyarrow filesystem, default None\n Filesystem object to use when reading the parquet file.\n\n .. versionadded:: 2.1.0\n\n **kwargs\n Any additional kwargs are passed to pyarrow.\n\n Returns\n -------\n DataFrame\n\n Notes\n -----\n Before using this function you should read the :ref:`user guide about ORC <io.orc>`\n and :ref:`install optional dependencies <install.warn_orc>`.\n\n If ``path`` is a URI scheme pointing to a local or remote file (e.g. "s3://"),\n a ``pyarrow.fs`` filesystem will be attempted to read the file. You can also pass a\n pyarrow or fsspec filesystem object into the filesystem keyword to override this\n behavior.\n\n Examples\n --------\n >>> result = pd.read_orc("example_pa.orc") # doctest: +SKIP\n """\n # we require a newer version of pyarrow than we support for parquet\n\n orc = import_optional_dependency("pyarrow.orc")\n\n check_dtype_backend(dtype_backend)\n\n with get_handle(path, "rb", is_text=False) as handles:\n source = handles.handle\n if is_fsspec_url(path) and filesystem is None:\n pa = import_optional_dependency("pyarrow")\n pa_fs = import_optional_dependency("pyarrow.fs")\n try:\n filesystem, source = pa_fs.FileSystem.from_uri(path)\n except (TypeError, pa.ArrowInvalid):\n pass\n\n pa_table = orc.read_table(\n source=source, columns=columns, filesystem=filesystem, **kwargs\n )\n return arrow_table_to_pandas(pa_table, dtype_backend=dtype_backend)\n\n\ndef to_orc(\n df: DataFrame,\n path: FilePath | WriteBuffer[bytes] | None = None,\n *,\n engine: Literal["pyarrow"] = "pyarrow",\n index: bool | None = None,\n engine_kwargs: dict[str, Any] | None = None,\n) -> bytes | None:\n """\n Write a DataFrame to the ORC format.\n\n .. 
versionadded:: 1.5.0\n\n Parameters\n ----------\n df : DataFrame\n The dataframe to be written to ORC. Raises NotImplementedError\n if dtype of one or more columns is category, unsigned integers,\n intervals, periods or sparse.\n path : str, file-like object or None, default None\n If a string, it will be used as Root Directory path\n when writing a partitioned dataset. By file-like object,\n we refer to objects with a write() method, such as a file handle\n (e.g. via builtin open function). If path is None,\n a bytes object is returned.\n engine : str, default 'pyarrow'\n ORC library to use.\n index : bool, optional\n If ``True``, include the dataframe's index(es) in the file output. If\n ``False``, they will not be written to the file.\n If ``None``, similar to ``infer`` the dataframe's index(es)\n will be saved. However, instead of being saved as values,\n the RangeIndex will be stored as a range in the metadata so it\n doesn't require much space and is faster. Other indexes will\n be included as columns in the file output.\n engine_kwargs : dict[str, Any] or None, default None\n Additional keyword arguments passed to :func:`pyarrow.orc.write_table`.\n\n Returns\n -------\n bytes if no path argument is provided else None\n\n Raises\n ------\n NotImplementedError\n Dtype of one or more columns is category, unsigned integers, interval,\n period or sparse.\n ValueError\n engine is not pyarrow.\n\n Notes\n -----\n * Before using this function you should read the\n :ref:`user guide about ORC <io.orc>` and\n :ref:`install optional dependencies <install.warn_orc>`.\n * This function requires `pyarrow <https://arrow.apache.org/docs/python/>`_\n library.\n * For supported dtypes please refer to `supported ORC features in Arrow\n <https://arrow.apache.org/docs/cpp/orc.html#data-types>`__.\n * Currently timezones in datetime columns are not preserved when a\n dataframe is converted into ORC files.\n """\n if index is None:\n index = df.index.names[0] is not None\n if 
engine_kwargs is None:\n engine_kwargs = {}\n\n # validate index\n # --------------\n\n # validate that we have only a default index\n # raise on anything else as we don't serialize the index\n\n if not df.index.equals(default_index(len(df))):\n raise ValueError(\n "orc does not support serializing a non-default index for the index; "\n "you can .reset_index() to make the index into column(s)"\n )\n\n if df.index.name is not None:\n raise ValueError("orc does not serialize index meta-data on a default index")\n\n if engine != "pyarrow":\n raise ValueError("engine must be 'pyarrow'")\n engine = import_optional_dependency(engine, min_version="10.0.1")\n pa = import_optional_dependency("pyarrow")\n orc = import_optional_dependency("pyarrow.orc")\n\n was_none = path is None\n if was_none:\n path = io.BytesIO()\n assert path is not None # For mypy\n with get_handle(path, "wb", is_text=False) as handles:\n assert isinstance(engine, ModuleType) # For mypy\n try:\n orc.write_table(\n engine.Table.from_pandas(df, preserve_index=index),\n handles.handle,\n **engine_kwargs,\n )\n except (TypeError, pa.ArrowNotImplementedError) as e:\n raise NotImplementedError(\n "The dtype of one or more columns is not supported yet."\n ) from e\n\n if was_none:\n assert isinstance(path, io.BytesIO) # For mypy\n return path.getvalue()\n return None\n
.venv\Lib\site-packages\pandas\io\orc.py
orc.py
Python
7,817
0.95
0.109649
0.078534
vue-tools
167
2023-12-18T11:54:41.672467
MIT
false
c8b5f2c0a8ff53afc353febd953bf8f0
""" parquet compat """\nfrom __future__ import annotations\n\nimport io\nimport json\nimport os\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Literal,\n)\nimport warnings\nfrom warnings import (\n catch_warnings,\n filterwarnings,\n)\n\nfrom pandas._config.config import _get_option\n\nfrom pandas._libs import lib\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.errors import AbstractMethodError\nfrom pandas.util._decorators import doc\nfrom pandas.util._exceptions import find_stack_level\nfrom pandas.util._validators import check_dtype_backend\n\nfrom pandas import (\n DataFrame,\n get_option,\n)\nfrom pandas.core.shared_docs import _shared_docs\n\nfrom pandas.io._util import arrow_table_to_pandas\nfrom pandas.io.common import (\n IOHandles,\n get_handle,\n is_fsspec_url,\n is_url,\n stringify_path,\n)\n\nif TYPE_CHECKING:\n from pandas._typing import (\n DtypeBackend,\n FilePath,\n ReadBuffer,\n StorageOptions,\n WriteBuffer,\n )\n\n\ndef get_engine(engine: str) -> BaseImpl:\n """return our implementation"""\n if engine == "auto":\n engine = get_option("io.parquet.engine")\n\n if engine == "auto":\n # try engines in this order\n engine_classes = [PyArrowImpl, FastParquetImpl]\n\n error_msgs = ""\n for engine_class in engine_classes:\n try:\n return engine_class()\n except ImportError as err:\n error_msgs += "\n - " + str(err)\n\n raise ImportError(\n "Unable to find a usable engine; "\n "tried using: 'pyarrow', 'fastparquet'.\n"\n "A suitable version of "\n "pyarrow or fastparquet is required for parquet "\n "support.\n"\n "Trying to import the above resulted in these errors:"\n f"{error_msgs}"\n )\n\n if engine == "pyarrow":\n return PyArrowImpl()\n elif engine == "fastparquet":\n return FastParquetImpl()\n\n raise ValueError("engine must be one of 'pyarrow', 'fastparquet'")\n\n\ndef _get_path_or_handle(\n path: FilePath | ReadBuffer[bytes] | WriteBuffer[bytes],\n fs: Any,\n storage_options: StorageOptions | None = None,\n mode: str 
= "rb",\n is_dir: bool = False,\n) -> tuple[\n FilePath | ReadBuffer[bytes] | WriteBuffer[bytes], IOHandles[bytes] | None, Any\n]:\n """File handling for PyArrow."""\n path_or_handle = stringify_path(path)\n if fs is not None:\n pa_fs = import_optional_dependency("pyarrow.fs", errors="ignore")\n fsspec = import_optional_dependency("fsspec", errors="ignore")\n if pa_fs is not None and isinstance(fs, pa_fs.FileSystem):\n if storage_options:\n raise NotImplementedError(\n "storage_options not supported with a pyarrow FileSystem."\n )\n elif fsspec is not None and isinstance(fs, fsspec.spec.AbstractFileSystem):\n pass\n else:\n raise ValueError(\n f"filesystem must be a pyarrow or fsspec FileSystem, "\n f"not a {type(fs).__name__}"\n )\n if is_fsspec_url(path_or_handle) and fs is None:\n if storage_options is None:\n pa = import_optional_dependency("pyarrow")\n pa_fs = import_optional_dependency("pyarrow.fs")\n\n try:\n fs, path_or_handle = pa_fs.FileSystem.from_uri(path)\n except (TypeError, pa.ArrowInvalid):\n pass\n if fs is None:\n fsspec = import_optional_dependency("fsspec")\n fs, path_or_handle = fsspec.core.url_to_fs(\n path_or_handle, **(storage_options or {})\n )\n elif storage_options and (not is_url(path_or_handle) or mode != "rb"):\n # can't write to a remote url\n # without making use of fsspec at the moment\n raise ValueError("storage_options passed with buffer, or non-supported URL")\n\n handles = None\n if (\n not fs\n and not is_dir\n and isinstance(path_or_handle, str)\n and not os.path.isdir(path_or_handle)\n ):\n # use get_handle only when we are very certain that it is not a directory\n # fsspec resources can also point to directories\n # this branch is used for example when reading from non-fsspec URLs\n handles = get_handle(\n path_or_handle, mode, is_text=False, storage_options=storage_options\n )\n fs = None\n path_or_handle = handles.handle\n return path_or_handle, handles, fs\n\n\nclass BaseImpl:\n @staticmethod\n def validate_dataframe(df: 
DataFrame) -> None:\n if not isinstance(df, DataFrame):\n raise ValueError("to_parquet only supports IO with DataFrames")\n\n def write(self, df: DataFrame, path, compression, **kwargs):\n raise AbstractMethodError(self)\n\n def read(self, path, columns=None, **kwargs) -> DataFrame:\n raise AbstractMethodError(self)\n\n\nclass PyArrowImpl(BaseImpl):\n def __init__(self) -> None:\n import_optional_dependency(\n "pyarrow", extra="pyarrow is required for parquet support."\n )\n import pyarrow.parquet\n\n # import utils to register the pyarrow extension types\n import pandas.core.arrays.arrow.extension_types # pyright: ignore[reportUnusedImport] # noqa: F401\n\n self.api = pyarrow\n\n def write(\n self,\n df: DataFrame,\n path: FilePath | WriteBuffer[bytes],\n compression: str | None = "snappy",\n index: bool | None = None,\n storage_options: StorageOptions | None = None,\n partition_cols: list[str] | None = None,\n filesystem=None,\n **kwargs,\n ) -> None:\n self.validate_dataframe(df)\n\n from_pandas_kwargs: dict[str, Any] = {"schema": kwargs.pop("schema", None)}\n if index is not None:\n from_pandas_kwargs["preserve_index"] = index\n\n table = self.api.Table.from_pandas(df, **from_pandas_kwargs)\n\n if df.attrs:\n df_metadata = {"PANDAS_ATTRS": json.dumps(df.attrs)}\n existing_metadata = table.schema.metadata\n merged_metadata = {**existing_metadata, **df_metadata}\n table = table.replace_schema_metadata(merged_metadata)\n\n path_or_handle, handles, filesystem = _get_path_or_handle(\n path,\n filesystem,\n storage_options=storage_options,\n mode="wb",\n is_dir=partition_cols is not None,\n )\n if (\n isinstance(path_or_handle, io.BufferedWriter)\n and hasattr(path_or_handle, "name")\n and isinstance(path_or_handle.name, (str, bytes))\n ):\n if isinstance(path_or_handle.name, bytes):\n path_or_handle = path_or_handle.name.decode()\n else:\n path_or_handle = path_or_handle.name\n\n try:\n if partition_cols is not None:\n # writes to multiple files under the given 
path\n self.api.parquet.write_to_dataset(\n table,\n path_or_handle,\n compression=compression,\n partition_cols=partition_cols,\n filesystem=filesystem,\n **kwargs,\n )\n else:\n # write to single output file\n self.api.parquet.write_table(\n table,\n path_or_handle,\n compression=compression,\n filesystem=filesystem,\n **kwargs,\n )\n finally:\n if handles is not None:\n handles.close()\n\n def read(\n self,\n path,\n columns=None,\n filters=None,\n use_nullable_dtypes: bool = False,\n dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,\n storage_options: StorageOptions | None = None,\n filesystem=None,\n **kwargs,\n ) -> DataFrame:\n kwargs["use_pandas_metadata"] = True\n\n to_pandas_kwargs = {}\n\n manager = _get_option("mode.data_manager", silent=True)\n if manager == "array":\n to_pandas_kwargs["split_blocks"] = True\n path_or_handle, handles, filesystem = _get_path_or_handle(\n path,\n filesystem,\n storage_options=storage_options,\n mode="rb",\n )\n try:\n pa_table = self.api.parquet.read_table(\n path_or_handle,\n columns=columns,\n filesystem=filesystem,\n filters=filters,\n **kwargs,\n )\n\n with catch_warnings():\n filterwarnings(\n "ignore",\n "make_block is deprecated",\n DeprecationWarning,\n )\n result = arrow_table_to_pandas(\n pa_table,\n dtype_backend=dtype_backend,\n to_pandas_kwargs=to_pandas_kwargs,\n )\n\n if manager == "array":\n result = result._as_manager("array", copy=False)\n\n if pa_table.schema.metadata:\n if b"PANDAS_ATTRS" in pa_table.schema.metadata:\n df_metadata = pa_table.schema.metadata[b"PANDAS_ATTRS"]\n result.attrs = json.loads(df_metadata)\n return result\n finally:\n if handles is not None:\n handles.close()\n\n\nclass FastParquetImpl(BaseImpl):\n def __init__(self) -> None:\n # since pandas is a dependency of fastparquet\n # we need to import on first use\n fastparquet = import_optional_dependency(\n "fastparquet", extra="fastparquet is required for parquet support."\n )\n self.api = fastparquet\n\n def write(\n 
self,\n df: DataFrame,\n path,\n compression: Literal["snappy", "gzip", "brotli"] | None = "snappy",\n index=None,\n partition_cols=None,\n storage_options: StorageOptions | None = None,\n filesystem=None,\n **kwargs,\n ) -> None:\n self.validate_dataframe(df)\n\n if "partition_on" in kwargs and partition_cols is not None:\n raise ValueError(\n "Cannot use both partition_on and "\n "partition_cols. Use partition_cols for partitioning data"\n )\n if "partition_on" in kwargs:\n partition_cols = kwargs.pop("partition_on")\n\n if partition_cols is not None:\n kwargs["file_scheme"] = "hive"\n\n if filesystem is not None:\n raise NotImplementedError(\n "filesystem is not implemented for the fastparquet engine."\n )\n\n # cannot use get_handle as write() does not accept file buffers\n path = stringify_path(path)\n if is_fsspec_url(path):\n fsspec = import_optional_dependency("fsspec")\n\n # if filesystem is provided by fsspec, file must be opened in 'wb' mode.\n kwargs["open_with"] = lambda path, _: fsspec.open(\n path, "wb", **(storage_options or {})\n ).open()\n elif storage_options:\n raise ValueError(\n "storage_options passed with file object or non-fsspec file path"\n )\n\n with catch_warnings(record=True):\n self.api.write(\n path,\n df,\n compression=compression,\n write_index=index,\n partition_on=partition_cols,\n **kwargs,\n )\n\n def read(\n self,\n path,\n columns=None,\n filters=None,\n storage_options: StorageOptions | None = None,\n filesystem=None,\n **kwargs,\n ) -> DataFrame:\n parquet_kwargs: dict[str, Any] = {}\n use_nullable_dtypes = kwargs.pop("use_nullable_dtypes", False)\n dtype_backend = kwargs.pop("dtype_backend", lib.no_default)\n # We are disabling nullable dtypes for fastparquet pending discussion\n parquet_kwargs["pandas_nulls"] = False\n if use_nullable_dtypes:\n raise ValueError(\n "The 'use_nullable_dtypes' argument is not supported for the "\n "fastparquet engine"\n )\n if dtype_backend is not lib.no_default:\n raise ValueError(\n "The 
'dtype_backend' argument is not supported for the "\n "fastparquet engine"\n )\n if filesystem is not None:\n raise NotImplementedError(\n "filesystem is not implemented for the fastparquet engine."\n )\n path = stringify_path(path)\n handles = None\n if is_fsspec_url(path):\n fsspec = import_optional_dependency("fsspec")\n\n parquet_kwargs["fs"] = fsspec.open(path, "rb", **(storage_options or {})).fs\n elif isinstance(path, str) and not os.path.isdir(path):\n # use get_handle only when we are very certain that it is not a directory\n # fsspec resources can also point to directories\n # this branch is used for example when reading from non-fsspec URLs\n handles = get_handle(\n path, "rb", is_text=False, storage_options=storage_options\n )\n path = handles.handle\n\n try:\n parquet_file = self.api.ParquetFile(path, **parquet_kwargs)\n return parquet_file.to_pandas(columns=columns, filters=filters, **kwargs)\n finally:\n if handles is not None:\n handles.close()\n\n\n@doc(storage_options=_shared_docs["storage_options"])\ndef to_parquet(\n df: DataFrame,\n path: FilePath | WriteBuffer[bytes] | None = None,\n engine: str = "auto",\n compression: str | None = "snappy",\n index: bool | None = None,\n storage_options: StorageOptions | None = None,\n partition_cols: list[str] | None = None,\n filesystem: Any = None,\n **kwargs,\n) -> bytes | None:\n """\n Write a DataFrame to the parquet format.\n\n Parameters\n ----------\n df : DataFrame\n path : str, path object, file-like object, or None, default None\n String, path object (implementing ``os.PathLike[str]``), or file-like\n object implementing a binary ``write()`` function. If None, the result is\n returned as bytes. If a string, it will be used as Root Directory path\n when writing a partitioned dataset. The engine fastparquet does not\n accept file-like objects.\n engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'\n Parquet library to use. If 'auto', then the option\n ``io.parquet.engine`` is used. 
The default ``io.parquet.engine``\n behavior is to try 'pyarrow', falling back to 'fastparquet' if\n 'pyarrow' is unavailable.\n\n When using the ``'pyarrow'`` engine and no storage options are provided\n and a filesystem is implemented by both ``pyarrow.fs`` and ``fsspec``\n (e.g. "s3://"), then the ``pyarrow.fs`` filesystem is attempted first.\n Use the filesystem keyword with an instantiated fsspec filesystem\n if you wish to use its implementation.\n compression : {{'snappy', 'gzip', 'brotli', 'lz4', 'zstd', None}},\n default 'snappy'. Name of the compression to use. Use ``None``\n for no compression.\n index : bool, default None\n If ``True``, include the dataframe's index(es) in the file output. If\n ``False``, they will not be written to the file.\n If ``None``, similar to ``True`` the dataframe's index(es)\n will be saved. However, instead of being saved as values,\n the RangeIndex will be stored as a range in the metadata so it\n doesn't require much space and is faster. Other indexes will\n be included as columns in the file output.\n partition_cols : str or list, optional, default None\n Column names by which to partition the dataset.\n Columns are partitioned in the order they are given.\n Must be None if path is not a string.\n {storage_options}\n\n filesystem : fsspec or pyarrow filesystem, default None\n Filesystem object to use when reading the parquet file. Only implemented\n for ``engine="pyarrow"``.\n\n .. 
versionadded:: 2.1.0\n\n kwargs\n Additional keyword arguments passed to the engine\n\n Returns\n -------\n bytes if no path argument is provided else None\n """\n if isinstance(partition_cols, str):\n partition_cols = [partition_cols]\n impl = get_engine(engine)\n\n path_or_buf: FilePath | WriteBuffer[bytes] = io.BytesIO() if path is None else path\n\n impl.write(\n df,\n path_or_buf,\n compression=compression,\n index=index,\n partition_cols=partition_cols,\n storage_options=storage_options,\n filesystem=filesystem,\n **kwargs,\n )\n\n if path is None:\n assert isinstance(path_or_buf, io.BytesIO)\n return path_or_buf.getvalue()\n else:\n return None\n\n\n@doc(storage_options=_shared_docs["storage_options"])\ndef read_parquet(\n path: FilePath | ReadBuffer[bytes],\n engine: str = "auto",\n columns: list[str] | None = None,\n storage_options: StorageOptions | None = None,\n use_nullable_dtypes: bool | lib.NoDefault = lib.no_default,\n dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,\n filesystem: Any = None,\n filters: list[tuple] | list[list[tuple]] | None = None,\n **kwargs,\n) -> DataFrame:\n """\n Load a parquet object from the file path, returning a DataFrame.\n\n Parameters\n ----------\n path : str, path object or file-like object\n String, path object (implementing ``os.PathLike[str]``), or file-like\n object implementing a binary ``read()`` function.\n The string could be a URL. Valid URL schemes include http, ftp, s3,\n gs, and file. For file URLs, a host is expected. A local file could be:\n ``file://localhost/path/to/table.parquet``.\n A file URL can also be a path to a directory that contains multiple\n partitioned parquet files. Both pyarrow and fastparquet support\n paths to directories as well as file URLs. A directory path could be:\n ``file://localhost/path/to/tables`` or ``s3://bucket/partition_dir``.\n engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'\n Parquet library to use. 
If 'auto', then the option\n ``io.parquet.engine`` is used. The default ``io.parquet.engine``\n behavior is to try 'pyarrow', falling back to 'fastparquet' if\n 'pyarrow' is unavailable.\n\n When using the ``'pyarrow'`` engine and no storage options are provided\n and a filesystem is implemented by both ``pyarrow.fs`` and ``fsspec``\n (e.g. "s3://"), then the ``pyarrow.fs`` filesystem is attempted first.\n Use the filesystem keyword with an instantiated fsspec filesystem\n if you wish to use its implementation.\n columns : list, default=None\n If not None, only these columns will be read from the file.\n {storage_options}\n\n .. versionadded:: 1.3.0\n\n use_nullable_dtypes : bool, default False\n If True, use dtypes that use ``pd.NA`` as missing value indicator\n for the resulting DataFrame. (only applicable for the ``pyarrow``\n engine)\n As new dtypes are added that support ``pd.NA`` in the future, the\n output with this option will change to use those dtypes.\n Note: this is an experimental option, and behaviour (e.g. additional\n support dtypes) may change without notice.\n\n .. deprecated:: 2.0\n\n dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable'\n Back-end data type applied to the resultant :class:`DataFrame`\n (still experimental). Behaviour is as follows:\n\n * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`\n (default).\n * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`\n DataFrame.\n\n .. versionadded:: 2.0\n\n filesystem : fsspec or pyarrow filesystem, default None\n Filesystem object to use when reading the parquet file. Only implemented\n for ``engine="pyarrow"``.\n\n .. 
versionadded:: 2.1.0\n\n filters : List[Tuple] or List[List[Tuple]], default None\n To filter out data.\n Filter syntax: [[(column, op, val), ...],...]\n where op is [==, =, >, >=, <, <=, !=, in, not in]\n The innermost tuples are transposed into a set of filters applied\n through an `AND` operation.\n The outer list combines these sets of filters through an `OR`\n operation.\n A single list of tuples can also be used, meaning that no `OR`\n operation between set of filters is to be conducted.\n\n Using this argument will NOT result in row-wise filtering of the final\n partitions unless ``engine="pyarrow"`` is also specified. For\n other engines, filtering is only performed at the partition level, that is,\n to prevent the loading of some row-groups and/or files.\n\n .. versionadded:: 2.1.0\n\n **kwargs\n Any additional kwargs are passed to the engine.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.to_parquet : Create a parquet object that serializes a DataFrame.\n\n Examples\n --------\n >>> original_df = pd.DataFrame(\n ... {{"foo": range(5), "bar": range(5, 10)}}\n ... 
)\n >>> original_df\n foo bar\n 0 0 5\n 1 1 6\n 2 2 7\n 3 3 8\n 4 4 9\n >>> df_parquet_bytes = original_df.to_parquet()\n >>> from io import BytesIO\n >>> restored_df = pd.read_parquet(BytesIO(df_parquet_bytes))\n >>> restored_df\n foo bar\n 0 0 5\n 1 1 6\n 2 2 7\n 3 3 8\n 4 4 9\n >>> restored_df.equals(original_df)\n True\n >>> restored_bar = pd.read_parquet(BytesIO(df_parquet_bytes), columns=["bar"])\n >>> restored_bar\n bar\n 0 5\n 1 6\n 2 7\n 3 8\n 4 9\n >>> restored_bar.equals(original_df[['bar']])\n True\n\n The function uses `kwargs` that are passed directly to the engine.\n In the following example, we use the `filters` argument of the pyarrow\n engine to filter the rows of the DataFrame.\n\n Since `pyarrow` is the default engine, we can omit the `engine` argument.\n Note that the `filters` argument is implemented by the `pyarrow` engine,\n which can benefit from multithreading and also potentially be more\n economical in terms of memory.\n\n >>> sel = [("foo", ">", 2)]\n >>> restored_part = pd.read_parquet(BytesIO(df_parquet_bytes), filters=sel)\n >>> restored_part\n foo bar\n 0 3 8\n 1 4 9\n """\n\n impl = get_engine(engine)\n\n if use_nullable_dtypes is not lib.no_default:\n msg = (\n "The argument 'use_nullable_dtypes' is deprecated and will be removed "\n "in a future version."\n )\n if use_nullable_dtypes is True:\n msg += (\n "Use dtype_backend='numpy_nullable' instead of use_nullable_dtype=True."\n )\n warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())\n else:\n use_nullable_dtypes = False\n check_dtype_backend(dtype_backend)\n\n return impl.read(\n path,\n columns=columns,\n filters=filters,\n storage_options=storage_options,\n use_nullable_dtypes=use_nullable_dtypes,\n dtype_backend=dtype_backend,\n filesystem=filesystem,\n **kwargs,\n )\n
.venv\Lib\site-packages\pandas\io\parquet.py
parquet.py
Python
23,641
0.95
0.137168
0.053963
python-kit
503
2025-04-17T04:40:27.273663
MIT
false
544cdd9396c2fcbc180e1f93d0943365
""" pickle compat """\nfrom __future__ import annotations\n\nimport pickle\nfrom typing import (\n TYPE_CHECKING,\n Any,\n)\nimport warnings\n\nfrom pandas.compat import pickle_compat as pc\nfrom pandas.util._decorators import doc\n\nfrom pandas.core.shared_docs import _shared_docs\n\nfrom pandas.io.common import get_handle\n\nif TYPE_CHECKING:\n from pandas._typing import (\n CompressionOptions,\n FilePath,\n ReadPickleBuffer,\n StorageOptions,\n WriteBuffer,\n )\n\n from pandas import (\n DataFrame,\n Series,\n )\n\n\n@doc(\n storage_options=_shared_docs["storage_options"],\n compression_options=_shared_docs["compression_options"] % "filepath_or_buffer",\n)\ndef to_pickle(\n obj: Any,\n filepath_or_buffer: FilePath | WriteBuffer[bytes],\n compression: CompressionOptions = "infer",\n protocol: int = pickle.HIGHEST_PROTOCOL,\n storage_options: StorageOptions | None = None,\n) -> None:\n """\n Pickle (serialize) object to file.\n\n Parameters\n ----------\n obj : any object\n Any python object.\n filepath_or_buffer : str, path object, or file-like object\n String, path object (implementing ``os.PathLike[str]``), or file-like\n object implementing a binary ``write()`` function.\n Also accepts URL. URL has to be of S3 or GCS.\n {compression_options}\n\n .. versionchanged:: 1.4.0 Zstandard support.\n\n protocol : int\n Int which indicates which protocol should be used by the pickler,\n default HIGHEST_PROTOCOL (see [1], paragraph 12.1.2). The possible\n values for this parameter depend on the version of Python. For Python\n 2.x, possible values are 0, 1, 2. For Python>=3.0, 3 is a valid value.\n For Python >= 3.4, 4 is a valid value. A negative value for the\n protocol parameter is equivalent to setting its value to\n HIGHEST_PROTOCOL.\n\n {storage_options}\n\n .. 
[1] https://docs.python.org/3/library/pickle.html\n\n See Also\n --------\n read_pickle : Load pickled pandas object (or any object) from file.\n DataFrame.to_hdf : Write DataFrame to an HDF5 file.\n DataFrame.to_sql : Write DataFrame to a SQL database.\n DataFrame.to_parquet : Write a DataFrame to the binary parquet format.\n\n Examples\n --------\n >>> original_df = pd.DataFrame({{"foo": range(5), "bar": range(5, 10)}}) # doctest: +SKIP\n >>> original_df # doctest: +SKIP\n foo bar\n 0 0 5\n 1 1 6\n 2 2 7\n 3 3 8\n 4 4 9\n >>> pd.to_pickle(original_df, "./dummy.pkl") # doctest: +SKIP\n\n >>> unpickled_df = pd.read_pickle("./dummy.pkl") # doctest: +SKIP\n >>> unpickled_df # doctest: +SKIP\n foo bar\n 0 0 5\n 1 1 6\n 2 2 7\n 3 3 8\n 4 4 9\n """ # noqa: E501\n if protocol < 0:\n protocol = pickle.HIGHEST_PROTOCOL\n\n with get_handle(\n filepath_or_buffer,\n "wb",\n compression=compression,\n is_text=False,\n storage_options=storage_options,\n ) as handles:\n # letting pickle write directly to the buffer is more memory-efficient\n pickle.dump(obj, handles.handle, protocol=protocol)\n\n\n@doc(\n storage_options=_shared_docs["storage_options"],\n decompression_options=_shared_docs["decompression_options"] % "filepath_or_buffer",\n)\ndef read_pickle(\n filepath_or_buffer: FilePath | ReadPickleBuffer,\n compression: CompressionOptions = "infer",\n storage_options: StorageOptions | None = None,\n) -> DataFrame | Series:\n """\n Load pickled pandas object (or any object) from file.\n\n .. warning::\n\n Loading pickled data received from untrusted sources can be\n unsafe. See `here <https://docs.python.org/3/library/pickle.html>`__.\n\n Parameters\n ----------\n filepath_or_buffer : str, path object, or file-like object\n String, path object (implementing ``os.PathLike[str]``), or file-like\n object implementing a binary ``readlines()`` function.\n Also accepts URL. URL is not limited to S3 and GCS.\n\n {decompression_options}\n\n .. 
versionchanged:: 1.4.0 Zstandard support.\n\n {storage_options}\n\n Returns\n -------\n same type as object stored in file\n\n See Also\n --------\n DataFrame.to_pickle : Pickle (serialize) DataFrame object to file.\n Series.to_pickle : Pickle (serialize) Series object to file.\n read_hdf : Read HDF5 file into a DataFrame.\n read_sql : Read SQL query or database table into a DataFrame.\n read_parquet : Load a parquet object, returning a DataFrame.\n\n Notes\n -----\n read_pickle is only guaranteed to be backwards compatible to pandas 0.20.3\n provided the object was serialized with to_pickle.\n\n Examples\n --------\n >>> original_df = pd.DataFrame(\n ... {{"foo": range(5), "bar": range(5, 10)}}\n ... ) # doctest: +SKIP\n >>> original_df # doctest: +SKIP\n foo bar\n 0 0 5\n 1 1 6\n 2 2 7\n 3 3 8\n 4 4 9\n >>> pd.to_pickle(original_df, "./dummy.pkl") # doctest: +SKIP\n\n >>> unpickled_df = pd.read_pickle("./dummy.pkl") # doctest: +SKIP\n >>> unpickled_df # doctest: +SKIP\n foo bar\n 0 0 5\n 1 1 6\n 2 2 7\n 3 3 8\n 4 4 9\n """\n excs_to_catch = (AttributeError, ImportError, ModuleNotFoundError, TypeError)\n with get_handle(\n filepath_or_buffer,\n "rb",\n compression=compression,\n is_text=False,\n storage_options=storage_options,\n ) as handles:\n # 1) try standard library Pickle\n # 2) try pickle_compat (older pandas version) to handle subclass changes\n # 3) try pickle_compat with latin-1 encoding upon a UnicodeDecodeError\n\n try:\n # TypeError for Cython complaints about object.__new__ vs Tick.__new__\n try:\n with warnings.catch_warnings(record=True):\n # We want to silence any warnings about, e.g. moved modules.\n warnings.simplefilter("ignore", Warning)\n return pickle.load(handles.handle)\n except excs_to_catch:\n # e.g.\n # "No module named 'pandas.core.sparse.series'"\n # "Can't get attribute '__nat_unpickle' on <module 'pandas._libs.tslib"\n return pc.load(handles.handle, encoding=None)\n except UnicodeDecodeError:\n # e.g. 
can occur for files written in py27; see GH#28645 and GH#31988\n return pc.load(handles.handle, encoding="latin-1")\n
.venv\Lib\site-packages\pandas\io\pickle.py
pickle.py
Python
6,582
0.95
0.071429
0.055866
awesome-app
581
2024-05-13T09:39:59.549475
MIT
false
dcd546fcb1dddd9f8322256eed48d925
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom pandas._libs import lib\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.util._validators import check_dtype_backend\n\nfrom pandas.core.dtypes.inference import is_list_like\n\nfrom pandas.io.common import stringify_path\n\nif TYPE_CHECKING:\n from collections.abc import Sequence\n from pathlib import Path\n\n from pandas._typing import DtypeBackend\n\n from pandas import DataFrame\n\n\ndef read_spss(\n path: str | Path,\n usecols: Sequence[str] | None = None,\n convert_categoricals: bool = True,\n dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,\n) -> DataFrame:\n """\n Load an SPSS file from the file path, returning a DataFrame.\n\n Parameters\n ----------\n path : str or Path\n File path.\n usecols : list-like, optional\n Return a subset of the columns. If None, return all columns.\n convert_categoricals : bool, default is True\n Convert categorical columns into pd.Categorical.\n dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'\n Back-end data type applied to the resultant :class:`DataFrame`\n (still experimental). Behaviour is as follows:\n\n * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`\n (default).\n * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`\n DataFrame.\n\n .. 
versionadded:: 2.0\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n >>> df = pd.read_spss("spss_data.sav") # doctest: +SKIP\n """\n pyreadstat = import_optional_dependency("pyreadstat")\n check_dtype_backend(dtype_backend)\n\n if usecols is not None:\n if not is_list_like(usecols):\n raise TypeError("usecols must be list-like.")\n usecols = list(usecols) # pyreadstat requires a list\n\n df, metadata = pyreadstat.read_sav(\n stringify_path(path), usecols=usecols, apply_value_formats=convert_categoricals\n )\n df.attrs = metadata.__dict__\n if dtype_backend is not lib.no_default:\n df = df.convert_dtypes(dtype_backend=dtype_backend)\n return df\n
.venv\Lib\site-packages\pandas\io\spss.py
spss.py
Python
2,182
0.95
0.111111
0.035714
awesome-app
634
2024-10-29T13:06:57.943709
Apache-2.0
false
b49cd8f6771f69c813aaf73d1c2bb188