content stringlengths 1 103k ⌀ | path stringlengths 8 216 | filename stringlengths 2 179 | language stringclasses 15
values | size_bytes int64 2 189k | quality_score float64 0.5 0.95 | complexity float64 0 1 | documentation_ratio float64 0 1 | repository stringclasses 5
values | stars int64 0 1k | created_date stringdate 2023-07-10 19:21:08 2025-07-09 19:11:45 | license stringclasses 4
values | is_test bool 2
classes | file_hash stringlengths 32 32 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
\n\n | .venv\Lib\site-packages\pandas\io\__pycache__\_util.cpython-313.pyc | _util.cpython-313.pyc | Other | 4,299 | 0.95 | 0 | 0 | node-utils | 36 | 2023-08-10T17:26:17.084486 | GPL-3.0 | false | f1f71d995967b0da83b1627b025b4126 |
\n\n | .venv\Lib\site-packages\pandas\io\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 359 | 0.7 | 0 | 0 | awesome-app | 358 | 2023-10-22T04:19:16.089186 | Apache-2.0 | false | 8eca30276a9aff891b8678f4cdc39daf |
from __future__ import annotations\n\nimport importlib\nfrom typing import (\n TYPE_CHECKING,\n Callable,\n Literal,\n)\n\nfrom pandas._config import get_option\n\nfrom pandas.util._decorators import (\n Appender,\n Substitution,\n)\n\nfrom pandas.core.dtypes.common import (\n is_integer,\n is_list_like,\n)\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCSeries,\n)\n\nfrom pandas.core.base import PandasObject\n\nif TYPE_CHECKING:\n from collections.abc import (\n Hashable,\n Sequence,\n )\n import types\n\n from matplotlib.axes import Axes\n import numpy as np\n\n from pandas._typing import IndexLabel\n\n from pandas import (\n DataFrame,\n Series,\n )\n from pandas.core.groupby.generic import DataFrameGroupBy\n\n\ndef hist_series(\n self: Series,\n by=None,\n ax=None,\n grid: bool = True,\n xlabelsize: int | None = None,\n xrot: float | None = None,\n ylabelsize: int | None = None,\n yrot: float | None = None,\n figsize: tuple[int, int] | None = None,\n bins: int | Sequence[int] = 10,\n backend: str | None = None,\n legend: bool = False,\n **kwargs,\n):\n """\n Draw histogram of the input series using matplotlib.\n\n Parameters\n ----------\n by : object, optional\n If passed, then used to form histograms for separate groups.\n ax : matplotlib axis object\n If not passed, uses gca().\n grid : bool, default True\n Whether to show axis grid lines.\n xlabelsize : int, default None\n If specified changes the x-axis label size.\n xrot : float, default None\n Rotation of x axis labels.\n ylabelsize : int, default None\n If specified changes the y-axis label size.\n yrot : float, default None\n Rotation of y axis labels.\n figsize : tuple, default None\n Figure size in inches by default.\n bins : int or sequence, default 10\n Number of histogram bins to be used. If an integer is given, bins + 1\n bin edges are calculated and returned. If bins is a sequence, gives\n bin edges, including left edge of first bin and right edge of last\n bin. 
In this case, bins is returned unmodified.\n backend : str, default None\n Backend to use instead of the backend specified in the option\n ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to\n specify the ``plotting.backend`` for the whole session, set\n ``pd.options.plotting.backend``.\n legend : bool, default False\n Whether to show the legend.\n\n **kwargs\n To be passed to the actual plotting function.\n\n Returns\n -------\n matplotlib.AxesSubplot\n A histogram plot.\n\n See Also\n --------\n matplotlib.axes.Axes.hist : Plot a histogram using matplotlib.\n\n Examples\n --------\n For Series:\n\n .. plot::\n :context: close-figs\n\n >>> lst = ['a', 'a', 'a', 'b', 'b', 'b']\n >>> ser = pd.Series([1, 2, 2, 4, 6, 6], index=lst)\n >>> hist = ser.hist()\n\n For Groupby:\n\n .. plot::\n :context: close-figs\n\n >>> lst = ['a', 'a', 'a', 'b', 'b', 'b']\n >>> ser = pd.Series([1, 2, 2, 4, 6, 6], index=lst)\n >>> hist = ser.groupby(level=0).hist()\n """\n plot_backend = _get_plot_backend(backend)\n return plot_backend.hist_series(\n self,\n by=by,\n ax=ax,\n grid=grid,\n xlabelsize=xlabelsize,\n xrot=xrot,\n ylabelsize=ylabelsize,\n yrot=yrot,\n figsize=figsize,\n bins=bins,\n legend=legend,\n **kwargs,\n )\n\n\ndef hist_frame(\n data: DataFrame,\n column: IndexLabel | None = None,\n by=None,\n grid: bool = True,\n xlabelsize: int | None = None,\n xrot: float | None = None,\n ylabelsize: int | None = None,\n yrot: float | None = None,\n ax=None,\n sharex: bool = False,\n sharey: bool = False,\n figsize: tuple[int, int] | None = None,\n layout: tuple[int, int] | None = None,\n bins: int | Sequence[int] = 10,\n backend: str | None = None,\n legend: bool = False,\n **kwargs,\n):\n """\n Make a histogram of the DataFrame's columns.\n\n A `histogram`_ is a representation of the distribution of data.\n This function calls :meth:`matplotlib.pyplot.hist`, on each series in\n the DataFrame, resulting in one histogram per column.\n\n .. 
_histogram: https://en.wikipedia.org/wiki/Histogram\n\n Parameters\n ----------\n data : DataFrame\n The pandas object holding the data.\n column : str or sequence, optional\n If passed, will be used to limit data to a subset of columns.\n by : object, optional\n If passed, then used to form histograms for separate groups.\n grid : bool, default True\n Whether to show axis grid lines.\n xlabelsize : int, default None\n If specified changes the x-axis label size.\n xrot : float, default None\n Rotation of x axis labels. For example, a value of 90 displays the\n x labels rotated 90 degrees clockwise.\n ylabelsize : int, default None\n If specified changes the y-axis label size.\n yrot : float, default None\n Rotation of y axis labels. For example, a value of 90 displays the\n y labels rotated 90 degrees clockwise.\n ax : Matplotlib axes object, default None\n The axes to plot the histogram on.\n sharex : bool, default True if ax is None else False\n In case subplots=True, share x axis and set some x axis labels to\n invisible; defaults to True if ax is None otherwise False if an ax\n is passed in.\n Note that passing in both an ax and sharex=True will alter all x axis\n labels for all subplots in a figure.\n sharey : bool, default False\n In case subplots=True, share y axis and set some y axis labels to\n invisible.\n figsize : tuple, optional\n The size in inches of the figure to create. Uses the value in\n `matplotlib.rcParams` by default.\n layout : tuple, optional\n Tuple of (rows, columns) for the layout of the histograms.\n bins : int or sequence, default 10\n Number of histogram bins to be used. If an integer is given, bins + 1\n bin edges are calculated and returned. If bins is a sequence, gives\n bin edges, including left edge of first bin and right edge of last\n bin. In this case, bins is returned unmodified.\n\n backend : str, default None\n Backend to use instead of the backend specified in the option\n ``plotting.backend``. For instance, 'matplotlib'. 
Alternatively, to\n specify the ``plotting.backend`` for the whole session, set\n ``pd.options.plotting.backend``.\n\n legend : bool, default False\n Whether to show the legend.\n\n **kwargs\n All other plotting keyword arguments to be passed to\n :meth:`matplotlib.pyplot.hist`.\n\n Returns\n -------\n matplotlib.AxesSubplot or numpy.ndarray of them\n\n See Also\n --------\n matplotlib.pyplot.hist : Plot a histogram using matplotlib.\n\n Examples\n --------\n This example draws a histogram based on the length and width of\n some animals, displayed in three bins\n\n .. plot::\n :context: close-figs\n\n >>> data = {'length': [1.5, 0.5, 1.2, 0.9, 3],\n ... 'width': [0.7, 0.2, 0.15, 0.2, 1.1]}\n >>> index = ['pig', 'rabbit', 'duck', 'chicken', 'horse']\n >>> df = pd.DataFrame(data, index=index)\n >>> hist = df.hist(bins=3)\n """\n plot_backend = _get_plot_backend(backend)\n return plot_backend.hist_frame(\n data,\n column=column,\n by=by,\n grid=grid,\n xlabelsize=xlabelsize,\n xrot=xrot,\n ylabelsize=ylabelsize,\n yrot=yrot,\n ax=ax,\n sharex=sharex,\n sharey=sharey,\n figsize=figsize,\n layout=layout,\n legend=legend,\n bins=bins,\n **kwargs,\n )\n\n\n_boxplot_doc = """\nMake a box plot from DataFrame columns.\n\nMake a box-and-whisker plot from DataFrame columns, optionally grouped\nby some other columns. A box plot is a method for graphically depicting\ngroups of numerical data through their quartiles.\nThe box extends from the Q1 to Q3 quartile values of the data,\nwith a line at the median (Q2). The whiskers extend from the edges\nof box to show the range of the data. By default, they extend no more than\n`1.5 * IQR (IQR = Q3 - Q1)` from the edges of the box, ending at the farthest\ndata point within that interval. 
Outliers are plotted as separate dots.\n\nFor further details see\nWikipedia's entry for `boxplot <https://en.wikipedia.org/wiki/Box_plot>`_.\n\nParameters\n----------\n%(data)s\\ncolumn : str or list of str, optional\n Column name or list of names, or vector.\n Can be any valid input to :meth:`pandas.DataFrame.groupby`.\nby : str or array-like, optional\n Column in the DataFrame to :meth:`pandas.DataFrame.groupby`.\n One box-plot will be done per value of columns in `by`.\nax : object of class matplotlib.axes.Axes, optional\n The matplotlib axes to be used by boxplot.\nfontsize : float or str\n Tick label font size in points or as a string (e.g., `large`).\nrot : float, default 0\n The rotation angle of labels (in degrees)\n with respect to the screen coordinate system.\ngrid : bool, default True\n Setting this to True will show the grid.\nfigsize : A tuple (width, height) in inches\n The size of the figure to create in matplotlib.\nlayout : tuple (rows, columns), optional\n For example, (3, 5) will display the subplots\n using 3 rows and 5 columns, starting from the top-left.\nreturn_type : {'axes', 'dict', 'both'} or None, default 'axes'\n The kind of object to return. 
The default is ``axes``.\n\n * 'axes' returns the matplotlib axes the boxplot is drawn on.\n * 'dict' returns a dictionary whose values are the matplotlib\n Lines of the boxplot.\n * 'both' returns a namedtuple with the axes and dict.\n * when grouping with ``by``, a Series mapping columns to\n ``return_type`` is returned.\n\n If ``return_type`` is `None`, a NumPy array\n of axes with the same shape as ``layout`` is returned.\n%(backend)s\\n\n**kwargs\n All other plotting keyword arguments to be passed to\n :func:`matplotlib.pyplot.boxplot`.\n\nReturns\n-------\nresult\n See Notes.\n\nSee Also\n--------\npandas.Series.plot.hist: Make a histogram.\nmatplotlib.pyplot.boxplot : Matplotlib equivalent plot.\n\nNotes\n-----\nThe return type depends on the `return_type` parameter:\n\n* 'axes' : object of class matplotlib.axes.Axes\n* 'dict' : dict of matplotlib.lines.Line2D objects\n* 'both' : a namedtuple with structure (ax, lines)\n\nFor data grouped with ``by``, return a Series of the above or a numpy\narray:\n\n* :class:`~pandas.Series`\n* :class:`~numpy.array` (for ``return_type = None``)\n\nUse ``return_type='dict'`` when you want to tweak the appearance\nof the lines after plotting. In this case a dict containing the Lines\nmaking up the boxes, caps, fliers, medians, and whiskers is returned.\n\nExamples\n--------\n\nBoxplots can be created for every column in the dataframe\nby ``df.boxplot()`` or indicating the columns to be used:\n\n.. plot::\n :context: close-figs\n\n >>> np.random.seed(1234)\n >>> df = pd.DataFrame(np.random.randn(10, 4),\n ... columns=['Col1', 'Col2', 'Col3', 'Col4'])\n >>> boxplot = df.boxplot(column=['Col1', 'Col2', 'Col3']) # doctest: +SKIP\n\nBoxplots of variables distributions grouped by the values of a third\nvariable can be created using the option ``by``. For instance:\n\n.. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame(np.random.randn(10, 2),\n ... 
columns=['Col1', 'Col2'])\n >>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A',\n ... 'B', 'B', 'B', 'B', 'B'])\n >>> boxplot = df.boxplot(by='X')\n\nA list of strings (i.e. ``['X', 'Y']``) can be passed to boxplot\nin order to group the data by combination of the variables in the x-axis:\n\n.. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame(np.random.randn(10, 3),\n ... columns=['Col1', 'Col2', 'Col3'])\n >>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A',\n ... 'B', 'B', 'B', 'B', 'B'])\n >>> df['Y'] = pd.Series(['A', 'B', 'A', 'B', 'A',\n ... 'B', 'A', 'B', 'A', 'B'])\n >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by=['X', 'Y'])\n\nThe layout of boxplot can be adjusted giving a tuple to ``layout``:\n\n.. plot::\n :context: close-figs\n\n >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',\n ... layout=(2, 1))\n\nAdditional formatting can be done to the boxplot, like suppressing the grid\n(``grid=False``), rotating the labels in the x-axis (i.e. ``rot=45``)\nor changing the fontsize (i.e. ``fontsize=15``):\n\n.. plot::\n :context: close-figs\n\n >>> boxplot = df.boxplot(grid=False, rot=45, fontsize=15) # doctest: +SKIP\n\nThe parameter ``return_type`` can be used to select the type of element\nreturned by `boxplot`. When ``return_type='axes'`` is selected,\nthe matplotlib axes on which the boxplot is drawn are returned:\n\n >>> boxplot = df.boxplot(column=['Col1', 'Col2'], return_type='axes')\n >>> type(boxplot)\n <class 'matplotlib.axes._axes.Axes'>\n\nWhen grouping with ``by``, a Series mapping columns to ``return_type``\nis returned:\n\n >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',\n ... return_type='axes')\n >>> type(boxplot)\n <class 'pandas.core.series.Series'>\n\nIf ``return_type`` is `None`, a NumPy array of axes with the same shape\nas ``layout`` is returned:\n\n >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',\n ... 
return_type=None)\n >>> type(boxplot)\n <class 'numpy.ndarray'>\n"""\n\n_backend_doc = """\\nbackend : str, default None\n Backend to use instead of the backend specified in the option\n ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to\n specify the ``plotting.backend`` for the whole session, set\n ``pd.options.plotting.backend``.\n"""\n\n\n_bar_or_line_doc = """\n Parameters\n ----------\n x : label or position, optional\n Allows plotting of one column versus another. If not specified,\n the index of the DataFrame is used.\n y : label or position, optional\n Allows plotting of one column versus another. If not specified,\n all numerical columns are used.\n color : str, array-like, or dict, optional\n The color for each of the DataFrame's columns. Possible values are:\n\n - A single color string referred to by name, RGB or RGBA code,\n for instance 'red' or '#a98d19'.\n\n - A sequence of color strings referred to by name, RGB or RGBA\n code, which will be used for each column recursively. For\n instance ['green','yellow'] each column's %(kind)s will be filled in\n green or yellow, alternatively. If there is only a single column to\n be plotted, then only the first color from the color list will be\n used.\n\n - A dict of the form {column name : color}, so that each column will be\n colored accordingly. 
For example, if your columns are called `a` and\n `b`, then passing {'a': 'green', 'b': 'red'} will color %(kind)ss for\n column `a` in green and %(kind)ss for column `b` in red.\n\n **kwargs\n Additional keyword arguments are documented in\n :meth:`DataFrame.plot`.\n\n Returns\n -------\n matplotlib.axes.Axes or np.ndarray of them\n An ndarray is returned with one :class:`matplotlib.axes.Axes`\n per column when ``subplots=True``.\n"""\n\n\n@Substitution(data="data : DataFrame\n The data to visualize.\n", backend="")\n@Appender(_boxplot_doc)\ndef boxplot(\n data: DataFrame,\n column: str | list[str] | None = None,\n by: str | list[str] | None = None,\n ax: Axes | None = None,\n fontsize: float | str | None = None,\n rot: int = 0,\n grid: bool = True,\n figsize: tuple[float, float] | None = None,\n layout: tuple[int, int] | None = None,\n return_type: str | None = None,\n **kwargs,\n):\n plot_backend = _get_plot_backend("matplotlib")\n return plot_backend.boxplot(\n data,\n column=column,\n by=by,\n ax=ax,\n fontsize=fontsize,\n rot=rot,\n grid=grid,\n figsize=figsize,\n layout=layout,\n return_type=return_type,\n **kwargs,\n )\n\n\n@Substitution(data="", backend=_backend_doc)\n@Appender(_boxplot_doc)\ndef boxplot_frame(\n self: DataFrame,\n column=None,\n by=None,\n ax=None,\n fontsize: int | None = None,\n rot: int = 0,\n grid: bool = True,\n figsize: tuple[float, float] | None = None,\n layout=None,\n return_type=None,\n backend=None,\n **kwargs,\n):\n plot_backend = _get_plot_backend(backend)\n return plot_backend.boxplot_frame(\n self,\n column=column,\n by=by,\n ax=ax,\n fontsize=fontsize,\n rot=rot,\n grid=grid,\n figsize=figsize,\n layout=layout,\n return_type=return_type,\n **kwargs,\n )\n\n\ndef boxplot_frame_groupby(\n grouped: DataFrameGroupBy,\n subplots: bool = True,\n column=None,\n fontsize: int | None = None,\n rot: int = 0,\n grid: bool = True,\n ax=None,\n figsize: tuple[float, float] | None = None,\n layout=None,\n sharex: bool = False,\n sharey: 
bool = True,\n backend=None,\n **kwargs,\n):\n """\n Make box plots from DataFrameGroupBy data.\n\n Parameters\n ----------\n grouped : Grouped DataFrame\n subplots : bool\n * ``False`` - no subplots will be used\n * ``True`` - create a subplot for each group.\n\n column : column name or list of names, or vector\n Can be any valid input to groupby.\n fontsize : float or str\n rot : label rotation angle\n grid : Setting this to True will show the grid\n ax : Matplotlib axis object, default None\n figsize : A tuple (width, height) in inches\n layout : tuple (optional)\n The layout of the plot: (rows, columns).\n sharex : bool, default False\n Whether x-axes will be shared among subplots.\n sharey : bool, default True\n Whether y-axes will be shared among subplots.\n backend : str, default None\n Backend to use instead of the backend specified in the option\n ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to\n specify the ``plotting.backend`` for the whole session, set\n ``pd.options.plotting.backend``.\n **kwargs\n All other plotting keyword arguments to be passed to\n matplotlib's boxplot function.\n\n Returns\n -------\n dict of key/value = group key/DataFrame.boxplot return value\n or DataFrame.boxplot return value in case subplots=figures=False\n\n Examples\n --------\n You can create boxplots for grouped data and show them as separate subplots:\n\n .. plot::\n :context: close-figs\n\n >>> import itertools\n >>> tuples = [t for t in itertools.product(range(1000), range(4))]\n >>> index = pd.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1'])\n >>> data = np.random.randn(len(index), 4)\n >>> df = pd.DataFrame(data, columns=list('ABCD'), index=index)\n >>> grouped = df.groupby(level='lvl1')\n >>> grouped.boxplot(rot=45, fontsize=12, figsize=(8, 10)) # doctest: +SKIP\n\n The ``subplots=False`` option shows the boxplots in a single figure.\n\n .. 
plot::\n :context: close-figs\n\n >>> grouped.boxplot(subplots=False, rot=45, fontsize=12) # doctest: +SKIP\n """\n plot_backend = _get_plot_backend(backend)\n return plot_backend.boxplot_frame_groupby(\n grouped,\n subplots=subplots,\n column=column,\n fontsize=fontsize,\n rot=rot,\n grid=grid,\n ax=ax,\n figsize=figsize,\n layout=layout,\n sharex=sharex,\n sharey=sharey,\n **kwargs,\n )\n\n\nclass PlotAccessor(PandasObject):\n """\n Make plots of Series or DataFrame.\n\n Uses the backend specified by the\n option ``plotting.backend``. By default, matplotlib is used.\n\n Parameters\n ----------\n data : Series or DataFrame\n The object for which the method is called.\n x : label or position, default None\n Only used if data is a DataFrame.\n y : label, position or list of label, positions, default None\n Allows plotting of one column versus another. Only used if data is a\n DataFrame.\n kind : str\n The kind of plot to produce:\n\n - 'line' : line plot (default)\n - 'bar' : vertical bar plot\n - 'barh' : horizontal bar plot\n - 'hist' : histogram\n - 'box' : boxplot\n - 'kde' : Kernel Density Estimation plot\n - 'density' : same as 'kde'\n - 'area' : area plot\n - 'pie' : pie plot\n - 'scatter' : scatter plot (DataFrame only)\n - 'hexbin' : hexbin plot (DataFrame only)\n ax : matplotlib axes object, default None\n An axes of the current figure.\n subplots : bool or sequence of iterables, default False\n Whether to group columns into subplots:\n\n - ``False`` : No subplots will be used\n - ``True`` : Make separate subplots for each column.\n - sequence of iterables of column labels: Create a subplot for each\n group of columns. For example `[('a', 'c'), ('b', 'd')]` will\n create 2 subplots: one with columns 'a' and 'c', and one\n with columns 'b' and 'd'. Remaining columns that aren't specified\n will be plotted in additional subplots (one per column).\n\n .. 
versionadded:: 1.5.0\n\n sharex : bool, default True if ax is None else False\n In case ``subplots=True``, share x axis and set some x axis labels\n to invisible; defaults to True if ax is None otherwise False if\n an ax is passed in; Be aware, that passing in both an ax and\n ``sharex=True`` will alter all x axis labels for all axis in a figure.\n sharey : bool, default False\n In case ``subplots=True``, share y axis and set some y axis labels to invisible.\n layout : tuple, optional\n (rows, columns) for the layout of subplots.\n figsize : a tuple (width, height) in inches\n Size of a figure object.\n use_index : bool, default True\n Use index as ticks for x axis.\n title : str or list\n Title to use for the plot. If a string is passed, print the string\n at the top of the figure. If a list is passed and `subplots` is\n True, print each item in the list above the corresponding subplot.\n grid : bool, default None (matlab style default)\n Axis grid lines.\n legend : bool or {'reverse'}\n Place legend on axis subplots.\n style : list or dict\n The matplotlib line style per column.\n logx : bool or 'sym', default False\n Use log scaling or symlog scaling on x axis.\n\n logy : bool or 'sym' default False\n Use log scaling or symlog scaling on y axis.\n\n loglog : bool or 'sym', default False\n Use log scaling or symlog scaling on both x and y axes.\n\n xticks : sequence\n Values to use for the xticks.\n yticks : sequence\n Values to use for the yticks.\n xlim : 2-tuple/list\n Set the x limits of the current axes.\n ylim : 2-tuple/list\n Set the y limits of the current axes.\n xlabel : label, optional\n Name to use for the xlabel on x-axis. Default uses index name as xlabel, or the\n x-column name for planar plots.\n\n .. versionchanged:: 2.0.0\n\n Now applicable to histograms.\n\n ylabel : label, optional\n Name to use for the ylabel on y-axis. Default will show no ylabel, or the\n y-column name for planar plots.\n\n .. 
versionchanged:: 2.0.0\n\n Now applicable to histograms.\n\n rot : float, default None\n Rotation for ticks (xticks for vertical, yticks for horizontal\n plots).\n fontsize : float, default None\n Font size for xticks and yticks.\n colormap : str or matplotlib colormap object, default None\n Colormap to select colors from. If string, load colormap with that\n name from matplotlib.\n colorbar : bool, optional\n If True, plot colorbar (only relevant for 'scatter' and 'hexbin'\n plots).\n position : float\n Specify relative alignments for bar plot layout.\n From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5\n (center).\n table : bool, Series or DataFrame, default False\n If True, draw a table using the data in the DataFrame and the data\n will be transposed to meet matplotlib's default layout.\n If a Series or DataFrame is passed, use passed data to draw a\n table.\n yerr : DataFrame, Series, array-like, dict and str\n See :ref:`Plotting with Error Bars <visualization.errorbars>` for\n detail.\n xerr : DataFrame, Series, array-like, dict and str\n Equivalent to yerr.\n stacked : bool, default False in line and bar plots, and True in area plot\n If True, create stacked plot.\n secondary_y : bool or sequence, default False\n Whether to plot on the secondary y-axis if a list/tuple, which\n columns to plot on secondary y-axis.\n mark_right : bool, default True\n When using a secondary_y axis, automatically mark the column\n labels with "(right)" in the legend.\n include_bool : bool, default is False\n If True, boolean values can be plotted.\n backend : str, default None\n Backend to use instead of the backend specified in the option\n ``plotting.backend``. For instance, 'matplotlib'. 
Alternatively, to\n specify the ``plotting.backend`` for the whole session, set\n ``pd.options.plotting.backend``.\n **kwargs\n Options to pass to matplotlib plotting method.\n\n Returns\n -------\n :class:`matplotlib.axes.Axes` or numpy.ndarray of them\n If the backend is not the default matplotlib one, the return value\n will be the object returned by the backend.\n\n Notes\n -----\n - See matplotlib documentation online for more on this subject\n - If `kind` = 'bar' or 'barh', you can specify relative alignments\n for bar plot layout by `position` keyword.\n From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5\n (center)\n\n Examples\n --------\n For Series:\n\n .. plot::\n :context: close-figs\n\n >>> ser = pd.Series([1, 2, 3, 3])\n >>> plot = ser.plot(kind='hist', title="My plot")\n\n For DataFrame:\n\n .. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame({'length': [1.5, 0.5, 1.2, 0.9, 3],\n ... 'width': [0.7, 0.2, 0.15, 0.2, 1.1]},\n ... index=['pig', 'rabbit', 'duck', 'chicken', 'horse'])\n >>> plot = df.plot(title="DataFrame Plot")\n\n For SeriesGroupBy:\n\n .. plot::\n :context: close-figs\n\n >>> lst = [-1, -2, -3, 1, 2, 3]\n >>> ser = pd.Series([1, 2, 2, 4, 6, 6], index=lst)\n >>> plot = ser.groupby(lambda x: x > 0).plot(title="SeriesGroupBy Plot")\n\n For DataFrameGroupBy:\n\n .. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame({"col1" : [1, 2, 3, 4],\n ... 
"col2" : ["A", "B", "A", "B"]})\n >>> plot = df.groupby("col2").plot(kind="bar", title="DataFrameGroupBy Plot")\n """\n\n _common_kinds = ("line", "bar", "barh", "kde", "density", "area", "hist", "box")\n _series_kinds = ("pie",)\n _dataframe_kinds = ("scatter", "hexbin")\n _kind_aliases = {"density": "kde"}\n _all_kinds = _common_kinds + _series_kinds + _dataframe_kinds\n\n def __init__(self, data: Series | DataFrame) -> None:\n self._parent = data\n\n @staticmethod\n def _get_call_args(backend_name: str, data: Series | DataFrame, args, kwargs):\n """\n This function makes calls to this accessor `__call__` method compatible\n with the previous `SeriesPlotMethods.__call__` and\n `DataFramePlotMethods.__call__`. Those had slightly different\n signatures, since `DataFramePlotMethods` accepted `x` and `y`\n parameters.\n """\n if isinstance(data, ABCSeries):\n arg_def = [\n ("kind", "line"),\n ("ax", None),\n ("figsize", None),\n ("use_index", True),\n ("title", None),\n ("grid", None),\n ("legend", False),\n ("style", None),\n ("logx", False),\n ("logy", False),\n ("loglog", False),\n ("xticks", None),\n ("yticks", None),\n ("xlim", None),\n ("ylim", None),\n ("rot", None),\n ("fontsize", None),\n ("colormap", None),\n ("table", False),\n ("yerr", None),\n ("xerr", None),\n ("label", None),\n ("secondary_y", False),\n ("xlabel", None),\n ("ylabel", None),\n ]\n elif isinstance(data, ABCDataFrame):\n arg_def = [\n ("x", None),\n ("y", None),\n ("kind", "line"),\n ("ax", None),\n ("subplots", False),\n ("sharex", None),\n ("sharey", False),\n ("layout", None),\n ("figsize", None),\n ("use_index", True),\n ("title", None),\n ("grid", None),\n ("legend", True),\n ("style", None),\n ("logx", False),\n ("logy", False),\n ("loglog", False),\n ("xticks", None),\n ("yticks", None),\n ("xlim", None),\n ("ylim", None),\n ("rot", None),\n ("fontsize", None),\n ("colormap", None),\n ("table", False),\n ("yerr", None),\n ("xerr", None),\n ("secondary_y", False),\n ("xlabel", 
None),\n ("ylabel", None),\n ]\n else:\n raise TypeError(\n f"Called plot accessor for type {type(data).__name__}, "\n "expected Series or DataFrame"\n )\n\n if args and isinstance(data, ABCSeries):\n positional_args = str(args)[1:-1]\n keyword_args = ", ".join(\n [f"{name}={repr(value)}" for (name, _), value in zip(arg_def, args)]\n )\n msg = (\n "`Series.plot()` should not be called with positional "\n "arguments, only keyword arguments. The order of "\n "positional arguments will change in the future. "\n f"Use `Series.plot({keyword_args})` instead of "\n f"`Series.plot({positional_args})`."\n )\n raise TypeError(msg)\n\n pos_args = {name: value for (name, _), value in zip(arg_def, args)}\n if backend_name == "pandas.plotting._matplotlib":\n kwargs = dict(arg_def, **pos_args, **kwargs)\n else:\n kwargs = dict(pos_args, **kwargs)\n\n x = kwargs.pop("x", None)\n y = kwargs.pop("y", None)\n kind = kwargs.pop("kind", "line")\n return x, y, kind, kwargs\n\n def __call__(self, *args, **kwargs):\n plot_backend = _get_plot_backend(kwargs.pop("backend", None))\n\n x, y, kind, kwargs = self._get_call_args(\n plot_backend.__name__, self._parent, args, kwargs\n )\n\n kind = self._kind_aliases.get(kind, kind)\n\n # when using another backend, get out of the way\n if plot_backend.__name__ != "pandas.plotting._matplotlib":\n return plot_backend.plot(self._parent, x=x, y=y, kind=kind, **kwargs)\n\n if kind not in self._all_kinds:\n raise ValueError(\n f"{kind} is not a valid plot kind "\n f"Valid plot kinds: {self._all_kinds}"\n )\n\n # The original data structured can be transformed before passed to the\n # backend. 
For example, for DataFrame is common to set the index as the\n # `x` parameter, and return a Series with the parameter `y` as values.\n data = self._parent.copy()\n\n if isinstance(data, ABCSeries):\n kwargs["reuse_plot"] = True\n\n if kind in self._dataframe_kinds:\n if isinstance(data, ABCDataFrame):\n return plot_backend.plot(data, x=x, y=y, kind=kind, **kwargs)\n else:\n raise ValueError(f"plot kind {kind} can only be used for data frames")\n elif kind in self._series_kinds:\n if isinstance(data, ABCDataFrame):\n if y is None and kwargs.get("subplots") is False:\n raise ValueError(\n f"{kind} requires either y column or 'subplots=True'"\n )\n if y is not None:\n if is_integer(y) and not data.columns._holds_integer():\n y = data.columns[y]\n # converted to series actually. copy to not modify\n data = data[y].copy()\n data.index.name = y\n elif isinstance(data, ABCDataFrame):\n data_cols = data.columns\n if x is not None:\n if is_integer(x) and not data.columns._holds_integer():\n x = data_cols[x]\n elif not isinstance(data[x], ABCSeries):\n raise ValueError("x must be a label or position")\n data = data.set_index(x)\n if y is not None:\n # check if we have y as int or list of ints\n int_ylist = is_list_like(y) and all(is_integer(c) for c in y)\n int_y_arg = is_integer(y) or int_ylist\n if int_y_arg and not data.columns._holds_integer():\n y = data_cols[y]\n\n label_kw = kwargs["label"] if "label" in kwargs else False\n for kw in ["xerr", "yerr"]:\n if kw in kwargs and (\n isinstance(kwargs[kw], str) or is_integer(kwargs[kw])\n ):\n try:\n kwargs[kw] = data[kwargs[kw]]\n except (IndexError, KeyError, TypeError):\n pass\n\n # don't overwrite\n data = data[y].copy()\n\n if isinstance(data, ABCSeries):\n label_name = label_kw or y\n data.name = label_name\n else:\n match = is_list_like(label_kw) and len(label_kw) == len(y)\n if label_kw and not match:\n raise ValueError(\n "label should be list-like and same length as y"\n )\n label_name = label_kw or data.columns\n 
data.columns = label_name\n\n return plot_backend.plot(data, kind=kind, **kwargs)\n\n __call__.__doc__ = __doc__\n\n @Appender(\n """\n See Also\n --------\n matplotlib.pyplot.plot : Plot y versus x as lines and/or markers.\n\n Examples\n --------\n\n .. plot::\n :context: close-figs\n\n >>> s = pd.Series([1, 3, 2])\n >>> s.plot.line() # doctest: +SKIP\n\n .. plot::\n :context: close-figs\n\n The following example shows the populations for some animals\n over the years.\n\n >>> df = pd.DataFrame({\n ... 'pig': [20, 18, 489, 675, 1776],\n ... 'horse': [4, 25, 281, 600, 1900]\n ... }, index=[1990, 1997, 2003, 2009, 2014])\n >>> lines = df.plot.line()\n\n .. plot::\n :context: close-figs\n\n An example with subplots, so an array of axes is returned.\n\n >>> axes = df.plot.line(subplots=True)\n >>> type(axes)\n <class 'numpy.ndarray'>\n\n .. plot::\n :context: close-figs\n\n Let's repeat the same example, but specifying colors for\n each column (in this case, for each animal).\n\n >>> axes = df.plot.line(\n ... subplots=True, color={"pig": "pink", "horse": "#742802"}\n ... )\n\n .. plot::\n :context: close-figs\n\n The following example shows the relationship between both\n populations.\n\n >>> lines = df.plot.line(x='pig', y='horse')\n """\n )\n @Substitution(kind="line")\n @Appender(_bar_or_line_doc)\n def line(\n self, x: Hashable | None = None, y: Hashable | None = None, **kwargs\n ) -> PlotAccessor:\n """\n Plot Series or DataFrame as lines.\n\n This function is useful to plot lines using DataFrame's values\n as coordinates.\n """\n return self(kind="line", x=x, y=y, **kwargs)\n\n @Appender(\n """\n See Also\n --------\n DataFrame.plot.barh : Horizontal bar plot.\n DataFrame.plot : Make plots of a DataFrame.\n matplotlib.pyplot.bar : Make a bar plot with matplotlib.\n\n Examples\n --------\n Basic plot.\n\n .. 
plot::\n :context: close-figs\n\n >>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]})\n >>> ax = df.plot.bar(x='lab', y='val', rot=0)\n\n Plot a whole dataframe to a bar plot. Each column is assigned a\n distinct color, and each row is nested in a group along the\n horizontal axis.\n\n .. plot::\n :context: close-figs\n\n >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]\n >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]\n >>> index = ['snail', 'pig', 'elephant',\n ... 'rabbit', 'giraffe', 'coyote', 'horse']\n >>> df = pd.DataFrame({'speed': speed,\n ... 'lifespan': lifespan}, index=index)\n >>> ax = df.plot.bar(rot=0)\n\n Plot stacked bar charts for the DataFrame\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.bar(stacked=True)\n\n Instead of nesting, the figure can be split by column with\n ``subplots=True``. In this case, a :class:`numpy.ndarray` of\n :class:`matplotlib.axes.Axes` are returned.\n\n .. plot::\n :context: close-figs\n\n >>> axes = df.plot.bar(rot=0, subplots=True)\n >>> axes[1].legend(loc=2) # doctest: +SKIP\n\n If you don't like the default colours, you can specify how you'd\n like each column to be colored.\n\n .. plot::\n :context: close-figs\n\n >>> axes = df.plot.bar(\n ... rot=0, subplots=True, color={"speed": "red", "lifespan": "green"}\n ... )\n >>> axes[1].legend(loc=2) # doctest: +SKIP\n\n Plot a single column.\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.bar(y='speed', rot=0)\n\n Plot only selected categories for the DataFrame.\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.bar(x='lifespan', rot=0)\n """\n )\n @Substitution(kind="bar")\n @Appender(_bar_or_line_doc)\n def bar( # pylint: disable=disallowed-name\n self, x: Hashable | None = None, y: Hashable | None = None, **kwargs\n ) -> PlotAccessor:\n """\n Vertical bar plot.\n\n A bar plot is a plot that presents categorical data with\n rectangular bars with lengths proportional to the values that they\n represent. 
A bar plot shows comparisons among discrete categories. One\n axis of the plot shows the specific categories being compared, and the\n other axis represents a measured value.\n """\n return self(kind="bar", x=x, y=y, **kwargs)\n\n @Appender(\n """\n See Also\n --------\n DataFrame.plot.bar: Vertical bar plot.\n DataFrame.plot : Make plots of DataFrame using matplotlib.\n matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib.\n\n Examples\n --------\n Basic example\n\n .. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame({'lab': ['A', 'B', 'C'], 'val': [10, 30, 20]})\n >>> ax = df.plot.barh(x='lab', y='val')\n\n Plot a whole DataFrame to a horizontal bar plot\n\n .. plot::\n :context: close-figs\n\n >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]\n >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]\n >>> index = ['snail', 'pig', 'elephant',\n ... 'rabbit', 'giraffe', 'coyote', 'horse']\n >>> df = pd.DataFrame({'speed': speed,\n ... 'lifespan': lifespan}, index=index)\n >>> ax = df.plot.barh()\n\n Plot stacked barh charts for the DataFrame\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.barh(stacked=True)\n\n We can specify colors for each column\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.barh(color={"speed": "red", "lifespan": "green"})\n\n Plot a column of the DataFrame to a horizontal bar plot\n\n .. plot::\n :context: close-figs\n\n >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]\n >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]\n >>> index = ['snail', 'pig', 'elephant',\n ... 'rabbit', 'giraffe', 'coyote', 'horse']\n >>> df = pd.DataFrame({'speed': speed,\n ... 'lifespan': lifespan}, index=index)\n >>> ax = df.plot.barh(y='speed')\n\n Plot DataFrame versus the desired column\n\n .. plot::\n :context: close-figs\n\n >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]\n >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]\n >>> index = ['snail', 'pig', 'elephant',\n ... 'rabbit', 'giraffe', 'coyote', 'horse']\n >>> df = pd.DataFrame({'speed': speed,\n ... 
'lifespan': lifespan}, index=index)\n >>> ax = df.plot.barh(x='lifespan')\n """\n )\n @Substitution(kind="bar")\n @Appender(_bar_or_line_doc)\n def barh(\n self, x: Hashable | None = None, y: Hashable | None = None, **kwargs\n ) -> PlotAccessor:\n """\n Make a horizontal bar plot.\n\n A horizontal bar plot is a plot that presents quantitative data with\n rectangular bars with lengths proportional to the values that they\n represent. A bar plot shows comparisons among discrete categories. One\n axis of the plot shows the specific categories being compared, and the\n other axis represents a measured value.\n """\n return self(kind="barh", x=x, y=y, **kwargs)\n\n def box(self, by: IndexLabel | None = None, **kwargs) -> PlotAccessor:\n r"""\n Make a box plot of the DataFrame columns.\n\n A box plot is a method for graphically depicting groups of numerical\n data through their quartiles.\n The box extends from the Q1 to Q3 quartile values of the data,\n with a line at the median (Q2). The whiskers extend from the edges\n of box to show the range of the data. The position of the whiskers\n is set by default to 1.5*IQR (IQR = Q3 - Q1) from the edges of the\n box. Outlier points are those past the end of the whiskers.\n\n For further details see Wikipedia's\n entry for `boxplot <https://en.wikipedia.org/wiki/Box_plot>`__.\n\n A consideration when using this chart is that the box and the whiskers\n can overlap, which is very common when plotting small sets of data.\n\n Parameters\n ----------\n by : str or sequence\n Column in the DataFrame to group by.\n\n .. 
versionchanged:: 1.4.0\n\n Previously, `by` is silently ignore and makes no groupings\n\n **kwargs\n Additional keywords are documented in\n :meth:`DataFrame.plot`.\n\n Returns\n -------\n :class:`matplotlib.axes.Axes` or numpy.ndarray of them\n\n See Also\n --------\n DataFrame.boxplot: Another method to draw a box plot.\n Series.plot.box: Draw a box plot from a Series object.\n matplotlib.pyplot.boxplot: Draw a box plot in matplotlib.\n\n Examples\n --------\n Draw a box plot from a DataFrame with four columns of randomly\n generated data.\n\n .. plot::\n :context: close-figs\n\n >>> data = np.random.randn(25, 4)\n >>> df = pd.DataFrame(data, columns=list('ABCD'))\n >>> ax = df.plot.box()\n\n You can also generate groupings if you specify the `by` parameter (which\n can take a column name, or a list or tuple of column names):\n\n .. versionchanged:: 1.4.0\n\n .. plot::\n :context: close-figs\n\n >>> age_list = [8, 10, 12, 14, 72, 74, 76, 78, 20, 25, 30, 35, 60, 85]\n >>> df = pd.DataFrame({"gender": list("MMMMMMMMFFFFFF"), "age": age_list})\n >>> ax = df.plot.box(column="age", by="gender", figsize=(10, 8))\n """\n return self(kind="box", by=by, **kwargs)\n\n def hist(\n self, by: IndexLabel | None = None, bins: int = 10, **kwargs\n ) -> PlotAccessor:\n """\n Draw one histogram of the DataFrame's columns.\n\n A histogram is a representation of the distribution of data.\n This function groups the values of all given Series in the DataFrame\n into bins and draws all bins in one :class:`matplotlib.axes.Axes`.\n This is useful when the DataFrame's Series are in a similar scale.\n\n Parameters\n ----------\n by : str or sequence, optional\n Column in the DataFrame to group by.\n\n .. 
versionchanged:: 1.4.0

               Previously, `by` was silently ignored and made no groupings

        bins : int, default 10
            Number of histogram bins to be used.
        **kwargs
            Additional keyword arguments are documented in
            :meth:`DataFrame.plot`.

        Returns
        -------
        :class:`matplotlib.AxesSubplot`
            Return a histogram plot.

        See Also
        --------
        DataFrame.hist : Draw histograms per DataFrame's Series.
        Series.hist : Draw a histogram with Series' data.

        Examples
        --------
        When we roll a die 6000 times, we expect to get each value around 1000
        times. But when we roll two dice and sum the result, the distribution
        is going to be quite different. A histogram illustrates those
        distributions.

        .. plot::
            :context: close-figs

            >>> df = pd.DataFrame(np.random.randint(1, 7, 6000), columns=['one'])
            >>> df['two'] = df['one'] + np.random.randint(1, 7, 6000)
            >>> ax = df.plot.hist(bins=12, alpha=0.5)

        A grouped histogram can be generated by providing the parameter `by` (which
        can be a column name, or a list of column names):

        .. plot::
            :context: close-figs

            >>> age_list = [8, 10, 12, 14, 72, 74, 76, 78, 20, 25, 30, 35, 60, 85]
            >>> df = pd.DataFrame({"gender": list("MMMMMMMMFFFFFF"), "age": age_list})
            >>> ax = df.plot.hist(column=["age"], by="gender", figsize=(10, 8))
        """
        return self(kind="hist", by=by, bins=bins, **kwargs)

    def kde(
        self,
        bw_method: Literal["scott", "silverman"] | float | Callable | None = None,
        ind: np.ndarray | int | None = None,
        **kwargs,
    ) -> PlotAccessor:
        """
        Generate Kernel Density Estimate plot using Gaussian kernels.

        In statistics, `kernel density estimation`_ (KDE) is a non-parametric
        way to estimate the probability density function (PDF) of a random
        variable. This function uses Gaussian kernels and includes automatic
        bandwidth determination.

        .. 
_kernel density estimation:\n https://en.wikipedia.org/wiki/Kernel_density_estimation\n\n Parameters\n ----------\n bw_method : str, scalar or callable, optional\n The method used to calculate the estimator bandwidth. This can be\n 'scott', 'silverman', a scalar constant or a callable.\n If None (default), 'scott' is used.\n See :class:`scipy.stats.gaussian_kde` for more information.\n ind : NumPy array or int, optional\n Evaluation points for the estimated PDF. If None (default),\n 1000 equally spaced points are used. If `ind` is a NumPy array, the\n KDE is evaluated at the points passed. If `ind` is an integer,\n `ind` number of equally spaced points are used.\n **kwargs\n Additional keyword arguments are documented in\n :meth:`DataFrame.plot`.\n\n Returns\n -------\n matplotlib.axes.Axes or numpy.ndarray of them\n\n See Also\n --------\n scipy.stats.gaussian_kde : Representation of a kernel-density\n estimate using Gaussian kernels. This is the function used\n internally to estimate the PDF.\n\n Examples\n --------\n Given a Series of points randomly sampled from an unknown\n distribution, estimate its PDF using KDE with automatic\n bandwidth determination and plot the results, evaluating them at\n 1000 equally spaced points (default):\n\n .. plot::\n :context: close-figs\n\n >>> s = pd.Series([1, 2, 2.5, 3, 3.5, 4, 5])\n >>> ax = s.plot.kde()\n\n A scalar bandwidth can be specified. Using a small bandwidth value can\n lead to over-fitting, while using a large bandwidth value may result\n in under-fitting:\n\n .. plot::\n :context: close-figs\n\n >>> ax = s.plot.kde(bw_method=0.3)\n\n .. plot::\n :context: close-figs\n\n >>> ax = s.plot.kde(bw_method=3)\n\n Finally, the `ind` parameter determines the evaluation points for the\n plot of the estimated PDF:\n\n .. plot::\n :context: close-figs\n\n >>> ax = s.plot.kde(ind=[1, 2, 3, 4, 5])\n\n For DataFrame, it works in the same way:\n\n .. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame({\n ... 
'x': [1, 2, 2.5, 3, 3.5, 4, 5],\n ... 'y': [4, 4, 4.5, 5, 5.5, 6, 6],\n ... })\n >>> ax = df.plot.kde()\n\n A scalar bandwidth can be specified. Using a small bandwidth value can\n lead to over-fitting, while using a large bandwidth value may result\n in under-fitting:\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.kde(bw_method=0.3)\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.kde(bw_method=3)\n\n Finally, the `ind` parameter determines the evaluation points for the\n plot of the estimated PDF:\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.kde(ind=[1, 2, 3, 4, 5, 6])\n """\n return self(kind="kde", bw_method=bw_method, ind=ind, **kwargs)\n\n density = kde\n\n def area(\n self,\n x: Hashable | None = None,\n y: Hashable | None = None,\n stacked: bool = True,\n **kwargs,\n ) -> PlotAccessor:\n """\n Draw a stacked area plot.\n\n An area plot displays quantitative data visually.\n This function wraps the matplotlib area function.\n\n Parameters\n ----------\n x : label or position, optional\n Coordinates for the X axis. By default uses the index.\n y : label or position, optional\n Column to plot. By default uses all columns.\n stacked : bool, default True\n Area plots are stacked by default. Set to False to create a\n unstacked plot.\n **kwargs\n Additional keyword arguments are documented in\n :meth:`DataFrame.plot`.\n\n Returns\n -------\n matplotlib.axes.Axes or numpy.ndarray\n Area plot, or array of area plots if subplots is True.\n\n See Also\n --------\n DataFrame.plot : Make plots of DataFrame using matplotlib / pylab.\n\n Examples\n --------\n Draw an area plot based on basic business metrics:\n\n .. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame({\n ... 'sales': [3, 2, 3, 9, 10, 6],\n ... 'signups': [5, 5, 6, 12, 14, 13],\n ... 'visits': [20, 42, 28, 62, 81, 50],\n ... }, index=pd.date_range(start='2018/01/01', end='2018/07/01',\n ... freq='ME'))\n >>> ax = df.plot.area()\n\n Area plots are stacked by default. 
To produce an unstacked plot,\n pass ``stacked=False``:\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.area(stacked=False)\n\n Draw an area plot for a single column:\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.area(y='sales')\n\n Draw with a different `x`:\n\n .. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame({\n ... 'sales': [3, 2, 3],\n ... 'visits': [20, 42, 28],\n ... 'day': [1, 2, 3],\n ... })\n >>> ax = df.plot.area(x='day')\n """\n return self(kind="area", x=x, y=y, stacked=stacked, **kwargs)\n\n def pie(self, **kwargs) -> PlotAccessor:\n """\n Generate a pie plot.\n\n A pie plot is a proportional representation of the numerical data in a\n column. This function wraps :meth:`matplotlib.pyplot.pie` for the\n specified column. If no column reference is passed and\n ``subplots=True`` a pie plot is drawn for each numerical column\n independently.\n\n Parameters\n ----------\n y : int or label, optional\n Label or position of the column to plot.\n If not provided, ``subplots=True`` argument must be passed.\n **kwargs\n Keyword arguments to pass on to :meth:`DataFrame.plot`.\n\n Returns\n -------\n matplotlib.axes.Axes or np.ndarray of them\n A NumPy array is returned when `subplots` is True.\n\n See Also\n --------\n Series.plot.pie : Generate a pie plot for a Series.\n DataFrame.plot : Make plots of a DataFrame.\n\n Examples\n --------\n In the example below we have a DataFrame with the information about\n planet's mass and radius. We pass the 'mass' column to the\n pie function to get a pie plot.\n\n .. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame({'mass': [0.330, 4.87 , 5.97],\n ... 'radius': [2439.7, 6051.8, 6378.1]},\n ... index=['Mercury', 'Venus', 'Earth'])\n >>> plot = df.plot.pie(y='mass', figsize=(5, 5))\n\n .. 
plot::\n :context: close-figs\n\n >>> plot = df.plot.pie(subplots=True, figsize=(11, 6))\n """\n if (\n isinstance(self._parent, ABCDataFrame)\n and kwargs.get("y", None) is None\n and not kwargs.get("subplots", False)\n ):\n raise ValueError("pie requires either y column or 'subplots=True'")\n return self(kind="pie", **kwargs)\n\n def scatter(\n self,\n x: Hashable,\n y: Hashable,\n s: Hashable | Sequence[Hashable] | None = None,\n c: Hashable | Sequence[Hashable] | None = None,\n **kwargs,\n ) -> PlotAccessor:\n """\n Create a scatter plot with varying marker point size and color.\n\n The coordinates of each point are defined by two dataframe columns and\n filled circles are used to represent each point. This kind of plot is\n useful to see complex correlations between two variables. Points could\n be for instance natural 2D coordinates like longitude and latitude in\n a map or, in general, any pair of metrics that can be plotted against\n each other.\n\n Parameters\n ----------\n x : int or str\n The column name or column position to be used as horizontal\n coordinates for each point.\n y : int or str\n The column name or column position to be used as vertical\n coordinates for each point.\n s : str, scalar or array-like, optional\n The size of each point. Possible values are:\n\n - A string with the name of the column to be used for marker's size.\n\n - A single scalar so all points have the same size.\n\n - A sequence of scalars, which will be used for each point's size\n recursively. For instance, when passing [2,14] all points size\n will be either 2 or 14, alternatively.\n\n c : str, int or array-like, optional\n The color of each point. Possible values are:\n\n - A single color string referred to by name, RGB or RGBA code,\n for instance 'red' or '#a98d19'.\n\n - A sequence of color strings referred to by name, RGB or RGBA\n code, which will be used for each point's color recursively. 
For\n instance ['green','yellow'] all points will be filled in green or\n yellow, alternatively.\n\n - A column name or position whose values will be used to color the\n marker points according to a colormap.\n\n **kwargs\n Keyword arguments to pass on to :meth:`DataFrame.plot`.\n\n Returns\n -------\n :class:`matplotlib.axes.Axes` or numpy.ndarray of them\n\n See Also\n --------\n matplotlib.pyplot.scatter : Scatter plot using multiple input data\n formats.\n\n Examples\n --------\n Let's see how to draw a scatter plot using coordinates from the values\n in a DataFrame's columns.\n\n .. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame([[5.1, 3.5, 0], [4.9, 3.0, 0], [7.0, 3.2, 1],\n ... [6.4, 3.2, 1], [5.9, 3.0, 2]],\n ... columns=['length', 'width', 'species'])\n >>> ax1 = df.plot.scatter(x='length',\n ... y='width',\n ... c='DarkBlue')\n\n And now with the color determined by a column as well.\n\n .. plot::\n :context: close-figs\n\n >>> ax2 = df.plot.scatter(x='length',\n ... y='width',\n ... c='species',\n ... colormap='viridis')\n """\n return self(kind="scatter", x=x, y=y, s=s, c=c, **kwargs)\n\n def hexbin(\n self,\n x: Hashable,\n y: Hashable,\n C: Hashable | None = None,\n reduce_C_function: Callable | None = None,\n gridsize: int | tuple[int, int] | None = None,\n **kwargs,\n ) -> PlotAccessor:\n """\n Generate a hexagonal binning plot.\n\n Generate a hexagonal binning plot of `x` versus `y`. If `C` is `None`\n (the default), this is a histogram of the number of occurrences\n of the observations at ``(x[i], y[i])``.\n\n If `C` is specified, specifies values at given coordinates\n ``(x[i], y[i])``. 
These values are accumulated for each hexagonal\n bin and then reduced according to `reduce_C_function`,\n having as default the NumPy's mean function (:meth:`numpy.mean`).\n (If `C` is specified, it must also be a 1-D sequence\n of the same length as `x` and `y`, or a column label.)\n\n Parameters\n ----------\n x : int or str\n The column label or position for x points.\n y : int or str\n The column label or position for y points.\n C : int or str, optional\n The column label or position for the value of `(x, y)` point.\n reduce_C_function : callable, default `np.mean`\n Function of one argument that reduces all the values in a bin to\n a single number (e.g. `np.mean`, `np.max`, `np.sum`, `np.std`).\n gridsize : int or tuple of (int, int), default 100\n The number of hexagons in the x-direction.\n The corresponding number of hexagons in the y-direction is\n chosen in a way that the hexagons are approximately regular.\n Alternatively, gridsize can be a tuple with two elements\n specifying the number of hexagons in the x-direction and the\n y-direction.\n **kwargs\n Additional keyword arguments are documented in\n :meth:`DataFrame.plot`.\n\n Returns\n -------\n matplotlib.AxesSubplot\n The matplotlib ``Axes`` on which the hexbin is plotted.\n\n See Also\n --------\n DataFrame.plot : Make plots of a DataFrame.\n matplotlib.pyplot.hexbin : Hexagonal binning plot using matplotlib,\n the matplotlib function that is used under the hood.\n\n Examples\n --------\n The following examples are generated with random data from\n a normal distribution.\n\n .. plot::\n :context: close-figs\n\n >>> n = 10000\n >>> df = pd.DataFrame({'x': np.random.randn(n),\n ... 'y': np.random.randn(n)})\n >>> ax = df.plot.hexbin(x='x', y='y', gridsize=20)\n\n The next example uses `C` and `np.sum` as `reduce_C_function`.\n Note that `'observations'` values ranges from 1 to 5 but the result\n plot shows values up to more than 25. This is because of the\n `reduce_C_function`.\n\n .. 
plot::\n :context: close-figs\n\n >>> n = 500\n >>> df = pd.DataFrame({\n ... 'coord_x': np.random.uniform(-3, 3, size=n),\n ... 'coord_y': np.random.uniform(30, 50, size=n),\n ... 'observations': np.random.randint(1,5, size=n)\n ... })\n >>> ax = df.plot.hexbin(x='coord_x',\n ... y='coord_y',\n ... C='observations',\n ... reduce_C_function=np.sum,\n ... gridsize=10,\n ... cmap="viridis")\n """\n if reduce_C_function is not None:\n kwargs["reduce_C_function"] = reduce_C_function\n if gridsize is not None:\n kwargs["gridsize"] = gridsize\n\n return self(kind="hexbin", x=x, y=y, C=C, **kwargs)\n\n\n_backends: dict[str, types.ModuleType] = {}\n\n\ndef _load_backend(backend: str) -> types.ModuleType:\n """\n Load a pandas plotting backend.\n\n Parameters\n ----------\n backend : str\n The identifier for the backend. Either an entrypoint item registered\n with importlib.metadata, "matplotlib", or a module name.\n\n Returns\n -------\n types.ModuleType\n The imported backend.\n """\n from importlib.metadata import entry_points\n\n if backend == "matplotlib":\n # Because matplotlib is an optional dependency and first-party backend,\n # we need to attempt an import here to raise an ImportError if needed.\n try:\n module = importlib.import_module("pandas.plotting._matplotlib")\n except ImportError:\n raise ImportError(\n "matplotlib is required for plotting when the "\n 'default backend "matplotlib" is selected.'\n ) from None\n return module\n\n found_backend = False\n\n eps = entry_points()\n key = "pandas_plotting_backends"\n # entry_points lost dict API ~ PY 3.10\n # https://github.com/python/importlib_metadata/issues/298\n if hasattr(eps, "select"):\n entry = eps.select(group=key)\n else:\n # Argument 2 to "get" of "dict" has incompatible type "Tuple[]";\n # expected "EntryPoints" [arg-type]\n entry = eps.get(key, ()) # type: ignore[arg-type]\n for entry_point in entry:\n found_backend = entry_point.name == backend\n if found_backend:\n module = entry_point.load()\n 
break\n\n if not found_backend:\n # Fall back to unregistered, module name approach.\n try:\n module = importlib.import_module(backend)\n found_backend = True\n except ImportError:\n # We re-raise later on.\n pass\n\n if found_backend:\n if hasattr(module, "plot"):\n # Validate that the interface is implemented when the option is set,\n # rather than at plot time.\n return module\n\n raise ValueError(\n f"Could not find plotting backend '{backend}'. Ensure that you've "\n f"installed the package providing the '{backend}' entrypoint, or that "\n "the package has a top-level `.plot` method."\n )\n\n\ndef _get_plot_backend(backend: str | None = None):\n """\n Return the plotting backend to use (e.g. `pandas.plotting._matplotlib`).\n\n The plotting system of pandas uses matplotlib by default, but the idea here\n is that it can also work with other third-party backends. This function\n returns the module which provides a top-level `.plot` method that will\n actually do the plotting. The backend is specified from a string, which\n either comes from the keyword argument `backend`, or, if not specified, from\n the option `pandas.options.plotting.backend`. All the rest of the code in\n this file uses the backend specified there for the plotting.\n\n The backend is imported lazily, as matplotlib is a soft dependency, and\n pandas can be used without it being installed.\n\n Notes\n -----\n Modifies `_backends` with imported backend as a side effect.\n """\n backend_str: str = backend or get_option("plotting.backend")\n\n if backend_str in _backends:\n return _backends[backend_str]\n\n module = _load_backend(backend_str)\n _backends[backend_str] = module\n return module\n | .venv\Lib\site-packages\pandas\plotting\_core.py | _core.py | Python | 66,558 | 0.75 | 0.096608 | 0.034289 | python-kit | 612 | 2024-09-06T23:34:16.848457 | Apache-2.0 | false | e13ab3a48a27ac78585b9b6107ccfca1 |
from __future__ import annotations\n\nfrom contextlib import contextmanager\nfrom typing import (\n TYPE_CHECKING,\n Any,\n)\n\nfrom pandas.plotting._core import _get_plot_backend\n\nif TYPE_CHECKING:\n from collections.abc import (\n Generator,\n Mapping,\n )\n\n from matplotlib.axes import Axes\n from matplotlib.colors import Colormap\n from matplotlib.figure import Figure\n from matplotlib.table import Table\n import numpy as np\n\n from pandas import (\n DataFrame,\n Series,\n )\n\n\ndef table(ax: Axes, data: DataFrame | Series, **kwargs) -> Table:\n """\n Helper function to convert DataFrame and Series to matplotlib.table.\n\n Parameters\n ----------\n ax : Matplotlib axes object\n data : DataFrame or Series\n Data for table contents.\n **kwargs\n Keyword arguments to be passed to matplotlib.table.table.\n If `rowLabels` or `colLabels` is not specified, data index or column\n name will be used.\n\n Returns\n -------\n matplotlib table object\n\n Examples\n --------\n\n .. plot::\n :context: close-figs\n\n >>> import matplotlib.pyplot as plt\n >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})\n >>> fix, ax = plt.subplots()\n >>> ax.axis('off')\n (0.0, 1.0, 0.0, 1.0)\n >>> table = pd.plotting.table(ax, df, loc='center',\n ... cellLoc='center', colWidths=list([.2, .2]))\n """\n plot_backend = _get_plot_backend("matplotlib")\n return plot_backend.table(\n ax=ax, data=data, rowLabels=None, colLabels=None, **kwargs\n )\n\n\ndef register() -> None:\n """\n Register pandas formatters and converters with matplotlib.\n\n This function modifies the global ``matplotlib.units.registry``\n dictionary. pandas adds custom converters for\n\n * pd.Timestamp\n * pd.Period\n * np.datetime64\n * datetime.datetime\n * datetime.date\n * datetime.time\n\n See Also\n --------\n deregister_matplotlib_converters : Remove pandas formatters and converters.\n\n Examples\n --------\n .. 
plot::\n :context: close-figs\n\n The following line is done automatically by pandas so\n the plot can be rendered:\n\n >>> pd.plotting.register_matplotlib_converters()\n\n >>> df = pd.DataFrame({'ts': pd.period_range('2020', periods=2, freq='M'),\n ... 'y': [1, 2]\n ... })\n >>> plot = df.plot.line(x='ts', y='y')\n\n Unsetting the register manually an error will be raised:\n\n >>> pd.set_option("plotting.matplotlib.register_converters",\n ... False) # doctest: +SKIP\n >>> df.plot.line(x='ts', y='y') # doctest: +SKIP\n Traceback (most recent call last):\n TypeError: float() argument must be a string or a real number, not 'Period'\n """\n plot_backend = _get_plot_backend("matplotlib")\n plot_backend.register()\n\n\ndef deregister() -> None:\n """\n Remove pandas formatters and converters.\n\n Removes the custom converters added by :func:`register`. This\n attempts to set the state of the registry back to the state before\n pandas registered its own units. Converters for pandas' own types like\n Timestamp and Period are removed completely. Converters for types\n pandas overwrites, like ``datetime.datetime``, are restored to their\n original value.\n\n See Also\n --------\n register_matplotlib_converters : Register pandas formatters and converters\n with matplotlib.\n\n Examples\n --------\n .. plot::\n :context: close-figs\n\n The following line is done automatically by pandas so\n the plot can be rendered:\n\n >>> pd.plotting.register_matplotlib_converters()\n\n >>> df = pd.DataFrame({'ts': pd.period_range('2020', periods=2, freq='M'),\n ... 'y': [1, 2]\n ... })\n >>> plot = df.plot.line(x='ts', y='y')\n\n Unsetting the register manually an error will be raised:\n\n >>> pd.set_option("plotting.matplotlib.register_converters",\n ... 
False) # doctest: +SKIP\n >>> df.plot.line(x='ts', y='y') # doctest: +SKIP\n Traceback (most recent call last):\n TypeError: float() argument must be a string or a real number, not 'Period'\n """\n plot_backend = _get_plot_backend("matplotlib")\n plot_backend.deregister()\n\n\ndef scatter_matrix(\n frame: DataFrame,\n alpha: float = 0.5,\n figsize: tuple[float, float] | None = None,\n ax: Axes | None = None,\n grid: bool = False,\n diagonal: str = "hist",\n marker: str = ".",\n density_kwds: Mapping[str, Any] | None = None,\n hist_kwds: Mapping[str, Any] | None = None,\n range_padding: float = 0.05,\n **kwargs,\n) -> np.ndarray:\n """\n Draw a matrix of scatter plots.\n\n Parameters\n ----------\n frame : DataFrame\n alpha : float, optional\n Amount of transparency applied.\n figsize : (float,float), optional\n A tuple (width, height) in inches.\n ax : Matplotlib axis object, optional\n grid : bool, optional\n Setting this to True will show the grid.\n diagonal : {'hist', 'kde'}\n Pick between 'kde' and 'hist' for either Kernel Density Estimation or\n Histogram plot in the diagonal.\n marker : str, optional\n Matplotlib marker type, default '.'.\n density_kwds : keywords\n Keyword arguments to be passed to kernel density estimate plot.\n hist_kwds : keywords\n Keyword arguments to be passed to hist function.\n range_padding : float, default 0.05\n Relative extension of axis range in x and y with respect to\n (x_max - x_min) or (y_max - y_min).\n **kwargs\n Keyword arguments to be passed to scatter function.\n\n Returns\n -------\n numpy.ndarray\n A matrix of scatter plots.\n\n Examples\n --------\n\n .. 
plot::\n :context: close-figs\n\n >>> df = pd.DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])\n >>> pd.plotting.scatter_matrix(df, alpha=0.2)\n array([[<Axes: xlabel='A', ylabel='A'>, <Axes: xlabel='B', ylabel='A'>,\n <Axes: xlabel='C', ylabel='A'>, <Axes: xlabel='D', ylabel='A'>],\n [<Axes: xlabel='A', ylabel='B'>, <Axes: xlabel='B', ylabel='B'>,\n <Axes: xlabel='C', ylabel='B'>, <Axes: xlabel='D', ylabel='B'>],\n [<Axes: xlabel='A', ylabel='C'>, <Axes: xlabel='B', ylabel='C'>,\n <Axes: xlabel='C', ylabel='C'>, <Axes: xlabel='D', ylabel='C'>],\n [<Axes: xlabel='A', ylabel='D'>, <Axes: xlabel='B', ylabel='D'>,\n <Axes: xlabel='C', ylabel='D'>, <Axes: xlabel='D', ylabel='D'>]],\n dtype=object)\n """\n plot_backend = _get_plot_backend("matplotlib")\n return plot_backend.scatter_matrix(\n frame=frame,\n alpha=alpha,\n figsize=figsize,\n ax=ax,\n grid=grid,\n diagonal=diagonal,\n marker=marker,\n density_kwds=density_kwds,\n hist_kwds=hist_kwds,\n range_padding=range_padding,\n **kwargs,\n )\n\n\ndef radviz(\n frame: DataFrame,\n class_column: str,\n ax: Axes | None = None,\n color: list[str] | tuple[str, ...] | None = None,\n colormap: Colormap | str | None = None,\n **kwds,\n) -> Axes:\n """\n Plot a multidimensional dataset in 2D.\n\n Each Series in the DataFrame is represented as a evenly distributed\n slice on a circle. Each data point is rendered in the circle according to\n the value on each Series. 
Highly correlated `Series` in the `DataFrame`\n are placed closer on the unit circle.\n\n RadViz allow to project a N-dimensional data set into a 2D space where the\n influence of each dimension can be interpreted as a balance between the\n influence of all dimensions.\n\n More info available at the `original article\n <https://doi.org/10.1145/331770.331775>`_\n describing RadViz.\n\n Parameters\n ----------\n frame : `DataFrame`\n Object holding the data.\n class_column : str\n Column name containing the name of the data point category.\n ax : :class:`matplotlib.axes.Axes`, optional\n A plot instance to which to add the information.\n color : list[str] or tuple[str], optional\n Assign a color to each category. Example: ['blue', 'green'].\n colormap : str or :class:`matplotlib.colors.Colormap`, default None\n Colormap to select colors from. If string, load colormap with that\n name from matplotlib.\n **kwds\n Options to pass to matplotlib scatter plotting method.\n\n Returns\n -------\n :class:`matplotlib.axes.Axes`\n\n See Also\n --------\n pandas.plotting.andrews_curves : Plot clustering visualization.\n\n Examples\n --------\n\n .. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame(\n ... {\n ... 'SepalLength': [6.5, 7.7, 5.1, 5.8, 7.6, 5.0, 5.4, 4.6, 6.7, 4.6],\n ... 'SepalWidth': [3.0, 3.8, 3.8, 2.7, 3.0, 2.3, 3.0, 3.2, 3.3, 3.6],\n ... 'PetalLength': [5.5, 6.7, 1.9, 5.1, 6.6, 3.3, 4.5, 1.4, 5.7, 1.0],\n ... 'PetalWidth': [1.8, 2.2, 0.4, 1.9, 2.1, 1.0, 1.5, 0.2, 2.1, 0.2],\n ... 'Category': [\n ... 'virginica',\n ... 'virginica',\n ... 'setosa',\n ... 'virginica',\n ... 'virginica',\n ... 'versicolor',\n ... 'versicolor',\n ... 'setosa',\n ... 'virginica',\n ... 'setosa'\n ... ]\n ... }\n ... 
)\n >>> pd.plotting.radviz(df, 'Category') # doctest: +SKIP\n """\n plot_backend = _get_plot_backend("matplotlib")\n return plot_backend.radviz(\n frame=frame,\n class_column=class_column,\n ax=ax,\n color=color,\n colormap=colormap,\n **kwds,\n )\n\n\ndef andrews_curves(\n frame: DataFrame,\n class_column: str,\n ax: Axes | None = None,\n samples: int = 200,\n color: list[str] | tuple[str, ...] | None = None,\n colormap: Colormap | str | None = None,\n **kwargs,\n) -> Axes:\n """\n Generate a matplotlib plot for visualizing clusters of multivariate data.\n\n Andrews curves have the functional form:\n\n .. math::\n f(t) = \\frac{x_1}{\\sqrt{2}} + x_2 \\sin(t) + x_3 \\cos(t) +\n x_4 \\sin(2t) + x_5 \\cos(2t) + \\cdots\n\n Where :math:`x` coefficients correspond to the values of each dimension\n and :math:`t` is linearly spaced between :math:`-\\pi` and :math:`+\\pi`.\n Each row of frame then corresponds to a single curve.\n\n Parameters\n ----------\n frame : DataFrame\n Data to be plotted, preferably normalized to (0.0, 1.0).\n class_column : label\n Name of the column containing class names.\n ax : axes object, default None\n Axes to use.\n samples : int\n Number of points to plot in each curve.\n color : str, list[str] or tuple[str], optional\n Colors to use for the different classes. Colors can be strings\n or 3-element floating point RGB values.\n colormap : str or matplotlib colormap object, default None\n Colormap to select colors from. If a string, load colormap with that\n name from matplotlib.\n **kwargs\n Options to pass to matplotlib plotting method.\n\n Returns\n -------\n :class:`matplotlib.axes.Axes`\n\n Examples\n --------\n\n .. plot::\n :context: close-figs\n\n >>> df = pd.read_csv(\n ... 'https://raw.githubusercontent.com/pandas-dev/'\n ... 'pandas/main/pandas/tests/io/data/csv/iris.csv'\n ... 
)\n >>> pd.plotting.andrews_curves(df, 'Name') # doctest: +SKIP\n """\n plot_backend = _get_plot_backend("matplotlib")\n return plot_backend.andrews_curves(\n frame=frame,\n class_column=class_column,\n ax=ax,\n samples=samples,\n color=color,\n colormap=colormap,\n **kwargs,\n )\n\n\ndef bootstrap_plot(\n series: Series,\n fig: Figure | None = None,\n size: int = 50,\n samples: int = 500,\n **kwds,\n) -> Figure:\n """\n Bootstrap plot on mean, median and mid-range statistics.\n\n The bootstrap plot is used to estimate the uncertainty of a statistic\n by relying on random sampling with replacement [1]_. This function will\n generate bootstrapping plots for mean, median and mid-range statistics\n for the given number of samples of the given size.\n\n .. [1] "Bootstrapping (statistics)" in \\n https://en.wikipedia.org/wiki/Bootstrapping_%28statistics%29\n\n Parameters\n ----------\n series : pandas.Series\n Series from where to get the samplings for the bootstrapping.\n fig : matplotlib.figure.Figure, default None\n If given, it will use the `fig` reference for plotting instead of\n creating a new one with default parameters.\n size : int, default 50\n Number of data points to consider during each sampling. It must be\n less than or equal to the length of the `series`.\n samples : int, default 500\n Number of times the bootstrap procedure is performed.\n **kwds\n Options to pass to matplotlib plotting method.\n\n Returns\n -------\n matplotlib.figure.Figure\n Matplotlib figure.\n\n See Also\n --------\n pandas.DataFrame.plot : Basic plotting for DataFrame objects.\n pandas.Series.plot : Basic plotting for Series objects.\n\n Examples\n --------\n This example draws a basic bootstrap plot for a Series.\n\n .. 
plot::\n :context: close-figs\n\n >>> s = pd.Series(np.random.uniform(size=100))\n >>> pd.plotting.bootstrap_plot(s) # doctest: +SKIP\n <Figure size 640x480 with 6 Axes>\n """\n plot_backend = _get_plot_backend("matplotlib")\n return plot_backend.bootstrap_plot(\n series=series, fig=fig, size=size, samples=samples, **kwds\n )\n\n\ndef parallel_coordinates(\n frame: DataFrame,\n class_column: str,\n cols: list[str] | None = None,\n ax: Axes | None = None,\n color: list[str] | tuple[str, ...] | None = None,\n use_columns: bool = False,\n xticks: list | tuple | None = None,\n colormap: Colormap | str | None = None,\n axvlines: bool = True,\n axvlines_kwds: Mapping[str, Any] | None = None,\n sort_labels: bool = False,\n **kwargs,\n) -> Axes:\n """\n Parallel coordinates plotting.\n\n Parameters\n ----------\n frame : DataFrame\n class_column : str\n Column name containing class names.\n cols : list, optional\n A list of column names to use.\n ax : matplotlib.axis, optional\n Matplotlib axis object.\n color : list or tuple, optional\n Colors to use for the different classes.\n use_columns : bool, optional\n If true, columns will be used as xticks.\n xticks : list or tuple, optional\n A list of values to use for xticks.\n colormap : str or matplotlib colormap, default None\n Colormap to use for line colors.\n axvlines : bool, optional\n If true, vertical lines will be added at each xtick.\n axvlines_kwds : keywords, optional\n Options to be passed to axvline method for vertical lines.\n sort_labels : bool, default False\n Sort class_column labels, useful when assigning colors.\n **kwargs\n Options to pass to matplotlib plotting method.\n\n Returns\n -------\n matplotlib.axes.Axes\n\n Examples\n --------\n\n .. plot::\n :context: close-figs\n\n >>> df = pd.read_csv(\n ... 'https://raw.githubusercontent.com/pandas-dev/'\n ... 'pandas/main/pandas/tests/io/data/csv/iris.csv'\n ... )\n >>> pd.plotting.parallel_coordinates(\n ... 
df, 'Name', color=('#556270', '#4ECDC4', '#C7F464')\n ... ) # doctest: +SKIP\n """\n plot_backend = _get_plot_backend("matplotlib")\n return plot_backend.parallel_coordinates(\n frame=frame,\n class_column=class_column,\n cols=cols,\n ax=ax,\n color=color,\n use_columns=use_columns,\n xticks=xticks,\n colormap=colormap,\n axvlines=axvlines,\n axvlines_kwds=axvlines_kwds,\n sort_labels=sort_labels,\n **kwargs,\n )\n\n\ndef lag_plot(series: Series, lag: int = 1, ax: Axes | None = None, **kwds) -> Axes:\n """\n Lag plot for time series.\n\n Parameters\n ----------\n series : Series\n The time series to visualize.\n lag : int, default 1\n Lag length of the scatter plot.\n ax : Matplotlib axis object, optional\n The matplotlib axis object to use.\n **kwds\n Matplotlib scatter method keyword arguments.\n\n Returns\n -------\n matplotlib.axes.Axes\n\n Examples\n --------\n Lag plots are most commonly used to look for patterns in time series data.\n\n Given the following time series\n\n .. plot::\n :context: close-figs\n\n >>> np.random.seed(5)\n >>> x = np.cumsum(np.random.normal(loc=1, scale=5, size=50))\n >>> s = pd.Series(x)\n >>> s.plot() # doctest: +SKIP\n\n A lag plot with ``lag=1`` returns\n\n .. 
plot::\n :context: close-figs\n\n >>> pd.plotting.lag_plot(s, lag=1)\n <Axes: xlabel='y(t)', ylabel='y(t + 1)'>\n """\n plot_backend = _get_plot_backend("matplotlib")\n return plot_backend.lag_plot(series=series, lag=lag, ax=ax, **kwds)\n\n\ndef autocorrelation_plot(series: Series, ax: Axes | None = None, **kwargs) -> Axes:\n """\n Autocorrelation plot for time series.\n\n Parameters\n ----------\n series : Series\n The time series to visualize.\n ax : Matplotlib axis object, optional\n The matplotlib axis object to use.\n **kwargs\n Options to pass to matplotlib plotting method.\n\n Returns\n -------\n matplotlib.axes.Axes\n\n Examples\n --------\n The horizontal lines in the plot correspond to 95% and 99% confidence bands.\n\n The dashed line is 99% confidence band.\n\n .. plot::\n :context: close-figs\n\n >>> spacing = np.linspace(-9 * np.pi, 9 * np.pi, num=1000)\n >>> s = pd.Series(0.7 * np.random.rand(1000) + 0.3 * np.sin(spacing))\n >>> pd.plotting.autocorrelation_plot(s) # doctest: +SKIP\n """\n plot_backend = _get_plot_backend("matplotlib")\n return plot_backend.autocorrelation_plot(series=series, ax=ax, **kwargs)\n\n\nclass _Options(dict):\n """\n Stores pandas plotting options.\n\n Allows for parameter aliasing so you can just use parameter names that are\n the same as the plot function parameters, but is stored in a canonical\n format that makes it easy to breakdown into groups later.\n\n Examples\n --------\n\n .. plot::\n :context: close-figs\n\n >>> np.random.seed(42)\n >>> df = pd.DataFrame({'A': np.random.randn(10),\n ... 'B': np.random.randn(10)},\n ... index=pd.date_range("1/1/2000",\n ... freq='4MS', periods=10))\n >>> with pd.plotting.plot_params.use("x_compat", True):\n ... _ = df["A"].plot(color="r")\n ... 
_ = df["B"].plot(color="g")\n """\n\n # alias so the names are same as plotting method parameter names\n _ALIASES = {"x_compat": "xaxis.compat"}\n _DEFAULT_KEYS = ["xaxis.compat"]\n\n def __init__(self, deprecated: bool = False) -> None:\n self._deprecated = deprecated\n super().__setitem__("xaxis.compat", False)\n\n def __getitem__(self, key):\n key = self._get_canonical_key(key)\n if key not in self:\n raise ValueError(f"{key} is not a valid pandas plotting option")\n return super().__getitem__(key)\n\n def __setitem__(self, key, value) -> None:\n key = self._get_canonical_key(key)\n super().__setitem__(key, value)\n\n def __delitem__(self, key) -> None:\n key = self._get_canonical_key(key)\n if key in self._DEFAULT_KEYS:\n raise ValueError(f"Cannot remove default parameter {key}")\n super().__delitem__(key)\n\n def __contains__(self, key) -> bool:\n key = self._get_canonical_key(key)\n return super().__contains__(key)\n\n def reset(self) -> None:\n """\n Reset the option store to its initial state\n\n Returns\n -------\n None\n """\n # error: Cannot access "__init__" directly\n self.__init__() # type: ignore[misc]\n\n def _get_canonical_key(self, key):\n return self._ALIASES.get(key, key)\n\n @contextmanager\n def use(self, key, value) -> Generator[_Options, None, None]:\n """\n Temporarily set a parameter value using the with statement.\n Aliasing allowed.\n """\n old_value = self[key]\n try:\n self[key] = value\n yield self\n finally:\n self[key] = old_value\n\n\nplot_params = _Options()\n | .venv\Lib\site-packages\pandas\plotting\_misc.py | _misc.py | Python | 20,929 | 0.95 | 0.082849 | 0.04363 | react-lib | 21 | 2025-06-30T11:18:05.793348 | GPL-3.0 | false | 9dac1d75f10c5787c5fff069525075af |
"""\nPlotting public API.\n\nAuthors of third-party plotting backends should implement a module with a\npublic ``plot(data, kind, **kwargs)``. The parameter `data` will contain\nthe data structure and can be a `Series` or a `DataFrame`. For example,\nfor ``df.plot()`` the parameter `data` will contain the DataFrame `df`.\nIn some cases, the data structure is transformed before being sent to\nthe backend (see PlotAccessor.__call__ in pandas/plotting/_core.py for\nthe exact transformations).\n\nThe parameter `kind` will be one of:\n\n- line\n- bar\n- barh\n- box\n- hist\n- kde\n- area\n- pie\n- scatter\n- hexbin\n\nSee the pandas API reference for documentation on each kind of plot.\n\nAny other keyword argument is currently assumed to be backend specific,\nbut some parameters may be unified and added to the signature in the\nfuture (e.g. `title` which should be useful for any backend).\n\nCurrently, all the Matplotlib functions in pandas are accessed through\nthe selected backend. For example, `pandas.plotting.boxplot` (equivalent\nto `DataFrame.boxplot`) is also accessed in the selected backend. This\nis expected to change, and the exact API is under discussion. 
But with\nthe current version, backends are expected to implement the next functions:\n\n- plot (describe above, used for `Series.plot` and `DataFrame.plot`)\n- hist_series and hist_frame (for `Series.hist` and `DataFrame.hist`)\n- boxplot (`pandas.plotting.boxplot(df)` equivalent to `DataFrame.boxplot`)\n- boxplot_frame and boxplot_frame_groupby\n- register and deregister (register converters for the tick formats)\n- Plots not called as `Series` and `DataFrame` methods:\n - table\n - andrews_curves\n - autocorrelation_plot\n - bootstrap_plot\n - lag_plot\n - parallel_coordinates\n - radviz\n - scatter_matrix\n\nUse the code in pandas/plotting/_matplotib.py and\nhttps://github.com/pyviz/hvplot as a reference on how to write a backend.\n\nFor the discussion about the API see\nhttps://github.com/pandas-dev/pandas/issues/26747.\n"""\nfrom pandas.plotting._core import (\n PlotAccessor,\n boxplot,\n boxplot_frame,\n boxplot_frame_groupby,\n hist_frame,\n hist_series,\n)\nfrom pandas.plotting._misc import (\n andrews_curves,\n autocorrelation_plot,\n bootstrap_plot,\n deregister as deregister_matplotlib_converters,\n lag_plot,\n parallel_coordinates,\n plot_params,\n radviz,\n register as register_matplotlib_converters,\n scatter_matrix,\n table,\n)\n\n__all__ = [\n "PlotAccessor",\n "boxplot",\n "boxplot_frame",\n "boxplot_frame_groupby",\n "hist_frame",\n "hist_series",\n "scatter_matrix",\n "radviz",\n "andrews_curves",\n "bootstrap_plot",\n "parallel_coordinates",\n "lag_plot",\n "autocorrelation_plot",\n "table",\n "plot_params",\n "register_matplotlib_converters",\n "deregister_matplotlib_converters",\n]\n | .venv\Lib\site-packages\pandas\plotting\__init__.py | __init__.py | Python | 2,826 | 0.95 | 0.071429 | 0 | react-lib | 749 | 2024-06-29T22:26:12.055544 | Apache-2.0 | false | f723eb33dd74626ec4958506454c314f |
from __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n Literal,\n NamedTuple,\n)\nimport warnings\n\nimport matplotlib as mpl\nfrom matplotlib.artist import setp\nimport numpy as np\n\nfrom pandas._libs import lib\nfrom pandas.util._decorators import cache_readonly\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.common import is_dict_like\nfrom pandas.core.dtypes.generic import ABCSeries\nfrom pandas.core.dtypes.missing import remove_na_arraylike\n\nimport pandas as pd\nimport pandas.core.common as com\nfrom pandas.util.version import Version\n\nfrom pandas.io.formats.printing import pprint_thing\nfrom pandas.plotting._matplotlib.core import (\n LinePlot,\n MPLPlot,\n)\nfrom pandas.plotting._matplotlib.groupby import create_iter_data_given_by\nfrom pandas.plotting._matplotlib.style import get_standard_colors\nfrom pandas.plotting._matplotlib.tools import (\n create_subplots,\n flatten_axes,\n maybe_adjust_figure,\n)\n\nif TYPE_CHECKING:\n from collections.abc import Collection\n\n from matplotlib.axes import Axes\n from matplotlib.figure import Figure\n from matplotlib.lines import Line2D\n\n from pandas._typing import MatplotlibColor\n\n\ndef _set_ticklabels(ax: Axes, labels: list[str], is_vertical: bool, **kwargs) -> None:\n """Set the tick labels of a given axis.\n\n Due to https://github.com/matplotlib/matplotlib/pull/17266, we need to handle the\n case of repeated ticks (due to `FixedLocator`) and thus we duplicate the number of\n labels.\n """\n ticks = ax.get_xticks() if is_vertical else ax.get_yticks()\n if len(ticks) != len(labels):\n i, remainder = divmod(len(ticks), len(labels))\n if Version(mpl.__version__) < Version("3.10"):\n assert remainder == 0, remainder\n labels *= i\n if is_vertical:\n ax.set_xticklabels(labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n\n\nclass BoxPlot(LinePlot):\n @property\n def _kind(self) -> Literal["box"]:\n return "box"\n\n _layout_type = 
"horizontal"\n\n _valid_return_types = (None, "axes", "dict", "both")\n\n class BP(NamedTuple):\n # namedtuple to hold results\n ax: Axes\n lines: dict[str, list[Line2D]]\n\n def __init__(self, data, return_type: str = "axes", **kwargs) -> None:\n if return_type not in self._valid_return_types:\n raise ValueError("return_type must be {None, 'axes', 'dict', 'both'}")\n\n self.return_type = return_type\n # Do not call LinePlot.__init__ which may fill nan\n MPLPlot.__init__(self, data, **kwargs) # pylint: disable=non-parent-init-called\n\n if self.subplots:\n # Disable label ax sharing. Otherwise, all subplots shows last\n # column label\n if self.orientation == "vertical":\n self.sharex = False\n else:\n self.sharey = False\n\n # error: Signature of "_plot" incompatible with supertype "MPLPlot"\n @classmethod\n def _plot( # type: ignore[override]\n cls, ax: Axes, y: np.ndarray, column_num=None, return_type: str = "axes", **kwds\n ):\n ys: np.ndarray | list[np.ndarray]\n if y.ndim == 2:\n ys = [remove_na_arraylike(v) for v in y]\n # Boxplot fails with empty arrays, so need to add a NaN\n # if any cols are empty\n # GH 8181\n ys = [v if v.size > 0 else np.array([np.nan]) for v in ys]\n else:\n ys = remove_na_arraylike(y)\n bp = ax.boxplot(ys, **kwds)\n\n if return_type == "dict":\n return bp, bp\n elif return_type == "both":\n return cls.BP(ax=ax, lines=bp), bp\n else:\n return ax, bp\n\n def _validate_color_args(self, color, colormap):\n if color is lib.no_default:\n return None\n\n if colormap is not None:\n warnings.warn(\n "'color' and 'colormap' cannot be used "\n "simultaneously. Using 'color'",\n stacklevel=find_stack_level(),\n )\n\n if isinstance(color, dict):\n valid_keys = ["boxes", "whiskers", "medians", "caps"]\n for key in color:\n if key not in valid_keys:\n raise ValueError(\n f"color dict contains invalid key '{key}'. 
"\n f"The key must be either {valid_keys}"\n )\n return color\n\n @cache_readonly\n def _color_attrs(self):\n # get standard colors for default\n # use 2 colors by default, for box/whisker and median\n # flier colors isn't needed here\n # because it can be specified by ``sym`` kw\n return get_standard_colors(num_colors=3, colormap=self.colormap, color=None)\n\n @cache_readonly\n def _boxes_c(self):\n return self._color_attrs[0]\n\n @cache_readonly\n def _whiskers_c(self):\n return self._color_attrs[0]\n\n @cache_readonly\n def _medians_c(self):\n return self._color_attrs[2]\n\n @cache_readonly\n def _caps_c(self):\n return self._color_attrs[0]\n\n def _get_colors(\n self,\n num_colors=None,\n color_kwds: dict[str, MatplotlibColor]\n | MatplotlibColor\n | Collection[MatplotlibColor]\n | None = "color",\n ) -> None:\n pass\n\n def maybe_color_bp(self, bp) -> None:\n if isinstance(self.color, dict):\n boxes = self.color.get("boxes", self._boxes_c)\n whiskers = self.color.get("whiskers", self._whiskers_c)\n medians = self.color.get("medians", self._medians_c)\n caps = self.color.get("caps", self._caps_c)\n else:\n # Other types are forwarded to matplotlib\n # If None, use default colors\n boxes = self.color or self._boxes_c\n whiskers = self.color or self._whiskers_c\n medians = self.color or self._medians_c\n caps = self.color or self._caps_c\n\n color_tup = (boxes, whiskers, medians, caps)\n maybe_color_bp(bp, color_tup=color_tup, **self.kwds)\n\n def _make_plot(self, fig: Figure) -> None:\n if self.subplots:\n self._return_obj = pd.Series(dtype=object)\n\n # Re-create iterated data if `by` is assigned by users\n data = (\n create_iter_data_given_by(self.data, self._kind)\n if self.by is not None\n else self.data\n )\n\n # error: Argument "data" to "_iter_data" of "MPLPlot" has\n # incompatible type "object"; expected "DataFrame |\n # dict[Hashable, Series | DataFrame]"\n for i, (label, y) in enumerate(self._iter_data(data=data)): # type: ignore[arg-type]\n ax = 
self._get_ax(i)\n kwds = self.kwds.copy()\n\n # When by is applied, show title for subplots to know which group it is\n # just like df.boxplot, and need to apply T on y to provide right input\n if self.by is not None:\n y = y.T\n ax.set_title(pprint_thing(label))\n\n # When `by` is assigned, the ticklabels will become unique grouped\n # values, instead of label which is used as subtitle in this case.\n # error: "Index" has no attribute "levels"; maybe "nlevels"?\n levels = self.data.columns.levels # type: ignore[attr-defined]\n ticklabels = [pprint_thing(col) for col in levels[0]]\n else:\n ticklabels = [pprint_thing(label)]\n\n ret, bp = self._plot(\n ax, y, column_num=i, return_type=self.return_type, **kwds\n )\n self.maybe_color_bp(bp)\n self._return_obj[label] = ret\n _set_ticklabels(\n ax=ax, labels=ticklabels, is_vertical=self.orientation == "vertical"\n )\n else:\n y = self.data.values.T\n ax = self._get_ax(0)\n kwds = self.kwds.copy()\n\n ret, bp = self._plot(\n ax, y, column_num=0, return_type=self.return_type, **kwds\n )\n self.maybe_color_bp(bp)\n self._return_obj = ret\n\n labels = [pprint_thing(left) for left in self.data.columns]\n if not self.use_index:\n labels = [pprint_thing(key) for key in range(len(labels))]\n _set_ticklabels(\n ax=ax, labels=labels, is_vertical=self.orientation == "vertical"\n )\n\n def _make_legend(self) -> None:\n pass\n\n def _post_plot_logic(self, ax: Axes, data) -> None:\n # GH 45465: make sure that the boxplot doesn't ignore xlabel/ylabel\n if self.xlabel:\n ax.set_xlabel(pprint_thing(self.xlabel))\n if self.ylabel:\n ax.set_ylabel(pprint_thing(self.ylabel))\n\n @property\n def orientation(self) -> Literal["horizontal", "vertical"]:\n if self.kwds.get("vert", True):\n return "vertical"\n else:\n return "horizontal"\n\n @property\n def result(self):\n if self.return_type is None:\n return super().result\n else:\n return self._return_obj\n\n\ndef maybe_color_bp(bp, color_tup, **kwds) -> None:\n # GH#30346, when users 
specifying those arguments explicitly, our defaults\n # for these four kwargs should be overridden; if not, use Pandas settings\n if not kwds.get("boxprops"):\n setp(bp["boxes"], color=color_tup[0], alpha=1)\n if not kwds.get("whiskerprops"):\n setp(bp["whiskers"], color=color_tup[1], alpha=1)\n if not kwds.get("medianprops"):\n setp(bp["medians"], color=color_tup[2], alpha=1)\n if not kwds.get("capprops"):\n setp(bp["caps"], color=color_tup[3], alpha=1)\n\n\ndef _grouped_plot_by_column(\n plotf,\n data,\n columns=None,\n by=None,\n numeric_only: bool = True,\n grid: bool = False,\n figsize: tuple[float, float] | None = None,\n ax=None,\n layout=None,\n return_type=None,\n **kwargs,\n):\n grouped = data.groupby(by, observed=False)\n if columns is None:\n if not isinstance(by, (list, tuple)):\n by = [by]\n columns = data._get_numeric_data().columns.difference(by)\n naxes = len(columns)\n fig, axes = create_subplots(\n naxes=naxes,\n sharex=kwargs.pop("sharex", True),\n sharey=kwargs.pop("sharey", True),\n figsize=figsize,\n ax=ax,\n layout=layout,\n )\n\n _axes = flatten_axes(axes)\n\n # GH 45465: move the "by" label based on "vert"\n xlabel, ylabel = kwargs.pop("xlabel", None), kwargs.pop("ylabel", None)\n if kwargs.get("vert", True):\n xlabel = xlabel or by\n else:\n ylabel = ylabel or by\n\n ax_values = []\n\n for i, col in enumerate(columns):\n ax = _axes[i]\n gp_col = grouped[col]\n keys, values = zip(*gp_col)\n re_plotf = plotf(keys, values, ax, xlabel=xlabel, ylabel=ylabel, **kwargs)\n ax.set_title(col)\n ax_values.append(re_plotf)\n ax.grid(grid)\n\n result = pd.Series(ax_values, index=columns, copy=False)\n\n # Return axes in multiplot case, maybe revisit later # 985\n if return_type is None:\n result = axes\n\n byline = by[0] if len(by) == 1 else by\n fig.suptitle(f"Boxplot grouped by {byline}")\n maybe_adjust_figure(fig, bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2)\n\n return result\n\n\ndef boxplot(\n data,\n column=None,\n by=None,\n 
ax=None,\n fontsize: int | None = None,\n rot: int = 0,\n grid: bool = True,\n figsize: tuple[float, float] | None = None,\n layout=None,\n return_type=None,\n **kwds,\n):\n import matplotlib.pyplot as plt\n\n # validate return_type:\n if return_type not in BoxPlot._valid_return_types:\n raise ValueError("return_type must be {'axes', 'dict', 'both'}")\n\n if isinstance(data, ABCSeries):\n data = data.to_frame("x")\n column = "x"\n\n def _get_colors():\n # num_colors=3 is required as method maybe_color_bp takes the colors\n # in positions 0 and 2.\n # if colors not provided, use same defaults as DataFrame.plot.box\n result = get_standard_colors(num_colors=3)\n result = np.take(result, [0, 0, 2])\n result = np.append(result, "k")\n\n colors = kwds.pop("color", None)\n if colors:\n if is_dict_like(colors):\n # replace colors in result array with user-specified colors\n # taken from the colors dict parameter\n # "boxes" value placed in position 0, "whiskers" in 1, etc.\n valid_keys = ["boxes", "whiskers", "medians", "caps"]\n key_to_index = dict(zip(valid_keys, range(4)))\n for key, value in colors.items():\n if key in valid_keys:\n result[key_to_index[key]] = value\n else:\n raise ValueError(\n f"color dict contains invalid key '{key}'. 
"\n f"The key must be either {valid_keys}"\n )\n else:\n result.fill(colors)\n\n return result\n\n def plot_group(keys, values, ax: Axes, **kwds):\n # GH 45465: xlabel/ylabel need to be popped out before plotting happens\n xlabel, ylabel = kwds.pop("xlabel", None), kwds.pop("ylabel", None)\n if xlabel:\n ax.set_xlabel(pprint_thing(xlabel))\n if ylabel:\n ax.set_ylabel(pprint_thing(ylabel))\n\n keys = [pprint_thing(x) for x in keys]\n values = [np.asarray(remove_na_arraylike(v), dtype=object) for v in values]\n bp = ax.boxplot(values, **kwds)\n if fontsize is not None:\n ax.tick_params(axis="both", labelsize=fontsize)\n\n # GH 45465: x/y are flipped when "vert" changes\n _set_ticklabels(\n ax=ax, labels=keys, is_vertical=kwds.get("vert", True), rotation=rot\n )\n maybe_color_bp(bp, color_tup=colors, **kwds)\n\n # Return axes in multiplot case, maybe revisit later # 985\n if return_type == "dict":\n return bp\n elif return_type == "both":\n return BoxPlot.BP(ax=ax, lines=bp)\n else:\n return ax\n\n colors = _get_colors()\n if column is None:\n columns = None\n elif isinstance(column, (list, tuple)):\n columns = column\n else:\n columns = [column]\n\n if by is not None:\n # Prefer array return type for 2-D plots to match the subplot layout\n # https://github.com/pandas-dev/pandas/pull/12216#issuecomment-241175580\n result = _grouped_plot_by_column(\n plot_group,\n data,\n columns=columns,\n by=by,\n grid=grid,\n figsize=figsize,\n ax=ax,\n layout=layout,\n return_type=return_type,\n **kwds,\n )\n else:\n if return_type is None:\n return_type = "axes"\n if layout is not None:\n raise ValueError("The 'layout' keyword is not supported when 'by' is None")\n\n if ax is None:\n rc = {"figure.figsize": figsize} if figsize is not None else {}\n with plt.rc_context(rc):\n ax = plt.gca()\n data = data._get_numeric_data()\n naxes = len(data.columns)\n if naxes == 0:\n raise ValueError(\n "boxplot method requires numerical columns, nothing to plot."\n )\n if columns is None:\n 
columns = data.columns\n else:\n data = data[columns]\n\n result = plot_group(columns, data.values.T, ax, **kwds)\n ax.grid(grid)\n\n return result\n\n\ndef boxplot_frame(\n self,\n column=None,\n by=None,\n ax=None,\n fontsize: int | None = None,\n rot: int = 0,\n grid: bool = True,\n figsize: tuple[float, float] | None = None,\n layout=None,\n return_type=None,\n **kwds,\n):\n import matplotlib.pyplot as plt\n\n ax = boxplot(\n self,\n column=column,\n by=by,\n ax=ax,\n fontsize=fontsize,\n grid=grid,\n rot=rot,\n figsize=figsize,\n layout=layout,\n return_type=return_type,\n **kwds,\n )\n plt.draw_if_interactive()\n return ax\n\n\ndef boxplot_frame_groupby(\n grouped,\n subplots: bool = True,\n column=None,\n fontsize: int | None = None,\n rot: int = 0,\n grid: bool = True,\n ax=None,\n figsize: tuple[float, float] | None = None,\n layout=None,\n sharex: bool = False,\n sharey: bool = True,\n **kwds,\n):\n if subplots is True:\n naxes = len(grouped)\n fig, axes = create_subplots(\n naxes=naxes,\n squeeze=False,\n ax=ax,\n sharex=sharex,\n sharey=sharey,\n figsize=figsize,\n layout=layout,\n )\n axes = flatten_axes(axes)\n\n ret = pd.Series(dtype=object)\n\n for (key, group), ax in zip(grouped, axes):\n d = group.boxplot(\n ax=ax, column=column, fontsize=fontsize, rot=rot, grid=grid, **kwds\n )\n ax.set_title(pprint_thing(key))\n ret.loc[key] = d\n maybe_adjust_figure(fig, bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2)\n else:\n keys, frames = zip(*grouped)\n if grouped.axis == 0:\n df = pd.concat(frames, keys=keys, axis=1)\n elif len(frames) > 1:\n df = frames[0].join(frames[1::])\n else:\n df = frames[0]\n\n # GH 16748, DataFrameGroupby fails when subplots=False and `column` argument\n # is assigned, and in this case, since `df` here becomes MI after groupby,\n # so we need to couple the keys (grouped values) and column (original df\n # column) together to search for subset to plot\n if column is not None:\n column = com.convert_to_list_like(column)\n 
multi_key = pd.MultiIndex.from_product([keys, column])\n column = list(multi_key.values)\n ret = df.boxplot(\n column=column,\n fontsize=fontsize,\n rot=rot,\n grid=grid,\n ax=ax,\n figsize=figsize,\n layout=layout,\n **kwds,\n )\n return ret\n | .venv\Lib\site-packages\pandas\plotting\_matplotlib\boxplot.py | boxplot.py | Python | 18,385 | 0.95 | 0.175652 | 0.103239 | node-utils | 357 | 2025-06-23T06:51:39.083995 | GPL-3.0 | false | 9a95e37236b586a658ea9979c4193ccb |
from __future__ import annotations\n\nimport contextlib\nimport datetime as pydt\nfrom datetime import (\n datetime,\n timedelta,\n tzinfo,\n)\nimport functools\nfrom typing import (\n TYPE_CHECKING,\n Any,\n cast,\n)\nimport warnings\n\nimport matplotlib.dates as mdates\nfrom matplotlib.ticker import (\n AutoLocator,\n Formatter,\n Locator,\n)\nfrom matplotlib.transforms import nonsingular\nimport matplotlib.units as munits\nimport numpy as np\n\nfrom pandas._libs import lib\nfrom pandas._libs.tslibs import (\n Timestamp,\n to_offset,\n)\nfrom pandas._libs.tslibs.dtypes import (\n FreqGroup,\n periods_per_day,\n)\nfrom pandas._typing import (\n F,\n npt,\n)\n\nfrom pandas.core.dtypes.common import (\n is_float,\n is_float_dtype,\n is_integer,\n is_integer_dtype,\n is_nested_list_like,\n)\n\nfrom pandas import (\n Index,\n Series,\n get_option,\n)\nimport pandas.core.common as com\nfrom pandas.core.indexes.datetimes import date_range\nfrom pandas.core.indexes.period import (\n Period,\n PeriodIndex,\n period_range,\n)\nimport pandas.core.tools.datetimes as tools\n\nif TYPE_CHECKING:\n from collections.abc import Generator\n\n from matplotlib.axis import Axis\n\n from pandas._libs.tslibs.offsets import BaseOffset\n\n\n_mpl_units = {} # Cache for units overwritten by us\n\n\ndef get_pairs():\n pairs = [\n (Timestamp, DatetimeConverter),\n (Period, PeriodConverter),\n (pydt.datetime, DatetimeConverter),\n (pydt.date, DatetimeConverter),\n (pydt.time, TimeConverter),\n (np.datetime64, DatetimeConverter),\n ]\n return pairs\n\n\ndef register_pandas_matplotlib_converters(func: F) -> F:\n """\n Decorator applying pandas_converters.\n """\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n with pandas_converters():\n return func(*args, **kwargs)\n\n return cast(F, wrapper)\n\n\n@contextlib.contextmanager\ndef pandas_converters() -> Generator[None, None, None]:\n """\n Context manager registering pandas' converters for a plot.\n\n See Also\n --------\n 
register_pandas_matplotlib_converters : Decorator that applies this.\n """\n value = get_option("plotting.matplotlib.register_converters")\n\n if value:\n # register for True or "auto"\n register()\n try:\n yield\n finally:\n if value == "auto":\n # only deregister for "auto"\n deregister()\n\n\ndef register() -> None:\n pairs = get_pairs()\n for type_, cls in pairs:\n # Cache previous converter if present\n if type_ in munits.registry and not isinstance(munits.registry[type_], cls):\n previous = munits.registry[type_]\n _mpl_units[type_] = previous\n # Replace with pandas converter\n munits.registry[type_] = cls()\n\n\ndef deregister() -> None:\n # Renamed in pandas.plotting.__init__\n for type_, cls in get_pairs():\n # We use type to catch our classes directly, no inheritance\n if type(munits.registry.get(type_)) is cls:\n munits.registry.pop(type_)\n\n # restore the old keys\n for unit, formatter in _mpl_units.items():\n if type(formatter) not in {DatetimeConverter, PeriodConverter, TimeConverter}:\n # make it idempotent by excluding ours.\n munits.registry[unit] = formatter\n\n\ndef _to_ordinalf(tm: pydt.time) -> float:\n tot_sec = tm.hour * 3600 + tm.minute * 60 + tm.second + tm.microsecond / 10**6\n return tot_sec\n\n\ndef time2num(d):\n if isinstance(d, str):\n parsed = Timestamp(d)\n return _to_ordinalf(parsed.time())\n if isinstance(d, pydt.time):\n return _to_ordinalf(d)\n return d\n\n\nclass TimeConverter(munits.ConversionInterface):\n @staticmethod\n def convert(value, unit, axis):\n valid_types = (str, pydt.time)\n if isinstance(value, valid_types) or is_integer(value) or is_float(value):\n return time2num(value)\n if isinstance(value, Index):\n return value.map(time2num)\n if isinstance(value, (list, tuple, np.ndarray, Index)):\n return [time2num(x) for x in value]\n return value\n\n @staticmethod\n def axisinfo(unit, axis) -> munits.AxisInfo | None:\n if unit != "time":\n return None\n\n majloc = AutoLocator()\n majfmt = TimeFormatter(majloc)\n 
return munits.AxisInfo(majloc=majloc, majfmt=majfmt, label="time")\n\n @staticmethod\n def default_units(x, axis) -> str:\n return "time"\n\n\n# time formatter\nclass TimeFormatter(Formatter):\n def __init__(self, locs) -> None:\n self.locs = locs\n\n def __call__(self, x, pos: int | None = 0) -> str:\n """\n Return the time of day as a formatted string.\n\n Parameters\n ----------\n x : float\n The time of day specified as seconds since 00:00 (midnight),\n with up to microsecond precision.\n pos\n Unused\n\n Returns\n -------\n str\n A string in HH:MM:SS.mmmuuu format. Microseconds,\n milliseconds and seconds are only displayed if non-zero.\n """\n fmt = "%H:%M:%S.%f"\n s = int(x)\n msus = round((x - s) * 10**6)\n ms = msus // 1000\n us = msus % 1000\n m, s = divmod(s, 60)\n h, m = divmod(m, 60)\n _, h = divmod(h, 24)\n if us != 0:\n return pydt.time(h, m, s, msus).strftime(fmt)\n elif ms != 0:\n return pydt.time(h, m, s, msus).strftime(fmt)[:-3]\n elif s != 0:\n return pydt.time(h, m, s).strftime("%H:%M:%S")\n\n return pydt.time(h, m).strftime("%H:%M")\n\n\n# Period Conversion\n\n\nclass PeriodConverter(mdates.DateConverter):\n @staticmethod\n def convert(values, units, axis):\n if is_nested_list_like(values):\n values = [PeriodConverter._convert_1d(v, units, axis) for v in values]\n else:\n values = PeriodConverter._convert_1d(values, units, axis)\n return values\n\n @staticmethod\n def _convert_1d(values, units, axis):\n if not hasattr(axis, "freq"):\n raise TypeError("Axis must have `freq` set to convert to Periods")\n valid_types = (str, datetime, Period, pydt.date, pydt.time, np.datetime64)\n with warnings.catch_warnings():\n warnings.filterwarnings(\n "ignore", "Period with BDay freq is deprecated", category=FutureWarning\n )\n warnings.filterwarnings(\n "ignore", r"PeriodDtype\[B\] is deprecated", category=FutureWarning\n )\n if (\n isinstance(values, valid_types)\n or is_integer(values)\n or is_float(values)\n ):\n return get_datevalue(values, 
axis.freq)\n elif isinstance(values, PeriodIndex):\n return values.asfreq(axis.freq).asi8\n elif isinstance(values, Index):\n return values.map(lambda x: get_datevalue(x, axis.freq))\n elif lib.infer_dtype(values, skipna=False) == "period":\n # https://github.com/pandas-dev/pandas/issues/24304\n # convert ndarray[period] -> PeriodIndex\n return PeriodIndex(values, freq=axis.freq).asi8\n elif isinstance(values, (list, tuple, np.ndarray, Index)):\n return [get_datevalue(x, axis.freq) for x in values]\n return values\n\n\ndef get_datevalue(date, freq):\n if isinstance(date, Period):\n return date.asfreq(freq).ordinal\n elif isinstance(date, (str, datetime, pydt.date, pydt.time, np.datetime64)):\n return Period(date, freq).ordinal\n elif (\n is_integer(date)\n or is_float(date)\n or (isinstance(date, (np.ndarray, Index)) and (date.size == 1))\n ):\n return date\n elif date is None:\n return None\n raise ValueError(f"Unrecognizable date '{date}'")\n\n\n# Datetime Conversion\nclass DatetimeConverter(mdates.DateConverter):\n @staticmethod\n def convert(values, unit, axis):\n # values might be a 1-d array, or a list-like of arrays.\n if is_nested_list_like(values):\n values = [DatetimeConverter._convert_1d(v, unit, axis) for v in values]\n else:\n values = DatetimeConverter._convert_1d(values, unit, axis)\n return values\n\n @staticmethod\n def _convert_1d(values, unit, axis):\n def try_parse(values):\n try:\n return mdates.date2num(tools.to_datetime(values))\n except Exception:\n return values\n\n if isinstance(values, (datetime, pydt.date, np.datetime64, pydt.time)):\n return mdates.date2num(values)\n elif is_integer(values) or is_float(values):\n return values\n elif isinstance(values, str):\n return try_parse(values)\n elif isinstance(values, (list, tuple, np.ndarray, Index, Series)):\n if isinstance(values, Series):\n # https://github.com/matplotlib/matplotlib/issues/11391\n # Series was skipped. 
Convert to DatetimeIndex to get asi8\n values = Index(values)\n if isinstance(values, Index):\n values = values.values\n if not isinstance(values, np.ndarray):\n values = com.asarray_tuplesafe(values)\n\n if is_integer_dtype(values) or is_float_dtype(values):\n return values\n\n try:\n values = tools.to_datetime(values)\n except Exception:\n pass\n\n values = mdates.date2num(values)\n\n return values\n\n @staticmethod\n def axisinfo(unit: tzinfo | None, axis) -> munits.AxisInfo:\n """\n Return the :class:`~matplotlib.units.AxisInfo` for *unit*.\n\n *unit* is a tzinfo instance or None.\n The *axis* argument is required but not used.\n """\n tz = unit\n\n majloc = PandasAutoDateLocator(tz=tz)\n majfmt = PandasAutoDateFormatter(majloc, tz=tz)\n datemin = pydt.date(2000, 1, 1)\n datemax = pydt.date(2010, 1, 1)\n\n return munits.AxisInfo(\n majloc=majloc, majfmt=majfmt, label="", default_limits=(datemin, datemax)\n )\n\n\nclass PandasAutoDateFormatter(mdates.AutoDateFormatter):\n def __init__(self, locator, tz=None, defaultfmt: str = "%Y-%m-%d") -> None:\n mdates.AutoDateFormatter.__init__(self, locator, tz, defaultfmt)\n\n\nclass PandasAutoDateLocator(mdates.AutoDateLocator):\n def get_locator(self, dmin, dmax):\n """Pick the best locator based on a distance."""\n tot_sec = (dmax - dmin).total_seconds()\n\n if abs(tot_sec) < self.minticks:\n self._freq = -1\n locator = MilliSecondLocator(self.tz)\n locator.set_axis(self.axis)\n\n # error: Item "None" of "Axis | _DummyAxis | _AxisWrapper | None"\n # has no attribute "get_data_interval"\n locator.axis.set_view_interval( # type: ignore[union-attr]\n *self.axis.get_view_interval() # type: ignore[union-attr]\n )\n locator.axis.set_data_interval( # type: ignore[union-attr]\n *self.axis.get_data_interval() # type: ignore[union-attr]\n )\n return locator\n\n return mdates.AutoDateLocator.get_locator(self, dmin, dmax)\n\n def _get_unit(self):\n return MilliSecondLocator.get_unit_generic(self._freq)\n\n\nclass 
MilliSecondLocator(mdates.DateLocator):\n UNIT = 1.0 / (24 * 3600 * 1000)\n\n def __init__(self, tz) -> None:\n mdates.DateLocator.__init__(self, tz)\n self._interval = 1.0\n\n def _get_unit(self):\n return self.get_unit_generic(-1)\n\n @staticmethod\n def get_unit_generic(freq):\n unit = mdates.RRuleLocator.get_unit_generic(freq)\n if unit < 0:\n return MilliSecondLocator.UNIT\n return unit\n\n def __call__(self):\n # if no data have been set, this will tank with a ValueError\n try:\n dmin, dmax = self.viewlim_to_dt()\n except ValueError:\n return []\n\n # We need to cap at the endpoints of valid datetime\n nmax, nmin = mdates.date2num((dmax, dmin))\n\n num = (nmax - nmin) * 86400 * 1000\n max_millis_ticks = 6\n for interval in [1, 10, 50, 100, 200, 500]:\n if num <= interval * (max_millis_ticks - 1):\n self._interval = interval\n break\n # We went through the whole loop without breaking, default to 1\n self._interval = 1000.0\n\n estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())\n\n if estimate > self.MAXTICKS * 2:\n raise RuntimeError(\n "MillisecondLocator estimated to generate "\n f"{estimate:d} ticks from {dmin} to {dmax}: exceeds Locator.MAXTICKS"\n f"* 2 ({self.MAXTICKS * 2:d}) "\n )\n\n interval = self._get_interval()\n freq = f"{interval}ms"\n tz = self.tz.tzname(None)\n st = dmin.replace(tzinfo=None)\n ed = dmin.replace(tzinfo=None)\n all_dates = date_range(start=st, end=ed, freq=freq, tz=tz).astype(object)\n\n try:\n if len(all_dates) > 0:\n locs = self.raise_if_exceeds(mdates.date2num(all_dates))\n return locs\n except Exception: # pragma: no cover\n pass\n\n lims = mdates.date2num([dmin, dmax])\n return lims\n\n def _get_interval(self):\n return self._interval\n\n def autoscale(self):\n """\n Set the view limits to include the data range.\n """\n # We need to cap at the endpoints of valid datetime\n dmin, dmax = self.datalim_to_dt()\n\n vmin = mdates.date2num(dmin)\n vmax = mdates.date2num(dmax)\n\n return self.nonsingular(vmin, 
vmax)


def _from_ordinal(x, tz: tzinfo | None = None) -> datetime:
    """
    Convert a matplotlib-style float ordinal back to a ``datetime``.

    The integer part is the proleptic Gregorian ordinal day; the fractional
    part encodes the time of day, peeled off digit-group by digit-group via
    repeated ``divmod``.
    """
    ix = int(x)
    dt = datetime.fromordinal(ix)
    remainder = float(x) - ix
    hour, remainder = divmod(24 * remainder, 1)
    minute, remainder = divmod(60 * remainder, 1)
    second, remainder = divmod(60 * remainder, 1)
    microsecond = int(1_000_000 * remainder)
    if microsecond < 10:
        microsecond = 0  # compensate for rounding errors
    dt = datetime(
        dt.year, dt.month, dt.day, int(hour), int(minute), int(second), microsecond
    )
    if tz is not None:
        dt = dt.astimezone(tz)

    if microsecond > 999990:  # compensate for rounding errors
        dt += timedelta(microseconds=1_000_000 - microsecond)

    return dt


# Fixed frequency dynamic tick locators and formatters

# -------------------------------------------------------------------------
# --- Locators ---
# -------------------------------------------------------------------------


def _get_default_annual_spacing(nyears) -> tuple[int, int]:
    """
    Returns a default spacing between consecutive ticks for annual data.
    """
    # Returned as (minor spacing, major spacing) in years; the bands widen
    # as the span grows so the axis never gets overcrowded.
    if nyears < 11:
        (min_spacing, maj_spacing) = (1, 1)
    elif nyears < 20:
        (min_spacing, maj_spacing) = (1, 2)
    elif nyears < 50:
        (min_spacing, maj_spacing) = (1, 5)
    elif nyears < 100:
        (min_spacing, maj_spacing) = (5, 10)
    elif nyears < 200:
        (min_spacing, maj_spacing) = (5, 25)
    elif nyears < 600:
        (min_spacing, maj_spacing) = (10, 50)
    else:
        factor = nyears // 1000 + 1
        (min_spacing, maj_spacing) = (factor * 20, factor * 100)
    return (min_spacing, maj_spacing)


def _period_break(dates: PeriodIndex, period: str) -> npt.NDArray[np.intp]:
    """
    Returns the indices where the given period changes.

    Parameters
    ----------
    dates : PeriodIndex
        Array of intervals to monitor.
    period : str
        Name of the period to monitor.
    """
    mask = _period_break_mask(dates, period)
    return np.nonzero(mask)[0]


def _period_break_mask(dates: PeriodIndex, period: str) -> npt.NDArray[np.bool_]:
    # True where the named field (e.g. "year", "month") differs from the
    # previous period, i.e. at each boundary of that field.
    current =
getattr(dates, period)\n previous = getattr(dates - 1 * dates.freq, period)\n return current != previous\n\n\ndef has_level_label(label_flags: npt.NDArray[np.intp], vmin: float) -> bool:\n """\n Returns true if the ``label_flags`` indicate there is at least one label\n for this level.\n\n if the minimum view limit is not an exact integer, then the first tick\n label won't be shown, so we must adjust for that.\n """\n if label_flags.size == 0 or (\n label_flags.size == 1 and label_flags[0] == 0 and vmin % 1 > 0.0\n ):\n return False\n else:\n return True\n\n\ndef _get_periods_per_ymd(freq: BaseOffset) -> tuple[int, int, int]:\n # error: "BaseOffset" has no attribute "_period_dtype_code"\n dtype_code = freq._period_dtype_code # type: ignore[attr-defined]\n freq_group = FreqGroup.from_period_dtype_code(dtype_code)\n\n ppd = -1 # placeholder for above-day freqs\n\n if dtype_code >= FreqGroup.FR_HR.value:\n # error: "BaseOffset" has no attribute "_creso"\n ppd = periods_per_day(freq._creso) # type: ignore[attr-defined]\n ppm = 28 * ppd\n ppy = 365 * ppd\n elif freq_group == FreqGroup.FR_BUS:\n ppm = 19\n ppy = 261\n elif freq_group == FreqGroup.FR_DAY:\n ppm = 28\n ppy = 365\n elif freq_group == FreqGroup.FR_WK:\n ppm = 3\n ppy = 52\n elif freq_group == FreqGroup.FR_MTH:\n ppm = 1\n ppy = 12\n elif freq_group == FreqGroup.FR_QTR:\n ppm = -1 # placerholder\n ppy = 4\n elif freq_group == FreqGroup.FR_ANN:\n ppm = -1 # placeholder\n ppy = 1\n else:\n raise NotImplementedError(f"Unsupported frequency: {dtype_code}")\n\n return ppd, ppm, ppy\n\n\n@functools.cache\ndef _daily_finder(vmin: float, vmax: float, freq: BaseOffset) -> np.ndarray:\n # error: "BaseOffset" has no attribute "_period_dtype_code"\n dtype_code = freq._period_dtype_code # type: ignore[attr-defined]\n\n periodsperday, periodspermonth, periodsperyear = _get_periods_per_ymd(freq)\n\n # save this for later usage\n vmin_orig = vmin\n (vmin, vmax) = (int(vmin), int(vmax))\n span = vmax - vmin + 1\n\n with 
warnings.catch_warnings():\n warnings.filterwarnings(\n "ignore", "Period with BDay freq is deprecated", category=FutureWarning\n )\n warnings.filterwarnings(\n "ignore", r"PeriodDtype\[B\] is deprecated", category=FutureWarning\n )\n dates_ = period_range(\n start=Period(ordinal=vmin, freq=freq),\n end=Period(ordinal=vmax, freq=freq),\n freq=freq,\n )\n\n # Initialize the output\n info = np.zeros(\n span, dtype=[("val", np.int64), ("maj", bool), ("min", bool), ("fmt", "|S20")]\n )\n info["val"][:] = dates_.asi8\n info["fmt"][:] = ""\n info["maj"][[0, -1]] = True\n # .. and set some shortcuts\n info_maj = info["maj"]\n info_min = info["min"]\n info_fmt = info["fmt"]\n\n def first_label(label_flags):\n if (label_flags[0] == 0) and (label_flags.size > 1) and ((vmin_orig % 1) > 0.0):\n return label_flags[1]\n else:\n return label_flags[0]\n\n # Case 1. Less than a month\n if span <= periodspermonth:\n day_start = _period_break(dates_, "day")\n month_start = _period_break(dates_, "month")\n year_start = _period_break(dates_, "year")\n\n def _hour_finder(label_interval: int, force_year_start: bool) -> None:\n target = dates_.hour\n mask = _period_break_mask(dates_, "hour")\n info_maj[day_start] = True\n info_min[mask & (target % label_interval == 0)] = True\n info_fmt[mask & (target % label_interval == 0)] = "%H:%M"\n info_fmt[day_start] = "%H:%M\n%d-%b"\n info_fmt[year_start] = "%H:%M\n%d-%b\n%Y"\n if force_year_start and not has_level_label(year_start, vmin_orig):\n info_fmt[first_label(day_start)] = "%H:%M\n%d-%b\n%Y"\n\n def _minute_finder(label_interval: int) -> None:\n target = dates_.minute\n hour_start = _period_break(dates_, "hour")\n mask = _period_break_mask(dates_, "minute")\n info_maj[hour_start] = True\n info_min[mask & (target % label_interval == 0)] = True\n info_fmt[mask & (target % label_interval == 0)] = "%H:%M"\n info_fmt[day_start] = "%H:%M\n%d-%b"\n info_fmt[year_start] = "%H:%M\n%d-%b\n%Y"\n\n def _second_finder(label_interval: int) -> None:\n 
target = dates_.second\n minute_start = _period_break(dates_, "minute")\n mask = _period_break_mask(dates_, "second")\n info_maj[minute_start] = True\n info_min[mask & (target % label_interval == 0)] = True\n info_fmt[mask & (target % label_interval == 0)] = "%H:%M:%S"\n info_fmt[day_start] = "%H:%M:%S\n%d-%b"\n info_fmt[year_start] = "%H:%M:%S\n%d-%b\n%Y"\n\n if span < periodsperday / 12000:\n _second_finder(1)\n elif span < periodsperday / 6000:\n _second_finder(2)\n elif span < periodsperday / 2400:\n _second_finder(5)\n elif span < periodsperday / 1200:\n _second_finder(10)\n elif span < periodsperday / 800:\n _second_finder(15)\n elif span < periodsperday / 400:\n _second_finder(30)\n elif span < periodsperday / 150:\n _minute_finder(1)\n elif span < periodsperday / 70:\n _minute_finder(2)\n elif span < periodsperday / 24:\n _minute_finder(5)\n elif span < periodsperday / 12:\n _minute_finder(15)\n elif span < periodsperday / 6:\n _minute_finder(30)\n elif span < periodsperday / 2.5:\n _hour_finder(1, False)\n elif span < periodsperday / 1.5:\n _hour_finder(2, False)\n elif span < periodsperday * 1.25:\n _hour_finder(3, False)\n elif span < periodsperday * 2.5:\n _hour_finder(6, True)\n elif span < periodsperday * 4:\n _hour_finder(12, True)\n else:\n info_maj[month_start] = True\n info_min[day_start] = True\n info_fmt[day_start] = "%d"\n info_fmt[month_start] = "%d\n%b"\n info_fmt[year_start] = "%d\n%b\n%Y"\n if not has_level_label(year_start, vmin_orig):\n if not has_level_label(month_start, vmin_orig):\n info_fmt[first_label(day_start)] = "%d\n%b\n%Y"\n else:\n info_fmt[first_label(month_start)] = "%d\n%b\n%Y"\n\n # Case 2. 
Less than three months\n elif span <= periodsperyear // 4:\n month_start = _period_break(dates_, "month")\n info_maj[month_start] = True\n if dtype_code < FreqGroup.FR_HR.value:\n info["min"] = True\n else:\n day_start = _period_break(dates_, "day")\n info["min"][day_start] = True\n week_start = _period_break(dates_, "week")\n year_start = _period_break(dates_, "year")\n info_fmt[week_start] = "%d"\n info_fmt[month_start] = "\n\n%b"\n info_fmt[year_start] = "\n\n%b\n%Y"\n if not has_level_label(year_start, vmin_orig):\n if not has_level_label(month_start, vmin_orig):\n info_fmt[first_label(week_start)] = "\n\n%b\n%Y"\n else:\n info_fmt[first_label(month_start)] = "\n\n%b\n%Y"\n # Case 3. Less than 14 months ...............\n elif span <= 1.15 * periodsperyear:\n year_start = _period_break(dates_, "year")\n month_start = _period_break(dates_, "month")\n week_start = _period_break(dates_, "week")\n info_maj[month_start] = True\n info_min[week_start] = True\n info_min[year_start] = False\n info_min[month_start] = False\n info_fmt[month_start] = "%b"\n info_fmt[year_start] = "%b\n%Y"\n if not has_level_label(year_start, vmin_orig):\n info_fmt[first_label(month_start)] = "%b\n%Y"\n # Case 4. Less than 2.5 years ...............\n elif span <= 2.5 * periodsperyear:\n year_start = _period_break(dates_, "year")\n quarter_start = _period_break(dates_, "quarter")\n month_start = _period_break(dates_, "month")\n info_maj[quarter_start] = True\n info_min[month_start] = True\n info_fmt[quarter_start] = "%b"\n info_fmt[year_start] = "%b\n%Y"\n # Case 4. 
Less than 4 years .................\n elif span <= 4 * periodsperyear:\n year_start = _period_break(dates_, "year")\n month_start = _period_break(dates_, "month")\n info_maj[year_start] = True\n info_min[month_start] = True\n info_min[year_start] = False\n\n month_break = dates_[month_start].month\n jan_or_jul = month_start[(month_break == 1) | (month_break == 7)]\n info_fmt[jan_or_jul] = "%b"\n info_fmt[year_start] = "%b\n%Y"\n # Case 5. Less than 11 years ................\n elif span <= 11 * periodsperyear:\n year_start = _period_break(dates_, "year")\n quarter_start = _period_break(dates_, "quarter")\n info_maj[year_start] = True\n info_min[quarter_start] = True\n info_min[year_start] = False\n info_fmt[year_start] = "%Y"\n # Case 6. More than 12 years ................\n else:\n year_start = _period_break(dates_, "year")\n year_break = dates_[year_start].year\n nyears = span / periodsperyear\n (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)\n major_idx = year_start[(year_break % maj_anndef == 0)]\n info_maj[major_idx] = True\n minor_idx = year_start[(year_break % min_anndef == 0)]\n info_min[minor_idx] = True\n info_fmt[major_idx] = "%Y"\n\n return info\n\n\n@functools.cache\ndef _monthly_finder(vmin: float, vmax: float, freq: BaseOffset) -> np.ndarray:\n _, _, periodsperyear = _get_periods_per_ymd(freq)\n\n vmin_orig = vmin\n (vmin, vmax) = (int(vmin), int(vmax))\n span = vmax - vmin + 1\n\n # Initialize the output\n info = np.zeros(\n span, dtype=[("val", int), ("maj", bool), ("min", bool), ("fmt", "|S8")]\n )\n info["val"] = np.arange(vmin, vmax + 1)\n dates_ = info["val"]\n info["fmt"] = ""\n year_start = (dates_ % 12 == 0).nonzero()[0]\n info_maj = info["maj"]\n info_fmt = info["fmt"]\n\n if span <= 1.15 * periodsperyear:\n info_maj[year_start] = True\n info["min"] = True\n\n info_fmt[:] = "%b"\n info_fmt[year_start] = "%b\n%Y"\n\n if not has_level_label(year_start, vmin_orig):\n if dates_.size > 1:\n idx = 1\n else:\n idx = 0\n info_fmt[idx] 
= "%b\n%Y"\n\n elif span <= 2.5 * periodsperyear:\n quarter_start = (dates_ % 3 == 0).nonzero()\n info_maj[year_start] = True\n # TODO: Check the following : is it really info['fmt'] ?\n # 2023-09-15 this is reached in test_finder_monthly\n info["fmt"][quarter_start] = True\n info["min"] = True\n\n info_fmt[quarter_start] = "%b"\n info_fmt[year_start] = "%b\n%Y"\n\n elif span <= 4 * periodsperyear:\n info_maj[year_start] = True\n info["min"] = True\n\n jan_or_jul = (dates_ % 12 == 0) | (dates_ % 12 == 6)\n info_fmt[jan_or_jul] = "%b"\n info_fmt[year_start] = "%b\n%Y"\n\n elif span <= 11 * periodsperyear:\n quarter_start = (dates_ % 3 == 0).nonzero()\n info_maj[year_start] = True\n info["min"][quarter_start] = True\n\n info_fmt[year_start] = "%Y"\n\n else:\n nyears = span / periodsperyear\n (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)\n years = dates_[year_start] // 12 + 1\n major_idx = year_start[(years % maj_anndef == 0)]\n info_maj[major_idx] = True\n info["min"][year_start[(years % min_anndef == 0)]] = True\n\n info_fmt[major_idx] = "%Y"\n\n return info\n\n\n@functools.cache\ndef _quarterly_finder(vmin: float, vmax: float, freq: BaseOffset) -> np.ndarray:\n _, _, periodsperyear = _get_periods_per_ymd(freq)\n vmin_orig = vmin\n (vmin, vmax) = (int(vmin), int(vmax))\n span = vmax - vmin + 1\n\n info = np.zeros(\n span, dtype=[("val", int), ("maj", bool), ("min", bool), ("fmt", "|S8")]\n )\n info["val"] = np.arange(vmin, vmax + 1)\n info["fmt"] = ""\n dates_ = info["val"]\n info_maj = info["maj"]\n info_fmt = info["fmt"]\n year_start = (dates_ % 4 == 0).nonzero()[0]\n\n if span <= 3.5 * periodsperyear:\n info_maj[year_start] = True\n info["min"] = True\n\n info_fmt[:] = "Q%q"\n info_fmt[year_start] = "Q%q\n%F"\n if not has_level_label(year_start, vmin_orig):\n if dates_.size > 1:\n idx = 1\n else:\n idx = 0\n info_fmt[idx] = "Q%q\n%F"\n\n elif span <= 11 * periodsperyear:\n info_maj[year_start] = True\n info["min"] = True\n info_fmt[year_start] = 
"%F"\n\n else:\n # https://github.com/pandas-dev/pandas/pull/47602\n years = dates_[year_start] // 4 + 1970\n nyears = span / periodsperyear\n (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)\n major_idx = year_start[(years % maj_anndef == 0)]\n info_maj[major_idx] = True\n info["min"][year_start[(years % min_anndef == 0)]] = True\n info_fmt[major_idx] = "%F"\n\n return info\n\n\n@functools.cache\ndef _annual_finder(vmin: float, vmax: float, freq: BaseOffset) -> np.ndarray:\n # Note: small difference here vs other finders in adding 1 to vmax\n (vmin, vmax) = (int(vmin), int(vmax + 1))\n span = vmax - vmin + 1\n\n info = np.zeros(\n span, dtype=[("val", int), ("maj", bool), ("min", bool), ("fmt", "|S8")]\n )\n info["val"] = np.arange(vmin, vmax + 1)\n info["fmt"] = ""\n dates_ = info["val"]\n\n (min_anndef, maj_anndef) = _get_default_annual_spacing(span)\n major_idx = dates_ % maj_anndef == 0\n minor_idx = dates_ % min_anndef == 0\n info["maj"][major_idx] = True\n info["min"][minor_idx] = True\n info["fmt"][major_idx] = "%Y"\n\n return info\n\n\ndef get_finder(freq: BaseOffset):\n # error: "BaseOffset" has no attribute "_period_dtype_code"\n dtype_code = freq._period_dtype_code # type: ignore[attr-defined]\n fgroup = FreqGroup.from_period_dtype_code(dtype_code)\n\n if fgroup == FreqGroup.FR_ANN:\n return _annual_finder\n elif fgroup == FreqGroup.FR_QTR:\n return _quarterly_finder\n elif fgroup == FreqGroup.FR_MTH:\n return _monthly_finder\n elif (dtype_code >= FreqGroup.FR_BUS.value) or fgroup == FreqGroup.FR_WK:\n return _daily_finder\n else: # pragma: no cover\n raise NotImplementedError(f"Unsupported frequency: {dtype_code}")\n\n\nclass TimeSeries_DateLocator(Locator):\n """\n Locates the ticks along an axis controlled by a :class:`Series`.\n\n Parameters\n ----------\n freq : BaseOffset\n Valid frequency specifier.\n minor_locator : {False, True}, optional\n Whether the locator is for minor ticks (True) or not.\n dynamic_mode : {True, False}, 
optional\n Whether the locator should work in dynamic mode.\n base : {int}, optional\n quarter : {int}, optional\n month : {int}, optional\n day : {int}, optional\n """\n\n axis: Axis\n\n def __init__(\n self,\n freq: BaseOffset,\n minor_locator: bool = False,\n dynamic_mode: bool = True,\n base: int = 1,\n quarter: int = 1,\n month: int = 1,\n day: int = 1,\n plot_obj=None,\n ) -> None:\n freq = to_offset(freq, is_period=True)\n self.freq = freq\n self.base = base\n (self.quarter, self.month, self.day) = (quarter, month, day)\n self.isminor = minor_locator\n self.isdynamic = dynamic_mode\n self.offset = 0\n self.plot_obj = plot_obj\n self.finder = get_finder(freq)\n\n def _get_default_locs(self, vmin, vmax):\n """Returns the default locations of ticks."""\n locator = self.finder(vmin, vmax, self.freq)\n\n if self.isminor:\n return np.compress(locator["min"], locator["val"])\n return np.compress(locator["maj"], locator["val"])\n\n def __call__(self):\n """Return the locations of the ticks."""\n # axis calls Locator.set_axis inside set_m<xxxx>_formatter\n\n vi = tuple(self.axis.get_view_interval())\n vmin, vmax = vi\n if vmax < vmin:\n vmin, vmax = vmax, vmin\n if self.isdynamic:\n locs = self._get_default_locs(vmin, vmax)\n else: # pragma: no cover\n base = self.base\n (d, m) = divmod(vmin, base)\n vmin = (d + 1) * base\n # error: No overload variant of "range" matches argument types "float",\n # "float", "int"\n locs = list(range(vmin, vmax + 1, base)) # type: ignore[call-overload]\n return locs\n\n def autoscale(self):\n """\n Sets the view limits to the nearest multiples of base that contain the\n data.\n """\n # requires matplotlib >= 0.98.0\n (vmin, vmax) = self.axis.get_data_interval()\n\n locs = self._get_default_locs(vmin, vmax)\n (vmin, vmax) = locs[[0, -1]]\n if vmin == vmax:\n vmin -= 1\n vmax += 1\n return nonsingular(vmin, vmax)\n\n\n# -------------------------------------------------------------------------\n# --- Formatter ---\n# 
-------------------------------------------------------------------------


class TimeSeries_DateFormatter(Formatter):
    """
    Formats the ticks along an axis controlled by a :class:`PeriodIndex`.

    Parameters
    ----------
    freq : BaseOffset
        Valid frequency specifier.
    minor_locator : bool, default False
        Whether the current formatter should apply to minor ticks (True) or
        major ticks (False).
    dynamic_mode : bool, default True
        Whether the formatter works in dynamic mode or not.
    """

    axis: Axis

    def __init__(
        self,
        freq: BaseOffset,
        minor_locator: bool = False,
        dynamic_mode: bool = True,
        plot_obj=None,
    ) -> None:
        freq = to_offset(freq, is_period=True)
        self.format = None
        self.freq = freq
        self.locs: list[Any] = []  # unused, for matplotlib compat
        # Maps tick position (period ordinal) -> strftime format; populated
        # lazily by _set_default_format via set_locs.
        self.formatdict: dict[Any, Any] | None = None
        self.isminor = minor_locator
        self.isdynamic = dynamic_mode
        self.offset = 0
        self.plot_obj = plot_obj
        self.finder = get_finder(freq)

    def _set_default_format(self, vmin, vmax):
        """Returns the default ticks spacing."""
        info = self.finder(vmin, vmax, self.freq)

        # Minor formatter skips positions that already carry a major label.
        if self.isminor:
            format = np.compress(info["min"] & np.logical_not(info["maj"]), info)
        else:
            format = np.compress(info["maj"], info)
        # info rows are (val, maj, min, fmt) structured records.
        self.formatdict = {x: f for (x, _, _, f) in format}
        return self.formatdict

    def set_locs(self, locs) -> None:
        """Sets the locations of the ticks"""
        # don't actually use the locs. This is just needed to work with
        # matplotlib. Force to use vmin, vmax

        self.locs = locs

        (vmin, vmax) = tuple(self.axis.get_view_interval())
        if vmax < vmin:
            (vmin, vmax) = (vmax, vmin)
        self._set_default_format(vmin, vmax)

    def __call__(self, x, pos: int | None = 0) -> str:
        # Before set_locs has run there is nothing to format.
        if self.formatdict is None:
            return ""
        else:
            # Each position is labeled at most once per set_locs pass.
            fmt = self.formatdict.pop(x, "")
            if isinstance(fmt, np.bytes_):
                # finder arrays store formats as fixed-width bytes ("|S20").
                fmt = fmt.decode("utf-8")
            with warnings.catch_warnings():
                warnings.filterwarnings(
                    "ignore",
                    "Period with BDay freq is deprecated",
                    category=FutureWarning,
                )
                period = Period(ordinal=int(x), freq=self.freq)
            assert isinstance(period, Period)
            return period.strftime(fmt)


class TimeSeries_TimedeltaFormatter(Formatter):
    """
    Formats the ticks along an axis controlled by a :class:`TimedeltaIndex`.
    """

    axis: Axis

    @staticmethod
    def format_timedelta_ticks(x, pos, n_decimals: int) -> str:
        """
        Convert seconds to 'D days HH:MM:SS.F'
        """
        s, ns = divmod(x, 10**9)  # TODO(non-nano): this looks like it assumes ns
        m, s = divmod(s, 60)
        h, m = divmod(m, 60)
        d, h = divmod(h, 24)
        decimals = int(ns * 10 ** (n_decimals - 9))
        s = f"{int(h):02d}:{int(m):02d}:{int(s):02d}"
        if n_decimals > 0:
            s += f".{decimals:0{n_decimals}d}"
        if d != 0:
            s = f"{int(d):d} days {s}"
        return s

    def __call__(self, x, pos: int | None = 0) -> str:
        (vmin, vmax) = tuple(self.axis.get_view_interval())
        # Show just enough fractional digits to resolve ~1/100th of the
        # visible span, capped at nanosecond precision (9 digits).
        n_decimals = min(int(np.ceil(np.log10(100 * 10**9 / abs(vmax - vmin)))), 9)
        return self.format_timedelta_ticks(x, pos, n_decimals)
from __future__ import annotations\n\nfrom abc import (\n ABC,\n abstractmethod,\n)\nfrom collections.abc import (\n Hashable,\n Iterable,\n Iterator,\n Sequence,\n)\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Literal,\n cast,\n final,\n)\nimport warnings\n\nimport matplotlib as mpl\nimport numpy as np\n\nfrom pandas._libs import lib\nfrom pandas.errors import AbstractMethodError\nfrom pandas.util._decorators import cache_readonly\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.common import (\n is_any_real_numeric_dtype,\n is_bool,\n is_float,\n is_float_dtype,\n is_hashable,\n is_integer,\n is_integer_dtype,\n is_iterator,\n is_list_like,\n is_number,\n is_numeric_dtype,\n)\nfrom pandas.core.dtypes.dtypes import (\n CategoricalDtype,\n ExtensionDtype,\n)\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCDatetimeIndex,\n ABCIndex,\n ABCMultiIndex,\n ABCPeriodIndex,\n ABCSeries,\n)\nfrom pandas.core.dtypes.missing import isna\n\nimport pandas.core.common as com\nfrom pandas.core.frame import DataFrame\nfrom pandas.util.version import Version\n\nfrom pandas.io.formats.printing import pprint_thing\nfrom pandas.plotting._matplotlib import tools\nfrom pandas.plotting._matplotlib.converter import register_pandas_matplotlib_converters\nfrom pandas.plotting._matplotlib.groupby import reconstruct_data_with_by\nfrom pandas.plotting._matplotlib.misc import unpack_single_str_list\nfrom pandas.plotting._matplotlib.style import get_standard_colors\nfrom pandas.plotting._matplotlib.timeseries import (\n decorate_axes,\n format_dateaxis,\n maybe_convert_index,\n maybe_resample,\n use_dynamic_x,\n)\nfrom pandas.plotting._matplotlib.tools import (\n create_subplots,\n flatten_axes,\n format_date_labels,\n get_all_lines,\n get_xlim,\n handle_shared_axes,\n)\n\nif TYPE_CHECKING:\n from matplotlib.artist import Artist\n from matplotlib.axes import Axes\n from matplotlib.axis import Axis\n from matplotlib.figure import Figure\n\n from 
pandas._typing import (\n IndexLabel,\n NDFrameT,\n PlottingOrientation,\n npt,\n )\n\n from pandas import Series\n\n\ndef _color_in_style(style: str) -> bool:\n """\n Check if there is a color letter in the style string.\n """\n from matplotlib.colors import BASE_COLORS\n\n return not set(BASE_COLORS).isdisjoint(style)\n\n\nclass MPLPlot(ABC):\n """\n Base class for assembling a pandas plot using matplotlib\n\n Parameters\n ----------\n data :\n\n """\n\n @property\n @abstractmethod\n def _kind(self) -> str:\n """Specify kind str. Must be overridden in child class"""\n raise NotImplementedError\n\n _layout_type = "vertical"\n _default_rot = 0\n\n @property\n def orientation(self) -> str | None:\n return None\n\n data: DataFrame\n\n def __init__(\n self,\n data,\n kind=None,\n by: IndexLabel | None = None,\n subplots: bool | Sequence[Sequence[str]] = False,\n sharex: bool | None = None,\n sharey: bool = False,\n use_index: bool = True,\n figsize: tuple[float, float] | None = None,\n grid=None,\n legend: bool | str = True,\n rot=None,\n ax=None,\n fig=None,\n title=None,\n xlim=None,\n ylim=None,\n xticks=None,\n yticks=None,\n xlabel: Hashable | None = None,\n ylabel: Hashable | None = None,\n fontsize: int | None = None,\n secondary_y: bool | tuple | list | np.ndarray = False,\n colormap=None,\n table: bool = False,\n layout=None,\n include_bool: bool = False,\n column: IndexLabel | None = None,\n *,\n logx: bool | None | Literal["sym"] = False,\n logy: bool | None | Literal["sym"] = False,\n loglog: bool | None | Literal["sym"] = False,\n mark_right: bool = True,\n stacked: bool = False,\n label: Hashable | None = None,\n style=None,\n **kwds,\n ) -> None:\n import matplotlib.pyplot as plt\n\n # if users assign an empty list or tuple, raise `ValueError`\n # similar to current `df.box` and `df.hist` APIs.\n if by in ([], ()):\n raise ValueError("No group keys passed!")\n self.by = com.maybe_make_list(by)\n\n # Assign the rest of columns into self.columns if by is 
explicitly defined\n # while column is not, only need `columns` in hist/box plot when it's DF\n # TODO: Might deprecate `column` argument in future PR (#28373)\n if isinstance(data, DataFrame):\n if column:\n self.columns = com.maybe_make_list(column)\n elif self.by is None:\n self.columns = [\n col for col in data.columns if is_numeric_dtype(data[col])\n ]\n else:\n self.columns = [\n col\n for col in data.columns\n if col not in self.by and is_numeric_dtype(data[col])\n ]\n\n # For `hist` plot, need to get grouped original data before `self.data` is\n # updated later\n if self.by is not None and self._kind == "hist":\n self._grouped = data.groupby(unpack_single_str_list(self.by))\n\n self.kind = kind\n\n self.subplots = type(self)._validate_subplots_kwarg(\n subplots, data, kind=self._kind\n )\n\n self.sharex = type(self)._validate_sharex(sharex, ax, by)\n self.sharey = sharey\n self.figsize = figsize\n self.layout = layout\n\n self.xticks = xticks\n self.yticks = yticks\n self.xlim = xlim\n self.ylim = ylim\n self.title = title\n self.use_index = use_index\n self.xlabel = xlabel\n self.ylabel = ylabel\n\n self.fontsize = fontsize\n\n if rot is not None:\n self.rot = rot\n # need to know for format_date_labels since it's rotated to 30 by\n # default\n self._rot_set = True\n else:\n self._rot_set = False\n self.rot = self._default_rot\n\n if grid is None:\n grid = False if secondary_y else plt.rcParams["axes.grid"]\n\n self.grid = grid\n self.legend = legend\n self.legend_handles: list[Artist] = []\n self.legend_labels: list[Hashable] = []\n\n self.logx = type(self)._validate_log_kwd("logx", logx)\n self.logy = type(self)._validate_log_kwd("logy", logy)\n self.loglog = type(self)._validate_log_kwd("loglog", loglog)\n self.label = label\n self.style = style\n self.mark_right = mark_right\n self.stacked = stacked\n\n # ax may be an Axes object or (if self.subplots) an ndarray of\n # Axes objects\n self.ax = ax\n # TODO: deprecate fig keyword as it is ignored, not 
passed in tests\n # as of 2023-11-05\n\n # parse errorbar input if given\n xerr = kwds.pop("xerr", None)\n yerr = kwds.pop("yerr", None)\n nseries = self._get_nseries(data)\n xerr, data = type(self)._parse_errorbars("xerr", xerr, data, nseries)\n yerr, data = type(self)._parse_errorbars("yerr", yerr, data, nseries)\n self.errors = {"xerr": xerr, "yerr": yerr}\n self.data = data\n\n if not isinstance(secondary_y, (bool, tuple, list, np.ndarray, ABCIndex)):\n secondary_y = [secondary_y]\n self.secondary_y = secondary_y\n\n # ugly TypeError if user passes matplotlib's `cmap` name.\n # Probably better to accept either.\n if "cmap" in kwds and colormap:\n raise TypeError("Only specify one of `cmap` and `colormap`.")\n if "cmap" in kwds:\n self.colormap = kwds.pop("cmap")\n else:\n self.colormap = colormap\n\n self.table = table\n self.include_bool = include_bool\n\n self.kwds = kwds\n\n color = kwds.pop("color", lib.no_default)\n self.color = self._validate_color_args(color, self.colormap)\n assert "color" not in self.kwds\n\n self.data = self._ensure_frame(self.data)\n\n @final\n @staticmethod\n def _validate_sharex(sharex: bool | None, ax, by) -> bool:\n if sharex is None:\n # if by is defined, subplots are used and sharex should be False\n if ax is None and by is None: # pylint: disable=simplifiable-if-statement\n sharex = True\n else:\n # if we get an axis, the users should do the visibility\n # setting...\n sharex = False\n elif not is_bool(sharex):\n raise TypeError("sharex must be a bool or None")\n return bool(sharex)\n\n @classmethod\n def _validate_log_kwd(\n cls,\n kwd: str,\n value: bool | None | Literal["sym"],\n ) -> bool | None | Literal["sym"]:\n if (\n value is None\n or isinstance(value, bool)\n or (isinstance(value, str) and value == "sym")\n ):\n return value\n raise ValueError(\n f"keyword '{kwd}' should be bool, None, or 'sym', not '{value}'"\n )\n\n @final\n @staticmethod\n def _validate_subplots_kwarg(\n subplots: bool | Sequence[Sequence[str]], 
data: Series | DataFrame, kind: str\n ) -> bool | list[tuple[int, ...]]:\n """\n Validate the subplots parameter\n\n - check type and content\n - check for duplicate columns\n - check for invalid column names\n - convert column names into indices\n - add missing columns in a group of their own\n See comments in code below for more details.\n\n Parameters\n ----------\n subplots : subplots parameters as passed to PlotAccessor\n\n Returns\n -------\n validated subplots : a bool or a list of tuples of column indices. Columns\n in the same tuple will be grouped together in the resulting plot.\n """\n\n if isinstance(subplots, bool):\n return subplots\n elif not isinstance(subplots, Iterable):\n raise ValueError("subplots should be a bool or an iterable")\n\n supported_kinds = (\n "line",\n "bar",\n "barh",\n "hist",\n "kde",\n "density",\n "area",\n "pie",\n )\n if kind not in supported_kinds:\n raise ValueError(\n "When subplots is an iterable, kind must be "\n f"one of {', '.join(supported_kinds)}. 
Got {kind}."\n )\n\n if isinstance(data, ABCSeries):\n raise NotImplementedError(\n "An iterable subplots for a Series is not supported."\n )\n\n columns = data.columns\n if isinstance(columns, ABCMultiIndex):\n raise NotImplementedError(\n "An iterable subplots for a DataFrame with a MultiIndex column "\n "is not supported."\n )\n\n if columns.nunique() != len(columns):\n raise NotImplementedError(\n "An iterable subplots for a DataFrame with non-unique column "\n "labels is not supported."\n )\n\n # subplots is a list of tuples where each tuple is a group of\n # columns to be grouped together (one ax per group).\n # we consolidate the subplots list such that:\n # - the tuples contain indices instead of column names\n # - the columns that aren't yet in the list are added in a group\n # of their own.\n # For example with columns from a to g, and\n # subplots = [(a, c), (b, f, e)],\n # we end up with [(ai, ci), (bi, fi, ei), (di,), (gi,)]\n # This way, we can handle self.subplots in a homogeneous manner\n # later.\n # TODO: also accept indices instead of just names?\n\n out = []\n seen_columns: set[Hashable] = set()\n for group in subplots:\n if not is_list_like(group):\n raise ValueError(\n "When subplots is an iterable, each entry "\n "should be a list/tuple of column names."\n )\n idx_locs = columns.get_indexer_for(group)\n if (idx_locs == -1).any():\n bad_labels = np.extract(idx_locs == -1, group)\n raise ValueError(\n f"Column label(s) {list(bad_labels)} not found in the DataFrame."\n )\n unique_columns = set(group)\n duplicates = seen_columns.intersection(unique_columns)\n if duplicates:\n raise ValueError(\n "Each column should be in only one subplot. 
"\n f"Columns {duplicates} were found in multiple subplots."\n )\n seen_columns = seen_columns.union(unique_columns)\n out.append(tuple(idx_locs))\n\n unseen_columns = columns.difference(seen_columns)\n for column in unseen_columns:\n idx_loc = columns.get_loc(column)\n out.append((idx_loc,))\n return out\n\n def _validate_color_args(self, color, colormap):\n if color is lib.no_default:\n # It was not provided by the user\n if "colors" in self.kwds and colormap is not None:\n warnings.warn(\n "'color' and 'colormap' cannot be used simultaneously. "\n "Using 'color'",\n stacklevel=find_stack_level(),\n )\n return None\n if self.nseries == 1 and color is not None and not is_list_like(color):\n # support series.plot(color='green')\n color = [color]\n\n if isinstance(color, tuple) and self.nseries == 1 and len(color) in (3, 4):\n # support RGB and RGBA tuples in series plot\n color = [color]\n\n if colormap is not None:\n warnings.warn(\n "'color' and 'colormap' cannot be used simultaneously. Using 'color'",\n stacklevel=find_stack_level(),\n )\n\n if self.style is not None:\n if is_list_like(self.style):\n styles = self.style\n else:\n styles = [self.style]\n # need only a single match\n for s in styles:\n if _color_in_style(s):\n raise ValueError(\n "Cannot pass 'style' string with a color symbol and "\n "'color' keyword argument. Please use one or the "\n "other or pass 'style' without a color symbol"\n )\n return color\n\n @final\n @staticmethod\n def _iter_data(\n data: DataFrame | dict[Hashable, Series | DataFrame]\n ) -> Iterator[tuple[Hashable, np.ndarray]]:\n for col, values in data.items():\n # This was originally written to use values.values before EAs\n # were implemented; adding np.asarray(...) 
to keep consistent\n # typing.\n yield col, np.asarray(values.values)\n\n def _get_nseries(self, data: Series | DataFrame) -> int:\n # When `by` is explicitly assigned, grouped data size will be defined, and\n # this will determine number of subplots to have, aka `self.nseries`\n if data.ndim == 1:\n return 1\n elif self.by is not None and self._kind == "hist":\n return len(self._grouped)\n elif self.by is not None and self._kind == "box":\n return len(self.columns)\n else:\n return data.shape[1]\n\n @final\n @property\n def nseries(self) -> int:\n return self._get_nseries(self.data)\n\n @final\n def draw(self) -> None:\n self.plt.draw_if_interactive()\n\n @final\n def generate(self) -> None:\n self._compute_plot_data()\n fig = self.fig\n self._make_plot(fig)\n self._add_table()\n self._make_legend()\n self._adorn_subplots(fig)\n\n for ax in self.axes:\n self._post_plot_logic_common(ax)\n self._post_plot_logic(ax, self.data)\n\n @final\n @staticmethod\n def _has_plotted_object(ax: Axes) -> bool:\n """check whether ax has data"""\n return len(ax.lines) != 0 or len(ax.artists) != 0 or len(ax.containers) != 0\n\n @final\n def _maybe_right_yaxis(self, ax: Axes, axes_num: int) -> Axes:\n if not self.on_right(axes_num):\n # secondary axes may be passed via ax kw\n return self._get_ax_layer(ax)\n\n if hasattr(ax, "right_ax"):\n # if it has right_ax property, ``ax`` must be left axes\n return ax.right_ax\n elif hasattr(ax, "left_ax"):\n # if it has left_ax property, ``ax`` must be right axes\n return ax\n else:\n # otherwise, create twin axes\n orig_ax, new_ax = ax, ax.twinx()\n # TODO: use Matplotlib public API when available\n new_ax._get_lines = orig_ax._get_lines # type: ignore[attr-defined]\n # TODO #54485\n new_ax._get_patches_for_fill = ( # type: ignore[attr-defined]\n orig_ax._get_patches_for_fill # type: ignore[attr-defined]\n )\n # TODO #54485\n orig_ax.right_ax, new_ax.left_ax = ( # type: ignore[attr-defined]\n new_ax,\n orig_ax,\n )\n\n if not 
self._has_plotted_object(orig_ax): # no data on left y\n orig_ax.get_yaxis().set_visible(False)\n\n if self.logy is True or self.loglog is True:\n new_ax.set_yscale("log")\n elif self.logy == "sym" or self.loglog == "sym":\n new_ax.set_yscale("symlog")\n return new_ax\n\n @final\n @cache_readonly\n def fig(self) -> Figure:\n return self._axes_and_fig[1]\n\n @final\n @cache_readonly\n # TODO: can we annotate this as both a Sequence[Axes] and ndarray[object]?\n def axes(self) -> Sequence[Axes]:\n return self._axes_and_fig[0]\n\n @final\n @cache_readonly\n def _axes_and_fig(self) -> tuple[Sequence[Axes], Figure]:\n if self.subplots:\n naxes = (\n self.nseries if isinstance(self.subplots, bool) else len(self.subplots)\n )\n fig, axes = create_subplots(\n naxes=naxes,\n sharex=self.sharex,\n sharey=self.sharey,\n figsize=self.figsize,\n ax=self.ax,\n layout=self.layout,\n layout_type=self._layout_type,\n )\n elif self.ax is None:\n fig = self.plt.figure(figsize=self.figsize)\n axes = fig.add_subplot(111)\n else:\n fig = self.ax.get_figure()\n if self.figsize is not None:\n fig.set_size_inches(self.figsize)\n axes = self.ax\n\n axes = flatten_axes(axes)\n\n if self.logx is True or self.loglog is True:\n [a.set_xscale("log") for a in axes]\n elif self.logx == "sym" or self.loglog == "sym":\n [a.set_xscale("symlog") for a in axes]\n\n if self.logy is True or self.loglog is True:\n [a.set_yscale("log") for a in axes]\n elif self.logy == "sym" or self.loglog == "sym":\n [a.set_yscale("symlog") for a in axes]\n\n axes_seq = cast(Sequence["Axes"], axes)\n return axes_seq, fig\n\n @property\n def result(self):\n """\n Return result axes\n """\n if self.subplots:\n if self.layout is not None and not is_list_like(self.ax):\n # error: "Sequence[Any]" has no attribute "reshape"\n return self.axes.reshape(*self.layout) # type: ignore[attr-defined]\n else:\n return self.axes\n else:\n sec_true = isinstance(self.secondary_y, bool) and self.secondary_y\n # error: Argument 1 to "len" 
has incompatible type "Union[bool,\n # Tuple[Any, ...], List[Any], ndarray[Any, Any]]"; expected "Sized"\n all_sec = (\n is_list_like(self.secondary_y)\n and len(self.secondary_y) == self.nseries # type: ignore[arg-type]\n )\n if sec_true or all_sec:\n # if all data is plotted on secondary, return right axes\n return self._get_ax_layer(self.axes[0], primary=False)\n else:\n return self.axes[0]\n\n @final\n @staticmethod\n def _convert_to_ndarray(data):\n # GH31357: categorical columns are processed separately\n if isinstance(data.dtype, CategoricalDtype):\n return data\n\n # GH32073: cast to float if values contain nulled integers\n if (is_integer_dtype(data.dtype) or is_float_dtype(data.dtype)) and isinstance(\n data.dtype, ExtensionDtype\n ):\n return data.to_numpy(dtype="float", na_value=np.nan)\n\n # GH25587: cast ExtensionArray of pandas (IntegerArray, etc.) to\n # np.ndarray before plot.\n if len(data) > 0:\n return np.asarray(data)\n\n return data\n\n @final\n def _ensure_frame(self, data) -> DataFrame:\n if isinstance(data, ABCSeries):\n label = self.label\n if label is None and data.name is None:\n label = ""\n if label is None:\n # We'll end up with columns of [0] instead of [None]\n data = data.to_frame()\n else:\n data = data.to_frame(name=label)\n elif self._kind in ("hist", "box"):\n cols = self.columns if self.by is None else self.columns + self.by\n data = data.loc[:, cols]\n return data\n\n @final\n def _compute_plot_data(self) -> None:\n data = self.data\n\n # GH15079 reconstruct data if by is defined\n if self.by is not None:\n self.subplots = True\n data = reconstruct_data_with_by(self.data, by=self.by, cols=self.columns)\n\n # GH16953, infer_objects is needed as fallback, for ``Series``\n # with ``dtype == object``\n data = data.infer_objects(copy=False)\n include_type = [np.number, "datetime", "datetimetz", "timedelta"]\n\n # GH23719, allow plotting boolean\n if self.include_bool is True:\n include_type.append(np.bool_)\n\n # GH22799, exclude 
datetime-like type for boxplot\n exclude_type = None\n if self._kind == "box":\n # TODO: change after solving issue 27881\n include_type = [np.number]\n exclude_type = ["timedelta"]\n\n # GH 18755, include object and category type for scatter plot\n if self._kind == "scatter":\n include_type.extend(["object", "category", "string"])\n\n numeric_data = data.select_dtypes(include=include_type, exclude=exclude_type)\n\n is_empty = numeric_data.shape[-1] == 0\n # no non-numeric frames or series allowed\n if is_empty:\n raise TypeError("no numeric data to plot")\n\n self.data = numeric_data.apply(type(self)._convert_to_ndarray)\n\n def _make_plot(self, fig: Figure) -> None:\n raise AbstractMethodError(self)\n\n @final\n def _add_table(self) -> None:\n if self.table is False:\n return\n elif self.table is True:\n data = self.data.transpose()\n else:\n data = self.table\n ax = self._get_ax(0)\n tools.table(ax, data)\n\n @final\n def _post_plot_logic_common(self, ax: Axes) -> None:\n """Common post process for each axes"""\n if self.orientation == "vertical" or self.orientation is None:\n type(self)._apply_axis_properties(\n ax.xaxis, rot=self.rot, fontsize=self.fontsize\n )\n type(self)._apply_axis_properties(ax.yaxis, fontsize=self.fontsize)\n\n if hasattr(ax, "right_ax"):\n type(self)._apply_axis_properties(\n ax.right_ax.yaxis, fontsize=self.fontsize\n )\n\n elif self.orientation == "horizontal":\n type(self)._apply_axis_properties(\n ax.yaxis, rot=self.rot, fontsize=self.fontsize\n )\n type(self)._apply_axis_properties(ax.xaxis, fontsize=self.fontsize)\n\n if hasattr(ax, "right_ax"):\n type(self)._apply_axis_properties(\n ax.right_ax.yaxis, fontsize=self.fontsize\n )\n else: # pragma no cover\n raise ValueError\n\n @abstractmethod\n def _post_plot_logic(self, ax: Axes, data) -> None:\n """Post process for each axes. 
Overridden in child classes"""\n\n @final\n def _adorn_subplots(self, fig: Figure) -> None:\n """Common post process unrelated to data"""\n if len(self.axes) > 0:\n all_axes = self._get_subplots(fig)\n nrows, ncols = self._get_axes_layout(fig)\n handle_shared_axes(\n axarr=all_axes,\n nplots=len(all_axes),\n naxes=nrows * ncols,\n nrows=nrows,\n ncols=ncols,\n sharex=self.sharex,\n sharey=self.sharey,\n )\n\n for ax in self.axes:\n ax = getattr(ax, "right_ax", ax)\n if self.yticks is not None:\n ax.set_yticks(self.yticks)\n\n if self.xticks is not None:\n ax.set_xticks(self.xticks)\n\n if self.ylim is not None:\n ax.set_ylim(self.ylim)\n\n if self.xlim is not None:\n ax.set_xlim(self.xlim)\n\n # GH9093, currently Pandas does not show ylabel, so if users provide\n # ylabel will set it as ylabel in the plot.\n if self.ylabel is not None:\n ax.set_ylabel(pprint_thing(self.ylabel))\n\n ax.grid(self.grid)\n\n if self.title:\n if self.subplots:\n if is_list_like(self.title):\n if len(self.title) != self.nseries:\n raise ValueError(\n "The length of `title` must equal the number "\n "of columns if using `title` of type `list` "\n "and `subplots=True`.\n"\n f"length of title = {len(self.title)}\n"\n f"number of columns = {self.nseries}"\n )\n\n for ax, title in zip(self.axes, self.title):\n ax.set_title(title)\n else:\n fig.suptitle(self.title)\n else:\n if is_list_like(self.title):\n msg = (\n "Using `title` of type `list` is not supported "\n "unless `subplots=True` is passed"\n )\n raise ValueError(msg)\n self.axes[0].set_title(self.title)\n\n @final\n @staticmethod\n def _apply_axis_properties(\n axis: Axis, rot=None, fontsize: int | None = None\n ) -> None:\n """\n Tick creation within matplotlib is reasonably expensive and is\n internally deferred until accessed as Ticks are created/destroyed\n multiple times per draw. 
It's therefore beneficial for us to avoid\n accessing unless we will act on the Tick.\n """\n if rot is not None or fontsize is not None:\n # rot=0 is a valid setting, hence the explicit None check\n labels = axis.get_majorticklabels() + axis.get_minorticklabels()\n for label in labels:\n if rot is not None:\n label.set_rotation(rot)\n if fontsize is not None:\n label.set_fontsize(fontsize)\n\n @final\n @property\n def legend_title(self) -> str | None:\n if not isinstance(self.data.columns, ABCMultiIndex):\n name = self.data.columns.name\n if name is not None:\n name = pprint_thing(name)\n return name\n else:\n stringified = map(pprint_thing, self.data.columns.names)\n return ",".join(stringified)\n\n @final\n def _mark_right_label(self, label: str, index: int) -> str:\n """\n Append ``(right)`` to the label of a line if it's plotted on the right axis.\n\n Note that ``(right)`` is only appended when ``subplots=False``.\n """\n if not self.subplots and self.mark_right and self.on_right(index):\n label += " (right)"\n return label\n\n @final\n def _append_legend_handles_labels(self, handle: Artist, label: str) -> None:\n """\n Append current handle and label to ``legend_handles`` and ``legend_labels``.\n\n These will be used to make the legend.\n """\n self.legend_handles.append(handle)\n self.legend_labels.append(label)\n\n def _make_legend(self) -> None:\n ax, leg = self._get_ax_legend(self.axes[0])\n\n handles = []\n labels = []\n title = ""\n\n if not self.subplots:\n if leg is not None:\n title = leg.get_title().get_text()\n # Replace leg.legend_handles because it misses marker info\n if Version(mpl.__version__) < Version("3.7"):\n handles = leg.legendHandles\n else:\n handles = leg.legend_handles\n labels = [x.get_text() for x in leg.get_texts()]\n\n if self.legend:\n if self.legend == "reverse":\n handles += reversed(self.legend_handles)\n labels += reversed(self.legend_labels)\n else:\n handles += self.legend_handles\n labels += self.legend_labels\n\n if 
self.legend_title is not None:\n title = self.legend_title\n\n if len(handles) > 0:\n ax.legend(handles, labels, loc="best", title=title)\n\n elif self.subplots and self.legend:\n for ax in self.axes:\n if ax.get_visible():\n with warnings.catch_warnings():\n warnings.filterwarnings(\n "ignore",\n "No artists with labels found to put in legend.",\n UserWarning,\n )\n ax.legend(loc="best")\n\n @final\n @staticmethod\n def _get_ax_legend(ax: Axes):\n """\n Take in axes and return ax and legend under different scenarios\n """\n leg = ax.get_legend()\n\n other_ax = getattr(ax, "left_ax", None) or getattr(ax, "right_ax", None)\n other_leg = None\n if other_ax is not None:\n other_leg = other_ax.get_legend()\n if leg is None and other_leg is not None:\n leg = other_leg\n ax = other_ax\n return ax, leg\n\n @final\n @cache_readonly\n def plt(self):\n import matplotlib.pyplot as plt\n\n return plt\n\n _need_to_set_index = False\n\n @final\n def _get_xticks(self):\n index = self.data.index\n is_datetype = index.inferred_type in ("datetime", "date", "datetime64", "time")\n\n # TODO: be stricter about x?\n x: list[int] | np.ndarray\n if self.use_index:\n if isinstance(index, ABCPeriodIndex):\n # test_mixed_freq_irreg_period\n x = index.to_timestamp()._mpl_repr()\n # TODO: why do we need to do to_timestamp() here but not other\n # places where we call mpl_repr?\n elif is_any_real_numeric_dtype(index.dtype):\n # Matplotlib supports numeric values or datetime objects as\n # xaxis values. 
Taking LBYL approach here, by the time\n # matplotlib raises exception when using non numeric/datetime\n # values for xaxis, several actions are already taken by plt.\n x = index._mpl_repr()\n elif isinstance(index, ABCDatetimeIndex) or is_datetype:\n x = index._mpl_repr()\n else:\n self._need_to_set_index = True\n x = list(range(len(index)))\n else:\n x = list(range(len(index)))\n\n return x\n\n @classmethod\n @register_pandas_matplotlib_converters\n def _plot(\n cls, ax: Axes, x, y: np.ndarray, style=None, is_errorbar: bool = False, **kwds\n ):\n mask = isna(y)\n if mask.any():\n y = np.ma.array(y)\n y = np.ma.masked_where(mask, y)\n\n if isinstance(x, ABCIndex):\n x = x._mpl_repr()\n\n if is_errorbar:\n if "xerr" in kwds:\n kwds["xerr"] = np.array(kwds.get("xerr"))\n if "yerr" in kwds:\n kwds["yerr"] = np.array(kwds.get("yerr"))\n return ax.errorbar(x, y, **kwds)\n else:\n # prevent style kwarg from going to errorbar, where it is unsupported\n args = (x, y, style) if style is not None else (x, y)\n return ax.plot(*args, **kwds)\n\n def _get_custom_index_name(self):\n """Specify whether xlabel/ylabel should be used to override index name"""\n return self.xlabel\n\n @final\n def _get_index_name(self) -> str | None:\n if isinstance(self.data.index, ABCMultiIndex):\n name = self.data.index.names\n if com.any_not_none(*name):\n name = ",".join([pprint_thing(x) for x in name])\n else:\n name = None\n else:\n name = self.data.index.name\n if name is not None:\n name = pprint_thing(name)\n\n # GH 45145, override the default axis label if one is provided.\n index_name = self._get_custom_index_name()\n if index_name is not None:\n name = pprint_thing(index_name)\n\n return name\n\n @final\n @classmethod\n def _get_ax_layer(cls, ax, primary: bool = True):\n """get left (primary) or right (secondary) axes"""\n if primary:\n return getattr(ax, "left_ax", ax)\n else:\n return getattr(ax, "right_ax", ax)\n\n @final\n def _col_idx_to_axis_idx(self, col_idx: int) -> int:\n 
"""Return the index of the axis where the column at col_idx should be plotted"""\n if isinstance(self.subplots, list):\n # Subplots is a list: some columns will be grouped together in the same ax\n return next(\n group_idx\n for (group_idx, group) in enumerate(self.subplots)\n if col_idx in group\n )\n else:\n # subplots is True: one ax per column\n return col_idx\n\n @final\n def _get_ax(self, i: int):\n # get the twinx ax if appropriate\n if self.subplots:\n i = self._col_idx_to_axis_idx(i)\n ax = self.axes[i]\n ax = self._maybe_right_yaxis(ax, i)\n # error: Unsupported target for indexed assignment ("Sequence[Any]")\n self.axes[i] = ax # type: ignore[index]\n else:\n ax = self.axes[0]\n ax = self._maybe_right_yaxis(ax, i)\n\n ax.get_yaxis().set_visible(True)\n return ax\n\n @final\n def on_right(self, i: int):\n if isinstance(self.secondary_y, bool):\n return self.secondary_y\n\n if isinstance(self.secondary_y, (tuple, list, np.ndarray, ABCIndex)):\n return self.data.columns[i] in self.secondary_y\n\n @final\n def _apply_style_colors(\n self, colors, kwds: dict[str, Any], col_num: int, label: str\n ):\n """\n Manage style and color based on column number and its label.\n Returns tuple of appropriate style and kwds which "color" may be added.\n """\n style = None\n if self.style is not None:\n if isinstance(self.style, list):\n try:\n style = self.style[col_num]\n except IndexError:\n pass\n elif isinstance(self.style, dict):\n style = self.style.get(label, style)\n else:\n style = self.style\n\n has_color = "color" in kwds or self.colormap is not None\n nocolor_style = style is None or not _color_in_style(style)\n if (has_color or self.subplots) and nocolor_style:\n if isinstance(colors, dict):\n kwds["color"] = colors[label]\n else:\n kwds["color"] = colors[col_num % len(colors)]\n return style, kwds\n\n def _get_colors(\n self,\n num_colors: int | None = None,\n color_kwds: str = "color",\n ):\n if num_colors is None:\n num_colors = self.nseries\n if 
color_kwds == "color":\n color = self.color\n else:\n color = self.kwds.get(color_kwds)\n return get_standard_colors(\n num_colors=num_colors,\n colormap=self.colormap,\n color=color,\n )\n\n # TODO: tighter typing for first return?\n @final\n @staticmethod\n def _parse_errorbars(\n label: str, err, data: NDFrameT, nseries: int\n ) -> tuple[Any, NDFrameT]:\n """\n Look for error keyword arguments and return the actual errorbar data\n or return the error DataFrame/dict\n\n Error bars can be specified in several ways:\n Series: the user provides a pandas.Series object of the same\n length as the data\n ndarray: provides a np.ndarray of the same length as the data\n DataFrame/dict: error values are paired with keys matching the\n key in the plotted DataFrame\n str: the name of the column within the plotted DataFrame\n\n Asymmetrical error bars are also supported, however raw error values\n must be provided in this case. For a ``N`` length :class:`Series`, a\n ``2xN`` array should be provided indicating lower and upper (or left\n and right) errors. 
For a ``MxN`` :class:`DataFrame`, asymmetrical errors\n should be in a ``Mx2xN`` array.\n """\n if err is None:\n return None, data\n\n def match_labels(data, e):\n e = e.reindex(data.index)\n return e\n\n # key-matched DataFrame\n if isinstance(err, ABCDataFrame):\n err = match_labels(data, err)\n # key-matched dict\n elif isinstance(err, dict):\n pass\n\n # Series of error values\n elif isinstance(err, ABCSeries):\n # broadcast error series across data\n err = match_labels(data, err)\n err = np.atleast_2d(err)\n err = np.tile(err, (nseries, 1))\n\n # errors are a column in the dataframe\n elif isinstance(err, str):\n evalues = data[err].values\n data = data[data.columns.drop(err)]\n err = np.atleast_2d(evalues)\n err = np.tile(err, (nseries, 1))\n\n elif is_list_like(err):\n if is_iterator(err):\n err = np.atleast_2d(list(err))\n else:\n # raw error values\n err = np.atleast_2d(err)\n\n err_shape = err.shape\n\n # asymmetrical error bars\n if isinstance(data, ABCSeries) and err_shape[0] == 2:\n err = np.expand_dims(err, 0)\n err_shape = err.shape\n if err_shape[2] != len(data):\n raise ValueError(\n "Asymmetrical error bars should be provided "\n f"with the shape (2, {len(data)})"\n )\n elif isinstance(data, ABCDataFrame) and err.ndim == 3:\n if (\n (err_shape[0] != nseries)\n or (err_shape[1] != 2)\n or (err_shape[2] != len(data))\n ):\n raise ValueError(\n "Asymmetrical error bars should be provided "\n f"with the shape ({nseries}, 2, {len(data)})"\n )\n\n # broadcast errors to each data series\n if len(err) == 1:\n err = np.tile(err, (nseries, 1))\n\n elif is_number(err):\n err = np.tile(\n [err],\n (nseries, len(data)),\n )\n\n else:\n msg = f"No valid {label} detected"\n raise ValueError(msg)\n\n return err, data\n\n @final\n def _get_errorbars(\n self, label=None, index=None, xerr: bool = True, yerr: bool = True\n ) -> dict[str, Any]:\n errors = {}\n\n for kw, flag in zip(["xerr", "yerr"], [xerr, yerr]):\n if flag:\n err = self.errors[kw]\n # user provided 
label-matched dataframe of errors\n if isinstance(err, (ABCDataFrame, dict)):\n if label is not None and label in err.keys():\n err = err[label]\n else:\n err = None\n elif index is not None and err is not None:\n err = err[index]\n\n if err is not None:\n errors[kw] = err\n return errors\n\n @final\n def _get_subplots(self, fig: Figure):\n if Version(mpl.__version__) < Version("3.8"):\n from matplotlib.axes import Subplot as Klass\n else:\n from matplotlib.axes import Axes as Klass\n\n return [\n ax\n for ax in fig.get_axes()\n if (isinstance(ax, Klass) and ax.get_subplotspec() is not None)\n ]\n\n @final\n def _get_axes_layout(self, fig: Figure) -> tuple[int, int]:\n axes = self._get_subplots(fig)\n x_set = set()\n y_set = set()\n for ax in axes:\n # check axes coordinates to estimate layout\n points = ax.get_position().get_points()\n x_set.add(points[0][0])\n y_set.add(points[0][1])\n return (len(y_set), len(x_set))\n\n\nclass PlanePlot(MPLPlot, ABC):\n """\n Abstract class for plotting on plane, currently scatter and hexbin.\n """\n\n _layout_type = "single"\n\n def __init__(self, data, x, y, **kwargs) -> None:\n MPLPlot.__init__(self, data, **kwargs)\n if x is None or y is None:\n raise ValueError(self._kind + " requires an x and y column")\n if is_integer(x) and not self.data.columns._holds_integer():\n x = self.data.columns[x]\n if is_integer(y) and not self.data.columns._holds_integer():\n y = self.data.columns[y]\n\n self.x = x\n self.y = y\n\n @final\n def _get_nseries(self, data: Series | DataFrame) -> int:\n return 1\n\n @final\n def _post_plot_logic(self, ax: Axes, data) -> None:\n x, y = self.x, self.y\n xlabel = self.xlabel if self.xlabel is not None else pprint_thing(x)\n ylabel = self.ylabel if self.ylabel is not None else pprint_thing(y)\n # error: Argument 1 to "set_xlabel" of "_AxesBase" has incompatible\n # type "Hashable"; expected "str"\n ax.set_xlabel(xlabel) # type: ignore[arg-type]\n ax.set_ylabel(ylabel) # type: ignore[arg-type]\n\n 
@final\n def _plot_colorbar(self, ax: Axes, *, fig: Figure, **kwds):\n # Addresses issues #10611 and #10678:\n # When plotting scatterplots and hexbinplots in IPython\n # inline backend the colorbar axis height tends not to\n # exactly match the parent axis height.\n # The difference is due to small fractional differences\n # in floating points with similar representation.\n # To deal with this, this method forces the colorbar\n # height to take the height of the parent axes.\n # For a more detailed description of the issue\n # see the following link:\n # https://github.com/ipython/ipython/issues/11215\n\n # GH33389, if ax is used multiple times, we should always\n # use the last one which contains the latest information\n # about the ax\n img = ax.collections[-1]\n return fig.colorbar(img, ax=ax, **kwds)\n\n\nclass ScatterPlot(PlanePlot):\n @property\n def _kind(self) -> Literal["scatter"]:\n return "scatter"\n\n def __init__(\n self,\n data,\n x,\n y,\n s=None,\n c=None,\n *,\n colorbar: bool | lib.NoDefault = lib.no_default,\n norm=None,\n **kwargs,\n ) -> None:\n if s is None:\n # hide the matplotlib default for size, in case we want to change\n # the handling of this argument later\n s = 20\n elif is_hashable(s) and s in data.columns:\n s = data[s]\n self.s = s\n\n self.colorbar = colorbar\n self.norm = norm\n\n super().__init__(data, x, y, **kwargs)\n if is_integer(c) and not self.data.columns._holds_integer():\n c = self.data.columns[c]\n self.c = c\n\n def _make_plot(self, fig: Figure) -> None:\n x, y, c, data = self.x, self.y, self.c, self.data\n ax = self.axes[0]\n\n c_is_column = is_hashable(c) and c in self.data.columns\n\n color_by_categorical = c_is_column and isinstance(\n self.data[c].dtype, CategoricalDtype\n )\n\n color = self.color\n c_values = self._get_c_values(color, color_by_categorical, c_is_column)\n norm, cmap = self._get_norm_and_cmap(c_values, color_by_categorical)\n cb = self._get_colorbar(c_values, c_is_column)\n\n if self.legend:\n 
label = self.label\n else:\n label = None\n scatter = ax.scatter(\n data[x].values,\n data[y].values,\n c=c_values,\n label=label,\n cmap=cmap,\n norm=norm,\n s=self.s,\n **self.kwds,\n )\n if cb:\n cbar_label = c if c_is_column else ""\n cbar = self._plot_colorbar(ax, fig=fig, label=cbar_label)\n if color_by_categorical:\n n_cats = len(self.data[c].cat.categories)\n cbar.set_ticks(np.linspace(0.5, n_cats - 0.5, n_cats))\n cbar.ax.set_yticklabels(self.data[c].cat.categories)\n\n if label is not None:\n self._append_legend_handles_labels(\n # error: Argument 2 to "_append_legend_handles_labels" of\n # "MPLPlot" has incompatible type "Hashable"; expected "str"\n scatter,\n label, # type: ignore[arg-type]\n )\n\n errors_x = self._get_errorbars(label=x, index=0, yerr=False)\n errors_y = self._get_errorbars(label=y, index=0, xerr=False)\n if len(errors_x) > 0 or len(errors_y) > 0:\n err_kwds = dict(errors_x, **errors_y)\n err_kwds["ecolor"] = scatter.get_facecolor()[0]\n ax.errorbar(data[x].values, data[y].values, linestyle="none", **err_kwds)\n\n def _get_c_values(self, color, color_by_categorical: bool, c_is_column: bool):\n c = self.c\n if c is not None and color is not None:\n raise TypeError("Specify exactly one of `c` and `color`")\n if c is None and color is None:\n c_values = self.plt.rcParams["patch.facecolor"]\n elif color is not None:\n c_values = color\n elif color_by_categorical:\n c_values = self.data[c].cat.codes\n elif c_is_column:\n c_values = self.data[c].values\n else:\n c_values = c\n return c_values\n\n def _get_norm_and_cmap(self, c_values, color_by_categorical: bool):\n c = self.c\n if self.colormap is not None:\n cmap = mpl.colormaps.get_cmap(self.colormap)\n # cmap is only used if c_values are integers, otherwise UserWarning.\n # GH-53908: additionally call isinstance() because is_integer_dtype\n # returns True for "b" (meaning "blue" and not int8 in this context)\n elif not isinstance(c_values, str) and is_integer_dtype(c_values):\n # pandas 
uses colormap, matplotlib uses cmap.\n cmap = mpl.colormaps["Greys"]\n else:\n cmap = None\n\n if color_by_categorical and cmap is not None:\n from matplotlib import colors\n\n n_cats = len(self.data[c].cat.categories)\n cmap = colors.ListedColormap([cmap(i) for i in range(cmap.N)])\n bounds = np.linspace(0, n_cats, n_cats + 1)\n norm = colors.BoundaryNorm(bounds, cmap.N)\n # TODO: warn that we are ignoring self.norm if user specified it?\n # Doesn't happen in any tests 2023-11-09\n else:\n norm = self.norm\n return norm, cmap\n\n def _get_colorbar(self, c_values, c_is_column: bool) -> bool:\n # plot colorbar if\n # 1. colormap is assigned, and\n # 2.`c` is a column containing only numeric values\n plot_colorbar = self.colormap or c_is_column\n cb = self.colorbar\n if cb is lib.no_default:\n return is_numeric_dtype(c_values) and plot_colorbar\n return cb\n\n\nclass HexBinPlot(PlanePlot):\n @property\n def _kind(self) -> Literal["hexbin"]:\n return "hexbin"\n\n def __init__(self, data, x, y, C=None, *, colorbar: bool = True, **kwargs) -> None:\n super().__init__(data, x, y, **kwargs)\n if is_integer(C) and not self.data.columns._holds_integer():\n C = self.data.columns[C]\n self.C = C\n\n self.colorbar = colorbar\n\n # Scatter plot allows to plot objects data\n if len(self.data[self.x]._get_numeric_data()) == 0:\n raise ValueError(self._kind + " requires x column to be numeric")\n if len(self.data[self.y]._get_numeric_data()) == 0:\n raise ValueError(self._kind + " requires y column to be numeric")\n\n def _make_plot(self, fig: Figure) -> None:\n x, y, data, C = self.x, self.y, self.data, self.C\n ax = self.axes[0]\n # pandas uses colormap, matplotlib uses cmap.\n cmap = self.colormap or "BuGn"\n cmap = mpl.colormaps.get_cmap(cmap)\n cb = self.colorbar\n\n if C is None:\n c_values = None\n else:\n c_values = data[C].values\n\n ax.hexbin(data[x].values, data[y].values, C=c_values, cmap=cmap, **self.kwds)\n if cb:\n self._plot_colorbar(ax, fig=fig)\n\n def 
_make_legend(self) -> None:\n pass\n\n\nclass LinePlot(MPLPlot):\n _default_rot = 0\n\n @property\n def orientation(self) -> PlottingOrientation:\n return "vertical"\n\n @property\n def _kind(self) -> Literal["line", "area", "hist", "kde", "box"]:\n return "line"\n\n def __init__(self, data, **kwargs) -> None:\n from pandas.plotting import plot_params\n\n MPLPlot.__init__(self, data, **kwargs)\n if self.stacked:\n self.data = self.data.fillna(value=0)\n self.x_compat = plot_params["x_compat"]\n if "x_compat" in self.kwds:\n self.x_compat = bool(self.kwds.pop("x_compat"))\n\n @final\n def _is_ts_plot(self) -> bool:\n # this is slightly deceptive\n return not self.x_compat and self.use_index and self._use_dynamic_x()\n\n @final\n def _use_dynamic_x(self) -> bool:\n return use_dynamic_x(self._get_ax(0), self.data)\n\n def _make_plot(self, fig: Figure) -> None:\n if self._is_ts_plot():\n data = maybe_convert_index(self._get_ax(0), self.data)\n\n x = data.index # dummy, not used\n plotf = self._ts_plot\n it = data.items()\n else:\n x = self._get_xticks()\n # error: Incompatible types in assignment (expression has type\n # "Callable[[Any, Any, Any, Any, Any, Any, KwArg(Any)], Any]", variable has\n # type "Callable[[Any, Any, Any, Any, KwArg(Any)], Any]")\n plotf = self._plot # type: ignore[assignment]\n # error: Incompatible types in assignment (expression has type\n # "Iterator[tuple[Hashable, ndarray[Any, Any]]]", variable has\n # type "Iterable[tuple[Hashable, Series]]")\n it = self._iter_data(data=self.data) # type: ignore[assignment]\n\n stacking_id = self._get_stacking_id()\n is_errorbar = com.any_not_none(*self.errors.values())\n\n colors = self._get_colors()\n for i, (label, y) in enumerate(it):\n ax = self._get_ax(i)\n kwds = self.kwds.copy()\n if self.color is not None:\n kwds["color"] = self.color\n style, kwds = self._apply_style_colors(\n colors,\n kwds,\n i,\n # error: Argument 4 to "_apply_style_colors" of "MPLPlot" has\n # incompatible type "Hashable"; 
expected "str"\n label, # type: ignore[arg-type]\n )\n\n errors = self._get_errorbars(label=label, index=i)\n kwds = dict(kwds, **errors)\n\n label = pprint_thing(label)\n label = self._mark_right_label(label, index=i)\n kwds["label"] = label\n\n newlines = plotf(\n ax,\n x,\n y,\n style=style,\n column_num=i,\n stacking_id=stacking_id,\n is_errorbar=is_errorbar,\n **kwds,\n )\n self._append_legend_handles_labels(newlines[0], label)\n\n if self._is_ts_plot():\n # reset of xlim should be used for ts data\n # TODO: GH28021, should find a way to change view limit on xaxis\n lines = get_all_lines(ax)\n left, right = get_xlim(lines)\n ax.set_xlim(left, right)\n\n # error: Signature of "_plot" incompatible with supertype "MPLPlot"\n @classmethod\n def _plot( # type: ignore[override]\n cls,\n ax: Axes,\n x,\n y: np.ndarray,\n style=None,\n column_num=None,\n stacking_id=None,\n **kwds,\n ):\n # column_num is used to get the target column from plotf in line and\n # area plots\n if column_num == 0:\n cls._initialize_stacker(ax, stacking_id, len(y))\n y_values = cls._get_stacked_values(ax, stacking_id, y, kwds["label"])\n lines = MPLPlot._plot(ax, x, y_values, style=style, **kwds)\n cls._update_stacker(ax, stacking_id, y)\n return lines\n\n @final\n def _ts_plot(self, ax: Axes, x, data: Series, style=None, **kwds):\n # accept x to be consistent with normal plot func,\n # x is not passed to tsplot as it uses data.index as x coordinate\n # column_num must be in kwds for stacking purpose\n freq, data = maybe_resample(data, ax, kwds)\n\n # Set ax with freq info\n decorate_axes(ax, freq)\n # digging deeper\n if hasattr(ax, "left_ax"):\n decorate_axes(ax.left_ax, freq)\n if hasattr(ax, "right_ax"):\n decorate_axes(ax.right_ax, freq)\n # TODO #54485\n ax._plot_data.append((data, self._kind, kwds)) # type: ignore[attr-defined]\n\n lines = self._plot(ax, data.index, np.asarray(data.values), style=style, **kwds)\n # set date formatter, locators and rescale limits\n # TODO #54485\n 
format_dateaxis(ax, ax.freq, data.index) # type: ignore[arg-type, attr-defined]\n return lines\n\n @final\n def _get_stacking_id(self) -> int | None:\n if self.stacked:\n return id(self.data)\n else:\n return None\n\n @final\n @classmethod\n def _initialize_stacker(cls, ax: Axes, stacking_id, n: int) -> None:\n if stacking_id is None:\n return\n if not hasattr(ax, "_stacker_pos_prior"):\n # TODO #54485\n ax._stacker_pos_prior = {} # type: ignore[attr-defined]\n if not hasattr(ax, "_stacker_neg_prior"):\n # TODO #54485\n ax._stacker_neg_prior = {} # type: ignore[attr-defined]\n # TODO #54485\n ax._stacker_pos_prior[stacking_id] = np.zeros(n) # type: ignore[attr-defined]\n # TODO #54485\n ax._stacker_neg_prior[stacking_id] = np.zeros(n) # type: ignore[attr-defined]\n\n @final\n @classmethod\n def _get_stacked_values(\n cls, ax: Axes, stacking_id: int | None, values: np.ndarray, label\n ) -> np.ndarray:\n if stacking_id is None:\n return values\n if not hasattr(ax, "_stacker_pos_prior"):\n # stacker may not be initialized for subplots\n cls._initialize_stacker(ax, stacking_id, len(values))\n\n if (values >= 0).all():\n # TODO #54485\n return (\n ax._stacker_pos_prior[stacking_id] # type: ignore[attr-defined]\n + values\n )\n elif (values <= 0).all():\n # TODO #54485\n return (\n ax._stacker_neg_prior[stacking_id] # type: ignore[attr-defined]\n + values\n )\n\n raise ValueError(\n "When stacked is True, each column must be either "\n "all positive or all negative. 
"\n f"Column '{label}' contains both positive and negative values"\n )\n\n @final\n @classmethod\n def _update_stacker(cls, ax: Axes, stacking_id: int | None, values) -> None:\n if stacking_id is None:\n return\n if (values >= 0).all():\n # TODO #54485\n ax._stacker_pos_prior[stacking_id] += values # type: ignore[attr-defined]\n elif (values <= 0).all():\n # TODO #54485\n ax._stacker_neg_prior[stacking_id] += values # type: ignore[attr-defined]\n\n def _post_plot_logic(self, ax: Axes, data) -> None:\n from matplotlib.ticker import FixedLocator\n\n def get_label(i):\n if is_float(i) and i.is_integer():\n i = int(i)\n try:\n return pprint_thing(data.index[i])\n except Exception:\n return ""\n\n if self._need_to_set_index:\n xticks = ax.get_xticks()\n xticklabels = [get_label(x) for x in xticks]\n # error: Argument 1 to "FixedLocator" has incompatible type "ndarray[Any,\n # Any]"; expected "Sequence[float]"\n ax.xaxis.set_major_locator(FixedLocator(xticks)) # type: ignore[arg-type]\n ax.set_xticklabels(xticklabels)\n\n # If the index is an irregular time series, then by default\n # we rotate the tick labels. The exception is if there are\n # subplots which don't share their x-axes, in which we case\n # we don't rotate the ticklabels as by default the subplots\n # would be too close together.\n condition = (\n not self._use_dynamic_x()\n and (data.index._is_all_dates and self.use_index)\n and (not self.subplots or (self.subplots and self.sharex))\n )\n\n index_name = self._get_index_name()\n\n if condition:\n # irregular TS rotated 30 deg. 
by default\n # probably a better place to check / set this.\n if not self._rot_set:\n self.rot = 30\n format_date_labels(ax, rot=self.rot)\n\n if index_name is not None and self.use_index:\n ax.set_xlabel(index_name)\n\n\nclass AreaPlot(LinePlot):\n @property\n def _kind(self) -> Literal["area"]:\n return "area"\n\n def __init__(self, data, **kwargs) -> None:\n kwargs.setdefault("stacked", True)\n with warnings.catch_warnings():\n warnings.filterwarnings(\n "ignore",\n "Downcasting object dtype arrays",\n category=FutureWarning,\n )\n data = data.fillna(value=0)\n LinePlot.__init__(self, data, **kwargs)\n\n if not self.stacked:\n # use smaller alpha to distinguish overlap\n self.kwds.setdefault("alpha", 0.5)\n\n if self.logy or self.loglog:\n raise ValueError("Log-y scales are not supported in area plot")\n\n # error: Signature of "_plot" incompatible with supertype "MPLPlot"\n @classmethod\n def _plot( # type: ignore[override]\n cls,\n ax: Axes,\n x,\n y: np.ndarray,\n style=None,\n column_num=None,\n stacking_id=None,\n is_errorbar: bool = False,\n **kwds,\n ):\n if column_num == 0:\n cls._initialize_stacker(ax, stacking_id, len(y))\n y_values = cls._get_stacked_values(ax, stacking_id, y, kwds["label"])\n\n # need to remove label, because subplots uses mpl legend as it is\n line_kwds = kwds.copy()\n line_kwds.pop("label")\n lines = MPLPlot._plot(ax, x, y_values, style=style, **line_kwds)\n\n # get data from the line to get coordinates for fill_between\n xdata, y_values = lines[0].get_data(orig=False)\n\n # unable to use ``_get_stacked_values`` here to get starting point\n if stacking_id is None:\n start = np.zeros(len(y))\n elif (y >= 0).all():\n # TODO #54485\n start = ax._stacker_pos_prior[stacking_id] # type: ignore[attr-defined]\n elif (y <= 0).all():\n # TODO #54485\n start = ax._stacker_neg_prior[stacking_id] # type: ignore[attr-defined]\n else:\n start = np.zeros(len(y))\n\n if "color" not in kwds:\n kwds["color"] = lines[0].get_color()\n\n rect = 
ax.fill_between(xdata, start, y_values, **kwds)\n cls._update_stacker(ax, stacking_id, y)\n\n # LinePlot expects list of artists\n res = [rect]\n return res\n\n def _post_plot_logic(self, ax: Axes, data) -> None:\n LinePlot._post_plot_logic(self, ax, data)\n\n is_shared_y = len(list(ax.get_shared_y_axes())) > 0\n # do not override the default axis behaviour in case of shared y axes\n if self.ylim is None and not is_shared_y:\n if (data >= 0).all().all():\n ax.set_ylim(0, None)\n elif (data <= 0).all().all():\n ax.set_ylim(None, 0)\n\n\nclass BarPlot(MPLPlot):\n @property\n def _kind(self) -> Literal["bar", "barh"]:\n return "bar"\n\n _default_rot = 90\n\n @property\n def orientation(self) -> PlottingOrientation:\n return "vertical"\n\n def __init__(\n self,\n data,\n *,\n align="center",\n bottom=0,\n left=0,\n width=0.5,\n position=0.5,\n log=False,\n **kwargs,\n ) -> None:\n # we have to treat a series differently than a\n # 1-column DataFrame w.r.t. color handling\n self._is_series = isinstance(data, ABCSeries)\n self.bar_width = width\n self._align = align\n self._position = position\n self.tick_pos = np.arange(len(data))\n\n if is_list_like(bottom):\n bottom = np.array(bottom)\n if is_list_like(left):\n left = np.array(left)\n self.bottom = bottom\n self.left = left\n\n self.log = log\n\n MPLPlot.__init__(self, data, **kwargs)\n\n @cache_readonly\n def ax_pos(self) -> np.ndarray:\n return self.tick_pos - self.tickoffset\n\n @cache_readonly\n def tickoffset(self):\n if self.stacked or self.subplots:\n return self.bar_width * self._position\n elif self._align == "edge":\n w = self.bar_width / self.nseries\n return self.bar_width * (self._position - 0.5) + w * 0.5\n else:\n return self.bar_width * self._position\n\n @cache_readonly\n def lim_offset(self):\n if self.stacked or self.subplots:\n if self._align == "edge":\n return self.bar_width / 2\n else:\n return 0\n elif self._align == "edge":\n w = self.bar_width / self.nseries\n return w * 0.5\n else:\n return 
0\n\n # error: Signature of "_plot" incompatible with supertype "MPLPlot"\n @classmethod\n def _plot( # type: ignore[override]\n cls,\n ax: Axes,\n x,\n y: np.ndarray,\n w,\n start: int | npt.NDArray[np.intp] = 0,\n log: bool = False,\n **kwds,\n ):\n return ax.bar(x, y, w, bottom=start, log=log, **kwds)\n\n @property\n def _start_base(self):\n return self.bottom\n\n def _make_plot(self, fig: Figure) -> None:\n colors = self._get_colors()\n ncolors = len(colors)\n\n pos_prior = neg_prior = np.zeros(len(self.data))\n K = self.nseries\n\n data = self.data.fillna(0)\n for i, (label, y) in enumerate(self._iter_data(data=data)):\n ax = self._get_ax(i)\n kwds = self.kwds.copy()\n if self._is_series:\n kwds["color"] = colors\n elif isinstance(colors, dict):\n kwds["color"] = colors[label]\n else:\n kwds["color"] = colors[i % ncolors]\n\n errors = self._get_errorbars(label=label, index=i)\n kwds = dict(kwds, **errors)\n\n label = pprint_thing(label)\n label = self._mark_right_label(label, index=i)\n\n if (("yerr" in kwds) or ("xerr" in kwds)) and (kwds.get("ecolor") is None):\n kwds["ecolor"] = mpl.rcParams["xtick.color"]\n\n start = 0\n if self.log and (y >= 1).all():\n start = 1\n start = start + self._start_base\n\n kwds["align"] = self._align\n if self.subplots:\n w = self.bar_width / 2\n rect = self._plot(\n ax,\n self.ax_pos + w,\n y,\n self.bar_width,\n start=start,\n label=label,\n log=self.log,\n **kwds,\n )\n ax.set_title(label)\n elif self.stacked:\n mask = y > 0\n start = np.where(mask, pos_prior, neg_prior) + self._start_base\n w = self.bar_width / 2\n rect = self._plot(\n ax,\n self.ax_pos + w,\n y,\n self.bar_width,\n start=start,\n label=label,\n log=self.log,\n **kwds,\n )\n pos_prior = pos_prior + np.where(mask, y, 0)\n neg_prior = neg_prior + np.where(mask, 0, y)\n else:\n w = self.bar_width / K\n rect = self._plot(\n ax,\n self.ax_pos + (i + 0.5) * w,\n y,\n w,\n start=start,\n label=label,\n log=self.log,\n **kwds,\n )\n 
self._append_legend_handles_labels(rect, label)\n\n def _post_plot_logic(self, ax: Axes, data) -> None:\n if self.use_index:\n str_index = [pprint_thing(key) for key in data.index]\n else:\n str_index = [pprint_thing(key) for key in range(data.shape[0])]\n\n s_edge = self.ax_pos[0] - 0.25 + self.lim_offset\n e_edge = self.ax_pos[-1] + 0.25 + self.bar_width + self.lim_offset\n\n self._decorate_ticks(ax, self._get_index_name(), str_index, s_edge, e_edge)\n\n def _decorate_ticks(\n self,\n ax: Axes,\n name: str | None,\n ticklabels: list[str],\n start_edge: float,\n end_edge: float,\n ) -> None:\n ax.set_xlim((start_edge, end_edge))\n\n if self.xticks is not None:\n ax.set_xticks(np.array(self.xticks))\n else:\n ax.set_xticks(self.tick_pos)\n ax.set_xticklabels(ticklabels)\n\n if name is not None and self.use_index:\n ax.set_xlabel(name)\n\n\nclass BarhPlot(BarPlot):\n @property\n def _kind(self) -> Literal["barh"]:\n return "barh"\n\n _default_rot = 0\n\n @property\n def orientation(self) -> Literal["horizontal"]:\n return "horizontal"\n\n @property\n def _start_base(self):\n return self.left\n\n # error: Signature of "_plot" incompatible with supertype "MPLPlot"\n @classmethod\n def _plot( # type: ignore[override]\n cls,\n ax: Axes,\n x,\n y: np.ndarray,\n w,\n start: int | npt.NDArray[np.intp] = 0,\n log: bool = False,\n **kwds,\n ):\n return ax.barh(x, y, w, left=start, log=log, **kwds)\n\n def _get_custom_index_name(self):\n return self.ylabel\n\n def _decorate_ticks(\n self,\n ax: Axes,\n name: str | None,\n ticklabels: list[str],\n start_edge: float,\n end_edge: float,\n ) -> None:\n # horizontal bars\n ax.set_ylim((start_edge, end_edge))\n ax.set_yticks(self.tick_pos)\n ax.set_yticklabels(ticklabels)\n if name is not None and self.use_index:\n ax.set_ylabel(name)\n # error: Argument 1 to "set_xlabel" of "_AxesBase" has incompatible type\n # "Hashable | None"; expected "str"\n ax.set_xlabel(self.xlabel) # type: ignore[arg-type]\n\n\nclass PiePlot(MPLPlot):\n 
@property\n def _kind(self) -> Literal["pie"]:\n return "pie"\n\n _layout_type = "horizontal"\n\n def __init__(self, data, kind=None, **kwargs) -> None:\n data = data.fillna(value=0)\n if (data < 0).any().any():\n raise ValueError(f"{self._kind} plot doesn't allow negative values")\n MPLPlot.__init__(self, data, kind=kind, **kwargs)\n\n @classmethod\n def _validate_log_kwd(\n cls,\n kwd: str,\n value: bool | None | Literal["sym"],\n ) -> bool | None | Literal["sym"]:\n super()._validate_log_kwd(kwd=kwd, value=value)\n if value is not False:\n warnings.warn(\n f"PiePlot ignores the '{kwd}' keyword",\n UserWarning,\n stacklevel=find_stack_level(),\n )\n return False\n\n def _validate_color_args(self, color, colormap) -> None:\n # TODO: warn if color is passed and ignored?\n return None\n\n def _make_plot(self, fig: Figure) -> None:\n colors = self._get_colors(num_colors=len(self.data), color_kwds="colors")\n self.kwds.setdefault("colors", colors)\n\n for i, (label, y) in enumerate(self._iter_data(data=self.data)):\n ax = self._get_ax(i)\n if label is not None:\n label = pprint_thing(label)\n ax.set_ylabel(label)\n\n kwds = self.kwds.copy()\n\n def blank_labeler(label, value):\n if value == 0:\n return ""\n else:\n return label\n\n idx = [pprint_thing(v) for v in self.data.index]\n labels = kwds.pop("labels", idx)\n # labels is used for each wedge's labels\n # Blank out labels for values of 0 so they don't overlap\n # with nonzero wedges\n if labels is not None:\n blabels = [blank_labeler(left, value) for left, value in zip(labels, y)]\n else:\n blabels = None\n results = ax.pie(y, labels=blabels, **kwds)\n\n if kwds.get("autopct", None) is not None:\n patches, texts, autotexts = results\n else:\n patches, texts = results\n autotexts = []\n\n if self.fontsize is not None:\n for t in texts + autotexts:\n t.set_fontsize(self.fontsize)\n\n # leglabels is used for legend labels\n leglabels = labels if labels is not None else idx\n for _patch, _leglabel in zip(patches, 
leglabels):\n self._append_legend_handles_labels(_patch, _leglabel)\n\n def _post_plot_logic(self, ax: Axes, data) -> None:\n pass\n | .venv\Lib\site-packages\pandas\plotting\_matplotlib\core.py | core.py | Python | 71,826 | 0.75 | 0.190588 | 0.109956 | python-kit | 886 | 2024-01-11T11:21:02.843353 | MIT | false | f77677d3a4b90ab45c12f7fc2548dbe9 |
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\n\nfrom pandas.core.dtypes.missing import remove_na_arraylike\n\nfrom pandas import (\n MultiIndex,\n concat,\n)\n\nfrom pandas.plotting._matplotlib.misc import unpack_single_str_list\n\nif TYPE_CHECKING:\n from collections.abc import Hashable\n\n from pandas._typing import IndexLabel\n\n from pandas import (\n DataFrame,\n Series,\n )\n\n\ndef create_iter_data_given_by(\n data: DataFrame, kind: str = "hist"\n) -> dict[Hashable, DataFrame | Series]:\n """\n Create data for iteration given `by` is assigned or not, and it is only\n used in both hist and boxplot.\n\n If `by` is assigned, return a dictionary of DataFrames in which the key of\n dictionary is the values in groups.\n If `by` is not assigned, return input as is, and this preserves current\n status of iter_data.\n\n Parameters\n ----------\n data : reformatted grouped data from `_compute_plot_data` method.\n kind : str, plot kind. This function is only used for `hist` and `box` plots.\n\n Returns\n -------\n iter_data : DataFrame or Dictionary of DataFrames\n\n Examples\n --------\n If `by` is assigned:\n\n >>> import numpy as np\n >>> tuples = [('h1', 'a'), ('h1', 'b'), ('h2', 'a'), ('h2', 'b')]\n >>> mi = pd.MultiIndex.from_tuples(tuples)\n >>> value = [[1, 3, np.nan, np.nan],\n ... 
[3, 4, np.nan, np.nan], [np.nan, np.nan, 5, 6]]\n >>> data = pd.DataFrame(value, columns=mi)\n >>> create_iter_data_given_by(data)\n {'h1': h1\n a b\n 0 1.0 3.0\n 1 3.0 4.0\n 2 NaN NaN, 'h2': h2\n a b\n 0 NaN NaN\n 1 NaN NaN\n 2 5.0 6.0}\n """\n\n # For `hist` plot, before transformation, the values in level 0 are values\n # in groups and subplot titles, and later used for column subselection and\n # iteration; For `box` plot, values in level 1 are column names to show,\n # and are used for iteration and as subplots titles.\n if kind == "hist":\n level = 0\n else:\n level = 1\n\n # Select sub-columns based on the value of level of MI, and if `by` is\n # assigned, data must be a MI DataFrame\n assert isinstance(data.columns, MultiIndex)\n return {\n col: data.loc[:, data.columns.get_level_values(level) == col]\n for col in data.columns.levels[level]\n }\n\n\ndef reconstruct_data_with_by(\n data: DataFrame, by: IndexLabel, cols: IndexLabel\n) -> DataFrame:\n """\n Internal function to group data, and reassign multiindex column names onto the\n result in order to let grouped data be used in _compute_plot_data method.\n\n Parameters\n ----------\n data : Original DataFrame to plot\n by : grouped `by` parameter selected by users\n cols : columns of data set (excluding columns used in `by`)\n\n Returns\n -------\n Output is the reconstructed DataFrame with MultiIndex columns. 
The first level\n of MI is unique values of groups, and second level of MI is the columns\n selected by users.\n\n Examples\n --------\n >>> d = {'h': ['h1', 'h1', 'h2'], 'a': [1, 3, 5], 'b': [3, 4, 6]}\n >>> df = pd.DataFrame(d)\n >>> reconstruct_data_with_by(df, by='h', cols=['a', 'b'])\n h1 h2\n a b a b\n 0 1.0 3.0 NaN NaN\n 1 3.0 4.0 NaN NaN\n 2 NaN NaN 5.0 6.0\n """\n by_modified = unpack_single_str_list(by)\n grouped = data.groupby(by_modified)\n\n data_list = []\n for key, group in grouped:\n # error: List item 1 has incompatible type "Union[Hashable,\n # Sequence[Hashable]]"; expected "Iterable[Hashable]"\n columns = MultiIndex.from_product([[key], cols]) # type: ignore[list-item]\n sub_group = group[cols]\n sub_group.columns = columns\n data_list.append(sub_group)\n\n data = concat(data_list, axis=1)\n return data\n\n\ndef reformat_hist_y_given_by(y: np.ndarray, by: IndexLabel | None) -> np.ndarray:\n """Internal function to reformat y given `by` is applied or not for hist plot.\n\n If by is None, input y is 1-d with NaN removed; and if by is not None, groupby\n will take place and input y is multi-dimensional array.\n """\n if by is not None and len(y.shape) > 1:\n return np.array([remove_na_arraylike(col) for col in y.T]).T\n return remove_na_arraylike(y)\n | .venv\Lib\site-packages\pandas\plotting\_matplotlib\groupby.py | groupby.py | Python | 4,343 | 0.95 | 0.133803 | 0.069565 | awesome-app | 603 | 2024-03-03T14:35:58.052404 | BSD-3-Clause | false | 742ca772a2befb0f4fc59526d9dfeb10 |
from __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Literal,\n final,\n)\n\nimport numpy as np\n\nfrom pandas.core.dtypes.common import (\n is_integer,\n is_list_like,\n)\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCIndex,\n)\nfrom pandas.core.dtypes.missing import (\n isna,\n remove_na_arraylike,\n)\n\nfrom pandas.io.formats.printing import pprint_thing\nfrom pandas.plotting._matplotlib.core import (\n LinePlot,\n MPLPlot,\n)\nfrom pandas.plotting._matplotlib.groupby import (\n create_iter_data_given_by,\n reformat_hist_y_given_by,\n)\nfrom pandas.plotting._matplotlib.misc import unpack_single_str_list\nfrom pandas.plotting._matplotlib.tools import (\n create_subplots,\n flatten_axes,\n maybe_adjust_figure,\n set_ticks_props,\n)\n\nif TYPE_CHECKING:\n from matplotlib.axes import Axes\n from matplotlib.figure import Figure\n\n from pandas._typing import PlottingOrientation\n\n from pandas import (\n DataFrame,\n Series,\n )\n\n\nclass HistPlot(LinePlot):\n @property\n def _kind(self) -> Literal["hist", "kde"]:\n return "hist"\n\n def __init__(\n self,\n data,\n bins: int | np.ndarray | list[np.ndarray] = 10,\n bottom: int | np.ndarray = 0,\n *,\n range=None,\n weights=None,\n **kwargs,\n ) -> None:\n if is_list_like(bottom):\n bottom = np.array(bottom)\n self.bottom = bottom\n\n self._bin_range = range\n self.weights = weights\n\n self.xlabel = kwargs.get("xlabel")\n self.ylabel = kwargs.get("ylabel")\n # Do not call LinePlot.__init__ which may fill nan\n MPLPlot.__init__(self, data, **kwargs) # pylint: disable=non-parent-init-called\n\n self.bins = self._adjust_bins(bins)\n\n def _adjust_bins(self, bins: int | np.ndarray | list[np.ndarray]):\n if is_integer(bins):\n if self.by is not None:\n by_modified = unpack_single_str_list(self.by)\n grouped = self.data.groupby(by_modified)[self.columns]\n bins = [self._calculate_bins(group, bins) for key, group in grouped]\n else:\n bins = self._calculate_bins(self.data, 
bins)\n return bins\n\n def _calculate_bins(self, data: Series | DataFrame, bins) -> np.ndarray:\n """Calculate bins given data"""\n nd_values = data.infer_objects(copy=False)._get_numeric_data()\n values = np.ravel(nd_values)\n values = values[~isna(values)]\n\n hist, bins = np.histogram(values, bins=bins, range=self._bin_range)\n return bins\n\n # error: Signature of "_plot" incompatible with supertype "LinePlot"\n @classmethod\n def _plot( # type: ignore[override]\n cls,\n ax: Axes,\n y: np.ndarray,\n style=None,\n bottom: int | np.ndarray = 0,\n column_num: int = 0,\n stacking_id=None,\n *,\n bins,\n **kwds,\n ):\n if column_num == 0:\n cls._initialize_stacker(ax, stacking_id, len(bins) - 1)\n\n base = np.zeros(len(bins) - 1)\n bottom = bottom + cls._get_stacked_values(ax, stacking_id, base, kwds["label"])\n # ignore style\n n, bins, patches = ax.hist(y, bins=bins, bottom=bottom, **kwds)\n cls._update_stacker(ax, stacking_id, n)\n return patches\n\n def _make_plot(self, fig: Figure) -> None:\n colors = self._get_colors()\n stacking_id = self._get_stacking_id()\n\n # Re-create iterated data if `by` is assigned by users\n data = (\n create_iter_data_given_by(self.data, self._kind)\n if self.by is not None\n else self.data\n )\n\n # error: Argument "data" to "_iter_data" of "MPLPlot" has incompatible\n # type "object"; expected "DataFrame | dict[Hashable, Series | DataFrame]"\n for i, (label, y) in enumerate(self._iter_data(data=data)): # type: ignore[arg-type]\n ax = self._get_ax(i)\n\n kwds = self.kwds.copy()\n if self.color is not None:\n kwds["color"] = self.color\n\n label = pprint_thing(label)\n label = self._mark_right_label(label, index=i)\n kwds["label"] = label\n\n style, kwds = self._apply_style_colors(colors, kwds, i, label)\n if style is not None:\n kwds["style"] = style\n\n self._make_plot_keywords(kwds, y)\n\n # the bins is multi-dimension array now and each plot need only 1-d and\n # when by is applied, label should be columns that are grouped\n if 
self.by is not None:\n kwds["bins"] = kwds["bins"][i]\n kwds["label"] = self.columns\n kwds.pop("color")\n\n if self.weights is not None:\n kwds["weights"] = type(self)._get_column_weights(self.weights, i, y)\n\n y = reformat_hist_y_given_by(y, self.by)\n\n artists = self._plot(ax, y, column_num=i, stacking_id=stacking_id, **kwds)\n\n # when by is applied, show title for subplots to know which group it is\n if self.by is not None:\n ax.set_title(pprint_thing(label))\n\n self._append_legend_handles_labels(artists[0], label)\n\n def _make_plot_keywords(self, kwds: dict[str, Any], y: np.ndarray) -> None:\n """merge BoxPlot/KdePlot properties to passed kwds"""\n # y is required for KdePlot\n kwds["bottom"] = self.bottom\n kwds["bins"] = self.bins\n\n @final\n @staticmethod\n def _get_column_weights(weights, i: int, y):\n # We allow weights to be a multi-dimensional array, e.g. a (10, 2) array,\n # and each sub-array (10,) will be called in each iteration. If users only\n # provide 1D array, we assume the same weights is used for all iterations\n if weights is not None:\n if np.ndim(weights) != 1 and np.shape(weights)[-1] != 1:\n try:\n weights = weights[:, i]\n except IndexError as err:\n raise ValueError(\n "weights must have the same shape as data, "\n "or be a single column"\n ) from err\n weights = weights[~isna(y)]\n return weights\n\n def _post_plot_logic(self, ax: Axes, data) -> None:\n if self.orientation == "horizontal":\n # error: Argument 1 to "set_xlabel" of "_AxesBase" has incompatible\n # type "Hashable"; expected "str"\n ax.set_xlabel(\n "Frequency"\n if self.xlabel is None\n else self.xlabel # type: ignore[arg-type]\n )\n ax.set_ylabel(self.ylabel) # type: ignore[arg-type]\n else:\n ax.set_xlabel(self.xlabel) # type: ignore[arg-type]\n ax.set_ylabel(\n "Frequency"\n if self.ylabel is None\n else self.ylabel # type: ignore[arg-type]\n )\n\n @property\n def orientation(self) -> PlottingOrientation:\n if self.kwds.get("orientation", None) == 
"horizontal":\n return "horizontal"\n else:\n return "vertical"\n\n\nclass KdePlot(HistPlot):\n @property\n def _kind(self) -> Literal["kde"]:\n return "kde"\n\n @property\n def orientation(self) -> Literal["vertical"]:\n return "vertical"\n\n def __init__(\n self, data, bw_method=None, ind=None, *, weights=None, **kwargs\n ) -> None:\n # Do not call LinePlot.__init__ which may fill nan\n MPLPlot.__init__(self, data, **kwargs) # pylint: disable=non-parent-init-called\n self.bw_method = bw_method\n self.ind = ind\n self.weights = weights\n\n @staticmethod\n def _get_ind(y: np.ndarray, ind):\n if ind is None:\n # np.nanmax() and np.nanmin() ignores the missing values\n sample_range = np.nanmax(y) - np.nanmin(y)\n ind = np.linspace(\n np.nanmin(y) - 0.5 * sample_range,\n np.nanmax(y) + 0.5 * sample_range,\n 1000,\n )\n elif is_integer(ind):\n sample_range = np.nanmax(y) - np.nanmin(y)\n ind = np.linspace(\n np.nanmin(y) - 0.5 * sample_range,\n np.nanmax(y) + 0.5 * sample_range,\n ind,\n )\n return ind\n\n @classmethod\n # error: Signature of "_plot" incompatible with supertype "MPLPlot"\n def _plot( # type: ignore[override]\n cls,\n ax: Axes,\n y: np.ndarray,\n style=None,\n bw_method=None,\n ind=None,\n column_num=None,\n stacking_id: int | None = None,\n **kwds,\n ):\n from scipy.stats import gaussian_kde\n\n y = remove_na_arraylike(y)\n gkde = gaussian_kde(y, bw_method=bw_method)\n\n y = gkde.evaluate(ind)\n lines = MPLPlot._plot(ax, ind, y, style=style, **kwds)\n return lines\n\n def _make_plot_keywords(self, kwds: dict[str, Any], y: np.ndarray) -> None:\n kwds["bw_method"] = self.bw_method\n kwds["ind"] = type(self)._get_ind(y, ind=self.ind)\n\n def _post_plot_logic(self, ax: Axes, data) -> None:\n ax.set_ylabel("Density")\n\n\ndef _grouped_plot(\n plotf,\n data: Series | DataFrame,\n column=None,\n by=None,\n numeric_only: bool = True,\n figsize: tuple[float, float] | None = None,\n sharex: bool = True,\n sharey: bool = True,\n layout=None,\n rot: float = 0,\n 
ax=None,\n **kwargs,\n):\n # error: Non-overlapping equality check (left operand type: "Optional[Tuple[float,\n # float]]", right operand type: "Literal['default']")\n if figsize == "default": # type: ignore[comparison-overlap]\n # allowed to specify mpl default with 'default'\n raise ValueError(\n "figsize='default' is no longer supported. "\n "Specify figure size by tuple instead"\n )\n\n grouped = data.groupby(by)\n if column is not None:\n grouped = grouped[column]\n\n naxes = len(grouped)\n fig, axes = create_subplots(\n naxes=naxes, figsize=figsize, sharex=sharex, sharey=sharey, ax=ax, layout=layout\n )\n\n _axes = flatten_axes(axes)\n\n for i, (key, group) in enumerate(grouped):\n ax = _axes[i]\n if numeric_only and isinstance(group, ABCDataFrame):\n group = group._get_numeric_data()\n plotf(group, ax, **kwargs)\n ax.set_title(pprint_thing(key))\n\n return fig, axes\n\n\ndef _grouped_hist(\n data: Series | DataFrame,\n column=None,\n by=None,\n ax=None,\n bins: int = 50,\n figsize: tuple[float, float] | None = None,\n layout=None,\n sharex: bool = False,\n sharey: bool = False,\n rot: float = 90,\n grid: bool = True,\n xlabelsize: int | None = None,\n xrot=None,\n ylabelsize: int | None = None,\n yrot=None,\n legend: bool = False,\n **kwargs,\n):\n """\n Grouped histogram\n\n Parameters\n ----------\n data : Series/DataFrame\n column : object, optional\n by : object, optional\n ax : axes, optional\n bins : int, default 50\n figsize : tuple, optional\n layout : optional\n sharex : bool, default False\n sharey : bool, default False\n rot : float, default 90\n grid : bool, default True\n legend: : bool, default False\n kwargs : dict, keyword arguments passed to matplotlib.Axes.hist\n\n Returns\n -------\n collection of Matplotlib Axes\n """\n if legend:\n assert "label" not in kwargs\n if data.ndim == 1:\n kwargs["label"] = data.name\n elif column is None:\n kwargs["label"] = data.columns\n else:\n kwargs["label"] = column\n\n def plot_group(group, ax) -> 
None:\n ax.hist(group.dropna().values, bins=bins, **kwargs)\n if legend:\n ax.legend()\n\n if xrot is None:\n xrot = rot\n\n fig, axes = _grouped_plot(\n plot_group,\n data,\n column=column,\n by=by,\n sharex=sharex,\n sharey=sharey,\n ax=ax,\n figsize=figsize,\n layout=layout,\n rot=rot,\n )\n\n set_ticks_props(\n axes, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot\n )\n\n maybe_adjust_figure(\n fig, bottom=0.15, top=0.9, left=0.1, right=0.9, hspace=0.5, wspace=0.3\n )\n return axes\n\n\ndef hist_series(\n self: Series,\n by=None,\n ax=None,\n grid: bool = True,\n xlabelsize: int | None = None,\n xrot=None,\n ylabelsize: int | None = None,\n yrot=None,\n figsize: tuple[float, float] | None = None,\n bins: int = 10,\n legend: bool = False,\n **kwds,\n):\n import matplotlib.pyplot as plt\n\n if legend and "label" in kwds:\n raise ValueError("Cannot use both legend and label")\n\n if by is None:\n if kwds.get("layout", None) is not None:\n raise ValueError("The 'layout' keyword is not supported when 'by' is None")\n # hack until the plotting interface is a bit more unified\n fig = kwds.pop(\n "figure", plt.gcf() if plt.get_fignums() else plt.figure(figsize=figsize)\n )\n if figsize is not None and tuple(figsize) != tuple(fig.get_size_inches()):\n fig.set_size_inches(*figsize, forward=True)\n if ax is None:\n ax = fig.gca()\n elif ax.get_figure() != fig:\n raise AssertionError("passed axis not bound to passed figure")\n values = self.dropna().values\n if legend:\n kwds["label"] = self.name\n ax.hist(values, bins=bins, **kwds)\n if legend:\n ax.legend()\n ax.grid(grid)\n axes = np.array([ax])\n\n # error: Argument 1 to "set_ticks_props" has incompatible type "ndarray[Any,\n # dtype[Any]]"; expected "Axes | Sequence[Axes]"\n set_ticks_props(\n axes, # type: ignore[arg-type]\n xlabelsize=xlabelsize,\n xrot=xrot,\n ylabelsize=ylabelsize,\n yrot=yrot,\n )\n\n else:\n if "figure" in kwds:\n raise ValueError(\n "Cannot pass 'figure' when using the "\n 
"'by' argument, since a new 'Figure' instance will be created"\n )\n axes = _grouped_hist(\n self,\n by=by,\n ax=ax,\n grid=grid,\n figsize=figsize,\n bins=bins,\n xlabelsize=xlabelsize,\n xrot=xrot,\n ylabelsize=ylabelsize,\n yrot=yrot,\n legend=legend,\n **kwds,\n )\n\n if hasattr(axes, "ndim"):\n if axes.ndim == 1 and len(axes) == 1:\n return axes[0]\n return axes\n\n\ndef hist_frame(\n data: DataFrame,\n column=None,\n by=None,\n grid: bool = True,\n xlabelsize: int | None = None,\n xrot=None,\n ylabelsize: int | None = None,\n yrot=None,\n ax=None,\n sharex: bool = False,\n sharey: bool = False,\n figsize: tuple[float, float] | None = None,\n layout=None,\n bins: int = 10,\n legend: bool = False,\n **kwds,\n):\n if legend and "label" in kwds:\n raise ValueError("Cannot use both legend and label")\n if by is not None:\n axes = _grouped_hist(\n data,\n column=column,\n by=by,\n ax=ax,\n grid=grid,\n figsize=figsize,\n sharex=sharex,\n sharey=sharey,\n layout=layout,\n bins=bins,\n xlabelsize=xlabelsize,\n xrot=xrot,\n ylabelsize=ylabelsize,\n yrot=yrot,\n legend=legend,\n **kwds,\n )\n return axes\n\n if column is not None:\n if not isinstance(column, (list, np.ndarray, ABCIndex)):\n column = [column]\n data = data[column]\n # GH32590\n data = data.select_dtypes(\n include=(np.number, "datetime64", "datetimetz"), exclude="timedelta"\n )\n naxes = len(data.columns)\n\n if naxes == 0:\n raise ValueError(\n "hist method requires numerical or datetime columns, nothing to plot."\n )\n\n fig, axes = create_subplots(\n naxes=naxes,\n ax=ax,\n squeeze=False,\n sharex=sharex,\n sharey=sharey,\n figsize=figsize,\n layout=layout,\n )\n _axes = flatten_axes(axes)\n\n can_set_label = "label" not in kwds\n\n for i, col in enumerate(data.columns):\n ax = _axes[i]\n if legend and can_set_label:\n kwds["label"] = col\n ax.hist(data[col].dropna().values, bins=bins, **kwds)\n ax.set_title(col)\n ax.grid(grid)\n if legend:\n ax.legend()\n\n set_ticks_props(\n axes, 
xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot\n )\n maybe_adjust_figure(fig, wspace=0.3, hspace=0.3)\n\n return axes\n | .venv\Lib\site-packages\pandas\plotting\_matplotlib\hist.py | hist.py | Python | 16,816 | 0.95 | 0.130809 | 0.071429 | python-kit | 802 | 2023-08-28T15:41:02.134340 | BSD-3-Clause | false | 7c745fa17044d2b76e8a24af84410103 |
from __future__ import annotations\n\nimport random\nfrom typing import TYPE_CHECKING\n\nfrom matplotlib import patches\nimport matplotlib.lines as mlines\nimport numpy as np\n\nfrom pandas.core.dtypes.missing import notna\n\nfrom pandas.io.formats.printing import pprint_thing\nfrom pandas.plotting._matplotlib.style import get_standard_colors\nfrom pandas.plotting._matplotlib.tools import (\n create_subplots,\n do_adjust_figure,\n maybe_adjust_figure,\n set_ticks_props,\n)\n\nif TYPE_CHECKING:\n from collections.abc import Hashable\n\n from matplotlib.axes import Axes\n from matplotlib.figure import Figure\n\n from pandas import (\n DataFrame,\n Index,\n Series,\n )\n\n\ndef scatter_matrix(\n frame: DataFrame,\n alpha: float = 0.5,\n figsize: tuple[float, float] | None = None,\n ax=None,\n grid: bool = False,\n diagonal: str = "hist",\n marker: str = ".",\n density_kwds=None,\n hist_kwds=None,\n range_padding: float = 0.05,\n **kwds,\n):\n df = frame._get_numeric_data()\n n = df.columns.size\n naxes = n * n\n fig, axes = create_subplots(naxes=naxes, figsize=figsize, ax=ax, squeeze=False)\n\n # no gaps between subplots\n maybe_adjust_figure(fig, wspace=0, hspace=0)\n\n mask = notna(df)\n\n marker = _get_marker_compat(marker)\n\n hist_kwds = hist_kwds or {}\n density_kwds = density_kwds or {}\n\n # GH 14855\n kwds.setdefault("edgecolors", "none")\n\n boundaries_list = []\n for a in df.columns:\n values = df[a].values[mask[a].values]\n rmin_, rmax_ = np.min(values), np.max(values)\n rdelta_ext = (rmax_ - rmin_) * range_padding / 2\n boundaries_list.append((rmin_ - rdelta_ext, rmax_ + rdelta_ext))\n\n for i, a in enumerate(df.columns):\n for j, b in enumerate(df.columns):\n ax = axes[i, j]\n\n if i == j:\n values = df[a].values[mask[a].values]\n\n # Deal with the diagonal by drawing a histogram there.\n if diagonal == "hist":\n ax.hist(values, **hist_kwds)\n\n elif diagonal in ("kde", "density"):\n from scipy.stats import gaussian_kde\n\n y = values\n gkde = 
gaussian_kde(y)\n ind = np.linspace(y.min(), y.max(), 1000)\n ax.plot(ind, gkde.evaluate(ind), **density_kwds)\n\n ax.set_xlim(boundaries_list[i])\n\n else:\n common = (mask[a] & mask[b]).values\n\n ax.scatter(\n df[b][common], df[a][common], marker=marker, alpha=alpha, **kwds\n )\n\n ax.set_xlim(boundaries_list[j])\n ax.set_ylim(boundaries_list[i])\n\n ax.set_xlabel(b)\n ax.set_ylabel(a)\n\n if j != 0:\n ax.yaxis.set_visible(False)\n if i != n - 1:\n ax.xaxis.set_visible(False)\n\n if len(df.columns) > 1:\n lim1 = boundaries_list[0]\n locs = axes[0][1].yaxis.get_majorticklocs()\n locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])]\n adj = (locs - lim1[0]) / (lim1[1] - lim1[0])\n\n lim0 = axes[0][0].get_ylim()\n adj = adj * (lim0[1] - lim0[0]) + lim0[0]\n axes[0][0].yaxis.set_ticks(adj)\n\n if np.all(locs == locs.astype(int)):\n # if all ticks are int\n locs = locs.astype(int)\n axes[0][0].yaxis.set_ticklabels(locs)\n\n set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)\n\n return axes\n\n\ndef _get_marker_compat(marker):\n if marker not in mlines.lineMarkers:\n return "o"\n return marker\n\n\ndef radviz(\n frame: DataFrame,\n class_column,\n ax: Axes | None = None,\n color=None,\n colormap=None,\n **kwds,\n) -> Axes:\n import matplotlib.pyplot as plt\n\n def normalize(series):\n a = min(series)\n b = max(series)\n return (series - a) / (b - a)\n\n n = len(frame)\n classes = frame[class_column].drop_duplicates()\n class_col = frame[class_column]\n df = frame.drop(class_column, axis=1).apply(normalize)\n\n if ax is None:\n ax = plt.gca()\n ax.set_xlim(-1, 1)\n ax.set_ylim(-1, 1)\n\n to_plot: dict[Hashable, list[list]] = {}\n colors = get_standard_colors(\n num_colors=len(classes), colormap=colormap, color_type="random", color=color\n )\n\n for kls in classes:\n to_plot[kls] = [[], []]\n\n m = len(frame.columns) - 1\n s = np.array(\n [(np.cos(t), np.sin(t)) for t in [2 * np.pi * (i / m) for i in range(m)]]\n )\n\n for i in range(n):\n row = 
df.iloc[i].values\n row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)\n y = (s * row_).sum(axis=0) / row.sum()\n kls = class_col.iat[i]\n to_plot[kls][0].append(y[0])\n to_plot[kls][1].append(y[1])\n\n for i, kls in enumerate(classes):\n ax.scatter(\n to_plot[kls][0],\n to_plot[kls][1],\n color=colors[i],\n label=pprint_thing(kls),\n **kwds,\n )\n ax.legend()\n\n ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor="none"))\n\n for xy, name in zip(s, df.columns):\n ax.add_patch(patches.Circle(xy, radius=0.025, facecolor="gray"))\n\n if xy[0] < 0.0 and xy[1] < 0.0:\n ax.text(\n xy[0] - 0.025, xy[1] - 0.025, name, ha="right", va="top", size="small"\n )\n elif xy[0] < 0.0 <= xy[1]:\n ax.text(\n xy[0] - 0.025,\n xy[1] + 0.025,\n name,\n ha="right",\n va="bottom",\n size="small",\n )\n elif xy[1] < 0.0 <= xy[0]:\n ax.text(\n xy[0] + 0.025, xy[1] - 0.025, name, ha="left", va="top", size="small"\n )\n elif xy[0] >= 0.0 and xy[1] >= 0.0:\n ax.text(\n xy[0] + 0.025, xy[1] + 0.025, name, ha="left", va="bottom", size="small"\n )\n\n ax.axis("equal")\n return ax\n\n\ndef andrews_curves(\n frame: DataFrame,\n class_column,\n ax: Axes | None = None,\n samples: int = 200,\n color=None,\n colormap=None,\n **kwds,\n) -> Axes:\n import matplotlib.pyplot as plt\n\n def function(amplitudes):\n def f(t):\n x1 = amplitudes[0]\n result = x1 / np.sqrt(2.0)\n\n # Take the rest of the coefficients and resize them\n # appropriately. 
Take a copy of amplitudes as otherwise numpy\n # deletes the element from amplitudes itself.\n coeffs = np.delete(np.copy(amplitudes), 0)\n coeffs = np.resize(coeffs, (int((coeffs.size + 1) / 2), 2))\n\n # Generate the harmonics and arguments for the sin and cos\n # functions.\n harmonics = np.arange(0, coeffs.shape[0]) + 1\n trig_args = np.outer(harmonics, t)\n\n result += np.sum(\n coeffs[:, 0, np.newaxis] * np.sin(trig_args)\n + coeffs[:, 1, np.newaxis] * np.cos(trig_args),\n axis=0,\n )\n return result\n\n return f\n\n n = len(frame)\n class_col = frame[class_column]\n classes = frame[class_column].drop_duplicates()\n df = frame.drop(class_column, axis=1)\n t = np.linspace(-np.pi, np.pi, samples)\n used_legends: set[str] = set()\n\n color_values = get_standard_colors(\n num_colors=len(classes), colormap=colormap, color_type="random", color=color\n )\n colors = dict(zip(classes, color_values))\n if ax is None:\n ax = plt.gca()\n ax.set_xlim(-np.pi, np.pi)\n for i in range(n):\n row = df.iloc[i].values\n f = function(row)\n y = f(t)\n kls = class_col.iat[i]\n label = pprint_thing(kls)\n if label not in used_legends:\n used_legends.add(label)\n ax.plot(t, y, color=colors[kls], label=label, **kwds)\n else:\n ax.plot(t, y, color=colors[kls], **kwds)\n\n ax.legend(loc="upper right")\n ax.grid()\n return ax\n\n\ndef bootstrap_plot(\n series: Series,\n fig: Figure | None = None,\n size: int = 50,\n samples: int = 500,\n **kwds,\n) -> Figure:\n import matplotlib.pyplot as plt\n\n # TODO: is the failure mentioned below still relevant?\n # random.sample(ndarray, int) fails on python 3.3, sigh\n data = list(series.values)\n samplings = [random.sample(data, size) for _ in range(samples)]\n\n means = np.array([np.mean(sampling) for sampling in samplings])\n medians = np.array([np.median(sampling) for sampling in samplings])\n midranges = np.array(\n [(min(sampling) + max(sampling)) * 0.5 for sampling in samplings]\n )\n if fig is None:\n fig = plt.figure()\n x = 
list(range(samples))\n axes = []\n ax1 = fig.add_subplot(2, 3, 1)\n ax1.set_xlabel("Sample")\n axes.append(ax1)\n ax1.plot(x, means, **kwds)\n ax2 = fig.add_subplot(2, 3, 2)\n ax2.set_xlabel("Sample")\n axes.append(ax2)\n ax2.plot(x, medians, **kwds)\n ax3 = fig.add_subplot(2, 3, 3)\n ax3.set_xlabel("Sample")\n axes.append(ax3)\n ax3.plot(x, midranges, **kwds)\n ax4 = fig.add_subplot(2, 3, 4)\n ax4.set_xlabel("Mean")\n axes.append(ax4)\n ax4.hist(means, **kwds)\n ax5 = fig.add_subplot(2, 3, 5)\n ax5.set_xlabel("Median")\n axes.append(ax5)\n ax5.hist(medians, **kwds)\n ax6 = fig.add_subplot(2, 3, 6)\n ax6.set_xlabel("Midrange")\n axes.append(ax6)\n ax6.hist(midranges, **kwds)\n for axis in axes:\n plt.setp(axis.get_xticklabels(), fontsize=8)\n plt.setp(axis.get_yticklabels(), fontsize=8)\n if do_adjust_figure(fig):\n plt.tight_layout()\n return fig\n\n\ndef parallel_coordinates(\n frame: DataFrame,\n class_column,\n cols=None,\n ax: Axes | None = None,\n color=None,\n use_columns: bool = False,\n xticks=None,\n colormap=None,\n axvlines: bool = True,\n axvlines_kwds=None,\n sort_labels: bool = False,\n **kwds,\n) -> Axes:\n import matplotlib.pyplot as plt\n\n if axvlines_kwds is None:\n axvlines_kwds = {"linewidth": 1, "color": "black"}\n\n n = len(frame)\n classes = frame[class_column].drop_duplicates()\n class_col = frame[class_column]\n\n if cols is None:\n df = frame.drop(class_column, axis=1)\n else:\n df = frame[cols]\n\n used_legends: set[str] = set()\n\n ncols = len(df.columns)\n\n # determine values to use for xticks\n x: list[int] | Index\n if use_columns is True:\n if not np.all(np.isreal(list(df.columns))):\n raise ValueError("Columns must be numeric to be used as xticks")\n x = df.columns\n elif xticks is not None:\n if not np.all(np.isreal(xticks)):\n raise ValueError("xticks specified must be numeric")\n if len(xticks) != ncols:\n raise ValueError("Length of xticks must match number of columns")\n x = xticks\n else:\n x = list(range(ncols))\n\n if ax 
is None:\n ax = plt.gca()\n\n color_values = get_standard_colors(\n num_colors=len(classes), colormap=colormap, color_type="random", color=color\n )\n\n if sort_labels:\n classes = sorted(classes)\n color_values = sorted(color_values)\n colors = dict(zip(classes, color_values))\n\n for i in range(n):\n y = df.iloc[i].values\n kls = class_col.iat[i]\n label = pprint_thing(kls)\n if label not in used_legends:\n used_legends.add(label)\n ax.plot(x, y, color=colors[kls], label=label, **kwds)\n else:\n ax.plot(x, y, color=colors[kls], **kwds)\n\n if axvlines:\n for i in x:\n ax.axvline(i, **axvlines_kwds)\n\n ax.set_xticks(x)\n ax.set_xticklabels(df.columns)\n ax.set_xlim(x[0], x[-1])\n ax.legend(loc="upper right")\n ax.grid()\n return ax\n\n\ndef lag_plot(series: Series, lag: int = 1, ax: Axes | None = None, **kwds) -> Axes:\n # workaround because `c='b'` is hardcoded in matplotlib's scatter method\n import matplotlib.pyplot as plt\n\n kwds.setdefault("c", plt.rcParams["patch.facecolor"])\n\n data = series.values\n y1 = data[:-lag]\n y2 = data[lag:]\n if ax is None:\n ax = plt.gca()\n ax.set_xlabel("y(t)")\n ax.set_ylabel(f"y(t + {lag})")\n ax.scatter(y1, y2, **kwds)\n return ax\n\n\ndef autocorrelation_plot(series: Series, ax: Axes | None = None, **kwds) -> Axes:\n import matplotlib.pyplot as plt\n\n n = len(series)\n data = np.asarray(series)\n if ax is None:\n ax = plt.gca()\n ax.set_xlim(1, n)\n ax.set_ylim(-1.0, 1.0)\n mean = np.mean(data)\n c0 = np.sum((data - mean) ** 2) / n\n\n def r(h):\n return ((data[: n - h] - mean) * (data[h:] - mean)).sum() / n / c0\n\n x = np.arange(n) + 1\n y = [r(loc) for loc in x]\n z95 = 1.959963984540054\n z99 = 2.5758293035489004\n ax.axhline(y=z99 / np.sqrt(n), linestyle="--", color="grey")\n ax.axhline(y=z95 / np.sqrt(n), color="grey")\n ax.axhline(y=0.0, color="black")\n ax.axhline(y=-z95 / np.sqrt(n), color="grey")\n ax.axhline(y=-z99 / np.sqrt(n), linestyle="--", color="grey")\n ax.set_xlabel("Lag")\n 
ax.set_ylabel("Autocorrelation")\n ax.plot(x, y, **kwds)\n if "label" in kwds:\n ax.legend()\n ax.grid()\n return ax\n\n\ndef unpack_single_str_list(keys):\n # GH 42795\n if isinstance(keys, list) and len(keys) == 1:\n keys = keys[0]\n return keys\n | .venv\Lib\site-packages\pandas\plotting\_matplotlib\misc.py | misc.py | Python | 13,358 | 0.95 | 0.133056 | 0.050633 | react-lib | 641 | 2025-06-29T02:43:18.295870 | GPL-3.0 | false | 5a3b7c161e3d3f683b59b8524a764075 |
from __future__ import annotations\n\nfrom collections.abc import (\n Collection,\n Iterator,\n)\nimport itertools\nfrom typing import (\n TYPE_CHECKING,\n cast,\n)\nimport warnings\n\nimport matplotlib as mpl\nimport matplotlib.colors\nimport numpy as np\n\nfrom pandas._typing import MatplotlibColor as Color\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.common import is_list_like\n\nimport pandas.core.common as com\n\nif TYPE_CHECKING:\n from matplotlib.colors import Colormap\n\n\ndef get_standard_colors(\n num_colors: int,\n colormap: Colormap | None = None,\n color_type: str = "default",\n color: dict[str, Color] | Color | Collection[Color] | None = None,\n):\n """\n Get standard colors based on `colormap`, `color_type` or `color` inputs.\n\n Parameters\n ----------\n num_colors : int\n Minimum number of colors to be returned.\n Ignored if `color` is a dictionary.\n colormap : :py:class:`matplotlib.colors.Colormap`, optional\n Matplotlib colormap.\n When provided, the resulting colors will be derived from the colormap.\n color_type : {"default", "random"}, optional\n Type of colors to derive. Used if provided `color` and `colormap` are None.\n Ignored if either `color` or `colormap` are not None.\n color : dict or str or sequence, optional\n Color(s) to be used for deriving sequence of colors.\n Can be either be a dictionary, or a single color (single color string,\n or sequence of floats representing a single color),\n or a sequence of colors.\n\n Returns\n -------\n dict or list\n Standard colors. 
Can either be a mapping if `color` was a dictionary,\n or a list of colors with a length of `num_colors` or more.\n\n Warns\n -----\n UserWarning\n If both `colormap` and `color` are provided.\n Parameter `color` will override.\n """\n if isinstance(color, dict):\n return color\n\n colors = _derive_colors(\n color=color,\n colormap=colormap,\n color_type=color_type,\n num_colors=num_colors,\n )\n\n return list(_cycle_colors(colors, num_colors=num_colors))\n\n\ndef _derive_colors(\n *,\n color: Color | Collection[Color] | None,\n colormap: str | Colormap | None,\n color_type: str,\n num_colors: int,\n) -> list[Color]:\n """\n Derive colors from either `colormap`, `color_type` or `color` inputs.\n\n Get a list of colors either from `colormap`, or from `color`,\n or from `color_type` (if both `colormap` and `color` are None).\n\n Parameters\n ----------\n color : str or sequence, optional\n Color(s) to be used for deriving sequence of colors.\n Can be either be a single color (single color string, or sequence of floats\n representing a single color), or a sequence of colors.\n colormap : :py:class:`matplotlib.colors.Colormap`, optional\n Matplotlib colormap.\n When provided, the resulting colors will be derived from the colormap.\n color_type : {"default", "random"}, optional\n Type of colors to derive. Used if provided `color` and `colormap` are None.\n Ignored if either `color` or `colormap`` are not None.\n num_colors : int\n Number of colors to be extracted.\n\n Returns\n -------\n list\n List of colors extracted.\n\n Warns\n -----\n UserWarning\n If both `colormap` and `color` are provided.\n Parameter `color` will override.\n """\n if color is None and colormap is not None:\n return _get_colors_from_colormap(colormap, num_colors=num_colors)\n elif color is not None:\n if colormap is not None:\n warnings.warn(\n "'color' and 'colormap' cannot be used simultaneously. 
Using 'color'",\n stacklevel=find_stack_level(),\n )\n return _get_colors_from_color(color)\n else:\n return _get_colors_from_color_type(color_type, num_colors=num_colors)\n\n\ndef _cycle_colors(colors: list[Color], num_colors: int) -> Iterator[Color]:\n """Cycle colors until achieving max of `num_colors` or length of `colors`.\n\n Extra colors will be ignored by matplotlib if there are more colors\n than needed and nothing needs to be done here.\n """\n max_colors = max(num_colors, len(colors))\n yield from itertools.islice(itertools.cycle(colors), max_colors)\n\n\ndef _get_colors_from_colormap(\n colormap: str | Colormap,\n num_colors: int,\n) -> list[Color]:\n """Get colors from colormap."""\n cmap = _get_cmap_instance(colormap)\n return [cmap(num) for num in np.linspace(0, 1, num=num_colors)]\n\n\ndef _get_cmap_instance(colormap: str | Colormap) -> Colormap:\n """Get instance of matplotlib colormap."""\n if isinstance(colormap, str):\n cmap = colormap\n colormap = mpl.colormaps[colormap]\n if colormap is None:\n raise ValueError(f"Colormap {cmap} is not recognized")\n return colormap\n\n\ndef _get_colors_from_color(\n color: Color | Collection[Color],\n) -> list[Color]:\n """Get colors from user input color."""\n if len(color) == 0:\n raise ValueError(f"Invalid color argument: {color}")\n\n if _is_single_color(color):\n color = cast(Color, color)\n return [color]\n\n color = cast(Collection[Color], color)\n return list(_gen_list_of_colors_from_iterable(color))\n\n\ndef _is_single_color(color: Color | Collection[Color]) -> bool:\n """Check if `color` is a single color, not a sequence of colors.\n\n Single color is of these kinds:\n - Named color "red", "C0", "firebrick"\n - Alias "g"\n - Sequence of floats, such as (0.1, 0.2, 0.3) or (0.1, 0.2, 0.3, 0.4).\n\n See Also\n --------\n _is_single_string_color\n """\n if isinstance(color, str) and _is_single_string_color(color):\n # GH #36972\n return True\n\n if _is_floats_color(color):\n return True\n\n return 
False\n\n\ndef _gen_list_of_colors_from_iterable(color: Collection[Color]) -> Iterator[Color]:\n """\n Yield colors from string of several letters or from collection of colors.\n """\n for x in color:\n if _is_single_color(x):\n yield x\n else:\n raise ValueError(f"Invalid color {x}")\n\n\ndef _is_floats_color(color: Color | Collection[Color]) -> bool:\n """Check if color comprises a sequence of floats representing color."""\n return bool(\n is_list_like(color)\n and (len(color) == 3 or len(color) == 4)\n and all(isinstance(x, (int, float)) for x in color)\n )\n\n\ndef _get_colors_from_color_type(color_type: str, num_colors: int) -> list[Color]:\n """Get colors from user input color type."""\n if color_type == "default":\n return _get_default_colors(num_colors)\n elif color_type == "random":\n return _get_random_colors(num_colors)\n else:\n raise ValueError("color_type must be either 'default' or 'random'")\n\n\ndef _get_default_colors(num_colors: int) -> list[Color]:\n """Get `num_colors` of default colors from matplotlib rc params."""\n import matplotlib.pyplot as plt\n\n colors = [c["color"] for c in plt.rcParams["axes.prop_cycle"]]\n return colors[0:num_colors]\n\n\ndef _get_random_colors(num_colors: int) -> list[Color]:\n """Get `num_colors` of random colors."""\n return [_random_color(num) for num in range(num_colors)]\n\n\ndef _random_color(column: int) -> list[float]:\n """Get a random color represented as a list of length 3"""\n # GH17525 use common._random_state to avoid resetting the seed\n rs = com.random_state(column)\n return rs.rand(3).tolist()\n\n\ndef _is_single_string_color(color: Color) -> bool:\n """Check if `color` is a single string color.\n\n Examples of single string colors:\n - 'r'\n - 'g'\n - 'red'\n - 'green'\n - 'C3'\n - 'firebrick'\n\n Parameters\n ----------\n color : Color\n Color string or sequence of floats.\n\n Returns\n -------\n bool\n True if `color` looks like a valid color.\n False otherwise.\n """\n conv = 
matplotlib.colors.ColorConverter()\n try:\n # error: Argument 1 to "to_rgba" of "ColorConverter" has incompatible type\n # "str | Sequence[float]"; expected "tuple[float, float, float] | ..."\n conv.to_rgba(color) # type: ignore[arg-type]\n except ValueError:\n return False\n else:\n return True\n | .venv\Lib\site-packages\pandas\plotting\_matplotlib\style.py | style.py | Python | 8,368 | 0.95 | 0.172662 | 0.022321 | awesome-app | 383 | 2024-05-01T10:05:14.256705 | Apache-2.0 | false | 9bd81bf7ab92a46ea62c96a02f18109e |
# TODO: Use the fact that axis can have units to simplify the process\n\nfrom __future__ import annotations\n\nimport functools\nfrom typing import (\n TYPE_CHECKING,\n Any,\n cast,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs.tslibs import (\n BaseOffset,\n Period,\n to_offset,\n)\nfrom pandas._libs.tslibs.dtypes import (\n OFFSET_TO_PERIOD_FREQSTR,\n FreqGroup,\n)\n\nfrom pandas.core.dtypes.generic import (\n ABCDatetimeIndex,\n ABCPeriodIndex,\n ABCTimedeltaIndex,\n)\n\nfrom pandas.io.formats.printing import pprint_thing\nfrom pandas.plotting._matplotlib.converter import (\n TimeSeries_DateFormatter,\n TimeSeries_DateLocator,\n TimeSeries_TimedeltaFormatter,\n)\nfrom pandas.tseries.frequencies import (\n get_period_alias,\n is_subperiod,\n is_superperiod,\n)\n\nif TYPE_CHECKING:\n from datetime import timedelta\n\n from matplotlib.axes import Axes\n\n from pandas._typing import NDFrameT\n\n from pandas import (\n DataFrame,\n DatetimeIndex,\n Index,\n PeriodIndex,\n Series,\n )\n\n# ---------------------------------------------------------------------\n# Plotting functions and monkey patches\n\n\ndef maybe_resample(series: Series, ax: Axes, kwargs: dict[str, Any]):\n # resample against axes freq if necessary\n\n if "how" in kwargs:\n raise ValueError(\n "'how' is not a valid keyword for plotting functions. 
If plotting "\n "multiple objects on shared axes, resample manually first."\n )\n\n freq, ax_freq = _get_freq(ax, series)\n\n if freq is None: # pragma: no cover\n raise ValueError("Cannot use dynamic axis without frequency info")\n\n # Convert DatetimeIndex to PeriodIndex\n if isinstance(series.index, ABCDatetimeIndex):\n series = series.to_period(freq=freq)\n\n if ax_freq is not None and freq != ax_freq:\n if is_superperiod(freq, ax_freq): # upsample input\n series = series.copy()\n # error: "Index" has no attribute "asfreq"\n series.index = series.index.asfreq( # type: ignore[attr-defined]\n ax_freq, how="s"\n )\n freq = ax_freq\n elif _is_sup(freq, ax_freq): # one is weekly\n # Resampling with PeriodDtype is deprecated, so we convert to\n # DatetimeIndex, resample, then convert back.\n ser_ts = series.to_timestamp()\n ser_d = ser_ts.resample("D").last().dropna()\n ser_freq = ser_d.resample(ax_freq).last().dropna()\n series = ser_freq.to_period(ax_freq)\n freq = ax_freq\n elif is_subperiod(freq, ax_freq) or _is_sub(freq, ax_freq):\n _upsample_others(ax, freq, kwargs)\n else: # pragma: no cover\n raise ValueError("Incompatible frequency conversion")\n return freq, series\n\n\ndef _is_sub(f1: str, f2: str) -> bool:\n return (f1.startswith("W") and is_subperiod("D", f2)) or (\n f2.startswith("W") and is_subperiod(f1, "D")\n )\n\n\ndef _is_sup(f1: str, f2: str) -> bool:\n return (f1.startswith("W") and is_superperiod("D", f2)) or (\n f2.startswith("W") and is_superperiod(f1, "D")\n )\n\n\ndef _upsample_others(ax: Axes, freq: BaseOffset, kwargs: dict[str, Any]) -> None:\n legend = ax.get_legend()\n lines, labels = _replot_ax(ax, freq)\n _replot_ax(ax, freq)\n\n other_ax = None\n if hasattr(ax, "left_ax"):\n other_ax = ax.left_ax\n if hasattr(ax, "right_ax"):\n other_ax = ax.right_ax\n\n if other_ax is not None:\n rlines, rlabels = _replot_ax(other_ax, freq)\n lines.extend(rlines)\n labels.extend(rlabels)\n\n if legend is not None and kwargs.get("legend", True) and 
len(lines) > 0:\n title: str | None = legend.get_title().get_text()\n if title == "None":\n title = None\n ax.legend(lines, labels, loc="best", title=title)\n\n\ndef _replot_ax(ax: Axes, freq: BaseOffset):\n data = getattr(ax, "_plot_data", None)\n\n # clear current axes and data\n # TODO #54485\n ax._plot_data = [] # type: ignore[attr-defined]\n ax.clear()\n\n decorate_axes(ax, freq)\n\n lines = []\n labels = []\n if data is not None:\n for series, plotf, kwds in data:\n series = series.copy()\n idx = series.index.asfreq(freq, how="S")\n series.index = idx\n # TODO #54485\n ax._plot_data.append((series, plotf, kwds)) # type: ignore[attr-defined]\n\n # for tsplot\n if isinstance(plotf, str):\n from pandas.plotting._matplotlib import PLOT_CLASSES\n\n plotf = PLOT_CLASSES[plotf]._plot\n\n lines.append(plotf(ax, series.index._mpl_repr(), series.values, **kwds)[0])\n labels.append(pprint_thing(series.name))\n\n return lines, labels\n\n\ndef decorate_axes(ax: Axes, freq: BaseOffset) -> None:\n """Initialize axes for time-series plotting"""\n if not hasattr(ax, "_plot_data"):\n # TODO #54485\n ax._plot_data = [] # type: ignore[attr-defined]\n\n # TODO #54485\n ax.freq = freq # type: ignore[attr-defined]\n xaxis = ax.get_xaxis()\n # TODO #54485\n xaxis.freq = freq # type: ignore[attr-defined]\n\n\ndef _get_ax_freq(ax: Axes):\n """\n Get the freq attribute of the ax object if set.\n Also checks shared axes (eg when using secondary yaxis, sharex=True\n or twinx)\n """\n ax_freq = getattr(ax, "freq", None)\n if ax_freq is None:\n # check for left/right ax in case of secondary yaxis\n if hasattr(ax, "left_ax"):\n ax_freq = getattr(ax.left_ax, "freq", None)\n elif hasattr(ax, "right_ax"):\n ax_freq = getattr(ax.right_ax, "freq", None)\n if ax_freq is None:\n # check if a shared ax (sharex/twinx) has already freq set\n shared_axes = ax.get_shared_x_axes().get_siblings(ax)\n if len(shared_axes) > 1:\n for shared_ax in shared_axes:\n ax_freq = getattr(shared_ax, "freq", None)\n 
if ax_freq is not None:\n break\n return ax_freq\n\n\ndef _get_period_alias(freq: timedelta | BaseOffset | str) -> str | None:\n if isinstance(freq, BaseOffset):\n freqstr = freq.name\n else:\n freqstr = to_offset(freq, is_period=True).rule_code\n\n return get_period_alias(freqstr)\n\n\ndef _get_freq(ax: Axes, series: Series):\n # get frequency from data\n freq = getattr(series.index, "freq", None)\n if freq is None:\n freq = getattr(series.index, "inferred_freq", None)\n freq = to_offset(freq, is_period=True)\n\n ax_freq = _get_ax_freq(ax)\n\n # use axes freq if no data freq\n if freq is None:\n freq = ax_freq\n\n # get the period frequency\n freq = _get_period_alias(freq)\n return freq, ax_freq\n\n\ndef use_dynamic_x(ax: Axes, data: DataFrame | Series) -> bool:\n freq = _get_index_freq(data.index)\n ax_freq = _get_ax_freq(ax)\n\n if freq is None: # convert irregular if axes has freq info\n freq = ax_freq\n # do not use tsplot if irregular was plotted first\n elif (ax_freq is None) and (len(ax.get_lines()) > 0):\n return False\n\n if freq is None:\n return False\n\n freq_str = _get_period_alias(freq)\n\n if freq_str is None:\n return False\n\n # FIXME: hack this for 0.10.1, creating more technical debt...sigh\n if isinstance(data.index, ABCDatetimeIndex):\n # error: "BaseOffset" has no attribute "_period_dtype_code"\n freq_str = OFFSET_TO_PERIOD_FREQSTR.get(freq_str, freq_str)\n base = to_offset(\n freq_str, is_period=True\n )._period_dtype_code # type: ignore[attr-defined]\n x = data.index\n if base <= FreqGroup.FR_DAY.value:\n return x[:1].is_normalized\n period = Period(x[0], freq_str)\n assert isinstance(period, Period)\n return period.to_timestamp().tz_localize(x.tz) == x[0]\n return True\n\n\ndef _get_index_freq(index: Index) -> BaseOffset | None:\n freq = getattr(index, "freq", None)\n if freq is None:\n freq = getattr(index, "inferred_freq", None)\n if freq == "B":\n # error: "Index" has no attribute "dayofweek"\n weekdays = np.unique(index.dayofweek) # 
type: ignore[attr-defined]\n if (5 in weekdays) or (6 in weekdays):\n freq = None\n\n freq = to_offset(freq)\n return freq\n\n\ndef maybe_convert_index(ax: Axes, data: NDFrameT) -> NDFrameT:\n # tsplot converts automatically, but don't want to convert index\n # over and over for DataFrames\n if isinstance(data.index, (ABCDatetimeIndex, ABCPeriodIndex)):\n freq: str | BaseOffset | None = data.index.freq\n\n if freq is None:\n # We only get here for DatetimeIndex\n data.index = cast("DatetimeIndex", data.index)\n freq = data.index.inferred_freq\n freq = to_offset(freq)\n\n if freq is None:\n freq = _get_ax_freq(ax)\n\n if freq is None:\n raise ValueError("Could not get frequency alias for plotting")\n\n freq_str = _get_period_alias(freq)\n\n with warnings.catch_warnings():\n # suppress Period[B] deprecation warning\n # TODO: need to find an alternative to this before the deprecation\n # is enforced!\n warnings.filterwarnings(\n "ignore",\n r"PeriodDtype\[B\] is deprecated",\n category=FutureWarning,\n )\n\n if isinstance(data.index, ABCDatetimeIndex):\n data = data.tz_localize(None).to_period(freq=freq_str)\n elif isinstance(data.index, ABCPeriodIndex):\n data.index = data.index.asfreq(freq=freq_str)\n return data\n\n\n# Patch methods for subplot.\n\n\ndef _format_coord(freq, t, y) -> str:\n time_period = Period(ordinal=int(t), freq=freq)\n return f"t = {time_period} y = {y:8f}"\n\n\ndef format_dateaxis(\n subplot, freq: BaseOffset, index: DatetimeIndex | PeriodIndex\n) -> None:\n """\n Pretty-formats the date axis (x-axis).\n\n Major and minor ticks are automatically set for the frequency of the\n current underlying series. As the dynamic mode is activated by\n default, changing the limits of the x axis will intelligently change\n the positions of the ticks.\n """\n from matplotlib import pylab\n\n # handle index specific formatting\n # Note: DatetimeIndex does not use this\n # interface. 
DatetimeIndex uses matplotlib.date directly\n if isinstance(index, ABCPeriodIndex):\n majlocator = TimeSeries_DateLocator(\n freq, dynamic_mode=True, minor_locator=False, plot_obj=subplot\n )\n minlocator = TimeSeries_DateLocator(\n freq, dynamic_mode=True, minor_locator=True, plot_obj=subplot\n )\n subplot.xaxis.set_major_locator(majlocator)\n subplot.xaxis.set_minor_locator(minlocator)\n\n majformatter = TimeSeries_DateFormatter(\n freq, dynamic_mode=True, minor_locator=False, plot_obj=subplot\n )\n minformatter = TimeSeries_DateFormatter(\n freq, dynamic_mode=True, minor_locator=True, plot_obj=subplot\n )\n subplot.xaxis.set_major_formatter(majformatter)\n subplot.xaxis.set_minor_formatter(minformatter)\n\n # x and y coord info\n subplot.format_coord = functools.partial(_format_coord, freq)\n\n elif isinstance(index, ABCTimedeltaIndex):\n subplot.xaxis.set_major_formatter(TimeSeries_TimedeltaFormatter())\n else:\n raise TypeError("index type not supported")\n\n pylab.draw_if_interactive()\n | .venv\Lib\site-packages\pandas\plotting\_matplotlib\timeseries.py | timeseries.py | Python | 11,531 | 0.95 | 0.183784 | 0.120275 | vue-tools | 368 | 2024-04-16T20:59:25.684648 | Apache-2.0 | false | 9af79cff3cb2627bca090202496cf9b5 |
# being a bit too dynamic\nfrom __future__ import annotations\n\nfrom math import ceil\nfrom typing import TYPE_CHECKING\nimport warnings\n\nfrom matplotlib import ticker\nimport matplotlib.table\nimport numpy as np\n\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.common import is_list_like\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCIndex,\n ABCSeries,\n)\n\nif TYPE_CHECKING:\n from collections.abc import (\n Iterable,\n Sequence,\n )\n\n from matplotlib.axes import Axes\n from matplotlib.axis import Axis\n from matplotlib.figure import Figure\n from matplotlib.lines import Line2D\n from matplotlib.table import Table\n\n from pandas import (\n DataFrame,\n Series,\n )\n\n\ndef do_adjust_figure(fig: Figure) -> bool:\n """Whether fig has constrained_layout enabled."""\n if not hasattr(fig, "get_constrained_layout"):\n return False\n return not fig.get_constrained_layout()\n\n\ndef maybe_adjust_figure(fig: Figure, *args, **kwargs) -> None:\n """Call fig.subplots_adjust unless fig has constrained_layout enabled."""\n if do_adjust_figure(fig):\n fig.subplots_adjust(*args, **kwargs)\n\n\ndef format_date_labels(ax: Axes, rot) -> None:\n # mini version of autofmt_xdate\n for label in ax.get_xticklabels():\n label.set_horizontalalignment("right")\n label.set_rotation(rot)\n fig = ax.get_figure()\n if fig is not None:\n # should always be a Figure but can technically be None\n maybe_adjust_figure(fig, bottom=0.2) # type: ignore[arg-type]\n\n\ndef table(\n ax, data: DataFrame | Series, rowLabels=None, colLabels=None, **kwargs\n) -> Table:\n if isinstance(data, ABCSeries):\n data = data.to_frame()\n elif isinstance(data, ABCDataFrame):\n pass\n else:\n raise ValueError("Input data must be DataFrame or Series")\n\n if rowLabels is None:\n rowLabels = data.index\n\n if colLabels is None:\n colLabels = data.columns\n\n cellText = data.values\n\n # error: Argument "cellText" to "table" has incompatible type "ndarray[Any,\n # 
Any]"; expected "Sequence[Sequence[str]] | None"\n return matplotlib.table.table(\n ax,\n cellText=cellText, # type: ignore[arg-type]\n rowLabels=rowLabels,\n colLabels=colLabels,\n **kwargs,\n )\n\n\ndef _get_layout(\n nplots: int,\n layout: tuple[int, int] | None = None,\n layout_type: str = "box",\n) -> tuple[int, int]:\n if layout is not None:\n if not isinstance(layout, (tuple, list)) or len(layout) != 2:\n raise ValueError("Layout must be a tuple of (rows, columns)")\n\n nrows, ncols = layout\n\n if nrows == -1 and ncols > 0:\n layout = nrows, ncols = (ceil(nplots / ncols), ncols)\n elif ncols == -1 and nrows > 0:\n layout = nrows, ncols = (nrows, ceil(nplots / nrows))\n elif ncols <= 0 and nrows <= 0:\n msg = "At least one dimension of layout must be positive"\n raise ValueError(msg)\n\n if nrows * ncols < nplots:\n raise ValueError(\n f"Layout of {nrows}x{ncols} must be larger than required size {nplots}"\n )\n\n return layout\n\n if layout_type == "single":\n return (1, 1)\n elif layout_type == "horizontal":\n return (1, nplots)\n elif layout_type == "vertical":\n return (nplots, 1)\n\n layouts = {1: (1, 1), 2: (1, 2), 3: (2, 2), 4: (2, 2)}\n try:\n return layouts[nplots]\n except KeyError:\n k = 1\n while k**2 < nplots:\n k += 1\n\n if (k - 1) * k >= nplots:\n return k, (k - 1)\n else:\n return k, k\n\n\n# copied from matplotlib/pyplot.py and modified for pandas.plotting\n\n\ndef create_subplots(\n naxes: int,\n sharex: bool = False,\n sharey: bool = False,\n squeeze: bool = True,\n subplot_kw=None,\n ax=None,\n layout=None,\n layout_type: str = "box",\n **fig_kw,\n):\n """\n Create a figure with a set of subplots already made.\n\n This utility wrapper makes it convenient to create common layouts of\n subplots, including the enclosing figure object, in a single call.\n\n Parameters\n ----------\n naxes : int\n Number of required axes. Exceeded axes are set invisible. 
Default is\n nrows * ncols.\n\n sharex : bool\n If True, the X axis will be shared amongst all subplots.\n\n sharey : bool\n If True, the Y axis will be shared amongst all subplots.\n\n squeeze : bool\n\n If True, extra dimensions are squeezed out from the returned axis object:\n - if only one subplot is constructed (nrows=ncols=1), the resulting\n single Axis object is returned as a scalar.\n - for Nx1 or 1xN subplots, the returned object is a 1-d numpy object\n array of Axis objects are returned as numpy 1-d arrays.\n - for NxM subplots with N>1 and M>1 are returned as a 2d array.\n\n If False, no squeezing is done: the returned axis object is always\n a 2-d array containing Axis instances, even if it ends up being 1x1.\n\n subplot_kw : dict\n Dict with keywords passed to the add_subplot() call used to create each\n subplots.\n\n ax : Matplotlib axis object, optional\n\n layout : tuple\n Number of rows and columns of the subplot grid.\n If not specified, calculated from naxes and layout_type\n\n layout_type : {'box', 'horizontal', 'vertical'}, default 'box'\n Specify how to layout the subplot grid.\n\n fig_kw : Other keyword arguments to be passed to the figure() call.\n Note that all keywords not recognized above will be\n automatically included here.\n\n Returns\n -------\n fig, ax : tuple\n - fig is the Matplotlib Figure object\n - ax can be either a single axis object or an array of axis objects if\n more than one subplot was created. 
The dimensions of the resulting array\n can be controlled with the squeeze keyword, see above.\n\n Examples\n --------\n x = np.linspace(0, 2*np.pi, 400)\n y = np.sin(x**2)\n\n # Just a figure and one subplot\n f, ax = plt.subplots()\n ax.plot(x, y)\n ax.set_title('Simple plot')\n\n # Two subplots, unpack the output array immediately\n f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)\n ax1.plot(x, y)\n ax1.set_title('Sharing Y axis')\n ax2.scatter(x, y)\n\n # Four polar axes\n plt.subplots(2, 2, subplot_kw=dict(polar=True))\n """\n import matplotlib.pyplot as plt\n\n if subplot_kw is None:\n subplot_kw = {}\n\n if ax is None:\n fig = plt.figure(**fig_kw)\n else:\n if is_list_like(ax):\n if squeeze:\n ax = flatten_axes(ax)\n if layout is not None:\n warnings.warn(\n "When passing multiple axes, layout keyword is ignored.",\n UserWarning,\n stacklevel=find_stack_level(),\n )\n if sharex or sharey:\n warnings.warn(\n "When passing multiple axes, sharex and sharey "\n "are ignored. These settings must be specified when creating axes.",\n UserWarning,\n stacklevel=find_stack_level(),\n )\n if ax.size == naxes:\n fig = ax.flat[0].get_figure()\n return fig, ax\n else:\n raise ValueError(\n f"The number of passed axes must be {naxes}, the "\n "same as the output plot"\n )\n\n fig = ax.get_figure()\n # if ax is passed and a number of subplots is 1, return ax as it is\n if naxes == 1:\n if squeeze:\n return fig, ax\n else:\n return fig, flatten_axes(ax)\n else:\n warnings.warn(\n "To output multiple subplots, the figure containing "\n "the passed axes is being cleared.",\n UserWarning,\n stacklevel=find_stack_level(),\n )\n fig.clear()\n\n nrows, ncols = _get_layout(naxes, layout=layout, layout_type=layout_type)\n nplots = nrows * ncols\n\n # Create empty object array to hold all axes. 
It's easiest to make it 1-d\n # so we can just append subplots upon creation, and then\n axarr = np.empty(nplots, dtype=object)\n\n # Create first subplot separately, so we can share it if requested\n ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw)\n\n if sharex:\n subplot_kw["sharex"] = ax0\n if sharey:\n subplot_kw["sharey"] = ax0\n axarr[0] = ax0\n\n # Note off-by-one counting because add_subplot uses the MATLAB 1-based\n # convention.\n for i in range(1, nplots):\n kwds = subplot_kw.copy()\n # Set sharex and sharey to None for blank/dummy axes, these can\n # interfere with proper axis limits on the visible axes if\n # they share axes e.g. issue #7528\n if i >= naxes:\n kwds["sharex"] = None\n kwds["sharey"] = None\n ax = fig.add_subplot(nrows, ncols, i + 1, **kwds)\n axarr[i] = ax\n\n if naxes != nplots:\n for ax in axarr[naxes:]:\n ax.set_visible(False)\n\n handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey)\n\n if squeeze:\n # Reshape the array to have the final desired dimension (nrow,ncol),\n # though discarding unneeded dimensions that equal 1. 
If we only have\n # one subplot, just return it instead of a 1-element array.\n if nplots == 1:\n axes = axarr[0]\n else:\n axes = axarr.reshape(nrows, ncols).squeeze()\n else:\n # returned axis array will be always 2-d, even if nrows=ncols=1\n axes = axarr.reshape(nrows, ncols)\n\n return fig, axes\n\n\ndef _remove_labels_from_axis(axis: Axis) -> None:\n for t in axis.get_majorticklabels():\n t.set_visible(False)\n\n # set_visible will not be effective if\n # minor axis has NullLocator and NullFormatter (default)\n if isinstance(axis.get_minor_locator(), ticker.NullLocator):\n axis.set_minor_locator(ticker.AutoLocator())\n if isinstance(axis.get_minor_formatter(), ticker.NullFormatter):\n axis.set_minor_formatter(ticker.FormatStrFormatter(""))\n for t in axis.get_minorticklabels():\n t.set_visible(False)\n\n axis.get_label().set_visible(False)\n\n\ndef _has_externally_shared_axis(ax1: Axes, compare_axis: str) -> bool:\n """\n Return whether an axis is externally shared.\n\n Parameters\n ----------\n ax1 : matplotlib.axes.Axes\n Axis to query.\n compare_axis : str\n `"x"` or `"y"` according to whether the X-axis or Y-axis is being\n compared.\n\n Returns\n -------\n bool\n `True` if the axis is externally shared. 
Otherwise `False`.\n\n Notes\n -----\n If two axes with different positions are sharing an axis, they can be\n referred to as *externally* sharing the common axis.\n\n If two axes sharing an axis also have the same position, they can be\n referred to as *internally* sharing the common axis (a.k.a twinning).\n\n _handle_shared_axes() is only interested in axes externally sharing an\n axis, regardless of whether either of the axes is also internally sharing\n with a third axis.\n """\n if compare_axis == "x":\n axes = ax1.get_shared_x_axes()\n elif compare_axis == "y":\n axes = ax1.get_shared_y_axes()\n else:\n raise ValueError(\n "_has_externally_shared_axis() needs 'x' or 'y' as a second parameter"\n )\n\n axes_siblings = axes.get_siblings(ax1)\n\n # Retain ax1 and any of its siblings which aren't in the same position as it\n ax1_points = ax1.get_position().get_points()\n\n for ax2 in axes_siblings:\n if not np.array_equal(ax1_points, ax2.get_position().get_points()):\n return True\n\n return False\n\n\ndef handle_shared_axes(\n axarr: Iterable[Axes],\n nplots: int,\n naxes: int,\n nrows: int,\n ncols: int,\n sharex: bool,\n sharey: bool,\n) -> None:\n if nplots > 1:\n row_num = lambda x: x.get_subplotspec().rowspan.start\n col_num = lambda x: x.get_subplotspec().colspan.start\n\n is_first_col = lambda x: x.get_subplotspec().is_first_col()\n\n if nrows > 1:\n try:\n # first find out the ax layout,\n # so that we can correctly handle 'gaps"\n layout = np.zeros((nrows + 1, ncols + 1), dtype=np.bool_)\n for ax in axarr:\n layout[row_num(ax), col_num(ax)] = ax.get_visible()\n\n for ax in axarr:\n # only the last row of subplots should get x labels -> all\n # other off layout handles the case that the subplot is\n # the last in the column, because below is no subplot/gap.\n if not layout[row_num(ax) + 1, col_num(ax)]:\n continue\n if sharex or _has_externally_shared_axis(ax, "x"):\n _remove_labels_from_axis(ax.xaxis)\n\n except IndexError:\n # if gridspec is used, 
ax.rowNum and ax.colNum may different\n # from layout shape. in this case, use last_row logic\n is_last_row = lambda x: x.get_subplotspec().is_last_row()\n for ax in axarr:\n if is_last_row(ax):\n continue\n if sharex or _has_externally_shared_axis(ax, "x"):\n _remove_labels_from_axis(ax.xaxis)\n\n if ncols > 1:\n for ax in axarr:\n # only the first column should get y labels -> set all other to\n # off as we only have labels in the first column and we always\n # have a subplot there, we can skip the layout test\n if is_first_col(ax):\n continue\n if sharey or _has_externally_shared_axis(ax, "y"):\n _remove_labels_from_axis(ax.yaxis)\n\n\ndef flatten_axes(axes: Axes | Sequence[Axes]) -> np.ndarray:\n if not is_list_like(axes):\n return np.array([axes])\n elif isinstance(axes, (np.ndarray, ABCIndex)):\n return np.asarray(axes).ravel()\n return np.array(axes)\n\n\ndef set_ticks_props(\n axes: Axes | Sequence[Axes],\n xlabelsize: int | None = None,\n xrot=None,\n ylabelsize: int | None = None,\n yrot=None,\n):\n import matplotlib.pyplot as plt\n\n for ax in flatten_axes(axes):\n if xlabelsize is not None:\n plt.setp(ax.get_xticklabels(), fontsize=xlabelsize)\n if xrot is not None:\n plt.setp(ax.get_xticklabels(), rotation=xrot)\n if ylabelsize is not None:\n plt.setp(ax.get_yticklabels(), fontsize=ylabelsize)\n if yrot is not None:\n plt.setp(ax.get_yticklabels(), rotation=yrot)\n return axes\n\n\ndef get_all_lines(ax: Axes) -> list[Line2D]:\n lines = ax.get_lines()\n\n if hasattr(ax, "right_ax"):\n lines += ax.right_ax.get_lines()\n\n if hasattr(ax, "left_ax"):\n lines += ax.left_ax.get_lines()\n\n return lines\n\n\ndef get_xlim(lines: Iterable[Line2D]) -> tuple[float, float]:\n left, right = np.inf, -np.inf\n for line in lines:\n x = line.get_xdata(orig=False)\n left = min(np.nanmin(x), left)\n right = max(np.nanmax(x), right)\n return left, right\n | .venv\Lib\site-packages\pandas\plotting\_matplotlib\tools.py | tools.py | Python | 15,415 | 0.95 | 0.182927 | 
0.093199 | react-lib | 138 | 2024-12-04T20:36:12.999863 | GPL-3.0 | false | 909a5208b773d94e5473437bf5f0e2c8 |
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom pandas.plotting._matplotlib.boxplot import (\n BoxPlot,\n boxplot,\n boxplot_frame,\n boxplot_frame_groupby,\n)\nfrom pandas.plotting._matplotlib.converter import (\n deregister,\n register,\n)\nfrom pandas.plotting._matplotlib.core import (\n AreaPlot,\n BarhPlot,\n BarPlot,\n HexBinPlot,\n LinePlot,\n PiePlot,\n ScatterPlot,\n)\nfrom pandas.plotting._matplotlib.hist import (\n HistPlot,\n KdePlot,\n hist_frame,\n hist_series,\n)\nfrom pandas.plotting._matplotlib.misc import (\n andrews_curves,\n autocorrelation_plot,\n bootstrap_plot,\n lag_plot,\n parallel_coordinates,\n radviz,\n scatter_matrix,\n)\nfrom pandas.plotting._matplotlib.tools import table\n\nif TYPE_CHECKING:\n from pandas.plotting._matplotlib.core import MPLPlot\n\nPLOT_CLASSES: dict[str, type[MPLPlot]] = {\n "line": LinePlot,\n "bar": BarPlot,\n "barh": BarhPlot,\n "box": BoxPlot,\n "hist": HistPlot,\n "kde": KdePlot,\n "area": AreaPlot,\n "pie": PiePlot,\n "scatter": ScatterPlot,\n "hexbin": HexBinPlot,\n}\n\n\ndef plot(data, kind, **kwargs):\n # Importing pyplot at the top of the file (before the converters are\n # registered) causes problems in matplotlib 2 (converters seem to not\n # work)\n import matplotlib.pyplot as plt\n\n if kwargs.pop("reuse_plot", False):\n ax = kwargs.get("ax")\n if ax is None and len(plt.get_fignums()) > 0:\n with plt.rc_context():\n ax = plt.gca()\n kwargs["ax"] = getattr(ax, "left_ax", ax)\n plot_obj = PLOT_CLASSES[kind](data, **kwargs)\n plot_obj.generate()\n plot_obj.draw()\n return plot_obj.result\n\n\n__all__ = [\n "plot",\n "hist_series",\n "hist_frame",\n "boxplot",\n "boxplot_frame",\n "boxplot_frame_groupby",\n "table",\n "andrews_curves",\n "autocorrelation_plot",\n "bootstrap_plot",\n "lag_plot",\n "parallel_coordinates",\n "radviz",\n "scatter_matrix",\n "register",\n "deregister",\n]\n | .venv\Lib\site-packages\pandas\plotting\_matplotlib\__init__.py | __init__.py | Python | 
2,044 | 0.95 | 0.043011 | 0.035714 | react-lib | 204 | 2024-05-03T07:25:00.077388 | BSD-3-Clause | false | 0d0b51265e29e237f444e175c0dcb67b |
\n\n | .venv\Lib\site-packages\pandas\plotting\_matplotlib\__pycache__\boxplot.cpython-313.pyc | boxplot.cpython-313.pyc | Other | 21,295 | 0.95 | 0 | 0.014563 | node-utils | 477 | 2024-11-12T09:27:47.721720 | GPL-3.0 | false | 8967ef16c6b6cb5ee58bd6c527503dcc |
\n\n | .venv\Lib\site-packages\pandas\plotting\_matplotlib\__pycache__\converter.cpython-313.pyc | converter.cpython-313.pyc | Other | 46,764 | 0.95 | 0.024668 | 0.017613 | vue-tools | 863 | 2025-03-10T09:10:53.421705 | MIT | false | d298a33d6b7df261d21d4329e1c290db |
\n\n | .venv\Lib\site-packages\pandas\plotting\_matplotlib\__pycache__\core.cpython-313.pyc | core.cpython-313.pyc | Other | 86,636 | 0.75 | 0.025707 | 0.006887 | vue-tools | 681 | 2023-08-28T09:41:00.231900 | GPL-3.0 | false | 24bc4455b5a78a0fba697d51f7bd03a0 |
\n\n | .venv\Lib\site-packages\pandas\plotting\_matplotlib\__pycache__\groupby.cpython-313.pyc | groupby.cpython-313.pyc | Other | 4,679 | 0.95 | 0.074468 | 0 | awesome-app | 899 | 2024-06-19T15:26:45.090773 | Apache-2.0 | false | d9e820679d977adf852ab92b86756717 |
\n\n | .venv\Lib\site-packages\pandas\plotting\_matplotlib\__pycache__\hist.cpython-313.pyc | hist.cpython-313.pyc | Other | 19,688 | 0.95 | 0 | 0.008889 | vue-tools | 273 | 2024-05-28T04:45:37.814354 | Apache-2.0 | false | edbb39e95f60543cef04882e22476c3d |
\n\n | .venv\Lib\site-packages\pandas\plotting\_matplotlib\__pycache__\misc.cpython-313.pyc | misc.cpython-313.pyc | Other | 21,280 | 0.95 | 0.012048 | 0.025641 | awesome-app | 747 | 2025-05-30T18:04:55.419873 | Apache-2.0 | false | f85a5674c769c40f61457784319ba251 |
\n\n | .venv\Lib\site-packages\pandas\plotting\_matplotlib\__pycache__\style.cpython-313.pyc | style.cpython-313.pyc | Other | 10,181 | 0.8 | 0.110345 | 0.007692 | python-kit | 271 | 2024-04-06T16:05:24.494337 | GPL-3.0 | false | e263dbeceb18e814580c876d063b2c33 |
\n\n | .venv\Lib\site-packages\pandas\plotting\_matplotlib\__pycache__\timeseries.cpython-313.pyc | timeseries.cpython-313.pyc | Other | 13,284 | 0.95 | 0.036496 | 0.016807 | react-lib | 849 | 2024-12-12T04:52:43.969497 | MIT | false | 24bd475248ab30b1c0ef91018e433d45 |
\n\n | .venv\Lib\site-packages\pandas\plotting\_matplotlib\__pycache__\tools.cpython-313.pyc | tools.cpython-313.pyc | Other | 17,161 | 0.95 | 0.024896 | 0.014085 | node-utils | 897 | 2023-09-14T01:59:01.427139 | Apache-2.0 | false | 7b8ea75d1a479c285c5780f4ae9e0156 |
\n\n | .venv\Lib\site-packages\pandas\plotting\_matplotlib\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 2,405 | 0.8 | 0 | 0 | vue-tools | 939 | 2024-03-17T00:33:27.250754 | GPL-3.0 | false | 9f584fb76368490d1daf50276a8a06c1 |
\n\n | .venv\Lib\site-packages\pandas\plotting\__pycache__\_core.cpython-313.pyc | _core.cpython-313.pyc | Other | 62,230 | 0.75 | 0.080386 | 0.02044 | awesome-app | 658 | 2025-04-17T06:55:48.537570 | BSD-3-Clause | false | f5195c6a4d57200f0b907281bc58170a |
\n\n | .venv\Lib\site-packages\pandas\plotting\__pycache__\_misc.cpython-313.pyc | _misc.cpython-313.pyc | Other | 21,660 | 0.95 | 0.062731 | 0.030435 | react-lib | 106 | 2025-03-08T02:54:39.750855 | Apache-2.0 | false | b7cbee37402a5a27e4130ab86bf59cfa |
\n\n | .venv\Lib\site-packages\pandas\plotting\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 2,848 | 0.95 | 0.101449 | 0 | react-lib | 384 | 2023-07-26T10:23:03.821872 | BSD-3-Clause | false | a5b683c3669531e4f980798282e2c1e8 |
import numpy as np\nimport pytest\n\nfrom pandas.core.apply import (\n _make_unique_kwarg_list,\n maybe_mangle_lambdas,\n)\n\n\ndef test_maybe_mangle_lambdas_passthrough():\n assert maybe_mangle_lambdas("mean") == "mean"\n assert maybe_mangle_lambdas(lambda x: x).__name__ == "<lambda>"\n # don't mangel single lambda.\n assert maybe_mangle_lambdas([lambda x: x])[0].__name__ == "<lambda>"\n\n\ndef test_maybe_mangle_lambdas_listlike():\n aggfuncs = [lambda x: 1, lambda x: 2]\n result = maybe_mangle_lambdas(aggfuncs)\n assert result[0].__name__ == "<lambda_0>"\n assert result[1].__name__ == "<lambda_1>"\n assert aggfuncs[0](None) == result[0](None)\n assert aggfuncs[1](None) == result[1](None)\n\n\ndef test_maybe_mangle_lambdas():\n func = {"A": [lambda x: 0, lambda x: 1]}\n result = maybe_mangle_lambdas(func)\n assert result["A"][0].__name__ == "<lambda_0>"\n assert result["A"][1].__name__ == "<lambda_1>"\n\n\ndef test_maybe_mangle_lambdas_args():\n func = {"A": [lambda x, a, b=1: (0, a, b), lambda x: 1]}\n result = maybe_mangle_lambdas(func)\n assert result["A"][0].__name__ == "<lambda_0>"\n assert result["A"][1].__name__ == "<lambda_1>"\n\n assert func["A"][0](0, 1) == (0, 1, 1)\n assert func["A"][0](0, 1, 2) == (0, 1, 2)\n assert func["A"][0](0, 2, b=3) == (0, 2, 3)\n\n\ndef test_maybe_mangle_lambdas_named():\n func = {"C": np.mean, "D": {"foo": np.mean, "bar": np.mean}}\n result = maybe_mangle_lambdas(func)\n assert result == func\n\n\n@pytest.mark.parametrize(\n "order, expected_reorder",\n [\n (\n [\n ("height", "<lambda>"),\n ("height", "max"),\n ("weight", "max"),\n ("height", "<lambda>"),\n ("weight", "<lambda>"),\n ],\n [\n ("height", "<lambda>_0"),\n ("height", "max"),\n ("weight", "max"),\n ("height", "<lambda>_1"),\n ("weight", "<lambda>"),\n ],\n ),\n (\n [\n ("col2", "min"),\n ("col1", "<lambda>"),\n ("col1", "<lambda>"),\n ("col1", "<lambda>"),\n ],\n [\n ("col2", "min"),\n ("col1", "<lambda>_0"),\n ("col1", "<lambda>_1"),\n ("col1", "<lambda>_2"),\n 
],\n ),\n (\n [("col", "<lambda>"), ("col", "<lambda>"), ("col", "<lambda>")],\n [("col", "<lambda>_0"), ("col", "<lambda>_1"), ("col", "<lambda>_2")],\n ),\n ],\n)\ndef test_make_unique(order, expected_reorder):\n # GH 27519, test if make_unique function reorders correctly\n result = _make_unique_kwarg_list(order)\n\n assert result == expected_reorder\n | .venv\Lib\site-packages\pandas\tests\test_aggregation.py | test_aggregation.py | Python | 2,779 | 0.95 | 0.086022 | 0.025641 | awesome-app | 581 | 2024-06-22T13:39:14.878369 | MIT | true | a456a8ca34751c2847030fc4d5e0612a |
from datetime import datetime\nimport struct\n\nimport numpy as np\nimport pytest\n\nfrom pandas._config import using_string_dtype\n\nfrom pandas._libs import (\n algos as libalgos,\n hashtable as ht,\n)\n\nfrom pandas.core.dtypes.common import (\n is_bool_dtype,\n is_complex_dtype,\n is_float_dtype,\n is_integer_dtype,\n is_object_dtype,\n)\nfrom pandas.core.dtypes.dtypes import CategoricalDtype\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n CategoricalIndex,\n DataFrame,\n DatetimeIndex,\n Index,\n IntervalIndex,\n MultiIndex,\n NaT,\n Period,\n PeriodIndex,\n Series,\n Timedelta,\n Timestamp,\n cut,\n date_range,\n timedelta_range,\n to_datetime,\n to_timedelta,\n)\nimport pandas._testing as tm\nimport pandas.core.algorithms as algos\nfrom pandas.core.arrays import (\n DatetimeArray,\n TimedeltaArray,\n)\nimport pandas.core.common as com\n\n\nclass TestFactorize:\n def test_factorize_complex(self):\n # GH#17927\n array = [1, 2, 2 + 1j]\n msg = "factorize with argument that is not not a Series"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n labels, uniques = algos.factorize(array)\n\n expected_labels = np.array([0, 1, 2], dtype=np.intp)\n tm.assert_numpy_array_equal(labels, expected_labels)\n\n # Should return a complex dtype in the future\n expected_uniques = np.array([(1 + 0j), (2 + 0j), (2 + 1j)], dtype=object)\n tm.assert_numpy_array_equal(uniques, expected_uniques)\n\n @pytest.mark.xfail(using_string_dtype(), reason="TODO(infer_string)", strict=False)\n @pytest.mark.parametrize("sort", [True, False])\n def test_factorize(self, index_or_series_obj, sort):\n obj = index_or_series_obj\n result_codes, result_uniques = obj.factorize(sort=sort)\n\n constructor = Index\n if isinstance(obj, MultiIndex):\n constructor = MultiIndex.from_tuples\n expected_arr = obj.unique()\n if expected_arr.dtype == np.float16:\n expected_arr = expected_arr.astype(np.float32)\n expected_uniques = constructor(expected_arr)\n if (\n isinstance(obj, 
Index)\n and expected_uniques.dtype == bool\n and obj.dtype == object\n ):\n expected_uniques = expected_uniques.astype(object)\n\n if sort:\n expected_uniques = expected_uniques.sort_values()\n\n # construct an integer ndarray so that\n # `expected_uniques.take(expected_codes)` is equal to `obj`\n expected_uniques_list = list(expected_uniques)\n expected_codes = [expected_uniques_list.index(val) for val in obj]\n expected_codes = np.asarray(expected_codes, dtype=np.intp)\n\n tm.assert_numpy_array_equal(result_codes, expected_codes)\n tm.assert_index_equal(result_uniques, expected_uniques, exact=True)\n\n def test_series_factorize_use_na_sentinel_false(self):\n # GH#35667\n values = np.array([1, 2, 1, np.nan])\n ser = Series(values)\n codes, uniques = ser.factorize(use_na_sentinel=False)\n\n expected_codes = np.array([0, 1, 0, 2], dtype=np.intp)\n expected_uniques = Index([1.0, 2.0, np.nan])\n\n tm.assert_numpy_array_equal(codes, expected_codes)\n tm.assert_index_equal(uniques, expected_uniques)\n\n def test_basic(self):\n items = np.array(["a", "b", "b", "a", "a", "c", "c", "c"], dtype=object)\n codes, uniques = algos.factorize(items)\n tm.assert_numpy_array_equal(uniques, np.array(["a", "b", "c"], dtype=object))\n\n codes, uniques = algos.factorize(items, sort=True)\n exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)\n tm.assert_numpy_array_equal(codes, exp)\n exp = np.array(["a", "b", "c"], dtype=object)\n tm.assert_numpy_array_equal(uniques, exp)\n\n arr = np.arange(5, dtype=np.intp)[::-1]\n\n codes, uniques = algos.factorize(arr)\n exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)\n tm.assert_numpy_array_equal(codes, exp)\n exp = np.array([4, 3, 2, 1, 0], dtype=arr.dtype)\n tm.assert_numpy_array_equal(uniques, exp)\n\n codes, uniques = algos.factorize(arr, sort=True)\n exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)\n tm.assert_numpy_array_equal(codes, exp)\n exp = np.array([0, 1, 2, 3, 4], dtype=arr.dtype)\n tm.assert_numpy_array_equal(uniques, exp)\n\n arr 
= np.arange(5.0)[::-1]\n\n codes, uniques = algos.factorize(arr)\n exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)\n tm.assert_numpy_array_equal(codes, exp)\n exp = np.array([4.0, 3.0, 2.0, 1.0, 0.0], dtype=arr.dtype)\n tm.assert_numpy_array_equal(uniques, exp)\n\n codes, uniques = algos.factorize(arr, sort=True)\n exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)\n tm.assert_numpy_array_equal(codes, exp)\n exp = np.array([0.0, 1.0, 2.0, 3.0, 4.0], dtype=arr.dtype)\n tm.assert_numpy_array_equal(uniques, exp)\n\n def test_mixed(self):\n # doc example reshaping.rst\n x = Series(["A", "A", np.nan, "B", 3.14, np.inf])\n codes, uniques = algos.factorize(x)\n\n exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)\n tm.assert_numpy_array_equal(codes, exp)\n exp = Index(["A", "B", 3.14, np.inf])\n tm.assert_index_equal(uniques, exp)\n\n codes, uniques = algos.factorize(x, sort=True)\n exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)\n tm.assert_numpy_array_equal(codes, exp)\n exp = Index([3.14, np.inf, "A", "B"])\n tm.assert_index_equal(uniques, exp)\n\n def test_factorize_datetime64(self):\n # M8\n v1 = Timestamp("20130101 09:00:00.00004")\n v2 = Timestamp("20130101")\n x = Series([v1, v1, v1, v2, v2, v1])\n codes, uniques = algos.factorize(x)\n\n exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)\n tm.assert_numpy_array_equal(codes, exp)\n exp = DatetimeIndex([v1, v2])\n tm.assert_index_equal(uniques, exp)\n\n codes, uniques = algos.factorize(x, sort=True)\n exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)\n tm.assert_numpy_array_equal(codes, exp)\n exp = DatetimeIndex([v2, v1])\n tm.assert_index_equal(uniques, exp)\n\n def test_factorize_period(self):\n # period\n v1 = Period("201302", freq="M")\n v2 = Period("201303", freq="M")\n x = Series([v1, v1, v1, v2, v2, v1])\n\n # periods are not 'sorted' as they are converted back into an index\n codes, uniques = algos.factorize(x)\n exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)\n tm.assert_numpy_array_equal(codes, exp)\n 
tm.assert_index_equal(uniques, PeriodIndex([v1, v2]))\n\n codes, uniques = algos.factorize(x, sort=True)\n exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)\n tm.assert_numpy_array_equal(codes, exp)\n tm.assert_index_equal(uniques, PeriodIndex([v1, v2]))\n\n def test_factorize_timedelta(self):\n # GH 5986\n v1 = to_timedelta("1 day 1 min")\n v2 = to_timedelta("1 day")\n x = Series([v1, v2, v1, v1, v2, v2, v1])\n codes, uniques = algos.factorize(x)\n exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)\n tm.assert_numpy_array_equal(codes, exp)\n tm.assert_index_equal(uniques, to_timedelta([v1, v2]))\n\n codes, uniques = algos.factorize(x, sort=True)\n exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)\n tm.assert_numpy_array_equal(codes, exp)\n tm.assert_index_equal(uniques, to_timedelta([v2, v1]))\n\n def test_factorize_nan(self):\n # nan should map to na_sentinel, not reverse_indexer[na_sentinel]\n # rizer.factorize should not raise an exception if na_sentinel indexes\n # outside of reverse_indexer\n key = np.array([1, 2, 1, np.nan], dtype="O")\n rizer = ht.ObjectFactorizer(len(key))\n for na_sentinel in (-1, 20):\n ids = rizer.factorize(key, na_sentinel=na_sentinel)\n expected = np.array([0, 1, 0, na_sentinel], dtype=np.intp)\n assert len(set(key)) == len(set(expected))\n tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel)\n tm.assert_numpy_array_equal(ids, expected)\n\n def test_factorizer_with_mask(self):\n # GH#49549\n data = np.array([1, 2, 3, 1, 1, 0], dtype="int64")\n mask = np.array([False, False, False, False, False, True])\n rizer = ht.Int64Factorizer(len(data))\n result = rizer.factorize(data, mask=mask)\n expected = np.array([0, 1, 2, 0, 0, -1], dtype=np.intp)\n tm.assert_numpy_array_equal(result, expected)\n expected_uniques = np.array([1, 2, 3], dtype="int64")\n tm.assert_numpy_array_equal(rizer.uniques.to_array(), expected_uniques)\n\n def test_factorizer_object_with_nan(self):\n # GH#49549\n data = np.array([1, 2, 3, 1, np.nan])\n 
rizer = ht.ObjectFactorizer(len(data))\n result = rizer.factorize(data.astype(object))\n expected = np.array([0, 1, 2, 0, -1], dtype=np.intp)\n tm.assert_numpy_array_equal(result, expected)\n expected_uniques = np.array([1, 2, 3], dtype=object)\n tm.assert_numpy_array_equal(rizer.uniques.to_array(), expected_uniques)\n\n @pytest.mark.parametrize(\n "data, expected_codes, expected_uniques",\n [\n (\n [(1, 1), (1, 2), (0, 0), (1, 2), "nonsense"],\n [0, 1, 2, 1, 3],\n [(1, 1), (1, 2), (0, 0), "nonsense"],\n ),\n (\n [(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],\n [0, 1, 2, 1, 3],\n [(1, 1), (1, 2), (0, 0), (1, 2, 3)],\n ),\n ([(1, 1), (1, 2), (0, 0), (1, 2)], [0, 1, 2, 1], [(1, 1), (1, 2), (0, 0)]),\n ],\n )\n def test_factorize_tuple_list(self, data, expected_codes, expected_uniques):\n # GH9454\n msg = "factorize with argument that is not not a Series"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n codes, uniques = pd.factorize(data)\n\n tm.assert_numpy_array_equal(codes, np.array(expected_codes, dtype=np.intp))\n\n expected_uniques_array = com.asarray_tuplesafe(expected_uniques, dtype=object)\n tm.assert_numpy_array_equal(uniques, expected_uniques_array)\n\n def test_complex_sorting(self):\n # gh 12666 - check no segfault\n x17 = np.array([complex(i) for i in range(17)], dtype=object)\n\n msg = "'[<>]' not supported between instances of .*"\n with pytest.raises(TypeError, match=msg):\n algos.factorize(x17[::-1], sort=True)\n\n def test_numeric_dtype_factorize(self, any_real_numpy_dtype):\n # GH41132\n dtype = any_real_numpy_dtype\n data = np.array([1, 2, 2, 1], dtype=dtype)\n expected_codes = np.array([0, 1, 1, 0], dtype=np.intp)\n expected_uniques = np.array([1, 2], dtype=dtype)\n\n codes, uniques = algos.factorize(data)\n tm.assert_numpy_array_equal(codes, expected_codes)\n tm.assert_numpy_array_equal(uniques, expected_uniques)\n\n def test_float64_factorize(self, writable):\n data = np.array([1.0, 1e8, 1.0, 1e-8, 1e8, 1.0], dtype=np.float64)\n 
data.setflags(write=writable)\n expected_codes = np.array([0, 1, 0, 2, 1, 0], dtype=np.intp)\n expected_uniques = np.array([1.0, 1e8, 1e-8], dtype=np.float64)\n\n codes, uniques = algos.factorize(data)\n tm.assert_numpy_array_equal(codes, expected_codes)\n tm.assert_numpy_array_equal(uniques, expected_uniques)\n\n def test_uint64_factorize(self, writable):\n data = np.array([2**64 - 1, 1, 2**64 - 1], dtype=np.uint64)\n data.setflags(write=writable)\n expected_codes = np.array([0, 1, 0], dtype=np.intp)\n expected_uniques = np.array([2**64 - 1, 1], dtype=np.uint64)\n\n codes, uniques = algos.factorize(data)\n tm.assert_numpy_array_equal(codes, expected_codes)\n tm.assert_numpy_array_equal(uniques, expected_uniques)\n\n def test_int64_factorize(self, writable):\n data = np.array([2**63 - 1, -(2**63), 2**63 - 1], dtype=np.int64)\n data.setflags(write=writable)\n expected_codes = np.array([0, 1, 0], dtype=np.intp)\n expected_uniques = np.array([2**63 - 1, -(2**63)], dtype=np.int64)\n\n codes, uniques = algos.factorize(data)\n tm.assert_numpy_array_equal(codes, expected_codes)\n tm.assert_numpy_array_equal(uniques, expected_uniques)\n\n def test_string_factorize(self, writable):\n data = np.array(["a", "c", "a", "b", "c"], dtype=object)\n data.setflags(write=writable)\n expected_codes = np.array([0, 1, 0, 2, 1], dtype=np.intp)\n expected_uniques = np.array(["a", "c", "b"], dtype=object)\n\n codes, uniques = algos.factorize(data)\n tm.assert_numpy_array_equal(codes, expected_codes)\n tm.assert_numpy_array_equal(uniques, expected_uniques)\n\n def test_object_factorize(self, writable):\n data = np.array(["a", "c", None, np.nan, "a", "b", NaT, "c"], dtype=object)\n data.setflags(write=writable)\n expected_codes = np.array([0, 1, -1, -1, 0, 2, -1, 1], dtype=np.intp)\n expected_uniques = np.array(["a", "c", "b"], dtype=object)\n\n codes, uniques = algos.factorize(data)\n tm.assert_numpy_array_equal(codes, expected_codes)\n tm.assert_numpy_array_equal(uniques, 
expected_uniques)\n\n def test_datetime64_factorize(self, writable):\n # GH35650 Verify whether read-only datetime64 array can be factorized\n data = np.array([np.datetime64("2020-01-01T00:00:00.000")], dtype="M8[ns]")\n data.setflags(write=writable)\n expected_codes = np.array([0], dtype=np.intp)\n expected_uniques = np.array(\n ["2020-01-01T00:00:00.000000000"], dtype="datetime64[ns]"\n )\n\n codes, uniques = pd.factorize(data)\n tm.assert_numpy_array_equal(codes, expected_codes)\n tm.assert_numpy_array_equal(uniques, expected_uniques)\n\n @pytest.mark.parametrize("sort", [True, False])\n def test_factorize_rangeindex(self, sort):\n # increasing -> sort doesn't matter\n ri = pd.RangeIndex.from_range(range(10))\n expected = np.arange(10, dtype=np.intp), ri\n\n result = algos.factorize(ri, sort=sort)\n tm.assert_numpy_array_equal(result[0], expected[0])\n tm.assert_index_equal(result[1], expected[1], exact=True)\n\n result = ri.factorize(sort=sort)\n tm.assert_numpy_array_equal(result[0], expected[0])\n tm.assert_index_equal(result[1], expected[1], exact=True)\n\n @pytest.mark.parametrize("sort", [True, False])\n def test_factorize_rangeindex_decreasing(self, sort):\n # decreasing -> sort matters\n ri = pd.RangeIndex.from_range(range(10))\n expected = np.arange(10, dtype=np.intp), ri\n\n ri2 = ri[::-1]\n expected = expected[0], ri2\n if sort:\n expected = expected[0][::-1], expected[1][::-1]\n\n result = algos.factorize(ri2, sort=sort)\n tm.assert_numpy_array_equal(result[0], expected[0])\n tm.assert_index_equal(result[1], expected[1], exact=True)\n\n result = ri2.factorize(sort=sort)\n tm.assert_numpy_array_equal(result[0], expected[0])\n tm.assert_index_equal(result[1], expected[1], exact=True)\n\n def test_deprecate_order(self):\n # gh 19727 - check warning is raised for deprecated keyword, order.\n # Test not valid once order keyword is removed.\n data = np.array([2**63, 1, 2**63], dtype=np.uint64)\n with pytest.raises(TypeError, match="got an unexpected 
keyword"):\n algos.factorize(data, order=True)\n with tm.assert_produces_warning(False):\n algos.factorize(data)\n\n @pytest.mark.parametrize(\n "data",\n [\n np.array([0, 1, 0], dtype="u8"),\n np.array([-(2**63), 1, -(2**63)], dtype="i8"),\n np.array(["__nan__", "foo", "__nan__"], dtype="object"),\n ],\n )\n def test_parametrized_factorize_na_value_default(self, data):\n # arrays that include the NA default for that type, but isn't used.\n codes, uniques = algos.factorize(data)\n expected_uniques = data[[0, 1]]\n expected_codes = np.array([0, 1, 0], dtype=np.intp)\n tm.assert_numpy_array_equal(codes, expected_codes)\n tm.assert_numpy_array_equal(uniques, expected_uniques)\n\n @pytest.mark.parametrize(\n "data, na_value",\n [\n (np.array([0, 1, 0, 2], dtype="u8"), 0),\n (np.array([1, 0, 1, 2], dtype="u8"), 1),\n (np.array([-(2**63), 1, -(2**63), 0], dtype="i8"), -(2**63)),\n (np.array([1, -(2**63), 1, 0], dtype="i8"), 1),\n (np.array(["a", "", "a", "b"], dtype=object), "a"),\n (np.array([(), ("a", 1), (), ("a", 2)], dtype=object), ()),\n (np.array([("a", 1), (), ("a", 1), ("a", 2)], dtype=object), ("a", 1)),\n ],\n )\n def test_parametrized_factorize_na_value(self, data, na_value):\n codes, uniques = algos.factorize_array(data, na_value=na_value)\n expected_uniques = data[[1, 3]]\n expected_codes = np.array([-1, 0, -1, 1], dtype=np.intp)\n tm.assert_numpy_array_equal(codes, expected_codes)\n tm.assert_numpy_array_equal(uniques, expected_uniques)\n\n @pytest.mark.parametrize("sort", [True, False])\n @pytest.mark.parametrize(\n "data, uniques",\n [\n (\n np.array(["b", "a", None, "b"], dtype=object),\n np.array(["b", "a"], dtype=object),\n ),\n (\n pd.array([2, 1, np.nan, 2], dtype="Int64"),\n pd.array([2, 1], dtype="Int64"),\n ),\n ],\n ids=["numpy_array", "extension_array"],\n )\n def test_factorize_use_na_sentinel(self, sort, data, uniques):\n codes, uniques = algos.factorize(data, sort=sort, use_na_sentinel=True)\n if sort:\n expected_codes = np.array([1, 0, -1, 
1], dtype=np.intp)\n expected_uniques = algos.safe_sort(uniques)\n else:\n expected_codes = np.array([0, 1, -1, 0], dtype=np.intp)\n expected_uniques = uniques\n tm.assert_numpy_array_equal(codes, expected_codes)\n if isinstance(data, np.ndarray):\n tm.assert_numpy_array_equal(uniques, expected_uniques)\n else:\n tm.assert_extension_array_equal(uniques, expected_uniques)\n\n @pytest.mark.parametrize(\n "data, expected_codes, expected_uniques",\n [\n (\n ["a", None, "b", "a"],\n np.array([0, 1, 2, 0], dtype=np.dtype("intp")),\n np.array(["a", np.nan, "b"], dtype=object),\n ),\n (\n ["a", np.nan, "b", "a"],\n np.array([0, 1, 2, 0], dtype=np.dtype("intp")),\n np.array(["a", np.nan, "b"], dtype=object),\n ),\n ],\n )\n def test_object_factorize_use_na_sentinel_false(\n self, data, expected_codes, expected_uniques\n ):\n codes, uniques = algos.factorize(\n np.array(data, dtype=object), use_na_sentinel=False\n )\n\n tm.assert_numpy_array_equal(uniques, expected_uniques, strict_nan=True)\n tm.assert_numpy_array_equal(codes, expected_codes, strict_nan=True)\n\n @pytest.mark.parametrize(\n "data, expected_codes, expected_uniques",\n [\n (\n [1, None, 1, 2],\n np.array([0, 1, 0, 2], dtype=np.dtype("intp")),\n np.array([1, np.nan, 2], dtype="O"),\n ),\n (\n [1, np.nan, 1, 2],\n np.array([0, 1, 0, 2], dtype=np.dtype("intp")),\n np.array([1, np.nan, 2], dtype=np.float64),\n ),\n ],\n )\n def test_int_factorize_use_na_sentinel_false(\n self, data, expected_codes, expected_uniques\n ):\n msg = "factorize with argument that is not not a Series"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n codes, uniques = algos.factorize(data, use_na_sentinel=False)\n\n tm.assert_numpy_array_equal(uniques, expected_uniques, strict_nan=True)\n tm.assert_numpy_array_equal(codes, expected_codes, strict_nan=True)\n\n @pytest.mark.parametrize(\n "data, expected_codes, expected_uniques",\n [\n (\n Index(Categorical(["a", "a", "b"])),\n np.array([0, 0, 1], dtype=np.intp),\n 
CategoricalIndex(["a", "b"], categories=["a", "b"], dtype="category"),\n ),\n (\n Series(Categorical(["a", "a", "b"])),\n np.array([0, 0, 1], dtype=np.intp),\n CategoricalIndex(["a", "b"], categories=["a", "b"], dtype="category"),\n ),\n (\n Series(DatetimeIndex(["2017", "2017"], tz="US/Eastern")),\n np.array([0, 0], dtype=np.intp),\n DatetimeIndex(["2017"], tz="US/Eastern"),\n ),\n ],\n )\n def test_factorize_mixed_values(self, data, expected_codes, expected_uniques):\n # GH 19721\n codes, uniques = algos.factorize(data)\n tm.assert_numpy_array_equal(codes, expected_codes)\n tm.assert_index_equal(uniques, expected_uniques)\n\n def test_factorize_interval_non_nano(self, unit):\n # GH#56099\n left = DatetimeIndex(["2016-01-01", np.nan, "2015-10-11"]).as_unit(unit)\n right = DatetimeIndex(["2016-01-02", np.nan, "2015-10-15"]).as_unit(unit)\n idx = IntervalIndex.from_arrays(left, right)\n codes, cats = idx.factorize()\n assert cats.dtype == f"interval[datetime64[{unit}], right]"\n\n ts = Timestamp(0).as_unit(unit)\n idx2 = IntervalIndex.from_arrays(left - ts, right - ts)\n codes2, cats2 = idx2.factorize()\n assert cats2.dtype == f"interval[timedelta64[{unit}], right]"\n\n idx3 = IntervalIndex.from_arrays(\n left.tz_localize("US/Pacific"), right.tz_localize("US/Pacific")\n )\n codes3, cats3 = idx3.factorize()\n assert cats3.dtype == f"interval[datetime64[{unit}, US/Pacific], right]"\n\n\nclass TestUnique:\n def test_ints(self):\n arr = np.random.default_rng(2).integers(0, 100, size=50)\n\n result = algos.unique(arr)\n assert isinstance(result, np.ndarray)\n\n def test_objects(self):\n arr = np.random.default_rng(2).integers(0, 100, size=50).astype("O")\n\n result = algos.unique(arr)\n assert isinstance(result, np.ndarray)\n\n def test_object_refcount_bug(self):\n lst = np.array(["A", "B", "C", "D", "E"], dtype=object)\n for i in range(1000):\n len(algos.unique(lst))\n\n def test_on_index_object(self):\n mindex = MultiIndex.from_arrays(\n [np.arange(5).repeat(5), 
np.tile(np.arange(5), 5)]\n )\n expected = mindex.values\n expected.sort()\n\n mindex = mindex.repeat(2)\n\n result = pd.unique(mindex)\n result.sort()\n\n tm.assert_almost_equal(result, expected)\n\n def test_dtype_preservation(self, any_numpy_dtype):\n # GH 15442\n if any_numpy_dtype in (tm.BYTES_DTYPES + tm.STRING_DTYPES):\n data = [1, 2, 2]\n uniques = [1, 2]\n elif is_integer_dtype(any_numpy_dtype):\n data = [1, 2, 2]\n uniques = [1, 2]\n elif is_float_dtype(any_numpy_dtype):\n data = [1, 2, 2]\n uniques = [1.0, 2.0]\n elif is_complex_dtype(any_numpy_dtype):\n data = [complex(1, 0), complex(2, 0), complex(2, 0)]\n uniques = [complex(1, 0), complex(2, 0)]\n elif is_bool_dtype(any_numpy_dtype):\n data = [True, True, False]\n uniques = [True, False]\n elif is_object_dtype(any_numpy_dtype):\n data = ["A", "B", "B"]\n uniques = ["A", "B"]\n else:\n # datetime64[ns]/M8[ns]/timedelta64[ns]/m8[ns] tested elsewhere\n data = [1, 2, 2]\n uniques = [1, 2]\n\n result = Series(data, dtype=any_numpy_dtype).unique()\n expected = np.array(uniques, dtype=any_numpy_dtype)\n\n if any_numpy_dtype in tm.STRING_DTYPES:\n expected = expected.astype(object)\n\n if expected.dtype.kind in ["m", "M"]:\n # We get TimedeltaArray/DatetimeArray\n assert isinstance(result, (DatetimeArray, TimedeltaArray))\n result = np.array(result)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_datetime64_dtype_array_returned(self):\n # GH 9431\n expected = np.array(\n [\n "2015-01-03T00:00:00.000000000",\n "2015-01-01T00:00:00.000000000",\n ],\n dtype="M8[ns]",\n )\n\n dt_index = to_datetime(\n [\n "2015-01-03T00:00:00.000000000",\n "2015-01-01T00:00:00.000000000",\n "2015-01-01T00:00:00.000000000",\n ]\n )\n result = algos.unique(dt_index)\n tm.assert_numpy_array_equal(result, expected)\n assert result.dtype == expected.dtype\n\n s = Series(dt_index)\n result = algos.unique(s)\n tm.assert_numpy_array_equal(result, expected)\n assert result.dtype == expected.dtype\n\n arr = s.values\n result = 
algos.unique(arr)\n tm.assert_numpy_array_equal(result, expected)\n assert result.dtype == expected.dtype\n\n def test_datetime_non_ns(self):\n a = np.array(["2000", "2000", "2001"], dtype="datetime64[s]")\n result = pd.unique(a)\n expected = np.array(["2000", "2001"], dtype="datetime64[s]")\n tm.assert_numpy_array_equal(result, expected)\n\n def test_timedelta_non_ns(self):\n a = np.array(["2000", "2000", "2001"], dtype="timedelta64[s]")\n result = pd.unique(a)\n expected = np.array([2000, 2001], dtype="timedelta64[s]")\n tm.assert_numpy_array_equal(result, expected)\n\n def test_timedelta64_dtype_array_returned(self):\n # GH 9431\n expected = np.array([31200, 45678, 10000], dtype="m8[ns]")\n\n td_index = to_timedelta([31200, 45678, 31200, 10000, 45678])\n result = algos.unique(td_index)\n tm.assert_numpy_array_equal(result, expected)\n assert result.dtype == expected.dtype\n\n s = Series(td_index)\n result = algos.unique(s)\n tm.assert_numpy_array_equal(result, expected)\n assert result.dtype == expected.dtype\n\n arr = s.values\n result = algos.unique(arr)\n tm.assert_numpy_array_equal(result, expected)\n assert result.dtype == expected.dtype\n\n def test_uint64_overflow(self):\n s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)\n exp = np.array([1, 2, 2**63], dtype=np.uint64)\n tm.assert_numpy_array_equal(algos.unique(s), exp)\n\n def test_nan_in_object_array(self):\n duplicated_items = ["a", np.nan, "c", "c"]\n result = pd.unique(np.array(duplicated_items, dtype=object))\n expected = np.array(["a", np.nan, "c"], dtype=object)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_categorical(self):\n # we are expecting to return in the order\n # of appearance\n expected = Categorical(list("bac"))\n\n # we are expecting to return in the order\n # of the categories\n expected_o = Categorical(list("bac"), categories=list("abc"), ordered=True)\n\n # GH 15939\n c = Categorical(list("baabc"))\n result = c.unique()\n tm.assert_categorical_equal(result, 
expected)\n\n result = algos.unique(c)\n tm.assert_categorical_equal(result, expected)\n\n c = Categorical(list("baabc"), ordered=True)\n result = c.unique()\n tm.assert_categorical_equal(result, expected_o)\n\n result = algos.unique(c)\n tm.assert_categorical_equal(result, expected_o)\n\n # Series of categorical dtype\n s = Series(Categorical(list("baabc")), name="foo")\n result = s.unique()\n tm.assert_categorical_equal(result, expected)\n\n result = pd.unique(s)\n tm.assert_categorical_equal(result, expected)\n\n # CI -> return CI\n ci = CategoricalIndex(Categorical(list("baabc"), categories=list("abc")))\n expected = CategoricalIndex(expected)\n result = ci.unique()\n tm.assert_index_equal(result, expected)\n\n result = pd.unique(ci)\n tm.assert_index_equal(result, expected)\n\n def test_datetime64tz_aware(self, unit):\n # GH 15939\n\n dti = Index(\n [\n Timestamp("20160101", tz="US/Eastern"),\n Timestamp("20160101", tz="US/Eastern"),\n ]\n ).as_unit(unit)\n ser = Series(dti)\n\n result = ser.unique()\n expected = dti[:1]._data\n tm.assert_extension_array_equal(result, expected)\n\n result = dti.unique()\n expected = dti[:1]\n tm.assert_index_equal(result, expected)\n\n result = pd.unique(ser)\n expected = dti[:1]._data\n tm.assert_extension_array_equal(result, expected)\n\n result = pd.unique(dti)\n expected = dti[:1]\n tm.assert_index_equal(result, expected)\n\n def test_order_of_appearance(self):\n # 9346\n # light testing of guarantee of order of appearance\n # these also are the doc-examples\n result = pd.unique(Series([2, 1, 3, 3]))\n tm.assert_numpy_array_equal(result, np.array([2, 1, 3], dtype="int64"))\n\n result = pd.unique(Series([2] + [1] * 5))\n tm.assert_numpy_array_equal(result, np.array([2, 1], dtype="int64"))\n\n msg = "unique with argument that is not not a Series, Index,"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = pd.unique(list("aabc"))\n expected = np.array(["a", "b", "c"], dtype=object)\n 
tm.assert_numpy_array_equal(result, expected)\n\n result = pd.unique(Series(Categorical(list("aabc"))))\n expected = Categorical(list("abc"))\n tm.assert_categorical_equal(result, expected)\n\n def test_order_of_appearance_dt64(self, unit):\n ser = Series([Timestamp("20160101"), Timestamp("20160101")]).dt.as_unit(unit)\n result = pd.unique(ser)\n expected = np.array(["2016-01-01T00:00:00.000000000"], dtype=f"M8[{unit}]")\n tm.assert_numpy_array_equal(result, expected)\n\n def test_order_of_appearance_dt64tz(self, unit):\n dti = DatetimeIndex(\n [\n Timestamp("20160101", tz="US/Eastern"),\n Timestamp("20160101", tz="US/Eastern"),\n ]\n ).as_unit(unit)\n result = pd.unique(dti)\n expected = DatetimeIndex(\n ["2016-01-01 00:00:00"], dtype=f"datetime64[{unit}, US/Eastern]", freq=None\n )\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n "arg ,expected",\n [\n (("1", "1", "2"), np.array(["1", "2"], dtype=object)),\n (("foo",), np.array(["foo"], dtype=object)),\n ],\n )\n def test_tuple_with_strings(self, arg, expected):\n # see GH 17108\n msg = "unique with argument that is not not a Series"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = pd.unique(arg)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_obj_none_preservation(self):\n # GH 20866\n arr = np.array(["foo", None], dtype=object)\n result = pd.unique(arr)\n expected = np.array(["foo", None], dtype=object)\n\n tm.assert_numpy_array_equal(result, expected, strict_nan=True)\n\n def test_signed_zero(self):\n # GH 21866\n a = np.array([-0.0, 0.0])\n result = pd.unique(a)\n expected = np.array([-0.0]) # 0.0 and -0.0 are equivalent\n tm.assert_numpy_array_equal(result, expected)\n\n def test_different_nans(self):\n # GH 21866\n # create different nans from bit-patterns:\n NAN1 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000000))[0]\n NAN2 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000001))[0]\n assert NAN1 != NAN1\n assert NAN2 != NAN2\n a = 
np.array([NAN1, NAN2]) # NAN1 and NAN2 are equivalent\n result = pd.unique(a)\n expected = np.array([np.nan])\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize("el_type", [np.float64, object])\n def test_first_nan_kept(self, el_type):\n # GH 22295\n # create different nans from bit-patterns:\n bits_for_nan1 = 0xFFF8000000000001\n bits_for_nan2 = 0x7FF8000000000001\n NAN1 = struct.unpack("d", struct.pack("=Q", bits_for_nan1))[0]\n NAN2 = struct.unpack("d", struct.pack("=Q", bits_for_nan2))[0]\n assert NAN1 != NAN1\n assert NAN2 != NAN2\n a = np.array([NAN1, NAN2], dtype=el_type)\n result = pd.unique(a)\n assert result.size == 1\n # use bit patterns to identify which nan was kept:\n result_nan_bits = struct.unpack("=Q", struct.pack("d", result[0]))[0]\n assert result_nan_bits == bits_for_nan1\n\n def test_do_not_mangle_na_values(self, unique_nulls_fixture, unique_nulls_fixture2):\n # GH 22295\n if unique_nulls_fixture is unique_nulls_fixture2:\n return # skip it, values not unique\n a = np.array([unique_nulls_fixture, unique_nulls_fixture2], dtype=object)\n result = pd.unique(a)\n assert result.size == 2\n assert a[0] is unique_nulls_fixture\n assert a[1] is unique_nulls_fixture2\n\n def test_unique_masked(self, any_numeric_ea_dtype):\n # GH#48019\n ser = Series([1, pd.NA, 2] * 3, dtype=any_numeric_ea_dtype)\n result = pd.unique(ser)\n expected = pd.array([1, pd.NA, 2], dtype=any_numeric_ea_dtype)\n tm.assert_extension_array_equal(result, expected)\n\n\ndef test_nunique_ints(index_or_series_or_array):\n # GH#36327\n values = index_or_series_or_array(np.random.default_rng(2).integers(0, 20, 30))\n result = algos.nunique_ints(values)\n expected = len(algos.unique(values))\n assert result == expected\n\n\nclass TestIsin:\n def test_invalid(self):\n msg = (\n r"only list-like objects are allowed to be passed to isin\(\), "\n r"you passed a `int`"\n )\n with pytest.raises(TypeError, match=msg):\n algos.isin(1, 1)\n with pytest.raises(TypeError, 
match=msg):\n algos.isin(1, [1])\n with pytest.raises(TypeError, match=msg):\n algos.isin([1], 1)\n\n def test_basic(self):\n msg = "isin with argument that is not not a Series"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = algos.isin([1, 2], [1])\n expected = np.array([True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = algos.isin(np.array([1, 2]), [1])\n expected = np.array([True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = algos.isin(Series([1, 2]), [1])\n expected = np.array([True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = algos.isin(Series([1, 2]), Series([1]))\n expected = np.array([True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = algos.isin(Series([1, 2]), {1})\n expected = np.array([True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = algos.isin(["a", "b"], ["a"])\n expected = np.array([True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = algos.isin(Series(["a", "b"]), Series(["a"]))\n expected = np.array([True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = algos.isin(Series(["a", "b"]), {"a"})\n expected = np.array([True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = algos.isin(["a", "b"], [1])\n expected = np.array([False, False])\n tm.assert_numpy_array_equal(result, expected)\n\n def test_i8(self):\n arr = date_range("20130101", periods=3).values\n result = algos.isin(arr, [arr[0]])\n expected = np.array([True, False, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = algos.isin(arr, arr[0:2])\n expected = np.array([True, True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = algos.isin(arr, set(arr[0:2]))\n expected = np.array([True, True, False])\n 
tm.assert_numpy_array_equal(result, expected)\n\n arr = timedelta_range("1 day", periods=3).values\n result = algos.isin(arr, [arr[0]])\n expected = np.array([True, False, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = algos.isin(arr, arr[0:2])\n expected = np.array([True, True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = algos.isin(arr, set(arr[0:2]))\n expected = np.array([True, True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize("dtype1", ["m8[ns]", "M8[ns]", "M8[ns, UTC]", "period[D]"])\n @pytest.mark.parametrize("dtype", ["i8", "f8", "u8"])\n def test_isin_datetimelike_values_numeric_comps(self, dtype, dtype1):\n # Anything but object and we get all-False shortcut\n\n dta = date_range("2013-01-01", periods=3)._values\n arr = Series(dta.view("i8")).array.view(dtype1)\n\n comps = arr.view("i8").astype(dtype)\n\n result = algos.isin(comps, arr)\n expected = np.zeros(comps.shape, dtype=bool)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_large(self):\n s = date_range("20000101", periods=2000000, freq="s").values\n result = algos.isin(s, s[0:2])\n expected = np.zeros(len(s), dtype=bool)\n expected[0] = True\n expected[1] = True\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize("dtype", ["m8[ns]", "M8[ns]", "M8[ns, UTC]", "period[D]"])\n def test_isin_datetimelike_all_nat(self, dtype):\n # GH#56427\n dta = date_range("2013-01-01", periods=3)._values\n arr = Series(dta.view("i8")).array.view(dtype)\n\n arr[0] = NaT\n result = algos.isin(arr, [NaT])\n expected = np.array([True, False, False], dtype=bool)\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize("dtype", ["m8[ns]", "M8[ns]", "M8[ns, UTC]"])\n def test_isin_datetimelike_strings_deprecated(self, dtype):\n # GH#53111\n dta = date_range("2013-01-01", periods=3)._values\n arr = Series(dta.view("i8")).array.view(dtype)\n\n vals = [str(x) for x in arr]\n msg = "The 
behavior of 'isin' with dtype=.* is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n res = algos.isin(arr, vals)\n assert res.all()\n\n vals2 = np.array(vals, dtype=str)\n with tm.assert_produces_warning(FutureWarning, match=msg):\n res2 = algos.isin(arr, vals2)\n assert res2.all()\n\n def test_isin_dt64tz_with_nat(self):\n # the all-NaT values used to get inferred to tznaive, which was evaluated\n # as non-matching GH#56427\n dti = date_range("2016-01-01", periods=3, tz="UTC")\n ser = Series(dti)\n ser[0] = NaT\n\n res = algos.isin(ser._values, [NaT])\n exp = np.array([True, False, False], dtype=bool)\n tm.assert_numpy_array_equal(res, exp)\n\n def test_categorical_from_codes(self):\n # GH 16639\n vals = np.array([0, 1, 2, 0])\n cats = ["a", "b", "c"]\n Sd = Series(Categorical([1]).from_codes(vals, cats))\n St = Series(Categorical([1]).from_codes(np.array([0, 1]), cats))\n expected = np.array([True, True, False, True])\n result = algos.isin(Sd, St)\n tm.assert_numpy_array_equal(expected, result)\n\n def test_categorical_isin(self):\n vals = np.array([0, 1, 2, 0])\n cats = ["a", "b", "c"]\n cat = Categorical([1]).from_codes(vals, cats)\n other = Categorical([1]).from_codes(np.array([0, 1]), cats)\n\n expected = np.array([True, True, False, True])\n result = algos.isin(cat, other)\n tm.assert_numpy_array_equal(expected, result)\n\n def test_same_nan_is_in(self):\n # GH 22160\n # nan is special, because from " a is b" doesn't follow "a == b"\n # at least, isin() should follow python's "np.nan in [nan] == True"\n # casting to -> np.float64 -> another float-object somewhere on\n # the way could lead jeopardize this behavior\n comps = [np.nan] # could be casted to float64\n values = [np.nan]\n expected = np.array([True])\n msg = "isin with argument that is not not a Series"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = algos.isin(comps, values)\n tm.assert_numpy_array_equal(expected, result)\n\n def 
test_same_nan_is_in_large(self):\n # https://github.com/pandas-dev/pandas/issues/22205\n s = np.tile(1.0, 1_000_001)\n s[0] = np.nan\n result = algos.isin(s, np.array([np.nan, 1]))\n expected = np.ones(len(s), dtype=bool)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_same_nan_is_in_large_series(self):\n # https://github.com/pandas-dev/pandas/issues/22205\n s = np.tile(1.0, 1_000_001)\n series = Series(s)\n s[0] = np.nan\n result = series.isin(np.array([np.nan, 1]))\n expected = Series(np.ones(len(s), dtype=bool))\n tm.assert_series_equal(result, expected)\n\n def test_same_object_is_in(self):\n # GH 22160\n # there could be special treatment for nans\n # the user however could define a custom class\n # with similar behavior, then we at least should\n # fall back to usual python's behavior: "a in [a] == True"\n class LikeNan:\n def __eq__(self, other) -> bool:\n return False\n\n def __hash__(self):\n return 0\n\n a, b = LikeNan(), LikeNan()\n\n msg = "isin with argument that is not not a Series"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n # same object -> True\n tm.assert_numpy_array_equal(algos.isin([a], [a]), np.array([True]))\n # different objects -> False\n tm.assert_numpy_array_equal(algos.isin([a], [b]), np.array([False]))\n\n def test_different_nans(self):\n # GH 22160\n # all nans are handled as equivalent\n\n comps = [float("nan")]\n values = [float("nan")]\n assert comps[0] is not values[0] # different nan-objects\n\n # as list of python-objects:\n result = algos.isin(np.array(comps), values)\n tm.assert_numpy_array_equal(np.array([True]), result)\n\n # as object-array:\n result = algos.isin(\n np.asarray(comps, dtype=object), np.asarray(values, dtype=object)\n )\n tm.assert_numpy_array_equal(np.array([True]), result)\n\n # as float64-array:\n result = algos.isin(\n np.asarray(comps, dtype=np.float64), np.asarray(values, dtype=np.float64)\n )\n tm.assert_numpy_array_equal(np.array([True]), result)\n\n def 
test_no_cast(self):\n # GH 22160\n # ensure 42 is not casted to a string\n comps = ["ss", 42]\n values = ["42"]\n expected = np.array([False, False])\n msg = "isin with argument that is not not a Series, Index"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = algos.isin(comps, values)\n tm.assert_numpy_array_equal(expected, result)\n\n @pytest.mark.parametrize("empty", [[], Series(dtype=object), np.array([])])\n def test_empty(self, empty):\n # see gh-16991\n vals = Index(["a", "b"])\n expected = np.array([False, False])\n\n result = algos.isin(vals, empty)\n tm.assert_numpy_array_equal(expected, result)\n\n def test_different_nan_objects(self):\n # GH 22119\n comps = np.array(["nan", np.nan * 1j, float("nan")], dtype=object)\n vals = np.array([float("nan")], dtype=object)\n expected = np.array([False, False, True])\n result = algos.isin(comps, vals)\n tm.assert_numpy_array_equal(expected, result)\n\n def test_different_nans_as_float64(self):\n # GH 21866\n # create different nans from bit-patterns,\n # these nans will land in different buckets in the hash-table\n # if no special care is taken\n NAN1 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000000))[0]\n NAN2 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000001))[0]\n assert NAN1 != NAN1\n assert NAN2 != NAN2\n\n # check that NAN1 and NAN2 are equivalent:\n arr = np.array([NAN1, NAN2], dtype=np.float64)\n lookup1 = np.array([NAN1], dtype=np.float64)\n result = algos.isin(arr, lookup1)\n expected = np.array([True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n lookup2 = np.array([NAN2], dtype=np.float64)\n result = algos.isin(arr, lookup2)\n expected = np.array([True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n def test_isin_int_df_string_search(self):\n """Comparing df with int`s (1,2) with a string at isin() ("1")\n -> should not match values because int 1 is not equal str 1"""\n df = DataFrame({"values": [1, 2]})\n result = df.isin(["1"])\n 
expected_false = DataFrame({"values": [False, False]})\n tm.assert_frame_equal(result, expected_false)\n\n def test_isin_nan_df_string_search(self):\n """Comparing df with nan value (np.nan,2) with a string at isin() ("NaN")\n -> should not match values because np.nan is not equal str NaN"""\n df = DataFrame({"values": [np.nan, 2]})\n result = df.isin(np.array(["NaN"], dtype=object))\n expected_false = DataFrame({"values": [False, False]})\n tm.assert_frame_equal(result, expected_false)\n\n def test_isin_float_df_string_search(self):\n """Comparing df with floats (1.4245,2.32441) with a string at isin() ("1.4245")\n -> should not match values because float 1.4245 is not equal str 1.4245"""\n df = DataFrame({"values": [1.4245, 2.32441]})\n result = df.isin(np.array(["1.4245"], dtype=object))\n expected_false = DataFrame({"values": [False, False]})\n tm.assert_frame_equal(result, expected_false)\n\n def test_isin_unsigned_dtype(self):\n # GH#46485\n ser = Series([1378774140726870442], dtype=np.uint64)\n result = ser.isin([1378774140726870528])\n expected = Series(False)\n tm.assert_series_equal(result, expected)\n\n\nclass TestValueCounts:\n def test_value_counts(self):\n arr = np.random.default_rng(1234).standard_normal(4)\n factor = cut(arr, 4)\n\n # assert isinstance(factor, n)\n msg = "pandas.value_counts is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = algos.value_counts(factor)\n breaks = [-1.606, -1.018, -0.431, 0.155, 0.741]\n index = IntervalIndex.from_breaks(breaks).astype(CategoricalDtype(ordered=True))\n expected = Series([1, 0, 2, 1], index=index, name="count")\n tm.assert_series_equal(result.sort_index(), expected.sort_index())\n\n def test_value_counts_bins(self):\n s = [1, 2, 3, 4]\n msg = "pandas.value_counts is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = algos.value_counts(s, bins=1)\n expected = Series(\n [4], index=IntervalIndex.from_tuples([(0.996, 4.0)]), 
name="count"\n )\n tm.assert_series_equal(result, expected)\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = algos.value_counts(s, bins=2, sort=False)\n expected = Series(\n [2, 2],\n index=IntervalIndex.from_tuples([(0.996, 2.5), (2.5, 4.0)]),\n name="count",\n )\n tm.assert_series_equal(result, expected)\n\n def test_value_counts_dtypes(self):\n msg2 = "pandas.value_counts is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg2):\n result = algos.value_counts(np.array([1, 1.0]))\n assert len(result) == 1\n\n with tm.assert_produces_warning(FutureWarning, match=msg2):\n result = algos.value_counts(np.array([1, 1.0]), bins=1)\n assert len(result) == 1\n\n with tm.assert_produces_warning(FutureWarning, match=msg2):\n result = algos.value_counts(Series([1, 1.0, "1"])) # object\n assert len(result) == 2\n\n msg = "bins argument only works with numeric data"\n with pytest.raises(TypeError, match=msg):\n with tm.assert_produces_warning(FutureWarning, match=msg2):\n algos.value_counts(np.array(["1", 1], dtype=object), bins=1)\n\n def test_value_counts_nat(self):\n td = Series([np.timedelta64(10000), NaT], dtype="timedelta64[ns]")\n dt = to_datetime(["NaT", "2014-01-01"])\n\n msg = "pandas.value_counts is deprecated"\n\n for ser in [td, dt]:\n with tm.assert_produces_warning(FutureWarning, match=msg):\n vc = algos.value_counts(ser)\n vc_with_na = algos.value_counts(ser, dropna=False)\n assert len(vc) == 1\n assert len(vc_with_na) == 2\n\n exp_dt = Series({Timestamp("2014-01-01 00:00:00"): 1}, name="count")\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result_dt = algos.value_counts(dt)\n tm.assert_series_equal(result_dt, exp_dt)\n\n exp_td = Series([1], index=[np.timedelta64(10000)], name="count")\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result_td = algos.value_counts(td)\n tm.assert_series_equal(result_td, exp_td)\n\n @pytest.mark.parametrize("dtype", [object, "M8[us]"])\n def 
test_value_counts_datetime_outofbounds(self, dtype):\n # GH 13663\n ser = Series(\n [\n datetime(3000, 1, 1),\n datetime(5000, 1, 1),\n datetime(5000, 1, 1),\n datetime(6000, 1, 1),\n datetime(3000, 1, 1),\n datetime(3000, 1, 1),\n ],\n dtype=dtype,\n )\n res = ser.value_counts()\n\n exp_index = Index(\n [datetime(3000, 1, 1), datetime(5000, 1, 1), datetime(6000, 1, 1)],\n dtype=dtype,\n )\n exp = Series([3, 2, 1], index=exp_index, name="count")\n tm.assert_series_equal(res, exp)\n\n def test_categorical(self):\n s = Series(Categorical(list("aaabbc")))\n result = s.value_counts()\n expected = Series(\n [3, 2, 1], index=CategoricalIndex(["a", "b", "c"]), name="count"\n )\n\n tm.assert_series_equal(result, expected, check_index_type=True)\n\n # preserve order?\n s = s.cat.as_ordered()\n result = s.value_counts()\n expected.index = expected.index.as_ordered()\n tm.assert_series_equal(result, expected, check_index_type=True)\n\n def test_categorical_nans(self):\n s = Series(Categorical(list("aaaaabbbcc"))) # 4,3,2,1 (nan)\n s.iloc[1] = np.nan\n result = s.value_counts()\n expected = Series(\n [4, 3, 2],\n index=CategoricalIndex(["a", "b", "c"], categories=["a", "b", "c"]),\n name="count",\n )\n tm.assert_series_equal(result, expected, check_index_type=True)\n result = s.value_counts(dropna=False)\n expected = Series(\n [4, 3, 2, 1], index=CategoricalIndex(["a", "b", "c", np.nan]), name="count"\n )\n tm.assert_series_equal(result, expected, check_index_type=True)\n\n # out of order\n s = Series(\n Categorical(list("aaaaabbbcc"), ordered=True, categories=["b", "a", "c"])\n )\n s.iloc[1] = np.nan\n result = s.value_counts()\n expected = Series(\n [4, 3, 2],\n index=CategoricalIndex(\n ["a", "b", "c"],\n categories=["b", "a", "c"],\n ordered=True,\n ),\n name="count",\n )\n tm.assert_series_equal(result, expected, check_index_type=True)\n\n result = s.value_counts(dropna=False)\n expected = Series(\n [4, 3, 2, 1],\n index=CategoricalIndex(\n ["a", "b", "c", np.nan], 
categories=["b", "a", "c"], ordered=True\n ),\n name="count",\n )\n tm.assert_series_equal(result, expected, check_index_type=True)\n\n def test_categorical_zeroes(self):\n # keep the `d` category with 0\n s = Series(Categorical(list("bbbaac"), categories=list("abcd"), ordered=True))\n result = s.value_counts()\n expected = Series(\n [3, 2, 1, 0],\n index=Categorical(\n ["b", "a", "c", "d"], categories=list("abcd"), ordered=True\n ),\n name="count",\n )\n tm.assert_series_equal(result, expected, check_index_type=True)\n\n def test_value_counts_dropna(self):\n # https://github.com/pandas-dev/pandas/issues/9443#issuecomment-73719328\n\n tm.assert_series_equal(\n Series([True, True, False]).value_counts(dropna=True),\n Series([2, 1], index=[True, False], name="count"),\n )\n tm.assert_series_equal(\n Series([True, True, False]).value_counts(dropna=False),\n Series([2, 1], index=[True, False], name="count"),\n )\n\n tm.assert_series_equal(\n Series([True] * 3 + [False] * 2 + [None] * 5).value_counts(dropna=True),\n Series([3, 2], index=Index([True, False], dtype=object), name="count"),\n )\n tm.assert_series_equal(\n Series([True] * 5 + [False] * 3 + [None] * 2).value_counts(dropna=False),\n Series([5, 3, 2], index=[True, False, None], name="count"),\n )\n tm.assert_series_equal(\n Series([10.3, 5.0, 5.0]).value_counts(dropna=True),\n Series([2, 1], index=[5.0, 10.3], name="count"),\n )\n tm.assert_series_equal(\n Series([10.3, 5.0, 5.0]).value_counts(dropna=False),\n Series([2, 1], index=[5.0, 10.3], name="count"),\n )\n\n tm.assert_series_equal(\n Series([10.3, 5.0, 5.0, None]).value_counts(dropna=True),\n Series([2, 1], index=[5.0, 10.3], name="count"),\n )\n\n result = Series([10.3, 10.3, 5.0, 5.0, 5.0, None]).value_counts(dropna=False)\n expected = Series([3, 2, 1], index=[5.0, 10.3, None], name="count")\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("dtype", (np.float64, object, "M8[ns]"))\n def test_value_counts_normalized(self, 
dtype):\n # GH12558\n s = Series([1] * 2 + [2] * 3 + [np.nan] * 5)\n s_typed = s.astype(dtype)\n result = s_typed.value_counts(normalize=True, dropna=False)\n expected = Series(\n [0.5, 0.3, 0.2],\n index=Series([np.nan, 2.0, 1.0], dtype=dtype),\n name="proportion",\n )\n tm.assert_series_equal(result, expected)\n\n result = s_typed.value_counts(normalize=True, dropna=True)\n expected = Series(\n [0.6, 0.4], index=Series([2.0, 1.0], dtype=dtype), name="proportion"\n )\n tm.assert_series_equal(result, expected)\n\n def test_value_counts_uint64(self):\n arr = np.array([2**63], dtype=np.uint64)\n expected = Series([1], index=[2**63], name="count")\n msg = "pandas.value_counts is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = algos.value_counts(arr)\n\n tm.assert_series_equal(result, expected)\n\n arr = np.array([-1, 2**63], dtype=object)\n expected = Series([1, 1], index=[-1, 2**63], name="count")\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = algos.value_counts(arr)\n\n tm.assert_series_equal(result, expected)\n\n def test_value_counts_series(self):\n # GH#54857\n values = np.array([3, 1, 2, 3, 4, np.nan])\n result = Series(values).value_counts(bins=3)\n expected = Series(\n [2, 2, 1],\n index=IntervalIndex.from_tuples(\n [(0.996, 2.0), (2.0, 3.0), (3.0, 4.0)], dtype="interval[float64, right]"\n ),\n name="count",\n )\n tm.assert_series_equal(result, expected)\n\n\nclass TestDuplicated:\n def test_duplicated_with_nas(self):\n keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object)\n\n result = algos.duplicated(keys)\n expected = np.array([False, False, False, True, False, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = algos.duplicated(keys, keep="first")\n expected = np.array([False, False, False, True, False, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = algos.duplicated(keys, keep="last")\n expected = np.array([True, False, True, False, False, False])\n 
tm.assert_numpy_array_equal(result, expected)\n\n result = algos.duplicated(keys, keep=False)\n expected = np.array([True, False, True, True, False, True])\n tm.assert_numpy_array_equal(result, expected)\n\n keys = np.empty(8, dtype=object)\n for i, t in enumerate(\n zip([0, 0, np.nan, np.nan] * 2, [0, np.nan, 0, np.nan] * 2)\n ):\n keys[i] = t\n\n result = algos.duplicated(keys)\n falses = [False] * 4\n trues = [True] * 4\n expected = np.array(falses + trues)\n tm.assert_numpy_array_equal(result, expected)\n\n result = algos.duplicated(keys, keep="last")\n expected = np.array(trues + falses)\n tm.assert_numpy_array_equal(result, expected)\n\n result = algos.duplicated(keys, keep=False)\n expected = np.array(trues + trues)\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize(\n "case",\n [\n np.array([1, 2, 1, 5, 3, 2, 4, 1, 5, 6]),\n np.array([1.1, 2.2, 1.1, np.nan, 3.3, 2.2, 4.4, 1.1, np.nan, 6.6]),\n np.array(\n [\n 1 + 1j,\n 2 + 2j,\n 1 + 1j,\n 5 + 5j,\n 3 + 3j,\n 2 + 2j,\n 4 + 4j,\n 1 + 1j,\n 5 + 5j,\n 6 + 6j,\n ]\n ),\n np.array(["a", "b", "a", "e", "c", "b", "d", "a", "e", "f"], dtype=object),\n np.array(\n [1, 2**63, 1, 3**5, 10, 2**63, 39, 1, 3**5, 7], dtype=np.uint64\n ),\n ],\n )\n def test_numeric_object_likes(self, case):\n exp_first = np.array(\n [False, False, True, False, False, True, False, True, True, False]\n )\n exp_last = np.array(\n [True, True, True, True, False, False, False, False, False, False]\n )\n exp_false = exp_first | exp_last\n\n res_first = algos.duplicated(case, keep="first")\n tm.assert_numpy_array_equal(res_first, exp_first)\n\n res_last = algos.duplicated(case, keep="last")\n tm.assert_numpy_array_equal(res_last, exp_last)\n\n res_false = algos.duplicated(case, keep=False)\n tm.assert_numpy_array_equal(res_false, exp_false)\n\n # index\n for idx in [Index(case), Index(case, dtype="category")]:\n res_first = idx.duplicated(keep="first")\n tm.assert_numpy_array_equal(res_first, exp_first)\n\n res_last = 
idx.duplicated(keep="last")\n tm.assert_numpy_array_equal(res_last, exp_last)\n\n res_false = idx.duplicated(keep=False)\n tm.assert_numpy_array_equal(res_false, exp_false)\n\n # series\n for s in [Series(case), Series(case, dtype="category")]:\n res_first = s.duplicated(keep="first")\n tm.assert_series_equal(res_first, Series(exp_first))\n\n res_last = s.duplicated(keep="last")\n tm.assert_series_equal(res_last, Series(exp_last))\n\n res_false = s.duplicated(keep=False)\n tm.assert_series_equal(res_false, Series(exp_false))\n\n def test_datetime_likes(self):\n dt = [\n "2011-01-01",\n "2011-01-02",\n "2011-01-01",\n "NaT",\n "2011-01-03",\n "2011-01-02",\n "2011-01-04",\n "2011-01-01",\n "NaT",\n "2011-01-06",\n ]\n td = [\n "1 days",\n "2 days",\n "1 days",\n "NaT",\n "3 days",\n "2 days",\n "4 days",\n "1 days",\n "NaT",\n "6 days",\n ]\n\n cases = [\n np.array([Timestamp(d) for d in dt]),\n np.array([Timestamp(d, tz="US/Eastern") for d in dt]),\n np.array([Period(d, freq="D") for d in dt]),\n np.array([np.datetime64(d) for d in dt]),\n np.array([Timedelta(d) for d in td]),\n ]\n\n exp_first = np.array(\n [False, False, True, False, False, True, False, True, True, False]\n )\n exp_last = np.array(\n [True, True, True, True, False, False, False, False, False, False]\n )\n exp_false = exp_first | exp_last\n\n for case in cases:\n res_first = algos.duplicated(case, keep="first")\n tm.assert_numpy_array_equal(res_first, exp_first)\n\n res_last = algos.duplicated(case, keep="last")\n tm.assert_numpy_array_equal(res_last, exp_last)\n\n res_false = algos.duplicated(case, keep=False)\n tm.assert_numpy_array_equal(res_false, exp_false)\n\n # index\n for idx in [\n Index(case),\n Index(case, dtype="category"),\n Index(case, dtype=object),\n ]:\n res_first = idx.duplicated(keep="first")\n tm.assert_numpy_array_equal(res_first, exp_first)\n\n res_last = idx.duplicated(keep="last")\n tm.assert_numpy_array_equal(res_last, exp_last)\n\n res_false = idx.duplicated(keep=False)\n 
tm.assert_numpy_array_equal(res_false, exp_false)\n\n # series\n for s in [\n Series(case),\n Series(case, dtype="category"),\n Series(case, dtype=object),\n ]:\n res_first = s.duplicated(keep="first")\n tm.assert_series_equal(res_first, Series(exp_first))\n\n res_last = s.duplicated(keep="last")\n tm.assert_series_equal(res_last, Series(exp_last))\n\n res_false = s.duplicated(keep=False)\n tm.assert_series_equal(res_false, Series(exp_false))\n\n @pytest.mark.parametrize("case", [Index([1, 2, 3]), pd.RangeIndex(0, 3)])\n def test_unique_index(self, case):\n assert case.is_unique is True\n tm.assert_numpy_array_equal(case.duplicated(), np.array([False, False, False]))\n\n @pytest.mark.parametrize(\n "arr, uniques",\n [\n (\n [(0, 0), (0, 1), (1, 0), (1, 1), (0, 0), (0, 1), (1, 0), (1, 1)],\n [(0, 0), (0, 1), (1, 0), (1, 1)],\n ),\n (\n [("b", "c"), ("a", "b"), ("a", "b"), ("b", "c")],\n [("b", "c"), ("a", "b")],\n ),\n ([("a", 1), ("b", 2), ("a", 3), ("a", 1)], [("a", 1), ("b", 2), ("a", 3)]),\n ],\n )\n def test_unique_tuples(self, arr, uniques):\n # https://github.com/pandas-dev/pandas/issues/16519\n expected = np.empty(len(uniques), dtype=object)\n expected[:] = uniques\n\n msg = "unique with argument that is not not a Series"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = pd.unique(arr)\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize(\n "array,expected",\n [\n (\n [1 + 1j, 0, 1, 1j, 1 + 2j, 1 + 2j],\n # Should return a complex dtype in the future\n np.array([(1 + 1j), 0j, (1 + 0j), 1j, (1 + 2j)], dtype=object),\n )\n ],\n )\n def test_unique_complex_numbers(self, array, expected):\n # GH 17927\n msg = "unique with argument that is not not a Series"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = pd.unique(array)\n tm.assert_numpy_array_equal(result, expected)\n\n\nclass TestHashTable:\n @pytest.mark.parametrize(\n "htable, data",\n [\n (\n ht.PyObjectHashTable,\n np.array([f"foo_{i}" 
for i in range(1000)], dtype=object),\n ),\n (\n ht.StringHashTable,\n np.array([f"foo_{i}" for i in range(1000)], dtype=object),\n ),\n (ht.Float64HashTable, np.arange(1000, dtype=np.float64)),\n (ht.Int64HashTable, np.arange(1000, dtype=np.int64)),\n (ht.UInt64HashTable, np.arange(1000, dtype=np.uint64)),\n ],\n )\n def test_hashtable_unique(self, htable, data, writable):\n # output of maker has guaranteed unique elements\n s = Series(data, dtype=data.dtype)\n if htable == ht.Float64HashTable:\n # add NaN for float column\n s.loc[500] = np.nan\n elif htable == ht.PyObjectHashTable:\n # use different NaN types for object column\n s.loc[500:502] = [np.nan, None, NaT]\n\n # create duplicated selection\n s_duplicated = s.sample(frac=3, replace=True).reset_index(drop=True)\n s_duplicated.values.setflags(write=writable)\n\n # drop_duplicates has own cython code (hash_table_func_helper.pxi)\n # and is tested separately; keeps first occurrence like ht.unique()\n expected_unique = s_duplicated.drop_duplicates(keep="first").values\n result_unique = htable().unique(s_duplicated.values)\n tm.assert_numpy_array_equal(result_unique, expected_unique)\n\n # test return_inverse=True\n # reconstruction can only succeed if the inverse is correct\n result_unique, result_inverse = htable().unique(\n s_duplicated.values, return_inverse=True\n )\n tm.assert_numpy_array_equal(result_unique, expected_unique)\n reconstr = result_unique[result_inverse]\n tm.assert_numpy_array_equal(reconstr, s_duplicated.values)\n\n @pytest.mark.parametrize(\n "htable, data",\n [\n (\n ht.PyObjectHashTable,\n np.array([f"foo_{i}" for i in range(1000)], dtype=object),\n ),\n (\n ht.StringHashTable,\n np.array([f"foo_{i}" for i in range(1000)], dtype=object),\n ),\n (ht.Float64HashTable, np.arange(1000, dtype=np.float64)),\n (ht.Int64HashTable, np.arange(1000, dtype=np.int64)),\n (ht.UInt64HashTable, np.arange(1000, dtype=np.uint64)),\n ],\n )\n def test_hashtable_factorize(self, htable, writable, data):\n # 
output of maker has guaranteed unique elements\n s = Series(data, dtype=data.dtype)\n if htable == ht.Float64HashTable:\n # add NaN for float column\n s.loc[500] = np.nan\n elif htable == ht.PyObjectHashTable:\n # use different NaN types for object column\n s.loc[500:502] = [np.nan, None, NaT]\n\n # create duplicated selection\n s_duplicated = s.sample(frac=3, replace=True).reset_index(drop=True)\n s_duplicated.values.setflags(write=writable)\n na_mask = s_duplicated.isna().values\n\n result_unique, result_inverse = htable().factorize(s_duplicated.values)\n\n # drop_duplicates has own cython code (hash_table_func_helper.pxi)\n # and is tested separately; keeps first occurrence like ht.factorize()\n # since factorize removes all NaNs, we do the same here\n expected_unique = s_duplicated.dropna().drop_duplicates().values\n tm.assert_numpy_array_equal(result_unique, expected_unique)\n\n # reconstruction can only succeed if the inverse is correct. Since\n # factorize removes the NaNs, those have to be excluded here as well\n result_reconstruct = result_unique[result_inverse[~na_mask]]\n expected_reconstruct = s_duplicated.dropna().values\n tm.assert_numpy_array_equal(result_reconstruct, expected_reconstruct)\n\n\nclass TestRank:\n @pytest.mark.parametrize(\n "arr",\n [\n [np.nan, np.nan, 5.0, 5.0, 5.0, np.nan, 1, 2, 3, np.nan],\n [4.0, np.nan, 5.0, 5.0, 5.0, np.nan, 1, 2, 4.0, np.nan],\n ],\n )\n def test_scipy_compat(self, arr):\n sp_stats = pytest.importorskip("scipy.stats")\n\n arr = np.array(arr)\n\n mask = ~np.isfinite(arr)\n arr = arr.copy()\n result = libalgos.rank_1d(arr)\n arr[mask] = np.inf\n exp = sp_stats.rankdata(arr)\n exp[mask] = np.nan\n tm.assert_almost_equal(result, exp)\n\n @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])\n def test_basic(self, writable, dtype):\n exp = np.array([1, 2], dtype=np.float64)\n\n data = np.array([1, 100], dtype=dtype)\n data.setflags(write=writable)\n ser = Series(data)\n result = algos.rank(ser)\n 
tm.assert_numpy_array_equal(result, exp)\n\n @pytest.mark.parametrize("dtype", [np.float64, np.uint64])\n def test_uint64_overflow(self, dtype):\n exp = np.array([1, 2], dtype=np.float64)\n\n s = Series([1, 2**63], dtype=dtype)\n tm.assert_numpy_array_equal(algos.rank(s), exp)\n\n def test_too_many_ndims(self):\n arr = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]])\n msg = "Array with ndim > 2 are not supported"\n\n with pytest.raises(TypeError, match=msg):\n algos.rank(arr)\n\n @pytest.mark.single_cpu\n def test_pct_max_many_rows(self):\n # GH 18271\n values = np.arange(2**24 + 1)\n result = algos.rank(values, pct=True).max()\n assert result == 1\n\n values = np.arange(2**25 + 2).reshape(2**24 + 1, 2)\n result = algos.rank(values, pct=True).max()\n assert result == 1\n\n\nclass TestMode:\n def test_no_mode(self):\n exp = Series([], dtype=np.float64, index=Index([], dtype=int))\n tm.assert_numpy_array_equal(algos.mode(np.array([])), exp.values)\n\n @pytest.mark.parametrize("dt", np.typecodes["AllInteger"] + np.typecodes["Float"])\n def test_mode_single(self, dt):\n # GH 15714\n exp_single = [1]\n data_single = [1]\n\n exp_multi = [1]\n data_multi = [1, 1]\n\n ser = Series(data_single, dtype=dt)\n exp = Series(exp_single, dtype=dt)\n tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values)\n tm.assert_series_equal(ser.mode(), exp)\n\n ser = Series(data_multi, dtype=dt)\n exp = Series(exp_multi, dtype=dt)\n tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values)\n tm.assert_series_equal(ser.mode(), exp)\n\n def test_mode_obj_int(self):\n exp = Series([1], dtype=int)\n tm.assert_numpy_array_equal(algos.mode(exp.values), exp.values)\n\n exp = Series(["a", "b", "c"], dtype=object)\n tm.assert_numpy_array_equal(algos.mode(exp.values), exp.values)\n\n @pytest.mark.parametrize("dt", np.typecodes["AllInteger"] + np.typecodes["Float"])\n def test_number_mode(self, dt):\n exp_single = [1]\n data_single = [1] * 5 + [2] * 3\n\n exp_multi = [1, 3]\n data_multi = 
[1] * 5 + [2] * 3 + [3] * 5\n\n ser = Series(data_single, dtype=dt)\n exp = Series(exp_single, dtype=dt)\n tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values)\n tm.assert_series_equal(ser.mode(), exp)\n\n ser = Series(data_multi, dtype=dt)\n exp = Series(exp_multi, dtype=dt)\n tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values)\n tm.assert_series_equal(ser.mode(), exp)\n\n def test_strobj_mode(self):\n exp = ["b"]\n data = ["a"] * 2 + ["b"] * 3\n\n ser = Series(data, dtype="c")\n exp = Series(exp, dtype="c")\n tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values)\n tm.assert_series_equal(ser.mode(), exp)\n\n @pytest.mark.parametrize("dt", [str, object])\n def test_strobj_multi_char(self, dt, using_infer_string):\n exp = ["bar"]\n data = ["foo"] * 2 + ["bar"] * 3\n\n ser = Series(data, dtype=dt)\n exp = Series(exp, dtype=dt)\n if using_infer_string and dt is str:\n tm.assert_extension_array_equal(algos.mode(ser.values), exp.values)\n else:\n tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values)\n tm.assert_series_equal(ser.mode(), exp)\n\n def test_datelike_mode(self):\n exp = Series(["1900-05-03", "2011-01-03", "2013-01-02"], dtype="M8[ns]")\n ser = Series(["2011-01-03", "2013-01-02", "1900-05-03"], dtype="M8[ns]")\n tm.assert_extension_array_equal(algos.mode(ser.values), exp._values)\n tm.assert_series_equal(ser.mode(), exp)\n\n exp = Series(["2011-01-03", "2013-01-02"], dtype="M8[ns]")\n ser = Series(\n ["2011-01-03", "2013-01-02", "1900-05-03", "2011-01-03", "2013-01-02"],\n dtype="M8[ns]",\n )\n tm.assert_extension_array_equal(algos.mode(ser.values), exp._values)\n tm.assert_series_equal(ser.mode(), exp)\n\n def test_timedelta_mode(self):\n exp = Series(["-1 days", "0 days", "1 days"], dtype="timedelta64[ns]")\n ser = Series(["1 days", "-1 days", "0 days"], dtype="timedelta64[ns]")\n tm.assert_extension_array_equal(algos.mode(ser.values), exp._values)\n tm.assert_series_equal(ser.mode(), exp)\n\n exp = Series(["2 
min", "1 day"], dtype="timedelta64[ns]")\n ser = Series(\n ["1 day", "1 day", "-1 day", "-1 day 2 min", "2 min", "2 min"],\n dtype="timedelta64[ns]",\n )\n tm.assert_extension_array_equal(algos.mode(ser.values), exp._values)\n tm.assert_series_equal(ser.mode(), exp)\n\n def test_mixed_dtype(self):\n exp = Series(["foo"], dtype=object)\n ser = Series([1, "foo", "foo"])\n tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values)\n tm.assert_series_equal(ser.mode(), exp)\n\n def test_uint64_overflow(self):\n exp = Series([2**63], dtype=np.uint64)\n ser = Series([1, 2**63, 2**63], dtype=np.uint64)\n tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values)\n tm.assert_series_equal(ser.mode(), exp)\n\n exp = Series([1, 2**63], dtype=np.uint64)\n ser = Series([1, 2**63], dtype=np.uint64)\n tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values)\n tm.assert_series_equal(ser.mode(), exp)\n\n def test_categorical(self):\n c = Categorical([1, 2])\n exp = c\n res = Series(c).mode()._values\n tm.assert_categorical_equal(res, exp)\n\n c = Categorical([1, "a", "a"])\n exp = Categorical(["a"], categories=[1, "a"])\n res = Series(c).mode()._values\n tm.assert_categorical_equal(res, exp)\n\n c = Categorical([1, 1, 2, 3, 3])\n exp = Categorical([1, 3], categories=[1, 2, 3])\n res = Series(c).mode()._values\n tm.assert_categorical_equal(res, exp)\n\n def test_index(self):\n idx = Index([1, 2, 3])\n exp = Series([1, 2, 3], dtype=np.int64)\n tm.assert_numpy_array_equal(algos.mode(idx), exp.values)\n\n idx = Index([1, "a", "a"])\n exp = Series(["a"], dtype=object)\n tm.assert_numpy_array_equal(algos.mode(idx), exp.values)\n\n idx = Index([1, 1, 2, 3, 3])\n exp = Series([1, 3], dtype=np.int64)\n tm.assert_numpy_array_equal(algos.mode(idx), exp.values)\n\n idx = Index(\n ["1 day", "1 day", "-1 day", "-1 day 2 min", "2 min", "2 min"],\n dtype="timedelta64[ns]",\n )\n with pytest.raises(AttributeError, match="TimedeltaIndex"):\n # algos.mode expects Arraylike, does *not* 
unwrap TimedeltaIndex\n algos.mode(idx)\n\n def test_ser_mode_with_name(self):\n # GH 46737\n ser = Series([1, 1, 3], name="foo")\n result = ser.mode()\n expected = Series([1], name="foo")\n tm.assert_series_equal(result, expected)\n\n\nclass TestDiff:\n @pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"])\n def test_diff_datetimelike_nat(self, dtype):\n # NaT - NaT is NaT, not 0\n arr = np.arange(12).astype(np.int64).view(dtype).reshape(3, 4)\n arr[:, 2] = arr.dtype.type("NaT", "ns")\n result = algos.diff(arr, 1, axis=0)\n\n expected = np.ones(arr.shape, dtype="timedelta64[ns]") * 4\n expected[:, 2] = np.timedelta64("NaT", "ns")\n expected[0, :] = np.timedelta64("NaT", "ns")\n\n tm.assert_numpy_array_equal(result, expected)\n\n result = algos.diff(arr.T, 1, axis=1)\n tm.assert_numpy_array_equal(result, expected.T)\n\n def test_diff_ea_axis(self):\n dta = date_range("2016-01-01", periods=3, tz="US/Pacific")._data\n\n msg = "cannot diff DatetimeArray on axis=1"\n with pytest.raises(ValueError, match=msg):\n algos.diff(dta, 1, axis=1)\n\n @pytest.mark.parametrize("dtype", ["int8", "int16"])\n def test_diff_low_precision_int(self, dtype):\n arr = np.array([0, 1, 1, 0, 0], dtype=dtype)\n result = algos.diff(arr, 1)\n expected = np.array([np.nan, 1, 0, -1, 0], dtype="float32")\n tm.assert_numpy_array_equal(result, expected)\n\n\n@pytest.mark.parametrize("op", [np.array, pd.array])\ndef test_union_with_duplicates(op):\n # GH#36289\n lvals = op([3, 1, 3, 4])\n rvals = op([2, 3, 1, 1])\n expected = op([3, 3, 1, 1, 4, 2])\n if isinstance(expected, np.ndarray):\n result = algos.union_with_duplicates(lvals, rvals)\n tm.assert_numpy_array_equal(result, expected)\n else:\n result = algos.union_with_duplicates(lvals, rvals)\n tm.assert_extension_array_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\test_algos.py | test_algos.py | Python | 78,613 | 0.75 | 0.086935 | 0.072622 | awesome-app | 616 | 2023-10-25T03:18:09.634525 | MIT | true | 
37fd72ebe2e84f16db5ec8c09b4f00a8 |
import collections\nfrom functools import partial\nimport string\nimport subprocess\nimport sys\nimport textwrap\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import Series\nimport pandas._testing as tm\nfrom pandas.core import ops\nimport pandas.core.common as com\nfrom pandas.util.version import Version\n\n\ndef test_get_callable_name():\n getname = com.get_callable_name\n\n def fn(x):\n return x\n\n lambda_ = lambda x: x\n part1 = partial(fn)\n part2 = partial(part1)\n\n class somecall:\n def __call__(self):\n # This shouldn't actually get called below; somecall.__init__\n # should.\n raise NotImplementedError\n\n assert getname(fn) == "fn"\n assert getname(lambda_)\n assert getname(part1) == "fn"\n assert getname(part2) == "fn"\n assert getname(somecall()) == "somecall"\n assert getname(1) is None\n\n\ndef test_any_none():\n assert com.any_none(1, 2, 3, None)\n assert not com.any_none(1, 2, 3, 4)\n\n\ndef test_all_not_none():\n assert com.all_not_none(1, 2, 3, 4)\n assert not com.all_not_none(1, 2, 3, None)\n assert not com.all_not_none(None, None, None, None)\n\n\ndef test_random_state():\n # Check with seed\n state = com.random_state(5)\n assert state.uniform() == np.random.RandomState(5).uniform()\n\n # Check with random state object\n state2 = np.random.RandomState(10)\n assert com.random_state(state2).uniform() == np.random.RandomState(10).uniform()\n\n # check with no arg random state\n assert com.random_state() is np.random\n\n # check array-like\n # GH32503\n state_arr_like = np.random.default_rng(None).integers(\n 0, 2**31, size=624, dtype="uint32"\n )\n assert (\n com.random_state(state_arr_like).uniform()\n == np.random.RandomState(state_arr_like).uniform()\n )\n\n # Check BitGenerators\n # GH32503\n assert (\n com.random_state(np.random.MT19937(3)).uniform()\n == np.random.RandomState(np.random.MT19937(3)).uniform()\n )\n assert (\n com.random_state(np.random.PCG64(11)).uniform()\n == 
np.random.RandomState(np.random.PCG64(11)).uniform()\n )\n\n # Error for floats or strings\n msg = (\n "random_state must be an integer, array-like, a BitGenerator, Generator, "\n "a numpy RandomState, or None"\n )\n with pytest.raises(ValueError, match=msg):\n com.random_state("test")\n\n with pytest.raises(ValueError, match=msg):\n com.random_state(5.5)\n\n\n@pytest.mark.parametrize(\n "left, right, expected",\n [\n (Series([1], name="x"), Series([2], name="x"), "x"),\n (Series([1], name="x"), Series([2], name="y"), None),\n (Series([1]), Series([2], name="x"), None),\n (Series([1], name="x"), Series([2]), None),\n (Series([1], name="x"), [2], "x"),\n ([1], Series([2], name="y"), "y"),\n # matching NAs\n (Series([1], name=np.nan), pd.Index([], name=np.nan), np.nan),\n (Series([1], name=np.nan), pd.Index([], name=pd.NaT), None),\n (Series([1], name=pd.NA), pd.Index([], name=pd.NA), pd.NA),\n # tuple name GH#39757\n (\n Series([1], name=np.int64(1)),\n pd.Index([], name=(np.int64(1), np.int64(2))),\n None,\n ),\n (\n Series([1], name=(np.int64(1), np.int64(2))),\n pd.Index([], name=(np.int64(1), np.int64(2))),\n (np.int64(1), np.int64(2)),\n ),\n pytest.param(\n Series([1], name=(np.float64("nan"), np.int64(2))),\n pd.Index([], name=(np.float64("nan"), np.int64(2))),\n (np.float64("nan"), np.int64(2)),\n marks=pytest.mark.xfail(\n reason="Not checking for matching NAs inside tuples."\n ),\n ),\n ],\n)\ndef test_maybe_match_name(left, right, expected):\n res = ops.common._maybe_match_name(left, right)\n assert res is expected or res == expected\n\n\ndef test_standardize_mapping():\n # No uninitialized defaultdicts\n msg = r"to_dict\(\) only accepts initialized defaultdicts"\n with pytest.raises(TypeError, match=msg):\n com.standardize_mapping(collections.defaultdict)\n\n # No non-mapping subtypes, instance\n msg = "unsupported type: <class 'list'>"\n with pytest.raises(TypeError, match=msg):\n com.standardize_mapping([])\n\n # No non-mapping subtypes, class\n with 
pytest.raises(TypeError, match=msg):\n com.standardize_mapping(list)\n\n fill = {"bad": "data"}\n assert com.standardize_mapping(fill) == dict\n\n # Convert instance to type\n assert com.standardize_mapping({}) == dict\n\n dd = collections.defaultdict(list)\n assert isinstance(com.standardize_mapping(dd), partial)\n\n\ndef test_git_version():\n # GH 21295\n git_version = pd.__git_version__\n assert len(git_version) == 40\n assert all(c in string.hexdigits for c in git_version)\n\n\ndef test_version_tag():\n version = Version(pd.__version__)\n try:\n version > Version("0.0.1")\n except TypeError:\n raise ValueError(\n "No git tags exist, please sync tags between upstream and your repo"\n )\n\n\n@pytest.mark.parametrize(\n "obj", [(obj,) for obj in pd.__dict__.values() if callable(obj)]\n)\ndef test_serializable(obj):\n # GH 35611\n unpickled = tm.round_trip_pickle(obj)\n assert type(obj) == type(unpickled)\n\n\nclass TestIsBoolIndexer:\n def test_non_bool_array_with_na(self):\n # in particular, this should not raise\n arr = np.array(["A", "B", np.nan], dtype=object)\n assert not com.is_bool_indexer(arr)\n\n def test_list_subclass(self):\n # GH#42433\n\n class MyList(list):\n pass\n\n val = MyList(["a"])\n\n assert not com.is_bool_indexer(val)\n\n val = MyList([True])\n assert com.is_bool_indexer(val)\n\n def test_frozenlist(self):\n # GH#42461\n data = {"col1": [1, 2], "col2": [3, 4]}\n df = pd.DataFrame(data=data)\n\n frozen = df.index.names[1:]\n assert not com.is_bool_indexer(frozen)\n\n result = df[frozen]\n expected = df[[]]\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("with_exception", [True, False])\ndef test_temp_setattr(with_exception):\n # GH#45954\n ser = Series(dtype=object)\n ser.name = "first"\n # Raise a ValueError in either case to satisfy pytest.raises\n match = "Inside exception raised" if with_exception else "Outside exception raised"\n with pytest.raises(ValueError, match=match):\n with com.temp_setattr(ser, "name", 
"second"):\n assert ser.name == "second"\n if with_exception:\n raise ValueError("Inside exception raised")\n raise ValueError("Outside exception raised")\n assert ser.name == "first"\n\n\n@pytest.mark.single_cpu\ndef test_str_size():\n # GH#21758\n a = "a"\n expected = sys.getsizeof(a)\n pyexe = sys.executable.replace("\\", "/")\n call = [\n pyexe,\n "-c",\n "a='a';import sys;sys.getsizeof(a);import pandas;print(sys.getsizeof(a));",\n ]\n result = subprocess.check_output(call).decode()[-4:-1].strip("\n")\n assert int(result) == int(expected)\n\n\n@pytest.mark.single_cpu\ndef test_bz2_missing_import():\n # Check whether bz2 missing import is handled correctly (issue #53857)\n code = """\n import sys\n sys.modules['bz2'] = None\n import pytest\n import pandas as pd\n from pandas.compat import get_bz2_file\n msg = 'bz2 module not available.'\n with pytest.raises(RuntimeError, match=msg):\n get_bz2_file()\n """\n code = textwrap.dedent(code)\n call = [sys.executable, "-c", code]\n subprocess.check_output(call)\n | .venv\Lib\site-packages\pandas\tests\test_common.py | test_common.py | Python | 7,695 | 0.95 | 0.11236 | 0.115741 | node-utils | 778 | 2023-09-11T22:49:37.343166 | BSD-3-Clause | true | 6ec0ce25add0ca4be0e3c29cbb80ee8f |
"""\nTesting that we work in the downstream packages\n"""\nimport array\nimport subprocess\nimport sys\n\nimport numpy as np\nimport pytest\n\nfrom pandas.errors import IntCastingNaNError\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n DatetimeIndex,\n Series,\n TimedeltaIndex,\n)\nimport pandas._testing as tm\nfrom pandas.core.arrays import (\n DatetimeArray,\n TimedeltaArray,\n)\nfrom pandas.util.version import Version\n\n\n@pytest.fixture\ndef df():\n return DataFrame({"A": [1, 2, 3]})\n\n\ndef test_dask(df):\n # dask sets "compute.use_numexpr" to False, so catch the current value\n # and ensure to reset it afterwards to avoid impacting other tests\n olduse = pd.get_option("compute.use_numexpr")\n\n try:\n pytest.importorskip("toolz")\n dd = pytest.importorskip("dask.dataframe")\n\n ddf = dd.from_pandas(df, npartitions=3)\n assert ddf.A is not None\n assert ddf.compute() is not None\n finally:\n pd.set_option("compute.use_numexpr", olduse)\n\n\ndef test_dask_ufunc():\n # dask sets "compute.use_numexpr" to False, so catch the current value\n # and ensure to reset it afterwards to avoid impacting other tests\n olduse = pd.get_option("compute.use_numexpr")\n\n try:\n da = pytest.importorskip("dask.array")\n dd = pytest.importorskip("dask.dataframe")\n\n s = Series([1.5, 2.3, 3.7, 4.0])\n ds = dd.from_pandas(s, npartitions=2)\n\n result = da.fix(ds).compute()\n expected = np.fix(s)\n tm.assert_series_equal(result, expected)\n finally:\n pd.set_option("compute.use_numexpr", olduse)\n\n\ndef test_construct_dask_float_array_int_dtype_match_ndarray():\n # GH#40110 make sure we treat a float-dtype dask array with the same\n # rules we would for an ndarray\n dd = pytest.importorskip("dask.dataframe")\n\n arr = np.array([1, 2.5, 3])\n darr = dd.from_array(arr)\n\n res = Series(darr)\n expected = Series(arr)\n tm.assert_series_equal(res, expected)\n\n # GH#49599 in 2.0 we raise instead of silently ignoring the dtype\n 
msg = "Trying to coerce float values to integers"\n with pytest.raises(ValueError, match=msg):\n Series(darr, dtype="i8")\n\n msg = r"Cannot convert non-finite values \(NA or inf\) to integer"\n arr[2] = np.nan\n with pytest.raises(IntCastingNaNError, match=msg):\n Series(darr, dtype="i8")\n # which is the same as we get with a numpy input\n with pytest.raises(IntCastingNaNError, match=msg):\n Series(arr, dtype="i8")\n\n\ndef test_xarray(df):\n pytest.importorskip("xarray")\n\n assert df.to_xarray() is not None\n\n\ndef test_xarray_cftimeindex_nearest():\n # https://github.com/pydata/xarray/issues/3751\n cftime = pytest.importorskip("cftime")\n xarray = pytest.importorskip("xarray")\n\n times = xarray.cftime_range("0001", periods=2)\n key = cftime.DatetimeGregorian(2000, 1, 1)\n result = times.get_indexer([key], method="nearest")\n expected = 1\n assert result == expected\n\n\n@pytest.mark.single_cpu\ndef test_oo_optimizable():\n # GH 21071\n subprocess.check_call([sys.executable, "-OO", "-c", "import pandas"])\n\n\n@pytest.mark.single_cpu\ndef test_oo_optimized_datetime_index_unpickle():\n # GH 42866\n subprocess.check_call(\n [\n sys.executable,\n "-OO",\n "-c",\n (\n "import pandas as pd, pickle; "\n "pickle.loads(pickle.dumps(pd.date_range('2021-01-01', periods=1)))"\n ),\n ]\n )\n\n\ndef test_statsmodels():\n smf = pytest.importorskip("statsmodels.formula.api")\n\n df = DataFrame(\n {"Lottery": range(5), "Literacy": range(5), "Pop1831": range(100, 105)}\n )\n smf.ols("Lottery ~ Literacy + np.log(Pop1831)", data=df).fit()\n\n\ndef test_scikit_learn():\n pytest.importorskip("sklearn")\n from sklearn import (\n datasets,\n svm,\n )\n\n digits = datasets.load_digits()\n clf = svm.SVC(gamma=0.001, C=100.0)\n clf.fit(digits.data[:-1], digits.target[:-1])\n clf.predict(digits.data[-1:])\n\n\ndef test_seaborn():\n seaborn = pytest.importorskip("seaborn")\n tips = DataFrame(\n {"day": pd.date_range("2023", freq="D", periods=5), "total_bill": range(5)}\n )\n 
seaborn.stripplot(x="day", y="total_bill", data=tips)\n\n\ndef test_pandas_datareader():\n pytest.importorskip("pandas_datareader")\n\n\n@pytest.mark.filterwarnings("ignore:Passing a BlockManager:DeprecationWarning")\ndef test_pyarrow(df):\n pyarrow = pytest.importorskip("pyarrow")\n table = pyarrow.Table.from_pandas(df)\n result = table.to_pandas()\n tm.assert_frame_equal(result, df)\n\n\ndef test_yaml_dump(df):\n # GH#42748\n yaml = pytest.importorskip("yaml")\n\n dumped = yaml.dump(df)\n\n loaded = yaml.load(dumped, Loader=yaml.Loader)\n tm.assert_frame_equal(df, loaded)\n\n loaded2 = yaml.load(dumped, Loader=yaml.UnsafeLoader)\n tm.assert_frame_equal(df, loaded2)\n\n\n@pytest.mark.single_cpu\ndef test_missing_required_dependency():\n # GH 23868\n # To ensure proper isolation, we pass these flags\n # -S : disable site-packages\n # -s : disable user site-packages\n # -E : disable PYTHON* env vars, especially PYTHONPATH\n # https://github.com/MacPython/pandas-wheels/pull/50\n\n pyexe = sys.executable.replace("\\", "/")\n\n # We skip this test if pandas is installed as a site package. We first\n # import the package normally and check the path to the module before\n # executing the test which imports pandas with site packages disabled.\n call = [pyexe, "-c", "import pandas;print(pandas.__file__)"]\n output = subprocess.check_output(call).decode()\n if "site-packages" in output:\n pytest.skip("pandas installed as site package")\n\n # This test will fail if pandas is installed as a site package. 
The flags\n # prevent pandas being imported and the test will report Failed: DID NOT\n # RAISE <class 'subprocess.CalledProcessError'>\n call = [pyexe, "-sSE", "-c", "import pandas"]\n\n msg = (\n rf"Command '\['{pyexe}', '-sSE', '-c', 'import pandas'\]' "\n "returned non-zero exit status 1."\n )\n\n with pytest.raises(subprocess.CalledProcessError, match=msg) as exc:\n subprocess.check_output(call, stderr=subprocess.STDOUT)\n\n output = exc.value.stdout.decode()\n for name in ["numpy", "pytz", "dateutil"]:\n assert name in output\n\n\ndef test_frame_setitem_dask_array_into_new_col(request):\n # GH#47128\n\n # dask sets "compute.use_numexpr" to False, so catch the current value\n # and ensure to reset it afterwards to avoid impacting other tests\n olduse = pd.get_option("compute.use_numexpr")\n\n try:\n dask = pytest.importorskip("dask")\n da = pytest.importorskip("dask.array")\n if Version(dask.__version__) <= Version("2025.1.0") and Version(\n np.__version__\n ) >= Version("2.1"):\n request.applymarker(\n pytest.mark.xfail(reason="loc.__setitem__ incorrectly mutated column c")\n )\n\n dda = da.array([1, 2])\n df = DataFrame({"a": ["a", "b"]})\n df["b"] = dda\n df["c"] = dda\n df.loc[[False, True], "b"] = 100\n result = df.loc[[1], :]\n expected = DataFrame({"a": ["b"], "b": [100], "c": [2]}, index=[1])\n tm.assert_frame_equal(result, expected)\n finally:\n pd.set_option("compute.use_numexpr", olduse)\n\n\ndef test_pandas_priority():\n # GH#48347\n\n class MyClass:\n __pandas_priority__ = 5000\n\n def __radd__(self, other):\n return self\n\n left = MyClass()\n right = Series(range(3))\n\n assert right.__add__(left) is NotImplemented\n assert right + left is left\n\n\n@pytest.fixture(\n params=[\n "memoryview",\n "array",\n pytest.param("dask", marks=td.skip_if_no("dask.array")),\n pytest.param("xarray", marks=td.skip_if_no("xarray")),\n ]\n)\ndef array_likes(request):\n """\n Fixture giving a numpy array and a parametrized 'data' object, which can\n be a 
memoryview, array, dask or xarray object created from the numpy array.\n """\n # GH#24539 recognize e.g xarray, dask, ...\n arr = np.array([1, 2, 3], dtype=np.int64)\n\n name = request.param\n if name == "memoryview":\n data = memoryview(arr)\n elif name == "array":\n data = array.array("i", arr)\n elif name == "dask":\n import dask.array\n\n data = dask.array.array(arr)\n elif name == "xarray":\n import xarray as xr\n\n data = xr.DataArray(arr)\n\n return arr, data\n\n\n@pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"])\ndef test_from_obscure_array(dtype, array_likes):\n # GH#24539 recognize e.g xarray, dask, ...\n # Note: we dont do this for PeriodArray bc _from_sequence won't accept\n # an array of integers\n # TODO: could check with arraylike of Period objects\n arr, data = array_likes\n\n cls = {"M8[ns]": DatetimeArray, "m8[ns]": TimedeltaArray}[dtype]\n\n depr_msg = f"{cls.__name__}.__init__ is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=depr_msg):\n expected = cls(arr)\n result = cls._from_sequence(data, dtype=dtype)\n tm.assert_extension_array_equal(result, expected)\n\n if not isinstance(data, memoryview):\n # FIXME(GH#44431) these raise on memoryview and attempted fix\n # fails on py3.10\n func = {"M8[ns]": pd.to_datetime, "m8[ns]": pd.to_timedelta}[dtype]\n result = func(arr).array\n expected = func(data).array\n tm.assert_equal(result, expected)\n\n # Let's check the Indexes while we're here\n idx_cls = {"M8[ns]": DatetimeIndex, "m8[ns]": TimedeltaIndex}[dtype]\n result = idx_cls(arr)\n expected = idx_cls(data)\n tm.assert_index_equal(result, expected)\n\n\ndef test_dataframe_consortium() -> None:\n """\n Test some basic methods of the dataframe consortium standard.\n\n Full testing is done at https://github.com/data-apis/dataframe-api-compat,\n this is just to check that the entry point works as expected.\n """\n pytest.importorskip("dataframe_api_compat")\n df_pd = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})\n df = 
df_pd.__dataframe_consortium_standard__()\n result_1 = df.get_column_names()\n expected_1 = ["a", "b"]\n assert result_1 == expected_1\n\n ser = Series([1, 2, 3], name="a")\n col = ser.__column_consortium_standard__()\n assert col.name == "a"\n\n\ndef test_xarray_coerce_unit():\n # GH44053\n xr = pytest.importorskip("xarray")\n\n arr = xr.DataArray([1, 2, 3])\n result = pd.to_datetime(arr, unit="ns")\n expected = DatetimeIndex(\n [\n "1970-01-01 00:00:00.000000001",\n "1970-01-01 00:00:00.000000002",\n "1970-01-01 00:00:00.000000003",\n ],\n dtype="datetime64[ns]",\n freq=None,\n )\n tm.assert_index_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\test_downstream.py | test_downstream.py | Python | 10,856 | 0.95 | 0.108108 | 0.129825 | awesome-app | 932 | 2025-06-14T11:05:37.399996 | Apache-2.0 | true | c6432c65ab92e3214406f893e0b56db6 |
import pytest\n\nfrom pandas.errors import (\n AbstractMethodError,\n UndefinedVariableError,\n)\n\nimport pandas as pd\n\n\n@pytest.mark.parametrize(\n "exc",\n [\n "AttributeConflictWarning",\n "CSSWarning",\n "CategoricalConversionWarning",\n "ClosedFileError",\n "DataError",\n "DatabaseError",\n "DtypeWarning",\n "EmptyDataError",\n "IncompatibilityWarning",\n "IndexingError",\n "InvalidColumnName",\n "InvalidComparison",\n "InvalidVersion",\n "LossySetitemError",\n "MergeError",\n "NoBufferPresent",\n "NumExprClobberingError",\n "NumbaUtilError",\n "OptionError",\n "OutOfBoundsDatetime",\n "ParserError",\n "ParserWarning",\n "PerformanceWarning",\n "PossibleDataLossError",\n "PossiblePrecisionLoss",\n "PyperclipException",\n "SettingWithCopyError",\n "SettingWithCopyWarning",\n "SpecificationError",\n "UnsortedIndexError",\n "UnsupportedFunctionCall",\n "ValueLabelTypeMismatch",\n ],\n)\ndef test_exception_importable(exc):\n from pandas import errors\n\n err = getattr(errors, exc)\n assert err is not None\n\n # check that we can raise on them\n\n msg = "^$"\n\n with pytest.raises(err, match=msg):\n raise err()\n\n\ndef test_catch_oob():\n from pandas import errors\n\n msg = "Cannot cast 1500-01-01 00:00:00 to unit='ns' without overflow"\n with pytest.raises(errors.OutOfBoundsDatetime, match=msg):\n pd.Timestamp("15000101").as_unit("ns")\n\n\n@pytest.mark.parametrize(\n "is_local",\n [\n True,\n False,\n ],\n)\ndef test_catch_undefined_variable_error(is_local):\n variable_name = "x"\n if is_local:\n msg = f"local variable '{variable_name}' is not defined"\n else:\n msg = f"name '{variable_name}' is not defined"\n\n with pytest.raises(UndefinedVariableError, match=msg):\n raise UndefinedVariableError(variable_name, is_local)\n\n\nclass Foo:\n @classmethod\n def classmethod(cls):\n raise AbstractMethodError(cls, methodtype="classmethod")\n\n @property\n def property(self):\n raise AbstractMethodError(self, methodtype="property")\n\n def method(self):\n raise 
AbstractMethodError(self)\n\n\ndef test_AbstractMethodError_classmethod():\n xpr = "This classmethod must be defined in the concrete class Foo"\n with pytest.raises(AbstractMethodError, match=xpr):\n Foo.classmethod()\n\n xpr = "This property must be defined in the concrete class Foo"\n with pytest.raises(AbstractMethodError, match=xpr):\n Foo().property\n\n xpr = "This method must be defined in the concrete class Foo"\n with pytest.raises(AbstractMethodError, match=xpr):\n Foo().method()\n | .venv\Lib\site-packages\pandas\tests\test_errors.py | test_errors.py | Python | 2,789 | 0.95 | 0.107143 | 0.011111 | node-utils | 672 | 2024-09-16T11:17:03.845801 | MIT | true | bf2bd2405d3bb65855b05e5217aa21b1 |
import operator\nimport re\n\nimport numpy as np\nimport pytest\n\nfrom pandas import option_context\nimport pandas._testing as tm\nfrom pandas.core.api import (\n DataFrame,\n Index,\n Series,\n)\nfrom pandas.core.computation import expressions as expr\n\n\n@pytest.fixture\ndef _frame():\n return DataFrame(\n np.random.default_rng(2).standard_normal((10001, 4)),\n columns=list("ABCD"),\n dtype="float64",\n )\n\n\n@pytest.fixture\ndef _frame2():\n return DataFrame(\n np.random.default_rng(2).standard_normal((100, 4)),\n columns=list("ABCD"),\n dtype="float64",\n )\n\n\n@pytest.fixture\ndef _mixed(_frame):\n return DataFrame(\n {\n "A": _frame["A"].copy(),\n "B": _frame["B"].astype("float32"),\n "C": _frame["C"].astype("int64"),\n "D": _frame["D"].astype("int32"),\n }\n )\n\n\n@pytest.fixture\ndef _mixed2(_frame2):\n return DataFrame(\n {\n "A": _frame2["A"].copy(),\n "B": _frame2["B"].astype("float32"),\n "C": _frame2["C"].astype("int64"),\n "D": _frame2["D"].astype("int32"),\n }\n )\n\n\n@pytest.fixture\ndef _integer():\n return DataFrame(\n np.random.default_rng(2).integers(1, 100, size=(10001, 4)),\n columns=list("ABCD"),\n dtype="int64",\n )\n\n\n@pytest.fixture\ndef _integer_integers(_integer):\n # integers to get a case with zeros\n return _integer * np.random.default_rng(2).integers(0, 2, size=np.shape(_integer))\n\n\n@pytest.fixture\ndef _integer2():\n return DataFrame(\n np.random.default_rng(2).integers(1, 100, size=(101, 4)),\n columns=list("ABCD"),\n dtype="int64",\n )\n\n\n@pytest.fixture\ndef _array(_frame):\n return _frame["A"].values.copy()\n\n\n@pytest.fixture\ndef _array2(_frame2):\n return _frame2["A"].values.copy()\n\n\n@pytest.fixture\ndef _array_mixed(_mixed):\n return _mixed["D"].values.copy()\n\n\n@pytest.fixture\ndef _array_mixed2(_mixed2):\n return _mixed2["D"].values.copy()\n\n\n@pytest.mark.skipif(not expr.USE_NUMEXPR, reason="not using numexpr")\nclass TestExpressions:\n @staticmethod\n def call_op(df, other, flex: bool, opname: str):\n 
if flex:\n op = lambda x, y: getattr(x, opname)(y)\n op.__name__ = opname\n else:\n op = getattr(operator, opname)\n\n with option_context("compute.use_numexpr", False):\n expected = op(df, other)\n\n expr.get_test_result()\n\n result = op(df, other)\n return result, expected\n\n @pytest.mark.parametrize(\n "fixture",\n [\n "_integer",\n "_integer2",\n "_integer_integers",\n "_frame",\n "_frame2",\n "_mixed",\n "_mixed2",\n ],\n )\n @pytest.mark.parametrize("flex", [True, False])\n @pytest.mark.parametrize(\n "arith", ["add", "sub", "mul", "mod", "truediv", "floordiv"]\n )\n def test_run_arithmetic(self, request, fixture, flex, arith, monkeypatch):\n df = request.getfixturevalue(fixture)\n with monkeypatch.context() as m:\n m.setattr(expr, "_MIN_ELEMENTS", 0)\n result, expected = self.call_op(df, df, flex, arith)\n\n if arith == "truediv":\n assert all(x.kind == "f" for x in expected.dtypes.values)\n tm.assert_equal(expected, result)\n\n for i in range(len(df.columns)):\n result, expected = self.call_op(\n df.iloc[:, i], df.iloc[:, i], flex, arith\n )\n if arith == "truediv":\n assert expected.dtype.kind == "f"\n tm.assert_equal(expected, result)\n\n @pytest.mark.parametrize(\n "fixture",\n [\n "_integer",\n "_integer2",\n "_integer_integers",\n "_frame",\n "_frame2",\n "_mixed",\n "_mixed2",\n ],\n )\n @pytest.mark.parametrize("flex", [True, False])\n def test_run_binary(self, request, fixture, flex, comparison_op, monkeypatch):\n """\n tests solely that the result is the same whether or not numexpr is\n enabled. 
Need to test whether the function does the correct thing\n elsewhere.\n """\n df = request.getfixturevalue(fixture)\n arith = comparison_op.__name__\n with option_context("compute.use_numexpr", False):\n other = df.copy() + 1\n\n with monkeypatch.context() as m:\n m.setattr(expr, "_MIN_ELEMENTS", 0)\n expr.set_test_mode(True)\n\n result, expected = self.call_op(df, other, flex, arith)\n\n used_numexpr = expr.get_test_result()\n assert used_numexpr, "Did not use numexpr as expected."\n tm.assert_equal(expected, result)\n\n for i in range(len(df.columns)):\n binary_comp = other.iloc[:, i] + 1\n self.call_op(df.iloc[:, i], binary_comp, flex, "add")\n\n def test_invalid(self):\n array = np.random.default_rng(2).standard_normal(1_000_001)\n array2 = np.random.default_rng(2).standard_normal(100)\n\n # no op\n result = expr._can_use_numexpr(operator.add, None, array, array, "evaluate")\n assert not result\n\n # min elements\n result = expr._can_use_numexpr(operator.add, "+", array2, array2, "evaluate")\n assert not result\n\n # ok, we only check on first part of expression\n result = expr._can_use_numexpr(operator.add, "+", array, array2, "evaluate")\n assert result\n\n @pytest.mark.filterwarnings("ignore:invalid value encountered in:RuntimeWarning")\n @pytest.mark.parametrize(\n "opname,op_str",\n [("add", "+"), ("sub", "-"), ("mul", "*"), ("truediv", "/"), ("pow", "**")],\n )\n @pytest.mark.parametrize(\n "left_fix,right_fix", [("_array", "_array2"), ("_array_mixed", "_array_mixed2")]\n )\n def test_binary_ops(self, request, opname, op_str, left_fix, right_fix):\n left = request.getfixturevalue(left_fix)\n right = request.getfixturevalue(right_fix)\n\n def testit(left, right, opname, op_str):\n if opname == "pow":\n left = np.abs(left)\n\n op = getattr(operator, opname)\n\n # array has 0s\n result = expr.evaluate(op, left, left, use_numexpr=True)\n expected = expr.evaluate(op, left, left, use_numexpr=False)\n tm.assert_numpy_array_equal(result, expected)\n\n result = 
expr._can_use_numexpr(op, op_str, right, right, "evaluate")\n assert not result\n\n with option_context("compute.use_numexpr", False):\n testit(left, right, opname, op_str)\n\n expr.set_numexpr_threads(1)\n testit(left, right, opname, op_str)\n expr.set_numexpr_threads()\n testit(left, right, opname, op_str)\n\n @pytest.mark.parametrize(\n "left_fix,right_fix", [("_array", "_array2"), ("_array_mixed", "_array_mixed2")]\n )\n def test_comparison_ops(self, request, comparison_op, left_fix, right_fix):\n left = request.getfixturevalue(left_fix)\n right = request.getfixturevalue(right_fix)\n\n def testit():\n f12 = left + 1\n f22 = right + 1\n\n op = comparison_op\n\n result = expr.evaluate(op, left, f12, use_numexpr=True)\n expected = expr.evaluate(op, left, f12, use_numexpr=False)\n tm.assert_numpy_array_equal(result, expected)\n\n result = expr._can_use_numexpr(op, op, right, f22, "evaluate")\n assert not result\n\n with option_context("compute.use_numexpr", False):\n testit()\n\n expr.set_numexpr_threads(1)\n testit()\n expr.set_numexpr_threads()\n testit()\n\n @pytest.mark.parametrize("cond", [True, False])\n @pytest.mark.parametrize("fixture", ["_frame", "_frame2", "_mixed", "_mixed2"])\n def test_where(self, request, cond, fixture):\n df = request.getfixturevalue(fixture)\n\n def testit():\n c = np.empty(df.shape, dtype=np.bool_)\n c.fill(cond)\n result = expr.where(c, df.values, df.values + 1)\n expected = np.where(c, df.values, df.values + 1)\n tm.assert_numpy_array_equal(result, expected)\n\n with option_context("compute.use_numexpr", False):\n testit()\n\n expr.set_numexpr_threads(1)\n testit()\n expr.set_numexpr_threads()\n testit()\n\n @pytest.mark.parametrize(\n "op_str,opname", [("/", "truediv"), ("//", "floordiv"), ("**", "pow")]\n )\n def test_bool_ops_raise_on_arithmetic(self, op_str, opname):\n df = DataFrame(\n {\n "a": np.random.default_rng(2).random(10) > 0.5,\n "b": np.random.default_rng(2).random(10) > 0.5,\n }\n )\n\n msg = f"operator 
'{opname}' not implemented for bool dtypes"
        f = getattr(operator, opname)
        err_msg = re.escape(msg)

        # The arithmetic op must refuse bool frames/series on every
        # operand combination: frame/frame, series/series, series/scalar,
        # scalar/series, scalar/frame, frame/scalar.
        with pytest.raises(NotImplementedError, match=err_msg):
            f(df, df)

        with pytest.raises(NotImplementedError, match=err_msg):
            f(df.a, df.b)

        with pytest.raises(NotImplementedError, match=err_msg):
            f(df.a, True)

        with pytest.raises(NotImplementedError, match=err_msg):
            f(False, df.a)

        with pytest.raises(NotImplementedError, match=err_msg):
            f(False, df)

        with pytest.raises(NotImplementedError, match=err_msg):
            f(df, True)

    @pytest.mark.parametrize(
        "op_str,opname", [("+", "add"), ("*", "mul"), ("-", "sub")]
    )
    def test_bool_ops_warn_on_arithmetic(self, op_str, opname):
        # Arithmetic ops on bool data should warn and fall back to the
        # equivalent logical op (+ -> |, * -> &); results must match the
        # logical operator applied directly.
        n = 10
        df = DataFrame(
            {
                "a": np.random.default_rng(2).random(n) > 0.5,
                "b": np.random.default_rng(2).random(n) > 0.5,
            }
        )

        # map each arithmetic symbol to the logical op it degrades to,
        # and that symbol to the matching `operator` module function name
        subs = {"+": "|", "*": "&", "-": "^"}
        sub_funcs = {"|": "or_", "&": "and_", "^": "xor"}

        f = getattr(operator, opname)
        fe = getattr(operator, sub_funcs[subs[op_str]])

        if op_str == "-":
            # raises TypeError
            return

        # force numexpr on with a tiny element threshold so these small
        # frames actually go through the numexpr path
        with tm.use_numexpr(True, min_elements=5):
            with tm.assert_produces_warning():
                r = f(df, df)
                e = fe(df, df)
                tm.assert_frame_equal(r, e)

            with tm.assert_produces_warning():
                r = f(df.a, df.b)
                e = fe(df.a, df.b)
                tm.assert_series_equal(r, e)

            with tm.assert_produces_warning():
                r = f(df.a, True)
                e = fe(df.a, True)
                tm.assert_series_equal(r, e)

            with tm.assert_produces_warning():
                r = f(False, df.a)
                e = fe(False, df.a)
                tm.assert_series_equal(r, e)

            with tm.assert_produces_warning():
                r = f(False, df)
                e = fe(False, df)
                tm.assert_frame_equal(r, e)

            with tm.assert_produces_warning():
                r = f(df, True)
                e = fe(df, True)
                tm.assert_frame_equal(r, e)

    @pytest.mark.parametrize(
        "test_input,expected",
        [
            (
                DataFrame(
                    [[0, 1, 2, "aa"], [0, 1, 2, "aa"]], columns=["a", "b", "c", "dtype"]
                ),
                DataFrame([[False, False], [False, False]], columns=["a", "dtype"]),
            ),
            (
                DataFrame(
                    [[0, 3, 2, "aa"], [0, 4, 2, "aa"], [0, 1, 1, "bb"]],
                    columns=["a", "b", "c", "dtype"],
                ),
                DataFrame(
                    [[False, False], [False, False], [False, False]],
                    columns=["a", "dtype"],
                ),
            ),
        ],
    )
    def test_bool_ops_column_name_dtype(self, test_input, expected):
        # GH 22383 - .ne fails if columns containing column name 'dtype'
        # (comparing a frame with itself must be all-False even when a
        # column is literally named "dtype")
        result = test_input.loc[:, ["a", "dtype"]].ne(test_input.loc[:, ["a", "dtype"]])
        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize(
        "arith", ("add", "sub", "mul", "mod", "truediv", "floordiv")
    )
    @pytest.mark.parametrize("axis", (0, 1))
    def test_frame_series_axis(self, axis, arith, _frame, monkeypatch):
        # GH#26736 Dataframe.floordiv(Series, axis=1) fails
        # Compare each arithmetic method with and without numexpr along
        # both axes; `_frame` is a fixture defined elsewhere in this file
        # (presumably a numeric DataFrame — confirm against the fixture).

        df = _frame
        if axis == 1:
            other = df.iloc[0, :]
        else:
            other = df.iloc[:, 0]

        with monkeypatch.context() as m:
            # _MIN_ELEMENTS = 0 so numexpr is used even for small frames
            m.setattr(expr, "_MIN_ELEMENTS", 0)

            op_func = getattr(df, arith)

            with option_context("compute.use_numexpr", False):
                expected = op_func(other, axis=axis)

            result = op_func(other, axis=axis)
            tm.assert_frame_equal(expected, result)

    @pytest.mark.parametrize(
        "op",
        [
            "__mod__",
            "__rmod__",
            "__floordiv__",
            "__rfloordiv__",
        ],
    )
    @pytest.mark.parametrize("box", [DataFrame, Series, Index])
    @pytest.mark.parametrize("scalar", [-5, 5])
    def test_python_semantics_with_numexpr_installed(
        self, op, box, scalar, monkeypatch
    ):
        # https://github.com/pandas-dev/pandas/issues/36047
        # numexpr uses C semantics for % and //; pandas must still produce
        # Python semantics (sign follows the divisor), so the numexpr and
        # non-numexpr paths — and pure-Python int ops — must all agree.
        with monkeypatch.context() as m:
            m.setattr(expr, "_MIN_ELEMENTS", 0)
            data = np.arange(-50, 50)
            obj = box(data)
            method = getattr(obj, op)
            result = method(scalar)

            # compare result with numpy
            with option_context("compute.use_numexpr", False):
                expected = method(scalar)

            tm.assert_equal(result, expected)

            # compare result element-wise with Python
            for i, elem in enumerate(data):
                if box == DataFrame:
                    scalar_result = result.iloc[i, 0]
                else:
                    scalar_result = result[i]
                try:
                    expected = getattr(int(elem), op)(scalar)
                except ZeroDivisionError:
                    pass
                else:
                    assert scalar_result == expected
import pytest

import pandas as pd


class TestFlags:
    """Tests for the ``Flags`` object exposed by pandas objects."""

    def test_equality(self):
        # Flags compare equal only when every flag value matches;
        # comparison with an unrelated type is unequal, not an error.
        flags_dup = pd.DataFrame().set_flags(allows_duplicate_labels=True).flags
        flags_nodup = pd.DataFrame().set_flags(allows_duplicate_labels=False).flags

        assert flags_dup == flags_dup
        assert flags_nodup == flags_nodup
        assert flags_dup != flags_nodup
        assert flags_dup != 2

    def test_set(self):
        # Flags are mutable both via attribute access and item assignment.
        frame = pd.DataFrame().set_flags(allows_duplicate_labels=True)
        flags = frame.flags

        flags.allows_duplicate_labels = False
        assert flags.allows_duplicate_labels is False

        flags["allows_duplicate_labels"] = True
        assert flags.allows_duplicate_labels is True

    def test_repr(self):
        # repr reflects the current flag values.
        shown = repr(pd.DataFrame({"A"}).set_flags(allows_duplicate_labels=True).flags)
        assert shown == "<Flags(allows_duplicate_labels=True)>"

        shown = repr(pd.DataFrame({"A"}).set_flags(allows_duplicate_labels=False).flags)
        assert shown == "<Flags(allows_duplicate_labels=False)>"

    def test_obj_ref(self):
        # Once the owning object is garbage-collected, mutating the
        # orphaned flags raises.
        frame = pd.DataFrame()
        flags = frame.flags
        del frame
        with pytest.raises(ValueError, match="object has been deleted"):
            flags.allows_duplicate_labels = True

    def test_getitem(self):
        frame = pd.DataFrame()
        flags = frame.flags

        assert flags["allows_duplicate_labels"] is True
        flags["allows_duplicate_labels"] = False
        assert flags["allows_duplicate_labels"] is False

        # Unknown keys: KeyError on read, ValueError on write.
        with pytest.raises(KeyError, match="a"):
            flags["a"]

        with pytest.raises(ValueError, match="a"):
            flags["a"] = 10
import datetime

import numpy as np
import pytest

import pandas as pd
from pandas import (
    DataFrame,
    MultiIndex,
    Series,
)
import pandas._testing as tm


class TestMultiLevel:
    """Broad behavioral tests for MultiIndex-ed frames and series:
    reindexing by level, groupby over levels, alignment, and indexing
    with tuple labels. Fixtures (multiindex_*_random_data,
    using_copy_on_write) are defined elsewhere — presumably in the
    test-suite conftest."""

    def test_reindex_level(self, multiindex_year_month_day_dataframe_random_data):
        # Reindexing a per-group aggregate back to the full index with
        # level= must match groupby(...).transform.
        # axis=0
        ymd = multiindex_year_month_day_dataframe_random_data

        month_sums = ymd.groupby("month").sum()
        result = month_sums.reindex(ymd.index, level=1)
        expected = ymd.groupby(level="month").transform("sum")

        tm.assert_frame_equal(result, expected)

        # Series
        result = month_sums["A"].reindex(ymd.index, level=1)
        expected = ymd["A"].groupby(level="month").transform("sum")
        tm.assert_series_equal(result, expected, check_names=False)

        # axis=1
        msg = "DataFrame.groupby with axis=1 is deprecated"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            gb = ymd.T.groupby("month", axis=1)

        month_sums = gb.sum()
        result = month_sums.reindex(columns=ymd.index, level=1)
        expected = ymd.groupby(level="month").transform("sum").T
        tm.assert_frame_equal(result, expected)

    def test_reindex(self, multiindex_dataframe_random_data):
        # .loc with a list of full tuples selects those exact rows.
        frame = multiindex_dataframe_random_data

        expected = frame.iloc[[0, 3]]
        reindexed = frame.loc[[("foo", "one"), ("bar", "one")]]
        tm.assert_frame_equal(reindexed, expected)

    def test_reindex_preserve_levels(
        self, multiindex_year_month_day_dataframe_random_data, using_copy_on_write
    ):
        # reindex with an index slice must reuse the new index object;
        # under copy-on-write identity is checked via Index.is_ instead
        # of the `is` operator.
        ymd = multiindex_year_month_day_dataframe_random_data

        new_index = ymd.index[::10]
        chunk = ymd.reindex(new_index)
        if using_copy_on_write:
            assert chunk.index.is_(new_index)
        else:
            assert chunk.index is new_index

        chunk = ymd.loc[new_index]
        assert chunk.index.equals(new_index)

        ymdT = ymd.T
        chunk = ymdT.reindex(columns=new_index)
        if using_copy_on_write:
            assert chunk.columns.is_(new_index)
        else:
            assert chunk.columns is new_index

        chunk = ymdT.loc[:, new_index]
        assert chunk.columns.equals(new_index)

    def test_groupby_transform(self, multiindex_dataframe_random_data):
        # apply and transform of the same elementwise function agree
        # once reindexed to a common index.
        frame = multiindex_dataframe_random_data

        s = frame["A"]
        grouper = s.index.get_level_values(0)

        grouped = s.groupby(grouper, group_keys=False)

        applied = grouped.apply(lambda x: x * 2)
        expected = grouped.transform(lambda x: x * 2)
        result = applied.reindex(expected.index)
        tm.assert_series_equal(result, expected, check_names=False)

    def test_groupby_corner(self):
        # grouping by the innermost level of a 3-level index must not raise
        midx = MultiIndex(
            levels=[["foo"], ["bar"], ["baz"]],
            codes=[[0], [0], [0]],
            names=["one", "two", "three"],
        )
        df = DataFrame(
            [np.random.default_rng(2).random(4)],
            columns=["a", "b", "c", "d"],
            index=midx,
        )
        # should work
        df.groupby(level="three")

    def test_groupby_level_no_obs(self):
        # #1697
        midx = MultiIndex.from_tuples(
            [
                ("f1", "s1"),
                ("f1", "s2"),
                ("f2", "s1"),
                ("f2", "s2"),
                ("f3", "s1"),
                ("f3", "s2"),
            ]
        )
        df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], columns=midx)
        df1 = df.loc(axis=1)[df.columns.map(lambda u: u[0] in ["f2", "f3"])]

        msg = "DataFrame.groupby with axis=1 is deprecated"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            grouped = df1.groupby(axis=1, level=0)
        result = grouped.sum()
        assert (result.columns == ["f2", "f3"]).all()

    def test_setitem_with_expansion_multiindex_columns(
        self, multiindex_year_month_day_dataframe_random_data
    ):
        # adding a new column via a full tuple key keeps MultiIndex columns
        ymd = multiindex_year_month_day_dataframe_random_data

        df = ymd[:5].T
        df[2000, 1, 10] = df[2000, 1, 7]
        assert isinstance(df.columns, MultiIndex)
        assert (df[2000, 1, 10] == df[2000, 1, 7]).all()

    def test_alignment(self):
        # binary ops align on the union of both MultiIndexes
        x = Series(
            data=[1, 2, 3], index=MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3)])
        )

        y = Series(
            data=[4, 5, 6], index=MultiIndex.from_tuples([("Z", 1), ("Z", 2), ("B", 3)])
        )

        res = x - y
        exp_index = x.index.union(y.index)
        exp = x.reindex(exp_index) - y.reindex(exp_index)
        tm.assert_series_equal(res, exp)

        # hit non-monotonic code path
        res = x[::-1] - y[::-1]
        exp_index = x.index.union(y.index)
        exp = x.reindex(exp_index) - y.reindex(exp_index)
        tm.assert_series_equal(res, exp)

    def test_groupby_multilevel(self, multiindex_year_month_day_dataframe_random_data):
        # grouping by level numbers, by the level values, and by level
        # names must all produce the same aggregate
        ymd = multiindex_year_month_day_dataframe_random_data

        result = ymd.groupby(level=[0, 1]).mean()

        k1 = ymd.index.get_level_values(0)
        k2 = ymd.index.get_level_values(1)

        expected = ymd.groupby([k1, k2]).mean()

        # TODO groupby with level_values drops names
        tm.assert_frame_equal(result, expected, check_names=False)
        assert result.index.names == ymd.index.names[:2]

        result2 = ymd.groupby(level=ymd.index.names[:2]).mean()
        tm.assert_frame_equal(result, result2)

    def test_multilevel_consolidate(self):
        # adding a column and consolidating a MultiIndex frame must not raise
        index = MultiIndex.from_tuples(
            [("foo", "one"), ("foo", "two"), ("bar", "one"), ("bar", "two")]
        )
        df = DataFrame(
            np.random.default_rng(2).standard_normal((4, 4)), index=index, columns=index
        )
        df["Totals", ""] = df.sum(1)
        df = df._consolidate()

    def test_level_with_tuples(self):
        # level values that are themselves tuples must still support
        # partial indexing (__getitem__, .loc, .xs)
        index = MultiIndex(
            levels=[[("foo", "bar", 0), ("foo", "baz", 0), ("foo", "qux", 0)], [0, 1]],
            codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
        )

        series = Series(np.random.default_rng(2).standard_normal(6), index=index)
        frame = DataFrame(np.random.default_rng(2).standard_normal((6, 4)), index=index)

        result = series[("foo", "bar", 0)]
        result2 = series.loc[("foo", "bar", 0)]
        expected = series[:2]
        expected.index = expected.index.droplevel(0)
        tm.assert_series_equal(result, expected)
        tm.assert_series_equal(result2, expected)

        with pytest.raises(KeyError, match=r"^\(\('foo', 'bar', 0\), 2\)$"):
            series[("foo", "bar", 0), 2]

        result = frame.loc[("foo", "bar", 0)]
        result2 = frame.xs(("foo", "bar", 0))
        expected = frame[:2]
        expected.index = expected.index.droplevel(0)
        tm.assert_frame_equal(result, expected)
        tm.assert_frame_equal(result2, expected)

        # repeat with 2-tuples in the first level
        index = MultiIndex(
            levels=[[("foo", "bar"), ("foo", "baz"), ("foo", "qux")], [0, 1]],
            codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
        )

        series = Series(np.random.default_rng(2).standard_normal(6), index=index)
        frame = DataFrame(np.random.default_rng(2).standard_normal((6, 4)), index=index)

        result = series[("foo", "bar")]
        result2 = series.loc[("foo", "bar")]
        expected = series[:2]
        expected.index = expected.index.droplevel(0)
        tm.assert_series_equal(result, expected)
        tm.assert_series_equal(result2, expected)

        result = frame.loc[("foo", "bar")]
        result2 = frame.xs(("foo", "bar"))
        expected = frame[:2]
        expected.index = expected.index.droplevel(0)
        tm.assert_frame_equal(result, expected)
        tm.assert_frame_equal(result2, expected)

    def test_reindex_level_partial_selection(self, multiindex_dataframe_random_data):
        # selecting a subset of level-0 labels via reindex(level=0),
        # .loc lists, and transposed column selection must all agree
        frame = multiindex_dataframe_random_data

        result = frame.reindex(["foo", "qux"], level=0)
        expected = frame.iloc[[0, 1, 2, 7, 8, 9]]
        tm.assert_frame_equal(result, expected)

        result = frame.T.reindex(["foo", "qux"], axis=1, level=0)
        tm.assert_frame_equal(result, expected.T)

        result = frame.loc[["foo", "qux"]]
        tm.assert_frame_equal(result, expected)

        result = frame["A"].loc[["foo", "qux"]]
        tm.assert_series_equal(result, expected["A"])

        result = frame.T.loc[:, ["foo", "qux"]]
        tm.assert_frame_equal(result, expected.T)

    @pytest.mark.parametrize("d", [4, "d"])
    def test_empty_frame_groupby_dtypes_consistency(self, d):
        # GH 20888
        # grouping an empty selection still yields a MultiIndex with the
        # right level dtypes, regardless of the dropped column's dtype
        group_keys = ["a", "b", "c"]
        df = DataFrame({"a": [1], "b": [2], "c": [3], "d": [d]})

        g = df[df.a == 2].groupby(group_keys)
        result = g.first().index
        expected = MultiIndex(
            levels=[[1], [2], [3]], codes=[[], [], []], names=["a", "b", "c"]
        )

        tm.assert_index_equal(result, expected)

    def test_duplicate_groupby_issues(self):
        # duplicate index tuples collapse to distinct groups
        idx_tp = [
            ("600809", "20061231"),
            ("600809", "20070331"),
            ("600809", "20070630"),
            ("600809", "20070331"),
        ]
        dt = ["demo", "demo", "demo", "demo"]

        idx = MultiIndex.from_tuples(idx_tp, names=["STK_ID", "RPT_Date"])
        s = Series(dt, index=idx)

        result = s.groupby(s.index).first()
        assert len(result) == 3

    def test_subsets_multiindex_dtype(self):
        # GH 20757
        data = [["x", 1]]
        columns = [("a", "b", np.nan), ("a", "c", 0.0)]
        df = DataFrame(data, columns=MultiIndex.from_tuples(columns))
        expected = df.dtypes.a.b
        result = df.a.b.dtypes
        tm.assert_series_equal(result, expected)

    def test_datetime_object_multiindex(self):
        # from_dict with tuple keys containing datetime.date objects
        # builds the expected 2-level index
        data_dic = {
            (0, datetime.date(2018, 3, 3)): {"A": 1, "B": 10},
            (0, datetime.date(2018, 3, 4)): {"A": 2, "B": 11},
            (1, datetime.date(2018, 3, 3)): {"A": 3, "B": 12},
            (1, datetime.date(2018, 3, 4)): {"A": 4, "B": 13},
        }
        result = DataFrame.from_dict(data_dic, orient="index")
        data = {"A": [1, 2, 3, 4], "B": [10, 11, 12, 13]}
        index = [
            [0, 0, 1, 1],
            [
                datetime.date(2018, 3, 3),
                datetime.date(2018, 3, 4),
                datetime.date(2018, 3, 3),
                datetime.date(2018, 3, 4),
            ],
        ]
        expected = DataFrame(data=data, index=index)

        tm.assert_frame_equal(result, expected)

    def test_multiindex_with_na(self):
        # enlarging via .at with a new tuple key works when the existing
        # index contains NaN labels
        df = DataFrame(
            [
                ["A", np.nan, 1.23, 4.56],
                ["A", "G", 1.23, 4.56],
                ["A", "D", 9.87, 10.54],
            ],
            columns=["pivot_0", "pivot_1", "col_1", "col_2"],
        ).set_index(["pivot_0", "pivot_1"])

        df.at[("A", "F"), "col_2"] = 0.0

        expected = DataFrame(
            [
                ["A", np.nan, 1.23, 4.56],
                ["A", "G", 1.23, 4.56],
                ["A", "D", 9.87, 10.54],
                ["A", "F", np.nan, 0.0],
            ],
            columns=["pivot_0", "pivot_1", "col_1", "col_2"],
        ).set_index(["pivot_0", "pivot_1"])

        tm.assert_frame_equal(df, expected)


class TestSorted:
    """everything you wanted to test about sorting"""

    def test_sort_non_lexsorted(self):
        # degenerate case where we sort but don't
        # have a satisfying result :<
        # GH 15797
        idx = MultiIndex(
            [["A", "B", "C"], ["c", "b", "a"]], [[0, 1, 2, 0, 1, 2], [0, 2, 1, 1, 0, 2]]
        )

        df = DataFrame({"col": range(len(idx))}, index=idx, dtype="int64")
        assert df.index.is_monotonic_increasing is False

        # sort_index restores monotonicity so slice indexing works
        sorted = df.sort_index()
        assert sorted.index.is_monotonic_increasing is True

        expected = DataFrame(
            {"col": [1, 4, 5, 2]},
            index=MultiIndex.from_tuples(
                [("B", "a"), ("B", "c"), ("C", "a"), ("C", "b")]
            ),
            dtype="int64",
        )
        result = sorted.loc[pd.IndexSlice["B":"C", "a":"c"], :]
        tm.assert_frame_equal(result, expected)
from functools import partial

import numpy as np
import pytest

import pandas.util._test_decorators as td

from pandas.core.dtypes.common import is_integer_dtype

import pandas as pd
from pandas import (
    Series,
    isna,
)
import pandas._testing as tm
from pandas.core import nanops

# bottleneck availability at import time; restored in teardown_method
use_bn = nanops._USE_BOTTLENECK


@pytest.fixture
def disable_bottleneck(monkeypatch):
    # force the pure-numpy nanops code paths for the duration of a test
    with monkeypatch.context() as m:
        m.setattr(nanops, "_USE_BOTTLENECK", False)
        yield


@pytest.fixture
def arr_shape():
    # common 2D shape for all array fixtures below
    return 11, 7


@pytest.fixture
def arr_float(arr_shape):
    return np.random.default_rng(2).standard_normal(arr_shape)


@pytest.fixture
def arr_complex(arr_float):
    return arr_float + arr_float * 1j


@pytest.fixture
def arr_int(arr_shape):
    return np.random.default_rng(2).integers(-10, 10, arr_shape)


@pytest.fixture
def arr_bool(arr_shape):
    return np.random.default_rng(2).integers(0, 2, arr_shape) == 0


@pytest.fixture
def arr_str(arr_float):
    return np.abs(arr_float).astype("S")


@pytest.fixture
def arr_utf(arr_float):
    return np.abs(arr_float).astype("U")


@pytest.fixture
def arr_date(arr_shape):
    return np.random.default_rng(2).integers(0, 20000, arr_shape).astype("M8[ns]")


@pytest.fixture
def arr_tdelta(arr_shape):
    return np.random.default_rng(2).integers(0, 20000, arr_shape).astype("m8[ns]")


@pytest.fixture
def arr_nan(arr_shape):
    return np.tile(np.nan, arr_shape)


@pytest.fixture
def arr_float_nan(arr_float, arr_nan):
    return np.vstack([arr_float, arr_nan])


@pytest.fixture
def arr_nan_float1(arr_nan, arr_float):
    return np.vstack([arr_nan, arr_float])


@pytest.fixture
def arr_nan_nan(arr_nan):
    return np.vstack([arr_nan, arr_nan])


@pytest.fixture
def arr_inf(arr_float):
    return arr_float * np.inf


@pytest.fixture
def arr_float_inf(arr_float, arr_inf):
    return np.vstack([arr_float, arr_inf])


@pytest.fixture
def arr_nan_inf(arr_nan, arr_inf):
    return np.vstack([arr_nan, arr_inf])


@pytest.fixture
def arr_float_nan_inf(arr_float, arr_nan, arr_inf):
    return np.vstack([arr_float, arr_nan, arr_inf])


@pytest.fixture
def arr_nan_nan_inf(arr_nan, arr_inf):
    return np.vstack([arr_nan, arr_nan, arr_inf])


@pytest.fixture
def arr_obj(
    arr_float, arr_int, arr_bool, arr_complex, arr_str, arr_utf, arr_date, arr_tdelta
):
    # one object-dtype stack covering every element kind
    return np.vstack(
        [
            arr_float.astype("O"),
            arr_int.astype("O"),
            arr_bool.astype("O"),
            arr_complex.astype("O"),
            arr_str.astype("O"),
            arr_utf.astype("O"),
            arr_date.astype("O"),
            arr_tdelta.astype("O"),
        ]
    )


@pytest.fixture
def arr_nan_nanj(arr_nan):
    with np.errstate(invalid="ignore"):
        return arr_nan + arr_nan * 1j


@pytest.fixture
def arr_complex_nan(arr_complex, arr_nan_nanj):
    with np.errstate(invalid="ignore"):
        return np.vstack([arr_complex, arr_nan_nanj])


@pytest.fixture
def arr_nan_infj(arr_inf):
    with np.errstate(invalid="ignore"):
        return arr_inf * 1j


@pytest.fixture
def arr_complex_nan_infj(arr_complex, arr_nan_infj):
    with np.errstate(invalid="ignore"):
        return np.vstack([arr_complex, arr_nan_infj])


@pytest.fixture
def arr_float_1d(arr_float):
    return arr_float[:, 0]


@pytest.fixture
def arr_nan_1d(arr_nan):
    return arr_nan[:, 0]


@pytest.fixture
def arr_float_nan_1d(arr_float_nan):
    return arr_float_nan[:, 0]


@pytest.fixture
def arr_float1_nan_1d(arr_float1_nan):
    # NOTE(review): depends on an `arr_float1_nan` fixture not defined in
    # this section — presumably defined elsewhere; verify it exists.
    return arr_float1_nan[:, 0]


@pytest.fixture
def arr_nan_float1_1d(arr_nan_float1):
    return arr_nan_float1[:, 0]


class TestnanopsDataFrame:
    """Compare pandas nanops against numpy/scipy reference functions
    across dtypes (float, int, bool, complex, str, datetime, timedelta,
    object), NaN/inf placements, and 1D/2D shapes."""

    def setup_method(self):
        # disable bottleneck so the tested code path is the nanops one
        nanops._USE_BOTTLENECK = False

        arr_shape = (11, 7)

        self.arr_float = np.random.default_rng(2).standard_normal(arr_shape)
        self.arr_float1 = np.random.default_rng(2).standard_normal(arr_shape)
        self.arr_complex = self.arr_float + self.arr_float1 * 1j
        self.arr_int = np.random.default_rng(2).integers(-10, 10, arr_shape)
        self.arr_bool = np.random.default_rng(2).integers(0, 2, arr_shape) == 0
        self.arr_str = np.abs(self.arr_float).astype("S")
        self.arr_utf = np.abs(self.arr_float).astype("U")
        self.arr_date = (
            np.random.default_rng(2).integers(0, 20000, arr_shape).astype("M8[ns]")
        )
        self.arr_tdelta = (
            np.random.default_rng(2).integers(0, 20000, arr_shape).astype("m8[ns]")
        )

        self.arr_nan = np.tile(np.nan, arr_shape)
        self.arr_float_nan = np.vstack([self.arr_float, self.arr_nan])
        self.arr_float1_nan = np.vstack([self.arr_float1, self.arr_nan])
        self.arr_nan_float1 = np.vstack([self.arr_nan, self.arr_float1])
        self.arr_nan_nan = np.vstack([self.arr_nan, self.arr_nan])

        self.arr_inf = self.arr_float * np.inf
        self.arr_float_inf = np.vstack([self.arr_float, self.arr_inf])

        self.arr_nan_inf = np.vstack([self.arr_nan, self.arr_inf])
        self.arr_float_nan_inf = np.vstack([self.arr_float, self.arr_nan, self.arr_inf])
        self.arr_nan_nan_inf = np.vstack([self.arr_nan, self.arr_nan, self.arr_inf])
        self.arr_obj = np.vstack(
            [
                self.arr_float.astype("O"),
                self.arr_int.astype("O"),
                self.arr_bool.astype("O"),
                self.arr_complex.astype("O"),
                self.arr_str.astype("O"),
                self.arr_utf.astype("O"),
                self.arr_date.astype("O"),
                self.arr_tdelta.astype("O"),
            ]
        )

        # NaN * 1j raises an invalid warning; suppress while building
        # the complex NaN/inf variants
        with np.errstate(invalid="ignore"):
            self.arr_nan_nanj = self.arr_nan + self.arr_nan * 1j
            self.arr_complex_nan = np.vstack([self.arr_complex, self.arr_nan_nanj])

            self.arr_nan_infj = self.arr_inf * 1j
            self.arr_complex_nan_infj = np.vstack([self.arr_complex, self.arr_nan_infj])

        self.arr_float_2d = self.arr_float
        self.arr_float1_2d = self.arr_float1

        self.arr_nan_2d = self.arr_nan
        self.arr_float_nan_2d = self.arr_float_nan
        self.arr_float1_nan_2d = self.arr_float1_nan
        self.arr_nan_float1_2d = self.arr_nan_float1

        self.arr_float_1d = self.arr_float[:, 0]
        self.arr_float1_1d = self.arr_float1[:, 0]

        self.arr_nan_1d = self.arr_nan[:, 0]
        self.arr_float_nan_1d = self.arr_float_nan[:, 0]
        self.arr_float1_nan_1d = self.arr_float1_nan[:, 0]
        self.arr_nan_float1_1d = self.arr_nan_float1[:, 0]

    def teardown_method(self):
        # restore the bottleneck setting captured at module import
        nanops._USE_BOTTLENECK = use_bn

    def check_results(self, targ, res, axis, check_dtype=True):
        # compare a nanops result against the reference target, tolerating
        # rounding differences for complex/object dtypes
        res = getattr(res, "asm8", res)

        if (
            axis != 0
            and hasattr(targ, "shape")
            and targ.ndim
            and targ.shape != res.shape
        ):
            res = np.split(res, [targ.shape[0]], axis=0)[0]

        try:
            tm.assert_almost_equal(targ, res, check_dtype=check_dtype)
        except AssertionError:
            # handle timedelta dtypes
            if hasattr(targ, "dtype") and targ.dtype == "m8[ns]":
                raise

            # There are sometimes rounding errors with
            # complex and object dtypes.
            # If it isn't one of those, re-raise the error.
            if not hasattr(res, "dtype") or res.dtype.kind not in ["c", "O"]:
                raise
            # convert object dtypes to something that can be split into
            # real and imaginary parts
            if res.dtype.kind == "O":
                if targ.dtype.kind != "O":
                    res = res.astype(targ.dtype)
                else:
                    cast_dtype = "c16" if hasattr(np, "complex128") else "f8"
                    res = res.astype(cast_dtype)
                    targ = targ.astype(cast_dtype)
            # there should never be a case where numpy returns an object
            # but nanops doesn't, so make that an exception
            elif targ.dtype.kind == "O":
                raise
            tm.assert_almost_equal(np.real(targ), np.real(res), check_dtype=check_dtype)
            tm.assert_almost_equal(np.imag(targ), np.imag(res), check_dtype=check_dtype)

    def check_fun_data(
        self,
        testfunc,
        targfunc,
        testarval,
        targarval,
        skipna,
        check_dtype=True,
        empty_targfunc=None,
        **kwargs,
    ):
        # run testfunc/targfunc over every axis (and axis=None) and check
        # agreement, then recurse on a lower-dimensional slice
        for axis in list(range(targarval.ndim)) + [None]:
            targartempval = targarval if skipna else testarval
            if skipna and empty_targfunc and isna(targartempval).all():
                targ = empty_targfunc(targartempval, axis=axis, **kwargs)
            else:
                targ = targfunc(targartempval, axis=axis, **kwargs)

            if targartempval.dtype == object and (
                targfunc is np.any or targfunc is np.all
            ):
                # GH#12863 the numpy functions will retain e.g. floatiness
                if isinstance(targ, np.ndarray):
                    targ = targ.astype(bool)
                else:
                    targ = bool(targ)

            res = testfunc(testarval, axis=axis, skipna=skipna, **kwargs)

            if (
                isinstance(targ, np.complex128)
                and isinstance(res, float)
                and np.isnan(targ)
                and np.isnan(res)
            ):
                # GH#18463
                targ = res

            self.check_results(targ, res, axis, check_dtype=check_dtype)
            # also exercise the default values of skipna / axis
            if skipna:
                res = testfunc(testarval, axis=axis, **kwargs)
                self.check_results(targ, res, axis, check_dtype=check_dtype)
            if axis is None:
                res = testfunc(testarval, skipna=skipna, **kwargs)
                self.check_results(targ, res, axis, check_dtype=check_dtype)
            if skipna and axis is None:
                res = testfunc(testarval, **kwargs)
                self.check_results(targ, res, axis, check_dtype=check_dtype)

        if testarval.ndim <= 1:
            return

        # Recurse on lower-dimension
        testarval2 = np.take(testarval, 0, axis=-1)
        targarval2 = np.take(targarval, 0, axis=-1)
        self.check_fun_data(
            testfunc,
            targfunc,
            testarval2,
            targarval2,
            skipna=skipna,
            check_dtype=check_dtype,
            empty_targfunc=empty_targfunc,
            **kwargs,
        )

    def check_fun(
        self, testfunc, targfunc, testar, skipna, empty_targfunc=None, **kwargs
    ):
        # `testar` names a setup_method attribute; a "*_nan" array is
        # compared against its NaN-free counterpart when skipping NaNs
        targar = testar
        if testar.endswith("_nan") and hasattr(self, testar[:-4]):
            targar = testar[:-4]

        testarval = getattr(self, testar)
        targarval = getattr(self, targar)
        self.check_fun_data(
            testfunc,
            targfunc,
            testarval,
            targarval,
            skipna=skipna,
            empty_targfunc=empty_targfunc,
            **kwargs,
        )

    def check_funs(
        self,
        testfunc,
        targfunc,
        skipna,
        allow_complex=True,
        allow_all_nan=True,
        allow_date=True,
        allow_tdelta=True,
        allow_obj=True,
        **kwargs,
    ):
        # drive check_fun over every dtype family the op claims to support;
        # the allow_* flags skip families the op rejects
        self.check_fun(testfunc, targfunc, "arr_float", skipna, **kwargs)
        self.check_fun(testfunc, targfunc, "arr_float_nan", skipna, **kwargs)
        self.check_fun(testfunc, targfunc, "arr_int", skipna, **kwargs)
        self.check_fun(testfunc, targfunc, "arr_bool", skipna, **kwargs)
        objs = [
            self.arr_float.astype("O"),
            self.arr_int.astype("O"),
            self.arr_bool.astype("O"),
        ]

        if allow_all_nan:
            self.check_fun(testfunc, targfunc, "arr_nan", skipna, **kwargs)

        if allow_complex:
            self.check_fun(testfunc, targfunc, "arr_complex", skipna, **kwargs)
            self.check_fun(testfunc, targfunc, "arr_complex_nan", skipna, **kwargs)
            if allow_all_nan:
                self.check_fun(testfunc, targfunc, "arr_nan_nanj", skipna, **kwargs)
            objs += [self.arr_complex.astype("O")]

        if allow_date:
            targfunc(self.arr_date)
            self.check_fun(testfunc, targfunc, "arr_date", skipna, **kwargs)
            objs += [self.arr_date.astype("O")]

        if allow_tdelta:
            try:
                targfunc(self.arr_tdelta)
            except TypeError:
                pass
            else:
                self.check_fun(testfunc, targfunc, "arr_tdelta", skipna, **kwargs)
                objs += [self.arr_tdelta.astype("O")]

        if allow_obj:
            self.arr_obj = np.vstack(objs)
            # some nanops handle object dtypes better than their numpy
            # counterparts, so the numpy functions need to be given something
            # else
            if allow_obj == "convert":
                targfunc = partial(
                    self._badobj_wrap, func=targfunc, allow_complex=allow_complex
                )
            self.check_fun(testfunc, targfunc, "arr_obj", skipna, **kwargs)

    def _badobj_wrap(self, value, func, allow_complex=True, **kwargs):
        # cast object arrays to complex/float before calling a numpy
        # reference function that cannot handle object dtype
        if value.dtype.kind == "O":
            if allow_complex:
                value = value.astype("c16")
            else:
                value = value.astype("f8")
        return func(value, **kwargs)

    @pytest.mark.parametrize(
        "nan_op,np_op", [(nanops.nanany, np.any), (nanops.nanall, np.all)]
    )
    def test_nan_funcs(self, nan_op, np_op, skipna):
        self.check_funs(nan_op, np_op, skipna, allow_all_nan=False, allow_date=False)

    def test_nansum(self, skipna):
        self.check_funs(
            nanops.nansum,
            np.sum,
            skipna,
            allow_date=False,
            check_dtype=False,
            empty_targfunc=np.nansum,
        )

    def test_nanmean(self, skipna):
        self.check_funs(
            nanops.nanmean, np.mean, skipna, allow_obj=False, allow_date=False
        )

    @pytest.mark.filterwarnings("ignore::RuntimeWarning")
    def test_nanmedian(self, skipna):
        self.check_funs(
            nanops.nanmedian,
            np.median,
            skipna,
            allow_complex=False,
            allow_date=False,
            allow_obj="convert",
        )

    @pytest.mark.parametrize("ddof", range(3))
    def test_nanvar(self, ddof, skipna):
        self.check_funs(
            nanops.nanvar,
            np.var,
            skipna,
            allow_complex=False,
            allow_date=False,
            allow_obj="convert",
            ddof=ddof,
        )

    @pytest.mark.parametrize("ddof", range(3))
    def test_nanstd(self, ddof, skipna):
        self.check_funs(
            nanops.nanstd,
            np.std,
            skipna,
            allow_complex=False,
            allow_date=False,
            allow_obj="convert",
            ddof=ddof,
        )

    @pytest.mark.parametrize("ddof", range(3))
    def test_nansem(self, ddof, skipna):
        sp_stats = pytest.importorskip("scipy.stats")

        with np.errstate(invalid="ignore"):
            self.check_funs(
                nanops.nansem,
                sp_stats.sem,
                skipna,
                allow_complex=False,
                allow_date=False,
                allow_tdelta=False,
                allow_obj="convert",
                ddof=ddof,
            )

    @pytest.mark.filterwarnings("ignore::RuntimeWarning")
    @pytest.mark.parametrize(
        "nan_op,np_op", [(nanops.nanmin, np.min), (nanops.nanmax, np.max)]
    )
    def test_nanops_with_warnings(self, nan_op, np_op, skipna):
        self.check_funs(nan_op, np_op, skipna, allow_obj=False)

    def _argminmax_wrap(self, value, axis=None, func=None):
        # numpy's arg{min,max} has no NaN handling; mark all-NaN slices
        # with -1 to mirror nanops' convention
        res = func(value, axis)
        nans = np.min(value, axis)
        nullnan = isna(nans)
        if res.ndim:
            res[nullnan] = -1
        elif (
            hasattr(nullnan, "all")
            and nullnan.all()
            or not hasattr(nullnan, "all")
            and nullnan
        ):
            res = -1
        return res

    @pytest.mark.filterwarnings("ignore::RuntimeWarning")
    def test_nanargmax(self, skipna):
        func = partial(self._argminmax_wrap, func=np.argmax)
        self.check_funs(nanops.nanargmax, func, skipna, allow_obj=False)

    @pytest.mark.filterwarnings("ignore::RuntimeWarning")
    def test_nanargmin(self, skipna):
        func = partial(self._argminmax_wrap, func=np.argmin)
        self.check_funs(nanops.nanargmin, func, skipna, allow_obj=False)

    def _skew_kurt_wrap(self, values, axis=None, func=None):
        # wrap scipy's skew/kurtosis to zero out constant slices, matching
        # the pandas convention
        if not isinstance(values.dtype.type, np.floating):
            values = values.astype("f8")
        result = func(values, axis=axis, bias=False)
        # fix for handling cases where all elements in an axis are the same
        if isinstance(result, np.ndarray):
            result[np.max(values, axis=axis) == np.min(values, axis=axis)] = 0
            return result
        elif np.max(values) == np.min(values):
            return 0.0
        return result

    def test_nanskew(self, skipna):
        sp_stats = pytest.importorskip("scipy.stats")

        func = partial(self._skew_kurt_wrap, func=sp_stats.skew)
        with np.errstate(invalid="ignore"):
            self.check_funs(
                nanops.nanskew,
                func,
                skipna,
                allow_complex=False,
                allow_date=False,
                allow_tdelta=False,
            )

    def test_nankurt(self, skipna):
        sp_stats = pytest.importorskip("scipy.stats")

        func1 = partial(sp_stats.kurtosis, fisher=True)
        func = partial(self._skew_kurt_wrap, func=func1)
        with np.errstate(invalid="ignore"):
            self.check_funs(
                nanops.nankurt,
                func,
                skipna,
                allow_complex=False,
                allow_date=False,
                allow_tdelta=False,
            )

    def test_nanprod(self, skipna):
        self.check_funs(
            nanops.nanprod,
            np.prod,
            skipna,
            allow_date=False,
            allow_tdelta=False,
            empty_targfunc=np.nanprod,
        )

    def check_nancorr_nancov_2d(self, checkfun, targ0, targ1, **kwargs):
        # exercise a 2D corr/cov function against clean (targ0), NaN-laced
        # (targ1), and all-NaN inputs, with and without min_periods
        res00 = checkfun(self.arr_float_2d, self.arr_float1_2d, **kwargs)
        res01 = checkfun(
            self.arr_float_2d,
            self.arr_float1_2d,
            min_periods=len(self.arr_float_2d) - 1,
            **kwargs,
        )
        tm.assert_almost_equal(targ0, res00)
        tm.assert_almost_equal(targ0, res01)

        res10 = checkfun(self.arr_float_nan_2d, self.arr_float1_nan_2d, **kwargs)
        res11 = checkfun(
            self.arr_float_nan_2d,
            self.arr_float1_nan_2d,
            min_periods=len(self.arr_float_2d) - 1,
            **kwargs,
        )
        tm.assert_almost_equal(targ1, res10)
        tm.assert_almost_equal(targ1, res11)

        targ2 = np.nan
        res20 = checkfun(self.arr_nan_2d,
self.arr_float1_2d, **kwargs)\n res21 = checkfun(self.arr_float_2d, self.arr_nan_2d, **kwargs)\n res22 = checkfun(self.arr_nan_2d, self.arr_nan_2d, **kwargs)\n res23 = checkfun(self.arr_float_nan_2d, self.arr_nan_float1_2d, **kwargs)\n res24 = checkfun(\n self.arr_float_nan_2d,\n self.arr_nan_float1_2d,\n min_periods=len(self.arr_float_2d) - 1,\n **kwargs,\n )\n res25 = checkfun(\n self.arr_float_2d,\n self.arr_float1_2d,\n min_periods=len(self.arr_float_2d) + 1,\n **kwargs,\n )\n tm.assert_almost_equal(targ2, res20)\n tm.assert_almost_equal(targ2, res21)\n tm.assert_almost_equal(targ2, res22)\n tm.assert_almost_equal(targ2, res23)\n tm.assert_almost_equal(targ2, res24)\n tm.assert_almost_equal(targ2, res25)\n\n def check_nancorr_nancov_1d(self, checkfun, targ0, targ1, **kwargs):\n res00 = checkfun(self.arr_float_1d, self.arr_float1_1d, **kwargs)\n res01 = checkfun(\n self.arr_float_1d,\n self.arr_float1_1d,\n min_periods=len(self.arr_float_1d) - 1,\n **kwargs,\n )\n tm.assert_almost_equal(targ0, res00)\n tm.assert_almost_equal(targ0, res01)\n\n res10 = checkfun(self.arr_float_nan_1d, self.arr_float1_nan_1d, **kwargs)\n res11 = checkfun(\n self.arr_float_nan_1d,\n self.arr_float1_nan_1d,\n min_periods=len(self.arr_float_1d) - 1,\n **kwargs,\n )\n tm.assert_almost_equal(targ1, res10)\n tm.assert_almost_equal(targ1, res11)\n\n targ2 = np.nan\n res20 = checkfun(self.arr_nan_1d, self.arr_float1_1d, **kwargs)\n res21 = checkfun(self.arr_float_1d, self.arr_nan_1d, **kwargs)\n res22 = checkfun(self.arr_nan_1d, self.arr_nan_1d, **kwargs)\n res23 = checkfun(self.arr_float_nan_1d, self.arr_nan_float1_1d, **kwargs)\n res24 = checkfun(\n self.arr_float_nan_1d,\n self.arr_nan_float1_1d,\n min_periods=len(self.arr_float_1d) - 1,\n **kwargs,\n )\n res25 = checkfun(\n self.arr_float_1d,\n self.arr_float1_1d,\n min_periods=len(self.arr_float_1d) + 1,\n **kwargs,\n )\n tm.assert_almost_equal(targ2, res20)\n tm.assert_almost_equal(targ2, res21)\n tm.assert_almost_equal(targ2, 
res22)\n tm.assert_almost_equal(targ2, res23)\n tm.assert_almost_equal(targ2, res24)\n tm.assert_almost_equal(targ2, res25)\n\n def test_nancorr(self):\n targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]\n targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]\n self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1)\n targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1]\n targ1 = np.corrcoef(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1]\n self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="pearson")\n\n def test_nancorr_pearson(self):\n targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]\n targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]\n self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method="pearson")\n targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1]\n targ1 = np.corrcoef(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1]\n self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="pearson")\n\n def test_nancorr_kendall(self):\n sp_stats = pytest.importorskip("scipy.stats")\n\n targ0 = sp_stats.kendalltau(self.arr_float_2d, self.arr_float1_2d)[0]\n targ1 = sp_stats.kendalltau(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]\n self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method="kendall")\n targ0 = sp_stats.kendalltau(self.arr_float_1d, self.arr_float1_1d)[0]\n targ1 = sp_stats.kendalltau(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0]\n self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="kendall")\n\n def test_nancorr_spearman(self):\n sp_stats = pytest.importorskip("scipy.stats")\n\n targ0 = sp_stats.spearmanr(self.arr_float_2d, self.arr_float1_2d)[0]\n targ1 = sp_stats.spearmanr(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]\n self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method="spearman")\n targ0 = sp_stats.spearmanr(self.arr_float_1d, 
self.arr_float1_1d)[0]\n targ1 = sp_stats.spearmanr(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0]\n self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="spearman")\n\n def test_invalid_method(self):\n pytest.importorskip("scipy")\n targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]\n targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]\n msg = "Unknown method 'foo', expected one of 'kendall', 'spearman'"\n with pytest.raises(ValueError, match=msg):\n self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="foo")\n\n def test_nancov(self):\n targ0 = np.cov(self.arr_float_2d, self.arr_float1_2d)[0, 1]\n targ1 = np.cov(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]\n self.check_nancorr_nancov_2d(nanops.nancov, targ0, targ1)\n targ0 = np.cov(self.arr_float_1d, self.arr_float1_1d)[0, 1]\n targ1 = np.cov(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1]\n self.check_nancorr_nancov_1d(nanops.nancov, targ0, targ1)\n\n\n@pytest.mark.parametrize(\n "arr, correct",\n [\n ("arr_complex", False),\n ("arr_int", False),\n ("arr_bool", False),\n ("arr_str", False),\n ("arr_utf", False),\n ("arr_complex", False),\n ("arr_complex_nan", False),\n ("arr_nan_nanj", False),\n ("arr_nan_infj", True),\n ("arr_complex_nan_infj", True),\n ],\n)\ndef test_has_infs_non_float(request, arr, correct, disable_bottleneck):\n val = request.getfixturevalue(arr)\n while getattr(val, "ndim", True):\n res0 = nanops._has_infs(val)\n if correct:\n assert res0\n else:\n assert not res0\n\n if not hasattr(val, "ndim"):\n break\n\n # Reduce dimension for next step in the loop\n val = np.take(val, 0, axis=-1)\n\n\n@pytest.mark.parametrize(\n "arr, correct",\n [\n ("arr_float", False),\n ("arr_nan", False),\n ("arr_float_nan", False),\n ("arr_nan_nan", False),\n ("arr_float_inf", True),\n ("arr_inf", True),\n ("arr_nan_inf", True),\n ("arr_float_nan_inf", True),\n ("arr_nan_nan_inf", True),\n 
],\n)\n@pytest.mark.parametrize("astype", [None, "f4", "f2"])\ndef test_has_infs_floats(request, arr, correct, astype, disable_bottleneck):\n val = request.getfixturevalue(arr)\n if astype is not None:\n val = val.astype(astype)\n while getattr(val, "ndim", True):\n res0 = nanops._has_infs(val)\n if correct:\n assert res0\n else:\n assert not res0\n\n if not hasattr(val, "ndim"):\n break\n\n # Reduce dimension for next step in the loop\n val = np.take(val, 0, axis=-1)\n\n\n@pytest.mark.parametrize(\n "fixture", ["arr_float", "arr_complex", "arr_int", "arr_bool", "arr_str", "arr_utf"]\n)\ndef test_bn_ok_dtype(fixture, request, disable_bottleneck):\n obj = request.getfixturevalue(fixture)\n assert nanops._bn_ok_dtype(obj.dtype, "test")\n\n\n@pytest.mark.parametrize(\n "fixture",\n [\n "arr_date",\n "arr_tdelta",\n "arr_obj",\n ],\n)\ndef test_bn_not_ok_dtype(fixture, request, disable_bottleneck):\n obj = request.getfixturevalue(fixture)\n assert not nanops._bn_ok_dtype(obj.dtype, "test")\n\n\nclass TestEnsureNumeric:\n def test_numeric_values(self):\n # Test integer\n assert nanops._ensure_numeric(1) == 1\n\n # Test float\n assert nanops._ensure_numeric(1.1) == 1.1\n\n # Test complex\n assert nanops._ensure_numeric(1 + 2j) == 1 + 2j\n\n def test_ndarray(self):\n # Test numeric ndarray\n values = np.array([1, 2, 3])\n assert np.allclose(nanops._ensure_numeric(values), values)\n\n # Test object ndarray\n o_values = values.astype(object)\n assert np.allclose(nanops._ensure_numeric(o_values), values)\n\n # Test convertible string ndarray\n s_values = np.array(["1", "2", "3"], dtype=object)\n msg = r"Could not convert \['1' '2' '3'\] to numeric"\n with pytest.raises(TypeError, match=msg):\n nanops._ensure_numeric(s_values)\n\n # Test non-convertible string ndarray\n s_values = np.array(["foo", "bar", "baz"], dtype=object)\n msg = r"Could not convert .* to numeric"\n with pytest.raises(TypeError, match=msg):\n nanops._ensure_numeric(s_values)\n\n def 
test_convertable_values(self):\n with pytest.raises(TypeError, match="Could not convert string '1' to numeric"):\n nanops._ensure_numeric("1")\n with pytest.raises(\n TypeError, match="Could not convert string '1.1' to numeric"\n ):\n nanops._ensure_numeric("1.1")\n with pytest.raises(\n TypeError, match=r"Could not convert string '1\+1j' to numeric"\n ):\n nanops._ensure_numeric("1+1j")\n\n def test_non_convertable_values(self):\n msg = "Could not convert string 'foo' to numeric"\n with pytest.raises(TypeError, match=msg):\n nanops._ensure_numeric("foo")\n\n # with the wrong type, python raises TypeError for us\n msg = "argument must be a string or a number"\n with pytest.raises(TypeError, match=msg):\n nanops._ensure_numeric({})\n with pytest.raises(TypeError, match=msg):\n nanops._ensure_numeric([])\n\n\nclass TestNanvarFixedValues:\n # xref GH10242\n # Samples from a normal distribution.\n @pytest.fixture\n def variance(self):\n return 3.0\n\n @pytest.fixture\n def samples(self, variance):\n return self.prng.normal(scale=variance**0.5, size=100000)\n\n def test_nanvar_all_finite(self, samples, variance):\n actual_variance = nanops.nanvar(samples)\n tm.assert_almost_equal(actual_variance, variance, rtol=1e-2)\n\n def test_nanvar_nans(self, samples, variance):\n samples_test = np.nan * np.ones(2 * samples.shape[0])\n samples_test[::2] = samples\n\n actual_variance = nanops.nanvar(samples_test, skipna=True)\n tm.assert_almost_equal(actual_variance, variance, rtol=1e-2)\n\n actual_variance = nanops.nanvar(samples_test, skipna=False)\n tm.assert_almost_equal(actual_variance, np.nan, rtol=1e-2)\n\n def test_nanstd_nans(self, samples, variance):\n samples_test = np.nan * np.ones(2 * samples.shape[0])\n samples_test[::2] = samples\n\n actual_std = nanops.nanstd(samples_test, skipna=True)\n tm.assert_almost_equal(actual_std, variance**0.5, rtol=1e-2)\n\n actual_std = nanops.nanvar(samples_test, skipna=False)\n tm.assert_almost_equal(actual_std, np.nan, rtol=1e-2)\n\n 
def test_nanvar_axis(self, samples, variance):\n # Generate some sample data.\n samples_unif = self.prng.uniform(size=samples.shape[0])\n samples = np.vstack([samples, samples_unif])\n\n actual_variance = nanops.nanvar(samples, axis=1)\n tm.assert_almost_equal(\n actual_variance, np.array([variance, 1.0 / 12]), rtol=1e-2\n )\n\n def test_nanvar_ddof(self):\n n = 5\n samples = self.prng.uniform(size=(10000, n + 1))\n samples[:, -1] = np.nan # Force use of our own algorithm.\n\n variance_0 = nanops.nanvar(samples, axis=1, skipna=True, ddof=0).mean()\n variance_1 = nanops.nanvar(samples, axis=1, skipna=True, ddof=1).mean()\n variance_2 = nanops.nanvar(samples, axis=1, skipna=True, ddof=2).mean()\n\n # The unbiased estimate.\n var = 1.0 / 12\n tm.assert_almost_equal(variance_1, var, rtol=1e-2)\n\n # The underestimated variance.\n tm.assert_almost_equal(variance_0, (n - 1.0) / n * var, rtol=1e-2)\n\n # The overestimated variance.\n tm.assert_almost_equal(variance_2, (n - 1.0) / (n - 2.0) * var, rtol=1e-2)\n\n @pytest.mark.parametrize("axis", range(2))\n @pytest.mark.parametrize("ddof", range(3))\n def test_ground_truth(self, axis, ddof):\n # Test against values that were precomputed with Numpy.\n samples = np.empty((4, 4))\n samples[:3, :3] = np.array(\n [\n [0.97303362, 0.21869576, 0.55560287],\n [0.72980153, 0.03109364, 0.99155171],\n [0.09317602, 0.60078248, 0.15871292],\n ]\n )\n samples[3] = samples[:, 3] = np.nan\n\n # Actual variances along axis=0, 1 for ddof=0, 1, 2\n variance = np.array(\n [\n [\n [0.13762259, 0.05619224, 0.11568816],\n [0.20643388, 0.08428837, 0.17353224],\n [0.41286776, 0.16857673, 0.34706449],\n ],\n [\n [0.09519783, 0.16435395, 0.05082054],\n [0.14279674, 0.24653093, 0.07623082],\n [0.28559348, 0.49306186, 0.15246163],\n ],\n ]\n )\n\n # Test nanvar.\n var = nanops.nanvar(samples, skipna=True, axis=axis, ddof=ddof)\n tm.assert_almost_equal(var[:3], variance[axis, ddof])\n assert np.isnan(var[3])\n\n # Test nanstd.\n std = 
nanops.nanstd(samples, skipna=True, axis=axis, ddof=ddof)\n tm.assert_almost_equal(std[:3], variance[axis, ddof] ** 0.5)\n assert np.isnan(std[3])\n\n @pytest.mark.parametrize("ddof", range(3))\n def test_nanstd_roundoff(self, ddof):\n # Regression test for GH 10242 (test data taken from GH 10489). Ensure\n # that variance is stable.\n data = Series(766897346 * np.ones(10))\n result = data.std(ddof=ddof)\n assert result == 0.0\n\n @property\n def prng(self):\n return np.random.default_rng(2)\n\n\nclass TestNanskewFixedValues:\n # xref GH 11974\n # Test data + skewness value (computed with scipy.stats.skew)\n @pytest.fixture\n def samples(self):\n return np.sin(np.linspace(0, 1, 200))\n\n @pytest.fixture\n def actual_skew(self):\n return -0.1875895205961754\n\n @pytest.mark.parametrize("val", [3075.2, 3075.3, 3075.5])\n def test_constant_series(self, val):\n # xref GH 11974\n data = val * np.ones(300)\n skew = nanops.nanskew(data)\n assert skew == 0.0\n\n def test_all_finite(self):\n alpha, beta = 0.3, 0.1\n left_tailed = self.prng.beta(alpha, beta, size=100)\n assert nanops.nanskew(left_tailed) < 0\n\n alpha, beta = 0.1, 0.3\n right_tailed = self.prng.beta(alpha, beta, size=100)\n assert nanops.nanskew(right_tailed) > 0\n\n def test_ground_truth(self, samples, actual_skew):\n skew = nanops.nanskew(samples)\n tm.assert_almost_equal(skew, actual_skew)\n\n def test_axis(self, samples, actual_skew):\n samples = np.vstack([samples, np.nan * np.ones(len(samples))])\n skew = nanops.nanskew(samples, axis=1)\n tm.assert_almost_equal(skew, np.array([actual_skew, np.nan]))\n\n def test_nans(self, samples):\n samples = np.hstack([samples, np.nan])\n skew = nanops.nanskew(samples, skipna=False)\n assert np.isnan(skew)\n\n def test_nans_skipna(self, samples, actual_skew):\n samples = np.hstack([samples, np.nan])\n skew = nanops.nanskew(samples, skipna=True)\n tm.assert_almost_equal(skew, actual_skew)\n\n @property\n def prng(self):\n return np.random.default_rng(2)\n\n\nclass 
TestNankurtFixedValues:\n # xref GH 11974\n # Test data + kurtosis value (computed with scipy.stats.kurtosis)\n @pytest.fixture\n def samples(self):\n return np.sin(np.linspace(0, 1, 200))\n\n @pytest.fixture\n def actual_kurt(self):\n return -1.2058303433799713\n\n @pytest.mark.parametrize("val", [3075.2, 3075.3, 3075.5])\n def test_constant_series(self, val):\n # xref GH 11974\n data = val * np.ones(300)\n kurt = nanops.nankurt(data)\n assert kurt == 0.0\n\n def test_all_finite(self):\n alpha, beta = 0.3, 0.1\n left_tailed = self.prng.beta(alpha, beta, size=100)\n assert nanops.nankurt(left_tailed) < 2\n\n alpha, beta = 0.1, 0.3\n right_tailed = self.prng.beta(alpha, beta, size=100)\n assert nanops.nankurt(right_tailed) < 0\n\n def test_ground_truth(self, samples, actual_kurt):\n kurt = nanops.nankurt(samples)\n tm.assert_almost_equal(kurt, actual_kurt)\n\n def test_axis(self, samples, actual_kurt):\n samples = np.vstack([samples, np.nan * np.ones(len(samples))])\n kurt = nanops.nankurt(samples, axis=1)\n tm.assert_almost_equal(kurt, np.array([actual_kurt, np.nan]))\n\n def test_nans(self, samples):\n samples = np.hstack([samples, np.nan])\n kurt = nanops.nankurt(samples, skipna=False)\n assert np.isnan(kurt)\n\n def test_nans_skipna(self, samples, actual_kurt):\n samples = np.hstack([samples, np.nan])\n kurt = nanops.nankurt(samples, skipna=True)\n tm.assert_almost_equal(kurt, actual_kurt)\n\n @property\n def prng(self):\n return np.random.default_rng(2)\n\n\nclass TestDatetime64NaNOps:\n @pytest.fixture(params=["s", "ms", "us", "ns"])\n def unit(self, request):\n return request.param\n\n # Enabling mean changes the behavior of DataFrame.mean\n # See https://github.com/pandas-dev/pandas/issues/24752\n def test_nanmean(self, unit):\n dti = pd.date_range("2016-01-01", periods=3).as_unit(unit)\n expected = dti[1]\n\n for obj in [dti, dti._data]:\n result = nanops.nanmean(obj)\n assert result == expected\n\n dti2 = dti.insert(1, pd.NaT)\n\n for obj in [dti2, 
dti2._data]:\n result = nanops.nanmean(obj)\n assert result == expected\n\n @pytest.mark.parametrize("constructor", ["M8", "m8"])\n def test_nanmean_skipna_false(self, constructor, unit):\n dtype = f"{constructor}[{unit}]"\n arr = np.arange(12).astype(np.int64).view(dtype).reshape(4, 3)\n\n arr[-1, -1] = "NaT"\n\n result = nanops.nanmean(arr, skipna=False)\n assert np.isnat(result)\n assert result.dtype == dtype\n\n result = nanops.nanmean(arr, axis=0, skipna=False)\n expected = np.array([4, 5, "NaT"], dtype=arr.dtype)\n tm.assert_numpy_array_equal(result, expected)\n\n result = nanops.nanmean(arr, axis=1, skipna=False)\n expected = np.array([arr[0, 1], arr[1, 1], arr[2, 1], arr[-1, -1]])\n tm.assert_numpy_array_equal(result, expected)\n\n\ndef test_use_bottleneck():\n if nanops._BOTTLENECK_INSTALLED:\n with pd.option_context("use_bottleneck", True):\n assert pd.get_option("use_bottleneck")\n\n with pd.option_context("use_bottleneck", False):\n assert not pd.get_option("use_bottleneck")\n\n\n@pytest.mark.parametrize(\n "numpy_op, expected",\n [\n (np.sum, 10),\n (np.nansum, 10),\n (np.mean, 2.5),\n (np.nanmean, 2.5),\n (np.median, 2.5),\n (np.nanmedian, 2.5),\n (np.min, 1),\n (np.max, 4),\n (np.nanmin, 1),\n (np.nanmax, 4),\n ],\n)\ndef test_numpy_ops(numpy_op, expected):\n # GH8383\n result = numpy_op(Series([1, 2, 3, 4]))\n assert result == expected\n\n\n@pytest.mark.parametrize(\n "operation",\n [\n nanops.nanany,\n nanops.nanall,\n nanops.nansum,\n nanops.nanmean,\n nanops.nanmedian,\n nanops.nanstd,\n nanops.nanvar,\n nanops.nansem,\n nanops.nanargmax,\n nanops.nanargmin,\n nanops.nanmax,\n nanops.nanmin,\n nanops.nanskew,\n nanops.nankurt,\n nanops.nanprod,\n ],\n)\ndef test_nanops_independent_of_mask_param(operation):\n # GH22764\n ser = Series([1, 2, np.nan, 3, np.nan, 4])\n mask = ser.isna()\n median_expected = operation(ser._values)\n median_result = operation(ser._values, mask=mask)\n assert median_expected == 
median_result\n\n\n@pytest.mark.parametrize("min_count", [-1, 0])\ndef test_check_below_min_count_negative_or_zero_min_count(min_count):\n # GH35227\n result = nanops.check_below_min_count((21, 37), None, min_count)\n expected_result = False\n assert result == expected_result\n\n\n@pytest.mark.parametrize(\n "mask", [None, np.array([False, False, True]), np.array([True] + 9 * [False])]\n)\n@pytest.mark.parametrize("min_count, expected_result", [(1, False), (101, True)])\ndef test_check_below_min_count_positive_min_count(mask, min_count, expected_result):\n # GH35227\n shape = (10, 10)\n result = nanops.check_below_min_count(shape, mask, min_count)\n assert result == expected_result\n\n\n@td.skip_if_windows\n@td.skip_if_32bit\n@pytest.mark.parametrize("min_count, expected_result", [(1, False), (2812191852, True)])\ndef test_check_below_min_count_large_shape(min_count, expected_result):\n # GH35227 large shape used to show that the issue is fixed\n shape = (2244367, 1253)\n result = nanops.check_below_min_count(shape, mask=None, min_count=min_count)\n assert result == expected_result\n\n\n@pytest.mark.parametrize("func", ["nanmean", "nansum"])\ndef test_check_bottleneck_disallow(any_real_numpy_dtype, func):\n # GH 42878 bottleneck sometimes produces unreliable results for mean and sum\n assert not nanops._bn_ok_dtype(np.dtype(any_real_numpy_dtype).type, func)\n\n\n@pytest.mark.parametrize("val", [2**55, -(2**55), 20150515061816532])\ndef test_nanmean_overflow(disable_bottleneck, val):\n # GH 10155\n # In the previous implementation mean can overflow for int dtypes, it\n # is now consistent with numpy\n\n ser = Series(val, index=range(500), dtype=np.int64)\n result = ser.mean()\n np_result = ser.values.mean()\n assert result == val\n assert result == np_result\n assert result.dtype == np.float64\n\n\n@pytest.mark.parametrize(\n "dtype",\n [\n np.int16,\n np.int32,\n np.int64,\n np.float32,\n np.float64,\n getattr(np, "float128", None),\n 
],\n)\n@pytest.mark.parametrize("method", ["mean", "std", "var", "skew", "kurt", "min", "max"])\ndef test_returned_dtype(disable_bottleneck, dtype, method):\n if dtype is None:\n pytest.skip("np.float128 not available")\n\n ser = Series(range(10), dtype=dtype)\n result = getattr(ser, method)()\n if is_integer_dtype(dtype) and method not in ["min", "max"]:\n assert result.dtype == np.float64\n else:\n assert result.dtype == dtype\n | .venv\Lib\site-packages\pandas\tests\test_nanops.py | test_nanops.py | Python | 42,042 | 0.95 | 0.128728 | 0.063158 | python-kit | 647 | 2023-09-07T11:17:11.808623 | Apache-2.0 | true | 58186cd452939fa63349cf3ba4115785 |
import sys\nimport types\n\nimport pytest\n\nfrom pandas.compat._optional import (\n VERSIONS,\n import_optional_dependency,\n)\n\nimport pandas._testing as tm\n\n\ndef test_import_optional():\n match = "Missing .*notapackage.* pip .* conda .* notapackage"\n with pytest.raises(ImportError, match=match) as exc_info:\n import_optional_dependency("notapackage")\n # The original exception should be there as context:\n assert isinstance(exc_info.value.__context__, ImportError)\n\n result = import_optional_dependency("notapackage", errors="ignore")\n assert result is None\n\n\ndef test_xlrd_version_fallback():\n pytest.importorskip("xlrd")\n import_optional_dependency("xlrd")\n\n\ndef test_bad_version(monkeypatch):\n name = "fakemodule"\n module = types.ModuleType(name)\n module.__version__ = "0.9.0"\n sys.modules[name] = module\n monkeypatch.setitem(VERSIONS, name, "1.0.0")\n\n match = "Pandas requires .*1.0.0.* of .fakemodule.*'0.9.0'"\n with pytest.raises(ImportError, match=match):\n import_optional_dependency("fakemodule")\n\n # Test min_version parameter\n result = import_optional_dependency("fakemodule", min_version="0.8")\n assert result is module\n\n with tm.assert_produces_warning(UserWarning):\n result = import_optional_dependency("fakemodule", errors="warn")\n assert result is None\n\n module.__version__ = "1.0.0" # exact match is OK\n result = import_optional_dependency("fakemodule")\n assert result is module\n\n with pytest.raises(ImportError, match="Pandas requires version '1.1.0'"):\n import_optional_dependency("fakemodule", min_version="1.1.0")\n\n with tm.assert_produces_warning(UserWarning):\n result = import_optional_dependency(\n "fakemodule", errors="warn", min_version="1.1.0"\n )\n assert result is None\n\n result = import_optional_dependency(\n "fakemodule", errors="ignore", min_version="1.1.0"\n )\n assert result is None\n\n\ndef test_submodule(monkeypatch):\n # Create a fake module with a submodule\n name = "fakemodule"\n module = 
types.ModuleType(name)\n module.__version__ = "0.9.0"\n sys.modules[name] = module\n sub_name = "submodule"\n submodule = types.ModuleType(sub_name)\n setattr(module, sub_name, submodule)\n sys.modules[f"{name}.{sub_name}"] = submodule\n monkeypatch.setitem(VERSIONS, name, "1.0.0")\n\n match = "Pandas requires .*1.0.0.* of .fakemodule.*'0.9.0'"\n with pytest.raises(ImportError, match=match):\n import_optional_dependency("fakemodule.submodule")\n\n with tm.assert_produces_warning(UserWarning):\n result = import_optional_dependency("fakemodule.submodule", errors="warn")\n assert result is None\n\n module.__version__ = "1.0.0" # exact match is OK\n result = import_optional_dependency("fakemodule.submodule")\n assert result is submodule\n\n\ndef test_no_version_raises(monkeypatch):\n name = "fakemodule"\n module = types.ModuleType(name)\n sys.modules[name] = module\n monkeypatch.setitem(VERSIONS, name, "1.0.0")\n\n with pytest.raises(ImportError, match="Can't determine .* fakemodule"):\n import_optional_dependency(name)\n | .venv\Lib\site-packages\pandas\tests\test_optional_dependency.py | test_optional_dependency.py | Python | 3,159 | 0.95 | 0.05 | 0.04 | awesome-app | 862 | 2025-05-20T13:45:52.949250 | Apache-2.0 | true | b85e7746acdd8f6cb4f53297b776bfe0 |
from collections.abc import Generator\nimport contextlib\n\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.core import accessor\n\n\ndef test_dirname_mixin() -> None:\n # GH37173\n\n class X(accessor.DirNamesMixin):\n x = 1\n y: int\n\n def __init__(self) -> None:\n self.z = 3\n\n result = [attr_name for attr_name in dir(X()) if not attr_name.startswith("_")]\n\n assert result == ["x", "z"]\n\n\n@contextlib.contextmanager\ndef ensure_removed(obj, attr) -> Generator[None, None, None]:\n """Ensure that an attribute added to 'obj' during the test is\n removed when we're done\n """\n try:\n yield\n finally:\n try:\n delattr(obj, attr)\n except AttributeError:\n pass\n obj._accessors.discard(attr)\n\n\nclass MyAccessor:\n def __init__(self, obj) -> None:\n self.obj = obj\n self.item = "item"\n\n @property\n def prop(self):\n return self.item\n\n def method(self):\n return self.item\n\n\n@pytest.mark.parametrize(\n "obj, registrar",\n [\n (pd.Series, pd.api.extensions.register_series_accessor),\n (pd.DataFrame, pd.api.extensions.register_dataframe_accessor),\n (pd.Index, pd.api.extensions.register_index_accessor),\n ],\n)\ndef test_register(obj, registrar):\n with ensure_removed(obj, "mine"):\n before = set(dir(obj))\n registrar("mine")(MyAccessor)\n o = obj([]) if obj is not pd.Series else obj([], dtype=object)\n assert o.mine.prop == "item"\n after = set(dir(obj))\n assert (before ^ after) == {"mine"}\n assert "mine" in obj._accessors\n\n\ndef test_accessor_works():\n with ensure_removed(pd.Series, "mine"):\n pd.api.extensions.register_series_accessor("mine")(MyAccessor)\n\n s = pd.Series([1, 2])\n assert s.mine.obj is s\n\n assert s.mine.prop == "item"\n assert s.mine.method() == "item"\n\n\ndef test_overwrite_warns():\n match = r".*MyAccessor.*fake.*Series.*"\n with tm.assert_produces_warning(UserWarning, match=match):\n with ensure_removed(pd.Series, "fake"):\n setattr(pd.Series, "fake", 123)\n 
pd.api.extensions.register_series_accessor("fake")(MyAccessor)\n s = pd.Series([1, 2])\n assert s.fake.prop == "item"\n\n\ndef test_raises_attribute_error():\n with ensure_removed(pd.Series, "bad"):\n\n @pd.api.extensions.register_series_accessor("bad")\n class Bad:\n def __init__(self, data) -> None:\n raise AttributeError("whoops")\n\n with pytest.raises(AttributeError, match="whoops"):\n pd.Series([], dtype=object).bad\n | .venv\Lib\site-packages\pandas\tests\test_register_accessor.py | test_register_accessor.py | Python | 2,671 | 0.95 | 0.184466 | 0.012987 | vue-tools | 581 | 2023-12-24T21:43:30.194710 | GPL-3.0 | true | 7bff351195536984d7a0e30658237cd3 |
from collections import defaultdict\nfrom datetime import datetime\nfrom itertools import product\n\nimport numpy as np\nimport pytest\n\nfrom pandas import (\n NA,\n DataFrame,\n MultiIndex,\n Series,\n array,\n concat,\n merge,\n)\nimport pandas._testing as tm\nfrom pandas.core.algorithms import safe_sort\nimport pandas.core.common as com\nfrom pandas.core.sorting import (\n _decons_group_index,\n get_group_index,\n is_int64_overflow_possible,\n lexsort_indexer,\n nargsort,\n)\n\n\n@pytest.fixture\ndef left_right():\n low, high, n = -1 << 10, 1 << 10, 1 << 20\n left = DataFrame(\n np.random.default_rng(2).integers(low, high, (n, 7)), columns=list("ABCDEFG")\n )\n left["left"] = left.sum(axis=1)\n\n # one-2-one match\n i = np.random.default_rng(2).permutation(len(left))\n right = left.iloc[i].copy()\n right.columns = right.columns[:-1].tolist() + ["right"]\n right.index = np.arange(len(right))\n right["right"] *= -1\n return left, right\n\n\nclass TestSorting:\n @pytest.mark.slow\n def test_int64_overflow(self):\n B = np.concatenate((np.arange(1000), np.arange(1000), np.arange(500)))\n A = np.arange(2500)\n df = DataFrame(\n {\n "A": A,\n "B": B,\n "C": A,\n "D": B,\n "E": A,\n "F": B,\n "G": A,\n "H": B,\n "values": np.random.default_rng(2).standard_normal(2500),\n }\n )\n\n lg = df.groupby(["A", "B", "C", "D", "E", "F", "G", "H"])\n rg = df.groupby(["H", "G", "F", "E", "D", "C", "B", "A"])\n\n left = lg.sum()["values"]\n right = rg.sum()["values"]\n\n exp_index, _ = left.index.sortlevel()\n tm.assert_index_equal(left.index, exp_index)\n\n exp_index, _ = right.index.sortlevel(0)\n tm.assert_index_equal(right.index, exp_index)\n\n tups = list(map(tuple, df[["A", "B", "C", "D", "E", "F", "G", "H"]].values))\n tups = com.asarray_tuplesafe(tups)\n\n expected = df.groupby(tups).sum()["values"]\n\n for k, v in expected.items():\n assert left[k] == right[k[::-1]]\n assert left[k] == v\n assert len(left) == len(right)\n\n def 
test_int64_overflow_groupby_large_range(self):\n # GH9096\n values = range(55109)\n data = DataFrame.from_dict({"a": values, "b": values, "c": values, "d": values})\n grouped = data.groupby(["a", "b", "c", "d"])\n assert len(grouped) == len(values)\n\n @pytest.mark.parametrize("agg", ["mean", "median"])\n def test_int64_overflow_groupby_large_df_shuffled(self, agg):\n rs = np.random.default_rng(2)\n arr = rs.integers(-1 << 12, 1 << 12, (1 << 15, 5))\n i = rs.choice(len(arr), len(arr) * 4)\n arr = np.vstack((arr, arr[i])) # add some duplicate rows\n\n i = rs.permutation(len(arr))\n arr = arr[i] # shuffle rows\n\n df = DataFrame(arr, columns=list("abcde"))\n df["jim"], df["joe"] = np.zeros((2, len(df)))\n gr = df.groupby(list("abcde"))\n\n # verify this is testing what it is supposed to test!\n assert is_int64_overflow_possible(gr._grouper.shape)\n\n mi = MultiIndex.from_arrays(\n [ar.ravel() for ar in np.array_split(np.unique(arr, axis=0), 5, axis=1)],\n names=list("abcde"),\n )\n\n res = DataFrame(\n np.zeros((len(mi), 2)), columns=["jim", "joe"], index=mi\n ).sort_index()\n\n tm.assert_frame_equal(getattr(gr, agg)(), res)\n\n @pytest.mark.parametrize(\n "order, na_position, exp",\n [\n [\n True,\n "last",\n list(range(5, 105)) + list(range(5)) + list(range(105, 110)),\n ],\n [\n True,\n "first",\n list(range(5)) + list(range(105, 110)) + list(range(5, 105)),\n ],\n [\n False,\n "last",\n list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110)),\n ],\n [\n False,\n "first",\n list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1)),\n ],\n ],\n )\n def test_lexsort_indexer(self, order, na_position, exp):\n keys = [[np.nan] * 5 + list(range(100)) + [np.nan] * 5]\n result = lexsort_indexer(keys, orders=order, na_position=na_position)\n tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))\n\n @pytest.mark.parametrize(\n "ascending, na_position, exp",\n [\n [\n True,\n "last",\n list(range(5, 105)) + list(range(5)) + list(range(105, 
110)),\n ],\n [\n True,\n "first",\n list(range(5)) + list(range(105, 110)) + list(range(5, 105)),\n ],\n [\n False,\n "last",\n list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110)),\n ],\n [\n False,\n "first",\n list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1)),\n ],\n ],\n )\n def test_nargsort(self, ascending, na_position, exp):\n # list places NaNs last, np.array(..., dtype="O") may not place NaNs first\n items = np.array([np.nan] * 5 + list(range(100)) + [np.nan] * 5, dtype="O")\n\n # mergesort is the most difficult to get right because we want it to be\n # stable.\n\n # According to numpy/core/tests/test_multiarray, """The number of\n # sorted items must be greater than ~50 to check the actual algorithm\n # because quick and merge sort fall over to insertion sort for small\n # arrays."""\n\n result = nargsort(\n items, kind="mergesort", ascending=ascending, na_position=na_position\n )\n tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)\n\n\nclass TestMerge:\n def test_int64_overflow_outer_merge(self):\n # #2690, combinatorial explosion\n df1 = DataFrame(\n np.random.default_rng(2).standard_normal((1000, 7)),\n columns=list("ABCDEF") + ["G1"],\n )\n df2 = DataFrame(\n np.random.default_rng(3).standard_normal((1000, 7)),\n columns=list("ABCDEF") + ["G2"],\n )\n result = merge(df1, df2, how="outer")\n assert len(result) == 2000\n\n @pytest.mark.slow\n def test_int64_overflow_check_sum_col(self, left_right):\n left, right = left_right\n\n out = merge(left, right, how="outer")\n assert len(out) == len(left)\n tm.assert_series_equal(out["left"], -out["right"], check_names=False)\n result = out.iloc[:, :-2].sum(axis=1)\n tm.assert_series_equal(out["left"], result, check_names=False)\n assert result.name is None\n\n @pytest.mark.slow\n @pytest.mark.parametrize("how", ["left", "right", "outer", "inner"])\n def test_int64_overflow_how_merge(self, left_right, how):\n left, right = left_right\n\n out = merge(left, right, 
how="outer")\n out.sort_values(out.columns.tolist(), inplace=True)\n out.index = np.arange(len(out))\n tm.assert_frame_equal(out, merge(left, right, how=how, sort=True))\n\n @pytest.mark.slow\n def test_int64_overflow_sort_false_order(self, left_right):\n left, right = left_right\n\n # check that left merge w/ sort=False maintains left frame order\n out = merge(left, right, how="left", sort=False)\n tm.assert_frame_equal(left, out[left.columns.tolist()])\n\n out = merge(right, left, how="left", sort=False)\n tm.assert_frame_equal(right, out[right.columns.tolist()])\n\n @pytest.mark.slow\n @pytest.mark.parametrize("how", ["left", "right", "outer", "inner"])\n @pytest.mark.parametrize("sort", [True, False])\n def test_int64_overflow_one_to_many_none_match(self, how, sort):\n # one-2-many/none match\n low, high, n = -1 << 10, 1 << 10, 1 << 11\n left = DataFrame(\n np.random.default_rng(2).integers(low, high, (n, 7)).astype("int64"),\n columns=list("ABCDEFG"),\n )\n\n # confirm that this is checking what it is supposed to check\n shape = left.apply(Series.nunique).values\n assert is_int64_overflow_possible(shape)\n\n # add duplicates to left frame\n left = concat([left, left], ignore_index=True)\n\n right = DataFrame(\n np.random.default_rng(3).integers(low, high, (n // 2, 7)).astype("int64"),\n columns=list("ABCDEFG"),\n )\n\n # add duplicates & overlap with left to the right frame\n i = np.random.default_rng(4).choice(len(left), n)\n right = concat([right, right, left.iloc[i]], ignore_index=True)\n\n left["left"] = np.random.default_rng(2).standard_normal(len(left))\n right["right"] = np.random.default_rng(2).standard_normal(len(right))\n\n # shuffle left & right frames\n i = np.random.default_rng(5).permutation(len(left))\n left = left.iloc[i].copy()\n left.index = np.arange(len(left))\n\n i = np.random.default_rng(6).permutation(len(right))\n right = right.iloc[i].copy()\n right.index = np.arange(len(right))\n\n # manually compute outer merge\n ldict, rdict = 
defaultdict(list), defaultdict(list)\n\n for idx, row in left.set_index(list("ABCDEFG")).iterrows():\n ldict[idx].append(row["left"])\n\n for idx, row in right.set_index(list("ABCDEFG")).iterrows():\n rdict[idx].append(row["right"])\n\n vals = []\n for k, lval in ldict.items():\n rval = rdict.get(k, [np.nan])\n for lv, rv in product(lval, rval):\n vals.append(\n k\n + (\n lv,\n rv,\n )\n )\n\n for k, rval in rdict.items():\n if k not in ldict:\n vals.extend(\n k\n + (\n np.nan,\n rv,\n )\n for rv in rval\n )\n\n def align(df):\n df = df.sort_values(df.columns.tolist())\n df.index = np.arange(len(df))\n return df\n\n out = DataFrame(vals, columns=list("ABCDEFG") + ["left", "right"])\n out = align(out)\n\n jmask = {\n "left": out["left"].notna(),\n "right": out["right"].notna(),\n "inner": out["left"].notna() & out["right"].notna(),\n "outer": np.ones(len(out), dtype="bool"),\n }\n\n mask = jmask[how]\n frame = align(out[mask].copy())\n assert mask.all() ^ mask.any() or how == "outer"\n\n res = merge(left, right, how=how, sort=sort)\n if sort:\n kcols = list("ABCDEFG")\n tm.assert_frame_equal(\n res[kcols].copy(), res[kcols].sort_values(kcols, kind="mergesort")\n )\n\n # as in GH9092 dtypes break with outer/right join\n # 2021-12-18: dtype does not break anymore\n tm.assert_frame_equal(frame, align(res))\n\n\n@pytest.mark.parametrize(\n "codes_list, shape",\n [\n [\n [\n np.tile([0, 1, 2, 3, 0, 1, 2, 3], 100).astype(np.int64),\n np.tile([0, 2, 4, 3, 0, 1, 2, 3], 100).astype(np.int64),\n np.tile([5, 1, 0, 2, 3, 0, 5, 4], 100).astype(np.int64),\n ],\n (4, 5, 6),\n ],\n [\n [\n np.tile(np.arange(10000, dtype=np.int64), 5),\n np.tile(np.arange(10000, dtype=np.int64), 5),\n ],\n (10000, 10000),\n ],\n ],\n)\ndef test_decons(codes_list, shape):\n group_index = get_group_index(codes_list, shape, sort=True, xnull=True)\n codes_list2 = _decons_group_index(group_index, shape)\n\n for a, b in zip(codes_list, codes_list2):\n tm.assert_numpy_array_equal(a, b)\n\n\nclass 
TestSafeSort:\n @pytest.mark.parametrize(\n "arg, exp",\n [\n [[3, 1, 2, 0, 4], [0, 1, 2, 3, 4]],\n [\n np.array(list("baaacb"), dtype=object),\n np.array(list("aaabbc"), dtype=object),\n ],\n [[], []],\n ],\n )\n def test_basic_sort(self, arg, exp):\n result = safe_sort(np.array(arg))\n expected = np.array(exp)\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize("verify", [True, False])\n @pytest.mark.parametrize(\n "codes, exp_codes",\n [\n [[0, 1, 1, 2, 3, 0, -1, 4], [3, 1, 1, 2, 0, 3, -1, 4]],\n [[], []],\n ],\n )\n def test_codes(self, verify, codes, exp_codes):\n values = np.array([3, 1, 2, 0, 4])\n expected = np.array([0, 1, 2, 3, 4])\n\n result, result_codes = safe_sort(\n values, codes, use_na_sentinel=True, verify=verify\n )\n expected_codes = np.array(exp_codes, dtype=np.intp)\n tm.assert_numpy_array_equal(result, expected)\n tm.assert_numpy_array_equal(result_codes, expected_codes)\n\n def test_codes_out_of_bound(self):\n values = np.array([3, 1, 2, 0, 4])\n expected = np.array([0, 1, 2, 3, 4])\n\n # out of bound indices\n codes = [0, 101, 102, 2, 3, 0, 99, 4]\n result, result_codes = safe_sort(values, codes, use_na_sentinel=True)\n expected_codes = np.array([3, -1, -1, 2, 0, 3, -1, 4], dtype=np.intp)\n tm.assert_numpy_array_equal(result, expected)\n tm.assert_numpy_array_equal(result_codes, expected_codes)\n\n def test_mixed_integer(self):\n values = np.array(["b", 1, 0, "a", 0, "b"], dtype=object)\n result = safe_sort(values)\n expected = np.array([0, 0, 1, "a", "b", "b"], dtype=object)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_mixed_integer_with_codes(self):\n values = np.array(["b", 1, 0, "a"], dtype=object)\n codes = [0, 1, 2, 3, 0, -1, 1]\n result, result_codes = safe_sort(values, codes)\n expected = np.array([0, 1, "a", "b"], dtype=object)\n expected_codes = np.array([3, 1, 0, 2, 3, -1, 1], dtype=np.intp)\n tm.assert_numpy_array_equal(result, expected)\n tm.assert_numpy_array_equal(result_codes, 
expected_codes)\n\n def test_unsortable(self):\n # GH 13714\n arr = np.array([1, 2, datetime.now(), 0, 3], dtype=object)\n msg = "'[<>]' not supported between instances of .*"\n with pytest.raises(TypeError, match=msg):\n safe_sort(arr)\n\n @pytest.mark.parametrize(\n "arg, codes, err, msg",\n [\n [1, None, TypeError, "Only np.ndarray, ExtensionArray, and Index"],\n [np.array([0, 1, 2]), 1, TypeError, "Only list-like objects or None"],\n [np.array([0, 1, 2, 1]), [0, 1], ValueError, "values should be unique"],\n ],\n )\n def test_exceptions(self, arg, codes, err, msg):\n with pytest.raises(err, match=msg):\n safe_sort(values=arg, codes=codes)\n\n @pytest.mark.parametrize(\n "arg, exp", [[[1, 3, 2], [1, 2, 3]], [[1, 3, np.nan, 2], [1, 2, 3, np.nan]]]\n )\n def test_extension_array(self, arg, exp):\n a = array(arg, dtype="Int64")\n result = safe_sort(a)\n expected = array(exp, dtype="Int64")\n tm.assert_extension_array_equal(result, expected)\n\n @pytest.mark.parametrize("verify", [True, False])\n def test_extension_array_codes(self, verify):\n a = array([1, 3, 2], dtype="Int64")\n result, codes = safe_sort(a, [0, 1, -1, 2], use_na_sentinel=True, verify=verify)\n expected_values = array([1, 2, 3], dtype="Int64")\n expected_codes = np.array([0, 2, -1, 1], dtype=np.intp)\n tm.assert_extension_array_equal(result, expected_values)\n tm.assert_numpy_array_equal(codes, expected_codes)\n\n\ndef test_mixed_str_null(nulls_fixture):\n values = np.array(["b", nulls_fixture, "a", "b"], dtype=object)\n result = safe_sort(values)\n expected = np.array(["a", "b", "b", nulls_fixture], dtype=object)\n tm.assert_numpy_array_equal(result, expected)\n\n\ndef test_safe_sort_multiindex():\n # GH#48412\n arr1 = Series([2, 1, NA, NA], dtype="Int64")\n arr2 = [2, 1, 3, 3]\n midx = MultiIndex.from_arrays([arr1, arr2])\n result = safe_sort(midx)\n expected = MultiIndex.from_arrays(\n [Series([1, 2, NA, NA], dtype="Int64"), [1, 2, 3, 3]]\n )\n tm.assert_index_equal(result, expected)\n | 
.venv\Lib\site-packages\pandas\tests\test_sorting.py | test_sorting.py | Python | 16,595 | 0.95 | 0.080082 | 0.05569 | awesome-app | 248 | 2025-02-22T02:57:35.605124 | BSD-3-Clause | true | b5797712c3a120e1d9eff8af8d9f0415 |
from datetime import datetime\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs import iNaT\n\nimport pandas._testing as tm\nimport pandas.core.algorithms as algos\n\n\n@pytest.fixture(\n params=[\n (np.int8, np.int16(127), np.int8),\n (np.int8, np.int16(128), np.int16),\n (np.int32, 1, np.int32),\n (np.int32, 2.0, np.float64),\n (np.int32, 3.0 + 4.0j, np.complex128),\n (np.int32, True, np.object_),\n (np.int32, "", np.object_),\n (np.float64, 1, np.float64),\n (np.float64, 2.0, np.float64),\n (np.float64, 3.0 + 4.0j, np.complex128),\n (np.float64, True, np.object_),\n (np.float64, "", np.object_),\n (np.complex128, 1, np.complex128),\n (np.complex128, 2.0, np.complex128),\n (np.complex128, 3.0 + 4.0j, np.complex128),\n (np.complex128, True, np.object_),\n (np.complex128, "", np.object_),\n (np.bool_, 1, np.object_),\n (np.bool_, 2.0, np.object_),\n (np.bool_, 3.0 + 4.0j, np.object_),\n (np.bool_, True, np.bool_),\n (np.bool_, "", np.object_),\n ]\n)\ndef dtype_fill_out_dtype(request):\n return request.param\n\n\nclass TestTake:\n def test_1d_fill_nonna(self, dtype_fill_out_dtype):\n dtype, fill_value, out_dtype = dtype_fill_out_dtype\n data = np.random.default_rng(2).integers(0, 2, 4).astype(dtype)\n indexer = [2, 1, 0, -1]\n\n result = algos.take_nd(data, indexer, fill_value=fill_value)\n assert (result[[0, 1, 2]] == data[[2, 1, 0]]).all()\n assert result[3] == fill_value\n assert result.dtype == out_dtype\n\n indexer = [2, 1, 0, 1]\n\n result = algos.take_nd(data, indexer, fill_value=fill_value)\n assert (result[[0, 1, 2, 3]] == data[indexer]).all()\n assert result.dtype == dtype\n\n def test_2d_fill_nonna(self, dtype_fill_out_dtype):\n dtype, fill_value, out_dtype = dtype_fill_out_dtype\n data = np.random.default_rng(2).integers(0, 2, (5, 3)).astype(dtype)\n indexer = [2, 1, 0, -1]\n\n result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value)\n assert (result[[0, 1, 2], :] == data[[2, 1, 0], :]).all()\n assert (result[3, :] == 
fill_value).all()\n assert result.dtype == out_dtype\n\n result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value)\n assert (result[:, [0, 1, 2]] == data[:, [2, 1, 0]]).all()\n assert (result[:, 3] == fill_value).all()\n assert result.dtype == out_dtype\n\n indexer = [2, 1, 0, 1]\n result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value)\n assert (result[[0, 1, 2, 3], :] == data[indexer, :]).all()\n assert result.dtype == dtype\n\n result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value)\n assert (result[:, [0, 1, 2, 3]] == data[:, indexer]).all()\n assert result.dtype == dtype\n\n def test_3d_fill_nonna(self, dtype_fill_out_dtype):\n dtype, fill_value, out_dtype = dtype_fill_out_dtype\n\n data = np.random.default_rng(2).integers(0, 2, (5, 4, 3)).astype(dtype)\n indexer = [2, 1, 0, -1]\n\n result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value)\n assert (result[[0, 1, 2], :, :] == data[[2, 1, 0], :, :]).all()\n assert (result[3, :, :] == fill_value).all()\n assert result.dtype == out_dtype\n\n result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value)\n assert (result[:, [0, 1, 2], :] == data[:, [2, 1, 0], :]).all()\n assert (result[:, 3, :] == fill_value).all()\n assert result.dtype == out_dtype\n\n result = algos.take_nd(data, indexer, axis=2, fill_value=fill_value)\n assert (result[:, :, [0, 1, 2]] == data[:, :, [2, 1, 0]]).all()\n assert (result[:, :, 3] == fill_value).all()\n assert result.dtype == out_dtype\n\n indexer = [2, 1, 0, 1]\n result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value)\n assert (result[[0, 1, 2, 3], :, :] == data[indexer, :, :]).all()\n assert result.dtype == dtype\n\n result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value)\n assert (result[:, [0, 1, 2, 3], :] == data[:, indexer, :]).all()\n assert result.dtype == dtype\n\n result = algos.take_nd(data, indexer, axis=2, fill_value=fill_value)\n assert (result[:, :, [0, 1, 2, 3]] == data[:, :, 
indexer]).all()\n assert result.dtype == dtype\n\n def test_1d_other_dtypes(self):\n arr = np.random.default_rng(2).standard_normal(10).astype(np.float32)\n\n indexer = [1, 2, 3, -1]\n result = algos.take_nd(arr, indexer)\n expected = arr.take(indexer)\n expected[-1] = np.nan\n tm.assert_almost_equal(result, expected)\n\n def test_2d_other_dtypes(self):\n arr = np.random.default_rng(2).standard_normal((10, 5)).astype(np.float32)\n\n indexer = [1, 2, 3, -1]\n\n # axis=0\n result = algos.take_nd(arr, indexer, axis=0)\n expected = arr.take(indexer, axis=0)\n expected[-1] = np.nan\n tm.assert_almost_equal(result, expected)\n\n # axis=1\n result = algos.take_nd(arr, indexer, axis=1)\n expected = arr.take(indexer, axis=1)\n expected[:, -1] = np.nan\n tm.assert_almost_equal(result, expected)\n\n def test_1d_bool(self):\n arr = np.array([0, 1, 0], dtype=bool)\n\n result = algos.take_nd(arr, [0, 2, 2, 1])\n expected = arr.take([0, 2, 2, 1])\n tm.assert_numpy_array_equal(result, expected)\n\n result = algos.take_nd(arr, [0, 2, -1])\n assert result.dtype == np.object_\n\n def test_2d_bool(self):\n arr = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 1]], dtype=bool)\n\n result = algos.take_nd(arr, [0, 2, 2, 1])\n expected = arr.take([0, 2, 2, 1], axis=0)\n tm.assert_numpy_array_equal(result, expected)\n\n result = algos.take_nd(arr, [0, 2, 2, 1], axis=1)\n expected = arr.take([0, 2, 2, 1], axis=1)\n tm.assert_numpy_array_equal(result, expected)\n\n result = algos.take_nd(arr, [0, 2, -1])\n assert result.dtype == np.object_\n\n def test_2d_float32(self):\n arr = np.random.default_rng(2).standard_normal((4, 3)).astype(np.float32)\n indexer = [0, 2, -1, 1, -1]\n\n # axis=0\n result = algos.take_nd(arr, indexer, axis=0)\n\n expected = arr.take(indexer, axis=0)\n expected[[2, 4], :] = np.nan\n tm.assert_almost_equal(result, expected)\n\n # axis=1\n result = algos.take_nd(arr, indexer, axis=1)\n expected = arr.take(indexer, axis=1)\n expected[:, [2, 4]] = np.nan\n 
tm.assert_almost_equal(result, expected)\n\n def test_2d_datetime64(self):\n # 2005/01/01 - 2006/01/01\n arr = (\n np.random.default_rng(2).integers(11_045_376, 11_360_736, (5, 3))\n * 100_000_000_000\n )\n arr = arr.view(dtype="datetime64[ns]")\n indexer = [0, 2, -1, 1, -1]\n\n # axis=0\n result = algos.take_nd(arr, indexer, axis=0)\n expected = arr.take(indexer, axis=0)\n expected.view(np.int64)[[2, 4], :] = iNaT\n tm.assert_almost_equal(result, expected)\n\n result = algos.take_nd(arr, indexer, axis=0, fill_value=datetime(2007, 1, 1))\n expected = arr.take(indexer, axis=0)\n expected[[2, 4], :] = datetime(2007, 1, 1)\n tm.assert_almost_equal(result, expected)\n\n # axis=1\n result = algos.take_nd(arr, indexer, axis=1)\n expected = arr.take(indexer, axis=1)\n expected.view(np.int64)[:, [2, 4]] = iNaT\n tm.assert_almost_equal(result, expected)\n\n result = algos.take_nd(arr, indexer, axis=1, fill_value=datetime(2007, 1, 1))\n expected = arr.take(indexer, axis=1)\n expected[:, [2, 4]] = datetime(2007, 1, 1)\n tm.assert_almost_equal(result, expected)\n\n def test_take_axis_0(self):\n arr = np.arange(12).reshape(4, 3)\n result = algos.take(arr, [0, -1])\n expected = np.array([[0, 1, 2], [9, 10, 11]])\n tm.assert_numpy_array_equal(result, expected)\n\n # allow_fill=True\n result = algos.take(arr, [0, -1], allow_fill=True, fill_value=0)\n expected = np.array([[0, 1, 2], [0, 0, 0]])\n tm.assert_numpy_array_equal(result, expected)\n\n def test_take_axis_1(self):\n arr = np.arange(12).reshape(4, 3)\n result = algos.take(arr, [0, -1], axis=1)\n expected = np.array([[0, 2], [3, 5], [6, 8], [9, 11]])\n tm.assert_numpy_array_equal(result, expected)\n\n # allow_fill=True\n result = algos.take(arr, [0, -1], axis=1, allow_fill=True, fill_value=0)\n expected = np.array([[0, 0], [3, 0], [6, 0], [9, 0]])\n tm.assert_numpy_array_equal(result, expected)\n\n # GH#26976 make sure we validate along the correct axis\n with pytest.raises(IndexError, match="indices are out-of-bounds"):\n 
algos.take(arr, [0, 3], axis=1, allow_fill=True, fill_value=0)\n\n def test_take_non_hashable_fill_value(self):\n arr = np.array([1, 2, 3])\n indexer = np.array([1, -1])\n with pytest.raises(ValueError, match="fill_value must be a scalar"):\n algos.take(arr, indexer, allow_fill=True, fill_value=[1])\n\n # with object dtype it is allowed\n arr = np.array([1, 2, 3], dtype=object)\n result = algos.take(arr, indexer, allow_fill=True, fill_value=[1])\n expected = np.array([2, [1]], dtype=object)\n tm.assert_numpy_array_equal(result, expected)\n\n\nclass TestExtensionTake:\n # The take method found in pd.api.extensions\n\n def test_bounds_check_large(self):\n arr = np.array([1, 2])\n\n msg = "indices are out-of-bounds"\n with pytest.raises(IndexError, match=msg):\n algos.take(arr, [2, 3], allow_fill=True)\n\n msg = "index 2 is out of bounds for( axis 0 with)? size 2"\n with pytest.raises(IndexError, match=msg):\n algos.take(arr, [2, 3], allow_fill=False)\n\n def test_bounds_check_small(self):\n arr = np.array([1, 2, 3], dtype=np.int64)\n indexer = [0, -1, -2]\n\n msg = r"'indices' contains values less than allowed \(-2 < -1\)"\n with pytest.raises(ValueError, match=msg):\n algos.take(arr, indexer, allow_fill=True)\n\n result = algos.take(arr, indexer)\n expected = np.array([1, 3, 2], dtype=np.int64)\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize("allow_fill", [True, False])\n def test_take_empty(self, allow_fill):\n arr = np.array([], dtype=np.int64)\n # empty take is ok\n result = algos.take(arr, [], allow_fill=allow_fill)\n tm.assert_numpy_array_equal(arr, result)\n\n msg = "|".join(\n [\n "cannot do a non-empty take from an empty axes.",\n "indices are out-of-bounds",\n ]\n )\n with pytest.raises(IndexError, match=msg):\n algos.take(arr, [0], allow_fill=allow_fill)\n\n def test_take_na_empty(self):\n result = algos.take(np.array([]), [-1, -1], allow_fill=True, fill_value=0.0)\n expected = np.array([0.0, 0.0])\n 
tm.assert_numpy_array_equal(result, expected)\n\n def test_take_coerces_list(self):\n arr = [1, 2, 3]\n msg = "take accepting non-standard inputs is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = algos.take(arr, [0, 0])\n expected = np.array([1, 1])\n tm.assert_numpy_array_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\test_take.py | test_take.py | Python | 11,539 | 0.95 | 0.068404 | 0.057613 | node-utils | 840 | 2023-09-24T15:54:43.511189 | MIT | true | 94d13fe36e459f7c12dac0e5b321262b |
from __future__ import annotations\n\nimport pytest\n\nimport pandas as pd\nfrom pandas import api\nimport pandas._testing as tm\nfrom pandas.api import (\n extensions as api_extensions,\n indexers as api_indexers,\n interchange as api_interchange,\n types as api_types,\n typing as api_typing,\n)\n\n\nclass Base:\n def check(self, namespace, expected, ignored=None):\n # see which names are in the namespace, minus optional\n # ignored ones\n # compare vs the expected\n\n result = sorted(\n f for f in dir(namespace) if not f.startswith("__") and f != "annotations"\n )\n if ignored is not None:\n result = sorted(set(result) - set(ignored))\n\n expected = sorted(expected)\n tm.assert_almost_equal(result, expected)\n\n\nclass TestPDApi(Base):\n # these are optionally imported based on testing\n # & need to be ignored\n ignored = ["tests", "locale", "conftest", "_version_meson"]\n\n # top-level sub-packages\n public_lib = [\n "api",\n "arrays",\n "options",\n "test",\n "testing",\n "errors",\n "plotting",\n "io",\n "tseries",\n ]\n private_lib = ["compat", "core", "pandas", "util", "_built_with_meson"]\n\n # misc\n misc = ["IndexSlice", "NaT", "NA"]\n\n # top-level classes\n classes = [\n "ArrowDtype",\n "Categorical",\n "CategoricalIndex",\n "DataFrame",\n "DateOffset",\n "DatetimeIndex",\n "ExcelFile",\n "ExcelWriter",\n "Flags",\n "Grouper",\n "HDFStore",\n "Index",\n "MultiIndex",\n "Period",\n "PeriodIndex",\n "RangeIndex",\n "Series",\n "SparseDtype",\n "StringDtype",\n "Timedelta",\n "TimedeltaIndex",\n "Timestamp",\n "Interval",\n "IntervalIndex",\n "CategoricalDtype",\n "PeriodDtype",\n "IntervalDtype",\n "DatetimeTZDtype",\n "BooleanDtype",\n "Int8Dtype",\n "Int16Dtype",\n "Int32Dtype",\n "Int64Dtype",\n "UInt8Dtype",\n "UInt16Dtype",\n "UInt32Dtype",\n "UInt64Dtype",\n "Float32Dtype",\n "Float64Dtype",\n "NamedAgg",\n ]\n\n # these are already deprecated; awaiting removal\n deprecated_classes: list[str] = []\n\n # external modules exposed in pandas namespace\n 
modules: list[str] = []\n\n # top-level functions\n funcs = [\n "array",\n "bdate_range",\n "concat",\n "crosstab",\n "cut",\n "date_range",\n "interval_range",\n "eval",\n "factorize",\n "get_dummies",\n "from_dummies",\n "infer_freq",\n "isna",\n "isnull",\n "lreshape",\n "melt",\n "notna",\n "notnull",\n "offsets",\n "merge",\n "merge_ordered",\n "merge_asof",\n "period_range",\n "pivot",\n "pivot_table",\n "qcut",\n "show_versions",\n "timedelta_range",\n "unique",\n "value_counts",\n "wide_to_long",\n ]\n\n # top-level option funcs\n funcs_option = [\n "reset_option",\n "describe_option",\n "get_option",\n "option_context",\n "set_option",\n "set_eng_float_format",\n ]\n\n # top-level read_* funcs\n funcs_read = [\n "read_clipboard",\n "read_csv",\n "read_excel",\n "read_fwf",\n "read_gbq",\n "read_hdf",\n "read_html",\n "read_xml",\n "read_json",\n "read_pickle",\n "read_sas",\n "read_sql",\n "read_sql_query",\n "read_sql_table",\n "read_stata",\n "read_table",\n "read_feather",\n "read_parquet",\n "read_orc",\n "read_spss",\n ]\n\n # top-level json funcs\n funcs_json = ["json_normalize"]\n\n # top-level to_* funcs\n funcs_to = ["to_datetime", "to_numeric", "to_pickle", "to_timedelta"]\n\n # top-level to deprecate in the future\n deprecated_funcs_in_future: list[str] = []\n\n # these are already deprecated; awaiting removal\n deprecated_funcs: list[str] = []\n\n # private modules in pandas namespace\n private_modules = [\n "_config",\n "_libs",\n "_is_numpy_dev",\n "_pandas_datetime_CAPI",\n "_pandas_parser_CAPI",\n "_testing",\n "_typing",\n ]\n if not pd._built_with_meson:\n private_modules.append("_version")\n\n def test_api(self):\n checkthese = (\n self.public_lib\n + self.private_lib\n + self.misc\n + self.modules\n + self.classes\n + self.funcs\n + self.funcs_option\n + self.funcs_read\n + self.funcs_json\n + self.funcs_to\n + self.private_modules\n )\n self.check(namespace=pd, expected=checkthese, ignored=self.ignored)\n\n def test_api_all(self):\n 
expected = set(\n self.public_lib\n + self.misc\n + self.modules\n + self.classes\n + self.funcs\n + self.funcs_option\n + self.funcs_read\n + self.funcs_json\n + self.funcs_to\n ) - set(self.deprecated_classes)\n actual = set(pd.__all__)\n\n extraneous = actual - expected\n assert not extraneous\n\n missing = expected - actual\n assert not missing\n\n def test_depr(self):\n deprecated_list = (\n self.deprecated_classes\n + self.deprecated_funcs\n + self.deprecated_funcs_in_future\n )\n for depr in deprecated_list:\n with tm.assert_produces_warning(FutureWarning):\n _ = getattr(pd, depr)\n\n\nclass TestApi(Base):\n allowed_api_dirs = [\n "types",\n "extensions",\n "indexers",\n "interchange",\n "typing",\n ]\n allowed_typing = [\n "DataFrameGroupBy",\n "DatetimeIndexResamplerGroupby",\n "Expanding",\n "ExpandingGroupby",\n "ExponentialMovingWindow",\n "ExponentialMovingWindowGroupby",\n "JsonReader",\n "NaTType",\n "NAType",\n "PeriodIndexResamplerGroupby",\n "Resampler",\n "Rolling",\n "RollingGroupby",\n "SeriesGroupBy",\n "StataReader",\n "TimedeltaIndexResamplerGroupby",\n "TimeGrouper",\n "Window",\n ]\n allowed_api_types = [\n "is_any_real_numeric_dtype",\n "is_array_like",\n "is_bool",\n "is_bool_dtype",\n "is_categorical_dtype",\n "is_complex",\n "is_complex_dtype",\n "is_datetime64_any_dtype",\n "is_datetime64_dtype",\n "is_datetime64_ns_dtype",\n "is_datetime64tz_dtype",\n "is_dict_like",\n "is_dtype_equal",\n "is_extension_array_dtype",\n "is_file_like",\n "is_float",\n "is_float_dtype",\n "is_hashable",\n "is_int64_dtype",\n "is_integer",\n "is_integer_dtype",\n "is_interval",\n "is_interval_dtype",\n "is_iterator",\n "is_list_like",\n "is_named_tuple",\n "is_number",\n "is_numeric_dtype",\n "is_object_dtype",\n "is_period_dtype",\n "is_re",\n "is_re_compilable",\n "is_scalar",\n "is_signed_integer_dtype",\n "is_sparse",\n "is_string_dtype",\n "is_timedelta64_dtype",\n "is_timedelta64_ns_dtype",\n "is_unsigned_integer_dtype",\n "pandas_dtype",\n 
"infer_dtype",\n "union_categoricals",\n "CategoricalDtype",\n "DatetimeTZDtype",\n "IntervalDtype",\n "PeriodDtype",\n ]\n allowed_api_interchange = ["from_dataframe", "DataFrame"]\n allowed_api_indexers = [\n "check_array_indexer",\n "BaseIndexer",\n "FixedForwardWindowIndexer",\n "VariableOffsetWindowIndexer",\n ]\n allowed_api_extensions = [\n "no_default",\n "ExtensionDtype",\n "register_extension_dtype",\n "register_dataframe_accessor",\n "register_index_accessor",\n "register_series_accessor",\n "take",\n "ExtensionArray",\n "ExtensionScalarOpsMixin",\n ]\n\n def test_api(self):\n self.check(api, self.allowed_api_dirs)\n\n def test_api_typing(self):\n self.check(api_typing, self.allowed_typing)\n\n def test_api_types(self):\n self.check(api_types, self.allowed_api_types)\n\n def test_api_interchange(self):\n self.check(api_interchange, self.allowed_api_interchange)\n\n def test_api_indexers(self):\n self.check(api_indexers, self.allowed_api_indexers)\n\n def test_api_extensions(self):\n self.check(api_extensions, self.allowed_api_extensions)\n\n\nclass TestTesting(Base):\n funcs = [\n "assert_frame_equal",\n "assert_series_equal",\n "assert_index_equal",\n "assert_extension_array_equal",\n ]\n\n def test_testing(self):\n from pandas import testing\n\n self.check(testing, self.funcs)\n\n def test_util_in_top_level(self):\n with pytest.raises(AttributeError, match="foo"):\n pd.util.foo\n\n\ndef test_pandas_array_alias():\n msg = "PandasArray has been renamed NumpyExtensionArray"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n res = pd.arrays.PandasArray\n\n assert res is pd.arrays.NumpyExtensionArray\n | .venv\Lib\site-packages\pandas\tests\api\test_api.py | test_api.py | Python | 9,394 | 0.95 | 0.057441 | 0.052786 | vue-tools | 883 | 2024-08-08T05:48:14.019449 | GPL-3.0 | true | 4fde2bc403149c916bd439bb149c85a8 |
from __future__ import annotations\n\nimport pandas._testing as tm\nfrom pandas.api import types\nfrom pandas.tests.api.test_api import Base\n\n\nclass TestTypes(Base):\n allowed = [\n "is_any_real_numeric_dtype",\n "is_bool",\n "is_bool_dtype",\n "is_categorical_dtype",\n "is_complex",\n "is_complex_dtype",\n "is_datetime64_any_dtype",\n "is_datetime64_dtype",\n "is_datetime64_ns_dtype",\n "is_datetime64tz_dtype",\n "is_dtype_equal",\n "is_float",\n "is_float_dtype",\n "is_int64_dtype",\n "is_integer",\n "is_integer_dtype",\n "is_number",\n "is_numeric_dtype",\n "is_object_dtype",\n "is_scalar",\n "is_sparse",\n "is_string_dtype",\n "is_signed_integer_dtype",\n "is_timedelta64_dtype",\n "is_timedelta64_ns_dtype",\n "is_unsigned_integer_dtype",\n "is_period_dtype",\n "is_interval",\n "is_interval_dtype",\n "is_re",\n "is_re_compilable",\n "is_dict_like",\n "is_iterator",\n "is_file_like",\n "is_list_like",\n "is_hashable",\n "is_array_like",\n "is_named_tuple",\n "pandas_dtype",\n "union_categoricals",\n "infer_dtype",\n "is_extension_array_dtype",\n ]\n deprecated: list[str] = []\n dtypes = ["CategoricalDtype", "DatetimeTZDtype", "PeriodDtype", "IntervalDtype"]\n\n def test_types(self):\n self.check(types, self.allowed + self.dtypes + self.deprecated)\n\n def test_deprecated_from_api_types(self):\n for t in self.deprecated:\n with tm.assert_produces_warning(FutureWarning):\n getattr(types, t)(1)\n | .venv\Lib\site-packages\pandas\tests\api\test_types.py | test_types.py | Python | 1,711 | 0.85 | 0.064516 | 0 | node-utils | 599 | 2023-10-02T22:29:28.502213 | MIT | true | e1206b751d9fa5ac0ce94a41f4ac2cba |
\n\n | .venv\Lib\site-packages\pandas\tests\api\__pycache__\test_api.cpython-313.pyc | test_api.cpython-313.pyc | Other | 10,603 | 0.8 | 0 | 0.007463 | python-kit | 184 | 2024-04-09T22:01:50.734840 | MIT | true | 413c0a34405470322d92a5843564f517 |
\n\n | .venv\Lib\site-packages\pandas\tests\api\__pycache__\test_types.cpython-313.pyc | test_types.cpython-313.pyc | Other | 2,306 | 0.7 | 0 | 0 | awesome-app | 315 | 2024-05-12T00:01:35.476855 | Apache-2.0 | true | 0a8d9a0f984d407c9220048eca5f428a |
\n\n | .venv\Lib\site-packages\pandas\tests\api\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 191 | 0.7 | 0 | 0 | node-utils | 676 | 2024-12-21T15:48:37.256511 | Apache-2.0 | true | 2bcc55a23cfea8a0339fb2fa946ce740 |
from pandas.core.groupby.base import transformation_kernels\n\n# There is no Series.cumcount or DataFrame.cumcount\nseries_transform_kernels = [\n x for x in sorted(transformation_kernels) if x != "cumcount"\n]\nframe_transform_kernels = [x for x in sorted(transformation_kernels) if x != "cumcount"]\n | .venv\Lib\site-packages\pandas\tests\apply\common.py | common.py | Python | 298 | 0.95 | 0.571429 | 0.166667 | node-utils | 677 | 2025-01-17T00:18:57.460579 | GPL-3.0 | true | d21e9d546e3880714d673cedc121d8c6 |
from datetime import datetime\nimport warnings\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import is_platform_arm\n\nfrom pandas.core.dtypes.dtypes import CategoricalDtype\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n MultiIndex,\n Series,\n Timestamp,\n date_range,\n)\nimport pandas._testing as tm\nfrom pandas.tests.frame.common import zip_frames\nfrom pandas.util.version import Version\n\n\n@pytest.fixture\ndef int_frame_const_col():\n """\n Fixture for DataFrame of ints which are constant per column\n\n Columns are ['A', 'B', 'C'], with values (per column): [1, 2, 3]\n """\n df = DataFrame(\n np.tile(np.arange(3, dtype="int64"), 6).reshape(6, -1) + 1,\n columns=["A", "B", "C"],\n )\n return df\n\n\n@pytest.fixture(params=["python", pytest.param("numba", marks=pytest.mark.single_cpu)])\ndef engine(request):\n if request.param == "numba":\n pytest.importorskip("numba")\n return request.param\n\n\ndef test_apply(float_frame, engine, request):\n if engine == "numba":\n mark = pytest.mark.xfail(reason="numba engine not supporting numpy ufunc yet")\n request.node.add_marker(mark)\n with np.errstate(all="ignore"):\n # ufunc\n result = np.sqrt(float_frame["A"])\n expected = float_frame.apply(np.sqrt, engine=engine)["A"]\n tm.assert_series_equal(result, expected)\n\n # aggregator\n result = float_frame.apply(np.mean, engine=engine)["A"]\n expected = np.mean(float_frame["A"])\n assert result == expected\n\n d = float_frame.index[0]\n result = float_frame.apply(np.mean, axis=1, engine=engine)\n expected = np.mean(float_frame.xs(d))\n assert result[d] == expected\n assert result.index is float_frame.index\n\n\n@pytest.mark.parametrize("axis", [0, 1])\n@pytest.mark.parametrize("raw", [True, False])\ndef test_apply_args(float_frame, axis, raw, engine, request):\n if engine == "numba":\n numba = pytest.importorskip("numba")\n if Version(numba.__version__) == Version("0.61") and is_platform_arm():\n pytest.skip(f"Segfaults on ARM platforms with 
numba {numba.__version__}")\n mark = pytest.mark.xfail(reason="numba engine doesn't support args")\n request.node.add_marker(mark)\n result = float_frame.apply(\n lambda x, y: x + y, axis, args=(1,), raw=raw, engine=engine\n )\n expected = float_frame + 1\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_categorical_func():\n # GH 9573\n df = DataFrame({"c0": ["A", "A", "B", "B"], "c1": ["C", "C", "D", "D"]})\n result = df.apply(lambda ts: ts.astype("category"))\n\n assert result.shape == (4, 2)\n assert isinstance(result["c0"].dtype, CategoricalDtype)\n assert isinstance(result["c1"].dtype, CategoricalDtype)\n\n\ndef test_apply_axis1_with_ea():\n # GH#36785\n expected = DataFrame({"A": [Timestamp("2013-01-01", tz="UTC")]})\n result = expected.apply(lambda x: x, axis=1)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "data, dtype",\n [(1, None), (1, CategoricalDtype([1])), (Timestamp("2013-01-01", tz="UTC"), None)],\n)\ndef test_agg_axis1_duplicate_index(data, dtype):\n # GH 42380\n expected = DataFrame([[data], [data]], index=["a", "a"], dtype=dtype)\n result = expected.agg(lambda x: x, axis=1)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_mixed_datetimelike():\n # mixed datetimelike\n # GH 7778\n expected = DataFrame(\n {\n "A": date_range("20130101", periods=3),\n "B": pd.to_timedelta(np.arange(3), unit="s"),\n }\n )\n result = expected.apply(lambda x: x, axis=1)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("func", [np.sqrt, np.mean])\ndef test_apply_empty(func, engine):\n # empty\n empty_frame = DataFrame()\n\n result = empty_frame.apply(func, engine=engine)\n assert result.empty\n\n\ndef test_apply_float_frame(float_frame, engine):\n no_rows = float_frame[:0]\n result = no_rows.apply(lambda x: x.mean(), engine=engine)\n expected = Series(np.nan, index=float_frame.columns)\n tm.assert_series_equal(result, expected)\n\n no_cols = float_frame.loc[:, []]\n result = 
no_cols.apply(lambda x: x.mean(), axis=1, engine=engine)\n expected = Series(np.nan, index=float_frame.index)\n tm.assert_series_equal(result, expected)\n\n\ndef test_apply_empty_except_index(engine):\n # GH 2476\n expected = DataFrame(index=["a"])\n result = expected.apply(lambda x: x["a"], axis=1, engine=engine)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_with_reduce_empty():\n # reduce with an empty DataFrame\n empty_frame = DataFrame()\n\n x = []\n result = empty_frame.apply(x.append, axis=1, result_type="expand")\n tm.assert_frame_equal(result, empty_frame)\n result = empty_frame.apply(x.append, axis=1, result_type="reduce")\n expected = Series([], dtype=np.float64)\n tm.assert_series_equal(result, expected)\n\n empty_with_cols = DataFrame(columns=["a", "b", "c"])\n result = empty_with_cols.apply(x.append, axis=1, result_type="expand")\n tm.assert_frame_equal(result, empty_with_cols)\n result = empty_with_cols.apply(x.append, axis=1, result_type="reduce")\n expected = Series([], dtype=np.float64)\n tm.assert_series_equal(result, expected)\n\n # Ensure that x.append hasn't been called\n assert x == []\n\n\n@pytest.mark.parametrize("func", ["sum", "prod", "any", "all"])\ndef test_apply_funcs_over_empty(func):\n # GH 28213\n df = DataFrame(columns=["a", "b", "c"])\n\n result = df.apply(getattr(np, func))\n expected = getattr(df, func)()\n if func in ("sum", "prod"):\n expected = expected.astype(float)\n tm.assert_series_equal(result, expected)\n\n\ndef test_nunique_empty():\n # GH 28213\n df = DataFrame(columns=["a", "b", "c"])\n\n result = df.nunique()\n expected = Series(0, index=df.columns)\n tm.assert_series_equal(result, expected)\n\n result = df.T.nunique()\n expected = Series([], dtype=np.float64)\n tm.assert_series_equal(result, expected)\n\n\ndef test_apply_standard_nonunique():\n df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["a", "a", "c"])\n\n result = df.apply(lambda s: s[0], axis=1)\n expected = Series([1, 4, 7], ["a", 
"a", "c"])\n tm.assert_series_equal(result, expected)\n\n result = df.T.apply(lambda s: s[0], axis=0)\n tm.assert_series_equal(result, expected)\n\n\ndef test_apply_broadcast_scalars(float_frame):\n # scalars\n result = float_frame.apply(np.mean, result_type="broadcast")\n expected = DataFrame([float_frame.mean()], index=float_frame.index)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_broadcast_scalars_axis1(float_frame):\n result = float_frame.apply(np.mean, axis=1, result_type="broadcast")\n m = float_frame.mean(axis=1)\n expected = DataFrame({c: m for c in float_frame.columns})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_broadcast_lists_columns(float_frame):\n # lists\n result = float_frame.apply(\n lambda x: list(range(len(float_frame.columns))),\n axis=1,\n result_type="broadcast",\n )\n m = list(range(len(float_frame.columns)))\n expected = DataFrame(\n [m] * len(float_frame.index),\n dtype="float64",\n index=float_frame.index,\n columns=float_frame.columns,\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_broadcast_lists_index(float_frame):\n result = float_frame.apply(\n lambda x: list(range(len(float_frame.index))), result_type="broadcast"\n )\n m = list(range(len(float_frame.index)))\n expected = DataFrame(\n {c: m for c in float_frame.columns},\n dtype="float64",\n index=float_frame.index,\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_broadcast_list_lambda_func(int_frame_const_col):\n # preserve columns\n df = int_frame_const_col\n result = df.apply(lambda x: [1, 2, 3], axis=1, result_type="broadcast")\n tm.assert_frame_equal(result, df)\n\n\ndef test_apply_broadcast_series_lambda_func(int_frame_const_col):\n df = int_frame_const_col\n result = df.apply(\n lambda x: Series([1, 2, 3], index=list("abc")),\n axis=1,\n result_type="broadcast",\n )\n expected = df.copy()\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("axis", [0, 1])\ndef 
test_apply_raw_float_frame(float_frame, axis, engine):\n if engine == "numba":\n pytest.skip("numba can't handle when UDF returns None.")\n\n def _assert_raw(x):\n assert isinstance(x, np.ndarray)\n assert x.ndim == 1\n\n float_frame.apply(_assert_raw, axis=axis, engine=engine, raw=True)\n\n\n@pytest.mark.parametrize("axis", [0, 1])\ndef test_apply_raw_float_frame_lambda(float_frame, axis, engine):\n result = float_frame.apply(np.mean, axis=axis, engine=engine, raw=True)\n expected = float_frame.apply(lambda x: x.values.mean(), axis=axis)\n tm.assert_series_equal(result, expected)\n\n\ndef test_apply_raw_float_frame_no_reduction(float_frame, engine):\n # no reduction\n result = float_frame.apply(lambda x: x * 2, engine=engine, raw=True)\n expected = float_frame * 2\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("axis", [0, 1])\ndef test_apply_raw_mixed_type_frame(axis, engine):\n if engine == "numba":\n pytest.skip("isinstance check doesn't work with numba")\n\n def _assert_raw(x):\n assert isinstance(x, np.ndarray)\n assert x.ndim == 1\n\n # Mixed dtype (GH-32423)\n df = DataFrame(\n {\n "a": 1.0,\n "b": 2,\n "c": "foo",\n "float32": np.array([1.0] * 10, dtype="float32"),\n "int32": np.array([1] * 10, dtype="int32"),\n },\n index=np.arange(10),\n )\n df.apply(_assert_raw, axis=axis, engine=engine, raw=True)\n\n\ndef test_apply_axis1(float_frame):\n d = float_frame.index[0]\n result = float_frame.apply(np.mean, axis=1)[d]\n expected = np.mean(float_frame.xs(d))\n assert result == expected\n\n\ndef test_apply_mixed_dtype_corner():\n df = DataFrame({"A": ["foo"], "B": [1.0]})\n result = df[:0].apply(np.mean, axis=1)\n # the result here is actually kind of ambiguous, should it be a Series\n # or a DataFrame?\n expected = Series(np.nan, index=pd.Index([], dtype="int64"))\n tm.assert_series_equal(result, expected)\n\n\ndef test_apply_mixed_dtype_corner_indexing():\n df = DataFrame({"A": ["foo"], "B": [1.0]})\n result = df.apply(lambda x: x["A"], 
axis=1)\n expected = Series(["foo"], index=[0])\n tm.assert_series_equal(result, expected)\n\n result = df.apply(lambda x: x["B"], axis=1)\n expected = Series([1.0], index=[0])\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.filterwarnings("ignore::RuntimeWarning")\n@pytest.mark.parametrize("ax", ["index", "columns"])\n@pytest.mark.parametrize(\n "func", [lambda x: x, lambda x: x.mean()], ids=["identity", "mean"]\n)\n@pytest.mark.parametrize("raw", [True, False])\n@pytest.mark.parametrize("axis", [0, 1])\ndef test_apply_empty_infer_type(ax, func, raw, axis, engine, request):\n df = DataFrame(**{ax: ["a", "b", "c"]})\n\n with np.errstate(all="ignore"):\n test_res = func(np.array([], dtype="f8"))\n is_reduction = not isinstance(test_res, np.ndarray)\n\n result = df.apply(func, axis=axis, engine=engine, raw=raw)\n if is_reduction:\n agg_axis = df._get_agg_axis(axis)\n assert isinstance(result, Series)\n assert result.index is agg_axis\n else:\n assert isinstance(result, DataFrame)\n\n\ndef test_apply_empty_infer_type_broadcast():\n no_cols = DataFrame(index=["a", "b", "c"])\n result = no_cols.apply(lambda x: x.mean(), result_type="broadcast")\n assert isinstance(result, DataFrame)\n\n\ndef test_apply_with_args_kwds_add_some(float_frame):\n def add_some(x, howmuch=0):\n return x + howmuch\n\n result = float_frame.apply(add_some, howmuch=2)\n expected = float_frame.apply(lambda x: x + 2)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_with_args_kwds_agg_and_add(float_frame):\n def agg_and_add(x, howmuch=0):\n return x.mean() + howmuch\n\n result = float_frame.apply(agg_and_add, howmuch=2)\n expected = float_frame.apply(lambda x: x.mean() + 2)\n tm.assert_series_equal(result, expected)\n\n\ndef test_apply_with_args_kwds_subtract_and_divide(float_frame):\n def subtract_and_divide(x, sub, divide=1):\n return (x - sub) / divide\n\n result = float_frame.apply(subtract_and_divide, args=(2,), divide=2)\n expected = float_frame.apply(lambda x: (x - 
2.0) / 2.0)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_yield_list(float_frame):\n result = float_frame.apply(list)\n tm.assert_frame_equal(result, float_frame)\n\n\ndef test_apply_reduce_Series(float_frame):\n float_frame.iloc[::2, float_frame.columns.get_loc("A")] = np.nan\n expected = float_frame.mean(1)\n result = float_frame.apply(np.mean, axis=1)\n tm.assert_series_equal(result, expected)\n\n\ndef test_apply_reduce_to_dict():\n # GH 25196 37544\n data = DataFrame([[1, 2], [3, 4]], columns=["c0", "c1"], index=["i0", "i1"])\n\n result = data.apply(dict, axis=0)\n expected = Series([{"i0": 1, "i1": 3}, {"i0": 2, "i1": 4}], index=data.columns)\n tm.assert_series_equal(result, expected)\n\n result = data.apply(dict, axis=1)\n expected = Series([{"c0": 1, "c1": 2}, {"c0": 3, "c1": 4}], index=data.index)\n tm.assert_series_equal(result, expected)\n\n\ndef test_apply_differently_indexed():\n df = DataFrame(np.random.default_rng(2).standard_normal((20, 10)))\n\n result = df.apply(Series.describe, axis=0)\n expected = DataFrame({i: v.describe() for i, v in df.items()}, columns=df.columns)\n tm.assert_frame_equal(result, expected)\n\n result = df.apply(Series.describe, axis=1)\n expected = DataFrame({i: v.describe() for i, v in df.T.items()}, columns=df.index).T\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_bug():\n # GH 6125\n positions = DataFrame(\n [\n [1, "ABC0", 50],\n [1, "YUM0", 20],\n [1, "DEF0", 20],\n [2, "ABC1", 50],\n [2, "YUM1", 20],\n [2, "DEF1", 20],\n ],\n columns=["a", "market", "position"],\n )\n\n def f(r):\n return r["market"]\n\n expected = positions.apply(f, axis=1)\n\n positions = DataFrame(\n [\n [datetime(2013, 1, 1), "ABC0", 50],\n [datetime(2013, 1, 2), "YUM0", 20],\n [datetime(2013, 1, 3), "DEF0", 20],\n [datetime(2013, 1, 4), "ABC1", 50],\n [datetime(2013, 1, 5), "YUM1", 20],\n [datetime(2013, 1, 6), "DEF1", 20],\n ],\n columns=["a", "market", "position"],\n )\n result = positions.apply(f, axis=1)\n 
tm.assert_series_equal(result, expected)\n\n\ndef test_apply_convert_objects():\n expected = DataFrame(\n {\n "A": [\n "foo",\n "foo",\n "foo",\n "foo",\n "bar",\n "bar",\n "bar",\n "bar",\n "foo",\n "foo",\n "foo",\n ],\n "B": [\n "one",\n "one",\n "one",\n "two",\n "one",\n "one",\n "one",\n "two",\n "two",\n "two",\n "one",\n ],\n "C": [\n "dull",\n "dull",\n "shiny",\n "dull",\n "dull",\n "shiny",\n "shiny",\n "dull",\n "shiny",\n "shiny",\n "shiny",\n ],\n "D": np.random.default_rng(2).standard_normal(11),\n "E": np.random.default_rng(2).standard_normal(11),\n "F": np.random.default_rng(2).standard_normal(11),\n }\n )\n\n result = expected.apply(lambda x: x, axis=1)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_attach_name(float_frame):\n result = float_frame.apply(lambda x: x.name)\n expected = Series(float_frame.columns, index=float_frame.columns)\n tm.assert_series_equal(result, expected)\n\n\ndef test_apply_attach_name_axis1(float_frame):\n result = float_frame.apply(lambda x: x.name, axis=1)\n expected = Series(float_frame.index, index=float_frame.index)\n tm.assert_series_equal(result, expected)\n\n\ndef test_apply_attach_name_non_reduction(float_frame):\n # non-reductions\n result = float_frame.apply(lambda x: np.repeat(x.name, len(x)))\n expected = DataFrame(\n np.tile(float_frame.columns, (len(float_frame.index), 1)),\n index=float_frame.index,\n columns=float_frame.columns,\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_attach_name_non_reduction_axis1(float_frame):\n result = float_frame.apply(lambda x: np.repeat(x.name, len(x)), axis=1)\n expected = Series(\n np.repeat(t[0], len(float_frame.columns)) for t in float_frame.itertuples()\n )\n expected.index = float_frame.index\n tm.assert_series_equal(result, expected)\n\n\ndef test_apply_multi_index():\n index = MultiIndex.from_arrays([["a", "a", "b"], ["c", "d", "d"]])\n s = DataFrame([[1, 2], [3, 4], [5, 6]], index=index, columns=["col1", "col2"])\n result = 
s.apply(lambda x: Series({"min": min(x), "max": max(x)}), 1)\n expected = DataFrame([[1, 2], [3, 4], [5, 6]], index=index, columns=["min", "max"])\n tm.assert_frame_equal(result, expected, check_like=True)\n\n\n@pytest.mark.parametrize(\n "df, dicts",\n [\n [\n DataFrame([["foo", "bar"], ["spam", "eggs"]]),\n Series([{0: "foo", 1: "spam"}, {0: "bar", 1: "eggs"}]),\n ],\n [DataFrame([[0, 1], [2, 3]]), Series([{0: 0, 1: 2}, {0: 1, 1: 3}])],\n ],\n)\ndef test_apply_dict(df, dicts):\n # GH 8735\n fn = lambda x: x.to_dict()\n reduce_true = df.apply(fn, result_type="reduce")\n reduce_false = df.apply(fn, result_type="expand")\n reduce_none = df.apply(fn)\n\n tm.assert_series_equal(reduce_true, dicts)\n tm.assert_frame_equal(reduce_false, df)\n tm.assert_series_equal(reduce_none, dicts)\n\n\ndef test_apply_non_numpy_dtype():\n # GH 12244\n df = DataFrame({"dt": date_range("2015-01-01", periods=3, tz="Europe/Brussels")})\n result = df.apply(lambda x: x)\n tm.assert_frame_equal(result, df)\n\n result = df.apply(lambda x: x + pd.Timedelta("1day"))\n expected = DataFrame(\n {"dt": date_range("2015-01-02", periods=3, tz="Europe/Brussels")}\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_non_numpy_dtype_category():\n df = DataFrame({"dt": ["a", "b", "c", "a"]}, dtype="category")\n result = df.apply(lambda x: x)\n tm.assert_frame_equal(result, df)\n\n\ndef test_apply_dup_names_multi_agg():\n # GH 21063\n df = DataFrame([[0, 1], [2, 3]], columns=["a", "a"])\n expected = DataFrame([[0, 1]], columns=["a", "a"], index=["min"])\n result = df.agg(["min"])\n\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("op", ["apply", "agg"])\ndef test_apply_nested_result_axis_1(op):\n # GH 13820\n def apply_list(row):\n return [2 * row["A"], 2 * row["C"], 2 * row["B"]]\n\n df = DataFrame(np.zeros((4, 4)), columns=list("ABCD"))\n result = getattr(df, op)(apply_list, axis=1)\n expected = Series(\n [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 
0.0]]\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_apply_noreduction_tzaware_object():\n # https://github.com/pandas-dev/pandas/issues/31505\n expected = DataFrame(\n {"foo": [Timestamp("2020", tz="UTC")]}, dtype="datetime64[ns, UTC]"\n )\n result = expected.apply(lambda x: x)\n tm.assert_frame_equal(result, expected)\n result = expected.apply(lambda x: x.copy())\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_function_runs_once():\n # https://github.com/pandas-dev/pandas/issues/30815\n\n df = DataFrame({"a": [1, 2, 3]})\n names = [] # Save row names function is applied to\n\n def reducing_function(row):\n names.append(row.name)\n\n def non_reducing_function(row):\n names.append(row.name)\n return row\n\n for func in [reducing_function, non_reducing_function]:\n del names[:]\n\n df.apply(func, axis=1)\n assert names == list(df.index)\n\n\ndef test_apply_raw_function_runs_once(engine):\n # https://github.com/pandas-dev/pandas/issues/34506\n if engine == "numba":\n pytest.skip("appending to list outside of numba func is not supported")\n\n df = DataFrame({"a": [1, 2, 3]})\n values = [] # Save row values function is applied to\n\n def reducing_function(row):\n values.extend(row)\n\n def non_reducing_function(row):\n values.extend(row)\n return row\n\n for func in [reducing_function, non_reducing_function]:\n del values[:]\n\n df.apply(func, engine=engine, raw=True, axis=1)\n assert values == list(df.a.to_list())\n\n\ndef test_apply_with_byte_string():\n # GH 34529\n df = DataFrame(np.array([b"abcd", b"efgh"]), columns=["col"])\n expected = DataFrame(np.array([b"abcd", b"efgh"]), columns=["col"], dtype=object)\n # After we make the apply we expect a dataframe just\n # like the original but with the object datatype\n result = df.apply(lambda x: x.astype("object"))\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("val", ["asd", 12, None, np.nan])\ndef test_apply_category_equalness(val):\n # Check if categorical 
comparisons on apply, GH 21239\n df_values = ["asd", None, 12, "asd", "cde", np.nan]\n df = DataFrame({"a": df_values}, dtype="category")\n\n result = df.a.apply(lambda x: x == val)\n expected = Series(\n [np.nan if pd.isnull(x) else x == val for x in df_values], name="a"\n )\n tm.assert_series_equal(result, expected)\n\n\n# the user has supplied an opaque UDF where\n# they are transforming the input that requires\n# us to infer the output\n\n\ndef test_infer_row_shape():\n # GH 17437\n # if row shape is changing, infer it\n df = DataFrame(np.random.default_rng(2).random((10, 2)))\n result = df.apply(np.fft.fft, axis=0).shape\n assert result == (10, 2)\n\n result = df.apply(np.fft.rfft, axis=0).shape\n assert result == (6, 2)\n\n\n@pytest.mark.parametrize(\n "ops, by_row, expected",\n [\n ({"a": lambda x: x + 1}, "compat", DataFrame({"a": [2, 3]})),\n ({"a": lambda x: x + 1}, False, DataFrame({"a": [2, 3]})),\n ({"a": lambda x: x.sum()}, "compat", Series({"a": 3})),\n ({"a": lambda x: x.sum()}, False, Series({"a": 3})),\n (\n {"a": ["sum", np.sum, lambda x: x.sum()]},\n "compat",\n DataFrame({"a": [3, 3, 3]}, index=["sum", "sum", "<lambda>"]),\n ),\n (\n {"a": ["sum", np.sum, lambda x: x.sum()]},\n False,\n DataFrame({"a": [3, 3, 3]}, index=["sum", "sum", "<lambda>"]),\n ),\n ({"a": lambda x: 1}, "compat", DataFrame({"a": [1, 1]})),\n ({"a": lambda x: 1}, False, Series({"a": 1})),\n ],\n)\ndef test_dictlike_lambda(ops, by_row, expected):\n # GH53601\n df = DataFrame({"a": [1, 2]})\n result = df.apply(ops, by_row=by_row)\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "ops",\n [\n {"a": lambda x: x + 1},\n {"a": lambda x: x.sum()},\n {"a": ["sum", np.sum, lambda x: x.sum()]},\n {"a": lambda x: 1},\n ],\n)\ndef test_dictlike_lambda_raises(ops):\n # GH53601\n df = DataFrame({"a": [1, 2]})\n with pytest.raises(ValueError, match="by_row=True not allowed"):\n df.apply(ops, by_row=True)\n\n\ndef test_with_dictlike_columns():\n # GH 17602\n df = 
DataFrame([[1, 2], [1, 2]], columns=["a", "b"])\n result = df.apply(lambda x: {"s": x["a"] + x["b"]}, axis=1)\n expected = Series([{"s": 3} for t in df.itertuples()])\n tm.assert_series_equal(result, expected)\n\n df["tm"] = [\n Timestamp("2017-05-01 00:00:00"),\n Timestamp("2017-05-02 00:00:00"),\n ]\n result = df.apply(lambda x: {"s": x["a"] + x["b"]}, axis=1)\n tm.assert_series_equal(result, expected)\n\n # compose a series\n result = (df["a"] + df["b"]).apply(lambda x: {"s": x})\n expected = Series([{"s": 3}, {"s": 3}])\n tm.assert_series_equal(result, expected)\n\n\ndef test_with_dictlike_columns_with_datetime():\n # GH 18775\n df = DataFrame()\n df["author"] = ["X", "Y", "Z"]\n df["publisher"] = ["BBC", "NBC", "N24"]\n df["date"] = pd.to_datetime(\n ["17-10-2010 07:15:30", "13-05-2011 08:20:35", "15-01-2013 09:09:09"],\n dayfirst=True,\n )\n result = df.apply(lambda x: {}, axis=1)\n expected = Series([{}, {}, {}])\n tm.assert_series_equal(result, expected)\n\n\ndef test_with_dictlike_columns_with_infer():\n # GH 17602\n df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])\n result = df.apply(lambda x: {"s": x["a"] + x["b"]}, axis=1, result_type="expand")\n expected = DataFrame({"s": [3, 3]})\n tm.assert_frame_equal(result, expected)\n\n df["tm"] = [\n Timestamp("2017-05-01 00:00:00"),\n Timestamp("2017-05-02 00:00:00"),\n ]\n result = df.apply(lambda x: {"s": x["a"] + x["b"]}, axis=1, result_type="expand")\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "ops, by_row, expected",\n [\n ([lambda x: x + 1], "compat", DataFrame({("a", "<lambda>"): [2, 3]})),\n ([lambda x: x + 1], False, DataFrame({("a", "<lambda>"): [2, 3]})),\n ([lambda x: x.sum()], "compat", DataFrame({"a": [3]}, index=["<lambda>"])),\n ([lambda x: x.sum()], False, DataFrame({"a": [3]}, index=["<lambda>"])),\n (\n ["sum", np.sum, lambda x: x.sum()],\n "compat",\n DataFrame({"a": [3, 3, 3]}, index=["sum", "sum", "<lambda>"]),\n ),\n (\n ["sum", np.sum, lambda x: 
x.sum()],\n False,\n DataFrame({"a": [3, 3, 3]}, index=["sum", "sum", "<lambda>"]),\n ),\n (\n [lambda x: x + 1, lambda x: 3],\n "compat",\n DataFrame([[2, 3], [3, 3]], columns=[["a", "a"], ["<lambda>", "<lambda>"]]),\n ),\n (\n [lambda x: 2, lambda x: 3],\n False,\n DataFrame({"a": [2, 3]}, ["<lambda>", "<lambda>"]),\n ),\n ],\n)\ndef test_listlike_lambda(ops, by_row, expected):\n # GH53601\n df = DataFrame({"a": [1, 2]})\n result = df.apply(ops, by_row=by_row)\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "ops",\n [\n [lambda x: x + 1],\n [lambda x: x.sum()],\n ["sum", np.sum, lambda x: x.sum()],\n [lambda x: x + 1, lambda x: 3],\n ],\n)\ndef test_listlike_lambda_raises(ops):\n # GH53601\n df = DataFrame({"a": [1, 2]})\n with pytest.raises(ValueError, match="by_row=True not allowed"):\n df.apply(ops, by_row=True)\n\n\ndef test_with_listlike_columns():\n # GH 17348\n df = DataFrame(\n {\n "a": Series(np.random.default_rng(2).standard_normal(4)),\n "b": ["a", "list", "of", "words"],\n "ts": date_range("2016-10-01", periods=4, freq="h"),\n }\n )\n\n result = df[["a", "b"]].apply(tuple, axis=1)\n expected = Series([t[1:] for t in df[["a", "b"]].itertuples()])\n tm.assert_series_equal(result, expected)\n\n result = df[["a", "ts"]].apply(tuple, axis=1)\n expected = Series([t[1:] for t in df[["a", "ts"]].itertuples()])\n tm.assert_series_equal(result, expected)\n\n\ndef test_with_listlike_columns_returning_list():\n # GH 18919\n df = DataFrame({"x": Series([["a", "b"], ["q"]]), "y": Series([["z"], ["q", "t"]])})\n df.index = MultiIndex.from_tuples([("i0", "j0"), ("i1", "j1")])\n\n result = df.apply(lambda row: [el for el in row["x"] if el in row["y"]], axis=1)\n expected = Series([[], ["q"]], index=df.index)\n tm.assert_series_equal(result, expected)\n\n\ndef test_infer_output_shape_columns():\n # GH 18573\n\n df = DataFrame(\n {\n "number": [1.0, 2.0],\n "string": ["foo", "bar"],\n "datetime": [\n Timestamp("2017-11-29 03:30:00"),\n 
Timestamp("2017-11-29 03:45:00"),\n ],\n }\n )\n result = df.apply(lambda row: (row.number, row.string), axis=1)\n expected = Series([(t.number, t.string) for t in df.itertuples()])\n tm.assert_series_equal(result, expected)\n\n\ndef test_infer_output_shape_listlike_columns():\n # GH 16353\n\n df = DataFrame(\n np.random.default_rng(2).standard_normal((6, 3)), columns=["A", "B", "C"]\n )\n\n result = df.apply(lambda x: [1, 2, 3], axis=1)\n expected = Series([[1, 2, 3] for t in df.itertuples()])\n tm.assert_series_equal(result, expected)\n\n result = df.apply(lambda x: [1, 2], axis=1)\n expected = Series([[1, 2] for t in df.itertuples()])\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("val", [1, 2])\ndef test_infer_output_shape_listlike_columns_np_func(val):\n # GH 17970\n df = DataFrame({"a": [1, 2, 3]}, index=list("abc"))\n\n result = df.apply(lambda row: np.ones(val), axis=1)\n expected = Series([np.ones(val) for t in df.itertuples()], index=df.index)\n tm.assert_series_equal(result, expected)\n\n\ndef test_infer_output_shape_listlike_columns_with_timestamp():\n # GH 17892\n df = DataFrame(\n {\n "a": [\n Timestamp("2010-02-01"),\n Timestamp("2010-02-04"),\n Timestamp("2010-02-05"),\n Timestamp("2010-02-06"),\n ],\n "b": [9, 5, 4, 3],\n "c": [5, 3, 4, 2],\n "d": [1, 2, 3, 4],\n }\n )\n\n def fun(x):\n return (1, 2)\n\n result = df.apply(fun, axis=1)\n expected = Series([(1, 2) for t in df.itertuples()])\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("lst", [[1, 2, 3], [1, 2]])\ndef test_consistent_coerce_for_shapes(lst):\n # we want column names to NOT be propagated\n # just because the shape matches the input shape\n df = DataFrame(\n np.random.default_rng(2).standard_normal((4, 3)), columns=["A", "B", "C"]\n )\n\n result = df.apply(lambda x: lst, axis=1)\n expected = Series([lst for t in df.itertuples()])\n tm.assert_series_equal(result, expected)\n\n\ndef test_consistent_names(int_frame_const_col):\n # if a 
Series is returned, we should use the resulting index names\n df = int_frame_const_col\n\n result = df.apply(\n lambda x: Series([1, 2, 3], index=["test", "other", "cols"]), axis=1\n )\n expected = int_frame_const_col.rename(\n columns={"A": "test", "B": "other", "C": "cols"}\n )\n tm.assert_frame_equal(result, expected)\n\n result = df.apply(lambda x: Series([1, 2], index=["test", "other"]), axis=1)\n expected = expected[["test", "other"]]\n tm.assert_frame_equal(result, expected)\n\n\ndef test_result_type(int_frame_const_col):\n # result_type should be consistent no matter which\n # path we take in the code\n df = int_frame_const_col\n\n result = df.apply(lambda x: [1, 2, 3], axis=1, result_type="expand")\n expected = df.copy()\n expected.columns = [0, 1, 2]\n tm.assert_frame_equal(result, expected)\n\n\ndef test_result_type_shorter_list(int_frame_const_col):\n # result_type should be consistent no matter which\n # path we take in the code\n df = int_frame_const_col\n result = df.apply(lambda x: [1, 2], axis=1, result_type="expand")\n expected = df[["A", "B"]].copy()\n expected.columns = [0, 1]\n tm.assert_frame_equal(result, expected)\n\n\ndef test_result_type_broadcast(int_frame_const_col, request, engine):\n # result_type should be consistent no matter which\n # path we take in the code\n if engine == "numba":\n mark = pytest.mark.xfail(reason="numba engine doesn't support list return")\n request.node.add_marker(mark)\n df = int_frame_const_col\n # broadcast result\n result = df.apply(\n lambda x: [1, 2, 3], axis=1, result_type="broadcast", engine=engine\n )\n expected = df.copy()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_result_type_broadcast_series_func(int_frame_const_col, engine, request):\n # result_type should be consistent no matter which\n # path we take in the code\n if engine == "numba":\n mark = pytest.mark.xfail(\n reason="numba Series constructor only support ndarrays not list data"\n )\n request.node.add_marker(mark)\n df = 
int_frame_const_col\n columns = ["other", "col", "names"]\n result = df.apply(\n lambda x: Series([1, 2, 3], index=columns),\n axis=1,\n result_type="broadcast",\n engine=engine,\n )\n expected = df.copy()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_result_type_series_result(int_frame_const_col, engine, request):\n # result_type should be consistent no matter which\n # path we take in the code\n if engine == "numba":\n mark = pytest.mark.xfail(\n reason="numba Series constructor only support ndarrays not list data"\n )\n request.node.add_marker(mark)\n df = int_frame_const_col\n # series result\n result = df.apply(lambda x: Series([1, 2, 3], index=x.index), axis=1, engine=engine)\n expected = df.copy()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_result_type_series_result_other_index(int_frame_const_col, engine, request):\n # result_type should be consistent no matter which\n # path we take in the code\n\n if engine == "numba":\n mark = pytest.mark.xfail(\n reason="no support in numba Series constructor for list of columns"\n )\n request.node.add_marker(mark)\n df = int_frame_const_col\n # series result with other index\n columns = ["other", "col", "names"]\n result = df.apply(lambda x: Series([1, 2, 3], index=columns), axis=1, engine=engine)\n expected = df.copy()\n expected.columns = columns\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "box",\n [lambda x: list(x), lambda x: tuple(x), lambda x: np.array(x, dtype="int64")],\n ids=["list", "tuple", "array"],\n)\ndef test_consistency_for_boxed(box, int_frame_const_col):\n # passing an array or list should not affect the output shape\n df = int_frame_const_col\n\n result = df.apply(lambda x: box([1, 2]), axis=1)\n expected = Series([box([1, 2]) for t in df.itertuples()])\n tm.assert_series_equal(result, expected)\n\n result = df.apply(lambda x: box([1, 2]), axis=1, result_type="expand")\n expected = int_frame_const_col[["A", "B"]].rename(columns={"A": 0, "B": 1})\n 
tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_transform(axis, float_frame):\n other_axis = 1 if axis in {0, "index"} else 0\n\n with np.errstate(all="ignore"):\n f_abs = np.abs(float_frame)\n f_sqrt = np.sqrt(float_frame)\n\n # ufunc\n expected = f_sqrt.copy()\n result = float_frame.apply(np.sqrt, axis=axis)\n tm.assert_frame_equal(result, expected)\n\n # list-like\n result = float_frame.apply([np.sqrt], axis=axis)\n expected = f_sqrt.copy()\n if axis in {0, "index"}:\n expected.columns = MultiIndex.from_product([float_frame.columns, ["sqrt"]])\n else:\n expected.index = MultiIndex.from_product([float_frame.index, ["sqrt"]])\n tm.assert_frame_equal(result, expected)\n\n # multiple items in list\n # these are in the order as if we are applying both\n # functions per series and then concatting\n result = float_frame.apply([np.abs, np.sqrt], axis=axis)\n expected = zip_frames([f_abs, f_sqrt], axis=other_axis)\n if axis in {0, "index"}:\n expected.columns = MultiIndex.from_product(\n [float_frame.columns, ["absolute", "sqrt"]]\n )\n else:\n expected.index = MultiIndex.from_product(\n [float_frame.index, ["absolute", "sqrt"]]\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_demo():\n # demonstration tests\n df = DataFrame({"A": range(5), "B": 5})\n\n result = df.agg(["min", "max"])\n expected = DataFrame(\n {"A": [0, 4], "B": [5, 5]}, columns=["A", "B"], index=["min", "max"]\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_demo_dict_agg():\n # demonstration tests\n df = DataFrame({"A": range(5), "B": 5})\n result = df.agg({"A": ["min", "max"], "B": ["sum", "max"]})\n expected = DataFrame(\n {"A": [4.0, 0.0, np.nan], "B": [5.0, np.nan, 25.0]},\n columns=["A", "B"],\n index=["max", "min", "sum"],\n )\n tm.assert_frame_equal(result.reindex_like(expected), expected)\n\n\ndef test_agg_with_name_as_column_name():\n # GH 36212 - Column name is "name"\n data = {"name": ["foo", "bar"]}\n df = DataFrame(data)\n\n # result's name should be None\n 
result = df.agg({"name": "count"})\n expected = Series({"name": 2})\n tm.assert_series_equal(result, expected)\n\n # Check if name is still preserved when aggregating series instead\n result = df["name"].agg({"name": "count"})\n expected = Series({"name": 2}, name="name")\n tm.assert_series_equal(result, expected)\n\n\ndef test_agg_multiple_mixed():\n # GH 20909\n mdf = DataFrame(\n {\n "A": [1, 2, 3],\n "B": [1.0, 2.0, 3.0],\n "C": ["foo", "bar", "baz"],\n }\n )\n expected = DataFrame(\n {\n "A": [1, 6],\n "B": [1.0, 6.0],\n "C": ["bar", "foobarbaz"],\n },\n index=["min", "sum"],\n )\n # sorted index\n result = mdf.agg(["min", "sum"])\n tm.assert_frame_equal(result, expected)\n\n result = mdf[["C", "B", "A"]].agg(["sum", "min"])\n # GH40420: the result of .agg should have an index that is sorted\n # according to the arguments provided to agg.\n expected = expected[["C", "B", "A"]].reindex(["sum", "min"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_multiple_mixed_raises():\n # GH 20909\n mdf = DataFrame(\n {\n "A": [1, 2, 3],\n "B": [1.0, 2.0, 3.0],\n "C": ["foo", "bar", "baz"],\n "D": date_range("20130101", periods=3),\n }\n )\n\n # sorted index\n msg = "does not support reduction"\n with pytest.raises(TypeError, match=msg):\n mdf.agg(["min", "sum"])\n\n with pytest.raises(TypeError, match=msg):\n mdf[["D", "C", "B", "A"]].agg(["sum", "min"])\n\n\ndef test_agg_reduce(axis, float_frame):\n other_axis = 1 if axis in {0, "index"} else 0\n name1, name2 = float_frame.axes[other_axis].unique()[:2].sort_values()\n\n # all reducers\n expected = pd.concat(\n [\n float_frame.mean(axis=axis),\n float_frame.max(axis=axis),\n float_frame.sum(axis=axis),\n ],\n axis=1,\n )\n expected.columns = ["mean", "max", "sum"]\n expected = expected.T if axis in {0, "index"} else expected\n\n result = float_frame.agg(["mean", "max", "sum"], axis=axis)\n tm.assert_frame_equal(result, expected)\n\n # dict input with scalars\n func = {name1: "mean", name2: "sum"}\n result = 
float_frame.agg(func, axis=axis)\n expected = Series(\n [\n float_frame.loc(other_axis)[name1].mean(),\n float_frame.loc(other_axis)[name2].sum(),\n ],\n index=[name1, name2],\n )\n tm.assert_series_equal(result, expected)\n\n # dict input with lists\n func = {name1: ["mean"], name2: ["sum"]}\n result = float_frame.agg(func, axis=axis)\n expected = DataFrame(\n {\n name1: Series([float_frame.loc(other_axis)[name1].mean()], index=["mean"]),\n name2: Series([float_frame.loc(other_axis)[name2].sum()], index=["sum"]),\n }\n )\n expected = expected.T if axis in {1, "columns"} else expected\n tm.assert_frame_equal(result, expected)\n\n # dict input with lists with multiple\n func = {name1: ["mean", "sum"], name2: ["sum", "max"]}\n result = float_frame.agg(func, axis=axis)\n expected = pd.concat(\n {\n name1: Series(\n [\n float_frame.loc(other_axis)[name1].mean(),\n float_frame.loc(other_axis)[name1].sum(),\n ],\n index=["mean", "sum"],\n ),\n name2: Series(\n [\n float_frame.loc(other_axis)[name2].sum(),\n float_frame.loc(other_axis)[name2].max(),\n ],\n index=["sum", "max"],\n ),\n },\n axis=1,\n )\n expected = expected.T if axis in {1, "columns"} else expected\n tm.assert_frame_equal(result, expected)\n\n\ndef test_nuiscance_columns():\n # GH 15015\n df = DataFrame(\n {\n "A": [1, 2, 3],\n "B": [1.0, 2.0, 3.0],\n "C": ["foo", "bar", "baz"],\n "D": date_range("20130101", periods=3),\n }\n )\n\n result = df.agg("min")\n expected = Series([1, 1.0, "bar", Timestamp("20130101")], index=df.columns)\n tm.assert_series_equal(result, expected)\n\n result = df.agg(["min"])\n expected = DataFrame(\n [[1, 1.0, "bar", Timestamp("20130101").as_unit("ns")]],\n index=["min"],\n columns=df.columns,\n )\n tm.assert_frame_equal(result, expected)\n\n msg = "does not support reduction"\n with pytest.raises(TypeError, match=msg):\n df.agg("sum")\n\n result = df[["A", "B", "C"]].agg("sum")\n expected = Series([6, 6.0, "foobarbaz"], index=["A", "B", "C"])\n tm.assert_series_equal(result, 
expected)\n\n msg = "does not support reduction"\n with pytest.raises(TypeError, match=msg):\n df.agg(["sum"])\n\n\n@pytest.mark.parametrize("how", ["agg", "apply"])\ndef test_non_callable_aggregates(how):\n # GH 16405\n # 'size' is a property of frame/series\n # validate that this is working\n # GH 39116 - expand to apply\n df = DataFrame(\n {"A": [None, 2, 3], "B": [1.0, np.nan, 3.0], "C": ["foo", None, "bar"]}\n )\n\n # Function aggregate\n result = getattr(df, how)({"A": "count"})\n expected = Series({"A": 2})\n\n tm.assert_series_equal(result, expected)\n\n # Non-function aggregate\n result = getattr(df, how)({"A": "size"})\n expected = Series({"A": 3})\n\n tm.assert_series_equal(result, expected)\n\n # Mix function and non-function aggs\n result1 = getattr(df, how)(["count", "size"])\n result2 = getattr(df, how)(\n {"A": ["count", "size"], "B": ["count", "size"], "C": ["count", "size"]}\n )\n expected = DataFrame(\n {\n "A": {"count": 2, "size": 3},\n "B": {"count": 2, "size": 3},\n "C": {"count": 2, "size": 3},\n }\n )\n\n tm.assert_frame_equal(result1, result2, check_like=True)\n tm.assert_frame_equal(result2, expected, check_like=True)\n\n # Just functional string arg is same as calling df.arg()\n result = getattr(df, how)("count")\n expected = df.count()\n\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("how", ["agg", "apply"])\ndef test_size_as_str(how, axis):\n # GH 39934\n df = DataFrame(\n {"A": [None, 2, 3], "B": [1.0, np.nan, 3.0], "C": ["foo", None, "bar"]}\n )\n # Just a string attribute arg same as calling df.arg\n # on the columns\n result = getattr(df, how)("size", axis=axis)\n if axis in (0, "index"):\n expected = Series(df.shape[0], index=df.columns)\n else:\n expected = Series(df.shape[1], index=df.index)\n tm.assert_series_equal(result, expected)\n\n\ndef test_agg_listlike_result():\n # GH-29587 user defined function returning list-likes\n df = DataFrame({"A": [2, 2, 3], "B": [1.5, np.nan, 1.5], "C": ["foo", None, 
"bar"]})\n\n def func(group_col):\n return list(group_col.dropna().unique())\n\n result = df.agg(func)\n expected = Series([[2, 3], [1.5], ["foo", "bar"]], index=["A", "B", "C"])\n tm.assert_series_equal(result, expected)\n\n result = df.agg([func])\n expected = expected.to_frame("func").T\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("axis", [0, 1])\n@pytest.mark.parametrize(\n "args, kwargs",\n [\n ((1, 2, 3), {}),\n ((8, 7, 15), {}),\n ((1, 2), {}),\n ((1,), {"b": 2}),\n ((), {"a": 1, "b": 2}),\n ((), {"a": 2, "b": 1}),\n ((), {"a": 1, "b": 2, "c": 3}),\n ],\n)\ndef test_agg_args_kwargs(axis, args, kwargs):\n def f(x, a, b, c=3):\n return x.sum() + (a + b) / c\n\n df = DataFrame([[1, 2], [3, 4]])\n\n if axis == 0:\n expected = Series([5.0, 7.0])\n else:\n expected = Series([4.0, 8.0])\n\n result = df.agg(f, axis, *args, **kwargs)\n\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("num_cols", [2, 3, 5])\ndef test_frequency_is_original(num_cols, engine, request):\n # GH 22150\n if engine == "numba":\n mark = pytest.mark.xfail(reason="numba engine only supports numeric indices")\n request.node.add_marker(mark)\n index = pd.DatetimeIndex(["1950-06-30", "1952-10-24", "1953-05-29"])\n original = index.copy()\n df = DataFrame(1, index=index, columns=range(num_cols))\n df.apply(lambda x: x, engine=engine)\n assert index.freq == original.freq\n\n\ndef test_apply_datetime_tz_issue(engine, request):\n # GH 29052\n\n if engine == "numba":\n mark = pytest.mark.xfail(\n reason="numba engine doesn't support non-numeric indexes"\n )\n request.node.add_marker(mark)\n\n timestamps = [\n Timestamp("2019-03-15 12:34:31.909000+0000", tz="UTC"),\n Timestamp("2019-03-15 12:34:34.359000+0000", tz="UTC"),\n Timestamp("2019-03-15 12:34:34.660000+0000", tz="UTC"),\n ]\n df = DataFrame(data=[0, 1, 2], index=timestamps)\n result = df.apply(lambda x: x.name, axis=1, engine=engine)\n expected = Series(index=timestamps, data=timestamps)\n\n 
tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("df", [DataFrame({"A": ["a", None], "B": ["c", "d"]})])\n@pytest.mark.parametrize("method", ["min", "max", "sum"])\ndef test_mixed_column_raises(df, method, using_infer_string):\n # GH 16832\n if method == "sum":\n msg = r'can only concatenate str \(not "int"\) to str|does not support'\n else:\n msg = "not supported between instances of 'str' and 'float'"\n if not using_infer_string:\n with pytest.raises(TypeError, match=msg):\n getattr(df, method)()\n else:\n getattr(df, method)()\n\n\n@pytest.mark.parametrize("col", [1, 1.0, True, "a", np.nan])\ndef test_apply_dtype(col):\n # GH 31466\n df = DataFrame([[1.0, col]], columns=["a", "b"])\n result = df.apply(lambda x: x.dtype)\n expected = df.dtypes\n\n tm.assert_series_equal(result, expected)\n\n\ndef test_apply_mutating(using_array_manager, using_copy_on_write, warn_copy_on_write):\n # GH#35462 case where applied func pins a new BlockManager to a row\n df = DataFrame({"a": range(100), "b": range(100, 200)})\n df_orig = df.copy()\n\n def func(row):\n mgr = row._mgr\n row.loc["a"] += 1\n assert row._mgr is not mgr\n return row\n\n expected = df.copy()\n expected["a"] += 1\n\n with tm.assert_cow_warning(warn_copy_on_write):\n result = df.apply(func, axis=1)\n\n tm.assert_frame_equal(result, expected)\n if using_copy_on_write or using_array_manager:\n # INFO(CoW) With copy on write, mutating a viewing row doesn't mutate the parent\n # INFO(ArrayManager) With BlockManager, the row is a view and mutated in place,\n # with ArrayManager the row is not a view, and thus not mutated in place\n tm.assert_frame_equal(df, df_orig)\n else:\n tm.assert_frame_equal(df, result)\n\n\ndef test_apply_empty_list_reduce():\n # GH#35683 get columns correct\n df = DataFrame([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]], columns=["a", "b"])\n\n result = df.apply(lambda x: [], result_type="reduce")\n expected = Series({"a": [], "b": []}, dtype=object)\n 
tm.assert_series_equal(result, expected)\n\n\ndef test_apply_no_suffix_index(engine, request):\n # GH36189\n if engine == "numba":\n mark = pytest.mark.xfail(\n reason="numba engine doesn't support list-likes/dict-like callables"\n )\n request.node.add_marker(mark)\n pdf = DataFrame([[4, 9]] * 3, columns=["A", "B"])\n result = pdf.apply(["sum", lambda x: x.sum(), lambda x: x.sum()], engine=engine)\n expected = DataFrame(\n {"A": [12, 12, 12], "B": [27, 27, 27]}, index=["sum", "<lambda>", "<lambda>"]\n )\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_raw_returns_string(engine):\n # https://github.com/pandas-dev/pandas/issues/35940\n if engine == "numba":\n pytest.skip("No object dtype support in numba")\n df = DataFrame({"A": ["aa", "bbb"]})\n result = df.apply(lambda x: x[0], engine=engine, axis=1, raw=True)\n expected = Series(["aa", "bbb"])\n tm.assert_series_equal(result, expected)\n\n\ndef test_aggregation_func_column_order():\n # GH40420: the result of .agg should have an index that is sorted\n # according to the arguments provided to agg.\n df = DataFrame(\n [\n (1, 0, 0),\n (2, 0, 0),\n (3, 0, 0),\n (4, 5, 4),\n (5, 6, 6),\n (6, 7, 7),\n ],\n columns=("att1", "att2", "att3"),\n )\n\n def sum_div2(s):\n return s.sum() / 2\n\n aggs = ["sum", sum_div2, "count", "min"]\n result = df.agg(aggs)\n expected = DataFrame(\n {\n "att1": [21.0, 10.5, 6.0, 1.0],\n "att2": [18.0, 9.0, 6.0, 0.0],\n "att3": [17.0, 8.5, 6.0, 0.0],\n },\n index=["sum", "sum_div2", "count", "min"],\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_getitem_axis_1(engine, request):\n # GH 13427\n if engine == "numba":\n mark = pytest.mark.xfail(\n reason="numba engine not supporting duplicate index values"\n )\n request.node.add_marker(mark)\n df = DataFrame({"a": [0, 1, 2], "b": [1, 2, 3]})\n result = df[["a", "a"]].apply(\n lambda x: x.iloc[0] + x.iloc[1], axis=1, engine=engine\n )\n expected = Series([0, 2, 4])\n tm.assert_series_equal(result, 
expected)\n\n\ndef test_nuisance_depr_passes_through_warnings():\n # GH 43740\n # DataFrame.agg with list-likes may emit warnings for both individual\n # args and for entire columns, but we only want to emit once. We\n # catch and suppress the warnings for individual args, but need to make\n # sure if some other warnings were raised, they get passed through to\n # the user.\n\n def expected_warning(x):\n warnings.warn("Hello, World!")\n return x.sum()\n\n df = DataFrame({"a": [1, 2, 3]})\n with tm.assert_produces_warning(UserWarning, match="Hello, World!"):\n df.agg([expected_warning])\n\n\ndef test_apply_type():\n # GH 46719\n df = DataFrame(\n {"col1": [3, "string", float], "col2": [0.25, datetime(2020, 1, 1), np.nan]},\n index=["a", "b", "c"],\n )\n\n # axis=0\n result = df.apply(type, axis=0)\n expected = Series({"col1": Series, "col2": Series})\n tm.assert_series_equal(result, expected)\n\n # axis=1\n result = df.apply(type, axis=1)\n expected = Series({"a": Series, "b": Series, "c": Series})\n tm.assert_series_equal(result, expected)\n\n\ndef test_apply_on_empty_dataframe(engine):\n # GH 39111\n df = DataFrame({"a": [1, 2], "b": [3, 0]})\n result = df.head(0).apply(lambda x: max(x["a"], x["b"]), axis=1, engine=engine)\n expected = Series([], dtype=np.float64)\n tm.assert_series_equal(result, expected)\n\n\ndef test_apply_return_list():\n df = DataFrame({"a": [1, 2], "b": [2, 3]})\n result = df.apply(lambda x: [x.values])\n expected = DataFrame({"a": [[1, 2]], "b": [[2, 3]]})\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "test, constant",\n [\n ({"a": [1, 2, 3], "b": [1, 1, 1]}, {"a": [1, 2, 3], "b": [1]}),\n ({"a": [2, 2, 2], "b": [1, 1, 1]}, {"a": [2], "b": [1]}),\n ],\n)\ndef test_unique_agg_type_is_series(test, constant):\n # GH#22558\n df1 = DataFrame(test)\n expected = Series(data=constant, index=["a", "b"], dtype="object")\n aggregation = {"a": "unique", "b": "unique"}\n\n result = df1.agg(aggregation)\n\n 
tm.assert_series_equal(result, expected)\n\n\ndef test_any_apply_keyword_non_zero_axis_regression():\n # https://github.com/pandas-dev/pandas/issues/48656\n df = DataFrame({"A": [1, 2, 0], "B": [0, 2, 0], "C": [0, 0, 0]})\n expected = Series([True, True, False])\n tm.assert_series_equal(df.any(axis=1), expected)\n\n result = df.apply("any", axis=1)\n tm.assert_series_equal(result, expected)\n\n result = df.apply("any", 1)\n tm.assert_series_equal(result, expected)\n\n\ndef test_agg_mapping_func_deprecated():\n # GH 53325\n df = DataFrame({"x": [1, 2, 3]})\n\n def foo1(x, a=1, c=0):\n return x + a + c\n\n def foo2(x, b=2, c=0):\n return x + b + c\n\n # single func already takes the vectorized path\n result = df.agg(foo1, 0, 3, c=4)\n expected = df + 7\n tm.assert_frame_equal(result, expected)\n\n msg = "using .+ in Series.agg cannot aggregate and"\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df.agg([foo1, foo2], 0, 3, c=4)\n expected = DataFrame(\n [[8, 8], [9, 9], [10, 10]], columns=[["x", "x"], ["foo1", "foo2"]]\n )\n tm.assert_frame_equal(result, expected)\n\n # TODO: the result below is wrong, should be fixed (GH53325)\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df.agg({"x": foo1}, 0, 3, c=4)\n expected = DataFrame([2, 3, 4], columns=["x"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_std():\n df = DataFrame(np.arange(6).reshape(3, 2), columns=["A", "B"])\n\n with tm.assert_produces_warning(FutureWarning, match="using DataFrame.std"):\n result = df.agg(np.std)\n expected = Series({"A": 2.0, "B": 2.0}, dtype=float)\n tm.assert_series_equal(result, expected)\n\n with tm.assert_produces_warning(FutureWarning, match="using Series.std"):\n result = df.agg([np.std])\n expected = DataFrame({"A": 2.0, "B": 2.0}, index=["std"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_dist_like_and_nonunique_columns():\n # GH#51099\n df = DataFrame(\n {"A": [None, 2, 3], "B": [1.0, np.nan, 3.0], 
"C": ["foo", None, "bar"]}\n )\n df.columns = ["A", "A", "C"]\n\n result = df.agg({"A": "count"})\n expected = df["A"].count()\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\apply\test_frame_apply.py | test_frame_apply.py | Python | 54,550 | 0.75 | 0.112708 | 0.097473 | vue-tools | 190 | 2025-06-03T13:40:50.438399 | BSD-3-Clause | true | e485e94ec6bbbd30112c6263b90ea1f5 |
import numpy as np\nimport pytest\n\nfrom pandas.compat.numpy import np_version_gte1p25\n\nimport pandas as pd\nimport pandas._testing as tm\n\n\ndef test_agg_relabel():\n # GH 26513\n df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4], "C": [3, 4, 5, 6]})\n\n # simplest case with one column, one func\n result = df.agg(foo=("B", "sum"))\n expected = pd.DataFrame({"B": [10]}, index=pd.Index(["foo"]))\n tm.assert_frame_equal(result, expected)\n\n # test on same column with different methods\n result = df.agg(foo=("B", "sum"), bar=("B", "min"))\n expected = pd.DataFrame({"B": [10, 1]}, index=pd.Index(["foo", "bar"]))\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_relabel_multi_columns_multi_methods():\n # GH 26513, test on multiple columns with multiple methods\n df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4], "C": [3, 4, 5, 6]})\n result = df.agg(\n foo=("A", "sum"),\n bar=("B", "mean"),\n cat=("A", "min"),\n dat=("B", "max"),\n f=("A", "max"),\n g=("C", "min"),\n )\n expected = pd.DataFrame(\n {\n "A": [6.0, np.nan, 1.0, np.nan, 2.0, np.nan],\n "B": [np.nan, 2.5, np.nan, 4.0, np.nan, np.nan],\n "C": [np.nan, np.nan, np.nan, np.nan, np.nan, 3.0],\n },\n index=pd.Index(["foo", "bar", "cat", "dat", "f", "g"]),\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.xfail(np_version_gte1p25, reason="name of min now equals name of np.min")\ndef test_agg_relabel_partial_functions():\n # GH 26513, test on partial, functools or more complex cases\n df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4], "C": [3, 4, 5, 6]})\n msg = "using Series.[mean|min]"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df.agg(foo=("A", np.mean), bar=("A", "mean"), cat=("A", min))\n expected = pd.DataFrame(\n {"A": [1.5, 1.5, 1.0]}, index=pd.Index(["foo", "bar", "cat"])\n )\n tm.assert_frame_equal(result, expected)\n\n msg = "using Series.[mean|min|max|sum]"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = 
df.agg(\n foo=("A", min),\n bar=("A", np.min),\n cat=("B", max),\n dat=("C", "min"),\n f=("B", np.sum),\n kk=("B", lambda x: min(x)),\n )\n expected = pd.DataFrame(\n {\n "A": [1.0, 1.0, np.nan, np.nan, np.nan, np.nan],\n "B": [np.nan, np.nan, 4.0, np.nan, 10.0, 1.0],\n "C": [np.nan, np.nan, np.nan, 3.0, np.nan, np.nan],\n },\n index=pd.Index(["foo", "bar", "cat", "dat", "f", "kk"]),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_namedtuple():\n # GH 26513\n df = pd.DataFrame({"A": [0, 1], "B": [1, 2]})\n result = df.agg(\n foo=pd.NamedAgg("B", "sum"),\n bar=pd.NamedAgg("B", "min"),\n cat=pd.NamedAgg(column="B", aggfunc="count"),\n fft=pd.NamedAgg("B", aggfunc="max"),\n )\n\n expected = pd.DataFrame(\n {"B": [3, 1, 2, 2]}, index=pd.Index(["foo", "bar", "cat", "fft"])\n )\n tm.assert_frame_equal(result, expected)\n\n result = df.agg(\n foo=pd.NamedAgg("A", "min"),\n bar=pd.NamedAgg(column="B", aggfunc="max"),\n cat=pd.NamedAgg(column="A", aggfunc="max"),\n )\n expected = pd.DataFrame(\n {"A": [0.0, np.nan, 1.0], "B": [np.nan, 2.0, np.nan]},\n index=pd.Index(["foo", "bar", "cat"]),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_reconstruct_func():\n # GH 28472, test to ensure reconstruct_func isn't moved;\n # This method is used by other libraries (e.g. dask)\n result = pd.core.apply.reconstruct_func("min")\n expected = (False, "min", None, None)\n tm.assert_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\apply\test_frame_apply_relabeling.py | test_frame_apply_relabeling.py | Python | 3,772 | 0.95 | 0.044248 | 0.084211 | python-kit | 83 | 2023-09-11T18:15:57.026673 | GPL-3.0 | true | 3cd179f834da100fd44a2ee0ac722055 |
import numpy as np\nimport pytest\n\nfrom pandas import (\n DataFrame,\n MultiIndex,\n Series,\n)\nimport pandas._testing as tm\nfrom pandas.tests.apply.common import frame_transform_kernels\nfrom pandas.tests.frame.common import zip_frames\n\n\ndef unpack_obj(obj, klass, axis):\n """\n Helper to ensure we have the right type of object for a test parametrized\n over frame_or_series.\n """\n if klass is not DataFrame:\n obj = obj["A"]\n if axis != 0:\n pytest.skip(f"Test is only for DataFrame with axis={axis}")\n return obj\n\n\ndef test_transform_ufunc(axis, float_frame, frame_or_series):\n # GH 35964\n obj = unpack_obj(float_frame, frame_or_series, axis)\n\n with np.errstate(all="ignore"):\n f_sqrt = np.sqrt(obj)\n\n # ufunc\n result = obj.transform(np.sqrt, axis=axis)\n expected = f_sqrt\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "ops, names",\n [\n ([np.sqrt], ["sqrt"]),\n ([np.abs, np.sqrt], ["absolute", "sqrt"]),\n (np.array([np.sqrt]), ["sqrt"]),\n (np.array([np.abs, np.sqrt]), ["absolute", "sqrt"]),\n ],\n)\ndef test_transform_listlike(axis, float_frame, ops, names):\n # GH 35964\n other_axis = 1 if axis in {0, "index"} else 0\n with np.errstate(all="ignore"):\n expected = zip_frames([op(float_frame) for op in ops], axis=other_axis)\n if axis in {0, "index"}:\n expected.columns = MultiIndex.from_product([float_frame.columns, names])\n else:\n expected.index = MultiIndex.from_product([float_frame.index, names])\n result = float_frame.transform(ops, axis=axis)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("ops", [[], np.array([])])\ndef test_transform_empty_listlike(float_frame, ops, frame_or_series):\n obj = unpack_obj(float_frame, frame_or_series, 0)\n\n with pytest.raises(ValueError, match="No transform functions were provided"):\n obj.transform(ops)\n\n\ndef test_transform_listlike_func_with_args():\n # GH 50624\n df = DataFrame({"x": [1, 2, 3]})\n\n def foo1(x, a=1, c=0):\n return x + a + c\n\n def 
foo2(x, b=2, c=0):\n return x + b + c\n\n msg = r"foo1\(\) got an unexpected keyword argument 'b'"\n with pytest.raises(TypeError, match=msg):\n df.transform([foo1, foo2], 0, 3, b=3, c=4)\n\n result = df.transform([foo1, foo2], 0, 3, c=4)\n expected = DataFrame(\n [[8, 8], [9, 9], [10, 10]],\n columns=MultiIndex.from_tuples([("x", "foo1"), ("x", "foo2")]),\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("box", [dict, Series])\ndef test_transform_dictlike(axis, float_frame, box):\n # GH 35964\n if axis in (0, "index"):\n e = float_frame.columns[0]\n expected = float_frame[[e]].transform(np.abs)\n else:\n e = float_frame.index[0]\n expected = float_frame.iloc[[0]].transform(np.abs)\n result = float_frame.transform(box({e: np.abs}), axis=axis)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_transform_dictlike_mixed():\n # GH 40018 - mix of lists and non-lists in values of a dictionary\n df = DataFrame({"a": [1, 2], "b": [1, 4], "c": [1, 4]})\n result = df.transform({"b": ["sqrt", "abs"], "c": "sqrt"})\n expected = DataFrame(\n [[1.0, 1, 1.0], [2.0, 4, 2.0]],\n columns=MultiIndex([("b", "c"), ("sqrt", "abs")], [(0, 0, 1), (0, 1, 0)]),\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "ops",\n [\n {},\n {"A": []},\n {"A": [], "B": "cumsum"},\n {"A": "cumsum", "B": []},\n {"A": [], "B": ["cumsum"]},\n {"A": ["cumsum"], "B": []},\n ],\n)\ndef test_transform_empty_dictlike(float_frame, ops, frame_or_series):\n obj = unpack_obj(float_frame, frame_or_series, 0)\n\n with pytest.raises(ValueError, match="No transform functions were provided"):\n obj.transform(ops)\n\n\n@pytest.mark.parametrize("use_apply", [True, False])\ndef test_transform_udf(axis, float_frame, use_apply, frame_or_series):\n # GH 35964\n obj = unpack_obj(float_frame, frame_or_series, axis)\n\n # transform uses UDF either via apply or passing the entire DataFrame\n def func(x):\n # transform is using apply iff x is not a DataFrame\n if use_apply 
== isinstance(x, frame_or_series):\n # Force transform to fallback\n raise ValueError\n return x + 1\n\n result = obj.transform(func, axis=axis)\n expected = obj + 1\n tm.assert_equal(result, expected)\n\n\nwont_fail = ["ffill", "bfill", "fillna", "pad", "backfill", "shift"]\nframe_kernels_raise = [x for x in frame_transform_kernels if x not in wont_fail]\n\n\n@pytest.mark.parametrize("op", [*frame_kernels_raise, lambda x: x + 1])\ndef test_transform_bad_dtype(op, frame_or_series, request):\n # GH 35964\n if op == "ngroup":\n request.applymarker(\n pytest.mark.xfail(raises=ValueError, reason="ngroup not valid for NDFrame")\n )\n\n obj = DataFrame({"A": 3 * [object]}) # DataFrame that will fail on most transforms\n obj = tm.get_obj(obj, frame_or_series)\n error = TypeError\n msg = "|".join(\n [\n "not supported between instances of 'type' and 'type'",\n "unsupported operand type",\n ]\n )\n\n with pytest.raises(error, match=msg):\n obj.transform(op)\n with pytest.raises(error, match=msg):\n obj.transform([op])\n with pytest.raises(error, match=msg):\n obj.transform({"A": op})\n with pytest.raises(error, match=msg):\n obj.transform({"A": [op]})\n\n\n@pytest.mark.parametrize("op", frame_kernels_raise)\ndef test_transform_failure_typeerror(request, op):\n # GH 35964\n\n if op == "ngroup":\n request.applymarker(\n pytest.mark.xfail(raises=ValueError, reason="ngroup not valid for NDFrame")\n )\n\n # Using object makes most transform kernels fail\n df = DataFrame({"A": 3 * [object], "B": [1, 2, 3]})\n error = TypeError\n msg = "|".join(\n [\n "not supported between instances of 'type' and 'type'",\n "unsupported operand type",\n ]\n )\n\n with pytest.raises(error, match=msg):\n df.transform([op])\n\n with pytest.raises(error, match=msg):\n df.transform({"A": op, "B": op})\n\n with pytest.raises(error, match=msg):\n df.transform({"A": [op], "B": [op]})\n\n with pytest.raises(error, match=msg):\n df.transform({"A": [op, "shift"], "B": [op]})\n\n\ndef 
test_transform_failure_valueerror():\n # GH 40211\n def op(x):\n if np.sum(np.sum(x)) < 10:\n raise ValueError\n return x\n\n df = DataFrame({"A": [1, 2, 3], "B": [400, 500, 600]})\n msg = "Transform function failed"\n\n with pytest.raises(ValueError, match=msg):\n df.transform([op])\n\n with pytest.raises(ValueError, match=msg):\n df.transform({"A": op, "B": op})\n\n with pytest.raises(ValueError, match=msg):\n df.transform({"A": [op], "B": [op]})\n\n with pytest.raises(ValueError, match=msg):\n df.transform({"A": [op, "shift"], "B": [op]})\n\n\n@pytest.mark.parametrize("use_apply", [True, False])\ndef test_transform_passes_args(use_apply, frame_or_series):\n # GH 35964\n # transform uses UDF either via apply or passing the entire DataFrame\n expected_args = [1, 2]\n expected_kwargs = {"c": 3}\n\n def f(x, a, b, c):\n # transform is using apply iff x is not a DataFrame\n if use_apply == isinstance(x, frame_or_series):\n # Force transform to fallback\n raise ValueError\n assert [a, b] == expected_args\n assert c == expected_kwargs["c"]\n return x\n\n frame_or_series([1]).transform(f, 0, *expected_args, **expected_kwargs)\n\n\ndef test_transform_empty_dataframe():\n # https://github.com/pandas-dev/pandas/issues/39636\n df = DataFrame([], columns=["col1", "col2"])\n result = df.transform(lambda x: x + 10)\n tm.assert_frame_equal(result, df)\n\n result = df["col1"].transform(lambda x: x + 10)\n tm.assert_series_equal(result, df["col1"])\n | .venv\Lib\site-packages\pandas\tests\apply\test_frame_transform.py | test_frame_transform.py | Python | 8,020 | 0.95 | 0.140152 | 0.091787 | react-lib | 690 | 2023-10-30T02:04:13.132031 | GPL-3.0 | true | 187b1f1ad750ce3bd296efd6a5f79c12 |
# Tests specifically aimed at detecting bad arguments.\n# This file is organized by reason for exception.\n# 1. always invalid argument values\n# 2. missing column(s)\n# 3. incompatible ops/dtype/args/kwargs\n# 4. invalid result shape/type\n# If your test does not fit into one of these categories, add to this list.\n\nfrom itertools import chain\nimport re\n\nimport numpy as np\nimport pytest\n\nfrom pandas.errors import SpecificationError\n\nfrom pandas import (\n DataFrame,\n Series,\n date_range,\n)\nimport pandas._testing as tm\n\n\n@pytest.mark.parametrize("result_type", ["foo", 1])\ndef test_result_type_error(result_type):\n # allowed result_type\n df = DataFrame(\n np.tile(np.arange(3, dtype="int64"), 6).reshape(6, -1) + 1,\n columns=["A", "B", "C"],\n )\n\n msg = (\n "invalid value for result_type, must be one of "\n "{None, 'reduce', 'broadcast', 'expand'}"\n )\n with pytest.raises(ValueError, match=msg):\n df.apply(lambda x: [1, 2, 3], axis=1, result_type=result_type)\n\n\ndef test_apply_invalid_axis_value():\n df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["a", "a", "c"])\n msg = "No axis named 2 for object type DataFrame"\n with pytest.raises(ValueError, match=msg):\n df.apply(lambda x: x, 2)\n\n\ndef test_agg_raises():\n # GH 26513\n df = DataFrame({"A": [0, 1], "B": [1, 2]})\n msg = "Must provide"\n\n with pytest.raises(TypeError, match=msg):\n df.agg()\n\n\ndef test_map_with_invalid_na_action_raises():\n # https://github.com/pandas-dev/pandas/issues/32815\n s = Series([1, 2, 3])\n msg = "na_action must either be 'ignore' or None"\n with pytest.raises(ValueError, match=msg):\n s.map(lambda x: x, na_action="____")\n\n\n@pytest.mark.parametrize("input_na_action", ["____", True])\ndef test_map_arg_is_dict_with_invalid_na_action_raises(input_na_action):\n # https://github.com/pandas-dev/pandas/issues/46588\n s = Series([1, 2, 3])\n msg = f"na_action must either be 'ignore' or None, {input_na_action} was passed"\n with pytest.raises(ValueError, 
match=msg):\n s.map({1: 2}, na_action=input_na_action)\n\n\n@pytest.mark.parametrize("method", ["apply", "agg", "transform"])\n@pytest.mark.parametrize("func", [{"A": {"B": "sum"}}, {"A": {"B": ["sum"]}}])\ndef test_nested_renamer(frame_or_series, method, func):\n # GH 35964\n obj = frame_or_series({"A": [1]})\n match = "nested renamer is not supported"\n with pytest.raises(SpecificationError, match=match):\n getattr(obj, method)(func)\n\n\n@pytest.mark.parametrize(\n "renamer",\n [{"foo": ["min", "max"]}, {"foo": ["min", "max"], "bar": ["sum", "mean"]}],\n)\ndef test_series_nested_renamer(renamer):\n s = Series(range(6), dtype="int64", name="series")\n msg = "nested renamer is not supported"\n with pytest.raises(SpecificationError, match=msg):\n s.agg(renamer)\n\n\ndef test_apply_dict_depr():\n tsdf = DataFrame(\n np.random.default_rng(2).standard_normal((10, 3)),\n columns=["A", "B", "C"],\n index=date_range("1/1/2000", periods=10),\n )\n msg = "nested renamer is not supported"\n with pytest.raises(SpecificationError, match=msg):\n tsdf.A.agg({"foo": ["sum", "mean"]})\n\n\n@pytest.mark.parametrize("method", ["agg", "transform"])\ndef test_dict_nested_renaming_depr(method):\n df = DataFrame({"A": range(5), "B": 5})\n\n # nested renaming\n msg = r"nested renamer is not supported"\n with pytest.raises(SpecificationError, match=msg):\n getattr(df, method)({"A": {"foo": "min"}, "B": {"bar": "max"}})\n\n\n@pytest.mark.parametrize("method", ["apply", "agg", "transform"])\n@pytest.mark.parametrize("func", [{"B": "sum"}, {"B": ["sum"]}])\ndef test_missing_column(method, func):\n # GH 40004\n obj = DataFrame({"A": [1]})\n match = re.escape("Column(s) ['B'] do not exist")\n with pytest.raises(KeyError, match=match):\n getattr(obj, method)(func)\n\n\ndef test_transform_mixed_column_name_dtypes():\n # GH39025\n df = DataFrame({"a": ["1"]})\n msg = r"Column\(s\) \[1, 'b'\] do not exist"\n with pytest.raises(KeyError, match=msg):\n df.transform({"a": int, 1: str, "b": 
int})\n\n\n@pytest.mark.parametrize(\n "how, args", [("pct_change", ()), ("nsmallest", (1, ["a", "b"])), ("tail", 1)]\n)\ndef test_apply_str_axis_1_raises(how, args):\n # GH 39211 - some ops don't support axis=1\n df = DataFrame({"a": [1, 2], "b": [3, 4]})\n msg = f"Operation {how} does not support axis=1"\n with pytest.raises(ValueError, match=msg):\n df.apply(how, axis=1, args=args)\n\n\ndef test_transform_axis_1_raises():\n # GH 35964\n msg = "No axis named 1 for object type Series"\n with pytest.raises(ValueError, match=msg):\n Series([1]).transform("sum", axis=1)\n\n\ndef test_apply_modify_traceback():\n data = DataFrame(\n {\n "A": [\n "foo",\n "foo",\n "foo",\n "foo",\n "bar",\n "bar",\n "bar",\n "bar",\n "foo",\n "foo",\n "foo",\n ],\n "B": [\n "one",\n "one",\n "one",\n "two",\n "one",\n "one",\n "one",\n "two",\n "two",\n "two",\n "one",\n ],\n "C": [\n "dull",\n "dull",\n "shiny",\n "dull",\n "dull",\n "shiny",\n "shiny",\n "dull",\n "shiny",\n "shiny",\n "shiny",\n ],\n "D": np.random.default_rng(2).standard_normal(11),\n "E": np.random.default_rng(2).standard_normal(11),\n "F": np.random.default_rng(2).standard_normal(11),\n }\n )\n\n data.loc[4, "C"] = np.nan\n\n def transform(row):\n if row["C"].startswith("shin") and row["A"] == "foo":\n row["D"] = 7\n return row\n\n msg = "'float' object has no attribute 'startswith'"\n with pytest.raises(AttributeError, match=msg):\n data.apply(transform, axis=1)\n\n\n@pytest.mark.parametrize(\n "df, func, expected",\n tm.get_cython_table_params(\n DataFrame([["a", "b"], ["b", "a"]]), [["cumprod", TypeError]]\n ),\n)\ndef test_agg_cython_table_raises_frame(df, func, expected, axis, using_infer_string):\n # GH 21224\n if using_infer_string:\n expected = (expected, NotImplementedError)\n\n msg = (\n "can't multiply sequence by non-int of type 'str'"\n "|cannot perform cumprod with type str" # NotImplementedError python backend\n "|operation 'cumprod' not supported for dtype 'str'" # TypeError pyarrow\n )\n warn = 
None if isinstance(func, str) else FutureWarning\n with pytest.raises(expected, match=msg):\n with tm.assert_produces_warning(warn, match="using DataFrame.cumprod"):\n df.agg(func, axis=axis)\n\n\n@pytest.mark.parametrize(\n "series, func, expected",\n chain(\n tm.get_cython_table_params(\n Series("a b c".split()),\n [\n ("mean", TypeError), # mean raises TypeError\n ("prod", TypeError),\n ("std", TypeError),\n ("var", TypeError),\n ("median", TypeError),\n ("cumprod", TypeError),\n ],\n )\n ),\n)\ndef test_agg_cython_table_raises_series(series, func, expected, using_infer_string):\n # GH21224\n msg = r"[Cc]ould not convert|can't multiply sequence by non-int of type"\n if func == "median" or func is np.nanmedian or func is np.median:\n msg = r"Cannot convert \['a' 'b' 'c'\] to numeric"\n\n if using_infer_string and func in ("cumprod", np.cumprod, np.nancumprod):\n expected = (expected, NotImplementedError)\n\n msg = (\n msg + "|does not support|has no kernel|Cannot perform|cannot perform|operation"\n )\n warn = None if isinstance(func, str) else FutureWarning\n\n with pytest.raises(expected, match=msg):\n # e.g. 
Series('a b'.split()).cumprod() will raise\n with tm.assert_produces_warning(warn, match="is currently using Series.*"):\n series.agg(func)\n\n\ndef test_agg_none_to_type():\n # GH 40543\n df = DataFrame({"a": [None]})\n msg = re.escape("int() argument must be a string")\n with pytest.raises(TypeError, match=msg):\n df.agg({"a": lambda x: int(x.iloc[0])})\n\n\ndef test_transform_none_to_type():\n # GH#34377\n df = DataFrame({"a": [None]})\n msg = "argument must be a"\n with pytest.raises(TypeError, match=msg):\n df.transform({"a": lambda x: int(x.iloc[0])})\n\n\n@pytest.mark.parametrize(\n "func",\n [\n lambda x: np.array([1, 2]).reshape(-1, 2),\n lambda x: [1, 2],\n lambda x: Series([1, 2]),\n ],\n)\ndef test_apply_broadcast_error(func):\n df = DataFrame(\n np.tile(np.arange(3, dtype="int64"), 6).reshape(6, -1) + 1,\n columns=["A", "B", "C"],\n )\n\n # > 1 ndim\n msg = "too many dims to broadcast|cannot broadcast result"\n with pytest.raises(ValueError, match=msg):\n df.apply(func, axis=1, result_type="broadcast")\n\n\ndef test_transform_and_agg_err_agg(axis, float_frame):\n # cannot both transform and agg\n msg = "cannot combine transform and aggregation operations"\n with pytest.raises(ValueError, match=msg):\n with np.errstate(all="ignore"):\n float_frame.agg(["max", "sqrt"], axis=axis)\n\n\n@pytest.mark.filterwarnings("ignore::FutureWarning") # GH53325\n@pytest.mark.parametrize(\n "func, msg",\n [\n (["sqrt", "max"], "cannot combine transform and aggregation"),\n (\n {"foo": np.sqrt, "bar": "sum"},\n "cannot perform both aggregation and transformation",\n ),\n ],\n)\ndef test_transform_and_agg_err_series(string_series, func, msg):\n # we are trying to transform with an aggregator\n with pytest.raises(ValueError, match=msg):\n with np.errstate(all="ignore"):\n string_series.agg(func)\n\n\n@pytest.mark.parametrize("func", [["max", "min"], ["max", "sqrt"]])\ndef test_transform_wont_agg_frame(axis, float_frame, func):\n # GH 35964\n # cannot both transform and 
agg\n msg = "Function did not transform"\n with pytest.raises(ValueError, match=msg):\n float_frame.transform(func, axis=axis)\n\n\n@pytest.mark.parametrize("func", [["min", "max"], ["sqrt", "max"]])\ndef test_transform_wont_agg_series(string_series, func):\n # GH 35964\n # we are trying to transform with an aggregator\n msg = "Function did not transform"\n\n with pytest.raises(ValueError, match=msg):\n string_series.transform(func)\n\n\n@pytest.mark.parametrize(\n "op_wrapper", [lambda x: x, lambda x: [x], lambda x: {"A": x}, lambda x: {"A": [x]}]\n)\ndef test_transform_reducer_raises(all_reductions, frame_or_series, op_wrapper):\n # GH 35964\n op = op_wrapper(all_reductions)\n\n obj = DataFrame({"A": [1, 2, 3]})\n obj = tm.get_obj(obj, frame_or_series)\n\n msg = "Function did not transform"\n with pytest.raises(ValueError, match=msg):\n obj.transform(op)\n | .venv\Lib\site-packages\pandas\tests\apply\test_invalid_arg.py | test_invalid_arg.py | Python | 11,176 | 0.95 | 0.099174 | 0.10101 | vue-tools | 965 | 2025-06-09T15:43:22.705304 | GPL-3.0 | true | 16b06d1d6579e71979c4828c6ad5c1ad |
import numpy as np
import pytest

from pandas.compat import is_platform_arm
import pandas.util._test_decorators as td

import pandas as pd
from pandas import (
    DataFrame,
    Index,
)
import pandas._testing as tm
from pandas.util.version import Version

# Fix: the original list ended with a bare ``pytest.mark.skipif()``.
# ``skipif`` without a condition skips nothing and is dead weight, so it
# has been removed; the real conditional skip is appended below once the
# numba version is known.
pytestmark = [td.skip_if_no("numba"), pytest.mark.single_cpu]

numba = pytest.importorskip("numba")
pytestmark.append(
    pytest.mark.skipif(
        Version(numba.__version__) == Version("0.61") and is_platform_arm(),
        reason=f"Segfaults on ARM platforms with numba {numba.__version__}",
    )
)


@pytest.fixture(params=[0, 1])
def apply_axis(request):
    """Run each test once per apply axis (0 = columns, 1 = rows)."""
    return request.param


def test_numba_vs_python_noop(float_frame, apply_axis):
    # identity function must round-trip unchanged under both engines
    func = lambda x: x
    result = float_frame.apply(func, engine="numba", axis=apply_axis)
    expected = float_frame.apply(func, engine="python", axis=apply_axis)
    tm.assert_frame_equal(result, expected)


def test_numba_vs_python_string_index():
    # GH#56189: string-dtype labels must not break the numba engine
    df = DataFrame(
        1,
        index=Index(["a", "b"], dtype=pd.StringDtype(na_value=np.nan)),
        columns=Index(["x", "y"], dtype=pd.StringDtype(na_value=np.nan)),
    )
    func = lambda x: x
    result = df.apply(func, engine="numba", axis=0)
    expected = df.apply(func, engine="python", axis=0)
    tm.assert_frame_equal(
        result, expected, check_column_type=False, check_index_type=False
    )


def test_numba_vs_python_indexing():
    # label-based indexing inside the UDF must agree across engines
    frame = DataFrame(
        {"a": [1, 2, 3], "b": [4, 5, 6], "c": [7.0, 8.0, 9.0]},
        index=Index(["A", "B", "C"]),
    )
    row_func = lambda x: x["c"]
    result = frame.apply(row_func, engine="numba", axis=1)
    expected = frame.apply(row_func, engine="python", axis=1)
    tm.assert_series_equal(result, expected)

    col_func = lambda x: x["A"]
    result = frame.apply(col_func, engine="numba", axis=0)
    expected = frame.apply(col_func, engine="python", axis=0)
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize(
    "reduction",
    [lambda x: x.mean(), lambda x: x.min(), lambda x: x.max(), lambda x: x.sum()],
)
def test_numba_vs_python_reductions(reduction, apply_axis):
    # reducing UDFs must agree across engines on either axis
    df = DataFrame(np.ones((4, 4), dtype=np.float64))
    result = df.apply(reduction, engine="numba", axis=apply_axis)
    expected = df.apply(reduction, engine="python", axis=apply_axis)
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize("colnames", [[1, 2, 3], [1.0, 2.0, 3.0]])
def test_numba_numeric_colnames(colnames):
    # Check that numeric column names lower properly and can be indexed on
    df = DataFrame(
        np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int64), columns=colnames
    )
    first_col = colnames[0]
    f = lambda x: x[first_col]  # get the first column
    result = df.apply(f, engine="numba", axis=1)
    expected = df.apply(f, engine="python", axis=1)
    tm.assert_series_equal(result, expected)


def test_numba_parallel_unsupported(float_frame):
    # parallel=True is only supported with raw=True
    f = lambda x: x
    with pytest.raises(
        NotImplementedError,
        match="Parallel apply is not supported when raw=False and engine='numba'",
    ):
        float_frame.apply(f, engine="numba", engine_kwargs={"parallel": True})


def test_numba_nonunique_unsupported(apply_axis):
    # duplicate labels cannot be represented in the numba path
    f = lambda x: x
    df = DataFrame({"a": [1, 2]}, index=Index(["a", "a"]))
    with pytest.raises(
        NotImplementedError,
        match="The index/columns must be unique when raw=False and engine='numba'",
    ):
        df.apply(f, engine="numba", axis=apply_axis)


def test_numba_unsupported_dtypes(apply_axis):
    # object and extension-array columns are rejected with clear messages
    pytest.importorskip("pyarrow")
    f = lambda x: x
    df = DataFrame({"a": [1, 2], "b": ["a", "b"], "c": [4, 5]})
    df["c"] = df["c"].astype("double[pyarrow]")

    with pytest.raises(
        ValueError,
        match="Column b must have a numeric dtype. Found 'object|str' instead",
    ):
        df.apply(f, engine="numba", axis=apply_axis)

    with pytest.raises(
        ValueError,
        match="Column c is backed by an extension array, "
        "which is not supported by the numba engine.",
    ):
        df["c"].to_frame().apply(f, engine="numba", axis=apply_axis)
import numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n Series,\n concat,\n date_range,\n timedelta_range,\n)\nimport pandas._testing as tm\nfrom pandas.tests.apply.common import series_transform_kernels\n\n\n@pytest.fixture(params=[False, "compat"])\ndef by_row(request):\n return request.param\n\n\ndef test_series_map_box_timedelta(by_row):\n # GH#11349\n ser = Series(timedelta_range("1 day 1 s", periods=3, freq="h"))\n\n def f(x):\n return x.total_seconds() if by_row else x.dt.total_seconds()\n\n result = ser.apply(f, by_row=by_row)\n\n expected = ser.map(lambda x: x.total_seconds())\n tm.assert_series_equal(result, expected)\n\n expected = Series([86401.0, 90001.0, 93601.0])\n tm.assert_series_equal(result, expected)\n\n\ndef test_apply(datetime_series, by_row):\n result = datetime_series.apply(np.sqrt, by_row=by_row)\n with np.errstate(all="ignore"):\n expected = np.sqrt(datetime_series)\n tm.assert_series_equal(result, expected)\n\n # element-wise apply (ufunc)\n result = datetime_series.apply(np.exp, by_row=by_row)\n expected = np.exp(datetime_series)\n tm.assert_series_equal(result, expected)\n\n # empty series\n s = Series(dtype=object, name="foo", index=Index([], name="bar"))\n rs = s.apply(lambda x: x, by_row=by_row)\n tm.assert_series_equal(s, rs)\n\n # check all metadata (GH 9322)\n assert s is not rs\n assert s.index is rs.index\n assert s.dtype == rs.dtype\n assert s.name == rs.name\n\n # index but no data\n s = Series(index=[1, 2, 3], dtype=np.float64)\n rs = s.apply(lambda x: x, by_row=by_row)\n tm.assert_series_equal(s, rs)\n\n\ndef test_apply_map_same_length_inference_bug():\n s = Series([1, 2])\n\n def f(x):\n return (x, x + 1)\n\n result = s.apply(f, by_row="compat")\n expected = s.map(f)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("convert_dtype", [True, False])\ndef test_apply_convert_dtype_deprecated(convert_dtype):\n ser = 
Series(np.random.default_rng(2).standard_normal(10))\n\n def func(x):\n return x if x > 0 else np.nan\n\n with tm.assert_produces_warning(FutureWarning):\n ser.apply(func, convert_dtype=convert_dtype, by_row="compat")\n\n\ndef test_apply_args():\n s = Series(["foo,bar"])\n\n result = s.apply(str.split, args=(",",))\n assert result[0] == ["foo", "bar"]\n assert isinstance(result[0], list)\n\n\n@pytest.mark.parametrize(\n "args, kwargs, increment",\n [((), {}, 0), ((), {"a": 1}, 1), ((2, 3), {}, 32), ((1,), {"c": 2}, 201)],\n)\ndef test_agg_args(args, kwargs, increment):\n # GH 43357\n def f(x, a=0, b=0, c=0):\n return x + a + 10 * b + 100 * c\n\n s = Series([1, 2])\n msg = (\n "in Series.agg cannot aggregate and has been deprecated. "\n "Use Series.transform to keep behavior unchanged."\n )\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = s.agg(f, 0, *args, **kwargs)\n expected = s + increment\n tm.assert_series_equal(result, expected)\n\n\ndef test_agg_mapping_func_deprecated():\n # GH 53325\n s = Series([1, 2, 3])\n\n def foo1(x, a=1, c=0):\n return x + a + c\n\n def foo2(x, b=2, c=0):\n return x + b + c\n\n msg = "using .+ in Series.agg cannot aggregate and"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n s.agg(foo1, 0, 3, c=4)\n with tm.assert_produces_warning(FutureWarning, match=msg):\n s.agg([foo1, foo2], 0, 3, c=4)\n with tm.assert_produces_warning(FutureWarning, match=msg):\n s.agg({"a": foo1, "b": foo2}, 0, 3, c=4)\n\n\ndef test_series_apply_map_box_timestamps(by_row):\n # GH#2689, GH#2627\n ser = Series(date_range("1/1/2000", periods=10))\n\n def func(x):\n return (x.hour, x.day, x.month)\n\n if not by_row:\n msg = "Series' object has no attribute 'hour'"\n with pytest.raises(AttributeError, match=msg):\n ser.apply(func, by_row=by_row)\n return\n\n result = ser.apply(func, by_row=by_row)\n expected = ser.map(func)\n tm.assert_series_equal(result, expected)\n\n\ndef test_apply_box_dt64():\n # ufunc will not be boxed. 
Same test cases as the test_map_box\n vals = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]\n ser = Series(vals, dtype="M8[ns]")\n assert ser.dtype == "datetime64[ns]"\n # boxed value must be Timestamp instance\n res = ser.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}", by_row="compat")\n exp = Series(["Timestamp_1_None", "Timestamp_2_None"])\n tm.assert_series_equal(res, exp)\n\n\ndef test_apply_box_dt64tz():\n vals = [\n pd.Timestamp("2011-01-01", tz="US/Eastern"),\n pd.Timestamp("2011-01-02", tz="US/Eastern"),\n ]\n ser = Series(vals, dtype="M8[ns, US/Eastern]")\n assert ser.dtype == "datetime64[ns, US/Eastern]"\n res = ser.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}", by_row="compat")\n exp = Series(["Timestamp_1_US/Eastern", "Timestamp_2_US/Eastern"])\n tm.assert_series_equal(res, exp)\n\n\ndef test_apply_box_td64():\n # timedelta\n vals = [pd.Timedelta("1 days"), pd.Timedelta("2 days")]\n ser = Series(vals)\n assert ser.dtype == "timedelta64[ns]"\n res = ser.apply(lambda x: f"{type(x).__name__}_{x.days}", by_row="compat")\n exp = Series(["Timedelta_1", "Timedelta_2"])\n tm.assert_series_equal(res, exp)\n\n\ndef test_apply_box_period():\n # period\n vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]\n ser = Series(vals)\n assert ser.dtype == "Period[M]"\n res = ser.apply(lambda x: f"{type(x).__name__}_{x.freqstr}", by_row="compat")\n exp = Series(["Period_M", "Period_M"])\n tm.assert_series_equal(res, exp)\n\n\ndef test_apply_datetimetz(by_row):\n values = date_range("2011-01-01", "2011-01-02", freq="h").tz_localize("Asia/Tokyo")\n s = Series(values, name="XX")\n\n result = s.apply(lambda x: x + pd.offsets.Day(), by_row=by_row)\n exp_values = date_range("2011-01-02", "2011-01-03", freq="h").tz_localize(\n "Asia/Tokyo"\n )\n exp = Series(exp_values, name="XX")\n tm.assert_series_equal(result, exp)\n\n result = s.apply(lambda x: x.hour if by_row else x.dt.hour, by_row=by_row)\n exp = Series(list(range(24)) + 
[0], name="XX", dtype="int64" if by_row else "int32")\n tm.assert_series_equal(result, exp)\n\n # not vectorized\n def f(x):\n return str(x.tz) if by_row else str(x.dt.tz)\n\n result = s.apply(f, by_row=by_row)\n if by_row:\n exp = Series(["Asia/Tokyo"] * 25, name="XX")\n tm.assert_series_equal(result, exp)\n else:\n assert result == "Asia/Tokyo"\n\n\ndef test_apply_categorical(by_row, using_infer_string):\n values = pd.Categorical(list("ABBABCD"), categories=list("DCBA"), ordered=True)\n ser = Series(values, name="XX", index=list("abcdefg"))\n\n if not by_row:\n msg = "Series' object has no attribute 'lower"\n with pytest.raises(AttributeError, match=msg):\n ser.apply(lambda x: x.lower(), by_row=by_row)\n assert ser.apply(lambda x: "A", by_row=by_row) == "A"\n return\n\n result = ser.apply(lambda x: x.lower(), by_row=by_row)\n\n # should be categorical dtype when the number of categories are\n # the same\n values = pd.Categorical(list("abbabcd"), categories=list("dcba"), ordered=True)\n exp = Series(values, name="XX", index=list("abcdefg"))\n tm.assert_series_equal(result, exp)\n tm.assert_categorical_equal(result.values, exp.values)\n\n result = ser.apply(lambda x: "A")\n exp = Series(["A"] * 7, name="XX", index=list("abcdefg"))\n tm.assert_series_equal(result, exp)\n assert result.dtype == object if not using_infer_string else "str"\n\n\n@pytest.mark.parametrize("series", [["1-1", "1-1", np.nan], ["1-1", "1-2", np.nan]])\ndef test_apply_categorical_with_nan_values(series, by_row):\n # GH 20714 bug fixed in: GH 24275\n s = Series(series, dtype="category")\n if not by_row:\n msg = "'Series' object has no attribute 'split'"\n with pytest.raises(AttributeError, match=msg):\n s.apply(lambda x: x.split("-")[0], by_row=by_row)\n return\n\n result = s.apply(lambda x: x.split("-")[0], by_row=by_row)\n result = result.astype(object)\n expected = Series(["1", "1", np.nan], dtype="category")\n expected = expected.astype(object)\n tm.assert_series_equal(result, 
expected)\n\n\ndef test_apply_empty_integer_series_with_datetime_index(by_row):\n # GH 21245\n s = Series([], index=date_range(start="2018-01-01", periods=0), dtype=int)\n result = s.apply(lambda x: x, by_row=by_row)\n tm.assert_series_equal(result, s)\n\n\ndef test_apply_dataframe_iloc():\n uintDF = DataFrame(np.uint64([1, 2, 3, 4, 5]), columns=["Numbers"])\n indexDF = DataFrame([2, 3, 2, 1, 2], columns=["Indices"])\n\n def retrieve(targetRow, targetDF):\n val = targetDF["Numbers"].iloc[targetRow]\n return val\n\n result = indexDF["Indices"].apply(retrieve, args=(uintDF,))\n expected = Series([3, 4, 3, 2, 3], name="Indices", dtype="uint64")\n tm.assert_series_equal(result, expected)\n\n\ndef test_transform(string_series, by_row):\n # transforming functions\n\n with np.errstate(all="ignore"):\n f_sqrt = np.sqrt(string_series)\n f_abs = np.abs(string_series)\n\n # ufunc\n result = string_series.apply(np.sqrt, by_row=by_row)\n expected = f_sqrt.copy()\n tm.assert_series_equal(result, expected)\n\n # list-like\n result = string_series.apply([np.sqrt], by_row=by_row)\n expected = f_sqrt.to_frame().copy()\n expected.columns = ["sqrt"]\n tm.assert_frame_equal(result, expected)\n\n result = string_series.apply(["sqrt"], by_row=by_row)\n tm.assert_frame_equal(result, expected)\n\n # multiple items in list\n # these are in the order as if we are applying both functions per\n # series and then concatting\n expected = concat([f_sqrt, f_abs], axis=1)\n expected.columns = ["sqrt", "absolute"]\n result = string_series.apply([np.sqrt, np.abs], by_row=by_row)\n tm.assert_frame_equal(result, expected)\n\n # dict, provide renaming\n expected = concat([f_sqrt, f_abs], axis=1)\n expected.columns = ["foo", "bar"]\n expected = expected.unstack().rename("series")\n\n result = string_series.apply({"foo": np.sqrt, "bar": np.abs}, by_row=by_row)\n tm.assert_series_equal(result.reindex_like(expected), expected)\n\n\n@pytest.mark.parametrize("op", series_transform_kernels)\ndef 
test_transform_partial_failure(op, request):\n # GH 35964\n if op in ("ffill", "bfill", "pad", "backfill", "shift"):\n request.applymarker(\n pytest.mark.xfail(reason=f"{op} is successful on any dtype")\n )\n\n # Using object makes most transform kernels fail\n ser = Series(3 * [object])\n\n if op in ("fillna", "ngroup"):\n error = ValueError\n msg = "Transform function failed"\n else:\n error = TypeError\n msg = "|".join(\n [\n "not supported between instances of 'type' and 'type'",\n "unsupported operand type",\n ]\n )\n\n with pytest.raises(error, match=msg):\n ser.transform([op, "shift"])\n\n with pytest.raises(error, match=msg):\n ser.transform({"A": op, "B": "shift"})\n\n with pytest.raises(error, match=msg):\n ser.transform({"A": [op], "B": ["shift"]})\n\n with pytest.raises(error, match=msg):\n ser.transform({"A": [op, "shift"], "B": [op]})\n\n\ndef test_transform_partial_failure_valueerror():\n # GH 40211\n def noop(x):\n return x\n\n def raising_op(_):\n raise ValueError\n\n ser = Series(3 * [object])\n msg = "Transform function failed"\n\n with pytest.raises(ValueError, match=msg):\n ser.transform([noop, raising_op])\n\n with pytest.raises(ValueError, match=msg):\n ser.transform({"A": raising_op, "B": noop})\n\n with pytest.raises(ValueError, match=msg):\n ser.transform({"A": [raising_op], "B": [noop]})\n\n with pytest.raises(ValueError, match=msg):\n ser.transform({"A": [noop, raising_op], "B": [noop]})\n\n\ndef test_demo():\n # demonstration tests\n s = Series(range(6), dtype="int64", name="series")\n\n result = s.agg(["min", "max"])\n expected = Series([0, 5], index=["min", "max"], name="series")\n tm.assert_series_equal(result, expected)\n\n result = s.agg({"foo": "min"})\n expected = Series([0], index=["foo"], name="series")\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("func", [str, lambda x: str(x)])\ndef test_apply_map_evaluate_lambdas_the_same(string_series, func, by_row):\n # test that we are evaluating row-by-row 
first if by_row="compat"\n # else vectorized evaluation\n result = string_series.apply(func, by_row=by_row)\n\n if by_row:\n expected = string_series.map(func)\n tm.assert_series_equal(result, expected)\n else:\n assert result == str(string_series)\n\n\ndef test_agg_evaluate_lambdas(string_series):\n # GH53325\n # in the future, the result will be a Series class.\n\n with tm.assert_produces_warning(FutureWarning):\n result = string_series.agg(lambda x: type(x))\n assert isinstance(result, Series) and len(result) == len(string_series)\n\n with tm.assert_produces_warning(FutureWarning):\n result = string_series.agg(type)\n assert isinstance(result, Series) and len(result) == len(string_series)\n\n\n@pytest.mark.parametrize("op_name", ["agg", "apply"])\ndef test_with_nested_series(datetime_series, op_name):\n # GH 2316\n # .agg with a reducer and a transform, what to do\n msg = "cannot aggregate"\n warning = FutureWarning if op_name == "agg" else None\n with tm.assert_produces_warning(warning, match=msg):\n # GH52123\n result = getattr(datetime_series, op_name)(\n lambda x: Series([x, x**2], index=["x", "x^2"])\n )\n expected = DataFrame({"x": datetime_series, "x^2": datetime_series**2})\n tm.assert_frame_equal(result, expected)\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = datetime_series.agg(lambda x: Series([x, x**2], index=["x", "x^2"]))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_replicate_describe(string_series):\n # this also tests a result set that is all scalars\n expected = string_series.describe()\n result = string_series.apply(\n {\n "count": "count",\n "mean": "mean",\n "std": "std",\n "min": "min",\n "25%": lambda x: x.quantile(0.25),\n "50%": "median",\n "75%": lambda x: x.quantile(0.75),\n "max": "max",\n },\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_reduce(string_series):\n # reductions with named functions\n result = string_series.agg(["sum", "mean"])\n expected = Series(\n 
[string_series.sum(), string_series.mean()],\n ["sum", "mean"],\n name=string_series.name,\n )\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "how, kwds",\n [("agg", {}), ("apply", {"by_row": "compat"}), ("apply", {"by_row": False})],\n)\ndef test_non_callable_aggregates(how, kwds):\n # test agg using non-callable series attributes\n # GH 39116 - expand to apply\n s = Series([1, 2, None])\n\n # Calling agg w/ just a string arg same as calling s.arg\n result = getattr(s, how)("size", **kwds)\n expected = s.size\n assert result == expected\n\n # test when mixed w/ callable reducers\n result = getattr(s, how)(["size", "count", "mean"], **kwds)\n expected = Series({"size": 3.0, "count": 2.0, "mean": 1.5})\n tm.assert_series_equal(result, expected)\n\n result = getattr(s, how)({"size": "size", "count": "count", "mean": "mean"}, **kwds)\n tm.assert_series_equal(result, expected)\n\n\ndef test_series_apply_no_suffix_index(by_row):\n # GH36189\n s = Series([4] * 3)\n result = s.apply(["sum", lambda x: x.sum(), lambda x: x.sum()], by_row=by_row)\n expected = Series([12, 12, 12], index=["sum", "<lambda>", "<lambda>"])\n\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "dti,exp",\n [\n (\n Series([1, 2], index=pd.DatetimeIndex([0, 31536000000])),\n DataFrame(np.repeat([[1, 2]], 2, axis=0), dtype="int64"),\n ),\n (\n Series(\n np.arange(10, dtype=np.float64),\n index=date_range("2020-01-01", periods=10),\n name="ts",\n ),\n DataFrame(np.repeat([[1, 2]], 10, axis=0), dtype="int64"),\n ),\n ],\n)\n@pytest.mark.parametrize("aware", [True, False])\ndef test_apply_series_on_date_time_index_aware_series(dti, exp, aware):\n # GH 25959\n # Calling apply on a localized time series should not cause an error\n if aware:\n index = dti.tz_localize("UTC").index\n else:\n index = dti.index\n result = Series(index).apply(lambda x: Series([1, 2]))\n tm.assert_frame_equal(result, exp)\n\n\n@pytest.mark.parametrize(\n "by_row, expected", 
[("compat", Series(np.ones(10), dtype="int64")), (False, 1)]\n)\ndef test_apply_scalar_on_date_time_index_aware_series(by_row, expected):\n # GH 25959\n # Calling apply on a localized time series should not cause an error\n series = Series(\n np.arange(10, dtype=np.float64),\n index=date_range("2020-01-01", periods=10, tz="UTC"),\n )\n result = Series(series.index).apply(lambda x: 1, by_row=by_row)\n tm.assert_equal(result, expected)\n\n\ndef test_apply_to_timedelta(by_row):\n list_of_valid_strings = ["00:00:01", "00:00:02"]\n a = pd.to_timedelta(list_of_valid_strings)\n b = Series(list_of_valid_strings).apply(pd.to_timedelta, by_row=by_row)\n tm.assert_series_equal(Series(a), b)\n\n list_of_strings = ["00:00:01", np.nan, pd.NaT, pd.NaT]\n\n a = pd.to_timedelta(list_of_strings)\n ser = Series(list_of_strings)\n b = ser.apply(pd.to_timedelta, by_row=by_row)\n tm.assert_series_equal(Series(a), b)\n\n\n@pytest.mark.parametrize(\n "ops, names",\n [\n ([np.sum], ["sum"]),\n ([np.sum, np.mean], ["sum", "mean"]),\n (np.array([np.sum]), ["sum"]),\n (np.array([np.sum, np.mean]), ["sum", "mean"]),\n ],\n)\n@pytest.mark.parametrize(\n "how, kwargs",\n [["agg", {}], ["apply", {"by_row": "compat"}], ["apply", {"by_row": False}]],\n)\ndef test_apply_listlike_reducer(string_series, ops, names, how, kwargs):\n # GH 39140\n expected = Series({name: op(string_series) for name, op in zip(names, ops)})\n expected.name = "series"\n warn = FutureWarning if how == "agg" else None\n msg = f"using Series.[{'|'.join(names)}]"\n with tm.assert_produces_warning(warn, match=msg):\n result = getattr(string_series, how)(ops, **kwargs)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "ops",\n [\n {"A": np.sum},\n {"A": np.sum, "B": np.mean},\n Series({"A": np.sum}),\n Series({"A": np.sum, "B": np.mean}),\n ],\n)\n@pytest.mark.parametrize(\n "how, kwargs",\n [["agg", {}], ["apply", {"by_row": "compat"}], ["apply", {"by_row": False}]],\n)\ndef 
test_apply_dictlike_reducer(string_series, ops, how, kwargs, by_row):\n # GH 39140\n expected = Series({name: op(string_series) for name, op in ops.items()})\n expected.name = string_series.name\n warn = FutureWarning if how == "agg" else None\n msg = "using Series.[sum|mean]"\n with tm.assert_produces_warning(warn, match=msg):\n result = getattr(string_series, how)(ops, **kwargs)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "ops, names",\n [\n ([np.sqrt], ["sqrt"]),\n ([np.abs, np.sqrt], ["absolute", "sqrt"]),\n (np.array([np.sqrt]), ["sqrt"]),\n (np.array([np.abs, np.sqrt]), ["absolute", "sqrt"]),\n ],\n)\ndef test_apply_listlike_transformer(string_series, ops, names, by_row):\n # GH 39140\n with np.errstate(all="ignore"):\n expected = concat([op(string_series) for op in ops], axis=1)\n expected.columns = names\n result = string_series.apply(ops, by_row=by_row)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "ops, expected",\n [\n ([lambda x: x], DataFrame({"<lambda>": [1, 2, 3]})),\n ([lambda x: x.sum()], Series([6], index=["<lambda>"])),\n ],\n)\ndef test_apply_listlike_lambda(ops, expected, by_row):\n # GH53400\n ser = Series([1, 2, 3])\n result = ser.apply(ops, by_row=by_row)\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "ops",\n [\n {"A": np.sqrt},\n {"A": np.sqrt, "B": np.exp},\n Series({"A": np.sqrt}),\n Series({"A": np.sqrt, "B": np.exp}),\n ],\n)\ndef test_apply_dictlike_transformer(string_series, ops, by_row):\n # GH 39140\n with np.errstate(all="ignore"):\n expected = concat({name: op(string_series) for name, op in ops.items()})\n expected.name = string_series.name\n result = string_series.apply(ops, by_row=by_row)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "ops, expected",\n [\n (\n {"a": lambda x: x},\n Series([1, 2, 3], index=MultiIndex.from_arrays([["a"] * 3, range(3)])),\n ),\n ({"a": lambda x: x.sum()}, Series([6], index=["a"])),\n 
],\n)\ndef test_apply_dictlike_lambda(ops, by_row, expected):\n # GH53400\n ser = Series([1, 2, 3])\n result = ser.apply(ops, by_row=by_row)\n tm.assert_equal(result, expected)\n\n\ndef test_apply_retains_column_name(by_row):\n # GH 16380\n df = DataFrame({"x": range(3)}, Index(range(3), name="x"))\n result = df.x.apply(lambda x: Series(range(x + 1), Index(range(x + 1), name="y")))\n expected = DataFrame(\n [[0.0, np.nan, np.nan], [0.0, 1.0, np.nan], [0.0, 1.0, 2.0]],\n columns=Index(range(3), name="y"),\n index=Index(range(3), name="x"),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_type():\n # GH 46719\n s = Series([3, "string", float], index=["a", "b", "c"])\n result = s.apply(type)\n expected = Series([int, str, type], index=["a", "b", "c"])\n tm.assert_series_equal(result, expected)\n\n\ndef test_series_apply_unpack_nested_data():\n # GH#55189\n ser = Series([[1, 2, 3], [4, 5, 6, 7]])\n result = ser.apply(lambda x: Series(x))\n expected = DataFrame({0: [1.0, 4.0], 1: [2.0, 5.0], 2: [3.0, 6.0], 3: [np.nan, 7]})\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\apply\test_series_apply.py | test_series_apply.py | Python | 22,467 | 0.95 | 0.11127 | 0.098921 | vue-tools | 337 | 2024-09-28T13:46:22.346940 | GPL-3.0 | true | 5926b66443c75831dec1d8cc1588ddbb |
import pandas as pd\nimport pandas._testing as tm\n\n\ndef test_relabel_no_duplicated_method():\n # this is to test there is no duplicated method used in agg\n df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4]})\n\n result = df["A"].agg(foo="sum")\n expected = df["A"].agg({"foo": "sum"})\n tm.assert_series_equal(result, expected)\n\n result = df["B"].agg(foo="min", bar="max")\n expected = df["B"].agg({"foo": "min", "bar": "max"})\n tm.assert_series_equal(result, expected)\n\n msg = "using Series.[sum|min|max]"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df["B"].agg(foo=sum, bar=min, cat="max")\n msg = "using Series.[sum|min|max]"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n expected = df["B"].agg({"foo": sum, "bar": min, "cat": "max"})\n tm.assert_series_equal(result, expected)\n\n\ndef test_relabel_duplicated_method():\n # this is to test with nested renaming, duplicated method can be used\n # if they are assigned with different new names\n df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4]})\n\n result = df["A"].agg(foo="sum", bar="sum")\n expected = pd.Series([6, 6], index=["foo", "bar"], name="A")\n tm.assert_series_equal(result, expected)\n\n msg = "using Series.min"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df["B"].agg(foo=min, bar="min")\n expected = pd.Series([1, 1], index=["foo", "bar"], name="B")\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\apply\test_series_apply_relabeling.py | test_series_apply_relabeling.py | Python | 1,510 | 0.95 | 0.076923 | 0.1 | react-lib | 31 | 2023-11-25T02:05:16.519863 | GPL-3.0 | true | 17bb14c4f10540f7da1f50e3b9361470 |
import numpy as np\nimport pytest\n\nfrom pandas import (\n DataFrame,\n MultiIndex,\n Series,\n concat,\n)\nimport pandas._testing as tm\n\n\n@pytest.mark.parametrize(\n "args, kwargs, increment",\n [((), {}, 0), ((), {"a": 1}, 1), ((2, 3), {}, 32), ((1,), {"c": 2}, 201)],\n)\ndef test_agg_args(args, kwargs, increment):\n # GH 43357\n def f(x, a=0, b=0, c=0):\n return x + a + 10 * b + 100 * c\n\n s = Series([1, 2])\n result = s.transform(f, 0, *args, **kwargs)\n expected = s + increment\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "ops, names",\n [\n ([np.sqrt], ["sqrt"]),\n ([np.abs, np.sqrt], ["absolute", "sqrt"]),\n (np.array([np.sqrt]), ["sqrt"]),\n (np.array([np.abs, np.sqrt]), ["absolute", "sqrt"]),\n ],\n)\ndef test_transform_listlike(string_series, ops, names):\n # GH 35964\n with np.errstate(all="ignore"):\n expected = concat([op(string_series) for op in ops], axis=1)\n expected.columns = names\n result = string_series.transform(ops)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_transform_listlike_func_with_args():\n # GH 50624\n\n s = Series([1, 2, 3])\n\n def foo1(x, a=1, c=0):\n return x + a + c\n\n def foo2(x, b=2, c=0):\n return x + b + c\n\n msg = r"foo1\(\) got an unexpected keyword argument 'b'"\n with pytest.raises(TypeError, match=msg):\n s.transform([foo1, foo2], 0, 3, b=3, c=4)\n\n result = s.transform([foo1, foo2], 0, 3, c=4)\n expected = DataFrame({"foo1": [8, 9, 10], "foo2": [8, 9, 10]})\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("box", [dict, Series])\ndef test_transform_dictlike(string_series, box):\n # GH 35964\n with np.errstate(all="ignore"):\n expected = concat([np.sqrt(string_series), np.abs(string_series)], axis=1)\n expected.columns = ["foo", "bar"]\n result = string_series.transform(box({"foo": np.sqrt, "bar": np.abs}))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_transform_dictlike_mixed():\n # GH 40018 - mix of lists and non-lists in values of a 
dictionary\n df = Series([1, 4])\n result = df.transform({"b": ["sqrt", "abs"], "c": "sqrt"})\n expected = DataFrame(\n [[1.0, 1, 1.0], [2.0, 4, 2.0]],\n columns=MultiIndex([("b", "c"), ("sqrt", "abs")], [(0, 0, 1), (0, 1, 0)]),\n )\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\apply\test_series_transform.py | test_series_transform.py | Python | 2,404 | 0.95 | 0.107143 | 0.074627 | node-utils | 85 | 2023-09-03T00:29:45.536830 | Apache-2.0 | true | 7e014ea00ae6f8ef47bf12c8f2d5e311 |
from itertools import chain\nimport operator\n\nimport numpy as np\nimport pytest\n\nfrom pandas.core.dtypes.common import is_number\n\nfrom pandas import (\n DataFrame,\n Series,\n)\nimport pandas._testing as tm\nfrom pandas.tests.apply.common import (\n frame_transform_kernels,\n series_transform_kernels,\n)\n\n\n@pytest.mark.parametrize("func", ["sum", "mean", "min", "max", "std"])\n@pytest.mark.parametrize(\n "args,kwds",\n [\n pytest.param([], {}, id="no_args_or_kwds"),\n pytest.param([1], {}, id="axis_from_args"),\n pytest.param([], {"axis": 1}, id="axis_from_kwds"),\n pytest.param([], {"numeric_only": True}, id="optional_kwds"),\n pytest.param([1, True], {"numeric_only": True}, id="args_and_kwds"),\n ],\n)\n@pytest.mark.parametrize("how", ["agg", "apply"])\ndef test_apply_with_string_funcs(request, float_frame, func, args, kwds, how):\n if len(args) > 1 and how == "agg":\n request.applymarker(\n pytest.mark.xfail(\n raises=TypeError,\n reason="agg/apply signature mismatch - agg passes 2nd "\n "argument to func",\n )\n )\n result = getattr(float_frame, how)(func, *args, **kwds)\n expected = getattr(float_frame, func)(*args, **kwds)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("arg", ["sum", "mean", "min", "max", "std"])\ndef test_with_string_args(datetime_series, arg):\n result = datetime_series.apply(arg)\n expected = getattr(datetime_series, arg)()\n assert result == expected\n\n\n@pytest.mark.parametrize("op", ["mean", "median", "std", "var"])\n@pytest.mark.parametrize("how", ["agg", "apply"])\ndef test_apply_np_reducer(op, how):\n # GH 39116\n float_frame = DataFrame({"a": [1, 2], "b": [3, 4]})\n result = getattr(float_frame, how)(op)\n # pandas ddof defaults to 1, numpy to 0\n kwargs = {"ddof": 1} if op in ("std", "var") else {}\n expected = Series(\n getattr(np, op)(float_frame, axis=0, **kwargs), index=float_frame.columns\n )\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "op", ["abs", "ceil", 
"cos", "cumsum", "exp", "log", "sqrt", "square"]\n)\n@pytest.mark.parametrize("how", ["transform", "apply"])\ndef test_apply_np_transformer(float_frame, op, how):\n # GH 39116\n\n # float_frame will _usually_ have negative values, which will\n # trigger the warning here, but let's put one in just to be sure\n float_frame.iloc[0, 0] = -1.0\n warn = None\n if op in ["log", "sqrt"]:\n warn = RuntimeWarning\n\n with tm.assert_produces_warning(warn, check_stacklevel=False):\n # float_frame fixture is defined in conftest.py, so we don't check the\n # stacklevel as otherwise the test would fail.\n result = getattr(float_frame, how)(op)\n expected = getattr(np, op)(float_frame)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "series, func, expected",\n chain(\n tm.get_cython_table_params(\n Series(dtype=np.float64),\n [\n ("sum", 0),\n ("max", np.nan),\n ("min", np.nan),\n ("all", True),\n ("any", False),\n ("mean", np.nan),\n ("prod", 1),\n ("std", np.nan),\n ("var", np.nan),\n ("median", np.nan),\n ],\n ),\n tm.get_cython_table_params(\n Series([np.nan, 1, 2, 3]),\n [\n ("sum", 6),\n ("max", 3),\n ("min", 1),\n ("all", True),\n ("any", True),\n ("mean", 2),\n ("prod", 6),\n ("std", 1),\n ("var", 1),\n ("median", 2),\n ],\n ),\n tm.get_cython_table_params(\n Series("a b c".split()),\n [\n ("sum", "abc"),\n ("max", "c"),\n ("min", "a"),\n ("all", True),\n ("any", True),\n ],\n ),\n ),\n)\ndef test_agg_cython_table_series(series, func, expected):\n # GH21224\n # test reducing functions in\n # pandas.core.base.SelectionMixin._cython_table\n warn = None if isinstance(func, str) else FutureWarning\n with tm.assert_produces_warning(warn, match="is currently using Series.*"):\n result = series.agg(func)\n if is_number(expected):\n assert np.isclose(result, expected, equal_nan=True)\n else:\n assert result == expected\n\n\n@pytest.mark.parametrize(\n "series, func, expected",\n chain(\n tm.get_cython_table_params(\n Series(dtype=np.float64),\n [\n 
("cumprod", Series([], dtype=np.float64)),\n ("cumsum", Series([], dtype=np.float64)),\n ],\n ),\n tm.get_cython_table_params(\n Series([np.nan, 1, 2, 3]),\n [\n ("cumprod", Series([np.nan, 1, 2, 6])),\n ("cumsum", Series([np.nan, 1, 3, 6])),\n ],\n ),\n tm.get_cython_table_params(\n Series("a b c".split()), [("cumsum", Series(["a", "ab", "abc"]))]\n ),\n ),\n)\ndef test_agg_cython_table_transform_series(series, func, expected):\n # GH21224\n # test transforming functions in\n # pandas.core.base.SelectionMixin._cython_table (cumprod, cumsum)\n warn = None if isinstance(func, str) else FutureWarning\n with tm.assert_produces_warning(warn, match="is currently using Series.*"):\n result = series.agg(func)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "df, func, expected",\n chain(\n tm.get_cython_table_params(\n DataFrame(),\n [\n ("sum", Series(dtype="float64")),\n ("max", Series(dtype="float64")),\n ("min", Series(dtype="float64")),\n ("all", Series(dtype=bool)),\n ("any", Series(dtype=bool)),\n ("mean", Series(dtype="float64")),\n ("prod", Series(dtype="float64")),\n ("std", Series(dtype="float64")),\n ("var", Series(dtype="float64")),\n ("median", Series(dtype="float64")),\n ],\n ),\n tm.get_cython_table_params(\n DataFrame([[np.nan, 1], [1, 2]]),\n [\n ("sum", Series([1.0, 3])),\n ("max", Series([1.0, 2])),\n ("min", Series([1.0, 1])),\n ("all", Series([True, True])),\n ("any", Series([True, True])),\n ("mean", Series([1, 1.5])),\n ("prod", Series([1.0, 2])),\n ("std", Series([np.nan, 0.707107])),\n ("var", Series([np.nan, 0.5])),\n ("median", Series([1, 1.5])),\n ],\n ),\n ),\n)\ndef test_agg_cython_table_frame(df, func, expected, axis):\n # GH 21224\n # test reducing functions in\n # pandas.core.base.SelectionMixin._cython_table\n warn = None if isinstance(func, str) else FutureWarning\n with tm.assert_produces_warning(warn, match="is currently using DataFrame.*"):\n # GH#53425\n result = df.agg(func, axis=axis)\n 
tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "df, func, expected",\n chain(\n tm.get_cython_table_params(\n DataFrame(), [("cumprod", DataFrame()), ("cumsum", DataFrame())]\n ),\n tm.get_cython_table_params(\n DataFrame([[np.nan, 1], [1, 2]]),\n [\n ("cumprod", DataFrame([[np.nan, 1], [1, 2]])),\n ("cumsum", DataFrame([[np.nan, 1], [1, 3]])),\n ],\n ),\n ),\n)\ndef test_agg_cython_table_transform_frame(df, func, expected, axis):\n # GH 21224\n # test transforming functions in\n # pandas.core.base.SelectionMixin._cython_table (cumprod, cumsum)\n if axis in ("columns", 1):\n # operating blockwise doesn't let us preserve dtypes\n expected = expected.astype("float64")\n\n warn = None if isinstance(func, str) else FutureWarning\n with tm.assert_produces_warning(warn, match="is currently using DataFrame.*"):\n # GH#53425\n result = df.agg(func, axis=axis)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("op", series_transform_kernels)\ndef test_transform_groupby_kernel_series(request, string_series, op):\n # GH 35964\n if op == "ngroup":\n request.applymarker(\n pytest.mark.xfail(raises=ValueError, reason="ngroup not valid for NDFrame")\n )\n args = [0.0] if op == "fillna" else []\n ones = np.ones(string_series.shape[0])\n\n warn = FutureWarning if op == "fillna" else None\n msg = "SeriesGroupBy.fillna is deprecated"\n with tm.assert_produces_warning(warn, match=msg):\n expected = string_series.groupby(ones).transform(op, *args)\n result = string_series.transform(op, 0, *args)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("op", frame_transform_kernels)\ndef test_transform_groupby_kernel_frame(request, axis, float_frame, op):\n if op == "ngroup":\n request.applymarker(\n pytest.mark.xfail(raises=ValueError, reason="ngroup not valid for NDFrame")\n )\n\n # GH 35964\n\n args = [0.0] if op == "fillna" else []\n if axis in (0, "index"):\n ones = np.ones(float_frame.shape[0])\n msg = "The 'axis' 
keyword in DataFrame.groupby is deprecated"\n else:\n ones = np.ones(float_frame.shape[1])\n msg = "DataFrame.groupby with axis=1 is deprecated"\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n gb = float_frame.groupby(ones, axis=axis)\n\n warn = FutureWarning if op == "fillna" else None\n op_msg = "DataFrameGroupBy.fillna is deprecated"\n with tm.assert_produces_warning(warn, match=op_msg):\n expected = gb.transform(op, *args)\n\n result = float_frame.transform(op, axis, *args)\n tm.assert_frame_equal(result, expected)\n\n # same thing, but ensuring we have multiple blocks\n assert "E" not in float_frame.columns\n float_frame["E"] = float_frame["A"].copy()\n assert len(float_frame._mgr.arrays) > 1\n\n if axis in (0, "index"):\n ones = np.ones(float_frame.shape[0])\n else:\n ones = np.ones(float_frame.shape[1])\n with tm.assert_produces_warning(FutureWarning, match=msg):\n gb2 = float_frame.groupby(ones, axis=axis)\n warn = FutureWarning if op == "fillna" else None\n op_msg = "DataFrameGroupBy.fillna is deprecated"\n with tm.assert_produces_warning(warn, match=op_msg):\n expected2 = gb2.transform(op, *args)\n result2 = float_frame.transform(op, axis, *args)\n tm.assert_frame_equal(result2, expected2)\n\n\n@pytest.mark.parametrize("method", ["abs", "shift", "pct_change", "cumsum", "rank"])\ndef test_transform_method_name(method):\n # GH 19760\n df = DataFrame({"A": [-1, 2]})\n result = df.transform(method)\n expected = operator.methodcaller(method)(df)\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\apply\test_str.py | test_str.py | Python | 11,043 | 0.95 | 0.095092 | 0.089655 | awesome-app | 835 | 2024-03-14T08:07:33.441188 | Apache-2.0 | true | f9904394c3afbbc3e84b65dd7e2a3bec |
\n\n | .venv\Lib\site-packages\pandas\tests\apply\__pycache__\common.cpython-313.pyc | common.cpython-313.pyc | Other | 591 | 0.7 | 0 | 0 | node-utils | 981 | 2025-05-28T00:25:57.679860 | BSD-3-Clause | true | d5259732fcf78233c849ffd2be42cba4 |
\n\n | .venv\Lib\site-packages\pandas\tests\apply\__pycache__\test_frame_apply.cpython-313.pyc | test_frame_apply.cpython-313.pyc | Other | 93,735 | 0.75 | 0.002345 | 0.013924 | node-utils | 14 | 2024-07-26T06:04:19.397042 | MIT | true | d830056c06721de54b44baa81e89ca0b |
\n\n | .venv\Lib\site-packages\pandas\tests\apply\__pycache__\test_frame_apply_relabeling.cpython-313.pyc | test_frame_apply_relabeling.cpython-313.pyc | Other | 6,538 | 0.95 | 0 | 0.015873 | awesome-app | 269 | 2025-07-03T22:04:54.489968 | GPL-3.0 | true | 80c45f033a13d7d7afdf174f8b29b5eb |
\n\n | .venv\Lib\site-packages\pandas\tests\apply\__pycache__\test_frame_transform.cpython-313.pyc | test_frame_transform.cpython-313.pyc | Other | 13,728 | 0.95 | 0.021164 | 0.03352 | python-kit | 208 | 2024-12-08T03:15:29.723097 | MIT | true | 576ca794ca68bfec77caeb0768b255e7 |
\n\n | .venv\Lib\site-packages\pandas\tests\apply\__pycache__\test_invalid_arg.cpython-313.pyc | test_invalid_arg.cpython-313.pyc | Other | 19,192 | 0.8 | 0.018868 | 0.009662 | react-lib | 838 | 2025-06-10T01:31:01.230597 | GPL-3.0 | true | 74ecb48cbc1d9923a6b3e3f8c8836eaa |
\n\n | .venv\Lib\site-packages\pandas\tests\apply\__pycache__\test_numba.cpython-313.pyc | test_numba.cpython-313.pyc | Other | 8,438 | 0.95 | 0 | 0 | react-lib | 553 | 2024-09-09T11:08:56.476966 | Apache-2.0 | true | 96b8c956aae030c6dcb68cc0ca38578a |
\n\n | .venv\Lib\site-packages\pandas\tests\apply\__pycache__\test_series_apply.cpython-313.pyc | test_series_apply.cpython-313.pyc | Other | 41,475 | 0.95 | 0.002212 | 0.035294 | awesome-app | 518 | 2024-10-09T05:11:42.444100 | MIT | true | a291a3d77b686ff900de2df798984be6 |
\n\n | .venv\Lib\site-packages\pandas\tests\apply\__pycache__\test_series_apply_relabeling.cpython-313.pyc | test_series_apply_relabeling.cpython-313.pyc | Other | 2,564 | 0.8 | 0 | 0.02439 | node-utils | 982 | 2025-05-09T01:23:20.364240 | BSD-3-Clause | true | 252cf4f83ba54e57efa447fbe4643728 |
\n\n | .venv\Lib\site-packages\pandas\tests\apply\__pycache__\test_series_transform.cpython-313.pyc | test_series_transform.cpython-313.pyc | Other | 4,927 | 0.8 | 0 | 0.070175 | vue-tools | 353 | 2024-02-07T11:28:20.539429 | MIT | true | 5e16e5caeb57d7600ee59e5bf1d4fde2 |
\n\n | .venv\Lib\site-packages\pandas\tests\apply\__pycache__\test_str.cpython-313.pyc | test_str.cpython-313.pyc | Other | 14,299 | 0.8 | 0.005263 | 0.010753 | node-utils | 698 | 2025-01-21T16:22:02.290550 | Apache-2.0 | true | 3633e7a129f4ae344ce030568497c652 |
\n\n | .venv\Lib\site-packages\pandas\tests\apply\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 193 | 0.7 | 0 | 0 | python-kit | 813 | 2024-11-22T13:46:56.503731 | BSD-3-Clause | true | c72bf400cc796c43c58c376f7d1cad37 |
"""\nAssertion helpers for arithmetic tests.\n"""\nimport numpy as np\nimport pytest\n\nfrom pandas import (\n DataFrame,\n Index,\n Series,\n array,\n)\nimport pandas._testing as tm\nfrom pandas.core.arrays import (\n BooleanArray,\n NumpyExtensionArray,\n)\n\n\ndef assert_cannot_add(left, right, msg="cannot add"):\n """\n Helper to assert that left and right cannot be added.\n\n Parameters\n ----------\n left : object\n right : object\n msg : str, default "cannot add"\n """\n with pytest.raises(TypeError, match=msg):\n left + right\n with pytest.raises(TypeError, match=msg):\n right + left\n\n\ndef assert_invalid_addsub_type(left, right, msg=None):\n """\n Helper to assert that left and right can be neither added nor subtracted.\n\n Parameters\n ----------\n left : object\n right : object\n msg : str or None, default None\n """\n with pytest.raises(TypeError, match=msg):\n left + right\n with pytest.raises(TypeError, match=msg):\n right + left\n with pytest.raises(TypeError, match=msg):\n left - right\n with pytest.raises(TypeError, match=msg):\n right - left\n\n\ndef get_upcast_box(left, right, is_cmp: bool = False):\n """\n Get the box to use for 'expected' in an arithmetic or comparison operation.\n\n Parameters\n left : Any\n right : Any\n is_cmp : bool, default False\n Whether the operation is a comparison method.\n """\n\n if isinstance(left, DataFrame) or isinstance(right, DataFrame):\n return DataFrame\n if isinstance(left, Series) or isinstance(right, Series):\n if is_cmp and isinstance(left, Index):\n # Index does not defer for comparisons\n return np.array\n return Series\n if isinstance(left, Index) or isinstance(right, Index):\n if is_cmp:\n return np.array\n return Index\n return tm.to_array\n\n\ndef assert_invalid_comparison(left, right, box):\n """\n Assert that comparison operations with mismatched types behave correctly.\n\n Parameters\n ----------\n left : np.ndarray, ExtensionArray, Index, or Series\n right : object\n box : {pd.DataFrame, 
pd.Series, pd.Index, pd.array, tm.to_array}\n """\n # Not for tznaive-tzaware comparison\n\n # Note: not quite the same as how we do this for tm.box_expected\n xbox = box if box not in [Index, array] else np.array\n\n def xbox2(x):\n # Eventually we'd like this to be tighter, but for now we'll\n # just exclude NumpyExtensionArray[bool]\n if isinstance(x, NumpyExtensionArray):\n return x._ndarray\n if isinstance(x, BooleanArray):\n # NB: we are assuming no pd.NAs for now\n return x.astype(bool)\n return x\n\n # rev_box: box to use for reversed comparisons\n rev_box = xbox\n if isinstance(right, Index) and isinstance(left, Series):\n rev_box = np.array\n\n result = xbox2(left == right)\n expected = xbox(np.zeros(result.shape, dtype=np.bool_))\n\n tm.assert_equal(result, expected)\n\n result = xbox2(right == left)\n tm.assert_equal(result, rev_box(expected))\n\n result = xbox2(left != right)\n tm.assert_equal(result, ~expected)\n\n result = xbox2(right != left)\n tm.assert_equal(result, rev_box(~expected))\n\n msg = "|".join(\n [\n "Invalid comparison between",\n "Cannot compare type",\n "not supported between",\n "invalid type promotion",\n (\n # GH#36706 npdev 1.20.0 2020-09-28\n r"The DTypes <class 'numpy.dtype\[datetime64\]'> and "\n r"<class 'numpy.dtype\[int64\]'> do not have a common DType. 
"\n "For example they cannot be stored in a single array unless the "\n "dtype is `object`."\n ),\n ]\n )\n with pytest.raises(TypeError, match=msg):\n left < right\n with pytest.raises(TypeError, match=msg):\n left <= right\n with pytest.raises(TypeError, match=msg):\n left > right\n with pytest.raises(TypeError, match=msg):\n left >= right\n with pytest.raises(TypeError, match=msg):\n right < left\n with pytest.raises(TypeError, match=msg):\n right <= left\n with pytest.raises(TypeError, match=msg):\n right > left\n with pytest.raises(TypeError, match=msg):\n right >= left\n | .venv\Lib\site-packages\pandas\tests\arithmetic\common.py | common.py | Python | 4,362 | 0.95 | 0.154839 | 0.060606 | awesome-app | 910 | 2024-03-15T03:00:40.820405 | GPL-3.0 | true | 3740afccb8605a3742799d2908ae14de |
import numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import Index\n\n\n@pytest.fixture(params=[1, np.array(1, dtype=np.int64)])\ndef one(request):\n """\n Several variants of integer value 1. The zero-dim integer array\n behaves like an integer.\n\n This fixture can be used to check that datetimelike indexes handle\n addition and subtraction of integers and zero-dimensional arrays\n of integers.\n\n Examples\n --------\n dti = pd.date_range('2016-01-01', periods=2, freq='h')\n dti\n DatetimeIndex(['2016-01-01 00:00:00', '2016-01-01 01:00:00'],\n dtype='datetime64[ns]', freq='h')\n dti + one\n DatetimeIndex(['2016-01-01 01:00:00', '2016-01-01 02:00:00'],\n dtype='datetime64[ns]', freq='h')\n """\n return request.param\n\n\nzeros = [\n box_cls([0] * 5, dtype=dtype)\n for box_cls in [Index, np.array, pd.array]\n for dtype in [np.int64, np.uint64, np.float64]\n]\nzeros.extend([box_cls([-0.0] * 5, dtype=np.float64) for box_cls in [Index, np.array]])\nzeros.extend([np.array(0, dtype=dtype) for dtype in [np.int64, np.uint64, np.float64]])\nzeros.extend([np.array(-0.0, dtype=np.float64)])\nzeros.extend([0, 0.0, -0.0])\n\n\n@pytest.fixture(params=zeros)\ndef zero(request):\n """\n Several types of scalar zeros and length 5 vectors of zeros.\n\n This fixture can be used to check that numeric-dtype indexes handle\n division by any zero numeric-dtype.\n\n Uses vector of length 5 for broadcasting with `numeric_idx` fixture,\n which creates numeric-dtype vectors also of length 5.\n\n Examples\n --------\n arr = RangeIndex(5)\n arr / zeros\n Index([nan, inf, inf, inf, inf], dtype='float64')\n """\n return request.param\n\n\n# ------------------------------------------------------------------\n# Scalar Fixtures\n\n\n@pytest.fixture(\n params=[\n pd.Timedelta("10m7s").to_pytimedelta(),\n pd.Timedelta("10m7s"),\n pd.Timedelta("10m7s").to_timedelta64(),\n ],\n ids=lambda x: type(x).__name__,\n)\ndef scalar_td(request):\n """\n Several variants of Timedelta scalars 
representing 10 minutes and 7 seconds.\n """\n return request.param\n\n\n@pytest.fixture(\n params=[\n pd.offsets.Day(3),\n pd.offsets.Hour(72),\n pd.Timedelta(days=3).to_pytimedelta(),\n pd.Timedelta("72:00:00"),\n np.timedelta64(3, "D"),\n np.timedelta64(72, "h"),\n ],\n ids=lambda x: type(x).__name__,\n)\ndef three_days(request):\n """\n Several timedelta-like and DateOffset objects that each represent\n a 3-day timedelta\n """\n return request.param\n\n\n@pytest.fixture(\n params=[\n pd.offsets.Hour(2),\n pd.offsets.Minute(120),\n pd.Timedelta(hours=2).to_pytimedelta(),\n pd.Timedelta(seconds=2 * 3600),\n np.timedelta64(2, "h"),\n np.timedelta64(120, "m"),\n ],\n ids=lambda x: type(x).__name__,\n)\ndef two_hours(request):\n """\n Several timedelta-like and DateOffset objects that each represent\n a 2-hour timedelta\n """\n return request.param\n\n\n_common_mismatch = [\n pd.offsets.YearBegin(2),\n pd.offsets.MonthBegin(1),\n pd.offsets.Minute(),\n]\n\n\n@pytest.fixture(\n params=[\n np.timedelta64(4, "h"),\n pd.Timedelta(hours=23).to_pytimedelta(),\n pd.Timedelta("23:00:00"),\n ]\n + _common_mismatch\n)\ndef not_daily(request):\n """\n Several timedelta-like and DateOffset instances that are _not_\n compatible with Daily frequencies.\n """\n return request.param\n | .venv\Lib\site-packages\pandas\tests\arithmetic\conftest.py | conftest.py | Python | 3,473 | 0.95 | 0.079137 | 0.017391 | awesome-app | 174 | 2024-11-05T23:16:58.573077 | BSD-3-Clause | true | 9bef7ee773fc72eb448e9647f787a290 |
import operator\n\nimport numpy as np\nimport pytest\n\nimport pandas._testing as tm\nfrom pandas.core.ops.array_ops import (\n comparison_op,\n na_logical_op,\n)\n\n\ndef test_na_logical_op_2d():\n left = np.arange(8).reshape(4, 2)\n right = left.astype(object)\n right[0, 0] = np.nan\n\n # Check that we fall back to the vec_binop branch\n with pytest.raises(TypeError, match="unsupported operand type"):\n operator.or_(left, right)\n\n result = na_logical_op(left, right, operator.or_)\n expected = right\n tm.assert_numpy_array_equal(result, expected)\n\n\ndef test_object_comparison_2d():\n left = np.arange(9).reshape(3, 3).astype(object)\n right = left.T\n\n result = comparison_op(left, right, operator.eq)\n expected = np.eye(3).astype(bool)\n tm.assert_numpy_array_equal(result, expected)\n\n # Ensure that cython doesn't raise on non-writeable arg, which\n # we can get from np.broadcast_to\n right.flags.writeable = False\n result = comparison_op(left, right, operator.ne)\n tm.assert_numpy_array_equal(result, ~expected)\n | .venv\Lib\site-packages\pandas\tests\arithmetic\test_array_ops.py | test_array_ops.py | Python | 1,064 | 0.95 | 0.051282 | 0.103448 | vue-tools | 821 | 2023-09-14T20:50:11.504991 | Apache-2.0 | true | 25798539524afdd6a801599b8fbdd1eb |
import numpy as np\n\nfrom pandas import (\n Categorical,\n Series,\n)\nimport pandas._testing as tm\n\n\nclass TestCategoricalComparisons:\n def test_categorical_nan_equality(self):\n cat = Series(Categorical(["a", "b", "c", np.nan]))\n expected = Series([True, True, True, False])\n result = cat == cat\n tm.assert_series_equal(result, expected)\n\n def test_categorical_tuple_equality(self):\n # GH 18050\n ser = Series([(0, 0), (0, 1), (0, 0), (1, 0), (1, 1)])\n expected = Series([True, False, True, False, False])\n result = ser == (0, 0)\n tm.assert_series_equal(result, expected)\n\n result = ser.astype("category") == (0, 0)\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\arithmetic\test_categorical.py | test_categorical.py | Python | 742 | 0.95 | 0.12 | 0.05 | node-utils | 879 | 2025-03-31T14:34:04.292079 | Apache-2.0 | true | 639dadb707da0d4388ec65df56b49fd4 |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should\n# behave identically.\n# Specifically for datetime64 and datetime64tz dtypes\nfrom datetime import (\n datetime,\n time,\n timedelta,\n)\nfrom itertools import (\n product,\n starmap,\n)\nimport operator\n\nimport numpy as np\nimport pytest\nimport pytz\n\nfrom pandas._libs.tslibs.conversion import localize_pydatetime\nfrom pandas._libs.tslibs.offsets import shift_months\nfrom pandas.errors import PerformanceWarning\n\nimport pandas as pd\nfrom pandas import (\n DateOffset,\n DatetimeIndex,\n NaT,\n Period,\n Series,\n Timedelta,\n TimedeltaIndex,\n Timestamp,\n date_range,\n)\nimport pandas._testing as tm\nfrom pandas.core import roperator\nfrom pandas.tests.arithmetic.common import (\n assert_cannot_add,\n assert_invalid_addsub_type,\n assert_invalid_comparison,\n get_upcast_box,\n)\n\n# ------------------------------------------------------------------\n# Comparisons\n\n\nclass TestDatetime64ArrayLikeComparisons:\n # Comparison tests for datetime64 vectors fully parametrized over\n # DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison\n # tests will eventually end up here.\n\n def test_compare_zerodim(self, tz_naive_fixture, box_with_array):\n # Test comparison with zero-dimensional array is unboxed\n tz = tz_naive_fixture\n box = box_with_array\n dti = date_range("20130101", periods=3, tz=tz)\n\n other = np.array(dti.to_numpy()[0])\n\n dtarr = tm.box_expected(dti, box)\n xbox = get_upcast_box(dtarr, other, True)\n result = dtarr <= other\n expected = np.array([True, False, False])\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(result, expected)\n\n @pytest.mark.parametrize(\n "other",\n [\n "foo",\n -1,\n 99,\n 4.0,\n object(),\n timedelta(days=2),\n # GH#19800, GH#19301 datetime.date comparison raises to\n # match DatetimeIndex/Timestamp. 
This also matches the behavior\n # of stdlib datetime.datetime\n datetime(2001, 1, 1).date(),\n # GH#19301 None and NaN are *not* cast to NaT for comparisons\n None,\n np.nan,\n ],\n )\n def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):\n # GH#22074, GH#15966\n tz = tz_naive_fixture\n\n rng = date_range("1/1/2000", periods=10, tz=tz)\n dtarr = tm.box_expected(rng, box_with_array)\n assert_invalid_comparison(dtarr, other, box_with_array)\n\n @pytest.mark.parametrize(\n "other",\n [\n # GH#4968 invalid date/int comparisons\n list(range(10)),\n np.arange(10),\n np.arange(10).astype(np.float32),\n np.arange(10).astype(object),\n pd.timedelta_range("1ns", periods=10).array,\n np.array(pd.timedelta_range("1ns", periods=10)),\n list(pd.timedelta_range("1ns", periods=10)),\n pd.timedelta_range("1 Day", periods=10).astype(object),\n pd.period_range("1971-01-01", freq="D", periods=10).array,\n pd.period_range("1971-01-01", freq="D", periods=10).astype(object),\n ],\n )\n def test_dt64arr_cmp_arraylike_invalid(\n self, other, tz_naive_fixture, box_with_array\n ):\n tz = tz_naive_fixture\n\n dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data\n obj = tm.box_expected(dta, box_with_array)\n assert_invalid_comparison(obj, other, box_with_array)\n\n def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):\n tz = tz_naive_fixture\n\n dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data\n\n other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])\n result = dta == other\n expected = np.array([False, False, False, True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = dta != other\n tm.assert_numpy_array_equal(result, ~expected)\n\n msg = "Invalid comparison between|Cannot compare type|not supported between"\n with pytest.raises(TypeError, match=msg):\n dta < other\n with pytest.raises(TypeError, match=msg):\n dta > other\n with pytest.raises(TypeError, match=msg):\n dta <= other\n with 
pytest.raises(TypeError, match=msg):\n dta >= other\n\n def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):\n # GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly\n tz = tz_naive_fixture\n box = box_with_array\n\n ts = Timestamp("2021-01-01", tz=tz)\n ser = Series([ts, NaT])\n\n obj = tm.box_expected(ser, box)\n xbox = get_upcast_box(obj, ts, True)\n\n expected = Series([True, False], dtype=np.bool_)\n expected = tm.box_expected(expected, xbox)\n\n result = obj == ts\n tm.assert_equal(result, expected)\n\n\nclass TestDatetime64SeriesComparison:\n # TODO: moved from tests.series.test_operators; needs cleanup\n\n @pytest.mark.parametrize(\n "pair",\n [\n (\n [Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],\n [NaT, NaT, Timestamp("2011-01-03")],\n ),\n (\n [Timedelta("1 days"), NaT, Timedelta("3 days")],\n [NaT, NaT, Timedelta("3 days")],\n ),\n (\n [Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],\n [NaT, NaT, Period("2011-03", freq="M")],\n ),\n ],\n )\n @pytest.mark.parametrize("reverse", [True, False])\n @pytest.mark.parametrize("dtype", [None, object])\n @pytest.mark.parametrize(\n "op, expected",\n [\n (operator.eq, Series([False, False, True])),\n (operator.ne, Series([True, True, False])),\n (operator.lt, Series([False, False, False])),\n (operator.gt, Series([False, False, False])),\n (operator.ge, Series([False, False, True])),\n (operator.le, Series([False, False, True])),\n ],\n )\n def test_nat_comparisons(\n self,\n dtype,\n index_or_series,\n reverse,\n pair,\n op,\n expected,\n ):\n box = index_or_series\n lhs, rhs = pair\n if reverse:\n # add lhs / rhs switched data\n lhs, rhs = rhs, lhs\n\n left = Series(lhs, dtype=dtype)\n right = box(rhs, dtype=dtype)\n\n result = op(left, right)\n\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "data",\n [\n [Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],\n [Timedelta("1 days"), NaT, Timedelta("3 days")],\n 
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],\n ],\n )\n @pytest.mark.parametrize("dtype", [None, object])\n def test_nat_comparisons_scalar(self, dtype, data, box_with_array):\n box = box_with_array\n\n left = Series(data, dtype=dtype)\n left = tm.box_expected(left, box)\n xbox = get_upcast_box(left, NaT, True)\n\n expected = [False, False, False]\n expected = tm.box_expected(expected, xbox)\n if box is pd.array and dtype is object:\n expected = pd.array(expected, dtype="bool")\n\n tm.assert_equal(left == NaT, expected)\n tm.assert_equal(NaT == left, expected)\n\n expected = [True, True, True]\n expected = tm.box_expected(expected, xbox)\n if box is pd.array and dtype is object:\n expected = pd.array(expected, dtype="bool")\n tm.assert_equal(left != NaT, expected)\n tm.assert_equal(NaT != left, expected)\n\n expected = [False, False, False]\n expected = tm.box_expected(expected, xbox)\n if box is pd.array and dtype is object:\n expected = pd.array(expected, dtype="bool")\n tm.assert_equal(left < NaT, expected)\n tm.assert_equal(NaT > left, expected)\n tm.assert_equal(left <= NaT, expected)\n tm.assert_equal(NaT >= left, expected)\n\n tm.assert_equal(left > NaT, expected)\n tm.assert_equal(NaT < left, expected)\n tm.assert_equal(left >= NaT, expected)\n tm.assert_equal(NaT <= left, expected)\n\n @pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])\n def test_series_comparison_scalars(self, val):\n series = Series(date_range("1/1/2000", periods=10))\n\n result = series > val\n expected = Series([x > val for x in series])\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]\n )\n def test_timestamp_compare_series(self, left, right):\n # see gh-4982\n # Make sure we can compare Timestamps on the right AND left hand side.\n ser = Series(date_range("20010101", periods=10), name="dates")\n s_nat = ser.copy(deep=True)\n\n ser[0] = 
Timestamp("nat")\n ser[3] = Timestamp("nat")\n\n left_f = getattr(operator, left)\n right_f = getattr(operator, right)\n\n # No NaT\n expected = left_f(ser, Timestamp("20010109"))\n result = right_f(Timestamp("20010109"), ser)\n tm.assert_series_equal(result, expected)\n\n # NaT\n expected = left_f(ser, Timestamp("nat"))\n result = right_f(Timestamp("nat"), ser)\n tm.assert_series_equal(result, expected)\n\n # Compare to Timestamp with series containing NaT\n expected = left_f(s_nat, Timestamp("20010109"))\n result = right_f(Timestamp("20010109"), s_nat)\n tm.assert_series_equal(result, expected)\n\n # Compare to NaT with series containing NaT\n expected = left_f(s_nat, NaT)\n result = right_f(NaT, s_nat)\n tm.assert_series_equal(result, expected)\n\n def test_dt64arr_timestamp_equality(self, box_with_array):\n # GH#11034\n box = box_with_array\n\n ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])\n ser = tm.box_expected(ser, box)\n xbox = get_upcast_box(ser, ser, True)\n\n result = ser != ser\n expected = tm.box_expected([False, False, True], xbox)\n tm.assert_equal(result, expected)\n\n if box is pd.DataFrame:\n # alignment for frame vs series comparisons deprecated\n # in GH#46795 enforced 2.0\n with pytest.raises(ValueError, match="not aligned"):\n ser != ser[0]\n\n else:\n result = ser != ser[0]\n expected = tm.box_expected([False, True, True], xbox)\n tm.assert_equal(result, expected)\n\n if box is pd.DataFrame:\n # alignment for frame vs series comparisons deprecated\n # in GH#46795 enforced 2.0\n with pytest.raises(ValueError, match="not aligned"):\n ser != ser[2]\n else:\n result = ser != ser[2]\n expected = tm.box_expected([True, True, True], xbox)\n tm.assert_equal(result, expected)\n\n result = ser == ser\n expected = tm.box_expected([True, True, False], xbox)\n tm.assert_equal(result, expected)\n\n if box is pd.DataFrame:\n # alignment for frame vs series comparisons deprecated\n # in GH#46795 enforced 2.0\n with 
pytest.raises(ValueError, match="not aligned"):\n ser == ser[0]\n else:\n result = ser == ser[0]\n expected = tm.box_expected([True, False, False], xbox)\n tm.assert_equal(result, expected)\n\n if box is pd.DataFrame:\n # alignment for frame vs series comparisons deprecated\n # in GH#46795 enforced 2.0\n with pytest.raises(ValueError, match="not aligned"):\n ser == ser[2]\n else:\n result = ser == ser[2]\n expected = tm.box_expected([False, False, False], xbox)\n tm.assert_equal(result, expected)\n\n @pytest.mark.parametrize(\n "datetimelike",\n [\n Timestamp("20130101"),\n datetime(2013, 1, 1),\n np.datetime64("2013-01-01T00:00", "ns"),\n ],\n )\n @pytest.mark.parametrize(\n "op,expected",\n [\n (operator.lt, [True, False, False, False]),\n (operator.le, [True, True, False, False]),\n (operator.eq, [False, True, False, False]),\n (operator.gt, [False, False, False, True]),\n ],\n )\n def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):\n # GH#17965, test for ability to compare datetime64[ns] columns\n # to datetimelike\n ser = Series(\n [\n Timestamp("20120101"),\n Timestamp("20130101"),\n np.nan,\n Timestamp("20130103"),\n ],\n name="A",\n )\n result = op(ser, datetimelike)\n expected = Series(expected, name="A")\n tm.assert_series_equal(result, expected)\n\n\nclass TestDatetimeIndexComparisons:\n # TODO: moved from tests.indexes.test_base; parametrize and de-duplicate\n def test_comparators(self, comparison_op):\n index = date_range("2020-01-01", periods=10)\n element = index[len(index) // 2]\n element = Timestamp(element).to_datetime64()\n\n arr = np.array(index)\n arr_result = comparison_op(arr, element)\n index_result = comparison_op(index, element)\n\n assert isinstance(index_result, np.ndarray)\n tm.assert_numpy_array_equal(arr_result, index_result)\n\n @pytest.mark.parametrize(\n "other",\n [datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],\n )\n def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):\n 
tz = tz_naive_fixture\n dti = date_range("2016-01-01", periods=2, tz=tz)\n if tz is not None:\n if isinstance(other, np.datetime64):\n pytest.skip(f"{type(other).__name__} is not tz aware")\n other = localize_pydatetime(other, dti.tzinfo)\n\n result = dti == other\n expected = np.array([True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = dti > other\n expected = np.array([False, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = dti >= other\n expected = np.array([True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = dti < other\n expected = np.array([False, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = dti <= other\n expected = np.array([True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize("dtype", [None, object])\n def test_dti_cmp_nat(self, dtype, box_with_array):\n left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])\n right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])\n\n left = tm.box_expected(left, box_with_array)\n right = tm.box_expected(right, box_with_array)\n xbox = get_upcast_box(left, right, True)\n\n lhs, rhs = left, right\n if dtype is object:\n lhs, rhs = left.astype(object), right.astype(object)\n\n result = rhs == lhs\n expected = np.array([False, False, True])\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(result, expected)\n\n result = lhs != rhs\n expected = np.array([True, True, False])\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(result, expected)\n\n expected = np.array([False, False, False])\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(lhs == NaT, expected)\n tm.assert_equal(NaT == rhs, expected)\n\n expected = np.array([True, True, True])\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(lhs != NaT, expected)\n tm.assert_equal(NaT != lhs, expected)\n\n expected = np.array([False, False, False])\n expected = 
tm.box_expected(expected, xbox)\n tm.assert_equal(lhs < NaT, expected)\n tm.assert_equal(NaT > lhs, expected)\n\n def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):\n fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])\n fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])\n\n didx1 = DatetimeIndex(\n ["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]\n )\n didx2 = DatetimeIndex(\n ["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]\n )\n darr = np.array(\n [\n np.datetime64("2014-02-01 00:00"),\n np.datetime64("2014-03-01 00:00"),\n np.datetime64("nat"),\n np.datetime64("nat"),\n np.datetime64("2014-06-01 00:00"),\n np.datetime64("2014-07-01 00:00"),\n ]\n )\n\n cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]\n\n # Check pd.NaT is handles as the same as np.nan\n with tm.assert_produces_warning(None):\n for idx1, idx2 in cases:\n result = idx1 < idx2\n expected = np.array([True, False, False, False, True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx2 > idx1\n expected = np.array([True, False, False, False, True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 <= idx2\n expected = np.array([True, False, False, False, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx2 >= idx1\n expected = np.array([True, False, False, False, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 == idx2\n expected = np.array([False, False, False, False, False, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 != idx2\n expected = np.array([True, True, True, True, True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n with tm.assert_produces_warning(None):\n for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:\n result = idx1 < val\n expected = np.array([False, False, False, False, False, False])\n tm.assert_numpy_array_equal(result, expected)\n result = idx1 > val\n tm.assert_numpy_array_equal(result, 
expected)\n\n result = idx1 <= val\n tm.assert_numpy_array_equal(result, expected)\n result = idx1 >= val\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 == val\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 != val\n expected = np.array([True, True, True, True, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n # Check pd.NaT is handles as the same as np.nan\n with tm.assert_produces_warning(None):\n for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:\n result = idx1 < val\n expected = np.array([True, False, False, False, False, False])\n tm.assert_numpy_array_equal(result, expected)\n result = idx1 > val\n expected = np.array([False, False, False, False, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 <= val\n expected = np.array([True, False, True, False, False, False])\n tm.assert_numpy_array_equal(result, expected)\n result = idx1 >= val\n expected = np.array([False, False, True, False, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 == val\n expected = np.array([False, False, True, False, False, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 != val\n expected = np.array([True, True, False, True, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):\n # GH#18162\n op = comparison_op\n box = box_with_array\n\n dr = date_range("2016-01-01", periods=6)\n dz = dr.tz_localize("US/Pacific")\n\n dr = tm.box_expected(dr, box)\n dz = tm.box_expected(dz, box)\n\n if box is pd.DataFrame:\n tolist = lambda x: x.astype(object).values.tolist()[0]\n else:\n tolist = list\n\n if op not in [operator.eq, operator.ne]:\n msg = (\n r"Invalid comparison between dtype=datetime64\[ns.*\] "\n "and (Timestamp|DatetimeArray|list|ndarray)"\n )\n with pytest.raises(TypeError, match=msg):\n op(dr, dz)\n\n with pytest.raises(TypeError, match=msg):\n op(dr, 
tolist(dz))\n with pytest.raises(TypeError, match=msg):\n op(dr, np.array(tolist(dz), dtype=object))\n with pytest.raises(TypeError, match=msg):\n op(dz, dr)\n\n with pytest.raises(TypeError, match=msg):\n op(dz, tolist(dr))\n with pytest.raises(TypeError, match=msg):\n op(dz, np.array(tolist(dr), dtype=object))\n\n # The aware==aware and naive==naive comparisons should *not* raise\n assert np.all(dr == dr)\n assert np.all(dr == tolist(dr))\n assert np.all(tolist(dr) == dr)\n assert np.all(np.array(tolist(dr), dtype=object) == dr)\n assert np.all(dr == np.array(tolist(dr), dtype=object))\n\n assert np.all(dz == dz)\n assert np.all(dz == tolist(dz))\n assert np.all(tolist(dz) == dz)\n assert np.all(np.array(tolist(dz), dtype=object) == dz)\n assert np.all(dz == np.array(tolist(dz), dtype=object))\n\n def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):\n # GH#18162\n op = comparison_op\n\n dr = date_range("2016-01-01", periods=6)\n dz = dr.tz_localize("US/Pacific")\n\n dr = tm.box_expected(dr, box_with_array)\n dz = tm.box_expected(dz, box_with_array)\n\n # Check comparisons against scalar Timestamps\n ts = Timestamp("2000-03-14 01:59")\n ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")\n\n assert np.all(dr > ts)\n msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"\n if op not in [operator.eq, operator.ne]:\n with pytest.raises(TypeError, match=msg):\n op(dr, ts_tz)\n\n assert np.all(dz > ts_tz)\n if op not in [operator.eq, operator.ne]:\n with pytest.raises(TypeError, match=msg):\n op(dz, ts)\n\n if op not in [operator.eq, operator.ne]:\n # GH#12601: Check comparison against Timestamps and DatetimeIndex\n with pytest.raises(TypeError, match=msg):\n op(ts, dz)\n\n @pytest.mark.parametrize(\n "other",\n [datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],\n )\n # Bug in NumPy? 
https://github.com/numpy/numpy/issues/13841\n # Raising in __eq__ will fallback to NumPy, which warns, fails,\n # then re-raises the original exception. So we just need to ignore.\n @pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")\n def test_scalar_comparison_tzawareness(\n self, comparison_op, other, tz_aware_fixture, box_with_array\n ):\n op = comparison_op\n tz = tz_aware_fixture\n dti = date_range("2016-01-01", periods=2, tz=tz)\n\n dtarr = tm.box_expected(dti, box_with_array)\n xbox = get_upcast_box(dtarr, other, True)\n if op in [operator.eq, operator.ne]:\n exbool = op is operator.ne\n expected = np.array([exbool, exbool], dtype=bool)\n expected = tm.box_expected(expected, xbox)\n\n result = op(dtarr, other)\n tm.assert_equal(result, expected)\n\n result = op(other, dtarr)\n tm.assert_equal(result, expected)\n else:\n msg = (\n r"Invalid comparison between dtype=datetime64\[ns, .*\] "\n f"and {type(other).__name__}"\n )\n with pytest.raises(TypeError, match=msg):\n op(dtarr, other)\n with pytest.raises(TypeError, match=msg):\n op(other, dtarr)\n\n def test_nat_comparison_tzawareness(self, comparison_op):\n # GH#19276\n # tzaware DatetimeIndex should not raise when compared to NaT\n op = comparison_op\n\n dti = DatetimeIndex(\n ["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]\n )\n expected = np.array([op == operator.ne] * len(dti))\n result = op(dti, NaT)\n tm.assert_numpy_array_equal(result, expected)\n\n result = op(dti.tz_localize("US/Pacific"), NaT)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_dti_cmp_str(self, tz_naive_fixture):\n # GH#22074\n # regardless of tz, we expect these comparisons are valid\n tz = tz_naive_fixture\n rng = date_range("1/1/2000", periods=10, tz=tz)\n other = "1/1/2000"\n\n result = rng == other\n expected = np.array([True] + [False] * 9)\n tm.assert_numpy_array_equal(result, expected)\n\n result = rng != other\n expected = np.array([False] + [True] * 9)\n 
tm.assert_numpy_array_equal(result, expected)\n\n result = rng < other\n expected = np.array([False] * 10)\n tm.assert_numpy_array_equal(result, expected)\n\n result = rng <= other\n expected = np.array([True] + [False] * 9)\n tm.assert_numpy_array_equal(result, expected)\n\n result = rng > other\n expected = np.array([False] + [True] * 9)\n tm.assert_numpy_array_equal(result, expected)\n\n result = rng >= other\n expected = np.array([True] * 10)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_dti_cmp_list(self):\n rng = date_range("1/1/2000", periods=10)\n\n result = rng == list(rng)\n expected = rng == rng\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize(\n "other",\n [\n pd.timedelta_range("1D", periods=10),\n pd.timedelta_range("1D", periods=10).to_series(),\n pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),\n ],\n ids=lambda x: type(x).__name__,\n )\n def test_dti_cmp_tdi_tzawareness(self, other):\n # GH#22074\n # reversion test that we _don't_ call _assert_tzawareness_compat\n # when comparing against TimedeltaIndex\n dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")\n\n result = dti == other\n expected = np.array([False] * 10)\n tm.assert_numpy_array_equal(result, expected)\n\n result = dti != other\n expected = np.array([True] * 10)\n tm.assert_numpy_array_equal(result, expected)\n msg = "Invalid comparison between"\n with pytest.raises(TypeError, match=msg):\n dti < other\n with pytest.raises(TypeError, match=msg):\n dti <= other\n with pytest.raises(TypeError, match=msg):\n dti > other\n with pytest.raises(TypeError, match=msg):\n dti >= other\n\n def test_dti_cmp_object_dtype(self):\n # GH#22074\n dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")\n\n other = dti.astype("O")\n\n result = dti == other\n expected = np.array([True] * 10)\n tm.assert_numpy_array_equal(result, expected)\n\n other = dti.tz_localize(None)\n result = dti != other\n tm.assert_numpy_array_equal(result, 
expected)\n\n other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)\n result = dti == other\n expected = np.array([True] * 5 + [False] * 5)\n tm.assert_numpy_array_equal(result, expected)\n msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"\n with pytest.raises(TypeError, match=msg):\n dti >= other\n\n\n# ------------------------------------------------------------------\n# Arithmetic\n\n\nclass TestDatetime64Arithmetic:\n # This class is intended for "finished" tests that are fully parametrized\n # over DataFrame/Series/Index/DatetimeArray\n\n # -------------------------------------------------------------\n # Addition/Subtraction of timedelta-like\n\n @pytest.mark.arm_slow\n def test_dt64arr_add_timedeltalike_scalar(\n self, tz_naive_fixture, two_hours, box_with_array\n ):\n # GH#22005, GH#22163 check DataFrame doesn't raise TypeError\n tz = tz_naive_fixture\n\n rng = date_range("2000-01-01", "2000-02-01", tz=tz)\n expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)\n\n rng = tm.box_expected(rng, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = rng + two_hours\n tm.assert_equal(result, expected)\n\n result = two_hours + rng\n tm.assert_equal(result, expected)\n\n rng += two_hours\n tm.assert_equal(rng, expected)\n\n def test_dt64arr_sub_timedeltalike_scalar(\n self, tz_naive_fixture, two_hours, box_with_array\n ):\n tz = tz_naive_fixture\n\n rng = date_range("2000-01-01", "2000-02-01", tz=tz)\n expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)\n\n rng = tm.box_expected(rng, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = rng - two_hours\n tm.assert_equal(result, expected)\n\n rng -= two_hours\n tm.assert_equal(rng, expected)\n\n def test_dt64_array_sub_dt_with_different_timezone(self, box_with_array):\n t1 = date_range("20130101", periods=3).tz_localize("US/Eastern")\n t1 = tm.box_expected(t1, box_with_array)\n t2 = 
Timestamp("20130101").tz_localize("CET")\n tnaive = Timestamp(20130101)\n\n result = t1 - t2\n expected = TimedeltaIndex(\n ["0 days 06:00:00", "1 days 06:00:00", "2 days 06:00:00"]\n )\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(result, expected)\n\n result = t2 - t1\n expected = TimedeltaIndex(\n ["-1 days +18:00:00", "-2 days +18:00:00", "-3 days +18:00:00"]\n )\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(result, expected)\n\n msg = "Cannot subtract tz-naive and tz-aware datetime-like objects"\n with pytest.raises(TypeError, match=msg):\n t1 - tnaive\n\n with pytest.raises(TypeError, match=msg):\n tnaive - t1\n\n def test_dt64_array_sub_dt64_array_with_different_timezone(self, box_with_array):\n t1 = date_range("20130101", periods=3).tz_localize("US/Eastern")\n t1 = tm.box_expected(t1, box_with_array)\n t2 = date_range("20130101", periods=3).tz_localize("CET")\n t2 = tm.box_expected(t2, box_with_array)\n tnaive = date_range("20130101", periods=3)\n\n result = t1 - t2\n expected = TimedeltaIndex(\n ["0 days 06:00:00", "0 days 06:00:00", "0 days 06:00:00"]\n )\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(result, expected)\n\n result = t2 - t1\n expected = TimedeltaIndex(\n ["-1 days +18:00:00", "-1 days +18:00:00", "-1 days +18:00:00"]\n )\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(result, expected)\n\n msg = "Cannot subtract tz-naive and tz-aware datetime-like objects"\n with pytest.raises(TypeError, match=msg):\n t1 - tnaive\n\n with pytest.raises(TypeError, match=msg):\n tnaive - t1\n\n def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):\n # GH#23320 special handling for timedelta64("NaT")\n tz = tz_naive_fixture\n\n dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")\n other = np.timedelta64("NaT")\n expected = DatetimeIndex(["NaT"] * 9, tz=tz).as_unit("ns")\n\n obj = tm.box_expected(dti, box_with_array)\n expected 
= tm.box_expected(expected, box_with_array)\n\n result = obj + other\n tm.assert_equal(result, expected)\n result = other + obj\n tm.assert_equal(result, expected)\n result = obj - other\n tm.assert_equal(result, expected)\n msg = "cannot subtract"\n with pytest.raises(TypeError, match=msg):\n other - obj\n\n def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):\n tz = tz_naive_fixture\n dti = date_range("2016-01-01", periods=3, tz=tz)\n tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])\n tdarr = tdi.values\n\n expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)\n\n dtarr = tm.box_expected(dti, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = dtarr + tdarr\n tm.assert_equal(result, expected)\n result = tdarr + dtarr\n tm.assert_equal(result, expected)\n\n expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)\n expected = tm.box_expected(expected, box_with_array)\n\n result = dtarr - tdarr\n tm.assert_equal(result, expected)\n msg = "cannot subtract|(bad|unsupported) operand type for unary"\n with pytest.raises(TypeError, match=msg):\n tdarr - dtarr\n\n # -----------------------------------------------------------------\n # Subtraction of datetime-like scalars\n\n @pytest.mark.parametrize(\n "ts",\n [\n Timestamp("2013-01-01"),\n Timestamp("2013-01-01").to_pydatetime(),\n Timestamp("2013-01-01").to_datetime64(),\n # GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano\n # for DataFrame operation\n np.datetime64("2013-01-01", "D"),\n ],\n )\n def test_dt64arr_sub_dtscalar(self, box_with_array, ts):\n # GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype\n idx = date_range("2013-01-01", periods=3)._with_freq(None)\n idx = tm.box_expected(idx, box_with_array)\n\n expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])\n expected = tm.box_expected(expected, box_with_array)\n\n result = idx - ts\n tm.assert_equal(result, expected)\n\n result = ts - idx\n 
tm.assert_equal(result, -expected)\n tm.assert_equal(result, -expected)\n\n def test_dt64arr_sub_timestamp_tzaware(self, box_with_array):\n ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")\n ser = ser._with_freq(None)\n ts = ser[0]\n\n ser = tm.box_expected(ser, box_with_array)\n\n delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])\n expected = tm.box_expected(delta_series, box_with_array)\n\n tm.assert_equal(ser - ts, expected)\n tm.assert_equal(ts - ser, -expected)\n\n def test_dt64arr_sub_NaT(self, box_with_array, unit):\n # GH#18808\n dti = DatetimeIndex([NaT, Timestamp("19900315")]).as_unit(unit)\n ser = tm.box_expected(dti, box_with_array)\n\n result = ser - NaT\n expected = Series([NaT, NaT], dtype=f"timedelta64[{unit}]")\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(result, expected)\n\n dti_tz = dti.tz_localize("Asia/Tokyo")\n ser_tz = tm.box_expected(dti_tz, box_with_array)\n\n result = ser_tz - NaT\n expected = Series([NaT, NaT], dtype=f"timedelta64[{unit}]")\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(result, expected)\n\n # -------------------------------------------------------------\n # Subtraction of datetime-like array-like\n\n def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):\n dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)\n expected = dti - dti\n\n obj = tm.box_expected(dti, box_with_array)\n expected = tm.box_expected(expected, box_with_array).astype(object)\n\n with tm.assert_produces_warning(PerformanceWarning):\n result = obj - obj.astype(object)\n tm.assert_equal(result, expected)\n\n def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):\n dti = date_range("2016-01-01", periods=3, tz=None)\n dt64vals = dti.values\n\n dtarr = tm.box_expected(dti, box_with_array)\n\n expected = dtarr - dtarr\n result = dtarr - dt64vals\n tm.assert_equal(result, expected)\n result = dt64vals - dtarr\n 
tm.assert_equal(result, expected)\n\n def test_dt64arr_aware_sub_dt64ndarray_raises(\n self, tz_aware_fixture, box_with_array\n ):\n tz = tz_aware_fixture\n dti = date_range("2016-01-01", periods=3, tz=tz)\n dt64vals = dti.values\n\n dtarr = tm.box_expected(dti, box_with_array)\n msg = "Cannot subtract tz-naive and tz-aware datetime"\n with pytest.raises(TypeError, match=msg):\n dtarr - dt64vals\n with pytest.raises(TypeError, match=msg):\n dt64vals - dtarr\n\n # -------------------------------------------------------------\n # Addition of datetime-like others (invalid)\n\n def test_dt64arr_add_dtlike_raises(self, tz_naive_fixture, box_with_array):\n # GH#22163 ensure DataFrame doesn't cast Timestamp to i8\n # GH#9631\n tz = tz_naive_fixture\n\n dti = date_range("2016-01-01", periods=3, tz=tz)\n if tz is None:\n dti2 = dti.tz_localize("US/Eastern")\n else:\n dti2 = dti.tz_localize(None)\n dtarr = tm.box_expected(dti, box_with_array)\n\n assert_cannot_add(dtarr, dti.values)\n assert_cannot_add(dtarr, dti)\n assert_cannot_add(dtarr, dtarr)\n assert_cannot_add(dtarr, dti[0])\n assert_cannot_add(dtarr, dti[0].to_pydatetime())\n assert_cannot_add(dtarr, dti[0].to_datetime64())\n assert_cannot_add(dtarr, dti2[0])\n assert_cannot_add(dtarr, dti2[0].to_pydatetime())\n assert_cannot_add(dtarr, np.datetime64("2011-01-01", "D"))\n\n # -------------------------------------------------------------\n # Other Invalid Addition/Subtraction\n\n # Note: freq here includes both Tick and non-Tick offsets; this is\n # relevant because historically integer-addition was allowed if we had\n # a freq.\n @pytest.mark.parametrize("freq", ["h", "D", "W", "2ME", "MS", "QE", "B", None])\n @pytest.mark.parametrize("dtype", [None, "uint8"])\n def test_dt64arr_addsub_intlike(\n self, request, dtype, index_or_series_or_array, freq, tz_naive_fixture\n ):\n # GH#19959, GH#19123, GH#19012\n # GH#55860 use index_or_series_or_array instead of box_with_array\n # bc DataFrame alignment makes it 
inapplicable\n tz = tz_naive_fixture\n\n if freq is None:\n dti = DatetimeIndex(["NaT", "2017-04-05 06:07:08"], tz=tz)\n else:\n dti = date_range("2016-01-01", periods=2, freq=freq, tz=tz)\n\n obj = index_or_series_or_array(dti)\n other = np.array([4, -1])\n if dtype is not None:\n other = other.astype(dtype)\n\n msg = "|".join(\n [\n "Addition/subtraction of integers",\n "cannot subtract DatetimeArray from",\n # IntegerArray\n "can only perform ops with numeric values",\n "unsupported operand type.*Categorical",\n r"unsupported operand type\(s\) for -: 'int' and 'Timestamp'",\n ]\n )\n assert_invalid_addsub_type(obj, 1, msg)\n assert_invalid_addsub_type(obj, np.int64(2), msg)\n assert_invalid_addsub_type(obj, np.array(3, dtype=np.int64), msg)\n assert_invalid_addsub_type(obj, other, msg)\n assert_invalid_addsub_type(obj, np.array(other), msg)\n assert_invalid_addsub_type(obj, pd.array(other), msg)\n assert_invalid_addsub_type(obj, pd.Categorical(other), msg)\n assert_invalid_addsub_type(obj, pd.Index(other), msg)\n assert_invalid_addsub_type(obj, Series(other), msg)\n\n @pytest.mark.parametrize(\n "other",\n [\n 3.14,\n np.array([2.0, 3.0]),\n # GH#13078 datetime +/- Period is invalid\n Period("2011-01-01", freq="D"),\n # https://github.com/pandas-dev/pandas/issues/10329\n time(1, 2, 3),\n ],\n )\n @pytest.mark.parametrize("dti_freq", [None, "D"])\n def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):\n dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)\n dtarr = tm.box_expected(dti, box_with_array)\n msg = "|".join(\n [\n "unsupported operand type",\n "cannot (add|subtract)",\n "cannot use operands with types",\n "ufunc '?(add|subtract)'? 
cannot use operands with types",\n "Concatenation operation is not implemented for NumPy arrays",\n ]\n )\n assert_invalid_addsub_type(dtarr, other, msg)\n\n @pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "h"])\n @pytest.mark.parametrize("dti_freq", [None, "D"])\n def test_dt64arr_add_sub_parr(\n self, dti_freq, pi_freq, box_with_array, box_with_array2\n ):\n # GH#20049 subtracting PeriodIndex should raise TypeError\n dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)\n pi = dti.to_period(pi_freq)\n\n dtarr = tm.box_expected(dti, box_with_array)\n parr = tm.box_expected(pi, box_with_array2)\n msg = "|".join(\n [\n "cannot (add|subtract)",\n "unsupported operand",\n "descriptor.*requires",\n "ufunc.*cannot use operands",\n ]\n )\n assert_invalid_addsub_type(dtarr, parr, msg)\n\n @pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")\n def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):\n # https://github.com/pandas-dev/pandas/issues/10329\n\n tz = tz_naive_fixture\n\n obj1 = date_range("2012-01-01", periods=3, tz=tz)\n obj2 = [time(i, i, i) for i in range(3)]\n\n obj1 = tm.box_expected(obj1, box_with_array)\n obj2 = tm.box_expected(obj2, box_with_array)\n\n msg = "|".join(\n [\n "unsupported operand",\n "cannot subtract DatetimeArray from ndarray",\n ]\n )\n # pandas.errors.PerformanceWarning: Non-vectorized DateOffset being\n # applied to Series or DatetimeIndex\n # we aren't testing that here, so ignore.\n assert_invalid_addsub_type(obj1, obj2, msg=msg)\n\n # -------------------------------------------------------------\n # Other invalid operations\n\n @pytest.mark.parametrize(\n "dt64_series",\n [\n Series([Timestamp("19900315"), Timestamp("19900315")]),\n Series([NaT, Timestamp("19900315")]),\n Series([NaT, NaT], dtype="datetime64[ns]"),\n ],\n )\n @pytest.mark.parametrize("one", [1, 1.0, np.array(1)])\n def test_dt64_mul_div_numeric_invalid(self, one, dt64_series, box_with_array):\n obj = 
tm.box_expected(dt64_series, box_with_array)

        msg = "cannot perform .* with this index type"

        # multiplication: datetime64 data has no defined product, either order
        with pytest.raises(TypeError, match=msg):
            obj * one
        with pytest.raises(TypeError, match=msg):
            one * obj

        # division: likewise undefined for datetime64 data, either order
        with pytest.raises(TypeError, match=msg):
            obj / one
        with pytest.raises(TypeError, match=msg):
            one / obj


class TestDatetime64DateOffsetArithmetic:
    # -------------------------------------------------------------
    # Tick DateOffsets

    # TODO: parametrize over timezone?
    @pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"])
    def test_dt64arr_series_add_tick_DateOffset(self, box_with_array, unit):
        # GH#4532
        # operate with pd.offsets
        # Adding a Tick offset (Second(5)) shifts each timestamp by 5 seconds,
        # preserving the datetime resolution `unit`; addition is commutative.
        ser = Series(
            [Timestamp("20130101 9:01"), Timestamp("20130101 9:02")]
        ).dt.as_unit(unit)
        expected = Series(
            [Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
        ).dt.as_unit(unit)

        ser = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        result = ser + pd.offsets.Second(5)
        tm.assert_equal(result, expected)

        result2 = pd.offsets.Second(5) + ser
        tm.assert_equal(result2, expected)

    def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
        # GH#4532
        # operate with pd.offsets
        # Subtracting Second(5) equals adding its negation; the reversed form
        # `offset - datetimelike` is invalid and must raise (checked below).
        ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        expected = Series(
            [Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
        )

        ser = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        result = ser - pd.offsets.Second(5)
        tm.assert_equal(result, expected)

        result2 = -pd.offsets.Second(5) + ser
        tm.assert_equal(result2, expected)
        msg = "(bad|unsupported) operand type for unary"
        with pytest.raises(TypeError, match=msg):
            pd.offsets.Second(5) - ser

    @pytest.mark.parametrize(
        "cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
    )
    def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, 
box_with_array):\n # GH#4532\n # smoke tests for valid DateOffsets\n ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])\n ser = tm.box_expected(ser, box_with_array)\n\n offset_cls = getattr(pd.offsets, cls_name)\n ser + offset_cls(5)\n offset_cls(5) + ser\n ser - offset_cls(5)\n\n def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):\n # GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype\n tz = tz_aware_fixture\n if tz == "US/Pacific":\n dates = date_range("2012-11-01", periods=3, tz=tz)\n offset = dates + pd.offsets.Hour(5)\n assert dates[0] + pd.offsets.Hour(5) == offset[0]\n\n dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="h")\n expected = DatetimeIndex(\n ["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],\n freq="h",\n tz=tz,\n ).as_unit("ns")\n\n dates = tm.box_expected(dates, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:\n offset = dates + scalar\n tm.assert_equal(offset, expected)\n offset = scalar + dates\n tm.assert_equal(offset, expected)\n\n roundtrip = offset - scalar\n tm.assert_equal(roundtrip, dates)\n\n msg = "|".join(\n ["bad operand type for unary -", "cannot subtract DatetimeArray"]\n )\n with pytest.raises(TypeError, match=msg):\n scalar - dates\n\n # -------------------------------------------------------------\n # RelativeDelta DateOffsets\n\n @pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"])\n def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array, unit):\n # GH#10699\n vec = DatetimeIndex(\n [\n Timestamp("2000-01-05 00:15:00"),\n Timestamp("2000-01-31 00:23:00"),\n Timestamp("2000-01-01"),\n Timestamp("2000-03-31"),\n Timestamp("2000-02-29"),\n Timestamp("2000-12-31"),\n Timestamp("2000-05-15"),\n Timestamp("2001-06-15"),\n ]\n ).as_unit(unit)\n vec = tm.box_expected(vec, box_with_array)\n vec_items = vec.iloc[0] if box_with_array is 
pd.DataFrame else vec\n\n # DateOffset relativedelta fastpath\n relative_kwargs = [\n ("years", 2),\n ("months", 5),\n ("days", 3),\n ("hours", 5),\n ("minutes", 10),\n ("seconds", 2),\n ("microseconds", 5),\n ]\n for i, (offset_unit, value) in enumerate(relative_kwargs):\n off = DateOffset(**{offset_unit: value})\n\n exp_unit = unit\n if offset_unit == "microseconds" and unit != "ns":\n exp_unit = "us"\n\n # TODO(GH#55564): as_unit will be unnecessary\n expected = DatetimeIndex([x + off for x in vec_items]).as_unit(exp_unit)\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(expected, vec + off)\n\n expected = DatetimeIndex([x - off for x in vec_items]).as_unit(exp_unit)\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(expected, vec - off)\n\n off = DateOffset(**dict(relative_kwargs[: i + 1]))\n\n expected = DatetimeIndex([x + off for x in vec_items]).as_unit(exp_unit)\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(expected, vec + off)\n\n expected = DatetimeIndex([x - off for x in vec_items]).as_unit(exp_unit)\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(expected, vec - off)\n msg = "(bad|unsupported) operand type for unary"\n with pytest.raises(TypeError, match=msg):\n off - vec\n\n # -------------------------------------------------------------\n # Non-Tick, Non-RelativeDelta DateOffsets\n\n # TODO: redundant with test_dt64arr_add_sub_DateOffset? 
that includes\n # tz-aware cases which this does not\n @pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")\n @pytest.mark.parametrize(\n "cls_and_kwargs",\n [\n "YearBegin",\n ("YearBegin", {"month": 5}),\n "YearEnd",\n ("YearEnd", {"month": 5}),\n "MonthBegin",\n "MonthEnd",\n "SemiMonthEnd",\n "SemiMonthBegin",\n "Week",\n ("Week", {"weekday": 3}),\n "Week",\n ("Week", {"weekday": 6}),\n "BusinessDay",\n "BDay",\n "QuarterEnd",\n "QuarterBegin",\n "CustomBusinessDay",\n "CDay",\n "CBMonthEnd",\n "CBMonthBegin",\n "BMonthBegin",\n "BMonthEnd",\n "BusinessHour",\n "BYearBegin",\n "BYearEnd",\n "BQuarterBegin",\n ("LastWeekOfMonth", {"weekday": 2}),\n (\n "FY5253Quarter",\n {\n "qtr_with_extra_week": 1,\n "startingMonth": 1,\n "weekday": 2,\n "variation": "nearest",\n },\n ),\n ("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),\n ("WeekOfMonth", {"weekday": 2, "week": 2}),\n "Easter",\n ("DateOffset", {"day": 4}),\n ("DateOffset", {"month": 5}),\n ],\n )\n @pytest.mark.parametrize("normalize", [True, False])\n @pytest.mark.parametrize("n", [0, 5])\n @pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"])\n @pytest.mark.parametrize("tz", [None, "US/Central"])\n def test_dt64arr_add_sub_DateOffsets(\n self, box_with_array, n, normalize, cls_and_kwargs, unit, tz\n ):\n # GH#10699\n # assert vectorized operation matches pointwise operations\n\n if isinstance(cls_and_kwargs, tuple):\n # If cls_name param is a tuple, then 2nd entry is kwargs for\n # the offset constructor\n cls_name, kwargs = cls_and_kwargs\n else:\n cls_name = cls_and_kwargs\n kwargs = {}\n\n if n == 0 and cls_name in [\n "WeekOfMonth",\n "LastWeekOfMonth",\n "FY5253Quarter",\n "FY5253",\n ]:\n # passing n = 0 is invalid for these offset classes\n return\n\n vec = (\n DatetimeIndex(\n [\n Timestamp("2000-01-05 00:15:00"),\n Timestamp("2000-01-31 00:23:00"),\n Timestamp("2000-01-01"),\n Timestamp("2000-03-31"),\n Timestamp("2000-02-29"),\n 
Timestamp("2000-12-31"),\n Timestamp("2000-05-15"),\n Timestamp("2001-06-15"),\n ]\n )\n .as_unit(unit)\n .tz_localize(tz)\n )\n vec = tm.box_expected(vec, box_with_array)\n vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec\n\n offset_cls = getattr(pd.offsets, cls_name)\n offset = offset_cls(n, normalize=normalize, **kwargs)\n\n # TODO(GH#55564): as_unit will be unnecessary\n expected = DatetimeIndex([x + offset for x in vec_items]).as_unit(unit)\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(expected, vec + offset)\n tm.assert_equal(expected, offset + vec)\n\n expected = DatetimeIndex([x - offset for x in vec_items]).as_unit(unit)\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(expected, vec - offset)\n\n expected = DatetimeIndex([offset + x for x in vec_items]).as_unit(unit)\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(expected, offset + vec)\n msg = "(bad|unsupported) operand type for unary"\n with pytest.raises(TypeError, match=msg):\n offset - vec\n\n @pytest.mark.parametrize(\n "other",\n [\n np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),\n np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),\n np.array( # matching offsets\n [pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]\n ),\n ],\n )\n @pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])\n def test_dt64arr_add_sub_offset_array(\n self, tz_naive_fixture, box_with_array, op, other\n ):\n # GH#18849\n # GH#10699 array of offsets\n\n tz = tz_naive_fixture\n dti = date_range("2017-01-01", periods=2, tz=tz)\n dtarr = tm.box_expected(dti, box_with_array)\n\n expected = DatetimeIndex([op(dti[n], other[n]) for n in range(len(dti))])\n expected = tm.box_expected(expected, box_with_array).astype(object)\n\n with tm.assert_produces_warning(PerformanceWarning):\n res = op(dtarr, other)\n tm.assert_equal(res, expected)\n\n # Same thing but boxing other\n other = 
tm.box_expected(other, box_with_array)\n if box_with_array is pd.array and op is roperator.radd:\n # We expect a NumpyExtensionArray, not ndarray[object] here\n expected = pd.array(expected, dtype=object)\n with tm.assert_produces_warning(PerformanceWarning):\n res = op(dtarr, other)\n tm.assert_equal(res, expected)\n\n @pytest.mark.parametrize(\n "op, offset, exp, exp_freq",\n [\n (\n "__add__",\n DateOffset(months=3, days=10),\n [\n Timestamp("2014-04-11"),\n Timestamp("2015-04-11"),\n Timestamp("2016-04-11"),\n Timestamp("2017-04-11"),\n ],\n None,\n ),\n (\n "__add__",\n DateOffset(months=3),\n [\n Timestamp("2014-04-01"),\n Timestamp("2015-04-01"),\n Timestamp("2016-04-01"),\n Timestamp("2017-04-01"),\n ],\n "YS-APR",\n ),\n (\n "__sub__",\n DateOffset(months=3, days=10),\n [\n Timestamp("2013-09-21"),\n Timestamp("2014-09-21"),\n Timestamp("2015-09-21"),\n Timestamp("2016-09-21"),\n ],\n None,\n ),\n (\n "__sub__",\n DateOffset(months=3),\n [\n Timestamp("2013-10-01"),\n Timestamp("2014-10-01"),\n Timestamp("2015-10-01"),\n Timestamp("2016-10-01"),\n ],\n "YS-OCT",\n ),\n ],\n )\n def test_dti_add_sub_nonzero_mth_offset(\n self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array\n ):\n # GH 26258\n tz = tz_aware_fixture\n date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="YS", tz=tz)\n date = tm.box_expected(date, box_with_array, False)\n mth = getattr(date, op)\n result = mth(offset)\n\n expected = DatetimeIndex(exp, tz=tz).as_unit("ns")\n expected = tm.box_expected(expected, box_with_array, False)\n tm.assert_equal(result, expected)\n\n def test_dt64arr_series_add_DateOffset_with_milli(self):\n # GH 57529\n dti = DatetimeIndex(\n [\n "2000-01-01 00:00:00.012345678",\n "2000-01-31 00:00:00.012345678",\n "2000-02-29 00:00:00.012345678",\n ],\n dtype="datetime64[ns]",\n )\n result = dti + DateOffset(milliseconds=4)\n expected = DatetimeIndex(\n [\n "2000-01-01 00:00:00.016345678",\n "2000-01-31 00:00:00.016345678",\n "2000-02-29 
00:00:00.016345678",\n ],\n dtype="datetime64[ns]",\n )\n tm.assert_index_equal(result, expected)\n\n result = dti + DateOffset(days=1, milliseconds=4)\n expected = DatetimeIndex(\n [\n "2000-01-02 00:00:00.016345678",\n "2000-02-01 00:00:00.016345678",\n "2000-03-01 00:00:00.016345678",\n ],\n dtype="datetime64[ns]",\n )\n tm.assert_index_equal(result, expected)\n\n\nclass TestDatetime64OverflowHandling:\n # TODO: box + de-duplicate\n\n def test_dt64_overflow_masking(self, box_with_array):\n # GH#25317\n left = Series([Timestamp("1969-12-31")], dtype="M8[ns]")\n right = Series([NaT])\n\n left = tm.box_expected(left, box_with_array)\n right = tm.box_expected(right, box_with_array)\n\n expected = TimedeltaIndex([NaT], dtype="m8[ns]")\n expected = tm.box_expected(expected, box_with_array)\n\n result = left - right\n tm.assert_equal(result, expected)\n\n def test_dt64_series_arith_overflow(self):\n # GH#12534, fixed by GH#19024\n dt = Timestamp("1700-01-31")\n td = Timedelta("20000 Days")\n dti = date_range("1949-09-30", freq="100YE", periods=4)\n ser = Series(dti)\n msg = "Overflow in int64 addition"\n with pytest.raises(OverflowError, match=msg):\n ser - dt\n with pytest.raises(OverflowError, match=msg):\n dt - ser\n with pytest.raises(OverflowError, match=msg):\n ser + td\n with pytest.raises(OverflowError, match=msg):\n td + ser\n\n ser.iloc[-1] = NaT\n expected = Series(\n ["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"\n )\n res = ser + td\n tm.assert_series_equal(res, expected)\n res = td + ser\n tm.assert_series_equal(res, expected)\n\n ser.iloc[1:] = NaT\n expected = Series(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]")\n res = ser - dt\n tm.assert_series_equal(res, expected)\n res = dt - ser\n tm.assert_series_equal(res, -expected)\n\n def test_datetimeindex_sub_timestamp_overflow(self):\n dtimax = pd.to_datetime(["2021-12-28 17:19", Timestamp.max]).as_unit("ns")\n dtimin = pd.to_datetime(["2021-12-28 17:19", 
Timestamp.min]).as_unit("ns")\n\n tsneg = Timestamp("1950-01-01").as_unit("ns")\n ts_neg_variants = [\n tsneg,\n tsneg.to_pydatetime(),\n tsneg.to_datetime64().astype("datetime64[ns]"),\n tsneg.to_datetime64().astype("datetime64[D]"),\n ]\n\n tspos = Timestamp("1980-01-01").as_unit("ns")\n ts_pos_variants = [\n tspos,\n tspos.to_pydatetime(),\n tspos.to_datetime64().astype("datetime64[ns]"),\n tspos.to_datetime64().astype("datetime64[D]"),\n ]\n msg = "Overflow in int64 addition"\n for variant in ts_neg_variants:\n with pytest.raises(OverflowError, match=msg):\n dtimax - variant\n\n expected = Timestamp.max._value - tspos._value\n for variant in ts_pos_variants:\n res = dtimax - variant\n assert res[1]._value == expected\n\n expected = Timestamp.min._value - tsneg._value\n for variant in ts_neg_variants:\n res = dtimin - variant\n assert res[1]._value == expected\n\n for variant in ts_pos_variants:\n with pytest.raises(OverflowError, match=msg):\n dtimin - variant\n\n def test_datetimeindex_sub_datetimeindex_overflow(self):\n # GH#22492, GH#22508\n dtimax = pd.to_datetime(["2021-12-28 17:19", Timestamp.max]).as_unit("ns")\n dtimin = pd.to_datetime(["2021-12-28 17:19", Timestamp.min]).as_unit("ns")\n\n ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"]).as_unit("ns")\n ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"]).as_unit("ns")\n\n # General tests\n expected = Timestamp.max._value - ts_pos[1]._value\n result = dtimax - ts_pos\n assert result[1]._value == expected\n\n expected = Timestamp.min._value - ts_neg[1]._value\n result = dtimin - ts_neg\n assert result[1]._value == expected\n msg = "Overflow in int64 addition"\n with pytest.raises(OverflowError, match=msg):\n dtimax - ts_neg\n\n with pytest.raises(OverflowError, match=msg):\n dtimin - ts_pos\n\n # Edge cases\n tmin = pd.to_datetime([Timestamp.min])\n t1 = tmin + Timedelta.max + Timedelta("1us")\n with pytest.raises(OverflowError, match=msg):\n t1 - tmin\n\n tmax = pd.to_datetime([Timestamp.max])\n 
t2 = tmax + Timedelta.min - Timedelta("1us")\n with pytest.raises(OverflowError, match=msg):\n tmax - t2\n\n\nclass TestTimestampSeriesArithmetic:\n def test_empty_series_add_sub(self, box_with_array):\n # GH#13844\n a = Series(dtype="M8[ns]")\n b = Series(dtype="m8[ns]")\n a = box_with_array(a)\n b = box_with_array(b)\n tm.assert_equal(a, a + b)\n tm.assert_equal(a, a - b)\n tm.assert_equal(a, b + a)\n msg = "cannot subtract"\n with pytest.raises(TypeError, match=msg):\n b - a\n\n def test_operators_datetimelike(self):\n # ## timedelta64 ###\n td1 = Series([timedelta(minutes=5, seconds=3)] * 3)\n td1.iloc[2] = np.nan\n\n # ## datetime64 ###\n dt1 = Series(\n [\n Timestamp("20111230"),\n Timestamp("20120101"),\n Timestamp("20120103"),\n ]\n )\n dt1.iloc[2] = np.nan\n dt2 = Series(\n [\n Timestamp("20111231"),\n Timestamp("20120102"),\n Timestamp("20120104"),\n ]\n )\n dt1 - dt2\n dt2 - dt1\n\n # datetime64 with timetimedelta\n dt1 + td1\n td1 + dt1\n dt1 - td1\n\n # timetimedelta with datetime64\n td1 + dt1\n dt1 + td1\n\n def test_dt64ser_sub_datetime_dtype(self, unit):\n ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))\n dt = datetime(1993, 6, 22, 13, 30)\n ser = Series([ts], dtype=f"M8[{unit}]")\n result = ser - dt\n\n # the expected unit is the max of `unit` and the unit imputed to `dt`,\n # which is "us"\n exp_unit = tm.get_finest_unit(unit, "us")\n assert result.dtype == f"timedelta64[{exp_unit}]"\n\n # -------------------------------------------------------------\n # TODO: This next block of tests came from tests.series.test_operators,\n # needs to be de-duplicated and parametrized over `box` classes\n\n @pytest.mark.parametrize(\n "left, right, op_fail",\n [\n [\n [Timestamp("20111230"), Timestamp("20120101"), NaT],\n [Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")],\n ["__sub__", "__rsub__"],\n ],\n [\n [Timestamp("20111230"), Timestamp("20120101"), NaT],\n [timedelta(minutes=5, seconds=3), timedelta(minutes=5, seconds=3), NaT],\n 
["__add__", "__radd__", "__sub__"],\n ],\n [\n [\n Timestamp("20111230", tz="US/Eastern"),\n Timestamp("20111230", tz="US/Eastern"),\n NaT,\n ],\n [timedelta(minutes=5, seconds=3), NaT, timedelta(minutes=5, seconds=3)],\n ["__add__", "__radd__", "__sub__"],\n ],\n ],\n )\n def test_operators_datetimelike_invalid(\n self, left, right, op_fail, all_arithmetic_operators\n ):\n # these are all TypeError ops\n op_str = all_arithmetic_operators\n arg1 = Series(left)\n arg2 = Series(right)\n # check that we are getting a TypeError\n # with 'operate' (from core/ops.py) for the ops that are not\n # defined\n op = getattr(arg1, op_str, None)\n # Previously, _validate_for_numeric_binop in core/indexes/base.py\n # did this for us.\n if op_str not in op_fail:\n with pytest.raises(\n TypeError, match="operate|[cC]annot|unsupported operand"\n ):\n op(arg2)\n else:\n # Smoke test\n op(arg2)\n\n def test_sub_single_tz(self, unit):\n # GH#12290\n s1 = Series([Timestamp("2016-02-10", tz="America/Sao_Paulo")]).dt.as_unit(unit)\n s2 = Series([Timestamp("2016-02-08", tz="America/Sao_Paulo")]).dt.as_unit(unit)\n result = s1 - s2\n expected = Series([Timedelta("2days")]).dt.as_unit(unit)\n tm.assert_series_equal(result, expected)\n result = s2 - s1\n expected = Series([Timedelta("-2days")]).dt.as_unit(unit)\n tm.assert_series_equal(result, expected)\n\n def test_dt64tz_series_sub_dtitz(self):\n # GH#19071 subtracting tzaware DatetimeIndex from tzaware Series\n # (with same tz) raises, fixed by #19024\n dti = date_range("1999-09-30", periods=10, tz="US/Pacific")\n ser = Series(dti)\n expected = Series(TimedeltaIndex(["0days"] * 10))\n\n res = dti - ser\n tm.assert_series_equal(res, expected)\n res = ser - dti\n tm.assert_series_equal(res, expected)\n\n def test_sub_datetime_compat(self, unit):\n # see GH#14088\n ser = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), NaT]).dt.as_unit(unit)\n dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)\n # The datetime object has "us" so we upcast 
lower units\n exp_unit = tm.get_finest_unit(unit, "us")\n exp = Series([Timedelta("1 days"), NaT]).dt.as_unit(exp_unit)\n result = ser - dt\n tm.assert_series_equal(result, exp)\n result2 = ser - Timestamp(dt)\n tm.assert_series_equal(result2, exp)\n\n def test_dt64_series_add_mixed_tick_DateOffset(self):\n # GH#4532\n # operate with pd.offsets\n s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])\n\n result = s + pd.offsets.Milli(5)\n result2 = pd.offsets.Milli(5) + s\n expected = Series(\n [Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]\n )\n tm.assert_series_equal(result, expected)\n tm.assert_series_equal(result2, expected)\n\n result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)\n expected = Series(\n [Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]\n )\n tm.assert_series_equal(result, expected)\n\n def test_datetime64_ops_nat(self, unit):\n # GH#11349\n datetime_series = Series([NaT, Timestamp("19900315")]).dt.as_unit(unit)\n nat_series_dtype_timestamp = Series([NaT, NaT], dtype=f"datetime64[{unit}]")\n single_nat_dtype_datetime = Series([NaT], dtype=f"datetime64[{unit}]")\n\n # subtraction\n tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)\n msg = "bad operand type for unary -: 'DatetimeArray'"\n with pytest.raises(TypeError, match=msg):\n -single_nat_dtype_datetime + datetime_series\n\n tm.assert_series_equal(\n -NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp\n )\n with pytest.raises(TypeError, match=msg):\n -single_nat_dtype_datetime + nat_series_dtype_timestamp\n\n # addition\n tm.assert_series_equal(\n nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp\n )\n tm.assert_series_equal(\n NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp\n )\n\n tm.assert_series_equal(\n nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp\n )\n tm.assert_series_equal(\n NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp\n )\n\n # 
-------------------------------------------------------------\n # Timezone-Centric Tests\n\n def test_operators_datetimelike_with_timezones(self):\n tz = "US/Eastern"\n dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")\n dt2 = dt1.copy()\n dt2.iloc[2] = np.nan\n\n td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="h"))\n td2 = td1.copy()\n td2.iloc[1] = np.nan\n assert td2._values.freq is None\n\n result = dt1 + td1[0]\n exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n\n result = dt2 + td2[0]\n exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n\n # odd numpy behavior with scalar timedeltas\n result = td1[0] + dt1\n exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n\n result = td2[0] + dt2\n exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n\n result = dt1 - td1[0]\n exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n msg = "(bad|unsupported) operand type for unary"\n with pytest.raises(TypeError, match=msg):\n td1[0] - dt1\n\n result = dt2 - td2[0]\n exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n with pytest.raises(TypeError, match=msg):\n td2[0] - dt2\n\n result = dt1 + td1\n exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n\n result = dt2 + td2\n exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n\n result = dt1 - td1\n exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n\n result = dt2 - td2\n exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n msg = "cannot (add|subtract)"\n with pytest.raises(TypeError, match=msg):\n td1 - dt1\n with 
pytest.raises(TypeError, match=msg):\n td2 - dt2\n\n\nclass TestDatetimeIndexArithmetic:\n # -------------------------------------------------------------\n # Binary operations DatetimeIndex and TimedeltaIndex/array\n\n def test_dti_add_tdi(self, tz_naive_fixture):\n # GH#17558\n tz = tz_naive_fixture\n dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)\n tdi = pd.timedelta_range("0 days", periods=10)\n expected = date_range("2017-01-01", periods=10, tz=tz)\n expected = expected._with_freq(None)\n\n # add with TimedeltaIndex\n result = dti + tdi\n tm.assert_index_equal(result, expected)\n\n result = tdi + dti\n tm.assert_index_equal(result, expected)\n\n # add with timedelta64 array\n result = dti + tdi.values\n tm.assert_index_equal(result, expected)\n\n result = tdi.values + dti\n tm.assert_index_equal(result, expected)\n\n def test_dti_iadd_tdi(self, tz_naive_fixture):\n # GH#17558\n tz = tz_naive_fixture\n dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)\n tdi = pd.timedelta_range("0 days", periods=10)\n expected = date_range("2017-01-01", periods=10, tz=tz)\n expected = expected._with_freq(None)\n\n # iadd with TimedeltaIndex\n result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)\n result += tdi\n tm.assert_index_equal(result, expected)\n\n result = pd.timedelta_range("0 days", periods=10)\n result += dti\n tm.assert_index_equal(result, expected)\n\n # iadd with timedelta64 array\n result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)\n result += tdi.values\n tm.assert_index_equal(result, expected)\n\n result = pd.timedelta_range("0 days", periods=10)\n result += dti\n tm.assert_index_equal(result, expected)\n\n def test_dti_sub_tdi(self, tz_naive_fixture):\n # GH#17558\n tz = tz_naive_fixture\n dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)\n tdi = pd.timedelta_range("0 days", periods=10)\n expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D")\n expected = expected._with_freq(None)\n\n # sub with 
TimedeltaIndex\n result = dti - tdi\n tm.assert_index_equal(result, expected)\n\n msg = "cannot subtract .*TimedeltaArray"\n with pytest.raises(TypeError, match=msg):\n tdi - dti\n\n # sub with timedelta64 array\n result = dti - tdi.values\n tm.assert_index_equal(result, expected)\n\n msg = "cannot subtract a datelike from a TimedeltaArray"\n with pytest.raises(TypeError, match=msg):\n tdi.values - dti\n\n def test_dti_isub_tdi(self, tz_naive_fixture, unit):\n # GH#17558\n tz = tz_naive_fixture\n dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10).as_unit(unit)\n tdi = pd.timedelta_range("0 days", periods=10, unit=unit)\n expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D", unit=unit)\n expected = expected._with_freq(None)\n\n # isub with TimedeltaIndex\n result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10).as_unit(unit)\n result -= tdi\n tm.assert_index_equal(result, expected)\n\n # DTA.__isub__ GH#43904\n dta = dti._data.copy()\n dta -= tdi\n tm.assert_datetime_array_equal(dta, expected._data)\n\n out = dti._data.copy()\n np.subtract(out, tdi, out=out)\n tm.assert_datetime_array_equal(out, expected._data)\n\n msg = "cannot subtract a datelike from a TimedeltaArray"\n with pytest.raises(TypeError, match=msg):\n tdi -= dti\n\n # isub with timedelta64 array\n result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10).as_unit(unit)\n result -= tdi.values\n tm.assert_index_equal(result, expected)\n\n with pytest.raises(TypeError, match=msg):\n tdi.values -= dti\n\n with pytest.raises(TypeError, match=msg):\n tdi._values -= dti\n\n # -------------------------------------------------------------\n # Binary Operations DatetimeIndex and datetime-like\n # TODO: A couple other tests belong in this section. 
Move them in\n # A PR where there isn't already a giant diff.\n\n # -------------------------------------------------------------\n\n def test_dta_add_sub_index(self, tz_naive_fixture):\n # Check that DatetimeArray defers to Index classes\n dti = date_range("20130101", periods=3, tz=tz_naive_fixture)\n dta = dti.array\n result = dta - dti\n expected = dti - dti\n tm.assert_index_equal(result, expected)\n\n tdi = result\n result = dta + tdi\n expected = dti + tdi\n tm.assert_index_equal(result, expected)\n\n result = dta - tdi\n expected = dti - tdi\n tm.assert_index_equal(result, expected)\n\n def test_sub_dti_dti(self, unit):\n # previously performed setop (deprecated in 0.16.0), now changed to\n # return subtraction -> TimeDeltaIndex (GH ...)\n\n dti = date_range("20130101", periods=3, unit=unit)\n dti_tz = date_range("20130101", periods=3, unit=unit).tz_localize("US/Eastern")\n expected = TimedeltaIndex([0, 0, 0]).as_unit(unit)\n\n result = dti - dti\n tm.assert_index_equal(result, expected)\n\n result = dti_tz - dti_tz\n tm.assert_index_equal(result, expected)\n msg = "Cannot subtract tz-naive and tz-aware datetime-like objects"\n with pytest.raises(TypeError, match=msg):\n dti_tz - dti\n\n with pytest.raises(TypeError, match=msg):\n dti - dti_tz\n\n # isub\n dti -= dti\n tm.assert_index_equal(dti, expected)\n\n # different length raises ValueError\n dti1 = date_range("20130101", periods=3, unit=unit)\n dti2 = date_range("20130101", periods=4, unit=unit)\n msg = "cannot add indices of unequal length"\n with pytest.raises(ValueError, match=msg):\n dti1 - dti2\n\n # NaN propagation\n dti1 = DatetimeIndex(["2012-01-01", np.nan, "2012-01-03"]).as_unit(unit)\n dti2 = DatetimeIndex(["2012-01-02", "2012-01-03", np.nan]).as_unit(unit)\n expected = TimedeltaIndex(["1 days", np.nan, np.nan]).as_unit(unit)\n result = dti2 - dti1\n tm.assert_index_equal(result, expected)\n\n # -------------------------------------------------------------------\n # TODO: Most of this block 
is moved from series or frame tests, needs\n # cleanup, box-parametrization, and de-duplication\n\n @pytest.mark.parametrize("op", [operator.add, operator.sub])\n def test_timedelta64_equal_timedelta_supported_ops(self, op, box_with_array):\n ser = Series(\n [\n Timestamp("20130301"),\n Timestamp("20130228 23:00:00"),\n Timestamp("20130228 22:00:00"),\n Timestamp("20130228 21:00:00"),\n ]\n )\n obj = box_with_array(ser)\n\n intervals = ["D", "h", "m", "s", "us"]\n\n def timedelta64(*args):\n # see casting notes in NumPy gh-12927\n return np.sum(list(starmap(np.timedelta64, zip(args, intervals))))\n\n for d, h, m, s, us in product(*([range(2)] * 5)):\n nptd = timedelta64(d, h, m, s, us)\n pytd = timedelta(days=d, hours=h, minutes=m, seconds=s, microseconds=us)\n lhs = op(obj, nptd)\n rhs = op(obj, pytd)\n\n tm.assert_equal(lhs, rhs)\n\n def test_ops_nat_mixed_datetime64_timedelta64(self):\n # GH#11349\n timedelta_series = Series([NaT, Timedelta("1s")])\n datetime_series = Series([NaT, Timestamp("19900315")])\n nat_series_dtype_timedelta = Series([NaT, NaT], dtype="timedelta64[ns]")\n nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")\n single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")\n single_nat_dtype_timedelta = Series([NaT], dtype="timedelta64[ns]")\n\n # subtraction\n tm.assert_series_equal(\n datetime_series - single_nat_dtype_datetime, nat_series_dtype_timedelta\n )\n\n tm.assert_series_equal(\n datetime_series - single_nat_dtype_timedelta, nat_series_dtype_timestamp\n )\n tm.assert_series_equal(\n -single_nat_dtype_timedelta + datetime_series, nat_series_dtype_timestamp\n )\n\n # without a Series wrapping the NaT, it is ambiguous\n # whether it is a datetime64 or timedelta64\n # defaults to interpreting it as timedelta64\n tm.assert_series_equal(\n nat_series_dtype_timestamp - single_nat_dtype_datetime,\n nat_series_dtype_timedelta,\n )\n\n tm.assert_series_equal(\n nat_series_dtype_timestamp - 
single_nat_dtype_timedelta,\n nat_series_dtype_timestamp,\n )\n tm.assert_series_equal(\n -single_nat_dtype_timedelta + nat_series_dtype_timestamp,\n nat_series_dtype_timestamp,\n )\n msg = "cannot subtract a datelike"\n with pytest.raises(TypeError, match=msg):\n timedelta_series - single_nat_dtype_datetime\n\n # addition\n tm.assert_series_equal(\n nat_series_dtype_timestamp + single_nat_dtype_timedelta,\n nat_series_dtype_timestamp,\n )\n tm.assert_series_equal(\n single_nat_dtype_timedelta + nat_series_dtype_timestamp,\n nat_series_dtype_timestamp,\n )\n\n tm.assert_series_equal(\n nat_series_dtype_timestamp + single_nat_dtype_timedelta,\n nat_series_dtype_timestamp,\n )\n tm.assert_series_equal(\n single_nat_dtype_timedelta + nat_series_dtype_timestamp,\n nat_series_dtype_timestamp,\n )\n\n tm.assert_series_equal(\n nat_series_dtype_timedelta + single_nat_dtype_datetime,\n nat_series_dtype_timestamp,\n )\n tm.assert_series_equal(\n single_nat_dtype_datetime + nat_series_dtype_timedelta,\n nat_series_dtype_timestamp,\n )\n\n def test_ufunc_coercions(self, unit):\n idx = date_range("2011-01-01", periods=3, freq="2D", name="x", unit=unit)\n\n delta = np.timedelta64(1, "D")\n exp = date_range("2011-01-02", periods=3, freq="2D", name="x", unit=unit)\n for result in [idx + delta, np.add(idx, delta)]:\n assert isinstance(result, DatetimeIndex)\n tm.assert_index_equal(result, exp)\n assert result.freq == "2D"\n\n exp = date_range("2010-12-31", periods=3, freq="2D", name="x", unit=unit)\n\n for result in [idx - delta, np.subtract(idx, delta)]:\n assert isinstance(result, DatetimeIndex)\n tm.assert_index_equal(result, exp)\n assert result.freq == "2D"\n\n # When adding/subtracting an ndarray (which has no .freq), the result\n # does not infer freq\n idx = idx._with_freq(None)\n delta = np.array(\n [np.timedelta64(1, "D"), np.timedelta64(2, "D"), np.timedelta64(3, "D")]\n )\n exp = DatetimeIndex(\n ["2011-01-02", "2011-01-05", "2011-01-08"], name="x"\n 
).as_unit(unit)\n\n for result in [idx + delta, np.add(idx, delta)]:\n tm.assert_index_equal(result, exp)\n assert result.freq == exp.freq\n\n exp = DatetimeIndex(\n ["2010-12-31", "2011-01-01", "2011-01-02"], name="x"\n ).as_unit(unit)\n for result in [idx - delta, np.subtract(idx, delta)]:\n assert isinstance(result, DatetimeIndex)\n tm.assert_index_equal(result, exp)\n assert result.freq == exp.freq\n\n def test_dti_add_series(self, tz_naive_fixture, names):\n # GH#13905\n tz = tz_naive_fixture\n index = DatetimeIndex(\n ["2016-06-28 05:30", "2016-06-28 05:31"], tz=tz, name=names[0]\n ).as_unit("ns")\n ser = Series([Timedelta(seconds=5)] * 2, index=index, name=names[1])\n expected = Series(index + Timedelta(seconds=5), index=index, name=names[2])\n\n # passing name arg isn't enough when names[2] is None\n expected.name = names[2]\n assert expected.dtype == index.dtype\n result = ser + index\n tm.assert_series_equal(result, expected)\n result2 = index + ser\n tm.assert_series_equal(result2, expected)\n\n expected = index + Timedelta(seconds=5)\n result3 = ser.values + index\n tm.assert_index_equal(result3, expected)\n result4 = index + ser.values\n tm.assert_index_equal(result4, expected)\n\n @pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])\n def test_dti_addsub_offset_arraylike(\n self, tz_naive_fixture, names, op, index_or_series\n ):\n # GH#18849, GH#19744\n other_box = index_or_series\n\n tz = tz_naive_fixture\n dti = date_range("2017-01-01", periods=2, tz=tz, name=names[0])\n other = other_box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)], name=names[1])\n\n xbox = get_upcast_box(dti, other)\n\n with tm.assert_produces_warning(PerformanceWarning):\n res = op(dti, other)\n\n expected = DatetimeIndex(\n [op(dti[n], other[n]) for n in range(len(dti))], name=names[2], freq="infer"\n )\n expected = tm.box_expected(expected, xbox).astype(object)\n tm.assert_equal(res, expected)\n\n @pytest.mark.parametrize("other_box", [pd.Index, 
np.array])\n def test_dti_addsub_object_arraylike(\n self, tz_naive_fixture, box_with_array, other_box\n ):\n tz = tz_naive_fixture\n\n dti = date_range("2017-01-01", periods=2, tz=tz)\n dtarr = tm.box_expected(dti, box_with_array)\n other = other_box([pd.offsets.MonthEnd(), Timedelta(days=4)])\n xbox = get_upcast_box(dtarr, other)\n\n expected = DatetimeIndex(["2017-01-31", "2017-01-06"], tz=tz_naive_fixture)\n expected = tm.box_expected(expected, xbox).astype(object)\n\n with tm.assert_produces_warning(PerformanceWarning):\n result = dtarr + other\n tm.assert_equal(result, expected)\n\n expected = DatetimeIndex(["2016-12-31", "2016-12-29"], tz=tz_naive_fixture)\n expected = tm.box_expected(expected, xbox).astype(object)\n\n with tm.assert_produces_warning(PerformanceWarning):\n result = dtarr - other\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.parametrize("years", [-1, 0, 1])\n@pytest.mark.parametrize("months", [-2, 0, 2])\n@pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"])\ndef test_shift_months(years, months, unit):\n dti = DatetimeIndex(\n [\n Timestamp("2000-01-05 00:15:00"),\n Timestamp("2000-01-31 00:23:00"),\n Timestamp("2000-01-01"),\n Timestamp("2000-02-29"),\n Timestamp("2000-12-31"),\n ]\n ).as_unit(unit)\n shifted = shift_months(dti.asi8, years * 12 + months, reso=dti._data._creso)\n shifted_dt64 = shifted.view(f"M8[{dti.unit}]")\n actual = DatetimeIndex(shifted_dt64)\n\n raw = [x + pd.offsets.DateOffset(years=years, months=months) for x in dti]\n expected = DatetimeIndex(raw).as_unit(dti.unit)\n tm.assert_index_equal(actual, expected)\n\n\ndef test_dt64arr_addsub_object_dtype_2d():\n # block-wise DataFrame operations will require operating on 2D\n # DatetimeArray/TimedeltaArray, so check that specifically.\n dti = date_range("1994-02-13", freq="2W", periods=4)\n dta = dti._data.reshape((4, 1))\n\n other = np.array([[pd.offsets.Day(n)] for n in range(4)])\n assert other.shape == dta.shape\n\n with 
tm.assert_produces_warning(PerformanceWarning):\n result = dta + other\n with tm.assert_produces_warning(PerformanceWarning):\n expected = (dta[:, 0] + other[:, 0]).reshape(-1, 1)\n\n tm.assert_numpy_array_equal(result, expected)\n\n with tm.assert_produces_warning(PerformanceWarning):\n # Case where we expect to get a TimedeltaArray back\n result2 = dta - dta.astype(object)\n\n assert result2.shape == (4, 1)\n assert all(td._value == 0 for td in result2.ravel())\n\n\ndef test_non_nano_dt64_addsub_np_nat_scalars():\n # GH 52295\n ser = Series([1233242342344, 232432434324, 332434242344], dtype="datetime64[ms]")\n result = ser - np.datetime64("nat", "ms")\n expected = Series([NaT] * 3, dtype="timedelta64[ms]")\n tm.assert_series_equal(result, expected)\n\n result = ser + np.timedelta64("nat", "ms")\n expected = Series([NaT] * 3, dtype="datetime64[ms]")\n tm.assert_series_equal(result, expected)\n\n\ndef test_non_nano_dt64_addsub_np_nat_scalars_unitless():\n # GH 52295\n # TODO: Can we default to the ser unit?\n ser = Series([1233242342344, 232432434324, 332434242344], dtype="datetime64[ms]")\n result = ser - np.datetime64("nat")\n expected = Series([NaT] * 3, dtype="timedelta64[ns]")\n tm.assert_series_equal(result, expected)\n\n result = ser + np.timedelta64("nat")\n expected = Series([NaT] * 3, dtype="datetime64[ns]")\n tm.assert_series_equal(result, expected)\n\n\ndef test_non_nano_dt64_addsub_np_nat_scalars_unsupported_unit():\n # GH 52295\n ser = Series([12332, 23243, 33243], dtype="datetime64[s]")\n result = ser - np.datetime64("nat", "D")\n expected = Series([NaT] * 3, dtype="timedelta64[s]")\n tm.assert_series_equal(result, expected)\n\n result = ser + np.timedelta64("nat", "D")\n expected = Series([NaT] * 3, dtype="datetime64[s]")\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\arithmetic\test_datetime64.py | test_datetime64.py | Python | 90,255 | 0.75 | 0.070474 | 0.101211 | node-utils | 816 | 2024-09-19T05:21:31.034590 | 
MIT | true | 5368b17586838d8713d689cd993f488b |
import operator\n\nimport numpy as np\nimport pytest\n\nfrom pandas.core.dtypes.common import is_list_like\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n Index,\n Interval,\n IntervalIndex,\n Period,\n Series,\n Timedelta,\n Timestamp,\n date_range,\n period_range,\n timedelta_range,\n)\nimport pandas._testing as tm\nfrom pandas.core.arrays import (\n BooleanArray,\n IntervalArray,\n)\nfrom pandas.tests.arithmetic.common import get_upcast_box\n\n\n@pytest.fixture(\n params=[\n (Index([0, 2, 4, 4]), Index([1, 3, 5, 8])),\n (Index([0.0, 1.0, 2.0, np.nan]), Index([1.0, 2.0, 3.0, np.nan])),\n (\n timedelta_range("0 days", periods=3).insert(3, pd.NaT),\n timedelta_range("1 day", periods=3).insert(3, pd.NaT),\n ),\n (\n date_range("20170101", periods=3).insert(3, pd.NaT),\n date_range("20170102", periods=3).insert(3, pd.NaT),\n ),\n (\n date_range("20170101", periods=3, tz="US/Eastern").insert(3, pd.NaT),\n date_range("20170102", periods=3, tz="US/Eastern").insert(3, pd.NaT),\n ),\n ],\n ids=lambda x: str(x[0].dtype),\n)\ndef left_right_dtypes(request):\n """\n Fixture for building an IntervalArray from various dtypes\n """\n return request.param\n\n\n@pytest.fixture\ndef interval_array(left_right_dtypes):\n """\n Fixture to generate an IntervalArray of various dtypes containing NA if possible\n """\n left, right = left_right_dtypes\n return IntervalArray.from_arrays(left, right)\n\n\ndef create_categorical_intervals(left, right, closed="right"):\n return Categorical(IntervalIndex.from_arrays(left, right, closed))\n\n\ndef create_series_intervals(left, right, closed="right"):\n return Series(IntervalArray.from_arrays(left, right, closed))\n\n\ndef create_series_categorical_intervals(left, right, closed="right"):\n return Series(Categorical(IntervalIndex.from_arrays(left, right, closed)))\n\n\nclass TestComparison:\n @pytest.fixture(params=[operator.eq, operator.ne])\n def op(self, request):\n return request.param\n\n @pytest.fixture(\n params=[\n 
IntervalArray.from_arrays,\n IntervalIndex.from_arrays,\n create_categorical_intervals,\n create_series_intervals,\n create_series_categorical_intervals,\n ],\n ids=[\n "IntervalArray",\n "IntervalIndex",\n "Categorical[Interval]",\n "Series[Interval]",\n "Series[Categorical[Interval]]",\n ],\n )\n def interval_constructor(self, request):\n """\n Fixture for all pandas native interval constructors.\n To be used as the LHS of IntervalArray comparisons.\n """\n return request.param\n\n def elementwise_comparison(self, op, interval_array, other):\n """\n Helper that performs elementwise comparisons between `array` and `other`\n """\n other = other if is_list_like(other) else [other] * len(interval_array)\n expected = np.array([op(x, y) for x, y in zip(interval_array, other)])\n if isinstance(other, Series):\n return Series(expected, index=other.index)\n return expected\n\n def test_compare_scalar_interval(self, op, interval_array):\n # matches first interval\n other = interval_array[0]\n result = op(interval_array, other)\n expected = self.elementwise_comparison(op, interval_array, other)\n tm.assert_numpy_array_equal(result, expected)\n\n # matches on a single endpoint but not both\n other = Interval(interval_array.left[0], interval_array.right[1])\n result = op(interval_array, other)\n expected = self.elementwise_comparison(op, interval_array, other)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_compare_scalar_interval_mixed_closed(self, op, closed, other_closed):\n interval_array = IntervalArray.from_arrays(range(2), range(1, 3), closed=closed)\n other = Interval(0, 1, closed=other_closed)\n\n result = op(interval_array, other)\n expected = self.elementwise_comparison(op, interval_array, other)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_compare_scalar_na(self, op, interval_array, nulls_fixture, box_with_array):\n box = box_with_array\n obj = tm.box_expected(interval_array, box)\n result = op(obj, nulls_fixture)\n\n if nulls_fixture 
is pd.NA:\n # GH#31882\n exp = np.ones(interval_array.shape, dtype=bool)\n expected = BooleanArray(exp, exp)\n else:\n expected = self.elementwise_comparison(op, interval_array, nulls_fixture)\n\n if not (box is Index and nulls_fixture is pd.NA):\n # don't cast expected from BooleanArray to ndarray[object]\n xbox = get_upcast_box(obj, nulls_fixture, True)\n expected = tm.box_expected(expected, xbox)\n\n tm.assert_equal(result, expected)\n\n rev = op(nulls_fixture, obj)\n tm.assert_equal(rev, expected)\n\n @pytest.mark.parametrize(\n "other",\n [\n 0,\n 1.0,\n True,\n "foo",\n Timestamp("2017-01-01"),\n Timestamp("2017-01-01", tz="US/Eastern"),\n Timedelta("0 days"),\n Period("2017-01-01", "D"),\n ],\n )\n def test_compare_scalar_other(self, op, interval_array, other):\n result = op(interval_array, other)\n expected = self.elementwise_comparison(op, interval_array, other)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_compare_list_like_interval(self, op, interval_array, interval_constructor):\n # same endpoints\n other = interval_constructor(interval_array.left, interval_array.right)\n result = op(interval_array, other)\n expected = self.elementwise_comparison(op, interval_array, other)\n tm.assert_equal(result, expected)\n\n # different endpoints\n other = interval_constructor(\n interval_array.left[::-1], interval_array.right[::-1]\n )\n result = op(interval_array, other)\n expected = self.elementwise_comparison(op, interval_array, other)\n tm.assert_equal(result, expected)\n\n # all nan endpoints\n other = interval_constructor([np.nan] * 4, [np.nan] * 4)\n result = op(interval_array, other)\n expected = self.elementwise_comparison(op, interval_array, other)\n tm.assert_equal(result, expected)\n\n def test_compare_list_like_interval_mixed_closed(\n self, op, interval_constructor, closed, other_closed\n ):\n interval_array = IntervalArray.from_arrays(range(2), range(1, 3), closed=closed)\n other = interval_constructor(range(2), range(1, 3), 
closed=other_closed)\n\n result = op(interval_array, other)\n expected = self.elementwise_comparison(op, interval_array, other)\n tm.assert_equal(result, expected)\n\n @pytest.mark.parametrize(\n "other",\n [\n (\n Interval(0, 1),\n Interval(Timedelta("1 day"), Timedelta("2 days")),\n Interval(4, 5, "both"),\n Interval(10, 20, "neither"),\n ),\n (0, 1.5, Timestamp("20170103"), np.nan),\n (\n Timestamp("20170102", tz="US/Eastern"),\n Timedelta("2 days"),\n "baz",\n pd.NaT,\n ),\n ],\n )\n def test_compare_list_like_object(self, op, interval_array, other):\n result = op(interval_array, other)\n expected = self.elementwise_comparison(op, interval_array, other)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_compare_list_like_nan(self, op, interval_array, nulls_fixture):\n other = [nulls_fixture] * 4\n result = op(interval_array, other)\n expected = self.elementwise_comparison(op, interval_array, other)\n\n tm.assert_equal(result, expected)\n\n @pytest.mark.parametrize(\n "other",\n [\n np.arange(4, dtype="int64"),\n np.arange(4, dtype="float64"),\n date_range("2017-01-01", periods=4),\n date_range("2017-01-01", periods=4, tz="US/Eastern"),\n timedelta_range("0 days", periods=4),\n period_range("2017-01-01", periods=4, freq="D"),\n Categorical(list("abab")),\n Categorical(date_range("2017-01-01", periods=4)),\n pd.array(list("abcd")),\n pd.array(["foo", 3.14, None, object()], dtype=object),\n ],\n ids=lambda x: str(x.dtype),\n )\n def test_compare_list_like_other(self, op, interval_array, other):\n result = op(interval_array, other)\n expected = self.elementwise_comparison(op, interval_array, other)\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize("length", [1, 3, 5])\n @pytest.mark.parametrize("other_constructor", [IntervalArray, list])\n def test_compare_length_mismatch_errors(self, op, other_constructor, length):\n interval_array = IntervalArray.from_arrays(range(4), range(1, 5))\n other = other_constructor([Interval(0, 1)] * 
length)\n with pytest.raises(ValueError, match="Lengths must match to compare"):\n op(interval_array, other)\n\n @pytest.mark.parametrize(\n "constructor, expected_type, assert_func",\n [\n (IntervalIndex, np.array, tm.assert_numpy_array_equal),\n (Series, Series, tm.assert_series_equal),\n ],\n )\n def test_index_series_compat(self, op, constructor, expected_type, assert_func):\n # IntervalIndex/Series that rely on IntervalArray for comparisons\n breaks = range(4)\n index = constructor(IntervalIndex.from_breaks(breaks))\n\n # scalar comparisons\n other = index[0]\n result = op(index, other)\n expected = expected_type(self.elementwise_comparison(op, index, other))\n assert_func(result, expected)\n\n other = breaks[0]\n result = op(index, other)\n expected = expected_type(self.elementwise_comparison(op, index, other))\n assert_func(result, expected)\n\n # list-like comparisons\n other = IntervalArray.from_breaks(breaks)\n result = op(index, other)\n expected = expected_type(self.elementwise_comparison(op, index, other))\n assert_func(result, expected)\n\n other = [index[0], breaks[0], "foo"]\n result = op(index, other)\n expected = expected_type(self.elementwise_comparison(op, index, other))\n assert_func(result, expected)\n\n @pytest.mark.parametrize("scalars", ["a", False, 1, 1.0, None])\n def test_comparison_operations(self, scalars):\n # GH #28981\n expected = Series([False, False])\n s = Series([Interval(0, 1), Interval(1, 2)], dtype="interval")\n result = s == scalars\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\arithmetic\test_interval.py | test_interval.py | Python | 10,951 | 0.95 | 0.098039 | 0.041825 | node-utils | 559 | 2024-02-03T13:05:37.337968 | BSD-3-Clause | true | 0fde13ab73682cb23fb62b470d0adfa3 |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should\n# behave identically.\n# Specifically for numeric dtypes\nfrom __future__ import annotations\n\nfrom collections import abc\nfrom datetime import timedelta\nfrom decimal import Decimal\nimport operator\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n Index,\n RangeIndex,\n Series,\n Timedelta,\n TimedeltaIndex,\n array,\n date_range,\n)\nimport pandas._testing as tm\nfrom pandas.core import ops\nfrom pandas.core.computation import expressions as expr\nfrom pandas.tests.arithmetic.common import (\n assert_invalid_addsub_type,\n assert_invalid_comparison,\n)\n\n\n@pytest.fixture(autouse=True, params=[0, 1000000], ids=["numexpr", "python"])\ndef switch_numexpr_min_elements(request, monkeypatch):\n with monkeypatch.context() as m:\n m.setattr(expr, "_MIN_ELEMENTS", request.param)\n yield request.param\n\n\n@pytest.fixture(params=[Index, Series, tm.to_array])\ndef box_pandas_1d_array(request):\n """\n Fixture to test behavior for Index, Series and tm.to_array classes\n """\n return request.param\n\n\n@pytest.fixture(\n params=[\n # TODO: add more dtypes here\n Index(np.arange(5, dtype="float64")),\n Index(np.arange(5, dtype="int64")),\n Index(np.arange(5, dtype="uint64")),\n RangeIndex(5),\n ],\n ids=lambda x: type(x).__name__,\n)\ndef numeric_idx(request):\n """\n Several types of numeric-dtypes Index objects\n """\n return request.param\n\n\n@pytest.fixture(\n params=[Index, Series, tm.to_array, np.array, list], ids=lambda x: x.__name__\n)\ndef box_1d_array(request):\n """\n Fixture to test behavior for Index, Series, tm.to_array, numpy Array and list\n classes\n """\n return request.param\n\n\ndef adjust_negative_zero(zero, expected):\n """\n Helper to adjust the expected result if we are dividing by -0.0\n as opposed to 0.0\n """\n if np.signbit(np.array(zero)).any():\n # All entries in the `zero` fixture should be either\n # all-negative or no-negative.\n 
assert np.signbit(np.array(zero)).all()\n\n expected *= -1\n\n return expected\n\n\ndef compare_op(series, other, op):\n left = np.abs(series) if op in (ops.rpow, operator.pow) else series\n right = np.abs(other) if op in (ops.rpow, operator.pow) else other\n\n cython_or_numpy = op(left, right)\n python = left.combine(right, op)\n if isinstance(other, Series) and not other.index.equals(series.index):\n python.index = python.index._with_freq(None)\n tm.assert_series_equal(cython_or_numpy, python)\n\n\n# TODO: remove this kludge once mypy stops giving false positives here\n# List comprehension has incompatible type List[PandasObject]; expected List[RangeIndex]\n# See GH#29725\n_ldtypes = ["i1", "i2", "i4", "i8", "u1", "u2", "u4", "u8", "f2", "f4", "f8"]\nlefts: list[Index | Series] = [RangeIndex(10, 40, 10)]\nlefts.extend([Series([10, 20, 30], dtype=dtype) for dtype in _ldtypes])\nlefts.extend([Index([10, 20, 30], dtype=dtype) for dtype in _ldtypes if dtype != "f2"])\n\n# ------------------------------------------------------------------\n# Comparisons\n\n\nclass TestNumericComparisons:\n def test_operator_series_comparison_zerorank(self):\n # GH#13006\n result = np.float64(0) > Series([1, 2, 3])\n expected = 0.0 > Series([1, 2, 3])\n tm.assert_series_equal(result, expected)\n result = Series([1, 2, 3]) < np.float64(0)\n expected = Series([1, 2, 3]) < 0.0\n tm.assert_series_equal(result, expected)\n result = np.array([0, 1, 2])[0] > Series([0, 1, 2])\n expected = 0.0 > Series([1, 2, 3])\n tm.assert_series_equal(result, expected)\n\n def test_df_numeric_cmp_dt64_raises(self, box_with_array, fixed_now_ts):\n # GH#8932, GH#22163\n ts = fixed_now_ts\n obj = np.array(range(5))\n obj = tm.box_expected(obj, box_with_array)\n\n assert_invalid_comparison(obj, ts, box_with_array)\n\n def test_compare_invalid(self):\n # GH#8058\n # ops testing\n a = Series(np.random.default_rng(2).standard_normal(5), name=0)\n b = Series(np.random.default_rng(2).standard_normal(5))\n b.name = 
pd.Timestamp("2000-01-01")\n tm.assert_series_equal(a / b, 1 / (b / a))\n\n def test_numeric_cmp_string_numexpr_path(self, box_with_array, monkeypatch):\n # GH#36377, GH#35700\n box = box_with_array\n xbox = box if box is not Index else np.ndarray\n\n obj = Series(np.random.default_rng(2).standard_normal(51))\n obj = tm.box_expected(obj, box, transpose=False)\n with monkeypatch.context() as m:\n m.setattr(expr, "_MIN_ELEMENTS", 50)\n result = obj == "a"\n\n expected = Series(np.zeros(51, dtype=bool))\n expected = tm.box_expected(expected, xbox, transpose=False)\n tm.assert_equal(result, expected)\n\n with monkeypatch.context() as m:\n m.setattr(expr, "_MIN_ELEMENTS", 50)\n result = obj != "a"\n tm.assert_equal(result, ~expected)\n\n msg = "Invalid comparison between dtype=float64 and str"\n with pytest.raises(TypeError, match=msg):\n obj < "a"\n\n\n# ------------------------------------------------------------------\n# Numeric dtypes Arithmetic with Datetime/Timedelta Scalar\n\n\nclass TestNumericArraylikeArithmeticWithDatetimeLike:\n @pytest.mark.parametrize("box_cls", [np.array, Index, Series])\n @pytest.mark.parametrize(\n "left", lefts, ids=lambda x: type(x).__name__ + str(x.dtype)\n )\n def test_mul_td64arr(self, left, box_cls):\n # GH#22390\n right = np.array([1, 2, 3], dtype="m8[s]")\n right = box_cls(right)\n\n expected = TimedeltaIndex(["10s", "40s", "90s"], dtype=right.dtype)\n\n if isinstance(left, Series) or box_cls is Series:\n expected = Series(expected)\n assert expected.dtype == right.dtype\n\n result = left * right\n tm.assert_equal(result, expected)\n\n result = right * left\n tm.assert_equal(result, expected)\n\n @pytest.mark.parametrize("box_cls", [np.array, Index, Series])\n @pytest.mark.parametrize(\n "left", lefts, ids=lambda x: type(x).__name__ + str(x.dtype)\n )\n def test_div_td64arr(self, left, box_cls):\n # GH#22390\n right = np.array([10, 40, 90], dtype="m8[s]")\n right = box_cls(right)\n\n expected = TimedeltaIndex(["1s", "2s", "3s"], 
dtype=right.dtype)\n if isinstance(left, Series) or box_cls is Series:\n expected = Series(expected)\n assert expected.dtype == right.dtype\n\n result = right / left\n tm.assert_equal(result, expected)\n\n result = right // left\n tm.assert_equal(result, expected)\n\n # (true_) needed for min-versions build 2022-12-26\n msg = "ufunc '(true_)?divide' cannot use operands with types"\n with pytest.raises(TypeError, match=msg):\n left / right\n\n msg = "ufunc 'floor_divide' cannot use operands with types"\n with pytest.raises(TypeError, match=msg):\n left // right\n\n # TODO: also test Tick objects;\n # see test_numeric_arr_rdiv_tdscalar for note on these failing\n @pytest.mark.parametrize(\n "scalar_td",\n [\n Timedelta(days=1),\n Timedelta(days=1).to_timedelta64(),\n Timedelta(days=1).to_pytimedelta(),\n Timedelta(days=1).to_timedelta64().astype("timedelta64[s]"),\n Timedelta(days=1).to_timedelta64().astype("timedelta64[ms]"),\n ],\n ids=lambda x: type(x).__name__,\n )\n def test_numeric_arr_mul_tdscalar(self, scalar_td, numeric_idx, box_with_array):\n # GH#19333\n box = box_with_array\n index = numeric_idx\n expected = TimedeltaIndex([Timedelta(days=n) for n in range(len(index))])\n if isinstance(scalar_td, np.timedelta64):\n dtype = scalar_td.dtype\n expected = expected.astype(dtype)\n elif type(scalar_td) is timedelta:\n expected = expected.astype("m8[us]")\n\n index = tm.box_expected(index, box)\n expected = tm.box_expected(expected, box)\n\n result = index * scalar_td\n tm.assert_equal(result, expected)\n\n commute = scalar_td * index\n tm.assert_equal(commute, expected)\n\n @pytest.mark.parametrize(\n "scalar_td",\n [\n Timedelta(days=1),\n Timedelta(days=1).to_timedelta64(),\n Timedelta(days=1).to_pytimedelta(),\n ],\n ids=lambda x: type(x).__name__,\n )\n @pytest.mark.parametrize("dtype", [np.int64, np.float64])\n def test_numeric_arr_mul_tdscalar_numexpr_path(\n self, dtype, scalar_td, box_with_array\n ):\n # GH#44772 for the float64 case\n box = 
box_with_array\n\n arr_i8 = np.arange(2 * 10**4).astype(np.int64, copy=False)\n arr = arr_i8.astype(dtype, copy=False)\n obj = tm.box_expected(arr, box, transpose=False)\n\n expected = arr_i8.view("timedelta64[D]").astype("timedelta64[ns]")\n if type(scalar_td) is timedelta:\n expected = expected.astype("timedelta64[us]")\n\n expected = tm.box_expected(expected, box, transpose=False)\n\n result = obj * scalar_td\n tm.assert_equal(result, expected)\n\n result = scalar_td * obj\n tm.assert_equal(result, expected)\n\n def test_numeric_arr_rdiv_tdscalar(self, three_days, numeric_idx, box_with_array):\n box = box_with_array\n\n index = numeric_idx[1:3]\n\n expected = TimedeltaIndex(["3 Days", "36 Hours"])\n if isinstance(three_days, np.timedelta64):\n dtype = three_days.dtype\n if dtype < np.dtype("m8[s]"):\n # i.e. resolution is lower -> use lowest supported resolution\n dtype = np.dtype("m8[s]")\n expected = expected.astype(dtype)\n elif type(three_days) is timedelta:\n expected = expected.astype("m8[us]")\n elif isinstance(\n three_days,\n (pd.offsets.Day, pd.offsets.Hour, pd.offsets.Minute, pd.offsets.Second),\n ):\n # closest reso is Second\n expected = expected.astype("m8[s]")\n\n index = tm.box_expected(index, box)\n expected = tm.box_expected(expected, box)\n\n result = three_days / index\n tm.assert_equal(result, expected)\n\n msg = "cannot use operands with types dtype"\n with pytest.raises(TypeError, match=msg):\n index / three_days\n\n @pytest.mark.parametrize(\n "other",\n [\n Timedelta(hours=31),\n Timedelta(hours=31).to_pytimedelta(),\n Timedelta(hours=31).to_timedelta64(),\n Timedelta(hours=31).to_timedelta64().astype("m8[h]"),\n np.timedelta64("NaT"),\n np.timedelta64("NaT", "D"),\n pd.offsets.Minute(3),\n pd.offsets.Second(0),\n # GH#28080 numeric+datetimelike should raise; Timestamp used\n # to raise NullFrequencyError but that behavior was removed in 1.0\n pd.Timestamp("2021-01-01", tz="Asia/Tokyo"),\n pd.Timestamp("2021-01-01"),\n 
pd.Timestamp("2021-01-01").to_pydatetime(),\n pd.Timestamp("2021-01-01", tz="UTC").to_pydatetime(),\n pd.Timestamp("2021-01-01").to_datetime64(),\n np.datetime64("NaT", "ns"),\n pd.NaT,\n ],\n ids=repr,\n )\n def test_add_sub_datetimedeltalike_invalid(\n self, numeric_idx, other, box_with_array\n ):\n box = box_with_array\n\n left = tm.box_expected(numeric_idx, box)\n msg = "|".join(\n [\n "unsupported operand type",\n "Addition/subtraction of integers and integer-arrays",\n "Instead of adding/subtracting",\n "cannot use operands with types dtype",\n "Concatenation operation is not implemented for NumPy arrays",\n "Cannot (add|subtract) NaT (to|from) ndarray",\n # pd.array vs np.datetime64 case\n r"operand type\(s\) all returned NotImplemented from __array_ufunc__",\n "can only perform ops with numeric values",\n "cannot subtract DatetimeArray from ndarray",\n # pd.Timedelta(1) + Index([0, 1, 2])\n "Cannot add or subtract Timedelta from integers",\n ]\n )\n assert_invalid_addsub_type(left, other, msg)\n\n\n# ------------------------------------------------------------------\n# Arithmetic\n\n\nclass TestDivisionByZero:\n def test_div_zero(self, zero, numeric_idx):\n idx = numeric_idx\n\n expected = Index([np.nan, np.inf, np.inf, np.inf, np.inf], dtype=np.float64)\n # We only adjust for Index, because Series does not yet apply\n # the adjustment correctly.\n expected2 = adjust_negative_zero(zero, expected)\n\n result = idx / zero\n tm.assert_index_equal(result, expected2)\n ser_compat = Series(idx).astype("i8") / np.array(zero).astype("i8")\n tm.assert_series_equal(ser_compat, Series(expected))\n\n def test_floordiv_zero(self, zero, numeric_idx):\n idx = numeric_idx\n\n expected = Index([np.nan, np.inf, np.inf, np.inf, np.inf], dtype=np.float64)\n # We only adjust for Index, because Series does not yet apply\n # the adjustment correctly.\n expected2 = adjust_negative_zero(zero, expected)\n\n result = idx // zero\n tm.assert_index_equal(result, expected2)\n ser_compat 
= Series(idx).astype("i8") // np.array(zero).astype("i8")\n tm.assert_series_equal(ser_compat, Series(expected))\n\n def test_mod_zero(self, zero, numeric_idx):\n idx = numeric_idx\n\n expected = Index([np.nan, np.nan, np.nan, np.nan, np.nan], dtype=np.float64)\n result = idx % zero\n tm.assert_index_equal(result, expected)\n ser_compat = Series(idx).astype("i8") % np.array(zero).astype("i8")\n tm.assert_series_equal(ser_compat, Series(result))\n\n def test_divmod_zero(self, zero, numeric_idx):\n idx = numeric_idx\n\n exleft = Index([np.nan, np.inf, np.inf, np.inf, np.inf], dtype=np.float64)\n exright = Index([np.nan, np.nan, np.nan, np.nan, np.nan], dtype=np.float64)\n exleft = adjust_negative_zero(zero, exleft)\n\n result = divmod(idx, zero)\n tm.assert_index_equal(result[0], exleft)\n tm.assert_index_equal(result[1], exright)\n\n @pytest.mark.parametrize("op", [operator.truediv, operator.floordiv])\n def test_div_negative_zero(self, zero, numeric_idx, op):\n # Check that -1 / -0.0 returns np.inf, not -np.inf\n if numeric_idx.dtype == np.uint64:\n pytest.skip(f"Div by negative 0 not relevant for {numeric_idx.dtype}")\n idx = numeric_idx - 3\n\n expected = Index([-np.inf, -np.inf, -np.inf, np.nan, np.inf], dtype=np.float64)\n expected = adjust_negative_zero(zero, expected)\n\n result = op(idx, zero)\n tm.assert_index_equal(result, expected)\n\n # ------------------------------------------------------------------\n\n @pytest.mark.parametrize("dtype1", [np.int64, np.float64, np.uint64])\n def test_ser_div_ser(\n self,\n switch_numexpr_min_elements,\n dtype1,\n any_real_numpy_dtype,\n ):\n # no longer do integer div for any ops, but deal with the 0's\n dtype2 = any_real_numpy_dtype\n\n first = Series([3, 4, 5, 8], name="first").astype(dtype1)\n second = Series([0, 0, 0, 3], name="second").astype(dtype2)\n\n with np.errstate(all="ignore"):\n expected = Series(\n first.values.astype(np.float64) / second.values,\n dtype="float64",\n name=None,\n )\n expected.iloc[0:3] = 
np.inf\n if first.dtype == "int64" and second.dtype == "float32":\n # when using numexpr, the casting rules are slightly different\n # and int64/float32 combo results in float32 instead of float64\n if expr.USE_NUMEXPR and switch_numexpr_min_elements == 0:\n expected = expected.astype("float32")\n\n result = first / second\n tm.assert_series_equal(result, expected)\n assert not result.equals(second / first)\n\n @pytest.mark.parametrize("dtype1", [np.int64, np.float64, np.uint64])\n def test_ser_divmod_zero(self, dtype1, any_real_numpy_dtype):\n # GH#26987\n dtype2 = any_real_numpy_dtype\n left = Series([1, 1]).astype(dtype1)\n right = Series([0, 2]).astype(dtype2)\n\n # GH#27321 pandas convention is to set 1 // 0 to np.inf, as opposed\n # to numpy which sets to np.nan; patch `expected[0]` below\n expected = left // right, left % right\n expected = list(expected)\n expected[0] = expected[0].astype(np.float64)\n expected[0][0] = np.inf\n result = divmod(left, right)\n\n tm.assert_series_equal(result[0], expected[0])\n tm.assert_series_equal(result[1], expected[1])\n\n # rdivmod case\n result = divmod(left.values, right)\n tm.assert_series_equal(result[0], expected[0])\n tm.assert_series_equal(result[1], expected[1])\n\n def test_ser_divmod_inf(self):\n left = Series([np.inf, 1.0])\n right = Series([np.inf, 2.0])\n\n expected = left // right, left % right\n result = divmod(left, right)\n\n tm.assert_series_equal(result[0], expected[0])\n tm.assert_series_equal(result[1], expected[1])\n\n # rdivmod case\n result = divmod(left.values, right)\n tm.assert_series_equal(result[0], expected[0])\n tm.assert_series_equal(result[1], expected[1])\n\n def test_rdiv_zero_compat(self):\n # GH#8674\n zero_array = np.array([0] * 5)\n data = np.random.default_rng(2).standard_normal(5)\n expected = Series([0.0] * 5)\n\n result = zero_array / Series(data)\n tm.assert_series_equal(result, expected)\n\n result = Series(zero_array) / data\n tm.assert_series_equal(result, expected)\n\n 
result = Series(zero_array) / Series(data)\n tm.assert_series_equal(result, expected)\n\n def test_div_zero_inf_signs(self):\n # GH#9144, inf signing\n ser = Series([-1, 0, 1], name="first")\n expected = Series([-np.inf, np.nan, np.inf], name="first")\n\n result = ser / 0\n tm.assert_series_equal(result, expected)\n\n def test_rdiv_zero(self):\n # GH#9144\n ser = Series([-1, 0, 1], name="first")\n expected = Series([0.0, np.nan, 0.0], name="first")\n\n result = 0 / ser\n tm.assert_series_equal(result, expected)\n\n def test_floordiv_div(self):\n # GH#9144\n ser = Series([-1, 0, 1], name="first")\n\n result = ser // 0\n expected = Series([-np.inf, np.nan, np.inf], name="first")\n tm.assert_series_equal(result, expected)\n\n def test_df_div_zero_df(self):\n # integer div, but deal with the 0's (GH#9144)\n df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]})\n result = df / df\n\n first = Series([1.0, 1.0, 1.0, 1.0])\n second = Series([np.nan, np.nan, np.nan, 1])\n expected = pd.DataFrame({"first": first, "second": second})\n tm.assert_frame_equal(result, expected)\n\n def test_df_div_zero_array(self):\n # integer div, but deal with the 0's (GH#9144)\n df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]})\n\n first = Series([1.0, 1.0, 1.0, 1.0])\n second = Series([np.nan, np.nan, np.nan, 1])\n expected = pd.DataFrame({"first": first, "second": second})\n\n with np.errstate(all="ignore"):\n arr = df.values.astype("float") / df.values\n result = pd.DataFrame(arr, index=df.index, columns=df.columns)\n tm.assert_frame_equal(result, expected)\n\n def test_df_div_zero_int(self):\n # integer div, but deal with the 0's (GH#9144)\n df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]})\n\n result = df / 0\n expected = pd.DataFrame(np.inf, index=df.index, columns=df.columns)\n expected.iloc[0:3, 1] = np.nan\n tm.assert_frame_equal(result, expected)\n\n # numpy has a slightly different (wrong) treatment\n with np.errstate(all="ignore"):\n 
arr = df.values.astype("float64") / 0\n result2 = pd.DataFrame(arr, index=df.index, columns=df.columns)\n tm.assert_frame_equal(result2, expected)\n\n def test_df_div_zero_series_does_not_commute(self):\n # integer div, but deal with the 0's (GH#9144)\n df = pd.DataFrame(np.random.default_rng(2).standard_normal((10, 5)))\n ser = df[0]\n res = ser / df\n res2 = df / ser\n assert not res.fillna(0).equals(res2.fillna(0))\n\n # ------------------------------------------------------------------\n # Mod By Zero\n\n def test_df_mod_zero_df(self, using_array_manager):\n # GH#3590, modulo as ints\n df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]})\n # this is technically wrong, as the integer portion is coerced to float\n first = Series([0, 0, 0, 0])\n if not using_array_manager:\n # INFO(ArrayManager) BlockManager doesn't preserve dtype per column\n # while ArrayManager performs op column-wisedoes and thus preserves\n # dtype if possible\n first = first.astype("float64")\n second = Series([np.nan, np.nan, np.nan, 0])\n expected = pd.DataFrame({"first": first, "second": second})\n result = df % df\n tm.assert_frame_equal(result, expected)\n\n # GH#38939 If we dont pass copy=False, df is consolidated and\n # result["first"] is float64 instead of int64\n df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]}, copy=False)\n first = Series([0, 0, 0, 0], dtype="int64")\n second = Series([np.nan, np.nan, np.nan, 0])\n expected = pd.DataFrame({"first": first, "second": second})\n result = df % df\n tm.assert_frame_equal(result, expected)\n\n def test_df_mod_zero_array(self):\n # GH#3590, modulo as ints\n df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]})\n\n # this is technically wrong, as the integer portion is coerced to float\n # ###\n first = Series([0, 0, 0, 0], dtype="float64")\n second = Series([np.nan, np.nan, np.nan, 0])\n expected = pd.DataFrame({"first": first, "second": second})\n\n # numpy has a slightly different (wrong) 
treatment\n with np.errstate(all="ignore"):\n arr = df.values % df.values\n result2 = pd.DataFrame(arr, index=df.index, columns=df.columns, dtype="float64")\n result2.iloc[0:3, 1] = np.nan\n tm.assert_frame_equal(result2, expected)\n\n def test_df_mod_zero_int(self):\n # GH#3590, modulo as ints\n df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]})\n\n result = df % 0\n expected = pd.DataFrame(np.nan, index=df.index, columns=df.columns)\n tm.assert_frame_equal(result, expected)\n\n # numpy has a slightly different (wrong) treatment\n with np.errstate(all="ignore"):\n arr = df.values.astype("float64") % 0\n result2 = pd.DataFrame(arr, index=df.index, columns=df.columns)\n tm.assert_frame_equal(result2, expected)\n\n def test_df_mod_zero_series_does_not_commute(self):\n # GH#3590, modulo as ints\n # not commutative with series\n df = pd.DataFrame(np.random.default_rng(2).standard_normal((10, 5)))\n ser = df[0]\n res = ser % df\n res2 = df % ser\n assert not res.fillna(0).equals(res2.fillna(0))\n\n\nclass TestMultiplicationDivision:\n # __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__\n # for non-timestamp/timedelta/period dtypes\n\n def test_divide_decimal(self, box_with_array):\n # resolves issue GH#9787\n box = box_with_array\n ser = Series([Decimal(10)])\n expected = Series([Decimal(5)])\n\n ser = tm.box_expected(ser, box)\n expected = tm.box_expected(expected, box)\n\n result = ser / Decimal(2)\n\n tm.assert_equal(result, expected)\n\n result = ser // Decimal(2)\n tm.assert_equal(result, expected)\n\n def test_div_equiv_binop(self):\n # Test Series.div as well as Series.__div__\n # float/integer issue\n # GH#7785\n first = Series([1, 0], name="first")\n second = Series([-0.01, -0.02], name="second")\n expected = Series([-0.01, -np.inf])\n\n result = second.div(first)\n tm.assert_series_equal(result, expected, check_names=False)\n\n result = second / first\n tm.assert_series_equal(result, expected)\n\n def test_div_int(self, 
numeric_idx):\n idx = numeric_idx\n result = idx / 1\n expected = idx.astype("float64")\n tm.assert_index_equal(result, expected)\n\n result = idx / 2\n expected = Index(idx.values / 2)\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize("op", [operator.mul, ops.rmul, operator.floordiv])\n def test_mul_int_identity(self, op, numeric_idx, box_with_array):\n idx = numeric_idx\n idx = tm.box_expected(idx, box_with_array)\n\n result = op(idx, 1)\n tm.assert_equal(result, idx)\n\n def test_mul_int_array(self, numeric_idx):\n idx = numeric_idx\n didx = idx * idx\n\n result = idx * np.array(5, dtype="int64")\n tm.assert_index_equal(result, idx * 5)\n\n arr_dtype = "uint64" if idx.dtype == np.uint64 else "int64"\n result = idx * np.arange(5, dtype=arr_dtype)\n tm.assert_index_equal(result, didx)\n\n def test_mul_int_series(self, numeric_idx):\n idx = numeric_idx\n didx = idx * idx\n\n arr_dtype = "uint64" if idx.dtype == np.uint64 else "int64"\n result = idx * Series(np.arange(5, dtype=arr_dtype))\n tm.assert_series_equal(result, Series(didx))\n\n def test_mul_float_series(self, numeric_idx):\n idx = numeric_idx\n rng5 = np.arange(5, dtype="float64")\n\n result = idx * Series(rng5 + 0.1)\n expected = Series(rng5 * (rng5 + 0.1))\n tm.assert_series_equal(result, expected)\n\n def test_mul_index(self, numeric_idx):\n idx = numeric_idx\n\n result = idx * idx\n tm.assert_index_equal(result, idx**2)\n\n def test_mul_datelike_raises(self, numeric_idx):\n idx = numeric_idx\n msg = "cannot perform __rmul__ with this index type"\n with pytest.raises(TypeError, match=msg):\n idx * date_range("20130101", periods=5)\n\n def test_mul_size_mismatch_raises(self, numeric_idx):\n idx = numeric_idx\n msg = "operands could not be broadcast together"\n with pytest.raises(ValueError, match=msg):\n idx * idx[0:3]\n with pytest.raises(ValueError, match=msg):\n idx * np.array([1, 2])\n\n @pytest.mark.parametrize("op", [operator.pow, ops.rpow])\n def test_pow_float(self, op, 
numeric_idx, box_with_array):\n # test power calculations both ways, GH#14973\n box = box_with_array\n idx = numeric_idx\n expected = Index(op(idx.values, 2.0))\n\n idx = tm.box_expected(idx, box)\n expected = tm.box_expected(expected, box)\n\n result = op(idx, 2.0)\n tm.assert_equal(result, expected)\n\n def test_modulo(self, numeric_idx, box_with_array):\n # GH#9244\n box = box_with_array\n idx = numeric_idx\n expected = Index(idx.values % 2)\n\n idx = tm.box_expected(idx, box)\n expected = tm.box_expected(expected, box)\n\n result = idx % 2\n tm.assert_equal(result, expected)\n\n def test_divmod_scalar(self, numeric_idx):\n idx = numeric_idx\n\n result = divmod(idx, 2)\n with np.errstate(all="ignore"):\n div, mod = divmod(idx.values, 2)\n\n expected = Index(div), Index(mod)\n for r, e in zip(result, expected):\n tm.assert_index_equal(r, e)\n\n def test_divmod_ndarray(self, numeric_idx):\n idx = numeric_idx\n other = np.ones(idx.values.shape, dtype=idx.values.dtype) * 2\n\n result = divmod(idx, other)\n with np.errstate(all="ignore"):\n div, mod = divmod(idx.values, other)\n\n expected = Index(div), Index(mod)\n for r, e in zip(result, expected):\n tm.assert_index_equal(r, e)\n\n def test_divmod_series(self, numeric_idx):\n idx = numeric_idx\n other = np.ones(idx.values.shape, dtype=idx.values.dtype) * 2\n\n result = divmod(idx, Series(other))\n with np.errstate(all="ignore"):\n div, mod = divmod(idx.values, other)\n\n expected = Series(div), Series(mod)\n for r, e in zip(result, expected):\n tm.assert_series_equal(r, e)\n\n @pytest.mark.parametrize("other", [np.nan, 7, -23, 2.718, -3.14, np.inf])\n def test_ops_np_scalar(self, other):\n vals = np.random.default_rng(2).standard_normal((5, 3))\n f = lambda x: pd.DataFrame(\n x, index=list("ABCDE"), columns=["jim", "joe", "jolie"]\n )\n\n df = f(vals)\n\n tm.assert_frame_equal(df / np.array(other), f(vals / other))\n tm.assert_frame_equal(np.array(other) * df, f(vals * other))\n tm.assert_frame_equal(df + 
np.array(other), f(vals + other))\n tm.assert_frame_equal(np.array(other) - df, f(other - vals))\n\n # TODO: This came from series.test.test_operators, needs cleanup\n def test_operators_frame(self):\n # rpow does not work with DataFrame\n ts = Series(\n np.arange(10, dtype=np.float64),\n index=date_range("2020-01-01", periods=10),\n name="ts",\n )\n ts.name = "ts"\n\n df = pd.DataFrame({"A": ts})\n\n tm.assert_series_equal(ts + ts, ts + df["A"], check_names=False)\n tm.assert_series_equal(ts**ts, ts ** df["A"], check_names=False)\n tm.assert_series_equal(ts < ts, ts < df["A"], check_names=False)\n tm.assert_series_equal(ts / ts, ts / df["A"], check_names=False)\n\n # TODO: this came from tests.series.test_analytics, needs cleanup and\n # de-duplication with test_modulo above\n def test_modulo2(self):\n with np.errstate(all="ignore"):\n # GH#3590, modulo as ints\n p = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]})\n result = p["first"] % p["second"]\n expected = Series(p["first"].values % p["second"].values, dtype="float64")\n expected.iloc[0:3] = np.nan\n tm.assert_series_equal(result, expected)\n\n result = p["first"] % 0\n expected = Series(np.nan, index=p.index, name="first")\n tm.assert_series_equal(result, expected)\n\n p = p.astype("float64")\n result = p["first"] % p["second"]\n expected = Series(p["first"].values % p["second"].values)\n tm.assert_series_equal(result, expected)\n\n p = p.astype("float64")\n result = p["first"] % p["second"]\n result2 = p["second"] % p["first"]\n assert not result.equals(result2)\n\n def test_modulo_zero_int(self):\n # GH#9144\n with np.errstate(all="ignore"):\n s = Series([0, 1])\n\n result = s % 0\n expected = Series([np.nan, np.nan])\n tm.assert_series_equal(result, expected)\n\n result = 0 % s\n expected = Series([np.nan, 0.0])\n tm.assert_series_equal(result, expected)\n\n\nclass TestAdditionSubtraction:\n # __add__, __sub__, __radd__, __rsub__, __iadd__, __isub__\n # for non-timestamp/timedelta/period 
dtypes\n\n @pytest.mark.parametrize(\n "first, second, expected",\n [\n (\n Series([1, 2, 3], index=list("ABC"), name="x"),\n Series([2, 2, 2], index=list("ABD"), name="x"),\n Series([3.0, 4.0, np.nan, np.nan], index=list("ABCD"), name="x"),\n ),\n (\n Series([1, 2, 3], index=list("ABC"), name="x"),\n Series([2, 2, 2, 2], index=list("ABCD"), name="x"),\n Series([3, 4, 5, np.nan], index=list("ABCD"), name="x"),\n ),\n ],\n )\n def test_add_series(self, first, second, expected):\n # GH#1134\n tm.assert_series_equal(first + second, expected)\n tm.assert_series_equal(second + first, expected)\n\n @pytest.mark.parametrize(\n "first, second, expected",\n [\n (\n pd.DataFrame({"x": [1, 2, 3]}, index=list("ABC")),\n pd.DataFrame({"x": [2, 2, 2]}, index=list("ABD")),\n pd.DataFrame({"x": [3.0, 4.0, np.nan, np.nan]}, index=list("ABCD")),\n ),\n (\n pd.DataFrame({"x": [1, 2, 3]}, index=list("ABC")),\n pd.DataFrame({"x": [2, 2, 2, 2]}, index=list("ABCD")),\n pd.DataFrame({"x": [3, 4, 5, np.nan]}, index=list("ABCD")),\n ),\n ],\n )\n def test_add_frames(self, first, second, expected):\n # GH#1134\n tm.assert_frame_equal(first + second, expected)\n tm.assert_frame_equal(second + first, expected)\n\n # TODO: This came from series.test.test_operators, needs cleanup\n def test_series_frame_radd_bug(self, fixed_now_ts):\n # GH#353\n vals = Series([str(i) for i in range(5)])\n result = "foo_" + vals\n expected = vals.map(lambda x: "foo_" + x)\n tm.assert_series_equal(result, expected)\n\n frame = pd.DataFrame({"vals": vals})\n result = "foo_" + frame\n expected = pd.DataFrame({"vals": vals.map(lambda x: "foo_" + x)})\n tm.assert_frame_equal(result, expected)\n\n ts = Series(\n np.arange(10, dtype=np.float64),\n index=date_range("2020-01-01", periods=10),\n name="ts",\n )\n\n # really raise this time\n fix_now = fixed_now_ts.to_pydatetime()\n msg = "|".join(\n [\n "unsupported operand type",\n # wrong error message, see https://github.com/numpy/numpy/issues/18832\n "Concatenation 
operation",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n fix_now + ts\n\n with pytest.raises(TypeError, match=msg):\n ts + fix_now\n\n # TODO: This came from series.test.test_operators, needs cleanup\n def test_datetime64_with_index(self):\n # arithmetic integer ops with an index\n ser = Series(np.random.default_rng(2).standard_normal(5))\n expected = ser - ser.index.to_series()\n result = ser - ser.index\n tm.assert_series_equal(result, expected)\n\n # GH#4629\n # arithmetic datetime64 ops with an index\n ser = Series(\n date_range("20130101", periods=5),\n index=date_range("20130101", periods=5),\n )\n expected = ser - ser.index.to_series()\n result = ser - ser.index\n tm.assert_series_equal(result, expected)\n\n msg = "cannot subtract PeriodArray from DatetimeArray"\n with pytest.raises(TypeError, match=msg):\n # GH#18850\n result = ser - ser.index.to_period()\n\n df = pd.DataFrame(\n np.random.default_rng(2).standard_normal((5, 2)),\n index=date_range("20130101", periods=5),\n )\n df["date"] = pd.Timestamp("20130102")\n df["expected"] = df["date"] - df.index.to_series()\n df["result"] = df["date"] - df.index\n tm.assert_series_equal(df["result"], df["expected"], check_names=False)\n\n # TODO: taken from tests.frame.test_operators, needs cleanup\n def test_frame_operators(self, float_frame):\n frame = float_frame\n\n garbage = np.random.default_rng(2).random(4)\n colSeries = Series(garbage, index=np.array(frame.columns))\n\n idSum = frame + frame\n seriesSum = frame + colSeries\n\n for col, series in idSum.items():\n for idx, val in series.items():\n origVal = frame[col][idx] * 2\n if not np.isnan(val):\n assert val == origVal\n else:\n assert np.isnan(origVal)\n\n for col, series in seriesSum.items():\n for idx, val in series.items():\n origVal = frame[col][idx] + colSeries[col]\n if not np.isnan(val):\n assert val == origVal\n else:\n assert np.isnan(origVal)\n\n def test_frame_operators_col_align(self, float_frame):\n frame2 = 
pd.DataFrame(float_frame, columns=["D", "C", "B", "A"])\n added = frame2 + frame2\n expected = frame2 * 2\n tm.assert_frame_equal(added, expected)\n\n def test_frame_operators_none_to_nan(self):\n df = pd.DataFrame({"a": ["a", None, "b"]})\n tm.assert_frame_equal(df + df, pd.DataFrame({"a": ["aa", np.nan, "bb"]}))\n\n @pytest.mark.parametrize("dtype", ("float", "int64"))\n def test_frame_operators_empty_like(self, dtype):\n # Test for issue #10181\n frames = [\n pd.DataFrame(dtype=dtype),\n pd.DataFrame(columns=["A"], dtype=dtype),\n pd.DataFrame(index=[0], dtype=dtype),\n ]\n for df in frames:\n assert (df + df).equals(df)\n tm.assert_frame_equal(df + df, df)\n\n @pytest.mark.parametrize(\n "func",\n [lambda x: x * 2, lambda x: x[::2], lambda x: 5],\n ids=["multiply", "slice", "constant"],\n )\n def test_series_operators_arithmetic(self, all_arithmetic_functions, func):\n op = all_arithmetic_functions\n series = Series(\n np.arange(10, dtype=np.float64),\n index=date_range("2020-01-01", periods=10),\n name="ts",\n )\n other = func(series)\n compare_op(series, other, op)\n\n @pytest.mark.parametrize(\n "func", [lambda x: x + 1, lambda x: 5], ids=["add", "constant"]\n )\n def test_series_operators_compare(self, comparison_op, func):\n op = comparison_op\n series = Series(\n np.arange(10, dtype=np.float64),\n index=date_range("2020-01-01", periods=10),\n name="ts",\n )\n other = func(series)\n compare_op(series, other, op)\n\n @pytest.mark.parametrize(\n "func",\n [lambda x: x * 2, lambda x: x[::2], lambda x: 5],\n ids=["multiply", "slice", "constant"],\n )\n def test_divmod(self, func):\n series = Series(\n np.arange(10, dtype=np.float64),\n index=date_range("2020-01-01", periods=10),\n name="ts",\n )\n other = func(series)\n results = divmod(series, other)\n if isinstance(other, abc.Iterable) and len(series) != len(other):\n # if the lengths don't match, this is the test where we use\n # `tser[::2]`. 
Pad every other value in `other_np` with nan.\n other_np = []\n for n in other:\n other_np.append(n)\n other_np.append(np.nan)\n else:\n other_np = other\n other_np = np.asarray(other_np)\n with np.errstate(all="ignore"):\n expecteds = divmod(series.values, np.asarray(other_np))\n\n for result, expected in zip(results, expecteds):\n # check the values, name, and index separately\n tm.assert_almost_equal(np.asarray(result), expected)\n\n assert result.name == series.name\n tm.assert_index_equal(result.index, series.index._with_freq(None))\n\n def test_series_divmod_zero(self):\n # Check that divmod uses pandas convention for division by zero,\n # which does not match numpy.\n # pandas convention has\n # 1/0 == np.inf\n # -1/0 == -np.inf\n # 1/-0.0 == -np.inf\n # -1/-0.0 == np.inf\n tser = Series(\n np.arange(1, 11, dtype=np.float64),\n index=date_range("2020-01-01", periods=10),\n name="ts",\n )\n other = tser * 0\n\n result = divmod(tser, other)\n exp1 = Series([np.inf] * len(tser), index=tser.index, name="ts")\n exp2 = Series([np.nan] * len(tser), index=tser.index, name="ts")\n tm.assert_series_equal(result[0], exp1)\n tm.assert_series_equal(result[1], exp2)\n\n\nclass TestUFuncCompat:\n # TODO: add more dtypes\n @pytest.mark.parametrize("holder", [Index, RangeIndex, Series])\n @pytest.mark.parametrize("dtype", [np.int64, np.uint64, np.float64])\n def test_ufunc_compat(self, holder, dtype):\n box = Series if holder is Series else Index\n\n if holder is RangeIndex:\n if dtype != np.int64:\n pytest.skip(f"dtype {dtype} not relevant for RangeIndex")\n idx = RangeIndex(0, 5, name="foo")\n else:\n idx = holder(np.arange(5, dtype=dtype), name="foo")\n result = np.sin(idx)\n expected = box(np.sin(np.arange(5, dtype=dtype)), name="foo")\n tm.assert_equal(result, expected)\n\n # TODO: add more dtypes\n @pytest.mark.parametrize("holder", [Index, Series])\n @pytest.mark.parametrize("dtype", [np.int64, np.uint64, np.float64])\n def test_ufunc_coercions(self, holder, dtype):\n 
idx = holder([1, 2, 3, 4, 5], dtype=dtype, name="x")\n box = Series if holder is Series else Index\n\n result = np.sqrt(idx)\n assert result.dtype == "f8" and isinstance(result, box)\n exp = Index(np.sqrt(np.array([1, 2, 3, 4, 5], dtype=np.float64)), name="x")\n exp = tm.box_expected(exp, box)\n tm.assert_equal(result, exp)\n\n result = np.divide(idx, 2.0)\n assert result.dtype == "f8" and isinstance(result, box)\n exp = Index([0.5, 1.0, 1.5, 2.0, 2.5], dtype=np.float64, name="x")\n exp = tm.box_expected(exp, box)\n tm.assert_equal(result, exp)\n\n # _evaluate_numeric_binop\n result = idx + 2.0\n assert result.dtype == "f8" and isinstance(result, box)\n exp = Index([3.0, 4.0, 5.0, 6.0, 7.0], dtype=np.float64, name="x")\n exp = tm.box_expected(exp, box)\n tm.assert_equal(result, exp)\n\n result = idx - 2.0\n assert result.dtype == "f8" and isinstance(result, box)\n exp = Index([-1.0, 0.0, 1.0, 2.0, 3.0], dtype=np.float64, name="x")\n exp = tm.box_expected(exp, box)\n tm.assert_equal(result, exp)\n\n result = idx * 1.0\n assert result.dtype == "f8" and isinstance(result, box)\n exp = Index([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float64, name="x")\n exp = tm.box_expected(exp, box)\n tm.assert_equal(result, exp)\n\n result = idx / 2.0\n assert result.dtype == "f8" and isinstance(result, box)\n exp = Index([0.5, 1.0, 1.5, 2.0, 2.5], dtype=np.float64, name="x")\n exp = tm.box_expected(exp, box)\n tm.assert_equal(result, exp)\n\n # TODO: add more dtypes\n @pytest.mark.parametrize("holder", [Index, Series])\n @pytest.mark.parametrize("dtype", [np.int64, np.uint64, np.float64])\n def test_ufunc_multiple_return_values(self, holder, dtype):\n obj = holder([1, 2, 3], dtype=dtype, name="x")\n box = Series if holder is Series else Index\n\n result = np.modf(obj)\n assert isinstance(result, tuple)\n exp1 = Index([0.0, 0.0, 0.0], dtype=np.float64, name="x")\n exp2 = Index([1.0, 2.0, 3.0], dtype=np.float64, name="x")\n tm.assert_equal(result[0], tm.box_expected(exp1, box))\n 
tm.assert_equal(result[1], tm.box_expected(exp2, box))\n\n def test_ufunc_at(self):\n s = Series([0, 1, 2], index=[1, 2, 3], name="x")\n np.add.at(s, [0, 2], 10)\n expected = Series([10, 1, 12], index=[1, 2, 3], name="x")\n tm.assert_series_equal(s, expected)\n\n\nclass TestObjectDtypeEquivalence:\n # Tests that arithmetic operations match operations executed elementwise\n\n @pytest.mark.parametrize("dtype", [None, object])\n def test_numarr_with_dtype_add_nan(self, dtype, box_with_array):\n box = box_with_array\n ser = Series([1, 2, 3], dtype=dtype)\n expected = Series([np.nan, np.nan, np.nan], dtype=dtype)\n\n ser = tm.box_expected(ser, box)\n expected = tm.box_expected(expected, box)\n\n result = np.nan + ser\n tm.assert_equal(result, expected)\n\n result = ser + np.nan\n tm.assert_equal(result, expected)\n\n @pytest.mark.parametrize("dtype", [None, object])\n def test_numarr_with_dtype_add_int(self, dtype, box_with_array):\n box = box_with_array\n ser = Series([1, 2, 3], dtype=dtype)\n expected = Series([2, 3, 4], dtype=dtype)\n\n ser = tm.box_expected(ser, box)\n expected = tm.box_expected(expected, box)\n\n result = 1 + ser\n tm.assert_equal(result, expected)\n\n result = ser + 1\n tm.assert_equal(result, expected)\n\n # TODO: moved from tests.series.test_operators; needs cleanup\n @pytest.mark.parametrize(\n "op",\n [operator.add, operator.sub, operator.mul, operator.truediv, operator.floordiv],\n )\n def test_operators_reverse_object(self, op):\n # GH#56\n arr = Series(\n np.random.default_rng(2).standard_normal(10),\n index=np.arange(10),\n dtype=object,\n )\n\n result = op(1.0, arr)\n expected = op(1.0, arr.astype(float))\n tm.assert_series_equal(result.astype(float), expected)\n\n\nclass TestNumericArithmeticUnsorted:\n # Tests in this class have been moved from type-specific test modules\n # but not yet sorted, parametrized, and de-duplicated\n @pytest.mark.parametrize(\n "op",\n [\n operator.add,\n operator.sub,\n operator.mul,\n operator.floordiv,\n 
operator.truediv,\n ],\n )\n @pytest.mark.parametrize(\n "idx1",\n [\n RangeIndex(0, 10, 1),\n RangeIndex(0, 20, 2),\n RangeIndex(-10, 10, 2),\n RangeIndex(5, -5, -1),\n ],\n )\n @pytest.mark.parametrize(\n "idx2",\n [\n RangeIndex(0, 10, 1),\n RangeIndex(0, 20, 2),\n RangeIndex(-10, 10, 2),\n RangeIndex(5, -5, -1),\n ],\n )\n def test_binops_index(self, op, idx1, idx2):\n idx1 = idx1._rename("foo")\n idx2 = idx2._rename("bar")\n result = op(idx1, idx2)\n expected = op(Index(idx1.to_numpy()), Index(idx2.to_numpy()))\n tm.assert_index_equal(result, expected, exact="equiv")\n\n @pytest.mark.parametrize(\n "op",\n [\n operator.add,\n operator.sub,\n operator.mul,\n operator.floordiv,\n operator.truediv,\n ],\n )\n @pytest.mark.parametrize(\n "idx",\n [\n RangeIndex(0, 10, 1),\n RangeIndex(0, 20, 2),\n RangeIndex(-10, 10, 2),\n RangeIndex(5, -5, -1),\n ],\n )\n @pytest.mark.parametrize("scalar", [-1, 1, 2])\n def test_binops_index_scalar(self, op, idx, scalar):\n result = op(idx, scalar)\n expected = op(Index(idx.to_numpy()), scalar)\n tm.assert_index_equal(result, expected, exact="equiv")\n\n @pytest.mark.parametrize("idx1", [RangeIndex(0, 10, 1), RangeIndex(0, 20, 2)])\n @pytest.mark.parametrize("idx2", [RangeIndex(0, 10, 1), RangeIndex(0, 20, 2)])\n def test_binops_index_pow(self, idx1, idx2):\n # numpy does not allow powers of negative integers so test separately\n # https://github.com/numpy/numpy/pull/8127\n idx1 = idx1._rename("foo")\n idx2 = idx2._rename("bar")\n result = pow(idx1, idx2)\n expected = pow(Index(idx1.to_numpy()), Index(idx2.to_numpy()))\n tm.assert_index_equal(result, expected, exact="equiv")\n\n @pytest.mark.parametrize("idx", [RangeIndex(0, 10, 1), RangeIndex(0, 20, 2)])\n @pytest.mark.parametrize("scalar", [1, 2])\n def test_binops_index_scalar_pow(self, idx, scalar):\n # numpy does not allow powers of negative integers so test separately\n # https://github.com/numpy/numpy/pull/8127\n result = pow(idx, scalar)\n expected = 
pow(Index(idx.to_numpy()), scalar)\n tm.assert_index_equal(result, expected, exact="equiv")\n\n # TODO: divmod?\n @pytest.mark.parametrize(\n "op",\n [\n operator.add,\n operator.sub,\n operator.mul,\n operator.floordiv,\n operator.truediv,\n operator.pow,\n operator.mod,\n ],\n )\n def test_arithmetic_with_frame_or_series(self, op):\n # check that we return NotImplemented when operating with Series\n # or DataFrame\n index = RangeIndex(5)\n other = Series(np.random.default_rng(2).standard_normal(5))\n\n expected = op(Series(index), other)\n result = op(index, other)\n tm.assert_series_equal(result, expected)\n\n other = pd.DataFrame(np.random.default_rng(2).standard_normal((2, 5)))\n expected = op(pd.DataFrame([index, index]), other)\n result = op(index, other)\n tm.assert_frame_equal(result, expected)\n\n def test_numeric_compat2(self):\n # validate that we are handling the RangeIndex overrides to numeric ops\n # and returning RangeIndex where possible\n\n idx = RangeIndex(0, 10, 2)\n\n result = idx * 2\n expected = RangeIndex(0, 20, 4)\n tm.assert_index_equal(result, expected, exact=True)\n\n result = idx + 2\n expected = RangeIndex(2, 12, 2)\n tm.assert_index_equal(result, expected, exact=True)\n\n result = idx - 2\n expected = RangeIndex(-2, 8, 2)\n tm.assert_index_equal(result, expected, exact=True)\n\n result = idx / 2\n expected = RangeIndex(0, 5, 1).astype("float64")\n tm.assert_index_equal(result, expected, exact=True)\n\n result = idx / 4\n expected = RangeIndex(0, 10, 2) / 4\n tm.assert_index_equal(result, expected, exact=True)\n\n result = idx // 1\n expected = idx\n tm.assert_index_equal(result, expected, exact=True)\n\n # __mul__\n result = idx * idx\n expected = Index(idx.values * idx.values)\n tm.assert_index_equal(result, expected, exact=True)\n\n # __pow__\n idx = RangeIndex(0, 1000, 2)\n result = idx**2\n expected = Index(idx._values) ** 2\n tm.assert_index_equal(Index(result.values), expected, exact=True)\n\n @pytest.mark.parametrize(\n "idx, 
div, expected",\n [\n # TODO: add more dtypes\n (RangeIndex(0, 1000, 2), 2, RangeIndex(0, 500, 1)),\n (RangeIndex(-99, -201, -3), -3, RangeIndex(33, 67, 1)),\n (\n RangeIndex(0, 1000, 1),\n 2,\n Index(RangeIndex(0, 1000, 1)._values) // 2,\n ),\n (\n RangeIndex(0, 100, 1),\n 2.0,\n Index(RangeIndex(0, 100, 1)._values) // 2.0,\n ),\n (RangeIndex(0), 50, RangeIndex(0)),\n (RangeIndex(2, 4, 2), 3, RangeIndex(0, 1, 1)),\n (RangeIndex(-5, -10, -6), 4, RangeIndex(-2, -1, 1)),\n (RangeIndex(-100, -200, 3), 2, RangeIndex(0)),\n ],\n )\n def test_numeric_compat2_floordiv(self, idx, div, expected):\n # __floordiv__\n tm.assert_index_equal(idx // div, expected, exact=True)\n\n @pytest.mark.parametrize("dtype", [np.int64, np.float64])\n @pytest.mark.parametrize("delta", [1, 0, -1])\n def test_addsub_arithmetic(self, dtype, delta):\n # GH#8142\n delta = dtype(delta)\n index = Index([10, 11, 12], dtype=dtype)\n result = index + delta\n expected = Index(index.values + delta, dtype=dtype)\n tm.assert_index_equal(result, expected)\n\n # this subtraction used to fail\n result = index - delta\n expected = Index(index.values - delta, dtype=dtype)\n tm.assert_index_equal(result, expected)\n\n tm.assert_index_equal(index + index, 2 * index)\n tm.assert_index_equal(index - index, 0 * index)\n assert not (index - index).empty\n\n def test_pow_nan_with_zero(self, box_with_array):\n left = Index([np.nan, np.nan, np.nan])\n right = Index([0, 0, 0])\n expected = Index([1.0, 1.0, 1.0])\n\n left = tm.box_expected(left, box_with_array)\n right = tm.box_expected(right, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = left**right\n tm.assert_equal(result, expected)\n\n\ndef test_fill_value_inf_masking():\n # GH #27464 make sure we mask 0/1 with Inf and not NaN\n df = pd.DataFrame({"A": [0, 1, 2], "B": [1.1, None, 1.1]})\n\n other = pd.DataFrame({"A": [1.1, 1.2, 1.3]}, index=[0, 2, 3])\n\n result = df.rfloordiv(other, fill_value=1)\n\n expected = pd.DataFrame(\n 
{"A": [np.inf, 1.0, 0.0, 1.0], "B": [0.0, np.nan, 0.0, np.nan]}
    )
    tm.assert_frame_equal(result, expected)


def test_dataframe_div_silenced():
    # GH#26793: div with fill_value on frames whose labels only partially
    # overlap should complete without emitting any warning.
    pdf1 = pd.DataFrame(
        {
            "A": np.arange(10),
            "B": [np.nan, 1, 2, 3, 4] * 2,
            "C": [np.nan] * 10,
            "D": np.arange(10),
        },
        index=list("abcdefghij"),
        columns=list("ABCD"),
    )
    # Index and columns deliberately overlap pdf1 only partially
    # ("...ij" vs "...jk", column "D" vs "X").
    pdf2 = pd.DataFrame(
        np.random.default_rng(2).standard_normal((10, 4)),
        index=list("abcdefghjk"),
        columns=list("ABCX"),
    )
    with tm.assert_produces_warning(None):
        pdf1.div(pdf2, fill_value=0)


@pytest.mark.parametrize(
    "data, expected_data",
    [([0, 1, 2], [0, 2, 4])],
)
def test_integer_array_add_list_like(
    box_pandas_1d_array, box_1d_array, data, expected_data
):
    # GH22606 Verify operators with IntegerArray and list-likes
    arr = array(data, dtype="Int64")
    container = box_pandas_1d_array(arr)
    # Exercise both __add__ and __radd__ against the boxed list-like.
    left = container + box_1d_array(data)
    right = box_1d_array(data) + container

    # Result is boxed as the highest-priority participant:
    # Series, then Index, then a bare masked array.
    if Series in [box_1d_array, box_pandas_1d_array]:
        cls = Series
    elif Index in [box_1d_array, box_pandas_1d_array]:
        cls = Index
    else:
        cls = array

    expected = cls(expected_data, dtype="Int64")

    tm.assert_equal(left, expected)
    tm.assert_equal(right, expected)


def test_sub_multiindex_swapped_levels():
    # GH 9952: subtraction aligns on MultiIndex labels even when the two
    # frames' levels are in different orders, so df - df2 is all zeros.
    df = pd.DataFrame(
        {"a": np.random.default_rng(2).standard_normal(6)},
        index=pd.MultiIndex.from_product(
            [["a", "b"], [0, 1, 2]], names=["levA", "levB"]
        ),
    )
    df2 = df.copy()
    df2.index = df2.index.swaplevel(0, 1)
    result = df - df2
    expected = pd.DataFrame([0.0] * 6, columns=["a"], index=df.index)
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("power", [1, 2, 5])
@pytest.mark.parametrize("string_size", [0, 1, 2, 5])
def test_empty_str_comparison(power, string_size):
    # GH 37348: comparing an int64 frame against a (possibly empty) string
    # is elementwise False for every cell, for frames of varying size.
    a = np.array(range(10**power))
    right = pd.DataFrame(a, dtype=np.int64)
    left = " " * string_size

    result = right == left
    expected = pd.DataFrame(np.zeros(right.shape, dtype=bool))
    tm.assert_frame_equal(result, expected)


def test_series_add_sub_with_UInt64():
    # GH 22023: mixing a default int64 Series with a nullable UInt64 Series
    # produces the nullable Float64 dtype for both addition and subtraction.
    series1 = Series([1, 2, 3])
    series2 = Series([2, 1, 3], dtype="UInt64")

    result = series1 + series2
    expected = Series([3, 3, 6], dtype="Float64")
    tm.assert_series_equal(result, expected)

    result = series1 - series2
    expected = Series([-1, 1, 0], dtype="Float64")
    tm.assert_series_equal(result, expected)
# Arithmetic tests for DataFrame/Series/Index/Array classes that should\n# behave identically.\n# Specifically for object dtype\nimport datetime\nfrom decimal import Decimal\nimport operator\n\nimport numpy as np\nimport pytest\n\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nfrom pandas import (\n Series,\n Timestamp,\n option_context,\n)\nimport pandas._testing as tm\nfrom pandas.core import ops\n\n# ------------------------------------------------------------------\n# Comparisons\n\n\nclass TestObjectComparisons:\n def test_comparison_object_numeric_nas(self, comparison_op):\n ser = Series(np.random.default_rng(2).standard_normal(10), dtype=object)\n shifted = ser.shift(2)\n\n func = comparison_op\n\n result = func(ser, shifted)\n expected = func(ser.astype(float), shifted.astype(float))\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))]\n )\n def test_object_comparisons(self, infer_string):\n with option_context("future.infer_string", infer_string):\n ser = Series(["a", "b", np.nan, "c", "a"])\n\n result = ser == "a"\n expected = Series([True, False, False, False, True])\n tm.assert_series_equal(result, expected)\n\n result = ser < "a"\n expected = Series([False, False, False, False, False])\n tm.assert_series_equal(result, expected)\n\n result = ser != "a"\n expected = -(ser == "a")\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("dtype", [None, object])\n def test_more_na_comparisons(self, dtype):\n left = Series(["a", np.nan, "c"], dtype=dtype)\n right = Series(["a", np.nan, "d"], dtype=dtype)\n\n result = left == right\n expected = Series([True, False, False])\n tm.assert_series_equal(result, expected)\n\n result = left != right\n expected = Series([False, True, True])\n tm.assert_series_equal(result, expected)\n\n result = left == np.nan\n expected = Series([False, False, False])\n tm.assert_series_equal(result, 
        expected)

        result = left != np.nan
        expected = Series([True, True, True])
        tm.assert_series_equal(result, expected)


# ------------------------------------------------------------------
# Arithmetic


class TestArithmetic:
    # Arithmetic ops on object-dtype Series/Index/array, boxed over the
    # container types via the module-level ``box_with_array`` fixture.

    def test_add_period_to_array_of_offset(self):
        # GH#50162
        per = pd.Period("2012-1-1", freq="D")
        pi = pd.period_range("2012-1-1", periods=10, freq="D")
        # per - pi yields an object-dtype Index of DateOffset-like values
        idx = per - pi

        expected = pd.Index([x + per for x in idx], dtype=object)
        result = idx + per
        tm.assert_index_equal(result, expected)

        # radd path must give the same result
        result = per + idx
        tm.assert_index_equal(result, expected)

    # TODO: parametrize
    def test_pow_ops_object(self):
        # GH#22922
        # pow is weird with masking & 1, so testing here
        a = Series([1, np.nan, 1, np.nan], dtype=object)
        b = Series([1, np.nan, np.nan, 1], dtype=object)
        result = a**b
        expected = Series(a.values**b.values, dtype=object)
        tm.assert_series_equal(result, expected)

        result = b**a
        expected = Series(b.values**a.values, dtype=object)

        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("op", [operator.add, ops.radd])
    @pytest.mark.parametrize("other", ["category", "Int64"])
    def test_add_extension_scalar(self, other, box_with_array, op):
        # GH#22378
        # Check that scalars satisfying is_extension_array_dtype(obj)
        # do not incorrectly try to dispatch to an ExtensionArray operation

        arr = Series(["a", "b", "c"])
        expected = Series([op(x, other) for x in arr])

        arr = tm.box_expected(arr, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        result = op(arr, other)
        tm.assert_equal(result, expected)

    def test_objarr_add_str(self, box_with_array):
        # string scalar added on the right; NaN propagates
        ser = Series(["x", np.nan, "x"])
        expected = Series(["xa", np.nan, "xa"])

        ser = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        result = ser + "a"
        tm.assert_equal(result, expected)

    def test_objarr_radd_str(self, box_with_array):
        # string scalar added on the left (radd); NaN propagates
        ser = Series(["x", np.nan, "x"])
        expected = Series(["ax", np.nan, "ax"])

        ser = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        result = "a" + ser
        tm.assert_equal(result, expected)

    @pytest.mark.parametrize(
        "data",
        [
            [1, 2, 3],
            [1.1, 2.2, 3.3],
            [Timestamp("2011-01-01"), Timestamp("2011-01-02"), pd.NaT],
            ["x", "y", 1],
        ],
    )
    @pytest.mark.parametrize("dtype", [None, object])
    def test_objarr_radd_str_invalid(self, dtype, data, box_with_array):
        # adding a str to non-string-only data must raise TypeError;
        # the exact message varies by dtype/box, hence the alternation
        ser = Series(data, dtype=dtype)

        ser = tm.box_expected(ser, box_with_array)
        msg = "|".join(
            [
                "can only concatenate str",
                "did not contain a loop with signature matching types",
                "unsupported operand type",
                "must be str",
            ]
        )
        with pytest.raises(TypeError, match=msg):
            "foo_" + ser

    @pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub])
    def test_objarr_add_invalid(self, op, box_with_array):
        # invalid ops
        box = box_with_array

        obj_ser = Series(list("abc"), dtype=object, name="objects")

        obj_ser = tm.box_expected(obj_ser, box)
        msg = "|".join(
            [
                "can only concatenate str",
                "unsupported operand type",
                "must be str",
                "has no kernel",
                "operation 'add' not supported",
                "operation 'radd' not supported",
                "operation 'sub' not supported",
                "operation 'rsub' not supported",
            ]
        )
        with pytest.raises(Exception, match=msg):
            op(obj_ser, 1)
        with pytest.raises(Exception, match=msg):
            op(obj_ser, np.array(1, dtype=np.int64))

    # TODO: Moved from tests.series.test_operators; needs cleanup
    def test_operators_na_handling(self):
        ser = Series(["foo", "bar", "baz", np.nan])
        result = "prefix_" + ser
        expected = Series(["prefix_foo", "prefix_bar", "prefix_baz", np.nan])
        tm.assert_series_equal(result, expected)

        result = ser + "_suffix"
        expected = Series(["foo_suffix", "bar_suffix", "baz_suffix", np.nan])
        tm.assert_series_equal(result, expected)

    # TODO: parametrize over box
    @pytest.mark.parametrize("dtype", [None, object])
    def test_series_with_dtype_radd_timedelta(self, dtype):
        # note this test is _not_ aimed at timedelta64-dtyped Series
        # as of 2.0 we retain object dtype when ser.dtype == object
        ser = Series(
            [pd.Timedelta("1 days"), pd.Timedelta("2 days"), pd.Timedelta("3 days")],
            dtype=dtype,
        )
        expected = Series(
            [pd.Timedelta("4 days"), pd.Timedelta("5 days"), pd.Timedelta("6 days")],
            dtype=dtype,
        )

        result = pd.Timedelta("3 days") + ser
        tm.assert_series_equal(result, expected)

        result = ser + pd.Timedelta("3 days")
        tm.assert_series_equal(result, expected)

    # TODO: cleanup & parametrize over box
    def test_mixed_timezone_series_ops_object(self):
        # GH#13043
        # mixed tz Timestamps force object dtype; arithmetic still works
        # elementwise
        ser = Series(
            [
                Timestamp("2015-01-01", tz="US/Eastern"),
                Timestamp("2015-01-01", tz="Asia/Tokyo"),
            ],
            name="xxx",
        )
        assert ser.dtype == object

        exp = Series(
            [
                Timestamp("2015-01-02", tz="US/Eastern"),
                Timestamp("2015-01-02", tz="Asia/Tokyo"),
            ],
            name="xxx",
        )
        tm.assert_series_equal(ser + pd.Timedelta("1 days"), exp)
        tm.assert_series_equal(pd.Timedelta("1 days") + ser, exp)

        # object series & object series
        ser2 = Series(
            [
                Timestamp("2015-01-03", tz="US/Eastern"),
                Timestamp("2015-01-05", tz="Asia/Tokyo"),
            ],
            name="xxx",
        )
        assert ser2.dtype == object
        exp = Series(
            [pd.Timedelta("2 days"), pd.Timedelta("4 days")], name="xxx", dtype=object
        )
        tm.assert_series_equal(ser2 - ser, exp)
        tm.assert_series_equal(ser - ser2, -exp)

        ser = Series(
            [pd.Timedelta("01:00:00"), pd.Timedelta("02:00:00")],
            name="xxx",
            dtype=object,
        )
        assert ser.dtype == object

        exp = Series(
            [pd.Timedelta("01:30:00"), pd.Timedelta("02:30:00")],
            name="xxx",
            dtype=object,
        )
        tm.assert_series_equal(ser + pd.Timedelta("00:30:00"), exp)
        tm.assert_series_equal(pd.Timedelta("00:30:00") + ser, exp)

    # TODO: cleanup & parametrize over box
    def test_iadd_preserves_name(self):
        # GH#17067, GH#19723 __iadd__ and __isub__ should preserve index name
        ser = Series([1, 2, 3])
        ser.index.name = "foo"

        ser.index += 1
        assert ser.index.name == "foo"

        ser.index -= 1
        assert ser.index.name == "foo"

    def test_add_string(self):
        # from bug report
        index = pd.Index(["a", "b", "c"])
        index2 = index + "foo"

        assert "a" not in index2
        assert "afoo" in index2

    def test_iadd_string(self):
        index = pd.Index(["a", "b", "c"])
        # doesn't fail test unless there is a check before `+=`
        assert "a" in index

        index += "_x"
        assert "a_x" in index

    def test_add(self):
        index = pd.Index([str(i) for i in range(10)])
        # elementwise string concatenation, not set union
        expected = pd.Index(index.values * 2)
        tm.assert_index_equal(index + index, expected)
        tm.assert_index_equal(index + index.tolist(), expected)
        tm.assert_index_equal(index.tolist() + index, expected)

        # test add and radd
        index = pd.Index(list("abc"))
        expected = pd.Index(["a1", "b1", "c1"])
        tm.assert_index_equal(index + "1", expected)
        expected = pd.Index(["1a", "1b", "1c"])
        tm.assert_index_equal("1" + index, expected)

    def test_sub_fail(self):
        # subtraction is not defined for str-dtype Index
        index = pd.Index([str(i) for i in range(10)])

        msg = "unsupported operand type|Cannot broadcast|sub' not supported"
        with pytest.raises(TypeError, match=msg):
            index - "a"
        with pytest.raises(TypeError, match=msg):
            index - index
        with pytest.raises(TypeError, match=msg):
            index - index.tolist()
        with pytest.raises(TypeError, match=msg):
            index.tolist() - index

    def test_sub_object(self):
        # GH#19369
        # object-dtype subtraction dispatches to the elements (Decimal here)
        index = pd.Index([Decimal(1), Decimal(2)])
        expected = pd.Index([Decimal(0), Decimal(1)])

        result = index - Decimal(1)
        tm.assert_index_equal(result, expected)

        result = index - pd.Index([Decimal(1), Decimal(1)])
        tm.assert_index_equal(result, expected)

        msg = "unsupported operand type"
        with pytest.raises(TypeError, match=msg):
            index - "foo"

        with pytest.raises(TypeError, match=msg):
            index - np.array([2, "foo"], dtype=object)

    def test_rsub_object(self, fixed_now_ts):
        # GH#19369
        index = pd.Index([Decimal(1), Decimal(2)])
        expected = pd.Index([Decimal(1), Decimal(0)])

        result = Decimal(2) - index
        tm.assert_index_equal(result, expected)

        result = np.array([Decimal(2), Decimal(2)]) - index
        tm.assert_index_equal(result, expected)

        msg = "unsupported operand type"
        with pytest.raises(TypeError, match=msg):
            "foo" - index

        with pytest.raises(TypeError, match=msg):
            np.array([True, fixed_now_ts]) - index


class MyIndex(pd.Index):
    # Simple index subclass that tracks ops calls.

    # number of times __add__ has been invoked on this instance
    _calls: int

    @classmethod
    def _simple_new(cls, values, name=None, dtype=None):
        # bypass Index.__new__ so the subclass attribute is initialized
        result = object.__new__(cls)
        result._data = values
        result._name = name
        result._calls = 0
        result._reset_identity()

        return result

    def __add__(self, other):
        self._calls += 1
        return self._simple_new(self._data)

    def __radd__(self, other):
        return self.__add__(other)


@pytest.mark.parametrize(
    "other",
    [
        [datetime.timedelta(1), datetime.timedelta(2)],
        [datetime.datetime(2000, 1, 1), datetime.datetime(2000, 1, 2)],
        [pd.Period("2000"), pd.Period("2001")],
        ["a", "b"],
    ],
    ids=["timedelta", "datetime", "period", "object"],
)
def test_index_ops_defer_to_unknown_subclasses(other):
    # https://github.com/pandas-dev/pandas/issues/31109
    # pd.Index + MyIndex must defer to MyIndex.__radd__ (NotImplemented
    # protocol), so the result is a MyIndex and __add__ ran exactly once
    values = np.array(
        [datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)], dtype=object
    )
    a = MyIndex._simple_new(values)
    other = pd.Index(other)
    result = other + a
    assert isinstance(result, MyIndex)
    assert a._calls == 1
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator

import numpy as np
import pytest

from pandas._libs.tslibs import (
    IncompatibleFrequency,
    Period,
    Timestamp,
    to_offset,
)
from pandas.errors import PerformanceWarning

import pandas as pd
from pandas import (
    PeriodIndex,
    Series,
    Timedelta,
    TimedeltaIndex,
    period_range,
)
import pandas._testing as tm
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
from pandas.tests.arithmetic.common import (
    assert_invalid_addsub_type,
    assert_invalid_comparison,
    get_upcast_box,
)

# offsets that are incompatible with both Hourly and Monthly/Annual freqs;
# shared by the two fixtures below
_common_mismatch = [
    pd.offsets.YearBegin(2),
    pd.offsets.MonthBegin(1),
    pd.offsets.Minute(),
]


@pytest.fixture(
    params=[
        Timedelta(minutes=30).to_pytimedelta(),
        np.timedelta64(30, "s"),
        Timedelta(seconds=30),
    ]
    + _common_mismatch
)
def not_hourly(request):
    """
    Several timedelta-like and DateOffset instances that are _not_
    compatible with Hourly frequencies.
    """
    return request.param


@pytest.fixture(
    params=[
        np.timedelta64(365, "D"),
        Timedelta(days=365).to_pytimedelta(),
        Timedelta(days=365),
    ]
    + _common_mismatch
)
def mismatched_freq(request):
    """
    Several timedelta-like and DateOffset instances that are _not_
    compatible with Monthly or Annual frequencies.
    """
    return request.param


# ------------------------------------------------------------------
# Comparisons


class TestPeriodArrayLikeComparisons:
    # Comparison tests for PeriodDtype vectors fully parametrized over
    # DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
    # tests will eventually end up here.

    @pytest.mark.parametrize("other", ["2017", Period("2017", freq="D")])
    def test_eq_scalar(self, other, box_with_array):
        # a parseable string is treated like a Period scalar
        idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
        idx = tm.box_expected(idx, box_with_array)
        xbox = get_upcast_box(idx, other, True)

        expected = np.array([True, True, False])
        expected = tm.box_expected(expected, xbox)

        result = idx == other

        tm.assert_equal(result, expected)

    def test_compare_zerodim(self, box_with_array):
        # GH#26689 make sure we unbox zero-dimensional arrays

        pi = period_range("2000", periods=4)
        other = np.array(pi.to_numpy()[0])

        pi = tm.box_expected(pi, box_with_array)
        xbox = get_upcast_box(pi, other, True)

        result = pi <= other
        expected = np.array([True, False, False, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(result, expected)

    @pytest.mark.parametrize(
        "scalar",
        [
            "foo",
            Timestamp("2021-01-01"),
            Timedelta(days=4),
            9,
            9.5,
            2000,  # specifically don't consider 2000 to match Period("2000", "D")
            False,
            None,
        ],
    )
    def test_compare_invalid_scalar(self, box_with_array, scalar):
        # GH#28980
        # comparison with scalar that cannot be interpreted as a Period
        pi = period_range("2000", periods=4)
        parr = tm.box_expected(pi, box_with_array)
        assert_invalid_comparison(parr, scalar, box_with_array)

    @pytest.mark.parametrize(
        "other",
        [
            pd.date_range("2000", periods=4).array,
            pd.timedelta_range("1D", periods=4).array,
            np.arange(4),
            np.arange(4).astype(np.float64),
            list(range(4)),
            # match Period semantics by not treating integers as Periods
            [2000, 2001, 2002, 2003],
            np.arange(2000, 2004),
            np.arange(2000, 2004).astype(object),
            pd.Index([2000, 2001, 2002, 2003]),
        ],
    )
    def test_compare_invalid_listlike(self, box_with_array, other):
        pi = period_range("2000", periods=4)
        parr = tm.box_expected(pi, box_with_array)
        assert_invalid_comparison(parr, other, box_with_array)

    @pytest.mark.parametrize("other_box", [list, np.array, lambda x: x.astype(object)])
    def test_compare_object_dtype(self, box_with_array, other_box):
        # comparisons against an object-dtype container of Periods behave
        # like comparisons against the PeriodIndex itself
        pi = period_range("2000", periods=5)
        parr = tm.box_expected(pi, box_with_array)

        other = other_box(pi)
        xbox = get_upcast_box(parr, other, True)

        # same order -> all equal
        expected = np.array([True, True, True, True, True])
        expected = tm.box_expected(expected, xbox)

        result = parr == other
        tm.assert_equal(result, expected)
        result = parr <= other
        tm.assert_equal(result, expected)
        result = parr >= other
        tm.assert_equal(result, expected)

        result = parr != other
        tm.assert_equal(result, ~expected)
        result = parr < other
        tm.assert_equal(result, ~expected)
        result = parr > other
        tm.assert_equal(result, ~expected)

        # reversed order -> elementwise comparison against the mirror image
        other = other_box(pi[::-1])

        expected = np.array([False, False, True, False, False])
        expected = tm.box_expected(expected, xbox)
        result = parr == other
        tm.assert_equal(result, expected)

        expected = np.array([True, True, True, False, False])
        expected = tm.box_expected(expected, xbox)
        result = parr <= other
        tm.assert_equal(result, expected)

        expected = np.array([False, False, True, True, True])
        expected = tm.box_expected(expected, xbox)
        result = parr >= other
        tm.assert_equal(result, expected)

        expected = np.array([True, True, False, True, True])
        expected = tm.box_expected(expected, xbox)
        result = parr != other
        tm.assert_equal(result, expected)

        expected = np.array([True, True, False, False, False])
        expected = tm.box_expected(expected, xbox)
        result = parr < other
        tm.assert_equal(result, expected)

        expected = np.array([False, False, False, True, True])
        expected = tm.box_expected(expected, xbox)
        result = parr > other
        tm.assert_equal(result, expected)


class TestPeriodIndexComparisons:
    # TODO: parameterize over boxes

    def test_pi_cmp_period(self):
        idx = period_range("2007-01", periods=20, freq="M")
        per = idx[10]

        result = idx < per
        exp = idx.values < idx.values[10]
        tm.assert_numpy_array_equal(result, exp)

        # Tests Period.__richcmp__ against ndarray[object, ndim=2]
        result = idx.values.reshape(10, 2) < per
        tm.assert_numpy_array_equal(result, exp.reshape(10, 2))

        # Tests Period.__richcmp__ against ndarray[object, ndim=0]
        result = idx < np.array(per)
        tm.assert_numpy_array_equal(result, exp)

    # TODO: moved from test_datetime64; de-duplicate with version below
    def test_parr_cmp_period_scalar2(self, box_with_array):
        pi = period_range("2000-01-01", periods=10, freq="D")

        val = pi[3]
        expected = [x > val for x in pi]

        ser = tm.box_expected(pi, box_with_array)
        xbox = get_upcast_box(ser, val, True)

        expected = tm.box_expected(expected, xbox)
        result = ser > val
        tm.assert_equal(result, expected)

        val = pi[5]
        result = ser > val
        expected = [x > val for x in pi]
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(result, expected)

    @pytest.mark.parametrize("freq", ["M", "2M", "3M"])
    def test_parr_cmp_period_scalar(self, freq, box_with_array):
        # GH#13200
        # each comparison is checked in both orientations (base op scalar
        # and scalar op base) with the mirrored operator
        base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
        base = tm.box_expected(base, box_with_array)
        per = Period("2011-02", freq=freq)
        xbox = get_upcast_box(base, per, True)

        exp = np.array([False, True, False, False])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base == per, exp)
        tm.assert_equal(per == base, exp)

        exp = np.array([True, False, True, True])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base != per, exp)
        tm.assert_equal(per != base, exp)

        exp = np.array([False, False, True, True])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base > per, exp)
        tm.assert_equal(per < base, exp)

        exp = np.array([True, False, False, False])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base < per, exp)
        tm.assert_equal(per > base, exp)

        exp = np.array([False, True, True, True])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base >= per, exp)
        tm.assert_equal(per <= base, exp)

        exp = np.array([True, True, False, False])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base <= per, exp)
        tm.assert_equal(per >= base, exp)

    @pytest.mark.parametrize("freq", ["M", "2M", "3M"])
    def test_parr_cmp_pi(self, freq, box_with_array):
        # GH#13200
        base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
        base = tm.box_expected(base, box_with_array)

        # TODO: could also box idx?
        idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq)

        xbox = get_upcast_box(base, idx, True)

        exp = np.array([False, False, True, False])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base == idx, exp)

        exp = np.array([True, True, False, True])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base != idx, exp)

        exp = np.array([False, True, False, False])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base > idx, exp)

        exp = np.array([True, False, False, True])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base < idx, exp)

        exp = np.array([False, True, True, False])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base >= idx, exp)

        exp = np.array([True, False, True, True])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base <= idx, exp)

    @pytest.mark.parametrize("freq", ["M", "2M", "3M"])
    def test_parr_cmp_pi_mismatched_freq(self, freq, box_with_array):
        # GH#13200
        # different base freq
        base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
        base = tm.box_expected(base, box_with_array)

        msg = rf"Invalid comparison between dtype=period\[{freq}\] and Period"
        with pytest.raises(TypeError, match=msg):
            base <= Period("2011", freq="Y")

        with pytest.raises(TypeError, match=msg):
            Period("2011", freq="Y") >= base

        # TODO: Could parametrize over boxes for idx?
        idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="Y")
        # array-like boxes raise with a PeriodArray-specific message
        rev_msg = r"Invalid comparison between dtype=period\[Y-DEC\] and PeriodArray"
        idx_msg = rev_msg if box_with_array in [tm.to_array, pd.array] else msg
        with pytest.raises(TypeError, match=idx_msg):
            base <= idx

        # Different frequency
        msg = rf"Invalid comparison between dtype=period\[{freq}\] and Period"
        with pytest.raises(TypeError, match=msg):
            base <= Period("2011", freq="4M")

        with pytest.raises(TypeError, match=msg):
            Period("2011", freq="4M") >= base

        idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M")
        rev_msg = r"Invalid comparison between dtype=period\[4M\] and PeriodArray"
        idx_msg = rev_msg if box_with_array in [tm.to_array, pd.array] else msg
        with pytest.raises(TypeError, match=idx_msg):
            base <= idx

    @pytest.mark.parametrize("freq", ["M", "2M", "3M"])
    def test_pi_cmp_nat(self, freq):
        # NaT entries compare False for everything except !=
        idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
        per = idx1[1]

        result = idx1 > per
        exp = np.array([False, False, False, True])
        tm.assert_numpy_array_equal(result, exp)
        result = per < idx1
        tm.assert_numpy_array_equal(result, exp)

        result = idx1 == pd.NaT
        exp = np.array([False, False, False, False])
        tm.assert_numpy_array_equal(result, exp)
        result = pd.NaT == idx1
        tm.assert_numpy_array_equal(result, exp)

        result = idx1 != pd.NaT
        exp = np.array([True, True, True, True])
        tm.assert_numpy_array_equal(result, exp)
        result = pd.NaT != idx1
        tm.assert_numpy_array_equal(result, exp)

        idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq)
        result = idx1 < idx2
        exp = np.array([True, False, False, False])
        tm.assert_numpy_array_equal(result, exp)

        result = idx1 == idx2
        exp = np.array([False, False, False, False])
        tm.assert_numpy_array_equal(result, exp)

        result = idx1 != idx2
        exp = np.array([True, True, True, True])
        tm.assert_numpy_array_equal(result, exp)

        # self-comparison: NaT position is not equal to itself
        result = idx1 == idx1
        exp = np.array([True, True, False, True])
        tm.assert_numpy_array_equal(result, exp)

        result = idx1 != idx1
        exp = np.array([False, False, True, False])
        tm.assert_numpy_array_equal(result, exp)

    @pytest.mark.parametrize("freq", ["M", "2M", "3M"])
    def test_pi_cmp_nat_mismatched_freq_raises(self, freq):
        # inequalities raise on mismatched freq; == just returns all-False
        idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)

        diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M")
        msg = rf"Invalid comparison between dtype=period\[{freq}\] and PeriodArray"
        with pytest.raises(TypeError, match=msg):
            idx1 > diff

        result = idx1 == diff
        expected = np.array([False, False, False, False], dtype=bool)
        tm.assert_numpy_array_equal(result, expected)

    # TODO: De-duplicate with test_pi_cmp_nat
    @pytest.mark.parametrize("dtype", [object, None])
    def test_comp_nat(self, dtype):
        left = PeriodIndex([Period("2011-01-01"), pd.NaT, Period("2011-01-03")])
        right = PeriodIndex([pd.NaT, pd.NaT, Period("2011-01-03")])

        if dtype is not None:
            left = left.astype(dtype)
            right = right.astype(dtype)

        result = left == right
        expected = np.array([False, False, True])
        tm.assert_numpy_array_equal(result, expected)

        result = left != right
        expected = np.array([True, True, False])
        tm.assert_numpy_array_equal(result, expected)

        expected = np.array([False, False, False])
        tm.assert_numpy_array_equal(left == pd.NaT, expected)
        tm.assert_numpy_array_equal(pd.NaT == right, expected)

        expected = np.array([True, True, True])
        tm.assert_numpy_array_equal(left != pd.NaT, expected)
        tm.assert_numpy_array_equal(pd.NaT != left, expected)

        expected = np.array([False, False, False])
        tm.assert_numpy_array_equal(left < pd.NaT, expected)
        tm.assert_numpy_array_equal(pd.NaT > left, expected)


class TestPeriodSeriesComparisons:
    def test_cmp_series_period_series_mixed_freq(self):
        # GH#13200
        # object-dtype Series holding Periods of mixed freqs compares
        # elementwise (scalar Period comparison semantics per element)
        base = Series(
            [
                Period("2011", freq="Y"),
                Period("2011-02", freq="M"),
                Period("2013", freq="Y"),
                Period("2011-04", freq="M"),
            ]
        )

        ser = Series(
            [
                Period("2012", freq="Y"),
                Period("2011-01", freq="M"),
                Period("2013", freq="Y"),
                Period("2011-05", freq="M"),
            ]
        )

        exp = Series([False, False, True, False])
        tm.assert_series_equal(base == ser, exp)

        exp = Series([True, True, False, True])
        tm.assert_series_equal(base != ser, exp)

        exp = Series([False, True, False, False])
        tm.assert_series_equal(base > ser, exp)

        exp = Series([True, False, False, True])
        tm.assert_series_equal(base < ser, exp)

        exp = Series([False, True, True, False])
        tm.assert_series_equal(base >= ser, exp)

        exp = Series([True, False, True, True])
        tm.assert_series_equal(base <= ser, exp)


class TestPeriodIndexSeriesComparisonConsistency:
    """Test PeriodIndex and Period Series Ops consistency"""

    # TODO: needs parametrization+de-duplication

    def _check(self, values, func, expected):
        # Test PeriodIndex and Period Series Ops consistency
        # apply `func` to both a PeriodIndex and a Series built from
        # `values` and check both give `expected`

        idx = PeriodIndex(values)
        result = func(idx)

        # check that we don't pass an unwanted type to tm.assert_equal
        assert isinstance(expected, (pd.Index, np.ndarray))
        tm.assert_equal(result, expected)

        s = Series(values)
        result = func(s)

        exp = Series(expected, name=values.name)
        tm.assert_series_equal(result, exp)

    def test_pi_comp_period(self):
        idx = PeriodIndex(
            ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
        )
        per = idx[2]

        f = lambda x: x == per
        exp = np.array([False, False, True, False], dtype=np.bool_)
        self._check(idx, f, exp)
        f = lambda x: per == x
        self._check(idx, f, exp)

        f = lambda x: x != per
        exp = np.array([True, True, False, True], dtype=np.bool_)
        self._check(idx, f, exp)
        f = lambda x: per != x
        self._check(idx, f, exp)

        f = lambda x: per >= x
        exp = np.array([True, True, True, False], dtype=np.bool_)
        self._check(idx, f, exp)

        f = lambda x: x > per
        exp = np.array([False, False, False, True], dtype=np.bool_)
        self._check(idx, f, exp)

        # NOTE(review): `per >= x` is checked a second time below with the
        # same expectation — looks like a duplicate; possibly `x >= per`
        # was intended. TODO confirm before changing.
        f = lambda x: per >= x
        exp = np.array([True, True, True, False], dtype=np.bool_)
        self._check(idx, f, exp)

    def test_pi_comp_period_nat(self):
        idx = PeriodIndex(
            ["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx"
        )
        per = idx[2]

        f = lambda x: x == per
        exp = np.array([False, False, True, False], dtype=np.bool_)
        self._check(idx, f, exp)
        f = lambda x: per == x
        self._check(idx, f, exp)

        f = lambda x: x == pd.NaT
        exp = np.array([False, False, False, False], dtype=np.bool_)
        self._check(idx, f, exp)
        f = lambda x: pd.NaT == x
        self._check(idx, f, exp)

        f = lambda x: x != per
        exp = np.array([True, True, False, True], dtype=np.bool_)
        self._check(idx, f, exp)
        f = lambda x: per != x
        self._check(idx, f, exp)

        f = lambda x: x != pd.NaT
        exp = np.array([True, True, True, True], dtype=np.bool_)
        self._check(idx, f, exp)
        f = lambda x: pd.NaT != x
        self._check(idx, f, exp)

        f = lambda x: per >= x
        exp = np.array([True, False, True, False], dtype=np.bool_)
        self._check(idx, f, exp)

        f = lambda x: x < per
        exp = np.array([True, False, False, False], dtype=np.bool_)
        self._check(idx, f, exp)

        # comparisons involving NaT are always False
        f = lambda x: x > pd.NaT
        exp = np.array([False, False, False, False], dtype=np.bool_)
        self._check(idx, f, exp)

        f = lambda x: pd.NaT >= x
        exp = np.array([False, False, False, False], dtype=np.bool_)
        self._check(idx, f, exp)


# ------------------------------------------------------------------
# Arithmetic


class TestPeriodFrameArithmetic:
    def test_ops_frame_period(self):
        # GH#13043
        # Period-dtype DataFrame minus a Period scalar / another
        # Period-dtype frame yields object-dtype frames of offsets
        df = pd.DataFrame(
            {
                "A": [Period("2015-01", freq="M"), Period("2015-02", freq="M")],
                "B": [Period("2014-01", freq="M"), Period("2014-02", freq="M")],
            }
        )
        assert df["A"].dtype == "Period[M]"
        assert df["B"].dtype == "Period[M]"

        p = Period("2015-03", freq="M")
        off = p.freq
        # dtype will be object because of original dtype
        exp = pd.DataFrame(
            {
                "A": np.array([2 * off, 1 * off], dtype=object),
                "B": np.array([14 * off, 13 * off], dtype=object),
            }
        )
        tm.assert_frame_equal(p - df, exp)
        tm.assert_frame_equal(df - p, -1 * exp)

        df2 = pd.DataFrame(
            {
                "A": [Period("2015-05", freq="M"), Period("2015-06", freq="M")],
                "B": [Period("2015-05", freq="M"), Period("2015-06", freq="M")],
            }
        )
        assert df2["A"].dtype == "Period[M]"
        assert df2["B"].dtype == "Period[M]"

        exp = pd.DataFrame(
            {
                "A": np.array([4 * off, 4 * off], dtype=object),
                "B": np.array([16 * off, 16 * off], dtype=object),
            }
        )
        tm.assert_frame_equal(df2 - df, exp)
        tm.assert_frame_equal(df - df2, -1 * exp)


class TestPeriodIndexArithmetic:
    # ---------------------------------------------------------------
    # __add__/__sub__ with PeriodIndex
    # PeriodIndex + other is defined for integers and timedelta-like others
    # PeriodIndex - other is defined for integers, timedelta-like others,
    # and PeriodIndex (with matching freq)

    def test_parr_add_iadd_parr_raises(self, box_with_array):
        rng = period_range("1/1/2000", freq="D", periods=5)
        other = period_range("1/6/2000", freq="D", periods=5)
        # TODO: parametrize over boxes for other?

        rng = tm.box_expected(rng, box_with_array)
        # An earlier implementation of PeriodIndex addition performed
        # a set operation (union). This has since been changed to
        # raise a TypeError.
See GH#14164 and GH#13077 for historical\n # reference.\n msg = r"unsupported operand type\(s\) for \+: .* and .*"\n with pytest.raises(TypeError, match=msg):\n rng + other\n\n with pytest.raises(TypeError, match=msg):\n rng += other\n\n def test_pi_sub_isub_pi(self):\n # GH#20049\n # For historical reference see GH#14164, GH#13077.\n # PeriodIndex subtraction originally performed set difference,\n # then changed to raise TypeError before being implemented in GH#20049\n rng = period_range("1/1/2000", freq="D", periods=5)\n other = period_range("1/6/2000", freq="D", periods=5)\n\n off = rng.freq\n expected = pd.Index([-5 * off] * 5)\n result = rng - other\n tm.assert_index_equal(result, expected)\n\n rng -= other\n tm.assert_index_equal(rng, expected)\n\n def test_pi_sub_pi_with_nat(self):\n rng = period_range("1/1/2000", freq="D", periods=5)\n other = rng[1:].insert(0, pd.NaT)\n assert other[1:].equals(rng[1:])\n\n result = rng - other\n off = rng.freq\n expected = pd.Index([pd.NaT, 0 * off, 0 * off, 0 * off, 0 * off])\n tm.assert_index_equal(result, expected)\n\n def test_parr_sub_pi_mismatched_freq(self, box_with_array, box_with_array2):\n rng = period_range("1/1/2000", freq="D", periods=5)\n other = period_range("1/6/2000", freq="h", periods=5)\n\n rng = tm.box_expected(rng, box_with_array)\n other = tm.box_expected(other, box_with_array2)\n msg = r"Input has different freq=[hD] from PeriodArray\(freq=[Dh]\)"\n with pytest.raises(IncompatibleFrequency, match=msg):\n rng - other\n\n @pytest.mark.parametrize("n", [1, 2, 3, 4])\n def test_sub_n_gt_1_ticks(self, tick_classes, n):\n # GH 23878\n p1_d = "19910905"\n p2_d = "19920406"\n p1 = PeriodIndex([p1_d], freq=tick_classes(n))\n p2 = PeriodIndex([p2_d], freq=tick_classes(n))\n\n expected = PeriodIndex([p2_d], freq=p2.freq.base) - PeriodIndex(\n [p1_d], freq=p1.freq.base\n )\n\n tm.assert_index_equal((p2 - p1), expected)\n\n @pytest.mark.parametrize("n", [1, 2, 3, 4])\n @pytest.mark.parametrize(\n "offset, 
kwd_name",\n [\n (pd.offsets.YearEnd, "month"),\n (pd.offsets.QuarterEnd, "startingMonth"),\n (pd.offsets.MonthEnd, None),\n (pd.offsets.Week, "weekday"),\n ],\n )\n def test_sub_n_gt_1_offsets(self, offset, kwd_name, n):\n # GH 23878\n kwds = {kwd_name: 3} if kwd_name is not None else {}\n p1_d = "19910905"\n p2_d = "19920406"\n freq = offset(n, normalize=False, **kwds)\n p1 = PeriodIndex([p1_d], freq=freq)\n p2 = PeriodIndex([p2_d], freq=freq)\n\n result = p2 - p1\n expected = PeriodIndex([p2_d], freq=freq.base) - PeriodIndex(\n [p1_d], freq=freq.base\n )\n\n tm.assert_index_equal(result, expected)\n\n # -------------------------------------------------------------\n # Invalid Operations\n\n @pytest.mark.parametrize(\n "other",\n [\n # datetime scalars\n Timestamp("2016-01-01"),\n Timestamp("2016-01-01").to_pydatetime(),\n Timestamp("2016-01-01").to_datetime64(),\n # datetime-like arrays\n pd.date_range("2016-01-01", periods=3, freq="h"),\n pd.date_range("2016-01-01", periods=3, tz="Europe/Brussels"),\n pd.date_range("2016-01-01", periods=3, freq="s")._data,\n pd.date_range("2016-01-01", periods=3, tz="Asia/Tokyo")._data,\n # Miscellaneous invalid types\n 3.14,\n np.array([2.0, 3.0, 4.0]),\n ],\n )\n def test_parr_add_sub_invalid(self, other, box_with_array):\n # GH#23215\n rng = period_range("1/1/2000", freq="D", periods=3)\n rng = tm.box_expected(rng, box_with_array)\n\n msg = "|".join(\n [\n r"(:?cannot add PeriodArray and .*)",\n r"(:?cannot subtract .* from (:?a\s)?.*)",\n r"(:?unsupported operand type\(s\) for \+: .* and .*)",\n r"unsupported operand type\(s\) for [+-]: .* and .*",\n ]\n )\n assert_invalid_addsub_type(rng, other, msg)\n with pytest.raises(TypeError, match=msg):\n rng + other\n with pytest.raises(TypeError, match=msg):\n other + rng\n with pytest.raises(TypeError, match=msg):\n rng - other\n with pytest.raises(TypeError, match=msg):\n other - rng\n\n # -----------------------------------------------------------------\n # __add__/__sub__ with 
ndarray[datetime64] and ndarray[timedelta64]\n\n def test_pi_add_sub_td64_array_non_tick_raises(self):\n rng = period_range("1/1/2000", freq="Q", periods=3)\n tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])\n tdarr = tdi.values\n\n msg = r"Cannot add or subtract timedelta64\[ns\] dtype from period\[Q-DEC\]"\n with pytest.raises(TypeError, match=msg):\n rng + tdarr\n with pytest.raises(TypeError, match=msg):\n tdarr + rng\n\n with pytest.raises(TypeError, match=msg):\n rng - tdarr\n msg = r"cannot subtract PeriodArray from TimedeltaArray"\n with pytest.raises(TypeError, match=msg):\n tdarr - rng\n\n def test_pi_add_sub_td64_array_tick(self):\n # PeriodIndex + Timedelta-like is allowed only with\n # tick-like frequencies\n rng = period_range("1/1/2000", freq="90D", periods=3)\n tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])\n tdarr = tdi.values\n\n expected = period_range("12/31/1999", freq="90D", periods=3)\n result = rng + tdi\n tm.assert_index_equal(result, expected)\n result = rng + tdarr\n tm.assert_index_equal(result, expected)\n result = tdi + rng\n tm.assert_index_equal(result, expected)\n result = tdarr + rng\n tm.assert_index_equal(result, expected)\n\n expected = period_range("1/2/2000", freq="90D", periods=3)\n\n result = rng - tdi\n tm.assert_index_equal(result, expected)\n result = rng - tdarr\n tm.assert_index_equal(result, expected)\n\n msg = r"cannot subtract .* from .*"\n with pytest.raises(TypeError, match=msg):\n tdarr - rng\n\n with pytest.raises(TypeError, match=msg):\n tdi - rng\n\n @pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "h"])\n @pytest.mark.parametrize("tdi_freq", [None, "h"])\n def test_parr_sub_td64array(self, box_with_array, tdi_freq, pi_freq):\n box = box_with_array\n xbox = box if box not in [pd.array, tm.to_array] else pd.Index\n\n tdi = TimedeltaIndex(["1 hours", "2 hours"], freq=tdi_freq)\n dti = Timestamp("2018-03-07 17:16:40") + tdi\n pi = dti.to_period(pi_freq)\n\n # TODO: parametrize over box for pi?\n 
td64obj = tm.box_expected(tdi, box)\n\n if pi_freq == "h":\n result = pi - td64obj\n expected = (pi.to_timestamp("s") - tdi).to_period(pi_freq)\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(result, expected)\n\n # Subtract from scalar\n result = pi[0] - td64obj\n expected = (pi[0].to_timestamp("s") - tdi).to_period(pi_freq)\n expected = tm.box_expected(expected, box)\n tm.assert_equal(result, expected)\n\n elif pi_freq == "D":\n # Tick, but non-compatible\n msg = (\n "Cannot add/subtract timedelta-like from PeriodArray that is "\n "not an integer multiple of the PeriodArray's freq."\n )\n with pytest.raises(IncompatibleFrequency, match=msg):\n pi - td64obj\n\n with pytest.raises(IncompatibleFrequency, match=msg):\n pi[0] - td64obj\n\n else:\n # With non-Tick freq, we could not add timedelta64 array regardless\n # of what its resolution is\n msg = "Cannot add or subtract timedelta64"\n with pytest.raises(TypeError, match=msg):\n pi - td64obj\n with pytest.raises(TypeError, match=msg):\n pi[0] - td64obj\n\n # -----------------------------------------------------------------\n # operations with array/Index of DateOffset objects\n\n @pytest.mark.parametrize("box", [np.array, pd.Index])\n def test_pi_add_offset_array(self, box):\n # GH#18849\n pi = PeriodIndex([Period("2015Q1"), Period("2016Q2")])\n offs = box(\n [\n pd.offsets.QuarterEnd(n=1, startingMonth=12),\n pd.offsets.QuarterEnd(n=-2, startingMonth=12),\n ]\n )\n expected = PeriodIndex([Period("2015Q2"), Period("2015Q4")]).astype(object)\n\n with tm.assert_produces_warning(PerformanceWarning):\n res = pi + offs\n tm.assert_index_equal(res, expected)\n\n with tm.assert_produces_warning(PerformanceWarning):\n res2 = offs + pi\n tm.assert_index_equal(res2, expected)\n\n unanchored = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])\n # addition/subtraction ops with incompatible offsets should issue\n # a PerformanceWarning and _then_ raise a TypeError.\n msg = r"Input cannot be converted to 
Period\(freq=Q-DEC\)"\n with pytest.raises(IncompatibleFrequency, match=msg):\n with tm.assert_produces_warning(PerformanceWarning):\n pi + unanchored\n with pytest.raises(IncompatibleFrequency, match=msg):\n with tm.assert_produces_warning(PerformanceWarning):\n unanchored + pi\n\n @pytest.mark.parametrize("box", [np.array, pd.Index])\n def test_pi_sub_offset_array(self, box):\n # GH#18824\n pi = PeriodIndex([Period("2015Q1"), Period("2016Q2")])\n other = box(\n [\n pd.offsets.QuarterEnd(n=1, startingMonth=12),\n pd.offsets.QuarterEnd(n=-2, startingMonth=12),\n ]\n )\n\n expected = PeriodIndex([pi[n] - other[n] for n in range(len(pi))])\n expected = expected.astype(object)\n\n with tm.assert_produces_warning(PerformanceWarning):\n res = pi - other\n tm.assert_index_equal(res, expected)\n\n anchored = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])\n\n # addition/subtraction ops with anchored offsets should issue\n # a PerformanceWarning and _then_ raise a TypeError.\n msg = r"Input has different freq=-1M from Period\(freq=Q-DEC\)"\n with pytest.raises(IncompatibleFrequency, match=msg):\n with tm.assert_produces_warning(PerformanceWarning):\n pi - anchored\n with pytest.raises(IncompatibleFrequency, match=msg):\n with tm.assert_produces_warning(PerformanceWarning):\n anchored - pi\n\n def test_pi_add_iadd_int(self, one):\n # Variants of `one` for #19012\n rng = period_range("2000-01-01 09:00", freq="h", periods=10)\n result = rng + one\n expected = period_range("2000-01-01 10:00", freq="h", periods=10)\n tm.assert_index_equal(result, expected)\n rng += one\n tm.assert_index_equal(rng, expected)\n\n def test_pi_sub_isub_int(self, one):\n """\n PeriodIndex.__sub__ and __isub__ with several representations of\n the integer 1, e.g. 
int, np.int64, np.uint8, ...\n """\n rng = period_range("2000-01-01 09:00", freq="h", periods=10)\n result = rng - one\n expected = period_range("2000-01-01 08:00", freq="h", periods=10)\n tm.assert_index_equal(result, expected)\n rng -= one\n tm.assert_index_equal(rng, expected)\n\n @pytest.mark.parametrize("five", [5, np.array(5, dtype=np.int64)])\n def test_pi_sub_intlike(self, five):\n rng = period_range("2007-01", periods=50)\n\n result = rng - five\n exp = rng + (-five)\n tm.assert_index_equal(result, exp)\n\n def test_pi_add_sub_int_array_freqn_gt1(self):\n # GH#47209 test adding array of ints when freq.n > 1 matches\n # scalar behavior\n pi = period_range("2016-01-01", periods=10, freq="2D")\n arr = np.arange(10)\n result = pi + arr\n expected = pd.Index([x + y for x, y in zip(pi, arr)])\n tm.assert_index_equal(result, expected)\n\n result = pi - arr\n expected = pd.Index([x - y for x, y in zip(pi, arr)])\n tm.assert_index_equal(result, expected)\n\n def test_pi_sub_isub_offset(self):\n # offset\n # DateOffset\n rng = period_range("2014", "2024", freq="Y")\n result = rng - pd.offsets.YearEnd(5)\n expected = period_range("2009", "2019", freq="Y")\n tm.assert_index_equal(result, expected)\n rng -= pd.offsets.YearEnd(5)\n tm.assert_index_equal(rng, expected)\n\n rng = period_range("2014-01", "2016-12", freq="M")\n result = rng - pd.offsets.MonthEnd(5)\n expected = period_range("2013-08", "2016-07", freq="M")\n tm.assert_index_equal(result, expected)\n\n rng -= pd.offsets.MonthEnd(5)\n tm.assert_index_equal(rng, expected)\n\n @pytest.mark.parametrize("transpose", [True, False])\n def test_pi_add_offset_n_gt1(self, box_with_array, transpose):\n # GH#23215\n # add offset to PeriodIndex with freq.n > 1\n\n per = Period("2016-01", freq="2M")\n pi = PeriodIndex([per])\n\n expected = PeriodIndex(["2016-03"], freq="2M")\n\n pi = tm.box_expected(pi, box_with_array, transpose=transpose)\n expected = tm.box_expected(expected, box_with_array, transpose=transpose)\n\n 
result = pi + per.freq\n tm.assert_equal(result, expected)\n\n result = per.freq + pi\n tm.assert_equal(result, expected)\n\n def test_pi_add_offset_n_gt1_not_divisible(self, box_with_array):\n # GH#23215\n # PeriodIndex with freq.n > 1 add offset with offset.n % freq.n != 0\n pi = PeriodIndex(["2016-01"], freq="2M")\n expected = PeriodIndex(["2016-04"], freq="2M")\n\n pi = tm.box_expected(pi, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = pi + to_offset("3ME")\n tm.assert_equal(result, expected)\n\n result = to_offset("3ME") + pi\n tm.assert_equal(result, expected)\n\n # ---------------------------------------------------------------\n # __add__/__sub__ with integer arrays\n\n @pytest.mark.parametrize("int_holder", [np.array, pd.Index])\n @pytest.mark.parametrize("op", [operator.add, ops.radd])\n def test_pi_add_intarray(self, int_holder, op):\n # GH#19959\n pi = PeriodIndex([Period("2015Q1"), Period("NaT")])\n other = int_holder([4, -1])\n\n result = op(pi, other)\n expected = PeriodIndex([Period("2016Q1"), Period("NaT")])\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize("int_holder", [np.array, pd.Index])\n def test_pi_sub_intarray(self, int_holder):\n # GH#19959\n pi = PeriodIndex([Period("2015Q1"), Period("NaT")])\n other = int_holder([4, -1])\n\n result = pi - other\n expected = PeriodIndex([Period("2014Q1"), Period("NaT")])\n tm.assert_index_equal(result, expected)\n\n msg = r"bad operand type for unary -: 'PeriodArray'"\n with pytest.raises(TypeError, match=msg):\n other - pi\n\n # ---------------------------------------------------------------\n # Timedelta-like (timedelta, timedelta64, Timedelta, Tick)\n # TODO: Some of these are misnomers because of non-Tick DateOffsets\n\n def test_parr_add_timedeltalike_minute_gt1(self, three_days, box_with_array):\n # GH#23031 adding a time-delta-like offset to a PeriodArray that has\n # minute frequency with n != 1. 
A more general case is tested below\n # in test_pi_add_timedeltalike_tick_gt1, but here we write out the\n # expected result more explicitly.\n other = three_days\n rng = period_range("2014-05-01", periods=3, freq="2D")\n rng = tm.box_expected(rng, box_with_array)\n\n expected = PeriodIndex(["2014-05-04", "2014-05-06", "2014-05-08"], freq="2D")\n expected = tm.box_expected(expected, box_with_array)\n\n result = rng + other\n tm.assert_equal(result, expected)\n\n result = other + rng\n tm.assert_equal(result, expected)\n\n # subtraction\n expected = PeriodIndex(["2014-04-28", "2014-04-30", "2014-05-02"], freq="2D")\n expected = tm.box_expected(expected, box_with_array)\n result = rng - other\n tm.assert_equal(result, expected)\n\n msg = "|".join(\n [\n r"bad operand type for unary -: 'PeriodArray'",\n r"cannot subtract PeriodArray from timedelta64\[[hD]\]",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n other - rng\n\n @pytest.mark.parametrize("freqstr", ["5ns", "5us", "5ms", "5s", "5min", "5h", "5d"])\n def test_parr_add_timedeltalike_tick_gt1(self, three_days, freqstr, box_with_array):\n # GH#23031 adding a time-delta-like offset to a PeriodArray that has\n # tick-like frequency with n != 1\n other = three_days\n rng = period_range("2014-05-01", periods=6, freq=freqstr)\n first = rng[0]\n rng = tm.box_expected(rng, box_with_array)\n\n expected = period_range(first + other, periods=6, freq=freqstr)\n expected = tm.box_expected(expected, box_with_array)\n\n result = rng + other\n tm.assert_equal(result, expected)\n\n result = other + rng\n tm.assert_equal(result, expected)\n\n # subtraction\n expected = period_range(first - other, periods=6, freq=freqstr)\n expected = tm.box_expected(expected, box_with_array)\n result = rng - other\n tm.assert_equal(result, expected)\n msg = "|".join(\n [\n r"bad operand type for unary -: 'PeriodArray'",\n r"cannot subtract PeriodArray from timedelta64\[[hD]\]",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n other - 
rng\n\n def test_pi_add_iadd_timedeltalike_daily(self, three_days):\n # Tick\n other = three_days\n rng = period_range("2014-05-01", "2014-05-15", freq="D")\n expected = period_range("2014-05-04", "2014-05-18", freq="D")\n\n result = rng + other\n tm.assert_index_equal(result, expected)\n\n rng += other\n tm.assert_index_equal(rng, expected)\n\n def test_pi_sub_isub_timedeltalike_daily(self, three_days):\n # Tick-like 3 Days\n other = three_days\n rng = period_range("2014-05-01", "2014-05-15", freq="D")\n expected = period_range("2014-04-28", "2014-05-12", freq="D")\n\n result = rng - other\n tm.assert_index_equal(result, expected)\n\n rng -= other\n tm.assert_index_equal(rng, expected)\n\n def test_parr_add_sub_timedeltalike_freq_mismatch_daily(\n self, not_daily, box_with_array\n ):\n other = not_daily\n rng = period_range("2014-05-01", "2014-05-15", freq="D")\n rng = tm.box_expected(rng, box_with_array)\n\n msg = "|".join(\n [\n # non-timedelta-like DateOffset\n "Input has different freq(=.+)? 
from Period.*?\\(freq=D\\)",\n # timedelta/td64/Timedelta but not a multiple of 24H\n "Cannot add/subtract timedelta-like from PeriodArray that is "\n "not an integer multiple of the PeriodArray's freq.",\n ]\n )\n with pytest.raises(IncompatibleFrequency, match=msg):\n rng + other\n with pytest.raises(IncompatibleFrequency, match=msg):\n rng += other\n with pytest.raises(IncompatibleFrequency, match=msg):\n rng - other\n with pytest.raises(IncompatibleFrequency, match=msg):\n rng -= other\n\n def test_pi_add_iadd_timedeltalike_hourly(self, two_hours):\n other = two_hours\n rng = period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="h")\n expected = period_range("2014-01-01 12:00", "2014-01-05 12:00", freq="h")\n\n result = rng + other\n tm.assert_index_equal(result, expected)\n\n rng += other\n tm.assert_index_equal(rng, expected)\n\n def test_parr_add_timedeltalike_mismatched_freq_hourly(\n self, not_hourly, box_with_array\n ):\n other = not_hourly\n rng = period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="h")\n rng = tm.box_expected(rng, box_with_array)\n msg = "|".join(\n [\n # non-timedelta-like DateOffset\n "Input has different freq(=.+)? 
from Period.*?\\(freq=h\\)",\n # timedelta/td64/Timedelta but not a multiple of 24H\n "Cannot add/subtract timedelta-like from PeriodArray that is "\n "not an integer multiple of the PeriodArray's freq.",\n ]\n )\n\n with pytest.raises(IncompatibleFrequency, match=msg):\n rng + other\n\n with pytest.raises(IncompatibleFrequency, match=msg):\n rng += other\n\n def test_pi_sub_isub_timedeltalike_hourly(self, two_hours):\n other = two_hours\n rng = period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="h")\n expected = period_range("2014-01-01 08:00", "2014-01-05 08:00", freq="h")\n\n result = rng - other\n tm.assert_index_equal(result, expected)\n\n rng -= other\n tm.assert_index_equal(rng, expected)\n\n def test_add_iadd_timedeltalike_annual(self):\n # offset\n # DateOffset\n rng = period_range("2014", "2024", freq="Y")\n result = rng + pd.offsets.YearEnd(5)\n expected = period_range("2019", "2029", freq="Y")\n tm.assert_index_equal(result, expected)\n rng += pd.offsets.YearEnd(5)\n tm.assert_index_equal(rng, expected)\n\n def test_pi_add_sub_timedeltalike_freq_mismatch_annual(self, mismatched_freq):\n other = mismatched_freq\n rng = period_range("2014", "2024", freq="Y")\n msg = "Input has different freq(=.+)? 
from Period.*?\\(freq=Y-DEC\\)"\n with pytest.raises(IncompatibleFrequency, match=msg):\n rng + other\n with pytest.raises(IncompatibleFrequency, match=msg):\n rng += other\n with pytest.raises(IncompatibleFrequency, match=msg):\n rng - other\n with pytest.raises(IncompatibleFrequency, match=msg):\n rng -= other\n\n def test_pi_add_iadd_timedeltalike_M(self):\n rng = period_range("2014-01", "2016-12", freq="M")\n expected = period_range("2014-06", "2017-05", freq="M")\n\n result = rng + pd.offsets.MonthEnd(5)\n tm.assert_index_equal(result, expected)\n\n rng += pd.offsets.MonthEnd(5)\n tm.assert_index_equal(rng, expected)\n\n def test_pi_add_sub_timedeltalike_freq_mismatch_monthly(self, mismatched_freq):\n other = mismatched_freq\n rng = period_range("2014-01", "2016-12", freq="M")\n msg = "Input has different freq(=.+)? from Period.*?\\(freq=M\\)"\n with pytest.raises(IncompatibleFrequency, match=msg):\n rng + other\n with pytest.raises(IncompatibleFrequency, match=msg):\n rng += other\n with pytest.raises(IncompatibleFrequency, match=msg):\n rng - other\n with pytest.raises(IncompatibleFrequency, match=msg):\n rng -= other\n\n @pytest.mark.parametrize("transpose", [True, False])\n def test_parr_add_sub_td64_nat(self, box_with_array, transpose):\n # GH#23320 special handling for timedelta64("NaT")\n pi = period_range("1994-04-01", periods=9, freq="19D")\n other = np.timedelta64("NaT")\n expected = PeriodIndex(["NaT"] * 9, freq="19D")\n\n obj = tm.box_expected(pi, box_with_array, transpose=transpose)\n expected = tm.box_expected(expected, box_with_array, transpose=transpose)\n\n result = obj + other\n tm.assert_equal(result, expected)\n result = other + obj\n tm.assert_equal(result, expected)\n result = obj - other\n tm.assert_equal(result, expected)\n msg = r"cannot subtract .* from .*"\n with pytest.raises(TypeError, match=msg):\n other - obj\n\n @pytest.mark.parametrize(\n "other",\n [\n np.array(["NaT"] * 9, dtype="m8[ns]"),\n 
TimedeltaArray._from_sequence(["NaT"] * 9, dtype="m8[ns]"),\n ],\n )\n def test_parr_add_sub_tdt64_nat_array(self, box_with_array, other):\n pi = period_range("1994-04-01", periods=9, freq="19D")\n expected = PeriodIndex(["NaT"] * 9, freq="19D")\n\n obj = tm.box_expected(pi, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = obj + other\n tm.assert_equal(result, expected)\n result = other + obj\n tm.assert_equal(result, expected)\n result = obj - other\n tm.assert_equal(result, expected)\n msg = r"cannot subtract .* from .*"\n with pytest.raises(TypeError, match=msg):\n other - obj\n\n # some but not *all* NaT\n other = other.copy()\n other[0] = np.timedelta64(0, "ns")\n expected = PeriodIndex([pi[0]] + ["NaT"] * 8, freq="19D")\n expected = tm.box_expected(expected, box_with_array)\n\n result = obj + other\n tm.assert_equal(result, expected)\n result = other + obj\n tm.assert_equal(result, expected)\n result = obj - other\n tm.assert_equal(result, expected)\n with pytest.raises(TypeError, match=msg):\n other - obj\n\n # ---------------------------------------------------------------\n # Unsorted\n\n def test_parr_add_sub_index(self):\n # Check that PeriodArray defers to Index on arithmetic ops\n pi = period_range("2000-12-31", periods=3)\n parr = pi.array\n\n result = parr - pi\n expected = pi - pi\n tm.assert_index_equal(result, expected)\n\n def test_parr_add_sub_object_array(self):\n pi = period_range("2000-12-31", periods=3, freq="D")\n parr = pi.array\n\n other = np.array([Timedelta(days=1), pd.offsets.Day(2), 3])\n\n with tm.assert_produces_warning(PerformanceWarning):\n result = parr + other\n\n expected = PeriodIndex(\n ["2001-01-01", "2001-01-03", "2001-01-05"], freq="D"\n )._data.astype(object)\n tm.assert_equal(result, expected)\n\n with tm.assert_produces_warning(PerformanceWarning):\n result = parr - other\n\n expected = PeriodIndex(["2000-12-30"] * 3, freq="D")._data.astype(object)\n tm.assert_equal(result, 
expected)\n\n def test_period_add_timestamp_raises(self, box_with_array):\n # GH#17983\n ts = Timestamp("2017")\n per = Period("2017", freq="M")\n\n arr = pd.Index([per], dtype="Period[M]")\n arr = tm.box_expected(arr, box_with_array)\n\n msg = "cannot add PeriodArray and Timestamp"\n with pytest.raises(TypeError, match=msg):\n arr + ts\n with pytest.raises(TypeError, match=msg):\n ts + arr\n msg = "cannot add PeriodArray and DatetimeArray"\n with pytest.raises(TypeError, match=msg):\n arr + Series([ts])\n with pytest.raises(TypeError, match=msg):\n Series([ts]) + arr\n with pytest.raises(TypeError, match=msg):\n arr + pd.Index([ts])\n with pytest.raises(TypeError, match=msg):\n pd.Index([ts]) + arr\n\n if box_with_array is pd.DataFrame:\n msg = "cannot add PeriodArray and DatetimeArray"\n else:\n msg = r"unsupported operand type\(s\) for \+: 'Period' and 'DatetimeArray"\n with pytest.raises(TypeError, match=msg):\n arr + pd.DataFrame([ts])\n if box_with_array is pd.DataFrame:\n msg = "cannot add PeriodArray and DatetimeArray"\n else:\n msg = r"unsupported operand type\(s\) for \+: 'DatetimeArray' and 'Period'"\n with pytest.raises(TypeError, match=msg):\n pd.DataFrame([ts]) + arr\n\n\nclass TestPeriodSeriesArithmetic:\n def test_parr_add_timedeltalike_scalar(self, three_days, box_with_array):\n # GH#13043\n ser = Series(\n [Period("2015-01-01", freq="D"), Period("2015-01-02", freq="D")],\n name="xxx",\n )\n assert ser.dtype == "Period[D]"\n\n expected = Series(\n [Period("2015-01-04", freq="D"), Period("2015-01-05", freq="D")],\n name="xxx",\n )\n\n obj = tm.box_expected(ser, box_with_array)\n if box_with_array is pd.DataFrame:\n assert (obj.dtypes == "Period[D]").all()\n\n expected = tm.box_expected(expected, box_with_array)\n\n result = obj + three_days\n tm.assert_equal(result, expected)\n\n result = three_days + obj\n tm.assert_equal(result, expected)\n\n def test_ops_series_period(self):\n # GH#13043\n ser = Series(\n [Period("2015-01-01", freq="D"), 
Period("2015-01-02", freq="D")],\n name="xxx",\n )\n assert ser.dtype == "Period[D]"\n\n per = Period("2015-01-10", freq="D")\n off = per.freq\n # dtype will be object because of original dtype\n expected = Series([9 * off, 8 * off], name="xxx", dtype=object)\n tm.assert_series_equal(per - ser, expected)\n tm.assert_series_equal(ser - per, -1 * expected)\n\n s2 = Series(\n [Period("2015-01-05", freq="D"), Period("2015-01-04", freq="D")],\n name="xxx",\n )\n assert s2.dtype == "Period[D]"\n\n expected = Series([4 * off, 2 * off], name="xxx", dtype=object)\n tm.assert_series_equal(s2 - ser, expected)\n tm.assert_series_equal(ser - s2, -1 * expected)\n\n\nclass TestPeriodIndexSeriesMethods:\n """Test PeriodIndex and Period Series Ops consistency"""\n\n def _check(self, values, func, expected):\n idx = PeriodIndex(values)\n result = func(idx)\n tm.assert_equal(result, expected)\n\n ser = Series(values)\n result = func(ser)\n\n exp = Series(expected, name=values.name)\n tm.assert_series_equal(result, exp)\n\n def test_pi_ops(self):\n idx = PeriodIndex(\n ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"\n )\n\n expected = PeriodIndex(\n ["2011-03", "2011-04", "2011-05", "2011-06"], freq="M", name="idx"\n )\n\n self._check(idx, lambda x: x + 2, expected)\n self._check(idx, lambda x: 2 + x, expected)\n\n self._check(idx + 2, lambda x: x - 2, idx)\n\n result = idx - Period("2011-01", freq="M")\n off = idx.freq\n exp = pd.Index([0 * off, 1 * off, 2 * off, 3 * off], name="idx")\n tm.assert_index_equal(result, exp)\n\n result = Period("2011-01", freq="M") - idx\n exp = pd.Index([0 * off, -1 * off, -2 * off, -3 * off], name="idx")\n tm.assert_index_equal(result, exp)\n\n @pytest.mark.parametrize("ng", ["str", 1.5])\n @pytest.mark.parametrize(\n "func",\n [\n lambda obj, ng: obj + ng,\n lambda obj, ng: ng + obj,\n lambda obj, ng: obj - ng,\n lambda obj, ng: ng - obj,\n lambda obj, ng: np.add(obj, ng),\n lambda obj, ng: np.add(ng, obj),\n lambda obj, ng: 
np.subtract(obj, ng),\n lambda obj, ng: np.subtract(ng, obj),\n ],\n )\n def test_parr_ops_errors(self, ng, func, box_with_array):\n idx = PeriodIndex(\n ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"\n )\n obj = tm.box_expected(idx, box_with_array)\n msg = "|".join(\n [\n r"unsupported operand type\(s\)",\n "can only concatenate",\n r"must be str",\n "object to str implicitly",\n ]\n )\n\n with pytest.raises(TypeError, match=msg):\n func(obj, ng)\n\n def test_pi_ops_nat(self):\n idx = PeriodIndex(\n ["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx"\n )\n expected = PeriodIndex(\n ["2011-03", "2011-04", "NaT", "2011-06"], freq="M", name="idx"\n )\n\n self._check(idx, lambda x: x + 2, expected)\n self._check(idx, lambda x: 2 + x, expected)\n self._check(idx, lambda x: np.add(x, 2), expected)\n\n self._check(idx + 2, lambda x: x - 2, idx)\n self._check(idx + 2, lambda x: np.subtract(x, 2), idx)\n\n # freq with mult\n idx = PeriodIndex(\n ["2011-01", "2011-02", "NaT", "2011-04"], freq="2M", name="idx"\n )\n expected = PeriodIndex(\n ["2011-07", "2011-08", "NaT", "2011-10"], freq="2M", name="idx"\n )\n\n self._check(idx, lambda x: x + 3, expected)\n self._check(idx, lambda x: 3 + x, expected)\n self._check(idx, lambda x: np.add(x, 3), expected)\n\n self._check(idx + 3, lambda x: x - 3, idx)\n self._check(idx + 3, lambda x: np.subtract(x, 3), idx)\n\n def test_pi_ops_array_int(self):\n idx = PeriodIndex(\n ["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx"\n )\n f = lambda x: x + np.array([1, 2, 3, 4])\n exp = PeriodIndex(\n ["2011-02", "2011-04", "NaT", "2011-08"], freq="M", name="idx"\n )\n self._check(idx, f, exp)\n\n f = lambda x: np.add(x, np.array([4, -1, 1, 2]))\n exp = PeriodIndex(\n ["2011-05", "2011-01", "NaT", "2011-06"], freq="M", name="idx"\n )\n self._check(idx, f, exp)\n\n f = lambda x: x - np.array([1, 2, 3, 4])\n exp = PeriodIndex(\n ["2010-12", "2010-12", "NaT", "2010-12"], freq="M", name="idx"\n )\n 
self._check(idx, f, exp)\n\n f = lambda x: np.subtract(x, np.array([3, 2, 3, -2]))\n exp = PeriodIndex(\n ["2010-10", "2010-12", "NaT", "2011-06"], freq="M", name="idx"\n )\n self._check(idx, f, exp)\n\n def test_pi_ops_offset(self):\n idx = PeriodIndex(\n ["2011-01-01", "2011-02-01", "2011-03-01", "2011-04-01"],\n freq="D",\n name="idx",\n )\n f = lambda x: x + pd.offsets.Day()\n exp = PeriodIndex(\n ["2011-01-02", "2011-02-02", "2011-03-02", "2011-04-02"],\n freq="D",\n name="idx",\n )\n self._check(idx, f, exp)\n\n f = lambda x: x + pd.offsets.Day(2)\n exp = PeriodIndex(\n ["2011-01-03", "2011-02-03", "2011-03-03", "2011-04-03"],\n freq="D",\n name="idx",\n )\n self._check(idx, f, exp)\n\n f = lambda x: x - pd.offsets.Day(2)\n exp = PeriodIndex(\n ["2010-12-30", "2011-01-30", "2011-02-27", "2011-03-30"],\n freq="D",\n name="idx",\n )\n self._check(idx, f, exp)\n\n def test_pi_offset_errors(self):\n idx = PeriodIndex(\n ["2011-01-01", "2011-02-01", "2011-03-01", "2011-04-01"],\n freq="D",\n name="idx",\n )\n ser = Series(idx)\n\n msg = (\n "Cannot add/subtract timedelta-like from PeriodArray that is not "\n "an integer multiple of the PeriodArray's freq"\n )\n for obj in [idx, ser]:\n with pytest.raises(IncompatibleFrequency, match=msg):\n obj + pd.offsets.Hour(2)\n\n with pytest.raises(IncompatibleFrequency, match=msg):\n pd.offsets.Hour(2) + obj\n\n with pytest.raises(IncompatibleFrequency, match=msg):\n obj - pd.offsets.Hour(2)\n\n def test_pi_sub_period(self):\n # GH#13071\n idx = PeriodIndex(\n ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"\n )\n\n result = idx - Period("2012-01", freq="M")\n off = idx.freq\n exp = pd.Index([-12 * off, -11 * off, -10 * off, -9 * off], name="idx")\n tm.assert_index_equal(result, exp)\n\n result = np.subtract(idx, Period("2012-01", freq="M"))\n tm.assert_index_equal(result, exp)\n\n result = Period("2012-01", freq="M") - idx\n exp = pd.Index([12 * off, 11 * off, 10 * off, 9 * off], name="idx")\n 
tm.assert_index_equal(result, exp)\n\n result = np.subtract(Period("2012-01", freq="M"), idx)\n tm.assert_index_equal(result, exp)\n\n exp = TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name="idx")\n result = idx - Period("NaT", freq="M")\n tm.assert_index_equal(result, exp)\n assert result.freq == exp.freq\n\n result = Period("NaT", freq="M") - idx\n tm.assert_index_equal(result, exp)\n assert result.freq == exp.freq\n\n def test_pi_sub_pdnat(self):\n # GH#13071, GH#19389\n idx = PeriodIndex(\n ["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx"\n )\n exp = TimedeltaIndex([pd.NaT] * 4, name="idx")\n tm.assert_index_equal(pd.NaT - idx, exp)\n tm.assert_index_equal(idx - pd.NaT, exp)\n\n def test_pi_sub_period_nat(self):\n # GH#13071\n idx = PeriodIndex(\n ["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx"\n )\n\n result = idx - Period("2012-01", freq="M")\n off = idx.freq\n exp = pd.Index([-12 * off, pd.NaT, -10 * off, -9 * off], name="idx")\n tm.assert_index_equal(result, exp)\n\n result = Period("2012-01", freq="M") - idx\n exp = pd.Index([12 * off, pd.NaT, 10 * off, 9 * off], name="idx")\n tm.assert_index_equal(result, exp)\n\n exp = TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name="idx")\n tm.assert_index_equal(idx - Period("NaT", freq="M"), exp)\n tm.assert_index_equal(Period("NaT", freq="M") - idx, exp)\n | .venv\Lib\site-packages\pandas\tests\arithmetic\test_period.py | test_period.py | Python | 59,617 | 0.75 | 0.066866 | 0.084919 | python-kit | 373 | 2024-07-27T23:17:00.877126 | BSD-3-Clause | true | 78b5d34247a79cfe6a8b33e119c709eb |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should\n# behave identically.\nfrom datetime import (\n datetime,\n timedelta,\n)\n\nimport numpy as np\nimport pytest\n\nfrom pandas.errors import (\n OutOfBoundsDatetime,\n PerformanceWarning,\n)\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n DatetimeIndex,\n Index,\n NaT,\n Series,\n Timedelta,\n TimedeltaIndex,\n Timestamp,\n offsets,\n timedelta_range,\n)\nimport pandas._testing as tm\nfrom pandas.core.arrays import NumpyExtensionArray\nfrom pandas.tests.arithmetic.common import (\n assert_invalid_addsub_type,\n assert_invalid_comparison,\n get_upcast_box,\n)\n\n\ndef assert_dtype(obj, expected_dtype):\n """\n Helper to check the dtype for a Series, Index, or single-column DataFrame.\n """\n dtype = tm.get_dtype(obj)\n\n assert dtype == expected_dtype\n\n\ndef get_expected_name(box, names):\n if box is DataFrame:\n # Since we are operating with a DataFrame and a non-DataFrame,\n # the non-DataFrame is cast to Series and its name ignored.\n exname = names[0]\n elif box in [tm.to_array, pd.array]:\n exname = names[1]\n else:\n exname = names[2]\n return exname\n\n\n# ------------------------------------------------------------------\n# Timedelta64[ns] dtype Comparisons\n\n\nclass TestTimedelta64ArrayLikeComparisons:\n # Comparison tests for timedelta64[ns] vectors fully parametrized over\n # DataFrame/Series/TimedeltaIndex/TimedeltaArray. 
Ideally all comparison
    # tests will eventually end up here.

    def test_compare_timedelta64_zerodim(self, box_with_array):
        # GH#26689 should unbox when comparing with zerodim array
        box = box_with_array
        # comparison results come back as plain ndarray for Index/EA boxes
        xbox = box_with_array if box_with_array not in [Index, pd.array] else np.ndarray

        tdi = timedelta_range("2h", periods=4)
        # zero-dim td64 array; must be unboxed to a scalar before comparing
        other = np.array(tdi.to_numpy()[0])

        tdi = tm.box_expected(tdi, box)
        res = tdi <= other
        expected = np.array([True, False, False, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(res, expected)

    @pytest.mark.parametrize(
        "td_scalar",
        [
            timedelta(days=1),
            Timedelta(days=1),
            Timedelta(days=1).to_timedelta64(),
            offsets.Hour(24),
        ],
    )
    def test_compare_timedeltalike_scalar(self, box_with_array, td_scalar):
        # regression test for GH#5963
        box = box_with_array
        xbox = box if box not in [Index, pd.array] else np.ndarray

        ser = Series([timedelta(days=1), timedelta(days=2)])
        ser = tm.box_expected(ser, box)
        actual = ser > td_scalar
        expected = Series([False, True])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(actual, expected)

    @pytest.mark.parametrize(
        "invalid",
        [
            345600000000000,
            "a",
            Timestamp("2021-01-01"),
            Timestamp("2021-01-01").now("UTC"),
            Timestamp("2021-01-01").now().to_datetime64(),
            Timestamp("2021-01-01").now().to_pydatetime(),
            Timestamp("2021-01-01").date(),
            np.array(4),  # zero-dim mismatched dtype
        ],
    )
    def test_td64_comparisons_invalid(self, box_with_array, invalid):
        # GH#13624 for str
        box = box_with_array

        rng = timedelta_range("1 days", periods=10)
        obj = tm.box_expected(rng, box)

        # ==/!= give all-False/all-True; ordering comparisons raise TypeError
        assert_invalid_comparison(obj, invalid, box)

    @pytest.mark.parametrize(
        "other",
        [
            list(range(10)),
            np.arange(10),
            np.arange(10).astype(np.float32),
            np.arange(10).astype(object),
            pd.date_range("1970-01-01", periods=10, tz="UTC").array,
            np.array(pd.date_range("1970-01-01", periods=10)),
            
list(pd.date_range("1970-01-01", periods=10)),
            pd.date_range("1970-01-01", periods=10).astype(object),
            pd.period_range("1971-01-01", freq="D", periods=10).array,
            pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
        ],
    )
    def test_td64arr_cmp_arraylike_invalid(self, other, box_with_array):
        # We don't parametrize this over box_with_array because listlike
        # other plays poorly with assert_invalid_comparison reversed checks

        rng = timedelta_range("1 days", periods=10)._data
        rng = tm.box_expected(rng, box_with_array)
        assert_invalid_comparison(rng, other, box_with_array)

    def test_td64arr_cmp_mixed_invalid(self):
        # object array mixing ints, a Timedelta, and a Timestamp
        rng = timedelta_range("1 days", periods=5)._data
        other = np.array([0, 1, 2, rng[3], Timestamp("2021-01-01")])

        # ==/!= are elementwise; only the matching Timedelta compares equal
        result = rng == other
        expected = np.array([False, False, False, True, False])
        tm.assert_numpy_array_equal(result, expected)

        result = rng != other
        tm.assert_numpy_array_equal(result, ~expected)

        # ordering comparisons against mismatched types must raise
        msg = "Invalid comparison between|Cannot compare type|not supported between"
        with pytest.raises(TypeError, match=msg):
            rng < other
        with pytest.raises(TypeError, match=msg):
            rng > other
        with pytest.raises(TypeError, match=msg):
            rng <= other
        with pytest.raises(TypeError, match=msg):
            rng >= other


class TestTimedelta64ArrayComparisons:
    # TODO: All of these need to be parametrized over box

    @pytest.mark.parametrize("dtype", [None, object])
    def test_comp_nat(self, dtype):
        left = TimedeltaIndex([Timedelta("1 days"), NaT, Timedelta("3 days")])
        right = TimedeltaIndex([NaT, NaT, Timedelta("3 days")])

        lhs, rhs = left, right
        if dtype is object:
            lhs, rhs = left.astype(object), right.astype(object)

        # NaT never compares equal, even to NaT
        result = rhs == lhs
        expected = np.array([False, False, True])
        tm.assert_numpy_array_equal(result, expected)

        result = rhs != lhs
        expected = np.array([True, True, False])
        tm.assert_numpy_array_equal(result, expected)

        expected = np.array([False, False, False])
        tm.assert_numpy_array_equal(lhs == NaT, expected)
        tm.assert_numpy_array_equal(NaT == rhs, expected)

        expected = np.array([True, True, True])
        tm.assert_numpy_array_equal(lhs != NaT, expected)
        tm.assert_numpy_array_equal(NaT != lhs, expected)

        # ordering comparisons with NaT are always False
        expected = np.array([False, False, False])
        tm.assert_numpy_array_equal(lhs < NaT, expected)
        tm.assert_numpy_array_equal(NaT > lhs, expected)

    @pytest.mark.parametrize(
        "idx2",
        [
            TimedeltaIndex(
                ["2 day", "2 day", NaT, NaT, "1 day 00:00:02", "5 days 00:00:03"]
            ),
            np.array(
                [
                    np.timedelta64(2, "D"),
                    np.timedelta64(2, "D"),
                    np.timedelta64("nat"),
                    np.timedelta64("nat"),
                    np.timedelta64(1, "D") + np.timedelta64(2, "s"),
                    np.timedelta64(5, "D") + np.timedelta64(3, "s"),
                ]
            ),
        ],
    )
    def test_comparisons_nat(self, idx2):
        idx1 = TimedeltaIndex(
            [
                "1 day",
                NaT,
                "1 day 00:00:01",
                NaT,
                "1 day 00:00:01",
                "5 day 00:00:03",
            ]
        )
        # Check pd.NaT is handled the same as np.nan
        result = idx1 < idx2
        expected = np.array([True, False, False, False, True, False])
        tm.assert_numpy_array_equal(result, expected)

        result = idx2 > idx1
        expected = np.array([True, False, False, False, True, False])
        tm.assert_numpy_array_equal(result, expected)

        result = idx1 <= idx2
        expected = np.array([True, False, False, False, True, True])
        tm.assert_numpy_array_equal(result, expected)

        result = idx2 >= idx1
        expected = np.array([True, False, False, False, True, True])
        tm.assert_numpy_array_equal(result, expected)

        result = idx1 == idx2
        expected = np.array([False, False, False, False, False, True])
        tm.assert_numpy_array_equal(result, expected)

        result = idx1 != idx2
        expected = np.array([True, True, True, True, True, False])
        tm.assert_numpy_array_equal(result, expected)

    # TODO: better name
    def test_comparisons_coverage(self):
        rng = timedelta_range("1 days", periods=10)

        # comparison against a scalar element of the same index
        result = rng < rng[3]
        expected = np.array([True, True, True] + [False] * 7)
        tm.assert_numpy_array_equal(result, expected)

        # comparison against a list of the same values
        result = rng == list(rng)
        exp = rng == rng
        tm.assert_numpy_array_equal(result, exp)


# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations


class TestTimedelta64ArithmeticUnsorted:
    # Tests moved from type-specific test files but not
    # yet sorted/parametrized/de-duplicated

    def test_ufunc_coercions(self):
        # normal ops are also tested in tseries/test_timedeltas.py
        idx = TimedeltaIndex(["2h", "4h", "6h", "8h", "10h"], freq="2h", name="x")

        # operator and the equivalent ufunc should behave the same
        for result in [idx * 2, np.multiply(idx, 2)]:
            assert isinstance(result, TimedeltaIndex)
            exp = TimedeltaIndex(["4h", "8h", "12h", "16h", "20h"], freq="4h", name="x")
            tm.assert_index_equal(result, exp)
            assert result.freq == "4h"

        for result in [idx / 2, np.divide(idx, 2)]:
            assert isinstance(result, TimedeltaIndex)
            exp = TimedeltaIndex(["1h", "2h", "3h", "4h", "5h"], freq="h", name="x")
            tm.assert_index_equal(result, exp)
            assert result.freq == "h"

        for result in [-idx, np.negative(idx)]:
            assert isinstance(result, TimedeltaIndex)
            exp = TimedeltaIndex(
                ["-2h", "-4h", "-6h", "-8h", "-10h"], freq="-2h", name="x"
            )
            tm.assert_index_equal(result, exp)
            assert result.freq == "-2h"

        # abs() is not sign-preserving, so the freq is lost
        idx = TimedeltaIndex(["-2h", "-1h", "0h", "1h", "2h"], freq="h", name="x")
        for result in [abs(idx), np.absolute(idx)]:
            assert isinstance(result, TimedeltaIndex)
            exp = TimedeltaIndex(["2h", "1h", "0h", "1h", "2h"], freq=None, name="x")
            tm.assert_index_equal(result, exp)
            assert result.freq is None

    def test_subtraction_ops(self):
        # with datetimes/timedelta and tdi/dti
        tdi = TimedeltaIndex(["1 days", NaT, "2 days"], name="foo")
        dti = pd.date_range("20130101", periods=3, name="bar")
        td = Timedelta("1 days")
        dt = Timestamp("20130101")

        # subtracting a datetime-like from a timedelta array is undefined
        msg = "cannot subtract a datelike from a TimedeltaArray"
        with pytest.raises(TypeError, match=msg):
            tdi - dt
        with pytest.raises(TypeError, match=msg):
            tdi - dti

        msg = r"unsupported operand type\(s\) for -"
        with pytest.raises(TypeError, match=msg):
            td - dt

        msg = "(bad|unsupported) operand type for unary"
        with pytest.raises(TypeError, match=msg):
            td - dti

        result = dt - dti
        expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"], name="bar")
        tm.assert_index_equal(result, expected)

        result = dti - dt
        expected = TimedeltaIndex(["0 days", "1 days", "2 days"], name="bar")
        tm.assert_index_equal(result, expected)

        result = tdi - td
        expected = TimedeltaIndex(["0 days", NaT, "1 days"], name="foo")
        tm.assert_index_equal(result, expected)

        result = td - tdi
        expected = TimedeltaIndex(["0 days", NaT, "-1 days"], name="foo")
        tm.assert_index_equal(result, expected)

        result = dti - td
        expected = DatetimeIndex(
            ["20121231", "20130101", "20130102"], dtype="M8[ns]", freq="D", name="bar"
        )
        tm.assert_index_equal(result, expected)

        result = dt - tdi
        expected = DatetimeIndex(
            ["20121231", NaT, "20121230"], dtype="M8[ns]", name="foo"
        )
        tm.assert_index_equal(result, expected)

    def test_subtraction_ops_with_tz(self, box_with_array):
        # check that dt/dti subtraction ops with tz are validated
        dti = pd.date_range("20130101", periods=3)
        dti = tm.box_expected(dti, box_with_array)
        ts = Timestamp("20130101")
        dt = ts.to_pydatetime()
        dti_tz = pd.date_range("20130101", periods=3).tz_localize("US/Eastern")
        dti_tz = tm.box_expected(dti_tz, box_with_array)
        ts_tz = Timestamp("20130101").tz_localize("US/Eastern")
        ts_tz2 = Timestamp("20130101").tz_localize("CET")
        dt_tz = ts_tz.to_pydatetime()
        td = Timedelta("1 days")

        def _check(result, expected):
            # result must be a true Timedelta scalar, not just equal-valued
            assert result == expected
            assert isinstance(result, Timedelta)

        # scalars
        result = ts - ts
        expected = Timedelta("0 days")
        _check(result, expected)

        result = dt_tz - ts_tz
        expected = Timedelta("0 days")
        _check(result, expected)

        result = ts_tz - dt_tz
        
expected = Timedelta("0 days")\n _check(result, expected)\n\n # tz mismatches\n msg = "Cannot subtract tz-naive and tz-aware datetime-like objects."\n with pytest.raises(TypeError, match=msg):\n dt_tz - ts\n msg = "can't subtract offset-naive and offset-aware datetimes"\n with pytest.raises(TypeError, match=msg):\n dt_tz - dt\n msg = "can't subtract offset-naive and offset-aware datetimes"\n with pytest.raises(TypeError, match=msg):\n dt - dt_tz\n msg = "Cannot subtract tz-naive and tz-aware datetime-like objects."\n with pytest.raises(TypeError, match=msg):\n ts - dt_tz\n with pytest.raises(TypeError, match=msg):\n ts_tz2 - ts\n with pytest.raises(TypeError, match=msg):\n ts_tz2 - dt\n\n msg = "Cannot subtract tz-naive and tz-aware"\n # with dti\n with pytest.raises(TypeError, match=msg):\n dti - ts_tz\n with pytest.raises(TypeError, match=msg):\n dti_tz - ts\n\n result = dti_tz - dt_tz\n expected = TimedeltaIndex(["0 days", "1 days", "2 days"])\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(result, expected)\n\n result = dt_tz - dti_tz\n expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"])\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(result, expected)\n\n result = dti_tz - ts_tz\n expected = TimedeltaIndex(["0 days", "1 days", "2 days"])\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(result, expected)\n\n result = ts_tz - dti_tz\n expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"])\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(result, expected)\n\n result = td - td\n expected = Timedelta("0 days")\n _check(result, expected)\n\n result = dti_tz - td\n expected = DatetimeIndex(\n ["20121231", "20130101", "20130102"], tz="US/Eastern"\n ).as_unit("ns")\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(result, expected)\n\n def test_dti_tdi_numeric_ops(self):\n # These are normally union/diff set-like ops\n tdi = 
TimedeltaIndex(["1 days", NaT, "2 days"], name="foo")\n dti = pd.date_range("20130101", periods=3, name="bar")\n\n result = tdi - tdi\n expected = TimedeltaIndex(["0 days", NaT, "0 days"], name="foo")\n tm.assert_index_equal(result, expected)\n\n result = tdi + tdi\n expected = TimedeltaIndex(["2 days", NaT, "4 days"], name="foo")\n tm.assert_index_equal(result, expected)\n\n result = dti - tdi # name will be reset\n expected = DatetimeIndex(["20121231", NaT, "20130101"], dtype="M8[ns]")\n tm.assert_index_equal(result, expected)\n\n def test_addition_ops(self):\n # with datetimes/timedelta and tdi/dti\n tdi = TimedeltaIndex(["1 days", NaT, "2 days"], name="foo")\n dti = pd.date_range("20130101", periods=3, name="bar")\n td = Timedelta("1 days")\n dt = Timestamp("20130101")\n\n result = tdi + dt\n expected = DatetimeIndex(\n ["20130102", NaT, "20130103"], dtype="M8[ns]", name="foo"\n )\n tm.assert_index_equal(result, expected)\n\n result = dt + tdi\n expected = DatetimeIndex(\n ["20130102", NaT, "20130103"], dtype="M8[ns]", name="foo"\n )\n tm.assert_index_equal(result, expected)\n\n result = td + tdi\n expected = TimedeltaIndex(["2 days", NaT, "3 days"], name="foo")\n tm.assert_index_equal(result, expected)\n\n result = tdi + td\n expected = TimedeltaIndex(["2 days", NaT, "3 days"], name="foo")\n tm.assert_index_equal(result, expected)\n\n # unequal length\n msg = "cannot add indices of unequal length"\n with pytest.raises(ValueError, match=msg):\n tdi + dti[0:1]\n with pytest.raises(ValueError, match=msg):\n tdi[0:1] + dti\n\n # random indexes\n msg = "Addition/subtraction of integers and integer-arrays"\n with pytest.raises(TypeError, match=msg):\n tdi + Index([1, 2, 3], dtype=np.int64)\n\n # this is a union!\n # FIXME: don't leave commented-out\n # pytest.raises(TypeError, lambda : Index([1,2,3]) + tdi)\n\n result = tdi + dti # name will be reset\n expected = DatetimeIndex(["20130102", NaT, "20130105"], dtype="M8[ns]")\n tm.assert_index_equal(result, 
expected)\n\n result = dti + tdi # name will be reset\n expected = DatetimeIndex(["20130102", NaT, "20130105"], dtype="M8[ns]")\n tm.assert_index_equal(result, expected)\n\n result = dt + td\n expected = Timestamp("20130102")\n assert result == expected\n\n result = td + dt\n expected = Timestamp("20130102")\n assert result == expected\n\n # TODO: Needs more informative name, probably split up into\n # more targeted tests\n @pytest.mark.parametrize("freq", ["D", "B"])\n def test_timedelta(self, freq):\n index = pd.date_range("1/1/2000", periods=50, freq=freq)\n\n shifted = index + timedelta(1)\n back = shifted + timedelta(-1)\n back = back._with_freq("infer")\n tm.assert_index_equal(index, back)\n\n if freq == "D":\n expected = pd.tseries.offsets.Day(1)\n assert index.freq == expected\n assert shifted.freq == expected\n assert back.freq == expected\n else: # freq == 'B'\n assert index.freq == pd.tseries.offsets.BusinessDay(1)\n assert shifted.freq is None\n assert back.freq == pd.tseries.offsets.BusinessDay(1)\n\n result = index - timedelta(1)\n expected = index + timedelta(-1)\n tm.assert_index_equal(result, expected)\n\n def test_timedelta_tick_arithmetic(self):\n # GH#4134, buggy with timedeltas\n rng = pd.date_range("2013", "2014")\n s = Series(rng)\n result1 = rng - offsets.Hour(1)\n result2 = DatetimeIndex(s - np.timedelta64(100000000))\n result3 = rng - np.timedelta64(100000000)\n result4 = DatetimeIndex(s - offsets.Hour(1))\n\n assert result1.freq == rng.freq\n result1 = result1._with_freq(None)\n tm.assert_index_equal(result1, result4)\n\n assert result3.freq == rng.freq\n result3 = result3._with_freq(None)\n tm.assert_index_equal(result2, result3)\n\n def test_tda_add_sub_index(self):\n # Check that TimedeltaArray defers to Index on arithmetic ops\n tdi = TimedeltaIndex(["1 days", NaT, "2 days"])\n tda = tdi.array\n\n dti = pd.date_range("1999-12-31", periods=3, freq="D")\n\n result = tda + dti\n expected = tdi + dti\n tm.assert_index_equal(result, 
expected)\n\n result = tda + tdi\n expected = tdi + tdi\n tm.assert_index_equal(result, expected)\n\n result = tda - tdi\n expected = tdi - tdi\n tm.assert_index_equal(result, expected)\n\n def test_tda_add_dt64_object_array(self, box_with_array, tz_naive_fixture):\n # Result should be cast back to DatetimeArray\n box = box_with_array\n\n dti = pd.date_range("2016-01-01", periods=3, tz=tz_naive_fixture)\n dti = dti._with_freq(None)\n tdi = dti - dti\n\n obj = tm.box_expected(tdi, box)\n other = tm.box_expected(dti, box)\n\n with tm.assert_produces_warning(PerformanceWarning):\n result = obj + other.astype(object)\n tm.assert_equal(result, other.astype(object))\n\n # -------------------------------------------------------------\n # Binary operations TimedeltaIndex and timedelta-like\n\n def test_tdi_iadd_timedeltalike(self, two_hours, box_with_array):\n # only test adding/sub offsets as + is now numeric\n rng = timedelta_range("1 days", "10 days")\n expected = timedelta_range("1 days 02:00:00", "10 days 02:00:00", freq="D")\n\n rng = tm.box_expected(rng, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n orig_rng = rng\n rng += two_hours\n tm.assert_equal(rng, expected)\n if box_with_array is not Index:\n # Check that operation is actually inplace\n tm.assert_equal(orig_rng, expected)\n\n def test_tdi_isub_timedeltalike(self, two_hours, box_with_array):\n # only test adding/sub offsets as - is now numeric\n rng = timedelta_range("1 days", "10 days")\n expected = timedelta_range("0 days 22:00:00", "9 days 22:00:00")\n\n rng = tm.box_expected(rng, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n orig_rng = rng\n rng -= two_hours\n tm.assert_equal(rng, expected)\n if box_with_array is not Index:\n # Check that operation is actually inplace\n tm.assert_equal(orig_rng, expected)\n\n # -------------------------------------------------------------\n\n def test_tdi_ops_attributes(self):\n rng = timedelta_range("2 days", 
periods=5, freq="2D", name="x")\n\n result = rng + 1 * rng.freq\n exp = timedelta_range("4 days", periods=5, freq="2D", name="x")\n tm.assert_index_equal(result, exp)\n assert result.freq == "2D"\n\n result = rng - 2 * rng.freq\n exp = timedelta_range("-2 days", periods=5, freq="2D", name="x")\n tm.assert_index_equal(result, exp)\n assert result.freq == "2D"\n\n result = rng * 2\n exp = timedelta_range("4 days", periods=5, freq="4D", name="x")\n tm.assert_index_equal(result, exp)\n assert result.freq == "4D"\n\n result = rng / 2\n exp = timedelta_range("1 days", periods=5, freq="D", name="x")\n tm.assert_index_equal(result, exp)\n assert result.freq == "D"\n\n result = -rng\n exp = timedelta_range("-2 days", periods=5, freq="-2D", name="x")\n tm.assert_index_equal(result, exp)\n assert result.freq == "-2D"\n\n rng = timedelta_range("-2 days", periods=5, freq="D", name="x")\n\n result = abs(rng)\n exp = TimedeltaIndex(\n ["2 days", "1 days", "0 days", "1 days", "2 days"], name="x"\n )\n tm.assert_index_equal(result, exp)\n assert result.freq is None\n\n\nclass TestAddSubNaTMasking:\n # TODO: parametrize over boxes\n\n @pytest.mark.parametrize("str_ts", ["1950-01-01", "1980-01-01"])\n def test_tdarr_add_timestamp_nat_masking(self, box_with_array, str_ts):\n # GH#17991 checking for overflow-masking with NaT\n tdinat = pd.to_timedelta(["24658 days 11:15:00", "NaT"])\n tdobj = tm.box_expected(tdinat, box_with_array)\n\n ts = Timestamp(str_ts)\n ts_variants = [\n ts,\n ts.to_pydatetime(),\n ts.to_datetime64().astype("datetime64[ns]"),\n ts.to_datetime64().astype("datetime64[D]"),\n ]\n\n for variant in ts_variants:\n res = tdobj + variant\n if box_with_array is DataFrame:\n assert res.iloc[1, 1] is NaT\n else:\n assert res[1] is NaT\n\n def test_tdi_add_overflow(self):\n # See GH#14068\n # preliminary test scalar analogue of vectorized tests below\n # TODO: Make raised error message more informative and test\n with pytest.raises(OutOfBoundsDatetime, 
match="10155196800000000000"):\n pd.to_timedelta(106580, "D") + Timestamp("2000")\n with pytest.raises(OutOfBoundsDatetime, match="10155196800000000000"):\n Timestamp("2000") + pd.to_timedelta(106580, "D")\n\n _NaT = NaT._value + 1\n msg = "Overflow in int64 addition"\n with pytest.raises(OverflowError, match=msg):\n pd.to_timedelta([106580], "D") + Timestamp("2000")\n with pytest.raises(OverflowError, match=msg):\n Timestamp("2000") + pd.to_timedelta([106580], "D")\n with pytest.raises(OverflowError, match=msg):\n pd.to_timedelta([_NaT]) - Timedelta("1 days")\n with pytest.raises(OverflowError, match=msg):\n pd.to_timedelta(["5 days", _NaT]) - Timedelta("1 days")\n with pytest.raises(OverflowError, match=msg):\n (\n pd.to_timedelta([_NaT, "5 days", "1 hours"])\n - pd.to_timedelta(["7 seconds", _NaT, "4 hours"])\n )\n\n # These should not overflow!\n exp = TimedeltaIndex([NaT])\n result = pd.to_timedelta([NaT]) - Timedelta("1 days")\n tm.assert_index_equal(result, exp)\n\n exp = TimedeltaIndex(["4 days", NaT])\n result = pd.to_timedelta(["5 days", NaT]) - Timedelta("1 days")\n tm.assert_index_equal(result, exp)\n\n exp = TimedeltaIndex([NaT, NaT, "5 hours"])\n result = pd.to_timedelta([NaT, "5 days", "1 hours"]) + pd.to_timedelta(\n ["7 seconds", NaT, "4 hours"]\n )\n tm.assert_index_equal(result, exp)\n\n\nclass TestTimedeltaArraylikeAddSubOps:\n # Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__\n\n def test_sub_nat_retain_unit(self):\n ser = pd.to_timedelta(Series(["00:00:01"])).astype("m8[s]")\n\n result = ser - NaT\n expected = Series([NaT], dtype="m8[s]")\n tm.assert_series_equal(result, expected)\n\n # TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs\n # parametrization+de-duplication\n def test_timedelta_ops_with_missing_values(self):\n # setup\n s1 = pd.to_timedelta(Series(["00:00:01"]))\n s2 = pd.to_timedelta(Series(["00:00:02"]))\n\n sn = pd.to_timedelta(Series([NaT], dtype="m8[ns]"))\n\n df1 = 
DataFrame(["00:00:01"]).apply(pd.to_timedelta)\n df2 = DataFrame(["00:00:02"]).apply(pd.to_timedelta)\n\n dfn = DataFrame([NaT._value]).apply(pd.to_timedelta)\n\n scalar1 = pd.to_timedelta("00:00:01")\n scalar2 = pd.to_timedelta("00:00:02")\n timedelta_NaT = pd.to_timedelta("NaT")\n\n actual = scalar1 + scalar1\n assert actual == scalar2\n actual = scalar2 - scalar1\n assert actual == scalar1\n\n actual = s1 + s1\n tm.assert_series_equal(actual, s2)\n actual = s2 - s1\n tm.assert_series_equal(actual, s1)\n\n actual = s1 + scalar1\n tm.assert_series_equal(actual, s2)\n actual = scalar1 + s1\n tm.assert_series_equal(actual, s2)\n actual = s2 - scalar1\n tm.assert_series_equal(actual, s1)\n actual = -scalar1 + s2\n tm.assert_series_equal(actual, s1)\n\n actual = s1 + timedelta_NaT\n tm.assert_series_equal(actual, sn)\n actual = timedelta_NaT + s1\n tm.assert_series_equal(actual, sn)\n actual = s1 - timedelta_NaT\n tm.assert_series_equal(actual, sn)\n actual = -timedelta_NaT + s1\n tm.assert_series_equal(actual, sn)\n\n msg = "unsupported operand type"\n with pytest.raises(TypeError, match=msg):\n s1 + np.nan\n with pytest.raises(TypeError, match=msg):\n np.nan + s1\n with pytest.raises(TypeError, match=msg):\n s1 - np.nan\n with pytest.raises(TypeError, match=msg):\n -np.nan + s1\n\n actual = s1 + NaT\n tm.assert_series_equal(actual, sn)\n actual = s2 - NaT\n tm.assert_series_equal(actual, sn)\n\n actual = s1 + df1\n tm.assert_frame_equal(actual, df2)\n actual = s2 - df1\n tm.assert_frame_equal(actual, df1)\n actual = df1 + s1\n tm.assert_frame_equal(actual, df2)\n actual = df2 - s1\n tm.assert_frame_equal(actual, df1)\n\n actual = df1 + df1\n tm.assert_frame_equal(actual, df2)\n actual = df2 - df1\n tm.assert_frame_equal(actual, df1)\n\n actual = df1 + scalar1\n tm.assert_frame_equal(actual, df2)\n actual = df2 - scalar1\n tm.assert_frame_equal(actual, df1)\n\n actual = df1 + timedelta_NaT\n tm.assert_frame_equal(actual, dfn)\n actual = df1 - timedelta_NaT\n 
tm.assert_frame_equal(actual, dfn)\n\n msg = "cannot subtract a datelike from|unsupported operand type"\n with pytest.raises(TypeError, match=msg):\n df1 + np.nan\n with pytest.raises(TypeError, match=msg):\n df1 - np.nan\n\n actual = df1 + NaT # NaT is datetime, not timedelta\n tm.assert_frame_equal(actual, dfn)\n actual = df1 - NaT\n tm.assert_frame_equal(actual, dfn)\n\n # TODO: moved from tests.series.test_operators, needs splitting, cleanup,\n # de-duplication, box-parametrization...\n def test_operators_timedelta64(self):\n # series ops\n v1 = pd.date_range("2012-1-1", periods=3, freq="D")\n v2 = pd.date_range("2012-1-2", periods=3, freq="D")\n rs = Series(v2) - Series(v1)\n xp = Series(1e9 * 3600 * 24, rs.index).astype("int64").astype("timedelta64[ns]")\n tm.assert_series_equal(rs, xp)\n assert rs.dtype == "timedelta64[ns]"\n\n df = DataFrame({"A": v1})\n td = Series([timedelta(days=i) for i in range(3)])\n assert td.dtype == "timedelta64[ns]"\n\n # series on the rhs\n result = df["A"] - df["A"].shift()\n assert result.dtype == "timedelta64[ns]"\n\n result = df["A"] + td\n assert result.dtype == "M8[ns]"\n\n # scalar Timestamp on rhs\n maxa = df["A"].max()\n assert isinstance(maxa, Timestamp)\n\n resultb = df["A"] - df["A"].max()\n assert resultb.dtype == "timedelta64[ns]"\n\n # timestamp on lhs\n result = resultb + df["A"]\n values = [Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]\n expected = Series(values, dtype="M8[ns]", name="A")\n tm.assert_series_equal(result, expected)\n\n # datetimes on rhs\n result = df["A"] - datetime(2001, 1, 1)\n expected = Series([timedelta(days=4017 + i) for i in range(3)], name="A")\n tm.assert_series_equal(result, expected)\n assert result.dtype == "m8[ns]"\n\n d = datetime(2001, 1, 1, 3, 4)\n resulta = df["A"] - d\n assert resulta.dtype == "m8[ns]"\n\n # roundtrip\n resultb = resulta + d\n tm.assert_series_equal(df["A"], resultb)\n\n # timedeltas on rhs\n td = timedelta(days=1)\n resulta = df["A"] + 
td\n resultb = resulta - td\n tm.assert_series_equal(resultb, df["A"])\n assert resultb.dtype == "M8[ns]"\n\n # roundtrip\n td = timedelta(minutes=5, seconds=3)\n resulta = df["A"] + td\n resultb = resulta - td\n tm.assert_series_equal(df["A"], resultb)\n assert resultb.dtype == "M8[ns]"\n\n # inplace\n value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))\n rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))\n assert rs[2] == value\n\n def test_timedelta64_ops_nat(self):\n # GH 11349\n timedelta_series = Series([NaT, Timedelta("1s")])\n nat_series_dtype_timedelta = Series([NaT, NaT], dtype="timedelta64[ns]")\n single_nat_dtype_timedelta = Series([NaT], dtype="timedelta64[ns]")\n\n # subtraction\n tm.assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)\n tm.assert_series_equal(-NaT + timedelta_series, nat_series_dtype_timedelta)\n\n tm.assert_series_equal(\n timedelta_series - single_nat_dtype_timedelta, nat_series_dtype_timedelta\n )\n tm.assert_series_equal(\n -single_nat_dtype_timedelta + timedelta_series, nat_series_dtype_timedelta\n )\n\n # addition\n tm.assert_series_equal(\n nat_series_dtype_timedelta + NaT, nat_series_dtype_timedelta\n )\n tm.assert_series_equal(\n NaT + nat_series_dtype_timedelta, nat_series_dtype_timedelta\n )\n\n tm.assert_series_equal(\n nat_series_dtype_timedelta + single_nat_dtype_timedelta,\n nat_series_dtype_timedelta,\n )\n tm.assert_series_equal(\n single_nat_dtype_timedelta + nat_series_dtype_timedelta,\n nat_series_dtype_timedelta,\n )\n\n tm.assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)\n tm.assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)\n\n tm.assert_series_equal(\n timedelta_series + single_nat_dtype_timedelta, nat_series_dtype_timedelta\n )\n tm.assert_series_equal(\n single_nat_dtype_timedelta + timedelta_series, nat_series_dtype_timedelta\n )\n\n tm.assert_series_equal(\n nat_series_dtype_timedelta + NaT, nat_series_dtype_timedelta\n )\n 
tm.assert_series_equal(\n NaT + nat_series_dtype_timedelta, nat_series_dtype_timedelta\n )\n\n tm.assert_series_equal(\n nat_series_dtype_timedelta + single_nat_dtype_timedelta,\n nat_series_dtype_timedelta,\n )\n tm.assert_series_equal(\n single_nat_dtype_timedelta + nat_series_dtype_timedelta,\n nat_series_dtype_timedelta,\n )\n\n # multiplication\n tm.assert_series_equal(\n nat_series_dtype_timedelta * 1.0, nat_series_dtype_timedelta\n )\n tm.assert_series_equal(\n 1.0 * nat_series_dtype_timedelta, nat_series_dtype_timedelta\n )\n\n tm.assert_series_equal(timedelta_series * 1, timedelta_series)\n tm.assert_series_equal(1 * timedelta_series, timedelta_series)\n\n tm.assert_series_equal(timedelta_series * 1.5, Series([NaT, Timedelta("1.5s")]))\n tm.assert_series_equal(1.5 * timedelta_series, Series([NaT, Timedelta("1.5s")]))\n\n tm.assert_series_equal(timedelta_series * np.nan, nat_series_dtype_timedelta)\n tm.assert_series_equal(np.nan * timedelta_series, nat_series_dtype_timedelta)\n\n # division\n tm.assert_series_equal(timedelta_series / 2, Series([NaT, Timedelta("0.5s")]))\n tm.assert_series_equal(timedelta_series / 2.0, Series([NaT, Timedelta("0.5s")]))\n tm.assert_series_equal(timedelta_series / np.nan, nat_series_dtype_timedelta)\n\n # -------------------------------------------------------------\n # Binary operations td64 arraylike and datetime-like\n\n @pytest.mark.parametrize("cls", [Timestamp, datetime, np.datetime64])\n def test_td64arr_add_sub_datetimelike_scalar(\n self, cls, box_with_array, tz_naive_fixture\n ):\n # GH#11925, GH#29558, GH#23215\n tz = tz_naive_fixture\n\n dt_scalar = Timestamp("2012-01-01", tz=tz)\n if cls is datetime:\n ts = dt_scalar.to_pydatetime()\n elif cls is np.datetime64:\n if tz_naive_fixture is not None:\n pytest.skip(f"{cls} doesn support {tz_naive_fixture}")\n ts = dt_scalar.to_datetime64()\n else:\n ts = dt_scalar\n\n tdi = timedelta_range("1 day", periods=3)\n expected = pd.date_range("2012-01-02", periods=3, 
tz=tz)\n\n tdarr = tm.box_expected(tdi, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n tm.assert_equal(ts + tdarr, expected)\n tm.assert_equal(tdarr + ts, expected)\n\n expected2 = pd.date_range("2011-12-31", periods=3, freq="-1D", tz=tz)\n expected2 = tm.box_expected(expected2, box_with_array)\n\n tm.assert_equal(ts - tdarr, expected2)\n tm.assert_equal(ts + (-tdarr), expected2)\n\n msg = "cannot subtract a datelike"\n with pytest.raises(TypeError, match=msg):\n tdarr - ts\n\n def test_td64arr_add_datetime64_nat(self, box_with_array):\n # GH#23215\n other = np.datetime64("NaT")\n\n tdi = timedelta_range("1 day", periods=3)\n expected = DatetimeIndex(["NaT", "NaT", "NaT"], dtype="M8[ns]")\n\n tdser = tm.box_expected(tdi, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n tm.assert_equal(tdser + other, expected)\n tm.assert_equal(other + tdser, expected)\n\n def test_td64arr_sub_dt64_array(self, box_with_array):\n dti = pd.date_range("2016-01-01", periods=3)\n tdi = TimedeltaIndex(["-1 Day"] * 3)\n dtarr = dti.values\n expected = DatetimeIndex(dtarr) - tdi\n\n tdi = tm.box_expected(tdi, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n msg = "cannot subtract a datelike from"\n with pytest.raises(TypeError, match=msg):\n tdi - dtarr\n\n # TimedeltaIndex.__rsub__\n result = dtarr - tdi\n tm.assert_equal(result, expected)\n\n def test_td64arr_add_dt64_array(self, box_with_array):\n dti = pd.date_range("2016-01-01", periods=3)\n tdi = TimedeltaIndex(["-1 Day"] * 3)\n dtarr = dti.values\n expected = DatetimeIndex(dtarr) + tdi\n\n tdi = tm.box_expected(tdi, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = tdi + dtarr\n tm.assert_equal(result, expected)\n result = dtarr + tdi\n tm.assert_equal(result, expected)\n\n # ------------------------------------------------------------------\n # Invalid __add__/__sub__ operations\n\n @pytest.mark.parametrize("pi_freq", 
["D", "W", "Q", "h"])\n @pytest.mark.parametrize("tdi_freq", [None, "h"])\n def test_td64arr_sub_periodlike(\n self, box_with_array, box_with_array2, tdi_freq, pi_freq\n ):\n # GH#20049 subtracting PeriodIndex should raise TypeError\n tdi = TimedeltaIndex(["1 hours", "2 hours"], freq=tdi_freq)\n dti = Timestamp("2018-03-07 17:16:40") + tdi\n pi = dti.to_period(pi_freq)\n per = pi[0]\n\n tdi = tm.box_expected(tdi, box_with_array)\n pi = tm.box_expected(pi, box_with_array2)\n msg = "cannot subtract|unsupported operand type"\n with pytest.raises(TypeError, match=msg):\n tdi - pi\n\n # GH#13078 subtraction of Period scalar not supported\n with pytest.raises(TypeError, match=msg):\n tdi - per\n\n @pytest.mark.parametrize(\n "other",\n [\n # GH#12624 for str case\n "a",\n # GH#19123\n 1,\n 1.5,\n np.array(2),\n ],\n )\n def test_td64arr_addsub_numeric_scalar_invalid(self, box_with_array, other):\n # vector-like others are tested in test_td64arr_add_sub_numeric_arr_invalid\n tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")\n tdarr = tm.box_expected(tdser, box_with_array)\n\n assert_invalid_addsub_type(tdarr, other)\n\n @pytest.mark.parametrize(\n "vec",\n [\n np.array([1, 2, 3]),\n Index([1, 2, 3]),\n Series([1, 2, 3]),\n DataFrame([[1, 2, 3]]),\n ],\n ids=lambda x: type(x).__name__,\n )\n def test_td64arr_addsub_numeric_arr_invalid(\n self, box_with_array, vec, any_real_numpy_dtype\n ):\n tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")\n tdarr = tm.box_expected(tdser, box_with_array)\n\n vector = vec.astype(any_real_numpy_dtype)\n assert_invalid_addsub_type(tdarr, vector)\n\n def test_td64arr_add_sub_int(self, box_with_array, one):\n # Variants of `one` for #19012, deprecated GH#22535\n rng = timedelta_range("1 days 09:00:00", freq="h", periods=10)\n tdarr = tm.box_expected(rng, box_with_array)\n\n msg = "Addition/subtraction of integers"\n assert_invalid_addsub_type(tdarr, one, msg)\n\n # TODO: get inplace ops into assert_invalid_addsub_type\n 
with pytest.raises(TypeError, match=msg):\n tdarr += one\n with pytest.raises(TypeError, match=msg):\n tdarr -= one\n\n def test_td64arr_add_sub_integer_array(self, box_with_array):\n # GH#19959, deprecated GH#22535\n # GH#22696 for DataFrame case, check that we don't dispatch to numpy\n # implementation, which treats int64 as m8[ns]\n box = box_with_array\n xbox = np.ndarray if box is pd.array else box\n\n rng = timedelta_range("1 days 09:00:00", freq="h", periods=3)\n tdarr = tm.box_expected(rng, box)\n other = tm.box_expected([4, 3, 2], xbox)\n\n msg = "Addition/subtraction of integers and integer-arrays"\n assert_invalid_addsub_type(tdarr, other, msg)\n\n def test_td64arr_addsub_integer_array_no_freq(self, box_with_array):\n # GH#19959\n box = box_with_array\n xbox = np.ndarray if box is pd.array else box\n\n tdi = TimedeltaIndex(["1 Day", "NaT", "3 Hours"])\n tdarr = tm.box_expected(tdi, box)\n other = tm.box_expected([14, -1, 16], xbox)\n\n msg = "Addition/subtraction of integers"\n assert_invalid_addsub_type(tdarr, other, msg)\n\n # ------------------------------------------------------------------\n # Operations with timedelta-like others\n\n def test_td64arr_add_sub_td64_array(self, box_with_array):\n box = box_with_array\n dti = pd.date_range("2016-01-01", periods=3)\n tdi = dti - dti.shift(1)\n tdarr = tdi.values\n\n expected = 2 * tdi\n tdi = tm.box_expected(tdi, box)\n expected = tm.box_expected(expected, box)\n\n result = tdi + tdarr\n tm.assert_equal(result, expected)\n result = tdarr + tdi\n tm.assert_equal(result, expected)\n\n expected_sub = 0 * tdi\n result = tdi - tdarr\n tm.assert_equal(result, expected_sub)\n result = tdarr - tdi\n tm.assert_equal(result, expected_sub)\n\n def test_td64arr_add_sub_tdi(self, box_with_array, names):\n # GH#17250 make sure result dtype is correct\n # GH#19043 make sure names are propagated correctly\n box = box_with_array\n exname = get_expected_name(box, names)\n\n tdi = TimedeltaIndex(["0 days", "1 day"], 
name=names[1])\n tdi = np.array(tdi) if box in [tm.to_array, pd.array] else tdi\n ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[0])\n expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)], name=exname)\n\n ser = tm.box_expected(ser, box)\n expected = tm.box_expected(expected, box)\n\n result = tdi + ser\n tm.assert_equal(result, expected)\n assert_dtype(result, "timedelta64[ns]")\n\n result = ser + tdi\n tm.assert_equal(result, expected)\n assert_dtype(result, "timedelta64[ns]")\n\n expected = Series(\n [Timedelta(hours=-3), Timedelta(days=1, hours=-4)], name=exname\n )\n expected = tm.box_expected(expected, box)\n\n result = tdi - ser\n tm.assert_equal(result, expected)\n assert_dtype(result, "timedelta64[ns]")\n\n result = ser - tdi\n tm.assert_equal(result, -expected)\n assert_dtype(result, "timedelta64[ns]")\n\n @pytest.mark.parametrize("tdnat", [np.timedelta64("NaT"), NaT])\n def test_td64arr_add_sub_td64_nat(self, box_with_array, tdnat):\n # GH#18808, GH#23320 special handling for timedelta64("NaT")\n box = box_with_array\n tdi = TimedeltaIndex([NaT, Timedelta("1s")])\n expected = TimedeltaIndex(["NaT"] * 2)\n\n obj = tm.box_expected(tdi, box)\n expected = tm.box_expected(expected, box)\n\n result = obj + tdnat\n tm.assert_equal(result, expected)\n result = tdnat + obj\n tm.assert_equal(result, expected)\n result = obj - tdnat\n tm.assert_equal(result, expected)\n result = tdnat - obj\n tm.assert_equal(result, expected)\n\n def test_td64arr_add_timedeltalike(self, two_hours, box_with_array):\n # only test adding/sub offsets as + is now numeric\n # GH#10699 for Tick cases\n box = box_with_array\n rng = timedelta_range("1 days", "10 days")\n expected = timedelta_range("1 days 02:00:00", "10 days 02:00:00", freq="D")\n rng = tm.box_expected(rng, box)\n expected = tm.box_expected(expected, box)\n\n result = rng + two_hours\n tm.assert_equal(result, expected)\n\n result = two_hours + rng\n tm.assert_equal(result, expected)\n\n def 
test_td64arr_sub_timedeltalike(self, two_hours, box_with_array):\n # only test adding/sub offsets as - is now numeric\n # GH#10699 for Tick cases\n box = box_with_array\n rng = timedelta_range("1 days", "10 days")\n expected = timedelta_range("0 days 22:00:00", "9 days 22:00:00")\n\n rng = tm.box_expected(rng, box)\n expected = tm.box_expected(expected, box)\n\n result = rng - two_hours\n tm.assert_equal(result, expected)\n\n result = two_hours - rng\n tm.assert_equal(result, -expected)\n\n # ------------------------------------------------------------------\n # __add__/__sub__ with DateOffsets and arrays of DateOffsets\n\n def test_td64arr_add_sub_offset_index(self, names, box_with_array):\n # GH#18849, GH#19744\n box = box_with_array\n exname = get_expected_name(box, names)\n\n tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0])\n other = Index([offsets.Hour(n=1), offsets.Minute(n=-2)], name=names[1])\n other = np.array(other) if box in [tm.to_array, pd.array] else other\n\n expected = TimedeltaIndex(\n [tdi[n] + other[n] for n in range(len(tdi))], freq="infer", name=exname\n )\n expected_sub = TimedeltaIndex(\n [tdi[n] - other[n] for n in range(len(tdi))], freq="infer", name=exname\n )\n\n tdi = tm.box_expected(tdi, box)\n expected = tm.box_expected(expected, box).astype(object, copy=False)\n expected_sub = tm.box_expected(expected_sub, box).astype(object, copy=False)\n\n with tm.assert_produces_warning(PerformanceWarning):\n res = tdi + other\n tm.assert_equal(res, expected)\n\n with tm.assert_produces_warning(PerformanceWarning):\n res2 = other + tdi\n tm.assert_equal(res2, expected)\n\n with tm.assert_produces_warning(PerformanceWarning):\n res_sub = tdi - other\n tm.assert_equal(res_sub, expected_sub)\n\n def test_td64arr_add_sub_offset_array(self, box_with_array):\n # GH#18849, GH#18824\n box = box_with_array\n tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"])\n other = np.array([offsets.Hour(n=1), 
offsets.Minute(n=-2)])\n\n expected = TimedeltaIndex(\n [tdi[n] + other[n] for n in range(len(tdi))], freq="infer"\n )\n expected_sub = TimedeltaIndex(\n [tdi[n] - other[n] for n in range(len(tdi))], freq="infer"\n )\n\n tdi = tm.box_expected(tdi, box)\n expected = tm.box_expected(expected, box).astype(object)\n\n with tm.assert_produces_warning(PerformanceWarning):\n res = tdi + other\n tm.assert_equal(res, expected)\n\n with tm.assert_produces_warning(PerformanceWarning):\n res2 = other + tdi\n tm.assert_equal(res2, expected)\n\n expected_sub = tm.box_expected(expected_sub, box_with_array).astype(object)\n with tm.assert_produces_warning(PerformanceWarning):\n res_sub = tdi - other\n tm.assert_equal(res_sub, expected_sub)\n\n def test_td64arr_with_offset_series(self, names, box_with_array):\n # GH#18849\n box = box_with_array\n box2 = Series if box in [Index, tm.to_array, pd.array] else box\n exname = get_expected_name(box, names)\n\n tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0])\n other = Series([offsets.Hour(n=1), offsets.Minute(n=-2)], name=names[1])\n\n expected_add = Series(\n [tdi[n] + other[n] for n in range(len(tdi))], name=exname, dtype=object\n )\n obj = tm.box_expected(tdi, box)\n expected_add = tm.box_expected(expected_add, box2).astype(object)\n\n with tm.assert_produces_warning(PerformanceWarning):\n res = obj + other\n tm.assert_equal(res, expected_add)\n\n with tm.assert_produces_warning(PerformanceWarning):\n res2 = other + obj\n tm.assert_equal(res2, expected_add)\n\n expected_sub = Series(\n [tdi[n] - other[n] for n in range(len(tdi))], name=exname, dtype=object\n )\n expected_sub = tm.box_expected(expected_sub, box2).astype(object)\n\n with tm.assert_produces_warning(PerformanceWarning):\n res3 = obj - other\n tm.assert_equal(res3, expected_sub)\n\n @pytest.mark.parametrize("obox", [np.array, Index, Series])\n def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box_with_array):\n # GH#18824\n tdi = 
TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"])\n tdi = tm.box_expected(tdi, box_with_array)\n\n anchored = obox([offsets.MonthEnd(), offsets.Day(n=2)])\n\n # addition/subtraction ops with anchored offsets should issue\n # a PerformanceWarning and _then_ raise a TypeError.\n msg = "has incorrect type|cannot add the type MonthEnd"\n with pytest.raises(TypeError, match=msg):\n with tm.assert_produces_warning(PerformanceWarning):\n tdi + anchored\n with pytest.raises(TypeError, match=msg):\n with tm.assert_produces_warning(PerformanceWarning):\n anchored + tdi\n with pytest.raises(TypeError, match=msg):\n with tm.assert_produces_warning(PerformanceWarning):\n tdi - anchored\n with pytest.raises(TypeError, match=msg):\n with tm.assert_produces_warning(PerformanceWarning):\n anchored - tdi\n\n # ------------------------------------------------------------------\n # Unsorted\n\n def test_td64arr_add_sub_object_array(self, box_with_array):\n box = box_with_array\n xbox = np.ndarray if box is pd.array else box\n\n tdi = timedelta_range("1 day", periods=3, freq="D")\n tdarr = tm.box_expected(tdi, box)\n\n other = np.array([Timedelta(days=1), offsets.Day(2), Timestamp("2000-01-04")])\n\n with tm.assert_produces_warning(PerformanceWarning):\n result = tdarr + other\n\n expected = Index(\n [Timedelta(days=2), Timedelta(days=4), Timestamp("2000-01-07")]\n )\n expected = tm.box_expected(expected, xbox).astype(object)\n tm.assert_equal(result, expected)\n\n msg = "unsupported operand type|cannot subtract a datelike"\n with pytest.raises(TypeError, match=msg):\n with tm.assert_produces_warning(PerformanceWarning):\n tdarr - other\n\n with tm.assert_produces_warning(PerformanceWarning):\n result = other - tdarr\n\n expected = Index([Timedelta(0), Timedelta(0), Timestamp("2000-01-01")])\n expected = tm.box_expected(expected, xbox).astype(object)\n tm.assert_equal(result, expected)\n\n\nclass TestTimedeltaArraylikeMulDivOps:\n # Tests for timedelta64[ns]\n # __mul__, __rmul__, 
__div__, __rdiv__, __floordiv__, __rfloordiv__\n\n # ------------------------------------------------------------------\n # Multiplication\n # organized with scalar others first, then array-like\n\n def test_td64arr_mul_int(self, box_with_array):\n idx = TimedeltaIndex(np.arange(5, dtype="int64"))\n idx = tm.box_expected(idx, box_with_array)\n\n result = idx * 1\n tm.assert_equal(result, idx)\n\n result = 1 * idx\n tm.assert_equal(result, idx)\n\n def test_td64arr_mul_tdlike_scalar_raises(self, two_hours, box_with_array):\n rng = timedelta_range("1 days", "10 days", name="foo")\n rng = tm.box_expected(rng, box_with_array)\n msg = "|".join(\n [\n "argument must be an integer",\n "cannot use operands with types dtype",\n "Cannot multiply with",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n rng * two_hours\n\n def test_tdi_mul_int_array_zerodim(self, box_with_array):\n rng5 = np.arange(5, dtype="int64")\n idx = TimedeltaIndex(rng5)\n expected = TimedeltaIndex(rng5 * 5)\n\n idx = tm.box_expected(idx, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = idx * np.array(5, dtype="int64")\n tm.assert_equal(result, expected)\n\n def test_tdi_mul_int_array(self, box_with_array):\n rng5 = np.arange(5, dtype="int64")\n idx = TimedeltaIndex(rng5)\n expected = TimedeltaIndex(rng5**2)\n\n idx = tm.box_expected(idx, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = idx * rng5\n tm.assert_equal(result, expected)\n\n def test_tdi_mul_int_series(self, box_with_array):\n box = box_with_array\n xbox = Series if box in [Index, tm.to_array, pd.array] else box\n\n idx = TimedeltaIndex(np.arange(5, dtype="int64"))\n expected = TimedeltaIndex(np.arange(5, dtype="int64") ** 2)\n\n idx = tm.box_expected(idx, box)\n expected = tm.box_expected(expected, xbox)\n\n result = idx * Series(np.arange(5, dtype="int64"))\n tm.assert_equal(result, expected)\n\n def test_tdi_mul_float_series(self, box_with_array):\n box = 
box_with_array\n xbox = Series if box in [Index, tm.to_array, pd.array] else box\n\n idx = TimedeltaIndex(np.arange(5, dtype="int64"))\n idx = tm.box_expected(idx, box)\n\n rng5f = np.arange(5, dtype="float64")\n expected = TimedeltaIndex(rng5f * (rng5f + 1.0))\n expected = tm.box_expected(expected, xbox)\n\n result = idx * Series(rng5f + 1.0)\n tm.assert_equal(result, expected)\n\n # TODO: Put Series/DataFrame in others?\n @pytest.mark.parametrize(\n "other",\n [\n np.arange(1, 11),\n Index(np.arange(1, 11), np.int64),\n Index(range(1, 11), np.uint64),\n Index(range(1, 11), np.float64),\n pd.RangeIndex(1, 11),\n ],\n ids=lambda x: type(x).__name__,\n )\n def test_tdi_rmul_arraylike(self, other, box_with_array):\n box = box_with_array\n\n tdi = TimedeltaIndex(["1 Day"] * 10)\n expected = timedelta_range("1 days", "10 days")._with_freq(None)\n\n tdi = tm.box_expected(tdi, box)\n xbox = get_upcast_box(tdi, other)\n\n expected = tm.box_expected(expected, xbox)\n\n result = other * tdi\n tm.assert_equal(result, expected)\n commute = tdi * other\n tm.assert_equal(commute, expected)\n\n # ------------------------------------------------------------------\n # __div__, __rdiv__\n\n def test_td64arr_div_nat_invalid(self, box_with_array):\n # don't allow division by NaT (maybe could in the future)\n rng = timedelta_range("1 days", "10 days", name="foo")\n rng = tm.box_expected(rng, box_with_array)\n\n with pytest.raises(TypeError, match="unsupported operand type"):\n rng / NaT\n with pytest.raises(TypeError, match="Cannot divide NaTType by"):\n NaT / rng\n\n dt64nat = np.datetime64("NaT", "ns")\n msg = "|".join(\n [\n # 'divide' on npdev as of 2021-12-18\n "ufunc '(true_divide|divide)' cannot use operands",\n "cannot perform __r?truediv__",\n "Cannot divide datetime64 by TimedeltaArray",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n rng / dt64nat\n with pytest.raises(TypeError, match=msg):\n dt64nat / rng\n\n def test_td64arr_div_td64nat(self, box_with_array):\n # 
GH#23829\n box = box_with_array\n xbox = np.ndarray if box is pd.array else box\n\n rng = timedelta_range("1 days", "10 days")\n rng = tm.box_expected(rng, box)\n\n other = np.timedelta64("NaT")\n\n expected = np.array([np.nan] * 10)\n expected = tm.box_expected(expected, xbox)\n\n result = rng / other\n tm.assert_equal(result, expected)\n\n result = other / rng\n tm.assert_equal(result, expected)\n\n def test_td64arr_div_int(self, box_with_array):\n idx = TimedeltaIndex(np.arange(5, dtype="int64"))\n idx = tm.box_expected(idx, box_with_array)\n\n result = idx / 1\n tm.assert_equal(result, idx)\n\n with pytest.raises(TypeError, match="Cannot divide"):\n # GH#23829\n 1 / idx\n\n def test_td64arr_div_tdlike_scalar(self, two_hours, box_with_array):\n # GH#20088, GH#22163 ensure DataFrame returns correct dtype\n box = box_with_array\n xbox = np.ndarray if box is pd.array else box\n\n rng = timedelta_range("1 days", "10 days", name="foo")\n expected = Index((np.arange(10) + 1) * 12, dtype=np.float64, name="foo")\n\n rng = tm.box_expected(rng, box)\n expected = tm.box_expected(expected, xbox)\n\n result = rng / two_hours\n tm.assert_equal(result, expected)\n\n result = two_hours / rng\n expected = 1 / expected\n tm.assert_equal(result, expected)\n\n @pytest.mark.parametrize("m", [1, 3, 10])\n @pytest.mark.parametrize("unit", ["D", "h", "m", "s", "ms", "us", "ns"])\n def test_td64arr_div_td64_scalar(self, m, unit, box_with_array):\n box = box_with_array\n xbox = np.ndarray if box is pd.array else box\n\n ser = Series([Timedelta(days=59)] * 3)\n ser[2] = np.nan\n flat = ser\n ser = tm.box_expected(ser, box)\n\n # op\n expected = Series([x / np.timedelta64(m, unit) for x in flat])\n expected = tm.box_expected(expected, xbox)\n result = ser / np.timedelta64(m, unit)\n tm.assert_equal(result, expected)\n\n # reverse op\n expected = Series([Timedelta(np.timedelta64(m, unit)) / x for x in flat])\n expected = tm.box_expected(expected, xbox)\n result = np.timedelta64(m, unit) / 
ser\n tm.assert_equal(result, expected)\n\n def test_td64arr_div_tdlike_scalar_with_nat(self, two_hours, box_with_array):\n box = box_with_array\n xbox = np.ndarray if box is pd.array else box\n\n rng = TimedeltaIndex(["1 days", NaT, "2 days"], name="foo")\n expected = Index([12, np.nan, 24], dtype=np.float64, name="foo")\n\n rng = tm.box_expected(rng, box)\n expected = tm.box_expected(expected, xbox)\n\n result = rng / two_hours\n tm.assert_equal(result, expected)\n\n result = two_hours / rng\n expected = 1 / expected\n tm.assert_equal(result, expected)\n\n def test_td64arr_div_td64_ndarray(self, box_with_array):\n # GH#22631\n box = box_with_array\n xbox = np.ndarray if box is pd.array else box\n\n rng = TimedeltaIndex(["1 days", NaT, "2 days"])\n expected = Index([12, np.nan, 24], dtype=np.float64)\n\n rng = tm.box_expected(rng, box)\n expected = tm.box_expected(expected, xbox)\n\n other = np.array([2, 4, 2], dtype="m8[h]")\n result = rng / other\n tm.assert_equal(result, expected)\n\n result = rng / tm.box_expected(other, box)\n tm.assert_equal(result, expected)\n\n result = rng / other.astype(object)\n tm.assert_equal(result, expected.astype(object))\n\n result = rng / list(other)\n tm.assert_equal(result, expected)\n\n # reversed op\n expected = 1 / expected\n result = other / rng\n tm.assert_equal(result, expected)\n\n result = tm.box_expected(other, box) / rng\n tm.assert_equal(result, expected)\n\n result = other.astype(object) / rng\n tm.assert_equal(result, expected)\n\n result = list(other) / rng\n tm.assert_equal(result, expected)\n\n def test_tdarr_div_length_mismatch(self, box_with_array):\n rng = TimedeltaIndex(["1 days", NaT, "2 days"])\n mismatched = [1, 2, 3, 4]\n\n rng = tm.box_expected(rng, box_with_array)\n msg = "Cannot divide vectors|Unable to coerce to Series"\n for obj in [mismatched, mismatched[:2]]:\n # one shorter, one longer\n for other in [obj, np.array(obj), Index(obj)]:\n with pytest.raises(ValueError, match=msg):\n rng / other\n 
with pytest.raises(ValueError, match=msg):\n other / rng\n\n def test_td64_div_object_mixed_result(self, box_with_array):\n # Case where we having a NaT in the result inseat of timedelta64("NaT")\n # is misleading\n orig = timedelta_range("1 Day", periods=3).insert(1, NaT)\n tdi = tm.box_expected(orig, box_with_array, transpose=False)\n\n other = np.array([orig[0], 1.5, 2.0, orig[2]], dtype=object)\n other = tm.box_expected(other, box_with_array, transpose=False)\n\n res = tdi / other\n\n expected = Index([1.0, np.timedelta64("NaT", "ns"), orig[0], 1.5], dtype=object)\n expected = tm.box_expected(expected, box_with_array, transpose=False)\n if isinstance(expected, NumpyExtensionArray):\n expected = expected.to_numpy()\n tm.assert_equal(res, expected)\n if box_with_array is DataFrame:\n # We have a np.timedelta64(NaT), not pd.NaT\n assert isinstance(res.iloc[1, 0], np.timedelta64)\n\n res = tdi // other\n\n expected = Index([1, np.timedelta64("NaT", "ns"), orig[0], 1], dtype=object)\n expected = tm.box_expected(expected, box_with_array, transpose=False)\n if isinstance(expected, NumpyExtensionArray):\n expected = expected.to_numpy()\n tm.assert_equal(res, expected)\n if box_with_array is DataFrame:\n # We have a np.timedelta64(NaT), not pd.NaT\n assert isinstance(res.iloc[1, 0], np.timedelta64)\n\n # ------------------------------------------------------------------\n # __floordiv__, __rfloordiv__\n\n def test_td64arr_floordiv_td64arr_with_nat(\n self, box_with_array, using_array_manager\n ):\n # GH#35529\n box = box_with_array\n xbox = np.ndarray if box is pd.array else box\n\n left = Series([1000, 222330, 30], dtype="timedelta64[ns]")\n right = Series([1000, 222330, None], dtype="timedelta64[ns]")\n\n left = tm.box_expected(left, box)\n right = tm.box_expected(right, box)\n\n expected = np.array([1.0, 1.0, np.nan], dtype=np.float64)\n expected = tm.box_expected(expected, xbox)\n if box is DataFrame and using_array_manager:\n # INFO(ArrayManager) floordiv returns 
integer, and ArrayManager\n # performs ops column-wise and thus preserves int64 dtype for\n # columns without missing values\n expected[[0, 1]] = expected[[0, 1]].astype("int64")\n\n with tm.maybe_produces_warning(\n RuntimeWarning, box is pd.array, check_stacklevel=False\n ):\n result = left // right\n\n tm.assert_equal(result, expected)\n\n # case that goes through __rfloordiv__ with arraylike\n with tm.maybe_produces_warning(\n RuntimeWarning, box is pd.array, check_stacklevel=False\n ):\n result = np.asarray(left) // right\n tm.assert_equal(result, expected)\n\n @pytest.mark.filterwarnings("ignore:invalid value encountered:RuntimeWarning")\n def test_td64arr_floordiv_tdscalar(self, box_with_array, scalar_td):\n # GH#18831, GH#19125\n box = box_with_array\n xbox = np.ndarray if box is pd.array else box\n td = Timedelta("5m3s") # i.e. (scalar_td - 1sec) / 2\n\n td1 = Series([td, td, NaT], dtype="m8[ns]")\n td1 = tm.box_expected(td1, box, transpose=False)\n\n expected = Series([0, 0, np.nan])\n expected = tm.box_expected(expected, xbox, transpose=False)\n\n result = td1 // scalar_td\n tm.assert_equal(result, expected)\n\n # Reversed op\n expected = Series([2, 2, np.nan])\n expected = tm.box_expected(expected, xbox, transpose=False)\n\n result = scalar_td // td1\n tm.assert_equal(result, expected)\n\n # same thing buts let's be explicit about calling __rfloordiv__\n result = td1.__rfloordiv__(scalar_td)\n tm.assert_equal(result, expected)\n\n def test_td64arr_floordiv_int(self, box_with_array):\n idx = TimedeltaIndex(np.arange(5, dtype="int64"))\n idx = tm.box_expected(idx, box_with_array)\n result = idx // 1\n tm.assert_equal(result, idx)\n\n pattern = "floor_divide cannot use operands|Cannot divide int by Timedelta*"\n with pytest.raises(TypeError, match=pattern):\n 1 // idx\n\n # ------------------------------------------------------------------\n # mod, divmod\n # TODO: operations with timedelta-like arrays, numeric arrays,\n # reversed ops\n\n def 
test_td64arr_mod_tdscalar(self, box_with_array, three_days):\n tdi = timedelta_range("1 Day", "9 days")\n tdarr = tm.box_expected(tdi, box_with_array)\n\n expected = TimedeltaIndex(["1 Day", "2 Days", "0 Days"] * 3)\n expected = tm.box_expected(expected, box_with_array)\n\n result = tdarr % three_days\n tm.assert_equal(result, expected)\n\n warn = None\n if box_with_array is DataFrame and isinstance(three_days, pd.DateOffset):\n warn = PerformanceWarning\n # TODO: making expected be object here a result of DataFrame.__divmod__\n # being defined in a naive way that does not dispatch to the underlying\n # array's __divmod__\n expected = expected.astype(object)\n\n with tm.assert_produces_warning(warn):\n result = divmod(tdarr, three_days)\n\n tm.assert_equal(result[1], expected)\n tm.assert_equal(result[0], tdarr // three_days)\n\n def test_td64arr_mod_int(self, box_with_array):\n tdi = timedelta_range("1 ns", "10 ns", periods=10)\n tdarr = tm.box_expected(tdi, box_with_array)\n\n expected = TimedeltaIndex(["1 ns", "0 ns"] * 5)\n expected = tm.box_expected(expected, box_with_array)\n\n result = tdarr % 2\n tm.assert_equal(result, expected)\n\n msg = "Cannot divide int by"\n with pytest.raises(TypeError, match=msg):\n 2 % tdarr\n\n result = divmod(tdarr, 2)\n tm.assert_equal(result[1], expected)\n tm.assert_equal(result[0], tdarr // 2)\n\n def test_td64arr_rmod_tdscalar(self, box_with_array, three_days):\n tdi = timedelta_range("1 Day", "9 days")\n tdarr = tm.box_expected(tdi, box_with_array)\n\n expected = ["0 Days", "1 Day", "0 Days"] + ["3 Days"] * 6\n expected = TimedeltaIndex(expected)\n expected = tm.box_expected(expected, box_with_array)\n\n result = three_days % tdarr\n tm.assert_equal(result, expected)\n\n result = divmod(three_days, tdarr)\n tm.assert_equal(result[1], expected)\n tm.assert_equal(result[0], three_days // tdarr)\n\n # ------------------------------------------------------------------\n # Operations with invalid others\n\n def 
test_td64arr_mul_tdscalar_invalid(self, box_with_array, scalar_td):\n td1 = Series([timedelta(minutes=5, seconds=3)] * 3)\n td1.iloc[2] = np.nan\n\n td1 = tm.box_expected(td1, box_with_array)\n\n # check that we are getting a TypeError\n # with 'operate' (from core/ops.py) for the ops that are not\n # defined\n pattern = "operate|unsupported|cannot|not supported"\n with pytest.raises(TypeError, match=pattern):\n td1 * scalar_td\n with pytest.raises(TypeError, match=pattern):\n scalar_td * td1\n\n def test_td64arr_mul_too_short_raises(self, box_with_array):\n idx = TimedeltaIndex(np.arange(5, dtype="int64"))\n idx = tm.box_expected(idx, box_with_array)\n msg = "|".join(\n [\n "cannot use operands with types dtype",\n "Cannot multiply with unequal lengths",\n "Unable to coerce to Series",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n # length check before dtype check\n idx * idx[:3]\n with pytest.raises(ValueError, match=msg):\n idx * np.array([1, 2])\n\n def test_td64arr_mul_td64arr_raises(self, box_with_array):\n idx = TimedeltaIndex(np.arange(5, dtype="int64"))\n idx = tm.box_expected(idx, box_with_array)\n msg = "cannot use operands with types dtype"\n with pytest.raises(TypeError, match=msg):\n idx * idx\n\n # ------------------------------------------------------------------\n # Operations with numeric others\n\n def test_td64arr_mul_numeric_scalar(self, box_with_array, one):\n # GH#4521\n # divide/multiply by integers\n tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")\n expected = Series(["-59 Days", "-59 Days", "NaT"], dtype="timedelta64[ns]")\n\n tdser = tm.box_expected(tdser, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = tdser * (-one)\n tm.assert_equal(result, expected)\n result = (-one) * tdser\n tm.assert_equal(result, expected)\n\n expected = Series(["118 Days", "118 Days", "NaT"], dtype="timedelta64[ns]")\n expected = tm.box_expected(expected, box_with_array)\n\n result = tdser * (2 * one)\n 
tm.assert_equal(result, expected)\n result = (2 * one) * tdser\n tm.assert_equal(result, expected)\n\n @pytest.mark.parametrize("two", [2, 2.0, np.array(2), np.array(2.0)])\n def test_td64arr_div_numeric_scalar(self, box_with_array, two):\n # GH#4521\n # divide/multiply by integers\n tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")\n expected = Series(["29.5D", "29.5D", "NaT"], dtype="timedelta64[ns]")\n\n tdser = tm.box_expected(tdser, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = tdser / two\n tm.assert_equal(result, expected)\n\n with pytest.raises(TypeError, match="Cannot divide"):\n two / tdser\n\n @pytest.mark.parametrize("two", [2, 2.0, np.array(2), np.array(2.0)])\n def test_td64arr_floordiv_numeric_scalar(self, box_with_array, two):\n tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")\n expected = Series(["29.5D", "29.5D", "NaT"], dtype="timedelta64[ns]")\n\n tdser = tm.box_expected(tdser, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = tdser // two\n tm.assert_equal(result, expected)\n\n with pytest.raises(TypeError, match="Cannot divide"):\n two // tdser\n\n @pytest.mark.parametrize(\n "vector",\n [np.array([20, 30, 40]), Index([20, 30, 40]), Series([20, 30, 40])],\n ids=lambda x: type(x).__name__,\n )\n def test_td64arr_rmul_numeric_array(\n self,\n box_with_array,\n vector,\n any_real_numpy_dtype,\n ):\n # GH#4521\n # divide/multiply by integers\n\n tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")\n vector = vector.astype(any_real_numpy_dtype)\n\n expected = Series(["1180 Days", "1770 Days", "NaT"], dtype="timedelta64[ns]")\n\n tdser = tm.box_expected(tdser, box_with_array)\n xbox = get_upcast_box(tdser, vector)\n\n expected = tm.box_expected(expected, xbox)\n\n result = tdser * vector\n tm.assert_equal(result, expected)\n\n result = vector * tdser\n tm.assert_equal(result, expected)\n\n @pytest.mark.parametrize(\n "vector",\n [np.array([20, 
30, 40]), Index([20, 30, 40]), Series([20, 30, 40])],\n ids=lambda x: type(x).__name__,\n )\n def test_td64arr_div_numeric_array(\n self, box_with_array, vector, any_real_numpy_dtype\n ):\n # GH#4521\n # divide/multiply by integers\n\n tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")\n vector = vector.astype(any_real_numpy_dtype)\n\n expected = Series(["2.95D", "1D 23h 12m", "NaT"], dtype="timedelta64[ns]")\n\n tdser = tm.box_expected(tdser, box_with_array)\n xbox = get_upcast_box(tdser, vector)\n expected = tm.box_expected(expected, xbox)\n\n result = tdser / vector\n tm.assert_equal(result, expected)\n\n pattern = "|".join(\n [\n "true_divide'? cannot use operands",\n "cannot perform __div__",\n "cannot perform __truediv__",\n "unsupported operand",\n "Cannot divide",\n "ufunc 'divide' cannot use operands with types",\n ]\n )\n with pytest.raises(TypeError, match=pattern):\n vector / tdser\n\n result = tdser / vector.astype(object)\n if box_with_array is DataFrame:\n expected = [tdser.iloc[0, n] / vector[n] for n in range(len(vector))]\n expected = tm.box_expected(expected, xbox).astype(object)\n # We specifically expect timedelta64("NaT") here, not pd.NA\n msg = "The 'downcast' keyword in fillna"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n expected[2] = expected[2].fillna(\n np.timedelta64("NaT", "ns"), downcast=False\n )\n else:\n expected = [tdser[n] / vector[n] for n in range(len(tdser))]\n expected = [\n x if x is not NaT else np.timedelta64("NaT", "ns") for x in expected\n ]\n if xbox is tm.to_array:\n expected = tm.to_array(expected).astype(object)\n else:\n expected = xbox(expected, dtype=object)\n\n tm.assert_equal(result, expected)\n\n with pytest.raises(TypeError, match=pattern):\n vector.astype(object) / tdser\n\n def test_td64arr_mul_int_series(self, box_with_array, names):\n # GH#19042 test for correct name attachment\n box = box_with_array\n exname = get_expected_name(box, names)\n\n tdi = TimedeltaIndex(\n ["0days", 
"1day", "2days", "3days", "4days"], name=names[0]\n )\n # TODO: Should we be parametrizing over types for `ser` too?\n ser = Series([0, 1, 2, 3, 4], dtype=np.int64, name=names[1])\n\n expected = Series(\n ["0days", "1day", "4days", "9days", "16days"],\n dtype="timedelta64[ns]",\n name=exname,\n )\n\n tdi = tm.box_expected(tdi, box)\n xbox = get_upcast_box(tdi, ser)\n\n expected = tm.box_expected(expected, xbox)\n\n result = ser * tdi\n tm.assert_equal(result, expected)\n\n result = tdi * ser\n tm.assert_equal(result, expected)\n\n # TODO: Should we be parametrizing over types for `ser` too?\n def test_float_series_rdiv_td64arr(self, box_with_array, names):\n # GH#19042 test for correct name attachment\n box = box_with_array\n tdi = TimedeltaIndex(\n ["0days", "1day", "2days", "3days", "4days"], name=names[0]\n )\n ser = Series([1.5, 3, 4.5, 6, 7.5], dtype=np.float64, name=names[1])\n\n xname = names[2] if box not in [tm.to_array, pd.array] else names[1]\n expected = Series(\n [tdi[n] / ser[n] for n in range(len(ser))],\n dtype="timedelta64[ns]",\n name=xname,\n )\n\n tdi = tm.box_expected(tdi, box)\n xbox = get_upcast_box(tdi, ser)\n expected = tm.box_expected(expected, xbox)\n\n result = ser.__rtruediv__(tdi)\n if box is DataFrame:\n assert result is NotImplemented\n else:\n tm.assert_equal(result, expected)\n\n def test_td64arr_all_nat_div_object_dtype_numeric(self, box_with_array):\n # GH#39750 make sure we infer the result as td64\n tdi = TimedeltaIndex([NaT, NaT])\n\n left = tm.box_expected(tdi, box_with_array)\n right = np.array([2, 2.0], dtype=object)\n\n tdnat = np.timedelta64("NaT", "ns")\n expected = Index([tdnat] * 2, dtype=object)\n if box_with_array is not Index:\n expected = tm.box_expected(expected, box_with_array).astype(object)\n if box_with_array in [Series, DataFrame]:\n msg = "The 'downcast' keyword in fillna is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n expected = expected.fillna(tdnat, downcast=False) # 
GH#18463\n\n result = left / right\n tm.assert_equal(result, expected)\n\n result = left // right\n tm.assert_equal(result, expected)\n\n\nclass TestTimedelta64ArrayLikeArithmetic:\n # Arithmetic tests for timedelta64[ns] vectors fully parametrized over\n # DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all arithmetic\n # tests will eventually end up here.\n\n def test_td64arr_pow_invalid(self, scalar_td, box_with_array):\n td1 = Series([timedelta(minutes=5, seconds=3)] * 3)\n td1.iloc[2] = np.nan\n\n td1 = tm.box_expected(td1, box_with_array)\n\n # check that we are getting a TypeError\n # with 'operate' (from core/ops.py) for the ops that are not\n # defined\n pattern = "operate|unsupported|cannot|not supported"\n with pytest.raises(TypeError, match=pattern):\n scalar_td**td1\n\n with pytest.raises(TypeError, match=pattern):\n td1**scalar_td\n\n\ndef test_add_timestamp_to_timedelta():\n # GH: 35897\n timestamp = Timestamp("2021-01-01")\n result = timestamp + timedelta_range("0s", "1s", periods=31)\n expected = DatetimeIndex(\n [\n timestamp\n + (\n pd.to_timedelta("0.033333333s") * i\n + pd.to_timedelta("0.000000001s") * divmod(i, 3)[0]\n )\n for i in range(31)\n ]\n )\n tm.assert_index_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\arithmetic\test_timedelta64.py | test_timedelta64.py | Python | 78,911 | 0.75 | 0.080312 | 0.101783 | node-utils | 977 | 2024-11-24T05:19:46.164609 | BSD-3-Clause | true | e2ed392cfdea17183a2267e7d7df8363 |
\n\n | .venv\Lib\site-packages\pandas\tests\arithmetic\__pycache__\common.cpython-313.pyc | common.cpython-313.pyc | Other | 6,320 | 0.95 | 0.033898 | 0 | awesome-app | 522 | 2024-09-06T12:32:52.640642 | Apache-2.0 | true | c4cdaf5a39d16fcaeb68973b98b2a6c1 |
\n\n | .venv\Lib\site-packages\pandas\tests\arithmetic\__pycache__\conftest.cpython-313.pyc | conftest.cpython-313.pyc | Other | 5,878 | 0.8 | 0.009615 | 0 | python-kit | 182 | 2024-03-26T23:52:35.638789 | Apache-2.0 | true | fd9a8ec491389b028d33fe7dbeff154c |
\n\n | .venv\Lib\site-packages\pandas\tests\arithmetic\__pycache__\test_array_ops.cpython-313.pyc | test_array_ops.cpython-313.pyc | Other | 2,078 | 0.8 | 0 | 0 | python-kit | 834 | 2023-12-29T04:49:36.714941 | Apache-2.0 | true | dc18c85f1753c46d24517f0388b4b9bc |
\n\n | .venv\Lib\site-packages\pandas\tests\arithmetic\__pycache__\test_categorical.cpython-313.pyc | test_categorical.cpython-313.pyc | Other | 1,626 | 0.8 | 0 | 0 | node-utils | 918 | 2024-06-10T22:59:09.379719 | Apache-2.0 | true | b039c975bdbce1f7ac3ce007fe98e7a5 |
\n\n | .venv\Lib\site-packages\pandas\tests\arithmetic\__pycache__\test_interval.cpython-313.pyc | test_interval.cpython-313.pyc | Other | 14,600 | 0.8 | 0.026087 | 0.008929 | python-kit | 219 | 2024-04-25T02:07:40.159516 | MIT | true | d2889feca71ee4edcc8bd7e44ec338eb |
\n\n | .venv\Lib\site-packages\pandas\tests\arithmetic\__pycache__\test_numeric.cpython-313.pyc | test_numeric.cpython-313.pyc | Other | 86,588 | 0.75 | 0.007317 | 0.005025 | node-utils | 64 | 2024-04-29T14:17:22.702402 | BSD-3-Clause | true | 6f59d74c5f12a1e0ea197bfc3ea15edd |
\n\n | .venv\Lib\site-packages\pandas\tests\arithmetic\__pycache__\test_object.cpython-313.pyc | test_object.cpython-313.pyc | Other | 21,460 | 0.8 | 0 | 0.012876 | react-lib | 711 | 2023-08-23T05:20:35.026273 | BSD-3-Clause | true | c6814c24dc2b2d0f749253521f883e9a |
\n\n | .venv\Lib\site-packages\pandas\tests\arithmetic\__pycache__\test_period.cpython-313.pyc | test_period.cpython-313.pyc | Other | 85,521 | 0.6 | 0.00578 | 0.000979 | vue-tools | 13 | 2024-07-06T01:20:18.139968 | Apache-2.0 | true | 37e34426f96410dbb32394de97b81544 |
\n\n | .venv\Lib\site-packages\pandas\tests\arithmetic\__pycache__\test_timedelta64.cpython-313.pyc | test_timedelta64.cpython-313.pyc | Other | 105,132 | 0.6 | 0.00216 | 0.000729 | node-utils | 971 | 2025-05-20T17:56:59.230307 | GPL-3.0 | true | 2a8e3f964e564bbb60d02c6c0bff65bf |
\n\n | .venv\Lib\site-packages\pandas\tests\arithmetic\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 198 | 0.7 | 0 | 0 | vue-tools | 335 | 2023-07-30T11:52:57.005446 | MIT | true | 17db7af810e5618a5a72f9d722d0fd50 |
"""\nTests shared by MaskedArray subclasses.\n"""\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.tests.extension.base import BaseOpsUtil\n\n\nclass ComparisonOps(BaseOpsUtil):\n def _compare_other(self, data, op, other):\n # array\n result = pd.Series(op(data, other))\n expected = pd.Series(op(data._data, other), dtype="boolean")\n\n # fill the nan locations\n expected[data._mask] = pd.NA\n\n tm.assert_series_equal(result, expected)\n\n # series\n ser = pd.Series(data)\n result = op(ser, other)\n\n # Set nullable dtype here to avoid upcasting when setting to pd.NA below\n expected = op(pd.Series(data._data), other).astype("boolean")\n\n # fill the nan locations\n expected[data._mask] = pd.NA\n\n tm.assert_series_equal(result, expected)\n\n # subclass will override to parametrize 'other'\n def test_scalar(self, other, comparison_op, dtype):\n op = comparison_op\n left = pd.array([1, 0, None], dtype=dtype)\n\n result = op(left, other)\n\n if other is pd.NA:\n expected = pd.array([None, None, None], dtype="boolean")\n else:\n values = op(left._data, other)\n expected = pd.arrays.BooleanArray(values, left._mask, copy=True)\n tm.assert_extension_array_equal(result, expected)\n\n # ensure we haven't mutated anything inplace\n result[0] = pd.NA\n tm.assert_extension_array_equal(left, pd.array([1, 0, None], dtype=dtype))\n\n\nclass NumericOps:\n # Shared by IntegerArray and FloatingArray, not BooleanArray\n\n def test_searchsorted_nan(self, dtype):\n # The base class casts to object dtype, for which searchsorted returns\n # 0 from the left and 10 from the right.\n arr = pd.array(range(10), dtype=dtype)\n\n assert arr.searchsorted(np.nan, side="left") == 10\n assert arr.searchsorted(np.nan, side="right") == 10\n\n def test_no_shared_mask(self, data):\n result = data + 1\n assert not tm.shares_memory(result, data)\n\n def test_array(self, comparison_op, dtype):\n op = comparison_op\n\n left = pd.array([0, 1, 2, None, 
None, None], dtype=dtype)\n right = pd.array([0, 1, None, 0, 1, None], dtype=dtype)\n\n result = op(left, right)\n values = op(left._data, right._data)\n mask = left._mask | right._mask\n\n expected = pd.arrays.BooleanArray(values, mask)\n tm.assert_extension_array_equal(result, expected)\n\n # ensure we haven't mutated anything inplace\n result[0] = pd.NA\n tm.assert_extension_array_equal(\n left, pd.array([0, 1, 2, None, None, None], dtype=dtype)\n )\n tm.assert_extension_array_equal(\n right, pd.array([0, 1, None, 0, 1, None], dtype=dtype)\n )\n\n def test_compare_with_booleanarray(self, comparison_op, dtype):\n op = comparison_op\n\n left = pd.array([True, False, None] * 3, dtype="boolean")\n right = pd.array([0] * 3 + [1] * 3 + [None] * 3, dtype=dtype)\n other = pd.array([False] * 3 + [True] * 3 + [None] * 3, dtype="boolean")\n\n expected = op(left, other)\n result = op(left, right)\n tm.assert_extension_array_equal(result, expected)\n\n # reversed op\n expected = op(other, left)\n result = op(right, left)\n tm.assert_extension_array_equal(result, expected)\n\n def test_compare_to_string(self, dtype):\n # GH#28930\n ser = pd.Series([1, None], dtype=dtype)\n result = ser == "a"\n expected = pd.Series([False, pd.NA], dtype="boolean")\n\n tm.assert_series_equal(result, expected)\n\n def test_ufunc_with_out(self, dtype):\n arr = pd.array([1, 2, 3], dtype=dtype)\n arr2 = pd.array([1, 2, pd.NA], dtype=dtype)\n\n mask = arr == arr\n mask2 = arr2 == arr2\n\n result = np.zeros(3, dtype=bool)\n result |= mask\n # If MaskedArray.__array_ufunc__ handled "out" appropriately,\n # `result` should still be an ndarray.\n assert isinstance(result, np.ndarray)\n assert result.all()\n\n # result |= mask worked because mask could be cast losslessly to\n # boolean ndarray. 
mask2 can't, so this raises\n result = np.zeros(3, dtype=bool)\n msg = "Specify an appropriate 'na_value' for this dtype"\n with pytest.raises(ValueError, match=msg):\n result |= mask2\n\n # addition\n res = np.add(arr, arr2)\n expected = pd.array([2, 4, pd.NA], dtype=dtype)\n tm.assert_extension_array_equal(res, expected)\n\n # when passing out=arr, we will modify 'arr' inplace.\n res = np.add(arr, arr2, out=arr)\n assert res is arr\n tm.assert_extension_array_equal(res, expected)\n tm.assert_extension_array_equal(arr, expected)\n\n def test_mul_td64_array(self, dtype):\n # GH#45622\n arr = pd.array([1, 2, pd.NA], dtype=dtype)\n other = np.arange(3, dtype=np.int64).view("m8[ns]")\n\n result = arr * other\n expected = pd.array([pd.Timedelta(0), pd.Timedelta(2), pd.NaT])\n tm.assert_extension_array_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\arrays\masked_shared.py | masked_shared.py | Python | 5,194 | 0.95 | 0.097403 | 0.17094 | python-kit | 778 | 2024-06-20T15:33:33.171417 | BSD-3-Clause | true | 9c66dfc807d76978b5a84d3b88c7e4de |
"""Tests for the top-level ``pd.array()`` construction API: explicit dtypes,
dtype inference, error cases, and unboxing of Series/Index inputs."""
import datetime
import decimal
import re

import numpy as np
import pytest
import pytz

from pandas._config import using_string_dtype

import pandas as pd
import pandas._testing as tm
from pandas.api.extensions import register_extension_dtype
from pandas.arrays import (
    BooleanArray,
    DatetimeArray,
    FloatingArray,
    IntegerArray,
    IntervalArray,
    SparseArray,
    TimedeltaArray,
)
from pandas.core.arrays import (
    NumpyExtensionArray,
    period_array,
)
from pandas.tests.extension.decimal import (
    DecimalArray,
    DecimalDtype,
    to_decimal,
)


# NOTE: fixed duplicate parametrize entry — the fourth case previously
# repeated "M8[m]"; it is meant to be the timedelta64-minutes unit "m8[m]".
@pytest.mark.parametrize("dtype_unit", ["M8[h]", "M8[m]", "m8[h]", "m8[m]"])
def test_dt64_array(dtype_unit):
    # PR 53817
    # Non-supported datetime64/timedelta64 resolutions warn on construction.
    dtype_var = np.dtype(dtype_unit)
    msg = (
        r"datetime64 and timedelta64 dtype resolutions other than "
        r"'s', 'ms', 'us', and 'ns' are deprecated. "
        r"In future releases passing unsupported resolutions will "
        r"raise an exception."
    )
    with tm.assert_produces_warning(FutureWarning, match=re.escape(msg)):
        pd.array([], dtype=dtype_var)


@pytest.mark.parametrize(
    "data, dtype, expected",
    [
        # Basic NumPy defaults.
        ([], None, FloatingArray._from_sequence([], dtype="Float64")),
        ([1, 2], None, IntegerArray._from_sequence([1, 2], dtype="Int64")),
        ([1, 2], object, NumpyExtensionArray(np.array([1, 2], dtype=object))),
        (
            [1, 2],
            np.dtype("float32"),
            NumpyExtensionArray(np.array([1.0, 2.0], dtype=np.dtype("float32"))),
        ),
        (
            np.array([], dtype=object),
            None,
            NumpyExtensionArray(np.array([], dtype=object)),
        ),
        (
            np.array([1, 2], dtype="int64"),
            None,
            IntegerArray._from_sequence([1, 2], dtype="Int64"),
        ),
        (
            np.array([1.0, 2.0], dtype="float64"),
            None,
            FloatingArray._from_sequence([1.0, 2.0], dtype="Float64"),
        ),
        # String alias passes through to NumPy
        ([1, 2], "float32", NumpyExtensionArray(np.array([1, 2], dtype="float32"))),
        ([1, 2], "int64", NumpyExtensionArray(np.array([1, 2], dtype=np.int64))),
        # GH#44715 FloatingArray does not support float16, so fall
        # back to NumpyExtensionArray
        (
            np.array([1, 2], dtype=np.float16),
            None,
            NumpyExtensionArray(np.array([1, 2], dtype=np.float16)),
        ),
        # idempotency with e.g. pd.array(pd.array([1, 2], dtype="int64"))
        (
            NumpyExtensionArray(np.array([1, 2], dtype=np.int32)),
            None,
            NumpyExtensionArray(np.array([1, 2], dtype=np.int32)),
        ),
        # Period alias
        (
            [pd.Period("2000", "D"), pd.Period("2001", "D")],
            "Period[D]",
            period_array(["2000", "2001"], freq="D"),
        ),
        # Period dtype
        (
            [pd.Period("2000", "D")],
            pd.PeriodDtype("D"),
            period_array(["2000"], freq="D"),
        ),
        # Datetime (naive)
        (
            [1, 2],
            np.dtype("datetime64[ns]"),
            DatetimeArray._from_sequence(
                np.array([1, 2], dtype="M8[ns]"), dtype="M8[ns]"
            ),
        ),
        (
            [1, 2],
            np.dtype("datetime64[s]"),
            DatetimeArray._from_sequence(
                np.array([1, 2], dtype="M8[s]"), dtype="M8[s]"
            ),
        ),
        (
            np.array([1, 2], dtype="datetime64[ns]"),
            None,
            DatetimeArray._from_sequence(
                np.array([1, 2], dtype="M8[ns]"), dtype="M8[ns]"
            ),
        ),
        (
            pd.DatetimeIndex(["2000", "2001"]),
            np.dtype("datetime64[ns]"),
            DatetimeArray._from_sequence(["2000", "2001"], dtype="M8[ns]"),
        ),
        (
            pd.DatetimeIndex(["2000", "2001"]),
            None,
            DatetimeArray._from_sequence(["2000", "2001"], dtype="M8[ns]"),
        ),
        (
            ["2000", "2001"],
            np.dtype("datetime64[ns]"),
            DatetimeArray._from_sequence(["2000", "2001"], dtype="M8[ns]"),
        ),
        # Datetime (tz-aware)
        (
            ["2000", "2001"],
            pd.DatetimeTZDtype(tz="CET"),
            DatetimeArray._from_sequence(
                ["2000", "2001"], dtype=pd.DatetimeTZDtype(tz="CET")
            ),
        ),
        # Timedelta
        (
            ["1h", "2h"],
            np.dtype("timedelta64[ns]"),
            TimedeltaArray._from_sequence(["1h", "2h"], dtype="m8[ns]"),
        ),
        (
            pd.TimedeltaIndex(["1h", "2h"]),
            np.dtype("timedelta64[ns]"),
            TimedeltaArray._from_sequence(["1h", "2h"], dtype="m8[ns]"),
        ),
        (
            np.array([1, 2], dtype="m8[s]"),
            np.dtype("timedelta64[s]"),
            TimedeltaArray._from_sequence(
                np.array([1, 2], dtype="m8[s]"), dtype="m8[s]"
            ),
        ),
        (
            pd.TimedeltaIndex(["1h", "2h"]),
            None,
            TimedeltaArray._from_sequence(["1h", "2h"], dtype="m8[ns]"),
        ),
        (
            # preserve non-nano, i.e. don't cast to NumpyExtensionArray
            TimedeltaArray._simple_new(
                np.arange(5, dtype=np.int64).view("m8[s]"), dtype=np.dtype("m8[s]")
            ),
            None,
            TimedeltaArray._simple_new(
                np.arange(5, dtype=np.int64).view("m8[s]"), dtype=np.dtype("m8[s]")
            ),
        ),
        (
            # preserve non-nano, i.e. don't cast to NumpyExtensionArray
            TimedeltaArray._simple_new(
                np.arange(5, dtype=np.int64).view("m8[s]"), dtype=np.dtype("m8[s]")
            ),
            np.dtype("m8[s]"),
            TimedeltaArray._simple_new(
                np.arange(5, dtype=np.int64).view("m8[s]"), dtype=np.dtype("m8[s]")
            ),
        ),
        # Category
        (["a", "b"], "category", pd.Categorical(["a", "b"])),
        (
            ["a", "b"],
            pd.CategoricalDtype(None, ordered=True),
            pd.Categorical(["a", "b"], ordered=True),
        ),
        # Interval
        (
            [pd.Interval(1, 2), pd.Interval(3, 4)],
            "interval",
            IntervalArray.from_tuples([(1, 2), (3, 4)]),
        ),
        # Sparse
        ([0, 1], "Sparse[int64]", SparseArray([0, 1], dtype="int64")),
        # IntegerNA
        ([1, None], "Int16", pd.array([1, None], dtype="Int16")),
        (
            pd.Series([1, 2]),
            None,
            NumpyExtensionArray(np.array([1, 2], dtype=np.int64)),
        ),
        # String
        (
            ["a", None],
            "string",
            pd.StringDtype()
            .construct_array_type()
            ._from_sequence(["a", None], dtype=pd.StringDtype()),
        ),
        (
            ["a", None],
            "str",
            pd.StringDtype(na_value=np.nan)
            .construct_array_type()
            ._from_sequence(["a", None], dtype=pd.StringDtype(na_value=np.nan))
            if using_string_dtype()
            else NumpyExtensionArray(np.array(["a", "None"])),
        ),
        (
            ["a", None],
            pd.StringDtype(),
            pd.StringDtype()
            .construct_array_type()
            ._from_sequence(["a", None], dtype=pd.StringDtype()),
        ),
        (
            ["a", None],
            pd.StringDtype(na_value=np.nan),
            pd.StringDtype(na_value=np.nan)
            .construct_array_type()
            ._from_sequence(["a", None], dtype=pd.StringDtype(na_value=np.nan)),
        ),
        (
            # numpy array with string dtype
            np.array(["a", "b"], dtype=str),
            pd.StringDtype(),
            pd.StringDtype()
            .construct_array_type()
            ._from_sequence(["a", "b"], dtype=pd.StringDtype()),
        ),
        (
            # numpy array with string dtype
            np.array(["a", "b"], dtype=str),
            pd.StringDtype(na_value=np.nan),
            pd.StringDtype(na_value=np.nan)
            .construct_array_type()
            ._from_sequence(["a", "b"], dtype=pd.StringDtype(na_value=np.nan)),
        ),
        # Boolean
        (
            [True, None],
            "boolean",
            BooleanArray._from_sequence([True, None], dtype="boolean"),
        ),
        (
            [True, None],
            pd.BooleanDtype(),
            BooleanArray._from_sequence([True, None], dtype="boolean"),
        ),
        # Index
        (pd.Index([1, 2]), None, NumpyExtensionArray(np.array([1, 2], dtype=np.int64))),
        # Series[EA] returns the EA
        (
            pd.Series(pd.Categorical(["a", "b"], categories=["a", "b", "c"])),
            None,
            pd.Categorical(["a", "b"], categories=["a", "b", "c"]),
        ),
        # "3rd party" EAs work
        ([decimal.Decimal(0), decimal.Decimal(1)], "decimal", to_decimal([0, 1])),
        # pass an ExtensionArray, but a different dtype
        (
            period_array(["2000", "2001"], freq="D"),
            "category",
            pd.Categorical([pd.Period("2000", "D"), pd.Period("2001", "D")]),
        ),
    ],
)
def test_array(data, dtype, expected):
    # pd.array with an explicit dtype returns the corresponding EA.
    result = pd.array(data, dtype=dtype)
    tm.assert_equal(result, expected)


def test_array_copy():
    a = np.array([1, 2])
    # default is to copy
    b = pd.array(a, dtype=a.dtype)
    assert not tm.shares_memory(a, b)

    # copy=True
    b = pd.array(a, dtype=a.dtype, copy=True)
    assert not tm.shares_memory(a, b)

    # copy=False
    b = pd.array(a, dtype=a.dtype, copy=False)
    assert tm.shares_memory(a, b)


cet = pytz.timezone("CET")


@pytest.mark.parametrize(
    "data, expected",
    [
        # period
        (
            [pd.Period("2000", "D"), pd.Period("2001", "D")],
            period_array(["2000", "2001"], freq="D"),
        ),
        # interval
        ([pd.Interval(0, 1), pd.Interval(1, 2)], IntervalArray.from_breaks([0, 1, 2])),
        # datetime
        (
            [pd.Timestamp("2000"), pd.Timestamp("2001")],
            DatetimeArray._from_sequence(["2000", "2001"], dtype="M8[ns]"),
        ),
        (
            [datetime.datetime(2000, 1, 1), datetime.datetime(2001, 1, 1)],
            DatetimeArray._from_sequence(["2000", "2001"], dtype="M8[ns]"),
        ),
        (
            np.array([1, 2], dtype="M8[ns]"),
            DatetimeArray._from_sequence(np.array([1, 2], dtype="M8[ns]")),
        ),
        (
            np.array([1, 2], dtype="M8[us]"),
            DatetimeArray._simple_new(
                np.array([1, 2], dtype="M8[us]"), dtype=np.dtype("M8[us]")
            ),
        ),
        # datetimetz
        (
            [pd.Timestamp("2000", tz="CET"), pd.Timestamp("2001", tz="CET")],
            DatetimeArray._from_sequence(
                ["2000", "2001"], dtype=pd.DatetimeTZDtype(tz="CET", unit="ns")
            ),
        ),
        (
            [
                datetime.datetime(2000, 1, 1, tzinfo=cet),
                datetime.datetime(2001, 1, 1, tzinfo=cet),
            ],
            DatetimeArray._from_sequence(
                ["2000", "2001"], dtype=pd.DatetimeTZDtype(tz=cet, unit="ns")
            ),
        ),
        # timedelta
        (
            [pd.Timedelta("1h"), pd.Timedelta("2h")],
            TimedeltaArray._from_sequence(["1h", "2h"], dtype="m8[ns]"),
        ),
        (
            np.array([1, 2], dtype="m8[ns]"),
            TimedeltaArray._from_sequence(np.array([1, 2], dtype="m8[ns]")),
        ),
        (
            np.array([1, 2], dtype="m8[us]"),
            TimedeltaArray._from_sequence(np.array([1, 2], dtype="m8[us]")),
        ),
        # integer
        ([1, 2], IntegerArray._from_sequence([1, 2], dtype="Int64")),
        ([1, None], IntegerArray._from_sequence([1, None], dtype="Int64")),
        ([1, pd.NA], IntegerArray._from_sequence([1, pd.NA], dtype="Int64")),
        ([1, np.nan], IntegerArray._from_sequence([1, np.nan], dtype="Int64")),
        # float
        ([0.1, 0.2], FloatingArray._from_sequence([0.1, 0.2], dtype="Float64")),
        ([0.1, None], FloatingArray._from_sequence([0.1, pd.NA], dtype="Float64")),
        ([0.1, np.nan], FloatingArray._from_sequence([0.1, pd.NA], dtype="Float64")),
        ([0.1, pd.NA], FloatingArray._from_sequence([0.1, pd.NA], dtype="Float64")),
        # integer-like float
        ([1.0, 2.0], FloatingArray._from_sequence([1.0, 2.0], dtype="Float64")),
        ([1.0, None], FloatingArray._from_sequence([1.0, pd.NA], dtype="Float64")),
        ([1.0, np.nan], FloatingArray._from_sequence([1.0, pd.NA], dtype="Float64")),
        ([1.0, pd.NA], FloatingArray._from_sequence([1.0, pd.NA], dtype="Float64")),
        # mixed-integer-float
        ([1, 2.0], FloatingArray._from_sequence([1.0, 2.0], dtype="Float64")),
        (
            [1, np.nan, 2.0],
            FloatingArray._from_sequence([1.0, None, 2.0], dtype="Float64"),
        ),
        # string
        (
            ["a", "b"],
            pd.StringDtype()
            .construct_array_type()
            ._from_sequence(["a", "b"], dtype=pd.StringDtype()),
        ),
        (
            ["a", None],
            pd.StringDtype()
            .construct_array_type()
            ._from_sequence(["a", None], dtype=pd.StringDtype()),
        ),
        (
            # numpy array with string dtype
            np.array(["a", "b"], dtype=str),
            pd.StringDtype()
            .construct_array_type()
            ._from_sequence(["a", "b"], dtype=pd.StringDtype()),
        ),
        # Boolean
        ([True, False], BooleanArray._from_sequence([True, False], dtype="boolean")),
        ([True, None], BooleanArray._from_sequence([True, None], dtype="boolean")),
    ],
)
def test_array_inference(data, expected):
    # pd.array with dtype=None infers the appropriate EA from the values.
    result = pd.array(data)
    tm.assert_equal(result, expected)


@pytest.mark.parametrize(
    "data",
    [
        # mix of frequencies
        [pd.Period("2000", "D"), pd.Period("2001", "Y")],
        # mix of closed
        [pd.Interval(0, 1, closed="left"), pd.Interval(1, 2, closed="right")],
        # Mix of timezones
        [pd.Timestamp("2000", tz="CET"), pd.Timestamp("2000", tz="UTC")],
        # Mix of tz-aware and tz-naive
        [pd.Timestamp("2000", tz="CET"), pd.Timestamp("2000")],
        np.array([pd.Timestamp("2000"), pd.Timestamp("2000", tz="CET")]),
    ],
)
def test_array_inference_fails(data):
    # Heterogeneous inputs fall back to an object-dtype NumpyExtensionArray.
    result = pd.array(data)
    expected = NumpyExtensionArray(np.array(data, dtype=object))
    tm.assert_extension_array_equal(result, expected)


@pytest.mark.parametrize("data", [np.array(0)])
def test_nd_raises(data):
    with pytest.raises(ValueError, match="NumpyExtensionArray must be 1-dimensional"):
        pd.array(data, dtype="int64")


def test_scalar_raises():
    with pytest.raises(ValueError, match="Cannot pass scalar '1'"):
        pd.array(1)


def test_dataframe_raises():
    # GH#51167 don't accidentally cast to StringArray by doing inference on columns
    df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
    msg = "Cannot pass DataFrame to 'pandas.array'"
    with pytest.raises(TypeError, match=msg):
        pd.array(df)


def test_bounds_check():
    # GH21796
    with pytest.raises(
        TypeError, match=r"cannot safely cast non-equivalent int(32|64) to uint16"
    ):
        pd.array([-1, 2, 3], dtype="UInt16")


# ---------------------------------------------------------------------------
# A couple dummy classes to ensure that Series and Indexes are unboxed before
# getting to the EA classes.


@register_extension_dtype
class DecimalDtype2(DecimalDtype):
    name = "decimal2"

    @classmethod
    def construct_array_type(cls):
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        return DecimalArray2


class DecimalArray2(DecimalArray):
    @classmethod
    def _from_sequence(cls, scalars, *, dtype=None, copy=False):
        # Reject boxed inputs so test_array_unboxes can verify that
        # pd.array unboxes Series/Index before dispatching to the EA.
        if isinstance(scalars, (pd.Series, pd.Index)):
            raise TypeError("scalars should not be of type pd.Series or pd.Index")

        return super()._from_sequence(scalars, dtype=dtype, copy=copy)


def test_array_unboxes(index_or_series):
    box = index_or_series

    data = box([decimal.Decimal("1"), decimal.Decimal("2")])
    dtype = DecimalDtype2()
    # make sure it works
    with pytest.raises(
        TypeError, match="scalars should not be of type pd.Series or pd.Index"
    ):
        DecimalArray2._from_sequence(data, dtype=dtype)

    result = pd.array(data, dtype="decimal2")
    expected = DecimalArray2._from_sequence(data.values, dtype=dtype)
    tm.assert_equal(result, expected)


def test_array_to_numpy_na():
    # GH#40638
    arr = pd.array([pd.NA, 1], dtype="string[python]")
    result = arr.to_numpy(na_value=True, dtype=bool)
    expected = np.array([True, True])
    tm.assert_numpy_array_equal(result, expected)
from __future__ import annotations

import re
import warnings

import numpy as np
import pytest

from pandas._libs import (
    NaT,
    OutOfBoundsDatetime,
    Timestamp,
)
from pandas._libs.tslibs.dtypes import freq_to_period_freqstr
from pandas.compat.numpy import np_version_gt2

import pandas as pd
from pandas import (
    DatetimeIndex,
    Period,
    PeriodIndex,
    TimedeltaIndex,
)
import pandas._testing as tm
from pandas.core.arrays import (
    DatetimeArray,
    NumpyExtensionArray,
    PeriodArray,
    TimedeltaArray,
)


# TODO: more freq variants
@pytest.fixture(params=["D", "B", "W", "ME", "QE", "YE"])
def freqstr(request):
    """Fixture returning parametrized frequency in string format."""
    return request.param


@pytest.fixture
def period_index(freqstr):
    """
    A fixture to provide PeriodIndex objects with different frequencies.

    Most PeriodArray behavior is already tested in PeriodIndex tests,
    so here we just test that the PeriodArray behavior matches
    the PeriodIndex behavior.
    """
    # TODO: non-monotone indexes; NaTs, different start dates
    with warnings.catch_warnings():
        # suppress deprecation of Period[B]
        warnings.filterwarnings(
            "ignore", message="Period with BDay freq", category=FutureWarning
        )
        freqstr = freq_to_period_freqstr(1, freqstr)
        pi = pd.period_range(start=Timestamp("2000-01-01"), periods=100, freq=freqstr)
    return pi


@pytest.fixture
def datetime_index(freqstr):
    """
    A fixture to provide DatetimeIndex objects with different frequencies.

    Most DatetimeArray behavior is already tested in DatetimeIndex tests,
    so here we just test that the DatetimeArray behavior matches
    the DatetimeIndex behavior.
    """
    # TODO: non-monotone indexes; NaTs, different start dates, timezones
    dti = pd.date_range(start=Timestamp("2000-01-01"), periods=100, freq=freqstr)
    return dti


@pytest.fixture
def timedelta_index():
    """
    A fixture to provide TimedeltaIndex objects with different frequencies.

    Most TimedeltaArray behavior is already tested in TimedeltaIndex tests,
    so here we just test that the TimedeltaArray behavior matches
    the TimedeltaIndex behavior.
    """
    # TODO: flesh this out
    return TimedeltaIndex(["1 Day", "3 Hours", "NaT"])


# Tests shared across the datetimelike arrays; concrete subclasses (e.g.
# TestDatetimeArray below) supply index_cls/array_cls/scalar_type/example_dtype.
class SharedTests:
    index_cls: type[DatetimeIndex | PeriodIndex | TimedeltaIndex]

    @pytest.fixture
    def arr1d(self):
        """Fixture returning DatetimeArray with daily frequency."""
        data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
        if self.array_cls is PeriodArray:
            arr = self.array_cls(data, freq="D")
        else:
            arr = self.index_cls(data, freq="D")._data
        return arr

    def test_compare_len1_raises(self, arr1d):
        # make sure we raise when comparing with different lengths, specific
        # to the case where one has length-1, which numpy would broadcast
        arr = arr1d
        idx = self.index_cls(arr)

        with pytest.raises(ValueError, match="Lengths must match"):
            arr == arr[:1]

        # test the index classes while we're at it, GH#23078
        with pytest.raises(ValueError, match="Lengths must match"):
            idx <= idx[[0]]

    @pytest.mark.parametrize(
        "result",
        [
            pd.date_range("2020", periods=3),
            pd.date_range("2020", periods=3, tz="UTC"),
            pd.timedelta_range("0 days", periods=3),
            pd.period_range("2020Q1", periods=3, freq="Q"),
        ],
    )
    def test_compare_with_Categorical(self, result):
        expected = pd.Categorical(result)
        assert all(result == expected)
        assert not any(result != expected)

    @pytest.mark.parametrize("reverse", [True, False])
    @pytest.mark.parametrize("as_index", [True, False])
    def test_compare_categorical_dtype(self, arr1d, as_index, reverse, ordered):
        other = pd.Categorical(arr1d, ordered=ordered)
        if as_index:
            other = pd.CategoricalIndex(other)

        left, right = arr1d, other
        if reverse:
            left, right = right, left

        ones = np.ones(arr1d.shape, dtype=bool)
        zeros = ~ones

        result = left == right
        tm.assert_numpy_array_equal(result, ones)

        result = left != right
        tm.assert_numpy_array_equal(result, zeros)

        if not reverse and not as_index:
            # Otherwise Categorical raises TypeError bc it is not ordered
            # TODO: we should probably get the same behavior regardless?
            result = left < right
            tm.assert_numpy_array_equal(result, zeros)

            result = left <= right
            tm.assert_numpy_array_equal(result, ones)

            result = left > right
            tm.assert_numpy_array_equal(result, zeros)

            result = left >= right
            tm.assert_numpy_array_equal(result, ones)

    def test_take(self):
        data = np.arange(100, dtype="i8") * 24 * 3600 * 10**9
        np.random.default_rng(2).shuffle(data)

        if self.array_cls is PeriodArray:
            arr = PeriodArray(data, dtype="period[D]")
        else:
            arr = self.index_cls(data)._data
        idx = self.index_cls._simple_new(arr)

        takers = [1, 4, 94]
        result = arr.take(takers)
        expected = idx.take(takers)

        tm.assert_index_equal(self.index_cls(result), expected)

        takers = np.array([1, 4, 94])
        result = arr.take(takers)
        expected = idx.take(takers)

        tm.assert_index_equal(self.index_cls(result), expected)

    @pytest.mark.parametrize("fill_value", [2, 2.0, Timestamp(2021, 1, 1, 12).time])
    def test_take_fill_raises(self, fill_value, arr1d):
        msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. Got"
        with pytest.raises(TypeError, match=msg):
            arr1d.take([0, 1], allow_fill=True, fill_value=fill_value)

    def test_take_fill(self, arr1d):
        arr = arr1d

        # None/np.nan/NaT all coerce to NaT as the fill value
        result = arr.take([-1, 1], allow_fill=True, fill_value=None)
        assert result[0] is NaT

        result = arr.take([-1, 1], allow_fill=True, fill_value=np.nan)
        assert result[0] is NaT

        result = arr.take([-1, 1], allow_fill=True, fill_value=NaT)
        assert result[0] is NaT

    @pytest.mark.filterwarnings(
        "ignore:Period with BDay freq is deprecated:FutureWarning"
    )
    def test_take_fill_str(self, arr1d):
        # Cast str fill_value matching other fill_value-taking methods
        result = arr1d.take([-1, 1], allow_fill=True, fill_value=str(arr1d[-1]))
        expected = arr1d[[-1, 1]]
        tm.assert_equal(result, expected)

        msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. Got"
        with pytest.raises(TypeError, match=msg):
            arr1d.take([-1, 1], allow_fill=True, fill_value="foo")

    def test_concat_same_type(self, arr1d):
        arr = arr1d
        idx = self.index_cls(arr)
        idx = idx.insert(0, NaT)
        arr = arr1d

        result = arr._concat_same_type([arr[:-1], arr[1:], arr])
        arr2 = arr.astype(object)
        expected = self.index_cls(np.concatenate([arr2[:-1], arr2[1:], arr2]))

        tm.assert_index_equal(self.index_cls(result), expected)

    def test_unbox_scalar(self, arr1d):
        result = arr1d._unbox_scalar(arr1d[0])
        expected = arr1d._ndarray.dtype.type
        assert isinstance(result, expected)

        result = arr1d._unbox_scalar(NaT)
        assert isinstance(result, expected)

        msg = f"'value' should be a {self.scalar_type.__name__}."
        with pytest.raises(ValueError, match=msg):
            arr1d._unbox_scalar("foo")

    def test_check_compatible_with(self, arr1d):
        # these should all pass without raising
        arr1d._check_compatible_with(arr1d[0])
        arr1d._check_compatible_with(arr1d[:1])
        arr1d._check_compatible_with(NaT)

    def test_scalar_from_string(self, arr1d):
        result = arr1d._scalar_from_string(str(arr1d[0]))
        assert result == arr1d[0]

    def test_reduce_invalid(self, arr1d):
        msg = "does not support reduction 'not a method'"
        with pytest.raises(TypeError, match=msg):
            arr1d._reduce("not a method")

    @pytest.mark.parametrize("method", ["pad", "backfill"])
    def test_fillna_method_doesnt_change_orig(self, method):
        data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
        if self.array_cls is PeriodArray:
            arr = self.array_cls(data, dtype="period[D]")
        else:
            arr = self.array_cls._from_sequence(data)
        arr[4] = NaT

        fill_value = arr[3] if method == "pad" else arr[5]

        result = arr._pad_or_backfill(method=method)
        assert result[4] == fill_value

        # check that the original was not changed
        assert arr[4] is NaT

    def test_searchsorted(self):
        data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
        if self.array_cls is PeriodArray:
            arr = self.array_cls(data, dtype="period[D]")
        else:
            arr = self.array_cls._from_sequence(data)

        # scalar
        result = arr.searchsorted(arr[1])
        assert result == 1

        result = arr.searchsorted(arr[2], side="right")
        assert result == 3

        # own-type
        result = arr.searchsorted(arr[1:3])
        expected = np.array([1, 2], dtype=np.intp)
        tm.assert_numpy_array_equal(result, expected)

        result = arr.searchsorted(arr[1:3], side="right")
        expected = np.array([2, 3], dtype=np.intp)
        tm.assert_numpy_array_equal(result, expected)

        # GH#29884 match numpy convention on whether NaT goes
        # at the end or the beginning
        result = arr.searchsorted(NaT)
        assert result == 10

    @pytest.mark.parametrize("box", [None, "index", "series"])
    def test_searchsorted_castable_strings(self, arr1d, box, string_storage):
        arr = arr1d
        if box is None:
            pass
        elif box == "index":
            # Test the equivalent Index.searchsorted method while we're here
            arr = self.index_cls(arr)
        else:
            # Test the equivalent Series.searchsorted method while we're here
            arr = pd.Series(arr)

        # scalar
        result = arr.searchsorted(str(arr[1]))
        assert result == 1

        result = arr.searchsorted(str(arr[2]), side="right")
        assert result == 3

        result = arr.searchsorted([str(x) for x in arr[1:3]])
        expected = np.array([1, 2], dtype=np.intp)
        tm.assert_numpy_array_equal(result, expected)

        with pytest.raises(
            TypeError,
            match=re.escape(
                f"value should be a '{arr1d._scalar_type.__name__}', 'NaT', "
                "or array of those. Got 'str' instead."
            ),
        ):
            arr.searchsorted("foo")

        with pd.option_context("string_storage", string_storage):
            with pytest.raises(
                TypeError,
                match=re.escape(
                    f"value should be a '{arr1d._scalar_type.__name__}', 'NaT', "
                    "or array of those. Got string array instead."
                ),
            ):
                arr.searchsorted([str(arr[1]), "baz"])

    def test_getitem_near_implementation_bounds(self):
        # We only check tz-naive for DTA bc the bounds are slightly different
        # for other tzs
        i8vals = np.asarray([NaT._value + n for n in range(1, 5)], dtype="i8")
        if self.array_cls is PeriodArray:
            arr = self.array_cls(i8vals, dtype="period[ns]")
        else:
            arr = self.index_cls(i8vals, freq="ns")._data
        arr[0]  # should not raise OutOfBoundsDatetime

        index = pd.Index(arr)
        index[0]  # should not raise OutOfBoundsDatetime

        ser = pd.Series(arr)
        ser[0]  # should not raise OutOfBoundsDatetime

    def test_getitem_2d(self, arr1d):
        # 2d slicing on a 1D array
        expected = type(arr1d)._simple_new(
            arr1d._ndarray[:, np.newaxis], dtype=arr1d.dtype
        )
        result = arr1d[:, np.newaxis]
        tm.assert_equal(result, expected)

        # Lookup on a 2D array
        arr2d = expected
        expected = type(arr2d)._simple_new(arr2d._ndarray[:3, 0], dtype=arr2d.dtype)
        result = arr2d[:3, 0]
        tm.assert_equal(result, expected)

        # Scalar lookup
        result = arr2d[-1, 0]
        expected = arr1d[-1]
        assert result == expected

    def test_iter_2d(self, arr1d):
        # iterating a 2D array yields 1D arrays of the same type/dtype
        data2d = arr1d._ndarray[:3, np.newaxis]
        arr2d = type(arr1d)._simple_new(data2d, dtype=arr1d.dtype)
        result = list(arr2d)
        assert len(result) == 3
        for x in result:
            assert isinstance(x, type(arr1d))
            assert x.ndim == 1
            assert x.dtype == arr1d.dtype

    def test_repr_2d(self, arr1d):
        data2d = arr1d._ndarray[:3, np.newaxis]
        arr2d = type(arr1d)._simple_new(data2d, dtype=arr1d.dtype)

        result = repr(arr2d)

        if isinstance(arr2d, TimedeltaArray):
            expected = (
                f"<{type(arr2d).__name__}>\n"
                "[\n"
                f"['{arr1d[0]._repr_base()}'],\n"
                f"['{arr1d[1]._repr_base()}'],\n"
                f"['{arr1d[2]._repr_base()}']\n"
                "]\n"
                f"Shape: (3, 1), dtype: {arr1d.dtype}"
            )
        else:
            expected = (
                f"<{type(arr2d).__name__}>\n"
                "[\n"
                f"['{arr1d[0]}'],\n"
                f"['{arr1d[1]}'],\n"
                f"['{arr1d[2]}']\n"
                "]\n"
                f"Shape: (3, 1), dtype: {arr1d.dtype}"
            )

        assert result == expected

    def test_setitem(self):
        data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
        if self.array_cls is PeriodArray:
            arr = self.array_cls(data, dtype="period[D]")
        else:
            arr = self.index_cls(data, freq="D")._data

        arr[0] = arr[1]
        expected = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
        expected[0] = expected[1]

        tm.assert_numpy_array_equal(arr.asi8, expected)

        arr[:2] = arr[-2:]
        expected[:2] = expected[-2:]
        tm.assert_numpy_array_equal(arr.asi8, expected)

    @pytest.mark.parametrize(
        "box",
        [
            pd.Index,
            pd.Series,
            np.array,
            list,
            NumpyExtensionArray,
        ],
    )
    def test_setitem_object_dtype(self, box, arr1d):
        expected = arr1d.copy()[::-1]
        if expected.dtype.kind in ["m", "M"]:
            expected = expected._with_freq(None)

        vals = expected
        if box is list:
            vals = list(vals)
        elif box is np.array:
            # if we do np.array(x).astype(object) then dt64 and td64 cast to ints
            vals = np.array(vals.astype(object))
        elif box is NumpyExtensionArray:
            vals = box(np.asarray(vals, dtype=object))
        else:
            vals = box(vals).astype(object)

        arr1d[:] = vals

        tm.assert_equal(arr1d, expected)

    def test_setitem_strs(self, arr1d):
        # Check that we parse strs in both scalar and listlike

        # Setting list-like of strs
        expected = arr1d.copy()
        expected[[0, 1]] = arr1d[-2:]

        result = arr1d.copy()
        result[:2] = [str(x) for x in arr1d[-2:]]
        tm.assert_equal(result, expected)

        # Same thing but now for just a scalar str
        expected = arr1d.copy()
        expected[0] = arr1d[-1]

        result = arr1d.copy()
        result[0] = str(arr1d[-1])
        tm.assert_equal(result, expected)

    @pytest.mark.parametrize("as_index", [True, False])
    def test_setitem_categorical(self, arr1d, as_index):
        expected = arr1d.copy()[::-1]
        if not isinstance(expected, PeriodArray):
            expected = expected._with_freq(None)

        cat = pd.Categorical(arr1d)
        if as_index:
            cat = pd.CategoricalIndex(cat)

        arr1d[:] = cat[::-1]

        tm.assert_equal(arr1d, expected)

    def test_setitem_raises(self, arr1d):
        arr = arr1d[:10]
        val = arr[0]

        with pytest.raises(IndexError, match="index 12 is out of bounds"):
            arr[12] = val

        with pytest.raises(TypeError, match="value should be a.* 'object'"):
            arr[0] = object()

        msg = "cannot set using a list-like indexer with a different length"
        with pytest.raises(ValueError, match=msg):
            # GH#36339
            arr[[]] = [arr[1]]

        msg = "cannot set using a slice indexer with a different length than"
        with pytest.raises(ValueError, match=msg):
            # GH#36339
            arr[1:1] = arr[:3]

    @pytest.mark.parametrize("box", [list, np.array, pd.Index, pd.Series])
    def test_setitem_numeric_raises(self, arr1d, box):
        # We dont case e.g. int64 to our own dtype for setitem

        msg = (
            f"value should be a '{arr1d._scalar_type.__name__}', "
            "'NaT', or array of those. Got"
        )
        with pytest.raises(TypeError, match=msg):
            arr1d[:2] = box([0, 1])

        with pytest.raises(TypeError, match=msg):
            arr1d[:2] = box([0.0, 1.0])

    def test_inplace_arithmetic(self):
        # GH#24115 check that iadd and isub are actually in-place
        data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
        if self.array_cls is PeriodArray:
            arr = self.array_cls(data, dtype="period[D]")
        else:
            arr = self.index_cls(data, freq="D")._data

        expected = arr + pd.Timedelta(days=1)
        arr += pd.Timedelta(days=1)
        tm.assert_equal(arr, expected)

        expected = arr - pd.Timedelta(days=1)
        arr -= pd.Timedelta(days=1)
        tm.assert_equal(arr, expected)

    def test_shift_fill_int_deprecated(self, arr1d):
        # GH#31971, enforced in 2.0
        with pytest.raises(TypeError, match="value should be a"):
            arr1d.shift(1, fill_value=1)

    def test_median(self, arr1d):
        arr = arr1d
        if len(arr) % 2 == 0:
            # make it easier to define `expected`
            arr = arr[:-1]

        expected = arr[len(arr) // 2]

        result = arr.median()
        assert type(result) is type(expected)
        assert result == expected

        arr[len(arr) // 2] = NaT
        if not isinstance(expected, Period):
            expected = arr[len(arr) // 2 - 1 : len(arr) // 2 + 2].mean()

        # once a NaT is present, skipna=False propagates it
        assert arr.median(skipna=False) is NaT

        result = arr.median()
        assert type(result) is type(expected)
        assert result == expected

        assert arr[:0].median() is NaT
        assert arr[:0].median(skipna=False) is NaT

        # 2d Case
        arr2 = arr.reshape(-1, 1)

        result = arr2.median(axis=None)
        assert type(result) is type(expected)
        assert result == expected

        assert arr2.median(axis=None, skipna=False) is NaT

        result = arr2.median(axis=0)
        expected2 = type(arr)._from_sequence([expected], dtype=arr.dtype)
        tm.assert_equal(result, expected2)

        result = arr2.median(axis=0, skipna=False)
        expected2 = type(arr)._from_sequence([NaT], dtype=arr.dtype)
        tm.assert_equal(result, expected2)

        result = arr2.median(axis=1)
        tm.assert_equal(result, arr)

        result = arr2.median(axis=1, skipna=False)
        tm.assert_equal(result, arr)

    def test_from_integer_array(self):
        arr = np.array([1, 2, 3], dtype=np.int64)
        data = pd.array(arr, dtype="Int64")
        if self.array_cls is PeriodArray:
            expected = self.array_cls(arr, dtype=self.example_dtype)
            result = self.array_cls(data, dtype=self.example_dtype)
        else:
            expected = self.array_cls._from_sequence(arr, dtype=self.example_dtype)
            result = self.array_cls._from_sequence(data, dtype=self.example_dtype)

        tm.assert_extension_array_equal(result, expected)


class TestDatetimeArray(SharedTests):
    index_cls = DatetimeIndex
    array_cls = DatetimeArray
    scalar_type = Timestamp
    example_dtype = "M8[ns]"

    @pytest.fixture
    def arr1d(self, tz_naive_fixture, freqstr):
        """
        Fixture returning DatetimeArray with parametrized frequency and
        timezones
        """
        tz = tz_naive_fixture
        dti = pd.date_range("2016-01-01 01:01:00", periods=5, freq=freqstr, tz=tz)
        dta = dti._data
        return dta

    def test_round(self, arr1d):
        # GH#24064
        dti = self.index_cls(arr1d)

        result = dti.round(freq="2min")
        expected = dti - pd.Timedelta(minutes=1)
        expected = expected._with_freq(None)
        tm.assert_index_equal(result, expected)

        dta = dti._data
        result = dta.round(freq="2min")
        expected = expected._data._with_freq(None)
        tm.assert_datetime_array_equal(result, expected)

    def test_array_interface(self, datetime_index):
        arr = datetime_index._data
        # numpy 2.0 replaced copy=False semantics with copy=None
        copy_false = None if np_version_gt2 else False

        # default asarray gives the same underlying data (for tz naive)
        result = np.asarray(arr)
        expected = arr._ndarray
        assert result is expected
        tm.assert_numpy_array_equal(result, expected)
        result = np.array(arr, copy=copy_false)
        assert result is expected
        tm.assert_numpy_array_equal(result, expected)

        # specifying M8[ns] gives the same result as default
        result = np.asarray(arr, dtype="datetime64[ns]")
        expected = arr._ndarray
        assert result is expected
        tm.assert_numpy_array_equal(result, expected)
        result = np.array(arr, dtype="datetime64[ns]", copy=copy_false)
        assert result is expected
        tm.assert_numpy_array_equal(result, expected)
        result = np.array(arr, dtype="datetime64[ns]")
        if not np_version_gt2:
            # TODO: GH 57739
            assert result is not expected
        tm.assert_numpy_array_equal(result, expected)

        # to object dtype
        result = np.asarray(arr, dtype=object)
        expected = np.array(list(arr), dtype=object)
        tm.assert_numpy_array_equal(result, expected)

        # to other dtype always copies
        result = np.asarray(arr, dtype="int64")
        assert result is not arr.asi8
        assert not np.may_share_memory(arr, result)
        expected = arr.asi8.copy()
        tm.assert_numpy_array_equal(result, expected)

        # other dtypes handled by numpy
        for dtype in ["float64", str]:
            result = np.asarray(arr, dtype=dtype)
            expected = np.asarray(arr).astype(dtype)
            tm.assert_numpy_array_equal(result, expected)

    def test_array_object_dtype(self, arr1d):
        # GH#23524
        arr = arr1d
        dti = self.index_cls(arr1d)

        expected = np.array(list(dti))

        result = np.array(arr, dtype=object)
        tm.assert_numpy_array_equal(result, expected)

        # also test the DatetimeIndex method while we're at it
        result = np.array(dti, dtype=object)
        tm.assert_numpy_array_equal(result, expected)

    def test_array_tz(self, arr1d):
        # GH#23524
        arr = arr1d
        dti = self.index_cls(arr1d)
        copy_false = None if np_version_gt2 else False

        expected = dti.asi8.view("M8[ns]")
        result = np.array(arr, dtype="M8[ns]")
        tm.assert_numpy_array_equal(result, expected)

        result = np.array(arr, dtype="datetime64[ns]")
        tm.assert_numpy_array_equal(result, expected)

        # check that we are not making copies when setting copy=copy_false
        result = np.array(arr, dtype="M8[ns]", copy=copy_false)
        assert result.base is expected.base
        assert result.base is not None
        result = np.array(arr, dtype="datetime64[ns]", copy=copy_false)
        assert result.base is expected.base
assert result.base is not None\n\n def test_array_i8_dtype(self, arr1d):\n arr = arr1d\n dti = self.index_cls(arr1d)\n copy_false = None if np_version_gt2 else False\n\n expected = dti.asi8\n result = np.array(arr, dtype="i8")\n tm.assert_numpy_array_equal(result, expected)\n\n result = np.array(arr, dtype=np.int64)\n tm.assert_numpy_array_equal(result, expected)\n\n # check that we are still making copies when setting copy=copy_false\n result = np.array(arr, dtype="i8", copy=copy_false)\n assert result.base is not expected.base\n assert result.base is None\n\n def test_from_array_keeps_base(self):\n # Ensure that DatetimeArray._ndarray.base isn't lost.\n arr = np.array(["2000-01-01", "2000-01-02"], dtype="M8[ns]")\n dta = DatetimeArray._from_sequence(arr)\n\n assert dta._ndarray is arr\n dta = DatetimeArray._from_sequence(arr[:0])\n assert dta._ndarray.base is arr\n\n def test_from_dti(self, arr1d):\n arr = arr1d\n dti = self.index_cls(arr1d)\n assert list(dti) == list(arr)\n\n # Check that Index.__new__ knows what to do with DatetimeArray\n dti2 = pd.Index(arr)\n assert isinstance(dti2, DatetimeIndex)\n assert list(dti2) == list(arr)\n\n def test_astype_object(self, arr1d):\n arr = arr1d\n dti = self.index_cls(arr1d)\n\n asobj = arr.astype("O")\n assert isinstance(asobj, np.ndarray)\n assert asobj.dtype == "O"\n assert list(asobj) == list(dti)\n\n @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")\n def test_to_period(self, datetime_index, freqstr):\n dti = datetime_index\n arr = dti._data\n\n freqstr = freq_to_period_freqstr(1, freqstr)\n expected = dti.to_period(freq=freqstr)\n result = arr.to_period(freq=freqstr)\n assert isinstance(result, PeriodArray)\n\n tm.assert_equal(result, expected._data)\n\n def test_to_period_2d(self, arr1d):\n arr2d = arr1d.reshape(1, -1)\n\n warn = None if arr1d.tz is None else UserWarning\n with tm.assert_produces_warning(warn):\n result = arr2d.to_period("D")\n expected = 
arr1d.to_period("D").reshape(1, -1)\n tm.assert_period_array_equal(result, expected)\n\n @pytest.mark.parametrize("propname", DatetimeArray._bool_ops)\n def test_bool_properties(self, arr1d, propname):\n # in this case _bool_ops is just `is_leap_year`\n dti = self.index_cls(arr1d)\n arr = arr1d\n assert dti.freq == arr.freq\n\n result = getattr(arr, propname)\n expected = np.array(getattr(dti, propname), dtype=result.dtype)\n\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize("propname", DatetimeArray._field_ops)\n def test_int_properties(self, arr1d, propname):\n dti = self.index_cls(arr1d)\n arr = arr1d\n\n result = getattr(arr, propname)\n expected = np.array(getattr(dti, propname), dtype=result.dtype)\n\n tm.assert_numpy_array_equal(result, expected)\n\n def test_take_fill_valid(self, arr1d, fixed_now_ts):\n arr = arr1d\n dti = self.index_cls(arr1d)\n\n now = fixed_now_ts.tz_localize(dti.tz)\n result = arr.take([-1, 1], allow_fill=True, fill_value=now)\n assert result[0] == now\n\n msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. Got"\n with pytest.raises(TypeError, match=msg):\n # fill_value Timedelta invalid\n arr.take([-1, 1], allow_fill=True, fill_value=now - now)\n\n with pytest.raises(TypeError, match=msg):\n # fill_value Period invalid\n arr.take([-1, 1], allow_fill=True, fill_value=Period("2014Q1"))\n\n tz = None if dti.tz is not None else "US/Eastern"\n now = fixed_now_ts.tz_localize(tz)\n msg = "Cannot compare tz-naive and tz-aware datetime-like objects"\n with pytest.raises(TypeError, match=msg):\n # Timestamp with mismatched tz-awareness\n arr.take([-1, 1], allow_fill=True, fill_value=now)\n\n value = NaT._value\n msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. 
Got"\n with pytest.raises(TypeError, match=msg):\n # require NaT, not iNaT, as it could be confused with an integer\n arr.take([-1, 1], allow_fill=True, fill_value=value)\n\n value = np.timedelta64("NaT", "ns")\n with pytest.raises(TypeError, match=msg):\n # require appropriate-dtype if we have a NA value\n arr.take([-1, 1], allow_fill=True, fill_value=value)\n\n if arr.tz is not None:\n # GH#37356\n # Assuming here that arr1d fixture does not include Australia/Melbourne\n value = fixed_now_ts.tz_localize("Australia/Melbourne")\n result = arr.take([-1, 1], allow_fill=True, fill_value=value)\n\n expected = arr.take(\n [-1, 1],\n allow_fill=True,\n fill_value=value.tz_convert(arr.dtype.tz),\n )\n tm.assert_equal(result, expected)\n\n def test_concat_same_type_invalid(self, arr1d):\n # different timezones\n arr = arr1d\n\n if arr.tz is None:\n other = arr.tz_localize("UTC")\n else:\n other = arr.tz_localize(None)\n\n with pytest.raises(ValueError, match="to_concat must have the same"):\n arr._concat_same_type([arr, other])\n\n def test_concat_same_type_different_freq(self, unit):\n # we *can* concatenate DTI with different freqs.\n a = pd.date_range("2000", periods=2, freq="D", tz="US/Central", unit=unit)._data\n b = pd.date_range("2000", periods=2, freq="h", tz="US/Central", unit=unit)._data\n result = DatetimeArray._concat_same_type([a, b])\n expected = (\n pd.to_datetime(\n [\n "2000-01-01 00:00:00",\n "2000-01-02 00:00:00",\n "2000-01-01 00:00:00",\n "2000-01-01 01:00:00",\n ]\n )\n .tz_localize("US/Central")\n .as_unit(unit)\n ._data\n )\n\n tm.assert_datetime_array_equal(result, expected)\n\n def test_strftime(self, arr1d, using_infer_string):\n arr = arr1d\n\n result = arr.strftime("%Y %b")\n expected = np.array([ts.strftime("%Y %b") for ts in arr], dtype=object)\n if using_infer_string:\n expected = pd.array(expected, dtype=pd.StringDtype(na_value=np.nan))\n tm.assert_equal(result, expected)\n\n def test_strftime_nat(self, using_infer_string):\n # GH 29578\n 
arr = DatetimeIndex(["2019-01-01", NaT])._data\n\n result = arr.strftime("%Y-%m-%d")\n expected = np.array(["2019-01-01", np.nan], dtype=object)\n if using_infer_string:\n expected = pd.array(expected, dtype=pd.StringDtype(na_value=np.nan))\n tm.assert_equal(result, expected)\n\n\nclass TestTimedeltaArray(SharedTests):\n index_cls = TimedeltaIndex\n array_cls = TimedeltaArray\n scalar_type = pd.Timedelta\n example_dtype = "m8[ns]"\n\n def test_from_tdi(self):\n tdi = TimedeltaIndex(["1 Day", "3 Hours"])\n arr = tdi._data\n assert list(arr) == list(tdi)\n\n # Check that Index.__new__ knows what to do with TimedeltaArray\n tdi2 = pd.Index(arr)\n assert isinstance(tdi2, TimedeltaIndex)\n assert list(tdi2) == list(arr)\n\n def test_astype_object(self):\n tdi = TimedeltaIndex(["1 Day", "3 Hours"])\n arr = tdi._data\n asobj = arr.astype("O")\n assert isinstance(asobj, np.ndarray)\n assert asobj.dtype == "O"\n assert list(asobj) == list(tdi)\n\n def test_to_pytimedelta(self, timedelta_index):\n tdi = timedelta_index\n arr = tdi._data\n\n expected = tdi.to_pytimedelta()\n result = arr.to_pytimedelta()\n\n tm.assert_numpy_array_equal(result, expected)\n\n def test_total_seconds(self, timedelta_index):\n tdi = timedelta_index\n arr = tdi._data\n\n expected = tdi.total_seconds()\n result = arr.total_seconds()\n\n tm.assert_numpy_array_equal(result, expected.values)\n\n @pytest.mark.parametrize("propname", TimedeltaArray._field_ops)\n def test_int_properties(self, timedelta_index, propname):\n tdi = timedelta_index\n arr = tdi._data\n\n result = getattr(arr, propname)\n expected = np.array(getattr(tdi, propname), dtype=result.dtype)\n\n tm.assert_numpy_array_equal(result, expected)\n\n def test_array_interface(self, timedelta_index):\n arr = timedelta_index._data\n copy_false = None if np_version_gt2 else False\n\n # default asarray gives the same underlying data\n result = np.asarray(arr)\n expected = arr._ndarray\n assert result is expected\n 
tm.assert_numpy_array_equal(result, expected)\n result = np.array(arr, copy=copy_false)\n assert result is expected\n tm.assert_numpy_array_equal(result, expected)\n\n # specifying m8[ns] gives the same result as default\n result = np.asarray(arr, dtype="timedelta64[ns]")\n expected = arr._ndarray\n assert result is expected\n tm.assert_numpy_array_equal(result, expected)\n result = np.array(arr, dtype="timedelta64[ns]", copy=copy_false)\n assert result is expected\n tm.assert_numpy_array_equal(result, expected)\n result = np.array(arr, dtype="timedelta64[ns]")\n if not np_version_gt2:\n # TODO: GH 57739\n assert result is not expected\n tm.assert_numpy_array_equal(result, expected)\n\n # to object dtype\n result = np.asarray(arr, dtype=object)\n expected = np.array(list(arr), dtype=object)\n tm.assert_numpy_array_equal(result, expected)\n\n # to other dtype always copies\n result = np.asarray(arr, dtype="int64")\n assert result is not arr.asi8\n assert not np.may_share_memory(arr, result)\n expected = arr.asi8.copy()\n tm.assert_numpy_array_equal(result, expected)\n\n # other dtypes handled by numpy\n for dtype in ["float64", str]:\n result = np.asarray(arr, dtype=dtype)\n expected = np.asarray(arr).astype(dtype)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_take_fill_valid(self, timedelta_index, fixed_now_ts):\n tdi = timedelta_index\n arr = tdi._data\n\n td1 = pd.Timedelta(days=1)\n result = arr.take([-1, 1], allow_fill=True, fill_value=td1)\n assert result[0] == td1\n\n value = fixed_now_ts\n msg = f"value should be a '{arr._scalar_type.__name__}' or 'NaT'. 
Got"\n with pytest.raises(TypeError, match=msg):\n # fill_value Timestamp invalid\n arr.take([0, 1], allow_fill=True, fill_value=value)\n\n value = fixed_now_ts.to_period("D")\n with pytest.raises(TypeError, match=msg):\n # fill_value Period invalid\n arr.take([0, 1], allow_fill=True, fill_value=value)\n\n value = np.datetime64("NaT", "ns")\n with pytest.raises(TypeError, match=msg):\n # require appropriate-dtype if we have a NA value\n arr.take([-1, 1], allow_fill=True, fill_value=value)\n\n\n@pytest.mark.filterwarnings(r"ignore:Period with BDay freq is deprecated:FutureWarning")\n@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")\nclass TestPeriodArray(SharedTests):\n index_cls = PeriodIndex\n array_cls = PeriodArray\n scalar_type = Period\n example_dtype = PeriodIndex([], freq="W").dtype\n\n @pytest.fixture\n def arr1d(self, period_index):\n """\n Fixture returning DatetimeArray from parametrized PeriodIndex objects\n """\n return period_index._data\n\n def test_from_pi(self, arr1d):\n pi = self.index_cls(arr1d)\n arr = arr1d\n assert list(arr) == list(pi)\n\n # Check that Index.__new__ knows what to do with PeriodArray\n pi2 = pd.Index(arr)\n assert isinstance(pi2, PeriodIndex)\n assert list(pi2) == list(arr)\n\n def test_astype_object(self, arr1d):\n pi = self.index_cls(arr1d)\n arr = arr1d\n asobj = arr.astype("O")\n assert isinstance(asobj, np.ndarray)\n assert asobj.dtype == "O"\n assert list(asobj) == list(pi)\n\n def test_take_fill_valid(self, arr1d):\n arr = arr1d\n\n value = NaT._value\n msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. 
Got"\n with pytest.raises(TypeError, match=msg):\n # require NaT, not iNaT, as it could be confused with an integer\n arr.take([-1, 1], allow_fill=True, fill_value=value)\n\n value = np.timedelta64("NaT", "ns")\n with pytest.raises(TypeError, match=msg):\n # require appropriate-dtype if we have a NA value\n arr.take([-1, 1], allow_fill=True, fill_value=value)\n\n @pytest.mark.parametrize("how", ["S", "E"])\n def test_to_timestamp(self, how, arr1d):\n pi = self.index_cls(arr1d)\n arr = arr1d\n\n expected = DatetimeIndex(pi.to_timestamp(how=how))._data\n result = arr.to_timestamp(how=how)\n assert isinstance(result, DatetimeArray)\n\n tm.assert_equal(result, expected)\n\n def test_to_timestamp_roundtrip_bday(self):\n # Case where infer_freq inside would choose "D" instead of "B"\n dta = pd.date_range("2021-10-18", periods=3, freq="B")._data\n parr = dta.to_period()\n result = parr.to_timestamp()\n assert result.freq == "B"\n tm.assert_extension_array_equal(result, dta)\n\n dta2 = dta[::2]\n parr2 = dta2.to_period()\n result2 = parr2.to_timestamp()\n assert result2.freq == "2B"\n tm.assert_extension_array_equal(result2, dta2)\n\n parr3 = dta.to_period("2B")\n result3 = parr3.to_timestamp()\n assert result3.freq == "B"\n tm.assert_extension_array_equal(result3, dta)\n\n def test_to_timestamp_out_of_bounds(self):\n # GH#19643 previously overflowed silently\n pi = pd.period_range("1500", freq="Y", periods=3)\n msg = "Out of bounds nanosecond timestamp: 1500-01-01 00:00:00"\n with pytest.raises(OutOfBoundsDatetime, match=msg):\n pi.to_timestamp()\n\n with pytest.raises(OutOfBoundsDatetime, match=msg):\n pi._data.to_timestamp()\n\n @pytest.mark.parametrize("propname", PeriodArray._bool_ops)\n def test_bool_properties(self, arr1d, propname):\n # in this case _bool_ops is just `is_leap_year`\n pi = self.index_cls(arr1d)\n arr = arr1d\n\n result = getattr(arr, propname)\n expected = np.array(getattr(pi, propname))\n\n tm.assert_numpy_array_equal(result, expected)\n\n 
@pytest.mark.parametrize("propname", PeriodArray._field_ops)\n def test_int_properties(self, arr1d, propname):\n pi = self.index_cls(arr1d)\n arr = arr1d\n\n result = getattr(arr, propname)\n expected = np.array(getattr(pi, propname))\n\n tm.assert_numpy_array_equal(result, expected)\n\n def test_array_interface(self, arr1d):\n arr = arr1d\n\n # default asarray gives objects\n result = np.asarray(arr)\n expected = np.array(list(arr), dtype=object)\n tm.assert_numpy_array_equal(result, expected)\n\n # to object dtype (same as default)\n result = np.asarray(arr, dtype=object)\n tm.assert_numpy_array_equal(result, expected)\n\n # to int64 gives the underlying representation\n result = np.asarray(arr, dtype="int64")\n tm.assert_numpy_array_equal(result, arr.asi8)\n\n result2 = np.asarray(arr, dtype="int64")\n assert np.may_share_memory(result, result2)\n\n result_copy1 = np.array(arr, dtype="int64", copy=True)\n result_copy2 = np.array(arr, dtype="int64", copy=True)\n assert not np.may_share_memory(result_copy1, result_copy2)\n\n # to other dtypes\n msg = r"float\(\) argument must be a string or a( real)? 
number, not 'Period'"\n with pytest.raises(TypeError, match=msg):\n np.asarray(arr, dtype="float64")\n\n result = np.asarray(arr, dtype="S20")\n expected = np.asarray(arr).astype("S20")\n tm.assert_numpy_array_equal(result, expected)\n\n def test_strftime(self, arr1d, using_infer_string):\n arr = arr1d\n\n result = arr.strftime("%Y")\n expected = np.array([per.strftime("%Y") for per in arr], dtype=object)\n if using_infer_string:\n expected = pd.array(expected, dtype=pd.StringDtype(na_value=np.nan))\n tm.assert_equal(result, expected)\n\n def test_strftime_nat(self, using_infer_string):\n # GH 29578\n arr = PeriodArray(PeriodIndex(["2019-01-01", NaT], dtype="period[D]"))\n\n result = arr.strftime("%Y-%m-%d")\n expected = np.array(["2019-01-01", np.nan], dtype=object)\n if using_infer_string:\n expected = pd.array(expected, dtype=pd.StringDtype(na_value=np.nan))\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "arr,casting_nats",\n [\n (\n TimedeltaIndex(["1 Day", "3 Hours", "NaT"])._data,\n (NaT, np.timedelta64("NaT", "ns")),\n ),\n (\n pd.date_range("2000-01-01", periods=3, freq="D")._data,\n (NaT, np.datetime64("NaT", "ns")),\n ),\n (pd.period_range("2000-01-01", periods=3, freq="D")._data, (NaT,)),\n ],\n ids=lambda x: type(x).__name__,\n)\ndef test_casting_nat_setitem_array(arr, casting_nats):\n expected = type(arr)._from_sequence([NaT, arr[1], arr[2]], dtype=arr.dtype)\n\n for nat in casting_nats:\n arr = arr.copy()\n arr[0] = nat\n tm.assert_equal(arr, expected)\n\n\n@pytest.mark.parametrize(\n "arr,non_casting_nats",\n [\n (\n TimedeltaIndex(["1 Day", "3 Hours", "NaT"])._data,\n (np.datetime64("NaT", "ns"), NaT._value),\n ),\n (\n pd.date_range("2000-01-01", periods=3, freq="D")._data,\n (np.timedelta64("NaT", "ns"), NaT._value),\n ),\n (\n pd.period_range("2000-01-01", periods=3, freq="D")._data,\n (np.datetime64("NaT", "ns"), np.timedelta64("NaT", "ns"), NaT._value),\n ),\n ],\n ids=lambda x: type(x).__name__,\n)\ndef 
test_invalid_nat_setitem_array(arr, non_casting_nats):\n msg = (\n "value should be a '(Timestamp|Timedelta|Period)', 'NaT', or array of those. "\n "Got '(timedelta64|datetime64|int)' instead."\n )\n\n for nat in non_casting_nats:\n with pytest.raises(TypeError, match=msg):\n arr[0] = nat\n\n\n@pytest.mark.parametrize(\n "arr",\n [\n pd.date_range("2000", periods=4).array,\n pd.timedelta_range("2000", periods=4).array,\n ],\n)\ndef test_to_numpy_extra(arr):\n arr[0] = NaT\n original = arr.copy()\n\n result = arr.to_numpy()\n assert np.isnan(result[0])\n\n result = arr.to_numpy(dtype="int64")\n assert result[0] == -9223372036854775808\n\n result = arr.to_numpy(dtype="int64", na_value=0)\n assert result[0] == 0\n\n result = arr.to_numpy(na_value=arr[1].to_numpy())\n assert result[0] == result[1]\n\n result = arr.to_numpy(na_value=arr[1].to_numpy(copy=False))\n assert result[0] == result[1]\n\n tm.assert_equal(arr, original)\n\n\n@pytest.mark.parametrize("as_index", [True, False])\n@pytest.mark.parametrize(\n "values",\n [\n pd.to_datetime(["2020-01-01", "2020-02-01"]),\n pd.to_timedelta([1, 2], unit="D"),\n PeriodIndex(["2020-01-01", "2020-02-01"], freq="D"),\n ],\n)\n@pytest.mark.parametrize(\n "klass",\n [\n list,\n np.array,\n pd.array,\n pd.Series,\n pd.Index,\n pd.Categorical,\n pd.CategoricalIndex,\n ],\n)\ndef test_searchsorted_datetimelike_with_listlike(values, klass, as_index):\n # https://github.com/pandas-dev/pandas/issues/32762\n if not as_index:\n values = values._data\n\n result = values.searchsorted(klass(values))\n expected = np.array([0, 1], dtype=result.dtype)\n\n tm.assert_numpy_array_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "values",\n [\n pd.to_datetime(["2020-01-01", "2020-02-01"]),\n pd.to_timedelta([1, 2], unit="D"),\n PeriodIndex(["2020-01-01", "2020-02-01"], freq="D"),\n ],\n)\n@pytest.mark.parametrize(\n "arg", [[1, 2], ["a", "b"], [Timestamp("2020-01-01", tz="Europe/London")] * 2]\n)\ndef 
test_searchsorted_datetimelike_with_listlike_invalid_dtype(values, arg):\n # https://github.com/pandas-dev/pandas/issues/32762\n msg = "[Unexpected type|Cannot compare]"\n with pytest.raises(TypeError, match=msg):\n values.searchsorted(arg)\n\n\n@pytest.mark.parametrize("klass", [list, tuple, np.array, pd.Series])\ndef test_period_index_construction_from_strings(klass):\n # https://github.com/pandas-dev/pandas/issues/26109\n strings = ["2020Q1", "2020Q2"] * 2\n data = klass(strings)\n result = PeriodIndex(data, freq="Q")\n expected = PeriodIndex([Period(s) for s in strings])\n tm.assert_index_equal(result, expected)\n\n\n@pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"])\ndef test_from_pandas_array(dtype):\n # GH#24615\n data = np.array([1, 2, 3], dtype=dtype)\n arr = NumpyExtensionArray(data)\n\n cls = {"M8[ns]": DatetimeArray, "m8[ns]": TimedeltaArray}[dtype]\n\n depr_msg = f"{cls.__name__}.__init__ is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=depr_msg):\n result = cls(arr)\n expected = cls(data)\n tm.assert_extension_array_equal(result, expected)\n\n result = cls._from_sequence(arr, dtype=dtype)\n expected = cls._from_sequence(data, dtype=dtype)\n tm.assert_extension_array_equal(result, expected)\n\n func = {"M8[ns]": pd.to_datetime, "m8[ns]": pd.to_timedelta}[dtype]\n result = func(arr).array\n expected = func(data).array\n tm.assert_equal(result, expected)\n\n # Let's check the Indexes while we're here\n idx_cls = {"M8[ns]": DatetimeIndex, "m8[ns]": TimedeltaIndex}[dtype]\n result = idx_cls(arr)\n expected = idx_cls(data)\n tm.assert_index_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\arrays\test_datetimelike.py | test_datetimelike.py | Python | 46,254 | 0.95 | 0.104412 | 0.079044 | python-kit | 502 | 2024-07-27T00:23:23.773248 | MIT | true | 09c9c2bb7d7ec888676f0ef3937ccc7f |
"""\nTests for DatetimeArray\n"""\nfrom __future__ import annotations\n\nfrom datetime import timedelta\nimport operator\n\ntry:\n from zoneinfo import ZoneInfo\nexcept ImportError:\n # Cannot assign to a type\n ZoneInfo = None # type: ignore[misc, assignment]\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs import tz_compare\n\nfrom pandas.core.dtypes.dtypes import DatetimeTZDtype\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.core.arrays import (\n DatetimeArray,\n TimedeltaArray,\n)\n\n\nclass TestNonNano:\n @pytest.fixture(params=["s", "ms", "us"])\n def unit(self, request):\n """Fixture returning parametrized time units"""\n return request.param\n\n @pytest.fixture\n def dtype(self, unit, tz_naive_fixture):\n tz = tz_naive_fixture\n if tz is None:\n return np.dtype(f"datetime64[{unit}]")\n else:\n return DatetimeTZDtype(unit=unit, tz=tz)\n\n @pytest.fixture\n def dta_dti(self, unit, dtype):\n tz = getattr(dtype, "tz", None)\n\n dti = pd.date_range("2016-01-01", periods=55, freq="D", tz=tz)\n if tz is None:\n arr = np.asarray(dti).astype(f"M8[{unit}]")\n else:\n arr = np.asarray(dti.tz_convert("UTC").tz_localize(None)).astype(\n f"M8[{unit}]"\n )\n\n dta = DatetimeArray._simple_new(arr, dtype=dtype)\n return dta, dti\n\n @pytest.fixture\n def dta(self, dta_dti):\n dta, dti = dta_dti\n return dta\n\n def test_non_nano(self, unit, dtype):\n arr = np.arange(5, dtype=np.int64).view(f"M8[{unit}]")\n dta = DatetimeArray._simple_new(arr, dtype=dtype)\n\n assert dta.dtype == dtype\n assert dta[0].unit == unit\n assert tz_compare(dta.tz, dta[0].tz)\n assert (dta[0] == dta[:1]).all()\n\n @pytest.mark.parametrize(\n "field", DatetimeArray._field_ops + DatetimeArray._bool_ops\n )\n def test_fields(self, unit, field, dtype, dta_dti):\n dta, dti = dta_dti\n\n assert (dti == dta).all()\n\n res = getattr(dta, field)\n expected = getattr(dti._data, field)\n tm.assert_numpy_array_equal(res, expected)\n\n def test_normalize(self, unit):\n dti = 
pd.date_range("2016-01-01 06:00:00", periods=55, freq="D")\n arr = np.asarray(dti).astype(f"M8[{unit}]")\n\n dta = DatetimeArray._simple_new(arr, dtype=arr.dtype)\n\n assert not dta.is_normalized\n\n # TODO: simplify once we can just .astype to other unit\n exp = np.asarray(dti.normalize()).astype(f"M8[{unit}]")\n expected = DatetimeArray._simple_new(exp, dtype=exp.dtype)\n\n res = dta.normalize()\n tm.assert_extension_array_equal(res, expected)\n\n def test_simple_new_requires_match(self, unit):\n arr = np.arange(5, dtype=np.int64).view(f"M8[{unit}]")\n dtype = DatetimeTZDtype(unit, "UTC")\n\n dta = DatetimeArray._simple_new(arr, dtype=dtype)\n assert dta.dtype == dtype\n\n wrong = DatetimeTZDtype("ns", "UTC")\n with pytest.raises(AssertionError, match=""):\n DatetimeArray._simple_new(arr, dtype=wrong)\n\n def test_std_non_nano(self, unit):\n dti = pd.date_range("2016-01-01", periods=55, freq="D")\n arr = np.asarray(dti).astype(f"M8[{unit}]")\n\n dta = DatetimeArray._simple_new(arr, dtype=arr.dtype)\n\n # we should match the nano-reso std, but floored to our reso.\n res = dta.std()\n assert res._creso == dta._creso\n assert res == dti.std().floor(unit)\n\n @pytest.mark.filterwarnings("ignore:Converting to PeriodArray.*:UserWarning")\n def test_to_period(self, dta_dti):\n dta, dti = dta_dti\n result = dta.to_period("D")\n expected = dti._data.to_period("D")\n\n tm.assert_extension_array_equal(result, expected)\n\n def test_iter(self, dta):\n res = next(iter(dta))\n expected = dta[0]\n\n assert type(res) is pd.Timestamp\n assert res._value == expected._value\n assert res._creso == expected._creso\n assert res == expected\n\n def test_astype_object(self, dta):\n result = dta.astype(object)\n assert all(x._creso == dta._creso for x in result)\n assert all(x == y for x, y in zip(result, dta))\n\n def test_to_pydatetime(self, dta_dti):\n dta, dti = dta_dti\n\n result = dta.to_pydatetime()\n expected = dti.to_pydatetime()\n tm.assert_numpy_array_equal(result, 
expected)\n\n @pytest.mark.parametrize("meth", ["time", "timetz", "date"])\n def test_time_date(self, dta_dti, meth):\n dta, dti = dta_dti\n\n result = getattr(dta, meth)\n expected = getattr(dti, meth)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_format_native_types(self, unit, dtype, dta_dti):\n # In this case we should get the same formatted values with our nano\n # version dti._data as we do with the non-nano dta\n dta, dti = dta_dti\n\n res = dta._format_native_types()\n exp = dti._data._format_native_types()\n tm.assert_numpy_array_equal(res, exp)\n\n def test_repr(self, dta_dti, unit):\n dta, dti = dta_dti\n\n assert repr(dta) == repr(dti._data).replace("[ns", f"[{unit}")\n\n # TODO: tests with td64\n def test_compare_mismatched_resolutions(self, comparison_op):\n # comparison that numpy gets wrong bc of silent overflows\n op = comparison_op\n\n iinfo = np.iinfo(np.int64)\n vals = np.array([iinfo.min, iinfo.min + 1, iinfo.max], dtype=np.int64)\n\n # Construct so that arr2[1] < arr[1] < arr[2] < arr2[2]\n arr = np.array(vals).view("M8[ns]")\n arr2 = arr.view("M8[s]")\n\n left = DatetimeArray._simple_new(arr, dtype=arr.dtype)\n right = DatetimeArray._simple_new(arr2, dtype=arr2.dtype)\n\n if comparison_op is operator.eq:\n expected = np.array([False, False, False])\n elif comparison_op is operator.ne:\n expected = np.array([True, True, True])\n elif comparison_op in [operator.lt, operator.le]:\n expected = np.array([False, False, True])\n else:\n expected = np.array([False, True, False])\n\n result = op(left, right)\n tm.assert_numpy_array_equal(result, expected)\n\n result = op(left[1], right)\n tm.assert_numpy_array_equal(result, expected)\n\n if op not in [operator.eq, operator.ne]:\n # check that numpy still gets this wrong; if it is fixed we may be\n # able to remove compare_mismatched_resolutions\n np_res = op(left._ndarray, right._ndarray)\n tm.assert_numpy_array_equal(np_res[1:], ~expected[1:])\n\n def 
test_add_mismatched_reso_doesnt_downcast(self):\n # https://github.com/pandas-dev/pandas/pull/48748#issuecomment-1260181008\n td = pd.Timedelta(microseconds=1)\n dti = pd.date_range("2016-01-01", periods=3) - td\n dta = dti._data.as_unit("us")\n\n res = dta + td.as_unit("us")\n # even though the result is an even number of days\n # (so we _could_ downcast to unit="s"), we do not.\n assert res.unit == "us"\n\n @pytest.mark.parametrize(\n "scalar",\n [\n timedelta(hours=2),\n pd.Timedelta(hours=2),\n np.timedelta64(2, "h"),\n np.timedelta64(2 * 3600 * 1000, "ms"),\n pd.offsets.Minute(120),\n pd.offsets.Hour(2),\n ],\n )\n def test_add_timedeltalike_scalar_mismatched_reso(self, dta_dti, scalar):\n dta, dti = dta_dti\n\n td = pd.Timedelta(scalar)\n exp_unit = tm.get_finest_unit(dta.unit, td.unit)\n\n expected = (dti + td)._data.as_unit(exp_unit)\n result = dta + scalar\n tm.assert_extension_array_equal(result, expected)\n\n result = scalar + dta\n tm.assert_extension_array_equal(result, expected)\n\n expected = (dti - td)._data.as_unit(exp_unit)\n result = dta - scalar\n tm.assert_extension_array_equal(result, expected)\n\n def test_sub_datetimelike_scalar_mismatch(self):\n dti = pd.date_range("2016-01-01", periods=3)\n dta = dti._data.as_unit("us")\n\n ts = dta[0].as_unit("s")\n\n result = dta - ts\n expected = (dti - dti[0])._data.as_unit("us")\n assert result.dtype == "m8[us]"\n tm.assert_extension_array_equal(result, expected)\n\n def test_sub_datetime64_reso_mismatch(self):\n dti = pd.date_range("2016-01-01", periods=3)\n left = dti._data.as_unit("s")\n right = left.as_unit("ms")\n\n result = left - right\n exp_values = np.array([0, 0, 0], dtype="m8[ms]")\n expected = TimedeltaArray._simple_new(\n exp_values,\n dtype=exp_values.dtype,\n )\n tm.assert_extension_array_equal(result, expected)\n result2 = right - left\n tm.assert_extension_array_equal(result2, expected)\n\n\nclass TestDatetimeArrayComparisons:\n # TODO: merge this into tests/arithmetic/test_datetime64 
once it is\n # sufficiently robust\n\n def test_cmp_dt64_arraylike_tznaive(self, comparison_op):\n # arbitrary tz-naive DatetimeIndex\n op = comparison_op\n\n dti = pd.date_range("2016-01-1", freq="MS", periods=9, tz=None)\n arr = dti._data\n assert arr.freq == dti.freq\n assert arr.tz == dti.tz\n\n right = dti\n\n expected = np.ones(len(arr), dtype=bool)\n if comparison_op.__name__ in ["ne", "gt", "lt"]:\n # for these the comparisons should be all-False\n expected = ~expected\n\n result = op(arr, arr)\n tm.assert_numpy_array_equal(result, expected)\n for other in [\n right,\n np.array(right),\n list(right),\n tuple(right),\n right.astype(object),\n ]:\n result = op(arr, other)\n tm.assert_numpy_array_equal(result, expected)\n\n result = op(other, arr)\n tm.assert_numpy_array_equal(result, expected)\n\n\nclass TestDatetimeArray:\n def test_astype_ns_to_ms_near_bounds(self):\n # GH#55979\n ts = pd.Timestamp("1677-09-21 00:12:43.145225")\n target = ts.as_unit("ms")\n\n dta = DatetimeArray._from_sequence([ts], dtype="M8[ns]")\n assert (dta.view("i8") == ts.as_unit("ns").value).all()\n\n result = dta.astype("M8[ms]")\n assert result[0] == target\n\n expected = DatetimeArray._from_sequence([ts], dtype="M8[ms]")\n assert (expected.view("i8") == target._value).all()\n\n tm.assert_datetime_array_equal(result, expected)\n\n def test_astype_non_nano_tznaive(self):\n dti = pd.date_range("2016-01-01", periods=3)\n\n res = dti.astype("M8[s]")\n assert res.dtype == "M8[s]"\n\n dta = dti._data\n res = dta.astype("M8[s]")\n assert res.dtype == "M8[s]"\n assert isinstance(res, pd.core.arrays.DatetimeArray) # used to be ndarray\n\n def test_astype_non_nano_tzaware(self):\n dti = pd.date_range("2016-01-01", periods=3, tz="UTC")\n\n res = dti.astype("M8[s, US/Pacific]")\n assert res.dtype == "M8[s, US/Pacific]"\n\n dta = dti._data\n res = dta.astype("M8[s, US/Pacific]")\n assert res.dtype == "M8[s, US/Pacific]"\n\n # from non-nano to non-nano, preserving reso\n res2 = 
res.astype("M8[s, UTC]")\n assert res2.dtype == "M8[s, UTC]"\n assert not tm.shares_memory(res2, res)\n\n res3 = res.astype("M8[s, UTC]", copy=False)\n assert res2.dtype == "M8[s, UTC]"\n assert tm.shares_memory(res3, res)\n\n def test_astype_to_same(self):\n arr = DatetimeArray._from_sequence(\n ["2000"], dtype=DatetimeTZDtype(tz="US/Central")\n )\n result = arr.astype(DatetimeTZDtype(tz="US/Central"), copy=False)\n assert result is arr\n\n @pytest.mark.parametrize("dtype", ["datetime64[ns]", "datetime64[ns, UTC]"])\n @pytest.mark.parametrize(\n "other", ["datetime64[ns]", "datetime64[ns, UTC]", "datetime64[ns, CET]"]\n )\n def test_astype_copies(self, dtype, other):\n # https://github.com/pandas-dev/pandas/pull/32490\n ser = pd.Series([1, 2], dtype=dtype)\n orig = ser.copy()\n\n err = False\n if (dtype == "datetime64[ns]") ^ (other == "datetime64[ns]"):\n # deprecated in favor of tz_localize\n err = True\n\n if err:\n if dtype == "datetime64[ns]":\n msg = "Use obj.tz_localize instead or series.dt.tz_localize instead"\n else:\n msg = "from timezone-aware dtype to timezone-naive dtype"\n with pytest.raises(TypeError, match=msg):\n ser.astype(other)\n else:\n t = ser.astype(other)\n t[:] = pd.NaT\n tm.assert_series_equal(ser, orig)\n\n @pytest.mark.parametrize("dtype", [int, np.int32, np.int64, "uint32", "uint64"])\n def test_astype_int(self, dtype):\n arr = DatetimeArray._from_sequence(\n [pd.Timestamp("2000"), pd.Timestamp("2001")], dtype="M8[ns]"\n )\n\n if np.dtype(dtype) != np.int64:\n with pytest.raises(TypeError, match=r"Do obj.astype\('int64'\)"):\n arr.astype(dtype)\n return\n\n result = arr.astype(dtype)\n expected = arr._ndarray.view("i8")\n tm.assert_numpy_array_equal(result, expected)\n\n def test_astype_to_sparse_dt64(self):\n # GH#50082\n dti = pd.date_range("2016-01-01", periods=4)\n dta = dti._data\n result = dta.astype("Sparse[datetime64[ns]]")\n\n assert result.dtype == "Sparse[datetime64[ns]]"\n assert (result == dta).all()\n\n def 
test_tz_setter_raises(self):\n arr = DatetimeArray._from_sequence(\n ["2000"], dtype=DatetimeTZDtype(tz="US/Central")\n )\n with pytest.raises(AttributeError, match="tz_localize"):\n arr.tz = "UTC"\n\n def test_setitem_str_impute_tz(self, tz_naive_fixture):\n # Like for getitem, if we are passed a naive-like string, we impute\n # our own timezone.\n tz = tz_naive_fixture\n\n data = np.array([1, 2, 3], dtype="M8[ns]")\n dtype = data.dtype if tz is None else DatetimeTZDtype(tz=tz)\n arr = DatetimeArray._from_sequence(data, dtype=dtype)\n expected = arr.copy()\n\n ts = pd.Timestamp("2020-09-08 16:50").tz_localize(tz)\n setter = str(ts.tz_localize(None))\n\n # Setting a scalar tznaive string\n expected[0] = ts\n arr[0] = setter\n tm.assert_equal(arr, expected)\n\n # Setting a listlike of tznaive strings\n expected[1] = ts\n arr[:2] = [setter, setter]\n tm.assert_equal(arr, expected)\n\n def test_setitem_different_tz_raises(self):\n # pre-2.0 we required exact tz match, in 2.0 we require only\n # tzawareness-match\n data = np.array([1, 2, 3], dtype="M8[ns]")\n arr = DatetimeArray._from_sequence(\n data, copy=False, dtype=DatetimeTZDtype(tz="US/Central")\n )\n with pytest.raises(TypeError, match="Cannot compare tz-naive and tz-aware"):\n arr[0] = pd.Timestamp("2000")\n\n ts = pd.Timestamp("2000", tz="US/Eastern")\n arr[0] = ts\n assert arr[0] == ts.tz_convert("US/Central")\n\n def test_setitem_clears_freq(self):\n a = pd.date_range("2000", periods=2, freq="D", tz="US/Central")._data\n a[0] = pd.Timestamp("2000", tz="US/Central")\n assert a.freq is None\n\n @pytest.mark.parametrize(\n "obj",\n [\n pd.Timestamp("2021-01-01"),\n pd.Timestamp("2021-01-01").to_datetime64(),\n pd.Timestamp("2021-01-01").to_pydatetime(),\n ],\n )\n def test_setitem_objects(self, obj):\n # make sure we accept datetime64 and datetime in addition to Timestamp\n dti = pd.date_range("2000", periods=2, freq="D")\n arr = dti._data\n\n arr[0] = obj\n assert arr[0] == obj\n\n def 
test_repeat_preserves_tz(self):\n dti = pd.date_range("2000", periods=2, freq="D", tz="US/Central")\n arr = dti._data\n\n repeated = arr.repeat([1, 1])\n\n # preserves tz and values, but not freq\n expected = DatetimeArray._from_sequence(arr.asi8, dtype=arr.dtype)\n tm.assert_equal(repeated, expected)\n\n def test_value_counts_preserves_tz(self):\n dti = pd.date_range("2000", periods=2, freq="D", tz="US/Central")\n arr = dti._data.repeat([4, 3])\n\n result = arr.value_counts()\n\n # Note: not tm.assert_index_equal, since `freq`s do not match\n assert result.index.equals(dti)\n\n arr[-2] = pd.NaT\n result = arr.value_counts(dropna=False)\n expected = pd.Series([4, 2, 1], index=[dti[0], dti[1], pd.NaT], name="count")\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("method", ["pad", "backfill"])\n def test_fillna_preserves_tz(self, method):\n dti = pd.date_range("2000-01-01", periods=5, freq="D", tz="US/Central")\n arr = DatetimeArray._from_sequence(dti, copy=True)\n arr[2] = pd.NaT\n\n fill_val = dti[1] if method == "pad" else dti[3]\n expected = DatetimeArray._from_sequence(\n [dti[0], dti[1], fill_val, dti[3], dti[4]],\n dtype=DatetimeTZDtype(tz="US/Central"),\n )\n\n result = arr._pad_or_backfill(method=method)\n tm.assert_extension_array_equal(result, expected)\n\n # assert that arr and dti were not modified in-place\n assert arr[2] is pd.NaT\n assert dti[2] == pd.Timestamp("2000-01-03", tz="US/Central")\n\n def test_fillna_2d(self):\n dti = pd.date_range("2016-01-01", periods=6, tz="US/Pacific")\n dta = dti._data.reshape(3, 2).copy()\n dta[0, 1] = pd.NaT\n dta[1, 0] = pd.NaT\n\n res1 = dta._pad_or_backfill(method="pad")\n expected1 = dta.copy()\n expected1[1, 0] = dta[0, 0]\n tm.assert_extension_array_equal(res1, expected1)\n\n res2 = dta._pad_or_backfill(method="backfill")\n expected2 = dta.copy()\n expected2 = dta.copy()\n expected2[1, 0] = dta[2, 0]\n expected2[0, 1] = dta[1, 1]\n tm.assert_extension_array_equal(res2, expected2)\n\n # 
with different ordering for underlying ndarray; behavior should
        # be unchanged
        dta2 = dta._from_backing_data(dta._ndarray.copy(order="F"))
        assert dta2._ndarray.flags["F_CONTIGUOUS"]
        assert not dta2._ndarray.flags["C_CONTIGUOUS"]
        tm.assert_extension_array_equal(dta, dta2)

        res3 = dta2._pad_or_backfill(method="pad")
        tm.assert_extension_array_equal(res3, expected1)

        res4 = dta2._pad_or_backfill(method="backfill")
        tm.assert_extension_array_equal(res4, expected2)

        # test the DataFrame method while we're here
        df = pd.DataFrame(dta)
        res = df.ffill()
        expected = pd.DataFrame(expected1)
        tm.assert_frame_equal(res, expected)

        res = df.bfill()
        expected = pd.DataFrame(expected2)
        tm.assert_frame_equal(res, expected)

    def test_array_interface_tz(self):
        # np.asarray on a tz-aware array defaults to an object array of
        # tz-aware Timestamps.
        tz = "US/Central"
        data = pd.date_range("2017", periods=2, tz=tz)._data
        result = np.asarray(data)

        expected = np.array(
            [
                pd.Timestamp("2017-01-01T00:00:00", tz=tz),
                pd.Timestamp("2017-01-02T00:00:00", tz=tz),
            ],
            dtype=object,
        )
        tm.assert_numpy_array_equal(result, expected)

        result = np.asarray(data, dtype=object)
        tm.assert_numpy_array_equal(result, expected)

        # requesting M8[ns] drops the tz; values come back as UTC wall times
        # (06:00 UTC == midnight US/Central, per the expected values below)
        result = np.asarray(data, dtype="M8[ns]")

        expected = np.array(
            ["2017-01-01T06:00:00", "2017-01-02T06:00:00"], dtype="M8[ns]"
        )
        tm.assert_numpy_array_equal(result, expected)

    def test_array_interface(self):
        # tz-naive: default conversion is the raw M8[ns] values; object dtype
        # gives naive Timestamps.
        data = pd.date_range("2017", periods=2)._data
        expected = np.array(
            ["2017-01-01T00:00:00", "2017-01-02T00:00:00"], dtype="datetime64[ns]"
        )

        result = np.asarray(data)
        tm.assert_numpy_array_equal(result, expected)

        result = np.asarray(data, dtype=object)
        expected = np.array(
            [pd.Timestamp("2017-01-01T00:00:00"), pd.Timestamp("2017-01-02T00:00:00")],
            dtype=object,
        )
        tm.assert_numpy_array_equal(result, expected)

    @pytest.mark.parametrize("index", [True, False])
    def test_searchsorted_different_tz(self, index):
        data = np.arange(10, dtype="i8") * 24 * 3600 * 
10**9\n arr = pd.DatetimeIndex(data, freq="D")._data.tz_localize("Asia/Tokyo")\n if index:\n arr = pd.Index(arr)\n\n expected = arr.searchsorted(arr[2])\n result = arr.searchsorted(arr[2].tz_convert("UTC"))\n assert result == expected\n\n expected = arr.searchsorted(arr[2:6])\n result = arr.searchsorted(arr[2:6].tz_convert("UTC"))\n tm.assert_equal(result, expected)\n\n @pytest.mark.parametrize("index", [True, False])\n def test_searchsorted_tzawareness_compat(self, index):\n data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9\n arr = pd.DatetimeIndex(data, freq="D")._data\n if index:\n arr = pd.Index(arr)\n\n mismatch = arr.tz_localize("Asia/Tokyo")\n\n msg = "Cannot compare tz-naive and tz-aware datetime-like objects"\n with pytest.raises(TypeError, match=msg):\n arr.searchsorted(mismatch[0])\n with pytest.raises(TypeError, match=msg):\n arr.searchsorted(mismatch)\n\n with pytest.raises(TypeError, match=msg):\n mismatch.searchsorted(arr[0])\n with pytest.raises(TypeError, match=msg):\n mismatch.searchsorted(arr)\n\n @pytest.mark.parametrize(\n "other",\n [\n 1,\n np.int64(1),\n 1.0,\n np.timedelta64("NaT"),\n pd.Timedelta(days=2),\n "invalid",\n np.arange(10, dtype="i8") * 24 * 3600 * 10**9,\n np.arange(10).view("timedelta64[ns]") * 24 * 3600 * 10**9,\n pd.Timestamp("2021-01-01").to_period("D"),\n ],\n )\n @pytest.mark.parametrize("index", [True, False])\n def test_searchsorted_invalid_types(self, other, index):\n data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9\n arr = pd.DatetimeIndex(data, freq="D")._data\n if index:\n arr = pd.Index(arr)\n\n msg = "|".join(\n [\n "searchsorted requires compatible dtype or scalar",\n "value should be a 'Timestamp', 'NaT', or array of those. 
Got",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n arr.searchsorted(other)\n\n def test_shift_fill_value(self):\n dti = pd.date_range("2016-01-01", periods=3)\n\n dta = dti._data\n expected = DatetimeArray._from_sequence(np.roll(dta._ndarray, 1))\n\n fv = dta[-1]\n for fill_value in [fv, fv.to_pydatetime(), fv.to_datetime64()]:\n result = dta.shift(1, fill_value=fill_value)\n tm.assert_datetime_array_equal(result, expected)\n\n dta = dta.tz_localize("UTC")\n expected = expected.tz_localize("UTC")\n fv = dta[-1]\n for fill_value in [fv, fv.to_pydatetime()]:\n result = dta.shift(1, fill_value=fill_value)\n tm.assert_datetime_array_equal(result, expected)\n\n def test_shift_value_tzawareness_mismatch(self):\n dti = pd.date_range("2016-01-01", periods=3)\n\n dta = dti._data\n\n fv = dta[-1].tz_localize("UTC")\n for invalid in [fv, fv.to_pydatetime()]:\n with pytest.raises(TypeError, match="Cannot compare"):\n dta.shift(1, fill_value=invalid)\n\n dta = dta.tz_localize("UTC")\n fv = dta[-1].tz_localize(None)\n for invalid in [fv, fv.to_pydatetime(), fv.to_datetime64()]:\n with pytest.raises(TypeError, match="Cannot compare"):\n dta.shift(1, fill_value=invalid)\n\n def test_shift_requires_tzmatch(self):\n # pre-2.0 we required exact tz match, in 2.0 we require just\n # matching tzawareness\n dti = pd.date_range("2016-01-01", periods=3, tz="UTC")\n dta = dti._data\n\n fill_value = pd.Timestamp("2020-10-18 18:44", tz="US/Pacific")\n\n result = dta.shift(1, fill_value=fill_value)\n expected = dta.shift(1, fill_value=fill_value.tz_convert("UTC"))\n tm.assert_equal(result, expected)\n\n def test_tz_localize_t2d(self):\n dti = pd.date_range("1994-05-12", periods=12, tz="US/Pacific")\n dta = dti._data.reshape(3, 4)\n result = dta.tz_localize(None)\n\n expected = dta.ravel().tz_localize(None).reshape(dta.shape)\n tm.assert_datetime_array_equal(result, expected)\n\n roundtrip = expected.tz_localize("US/Pacific")\n tm.assert_datetime_array_equal(roundtrip, dta)\n\n easts = 
["US/Eastern", "dateutil/US/Eastern"]\n if ZoneInfo is not None:\n try:\n tz = ZoneInfo("US/Eastern")\n except KeyError:\n # no tzdata\n pass\n else:\n # Argument 1 to "append" of "list" has incompatible type "ZoneInfo";\n # expected "str"\n easts.append(tz) # type: ignore[arg-type]\n\n @pytest.mark.parametrize("tz", easts)\n def test_iter_zoneinfo_fold(self, tz):\n # GH#49684\n utc_vals = np.array(\n [1320552000, 1320555600, 1320559200, 1320562800], dtype=np.int64\n )\n utc_vals *= 1_000_000_000\n\n dta = DatetimeArray._from_sequence(utc_vals).tz_localize("UTC").tz_convert(tz)\n\n left = dta[2]\n right = list(dta)[2]\n assert str(left) == str(right)\n # previously there was a bug where with non-pytz right would be\n # Timestamp('2011-11-06 01:00:00-0400', tz='US/Eastern')\n # while left would be\n # Timestamp('2011-11-06 01:00:00-0500', tz='US/Eastern')\n # The .value's would match (so they would compare as equal),\n # but the folds would not\n assert left.utcoffset() == right.utcoffset()\n\n # The same bug in ints_to_pydatetime affected .astype, so we test\n # that here.\n right2 = dta.astype(object)[2]\n assert str(left) == str(right2)\n assert left.utcoffset() == right2.utcoffset()\n\n @pytest.mark.parametrize(\n "freq, freq_depr",\n [\n ("2ME", "2M"),\n ("2SME", "2SM"),\n ("2SME", "2sm"),\n ("2QE", "2Q"),\n ("2QE-SEP", "2Q-SEP"),\n ("1YE", "1Y"),\n ("2YE-MAR", "2Y-MAR"),\n ("1YE", "1A"),\n ("2YE-MAR", "2A-MAR"),\n ("2ME", "2m"),\n ("2QE-SEP", "2q-sep"),\n ("2YE-MAR", "2a-mar"),\n ("2YE", "2y"),\n ],\n )\n def test_date_range_frequency_M_Q_Y_A_deprecated(self, freq, freq_depr):\n # GH#9586, GH#54275\n depr_msg = f"'{freq_depr[1:]}' is deprecated and will be removed "\n f"in a future version, please use '{freq[1:]}' instead."\n\n expected = pd.date_range("1/1/2000", periods=4, freq=freq)\n with tm.assert_produces_warning(FutureWarning, match=depr_msg):\n result = pd.date_range("1/1/2000", periods=4, freq=freq_depr)\n tm.assert_index_equal(result, expected)\n\n 
@pytest.mark.parametrize("freq_depr", ["2H", "2CBH", "2MIN", "2S", "2mS", "2Us"])\n def test_date_range_uppercase_frequency_deprecated(self, freq_depr):\n # GH#9586, GH#54939\n depr_msg = f"'{freq_depr[1:]}' is deprecated and will be removed in a "\n f"future version. Please use '{freq_depr.lower()[1:]}' instead."\n\n expected = pd.date_range("1/1/2000", periods=4, freq=freq_depr.lower())\n with tm.assert_produces_warning(FutureWarning, match=depr_msg):\n result = pd.date_range("1/1/2000", periods=4, freq=freq_depr)\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n "freq_depr",\n [\n "2ye-mar",\n "2ys",\n "2qe",\n "2qs-feb",\n "2bqs",\n "2sms",\n "2bms",\n "2cbme",\n "2me",\n "2w",\n ],\n )\n def test_date_range_lowercase_frequency_deprecated(self, freq_depr):\n # GH#9586, GH#54939\n depr_msg = f"'{freq_depr[1:]}' is deprecated and will be removed in a "\n f"future version, please use '{freq_depr.upper()[1:]}' instead."\n\n expected = pd.date_range("1/1/2000", periods=4, freq=freq_depr.upper())\n with tm.assert_produces_warning(FutureWarning, match=depr_msg):\n result = pd.date_range("1/1/2000", periods=4, freq=freq_depr)\n tm.assert_index_equal(result, expected)\n\n\ndef test_factorize_sort_without_freq():\n dta = DatetimeArray._from_sequence([0, 2, 1], dtype="M8[ns]")\n\n msg = r"call pd.factorize\(obj, sort=True\) instead"\n with pytest.raises(NotImplementedError, match=msg):\n dta.factorize(sort=True)\n\n # Do TimedeltaArray while we're here\n tda = dta - dta[0]\n with pytest.raises(NotImplementedError, match=msg):\n tda.factorize(sort=True)\n | .venv\Lib\site-packages\pandas\tests\arrays\test_datetimes.py | test_datetimes.py | Python | 29,112 | 0.95 | 0.104762 | 0.078987 | awesome-app | 259 | 2024-07-24T10:35:03.273224 | MIT | true | ab47e8fec627a01cf891e721c66d6e07 |
"""
Tests for subclasses of NDArrayBackedExtensionArray
"""
import numpy as np

from pandas import (
    CategoricalIndex,
    date_range,
)
from pandas.core.arrays import (
    Categorical,
    DatetimeArray,
    NumpyExtensionArray,
    TimedeltaArray,
)


class TestEmpty:
    """Tests for the private ``_empty`` constructor of each backed array."""

    def test_empty_categorical(self):
        ci = CategoricalIndex(["a", "b", "c"], ordered=True)
        dtype = ci.dtype

        # case with int8 codes
        shape = (4,)
        result = Categorical._empty(shape, dtype=dtype)
        assert isinstance(result, Categorical)
        assert result.shape == shape
        assert result._ndarray.dtype == np.int8

        # case where repr would segfault if we didn't override base implementation
        result = Categorical._empty((4096,), dtype=dtype)
        assert isinstance(result, Categorical)
        assert result.shape == (4096,)
        assert result._ndarray.dtype == np.int8
        repr(result)

        # case with int16 codes
        # 2048 distinct categories exceed int8's range, so codes widen to int16
        ci = CategoricalIndex(list(range(512)) * 4, ordered=False)
        dtype = ci.dtype
        result = Categorical._empty(shape, dtype=dtype)
        assert isinstance(result, Categorical)
        assert result.shape == shape
        assert result._ndarray.dtype == np.int16

    def test_empty_dt64tz(self):
        # tz-aware dtype round-trips through _empty, including zero-length
        dti = date_range("2016-01-01", periods=2, tz="Asia/Tokyo")
        dtype = dti.dtype

        shape = (0,)
        result = DatetimeArray._empty(shape, dtype=dtype)
        assert result.dtype == dtype
        assert isinstance(result, DatetimeArray)
        assert result.shape == shape

    def test_empty_dt64(self):
        # 2D shapes are supported for tz-naive datetime64
        shape = (3, 9)
        result = DatetimeArray._empty(shape, dtype="datetime64[ns]")
        assert isinstance(result, DatetimeArray)
        assert result.shape == shape

    def test_empty_td64(self):
        # 2D shapes are supported for timedelta64
        shape = (3, 9)
        result = TimedeltaArray._empty(shape, dtype="m8[ns]")
        assert isinstance(result, TimedeltaArray)
        assert result.shape == shape

    def test_empty_pandas_array(self):
        # NumpyExtensionArray._empty preserves the wrapped numpy dtype
        arr = NumpyExtensionArray(np.array([1, 2]))
        dtype = arr.dtype

        shape = (3, 9)
        result = NumpyExtensionArray._empty(shape, dtype=dtype)
        assert isinstance(result, NumpyExtensionArray)
        assert result.dtype == dtype
        assert result.shape == shape
"""Tests for PeriodArray / PeriodDtype basics: construction, setitem, ops, repr."""
import numpy as np
import pytest

from pandas._libs.tslibs import iNaT
from pandas._libs.tslibs.period import IncompatibleFrequency

from pandas.core.dtypes.base import _registry as registry
from pandas.core.dtypes.dtypes import PeriodDtype

import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import PeriodArray

# ----------------------------------------------------------------------------
# Dtype


def test_registered():
    # PeriodDtype is discoverable through the extension-dtype registry
    assert PeriodDtype in registry.dtypes
    result = registry.find("Period[D]")
    expected = PeriodDtype("D")
    assert result == expected


# ----------------------------------------------------------------------------
# period_array


def test_asi8():
    # per these expected values, D-frequency ordinals count days from
    # 1970-01-01 (10957 -> 2000-01-01); missing entries become iNaT
    result = PeriodArray._from_sequence(["2000", "2001", None], dtype="period[D]").asi8
    expected = np.array([10957, 11323, iNaT])
    tm.assert_numpy_array_equal(result, expected)


def test_take_raises():
    arr = PeriodArray._from_sequence(["2000", "2001"], dtype="period[D]")
    # fill_value with a mismatched freq is rejected
    with pytest.raises(IncompatibleFrequency, match="freq"):
        arr.take([0, -1], allow_fill=True, fill_value=pd.Period("2000", freq="W"))

    msg = "value should be a 'Period' or 'NaT'. Got 'str' instead"
    with pytest.raises(TypeError, match=msg):
        arr.take([0, -1], allow_fill=True, fill_value="foo")


def test_fillna_raises():
    arr = PeriodArray._from_sequence(["2000", "2001", "2002"], dtype="period[D]")
    with pytest.raises(ValueError, match="Length"):
        arr.fillna(arr[:2])


def test_fillna_copies():
    # fillna must return a new object, never mutate in place
    arr = PeriodArray._from_sequence(["2000", "2001", "2002"], dtype="period[D]")
    result = arr.fillna(pd.Period("2000", "D"))
    assert result is not arr


# ----------------------------------------------------------------------------
# setitem


@pytest.mark.parametrize(
    "key, value, expected",
    [
        ([0], pd.Period("2000", "D"), [10957, 1, 2]),
        ([0], None, [iNaT, 1, 2]),
        ([0], np.nan, [iNaT, 1, 2]),
        ([0, 1, 2], pd.Period("2000", "D"), [10957] * 3),
        (
            [0, 1, 2],
            [pd.Period("2000", "D"), pd.Period("2001", "D"), pd.Period("2002", "D")],
            [10957, 11323, 11688],
        ),
    ],
)
def test_setitem(key, value, expected):
    arr = PeriodArray(np.arange(3), dtype="period[D]")
    expected = PeriodArray(expected, dtype="period[D]")
    arr[key] = value
    tm.assert_period_array_equal(arr, expected)


def test_setitem_raises_incompatible_freq():
    arr = PeriodArray(np.arange(3), dtype="period[D]")
    with pytest.raises(IncompatibleFrequency, match="freq"):
        arr[0] = pd.Period("2000", freq="Y")

    other = PeriodArray._from_sequence(["2000", "2001"], dtype="period[Y]")
    with pytest.raises(IncompatibleFrequency, match="freq"):
        arr[[0, 1]] = other


def test_setitem_raises_length():
    arr = PeriodArray(np.arange(3), dtype="period[D]")
    with pytest.raises(ValueError, match="length"):
        arr[[0, 1]] = [pd.Period("2000", freq="D")]


def test_setitem_raises_type():
    arr = PeriodArray(np.arange(3), dtype="period[D]")
    with pytest.raises(TypeError, match="int"):
        arr[0] = 1


# ----------------------------------------------------------------------------
# Ops


def test_sub_period():
    # subtracting a Period with a different freq raises
    arr = PeriodArray._from_sequence(["2000", "2001"], dtype="period[D]")
    other = pd.Period("2000", freq="M")
    with pytest.raises(IncompatibleFrequency, match="freq"):
        arr - other


def test_sub_period_overflow():
    # GH#47538
    dti = pd.date_range("1677-09-22", periods=2, freq="D")
    pi = dti.to_period("ns")

    per = pd.Period._from_ordinal(10**14, pi.freq)

    with pytest.raises(OverflowError, match="Overflow in int64 addition"):
        pi - per

    with pytest.raises(OverflowError, match="Overflow in int64 addition"):
        per - pi


# ----------------------------------------------------------------------------
# Methods


@pytest.mark.parametrize(
    "other",
    [
        pd.Period("2000", freq="h"),
        PeriodArray._from_sequence(["2000", "2001", "2000"], dtype="period[h]"),
    ],
)
def test_where_different_freq_raises(other):
    # GH#45768 The PeriodArray method raises, the Series method coerces
    ser = pd.Series(
        PeriodArray._from_sequence(["2000", "2001", "2002"], dtype="period[D]")
    )
    cond = np.array([True, False, True])

    with pytest.raises(IncompatibleFrequency, match="freq"):
        ser.array._where(cond, other)

    # Series.where falls back to object dtype instead of raising
    res = ser.where(cond, other)
    expected = ser.astype(object).where(cond, other)
    tm.assert_series_equal(res, expected)


# ----------------------------------------------------------------------------
# Printing


def test_repr_small():
    arr = PeriodArray._from_sequence(["2000", "2001"], dtype="period[D]")
    result = str(arr)
    expected = (
        "<PeriodArray>\n['2000-01-01', '2001-01-01']\nLength: 2, dtype: period[D]"
    )
    assert result == expected


def test_repr_large():
    # long arrays are truncated in the middle with an ellipsis
    arr = PeriodArray._from_sequence(["2000", "2001"] * 500, dtype="period[D]")
    result = str(arr)
    expected = (
        "<PeriodArray>\n"
        "['2000-01-01', '2001-01-01', '2000-01-01', '2001-01-01', "
        "'2000-01-01',\n"
        " '2001-01-01', '2000-01-01', '2001-01-01', '2000-01-01', "
        "'2001-01-01',\n"
        " ...\n"
        " '2000-01-01', '2001-01-01', '2000-01-01', '2001-01-01', "
        "'2000-01-01',\n"
        " '2001-01-01', '2000-01-01', '2001-01-01', '2000-01-01', "
        "'2001-01-01']\n"
        "Length: 1000, dtype: period[D]"
    )
    assert result == expected
from datetime import timedelta\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import Timedelta\nimport pandas._testing as tm\nfrom pandas.core.arrays import (\n DatetimeArray,\n TimedeltaArray,\n)\n\n\nclass TestNonNano:\n @pytest.fixture(params=["s", "ms", "us"])\n def unit(self, request):\n return request.param\n\n @pytest.fixture\n def tda(self, unit):\n arr = np.arange(5, dtype=np.int64).view(f"m8[{unit}]")\n return TimedeltaArray._simple_new(arr, dtype=arr.dtype)\n\n def test_non_nano(self, unit):\n arr = np.arange(5, dtype=np.int64).view(f"m8[{unit}]")\n tda = TimedeltaArray._simple_new(arr, dtype=arr.dtype)\n\n assert tda.dtype == arr.dtype\n assert tda[0].unit == unit\n\n def test_as_unit_raises(self, tda):\n # GH#50616\n with pytest.raises(ValueError, match="Supported units"):\n tda.as_unit("D")\n\n tdi = pd.Index(tda)\n with pytest.raises(ValueError, match="Supported units"):\n tdi.as_unit("D")\n\n @pytest.mark.parametrize("field", TimedeltaArray._field_ops)\n def test_fields(self, tda, field):\n as_nano = tda._ndarray.astype("m8[ns]")\n tda_nano = TimedeltaArray._simple_new(as_nano, dtype=as_nano.dtype)\n\n result = getattr(tda, field)\n expected = getattr(tda_nano, field)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_to_pytimedelta(self, tda):\n as_nano = tda._ndarray.astype("m8[ns]")\n tda_nano = TimedeltaArray._simple_new(as_nano, dtype=as_nano.dtype)\n\n result = tda.to_pytimedelta()\n expected = tda_nano.to_pytimedelta()\n tm.assert_numpy_array_equal(result, expected)\n\n def test_total_seconds(self, unit, tda):\n as_nano = tda._ndarray.astype("m8[ns]")\n tda_nano = TimedeltaArray._simple_new(as_nano, dtype=as_nano.dtype)\n\n result = tda.total_seconds()\n expected = tda_nano.total_seconds()\n tm.assert_numpy_array_equal(result, expected)\n\n def test_timedelta_array_total_seconds(self):\n # GH34290\n expected = Timedelta("2 min").total_seconds()\n\n result = pd.array([Timedelta("2 
min")]).total_seconds()[0]\n assert result == expected\n\n def test_total_seconds_nanoseconds(self):\n # issue #48521\n start_time = pd.Series(["2145-11-02 06:00:00"]).astype("datetime64[ns]")\n end_time = pd.Series(["2145-11-02 07:06:00"]).astype("datetime64[ns]")\n expected = (end_time - start_time).values / np.timedelta64(1, "s")\n result = (end_time - start_time).dt.total_seconds().values\n assert result == expected\n\n @pytest.mark.parametrize(\n "nat", [np.datetime64("NaT", "ns"), np.datetime64("NaT", "us")]\n )\n def test_add_nat_datetimelike_scalar(self, nat, tda):\n result = tda + nat\n assert isinstance(result, DatetimeArray)\n assert result._creso == tda._creso\n assert result.isna().all()\n\n result = nat + tda\n assert isinstance(result, DatetimeArray)\n assert result._creso == tda._creso\n assert result.isna().all()\n\n def test_add_pdnat(self, tda):\n result = tda + pd.NaT\n assert isinstance(result, TimedeltaArray)\n assert result._creso == tda._creso\n assert result.isna().all()\n\n result = pd.NaT + tda\n assert isinstance(result, TimedeltaArray)\n assert result._creso == tda._creso\n assert result.isna().all()\n\n # TODO: 2022-07-11 this is the only test that gets to DTA.tz_convert\n # or tz_localize with non-nano; implement tests specific to that.\n def test_add_datetimelike_scalar(self, tda, tz_naive_fixture):\n ts = pd.Timestamp("2016-01-01", tz=tz_naive_fixture).as_unit("ns")\n\n expected = tda.as_unit("ns") + ts\n res = tda + ts\n tm.assert_extension_array_equal(res, expected)\n res = ts + tda\n tm.assert_extension_array_equal(res, expected)\n\n ts += Timedelta(1) # case where we can't cast losslessly\n\n exp_values = tda._ndarray + ts.asm8\n expected = (\n DatetimeArray._simple_new(exp_values, dtype=exp_values.dtype)\n .tz_localize("UTC")\n .tz_convert(ts.tz)\n )\n\n result = tda + ts\n tm.assert_extension_array_equal(result, expected)\n\n result = ts + tda\n tm.assert_extension_array_equal(result, expected)\n\n def test_mul_scalar(self, 
tda):\n other = 2\n result = tda * other\n expected = TimedeltaArray._simple_new(tda._ndarray * other, dtype=tda.dtype)\n tm.assert_extension_array_equal(result, expected)\n assert result._creso == tda._creso\n\n def test_mul_listlike(self, tda):\n other = np.arange(len(tda))\n result = tda * other\n expected = TimedeltaArray._simple_new(tda._ndarray * other, dtype=tda.dtype)\n tm.assert_extension_array_equal(result, expected)\n assert result._creso == tda._creso\n\n def test_mul_listlike_object(self, tda):\n other = np.arange(len(tda))\n result = tda * other.astype(object)\n expected = TimedeltaArray._simple_new(tda._ndarray * other, dtype=tda.dtype)\n tm.assert_extension_array_equal(result, expected)\n assert result._creso == tda._creso\n\n def test_div_numeric_scalar(self, tda):\n other = 2\n result = tda / other\n expected = TimedeltaArray._simple_new(tda._ndarray / other, dtype=tda.dtype)\n tm.assert_extension_array_equal(result, expected)\n assert result._creso == tda._creso\n\n def test_div_td_scalar(self, tda):\n other = timedelta(seconds=1)\n result = tda / other\n expected = tda._ndarray / np.timedelta64(1, "s")\n tm.assert_numpy_array_equal(result, expected)\n\n def test_div_numeric_array(self, tda):\n other = np.arange(len(tda))\n result = tda / other\n expected = TimedeltaArray._simple_new(tda._ndarray / other, dtype=tda.dtype)\n tm.assert_extension_array_equal(result, expected)\n assert result._creso == tda._creso\n\n def test_div_td_array(self, tda):\n other = tda._ndarray + tda._ndarray[-1]\n result = tda / other\n expected = tda._ndarray / other\n tm.assert_numpy_array_equal(result, expected)\n\n def test_add_timedeltaarraylike(self, tda):\n tda_nano = tda.astype("m8[ns]")\n\n expected = tda_nano * 2\n res = tda_nano + tda\n tm.assert_extension_array_equal(res, expected)\n res = tda + tda_nano\n tm.assert_extension_array_equal(res, expected)\n\n expected = tda_nano * 0\n res = tda - tda_nano\n tm.assert_extension_array_equal(res, expected)\n\n res 
= tda_nano - tda\n tm.assert_extension_array_equal(res, expected)\n\n\nclass TestTimedeltaArray:\n @pytest.mark.parametrize("dtype", [int, np.int32, np.int64, "uint32", "uint64"])\n def test_astype_int(self, dtype):\n arr = TimedeltaArray._from_sequence(\n [Timedelta("1h"), Timedelta("2h")], dtype="m8[ns]"\n )\n\n if np.dtype(dtype) != np.int64:\n with pytest.raises(TypeError, match=r"Do obj.astype\('int64'\)"):\n arr.astype(dtype)\n return\n\n result = arr.astype(dtype)\n expected = arr._ndarray.view("i8")\n tm.assert_numpy_array_equal(result, expected)\n\n def test_setitem_clears_freq(self):\n a = pd.timedelta_range("1h", periods=2, freq="h")._data\n a[0] = Timedelta("1h")\n assert a.freq is None\n\n @pytest.mark.parametrize(\n "obj",\n [\n Timedelta(seconds=1),\n Timedelta(seconds=1).to_timedelta64(),\n Timedelta(seconds=1).to_pytimedelta(),\n ],\n )\n def test_setitem_objects(self, obj):\n # make sure we accept timedelta64 and timedelta in addition to Timedelta\n tdi = pd.timedelta_range("2 Days", periods=4, freq="h")\n arr = tdi._data\n\n arr[0] = obj\n assert arr[0] == Timedelta(seconds=1)\n\n @pytest.mark.parametrize(\n "other",\n [\n 1,\n np.int64(1),\n 1.0,\n np.datetime64("NaT"),\n pd.Timestamp("2021-01-01"),\n "invalid",\n np.arange(10, dtype="i8") * 24 * 3600 * 10**9,\n (np.arange(10) * 24 * 3600 * 10**9).view("datetime64[ns]"),\n pd.Timestamp("2021-01-01").to_period("D"),\n ],\n )\n @pytest.mark.parametrize("index", [True, False])\n def test_searchsorted_invalid_types(self, other, index):\n data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9\n arr = pd.TimedeltaIndex(data, freq="D")._data\n if index:\n arr = pd.Index(arr)\n\n msg = "|".join(\n [\n "searchsorted requires compatible dtype or scalar",\n "value should be a 'Timedelta', 'NaT', or array of those. 
Got",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n arr.searchsorted(other)\n\n\nclass TestUnaryOps:\n def test_abs(self):\n vals = np.array([-3600 * 10**9, "NaT", 7200 * 10**9], dtype="m8[ns]")\n arr = TimedeltaArray._from_sequence(vals)\n\n evals = np.array([3600 * 10**9, "NaT", 7200 * 10**9], dtype="m8[ns]")\n expected = TimedeltaArray._from_sequence(evals)\n\n result = abs(arr)\n tm.assert_timedelta_array_equal(result, expected)\n\n result2 = np.abs(arr)\n tm.assert_timedelta_array_equal(result2, expected)\n\n def test_pos(self):\n vals = np.array([-3600 * 10**9, "NaT", 7200 * 10**9], dtype="m8[ns]")\n arr = TimedeltaArray._from_sequence(vals)\n\n result = +arr\n tm.assert_timedelta_array_equal(result, arr)\n assert not tm.shares_memory(result, arr)\n\n result2 = np.positive(arr)\n tm.assert_timedelta_array_equal(result2, arr)\n assert not tm.shares_memory(result2, arr)\n\n def test_neg(self):\n vals = np.array([-3600 * 10**9, "NaT", 7200 * 10**9], dtype="m8[ns]")\n arr = TimedeltaArray._from_sequence(vals)\n\n evals = np.array([3600 * 10**9, "NaT", -7200 * 10**9], dtype="m8[ns]")\n expected = TimedeltaArray._from_sequence(evals)\n\n result = -arr\n tm.assert_timedelta_array_equal(result, expected)\n\n result2 = np.negative(arr)\n tm.assert_timedelta_array_equal(result2, expected)\n\n def test_neg_freq(self):\n tdi = pd.timedelta_range("2 Days", periods=4, freq="h")\n arr = tdi._data\n\n expected = -tdi._data\n\n result = -arr\n tm.assert_timedelta_array_equal(result, expected)\n\n result2 = np.negative(arr)\n tm.assert_timedelta_array_equal(result2, expected)\n | .venv\Lib\site-packages\pandas\tests\arrays\test_timedeltas.py | test_timedeltas.py | Python | 10,673 | 0.95 | 0.105431 | 0.024096 | python-kit | 909 | 2025-02-24T14:29:11.609310 | GPL-3.0 | true | 2522115ba99f3da1cede51d920b5cc19 |
import operator\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\n\n\n@pytest.fixture\ndef data():\n """Fixture returning boolean array with valid and missing values."""\n return pd.array(\n [True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False],\n dtype="boolean",\n )\n\n\n@pytest.fixture\ndef left_array():\n """Fixture returning boolean array with valid and missing values."""\n return pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean")\n\n\n@pytest.fixture\ndef right_array():\n """Fixture returning boolean array with valid and missing values."""\n return pd.array([True, False, None] * 3, dtype="boolean")\n\n\n# Basic test for the arithmetic array ops\n# -----------------------------------------------------------------------------\n\n\n@pytest.mark.parametrize(\n "opname, exp",\n [\n ("add", [True, True, None, True, False, None, None, None, None]),\n ("mul", [True, False, None, False, False, None, None, None, None]),\n ],\n ids=["add", "mul"],\n)\ndef test_add_mul(left_array, right_array, opname, exp):\n op = getattr(operator, opname)\n result = op(left_array, right_array)\n expected = pd.array(exp, dtype="boolean")\n tm.assert_extension_array_equal(result, expected)\n\n\ndef test_sub(left_array, right_array):\n msg = (\n r"numpy boolean subtract, the `-` operator, is (?:deprecated|not supported), "\n r"use the bitwise_xor, the `\^` operator, or the logical_xor function instead\."\n )\n with pytest.raises(TypeError, match=msg):\n left_array - right_array\n\n\ndef test_div(left_array, right_array):\n msg = "operator '.*' not implemented for bool dtypes"\n with pytest.raises(NotImplementedError, match=msg):\n # check that we are matching the non-masked Series behavior\n pd.Series(left_array._data) / pd.Series(right_array._data)\n\n with pytest.raises(NotImplementedError, match=msg):\n left_array / right_array\n\n\n@pytest.mark.parametrize(\n "opname",\n [\n "floordiv",\n "mod",\n "pow",\n 
],\n)\ndef test_op_int8(left_array, right_array, opname):\n op = getattr(operator, opname)\n if opname != "mod":\n msg = "operator '.*' not implemented for bool dtypes"\n with pytest.raises(NotImplementedError, match=msg):\n result = op(left_array, right_array)\n return\n result = op(left_array, right_array)\n expected = op(left_array.astype("Int8"), right_array.astype("Int8"))\n tm.assert_extension_array_equal(result, expected)\n\n\n# Test generic characteristics / errors\n# -----------------------------------------------------------------------------\n\n\ndef test_error_invalid_values(data, all_arithmetic_operators):\n # invalid ops\n op = all_arithmetic_operators\n s = pd.Series(data)\n ops = getattr(s, op)\n\n # invalid scalars\n msg = (\n "did not contain a loop with signature matching types|"\n "BooleanArray cannot perform the operation|"\n "not supported for the input types, and the inputs could not be safely coerced "\n "to any supported types according to the casting rule ''safe''|"\n "not supported for dtype"\n )\n with pytest.raises(TypeError, match=msg):\n ops("foo")\n msg = "|".join(\n [\n r"unsupported operand type\(s\) for",\n "Concatenation operation is not implemented for NumPy arrays",\n "has no kernel",\n "not supported for dtype",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n ops(pd.Timestamp("20180101"))\n\n # invalid array-likes\n if op not in ("__mul__", "__rmul__"):\n # TODO(extension) numpy's mul with object array sees booleans as numbers\n msg = "|".join(\n [\n r"unsupported operand type\(s\) for",\n "can only concatenate str",\n "not all arguments converted during string formatting",\n "has no kernel",\n "not implemented",\n "not supported for dtype",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n ops(pd.Series("foo", index=s.index))\n | .venv\Lib\site-packages\pandas\tests\arrays\boolean\test_arithmetic.py | test_arithmetic.py | Python | 4,177 | 0.95 | 0.156716 | 0.082569 | react-lib | 352 | 2023-12-06T23:33:20.786936 | 
GPL-3.0 | true | efe26d5d3f61f23e39a4a972e4ae7c3f |
import numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\n\n\ndef test_astype(using_infer_string):\n # with missing values\n arr = pd.array([True, False, None], dtype="boolean")\n\n with pytest.raises(ValueError, match="cannot convert NA to integer"):\n arr.astype("int64")\n\n with pytest.raises(ValueError, match="cannot convert float NaN to"):\n arr.astype("bool")\n\n result = arr.astype("float64")\n expected = np.array([1, 0, np.nan], dtype="float64")\n tm.assert_numpy_array_equal(result, expected)\n\n result = arr.astype("str")\n if using_infer_string:\n expected = pd.array(\n ["True", "False", None], dtype=pd.StringDtype(na_value=np.nan)\n )\n tm.assert_extension_array_equal(result, expected)\n else:\n expected = np.array(["True", "False", "<NA>"], dtype=f"{tm.ENDIAN}U5")\n tm.assert_numpy_array_equal(result, expected)\n\n # no missing values\n arr = pd.array([True, False, True], dtype="boolean")\n result = arr.astype("int64")\n expected = np.array([1, 0, 1], dtype="int64")\n tm.assert_numpy_array_equal(result, expected)\n\n result = arr.astype("bool")\n expected = np.array([True, False, True], dtype="bool")\n tm.assert_numpy_array_equal(result, expected)\n\n\ndef test_astype_to_boolean_array():\n # astype to BooleanArray\n arr = pd.array([True, False, None], dtype="boolean")\n\n result = arr.astype("boolean")\n tm.assert_extension_array_equal(result, arr)\n result = arr.astype(pd.BooleanDtype())\n tm.assert_extension_array_equal(result, arr)\n\n\ndef test_astype_to_integer_array():\n # astype to IntegerArray\n arr = pd.array([True, False, None], dtype="boolean")\n\n result = arr.astype("Int64")\n expected = pd.array([1, 0, None], dtype="Int64")\n tm.assert_extension_array_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\arrays\boolean\test_astype.py | test_astype.py | Python | 1,849 | 0.95 | 0.067797 | 0.090909 | python-kit | 595 | 2023-09-06T06:46:50.955248 | MIT | true | 5376181675396bf9dd8f905baaa64be1 |
import numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.arrays import BooleanArray\nfrom pandas.tests.arrays.masked_shared import ComparisonOps\n\n\n@pytest.fixture\ndef data():\n """Fixture returning boolean array with valid and missing data"""\n return pd.array(\n [True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False],\n dtype="boolean",\n )\n\n\n@pytest.fixture\ndef dtype():\n """Fixture returning BooleanDtype"""\n return pd.BooleanDtype()\n\n\nclass TestComparisonOps(ComparisonOps):\n def test_compare_scalar(self, data, comparison_op):\n self._compare_other(data, comparison_op, True)\n\n def test_compare_array(self, data, comparison_op):\n other = pd.array([True] * len(data), dtype="boolean")\n self._compare_other(data, comparison_op, other)\n other = np.array([True] * len(data))\n self._compare_other(data, comparison_op, other)\n other = pd.Series([True] * len(data))\n self._compare_other(data, comparison_op, other)\n\n @pytest.mark.parametrize("other", [True, False, pd.NA])\n def test_scalar(self, other, comparison_op, dtype):\n ComparisonOps.test_scalar(self, other, comparison_op, dtype)\n\n def test_array(self, comparison_op):\n op = comparison_op\n a = pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean")\n b = pd.array([True, False, None] * 3, dtype="boolean")\n\n result = op(a, b)\n\n values = op(a._data, b._data)\n mask = a._mask | b._mask\n expected = BooleanArray(values, mask)\n tm.assert_extension_array_equal(result, expected)\n\n # ensure we haven't mutated anything inplace\n result[0] = None\n tm.assert_extension_array_equal(\n a, pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean")\n )\n tm.assert_extension_array_equal(\n b, pd.array([True, False, None] * 3, dtype="boolean")\n )\n | .venv\Lib\site-packages\pandas\tests\arrays\boolean\test_comparison.py | test_comparison.py | Python | 1,976 | 0.95 | 0.116667 | 0.021277 | node-utils | 96 | 
2024-11-05T12:41:46.081654 | BSD-3-Clause | true | aa7df2f0e36bdfb963863ec77bb622e1 |
import numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.arrays import BooleanArray\nfrom pandas.core.arrays.boolean import coerce_to_array\n\n\ndef test_boolean_array_constructor():\n values = np.array([True, False, True, False], dtype="bool")\n mask = np.array([False, False, False, True], dtype="bool")\n\n result = BooleanArray(values, mask)\n expected = pd.array([True, False, True, None], dtype="boolean")\n tm.assert_extension_array_equal(result, expected)\n\n with pytest.raises(TypeError, match="values should be boolean numpy array"):\n BooleanArray(values.tolist(), mask)\n\n with pytest.raises(TypeError, match="mask should be boolean numpy array"):\n BooleanArray(values, mask.tolist())\n\n with pytest.raises(TypeError, match="values should be boolean numpy array"):\n BooleanArray(values.astype(int), mask)\n\n with pytest.raises(TypeError, match="mask should be boolean numpy array"):\n BooleanArray(values, None)\n\n with pytest.raises(ValueError, match="values.shape must match mask.shape"):\n BooleanArray(values.reshape(1, -1), mask)\n\n with pytest.raises(ValueError, match="values.shape must match mask.shape"):\n BooleanArray(values, mask.reshape(1, -1))\n\n\ndef test_boolean_array_constructor_copy():\n values = np.array([True, False, True, False], dtype="bool")\n mask = np.array([False, False, False, True], dtype="bool")\n\n result = BooleanArray(values, mask)\n assert result._data is values\n assert result._mask is mask\n\n result = BooleanArray(values, mask, copy=True)\n assert result._data is not values\n assert result._mask is not mask\n\n\ndef test_to_boolean_array():\n expected = BooleanArray(\n np.array([True, False, True]), np.array([False, False, False])\n )\n\n result = pd.array([True, False, True], dtype="boolean")\n tm.assert_extension_array_equal(result, expected)\n result = pd.array(np.array([True, False, True]), dtype="boolean")\n tm.assert_extension_array_equal(result, expected)\n result = 
pd.array(np.array([True, False, True], dtype=object), dtype="boolean")\n tm.assert_extension_array_equal(result, expected)\n\n # with missing values\n expected = BooleanArray(\n np.array([True, False, True]), np.array([False, False, True])\n )\n\n result = pd.array([True, False, None], dtype="boolean")\n tm.assert_extension_array_equal(result, expected)\n result = pd.array(np.array([True, False, None], dtype=object), dtype="boolean")\n tm.assert_extension_array_equal(result, expected)\n\n\ndef test_to_boolean_array_all_none():\n expected = BooleanArray(np.array([True, True, True]), np.array([True, True, True]))\n\n result = pd.array([None, None, None], dtype="boolean")\n tm.assert_extension_array_equal(result, expected)\n result = pd.array(np.array([None, None, None], dtype=object), dtype="boolean")\n tm.assert_extension_array_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "a, b",\n [\n ([True, False, None, np.nan, pd.NA], [True, False, None, None, None]),\n ([True, np.nan], [True, None]),\n ([True, pd.NA], [True, None]),\n ([np.nan, np.nan], [None, None]),\n (np.array([np.nan, np.nan], dtype=float), [None, None]),\n ],\n)\ndef test_to_boolean_array_missing_indicators(a, b):\n result = pd.array(a, dtype="boolean")\n expected = pd.array(b, dtype="boolean")\n tm.assert_extension_array_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "values",\n [\n ["foo", "bar"],\n ["1", "2"],\n # "foo",\n [1, 2],\n [1.0, 2.0],\n pd.date_range("20130101", periods=2),\n np.array(["foo"]),\n np.array([1, 2]),\n np.array([1.0, 2.0]),\n [np.nan, {"a": 1}],\n ],\n)\ndef test_to_boolean_array_error(values):\n # error in converting existing arrays to BooleanArray\n msg = "Need to pass bool-like value"\n with pytest.raises(TypeError, match=msg):\n pd.array(values, dtype="boolean")\n\n\ndef test_to_boolean_array_from_integer_array():\n result = pd.array(np.array([1, 0, 1, 0]), dtype="boolean")\n expected = pd.array([True, False, True, False], dtype="boolean")\n 
tm.assert_extension_array_equal(result, expected)\n\n # with missing values\n result = pd.array(np.array([1, 0, 1, None]), dtype="boolean")\n expected = pd.array([True, False, True, None], dtype="boolean")\n tm.assert_extension_array_equal(result, expected)\n\n\ndef test_to_boolean_array_from_float_array():\n result = pd.array(np.array([1.0, 0.0, 1.0, 0.0]), dtype="boolean")\n expected = pd.array([True, False, True, False], dtype="boolean")\n tm.assert_extension_array_equal(result, expected)\n\n # with missing values\n result = pd.array(np.array([1.0, 0.0, 1.0, np.nan]), dtype="boolean")\n expected = pd.array([True, False, True, None], dtype="boolean")\n tm.assert_extension_array_equal(result, expected)\n\n\ndef test_to_boolean_array_integer_like():\n # integers of 0's and 1's\n result = pd.array([1, 0, 1, 0], dtype="boolean")\n expected = pd.array([True, False, True, False], dtype="boolean")\n tm.assert_extension_array_equal(result, expected)\n\n # with missing values\n result = pd.array([1, 0, 1, None], dtype="boolean")\n expected = pd.array([True, False, True, None], dtype="boolean")\n tm.assert_extension_array_equal(result, expected)\n\n\ndef test_coerce_to_array():\n # TODO this is currently not public API\n values = np.array([True, False, True, False], dtype="bool")\n mask = np.array([False, False, False, True], dtype="bool")\n result = BooleanArray(*coerce_to_array(values, mask=mask))\n expected = BooleanArray(values, mask)\n tm.assert_extension_array_equal(result, expected)\n assert result._data is values\n assert result._mask is mask\n result = BooleanArray(*coerce_to_array(values, mask=mask, copy=True))\n expected = BooleanArray(values, mask)\n tm.assert_extension_array_equal(result, expected)\n assert result._data is not values\n assert result._mask is not mask\n\n # mixed missing from values and mask\n values = [True, False, None, False]\n mask = np.array([False, False, False, True], dtype="bool")\n result = BooleanArray(*coerce_to_array(values, 
mask=mask))\n expected = BooleanArray(\n np.array([True, False, True, True]), np.array([False, False, True, True])\n )\n tm.assert_extension_array_equal(result, expected)\n result = BooleanArray(*coerce_to_array(np.array(values, dtype=object), mask=mask))\n tm.assert_extension_array_equal(result, expected)\n result = BooleanArray(*coerce_to_array(values, mask=mask.tolist()))\n tm.assert_extension_array_equal(result, expected)\n\n # raise errors for wrong dimension\n values = np.array([True, False, True, False], dtype="bool")\n mask = np.array([False, False, False, True], dtype="bool")\n\n # passing 2D values is OK as long as no mask\n coerce_to_array(values.reshape(1, -1))\n\n with pytest.raises(ValueError, match="values.shape and mask.shape must match"):\n coerce_to_array(values.reshape(1, -1), mask=mask)\n\n with pytest.raises(ValueError, match="values.shape and mask.shape must match"):\n coerce_to_array(values, mask=mask.reshape(1, -1))\n\n\ndef test_coerce_to_array_from_boolean_array():\n # passing BooleanArray to coerce_to_array\n values = np.array([True, False, True, False], dtype="bool")\n mask = np.array([False, False, False, True], dtype="bool")\n arr = BooleanArray(values, mask)\n result = BooleanArray(*coerce_to_array(arr))\n tm.assert_extension_array_equal(result, arr)\n # no copy\n assert result._data is arr._data\n assert result._mask is arr._mask\n\n result = BooleanArray(*coerce_to_array(arr), copy=True)\n tm.assert_extension_array_equal(result, arr)\n assert result._data is not arr._data\n assert result._mask is not arr._mask\n\n with pytest.raises(ValueError, match="cannot pass mask for BooleanArray input"):\n coerce_to_array(arr, mask=mask)\n\n\ndef test_coerce_to_numpy_array():\n # with missing values -> object dtype\n arr = pd.array([True, False, None], dtype="boolean")\n result = np.array(arr)\n expected = np.array([True, False, pd.NA], dtype="object")\n tm.assert_numpy_array_equal(result, expected)\n\n # also with no missing values -> object 
dtype\n arr = pd.array([True, False, True], dtype="boolean")\n result = np.array(arr)\n expected = np.array([True, False, True], dtype="bool")\n tm.assert_numpy_array_equal(result, expected)\n\n # force bool dtype\n result = np.array(arr, dtype="bool")\n expected = np.array([True, False, True], dtype="bool")\n tm.assert_numpy_array_equal(result, expected)\n # with missing values will raise error\n arr = pd.array([True, False, None], dtype="boolean")\n msg = (\n "cannot convert to 'bool'-dtype NumPy array with missing values. "\n "Specify an appropriate 'na_value' for this dtype."\n )\n with pytest.raises(ValueError, match=msg):\n np.array(arr, dtype="bool")\n\n\ndef test_to_boolean_array_from_strings():\n result = BooleanArray._from_sequence_of_strings(\n np.array(["True", "False", "1", "1.0", "0", "0.0", np.nan], dtype=object),\n dtype="boolean",\n )\n expected = BooleanArray(\n np.array([True, False, True, True, False, False, False]),\n np.array([False, False, False, False, False, False, True]),\n )\n\n tm.assert_extension_array_equal(result, expected)\n\n\ndef test_to_boolean_array_from_strings_invalid_string():\n with pytest.raises(ValueError, match="cannot be cast"):\n BooleanArray._from_sequence_of_strings(["donkey"], dtype="boolean")\n\n\n@pytest.mark.parametrize("box", [True, False], ids=["series", "array"])\ndef test_to_numpy(box):\n con = pd.Series if box else pd.array\n # default (with or without missing values) -> object dtype\n arr = con([True, False, True], dtype="boolean")\n result = arr.to_numpy()\n expected = np.array([True, False, True], dtype="bool")\n tm.assert_numpy_array_equal(result, expected)\n\n arr = con([True, False, None], dtype="boolean")\n result = arr.to_numpy()\n expected = np.array([True, False, pd.NA], dtype="object")\n tm.assert_numpy_array_equal(result, expected)\n\n arr = con([True, False, None], dtype="boolean")\n result = arr.to_numpy(dtype="str")\n expected = np.array([True, False, pd.NA], dtype=f"{tm.ENDIAN}U5")\n 
tm.assert_numpy_array_equal(result, expected)\n\n # no missing values -> can convert to bool, otherwise raises\n arr = con([True, False, True], dtype="boolean")\n result = arr.to_numpy(dtype="bool")\n expected = np.array([True, False, True], dtype="bool")\n tm.assert_numpy_array_equal(result, expected)\n\n arr = con([True, False, None], dtype="boolean")\n with pytest.raises(ValueError, match="cannot convert to 'bool'-dtype"):\n result = arr.to_numpy(dtype="bool")\n\n # specify dtype and na_value\n arr = con([True, False, None], dtype="boolean")\n result = arr.to_numpy(dtype=object, na_value=None)\n expected = np.array([True, False, None], dtype="object")\n tm.assert_numpy_array_equal(result, expected)\n\n result = arr.to_numpy(dtype=bool, na_value=False)\n expected = np.array([True, False, False], dtype="bool")\n tm.assert_numpy_array_equal(result, expected)\n\n result = arr.to_numpy(dtype="int64", na_value=-99)\n expected = np.array([1, 0, -99], dtype="int64")\n tm.assert_numpy_array_equal(result, expected)\n\n result = arr.to_numpy(dtype="float64", na_value=np.nan)\n expected = np.array([1, 0, np.nan], dtype="float64")\n tm.assert_numpy_array_equal(result, expected)\n\n # converting to int or float without specifying na_value raises\n with pytest.raises(ValueError, match="cannot convert to 'int64'-dtype"):\n arr.to_numpy(dtype="int64")\n\n\ndef test_to_numpy_copy():\n # to_numpy can be zero-copy if no missing values\n arr = pd.array([True, False, True], dtype="boolean")\n result = arr.to_numpy(dtype=bool)\n result[0] = False\n tm.assert_extension_array_equal(\n arr, pd.array([False, False, True], dtype="boolean")\n )\n\n arr = pd.array([True, False, True], dtype="boolean")\n result = arr.to_numpy(dtype=bool, copy=True)\n result[0] = False\n tm.assert_extension_array_equal(arr, pd.array([True, False, True], dtype="boolean"))\n | .venv\Lib\site-packages\pandas\tests\arrays\boolean\test_construction.py | test_construction.py | Python | 12,332 | 0.95 | 0.064615 | 
0.085938 | react-lib | 940 | 2024-08-26T02:23:25.918084 | BSD-3-Clause | true | e171f005b6f79bac8aa768130da83334 |
import numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\n\n\n@pytest.mark.parametrize(\n "ufunc", [np.add, np.logical_or, np.logical_and, np.logical_xor]\n)\ndef test_ufuncs_binary(ufunc):\n # two BooleanArrays\n a = pd.array([True, False, None], dtype="boolean")\n result = ufunc(a, a)\n expected = pd.array(ufunc(a._data, a._data), dtype="boolean")\n expected[a._mask] = np.nan\n tm.assert_extension_array_equal(result, expected)\n\n s = pd.Series(a)\n result = ufunc(s, a)\n expected = pd.Series(ufunc(a._data, a._data), dtype="boolean")\n expected[a._mask] = np.nan\n tm.assert_series_equal(result, expected)\n\n # Boolean with numpy array\n arr = np.array([True, True, False])\n result = ufunc(a, arr)\n expected = pd.array(ufunc(a._data, arr), dtype="boolean")\n expected[a._mask] = np.nan\n tm.assert_extension_array_equal(result, expected)\n\n result = ufunc(arr, a)\n expected = pd.array(ufunc(arr, a._data), dtype="boolean")\n expected[a._mask] = np.nan\n tm.assert_extension_array_equal(result, expected)\n\n # BooleanArray with scalar\n result = ufunc(a, True)\n expected = pd.array(ufunc(a._data, True), dtype="boolean")\n expected[a._mask] = np.nan\n tm.assert_extension_array_equal(result, expected)\n\n result = ufunc(True, a)\n expected = pd.array(ufunc(True, a._data), dtype="boolean")\n expected[a._mask] = np.nan\n tm.assert_extension_array_equal(result, expected)\n\n # not handled types\n msg = r"operand type\(s\) all returned NotImplemented from __array_ufunc__"\n with pytest.raises(TypeError, match=msg):\n ufunc(a, "test")\n\n\n@pytest.mark.parametrize("ufunc", [np.logical_not])\ndef test_ufuncs_unary(ufunc):\n a = pd.array([True, False, None], dtype="boolean")\n result = ufunc(a)\n expected = pd.array(ufunc(a._data), dtype="boolean")\n expected[a._mask] = np.nan\n tm.assert_extension_array_equal(result, expected)\n\n ser = pd.Series(a)\n result = ufunc(ser)\n expected = pd.Series(ufunc(a._data), dtype="boolean")\n expected[a._mask] 
= np.nan\n tm.assert_series_equal(result, expected)\n\n\ndef test_ufunc_numeric():\n # np.sqrt on np.bool_ returns float16, which we upcast to Float32\n # bc we do not have Float16\n arr = pd.array([True, False, None], dtype="boolean")\n\n res = np.sqrt(arr)\n\n expected = pd.array([1, 0, None], dtype="Float32")\n tm.assert_extension_array_equal(res, expected)\n\n\n@pytest.mark.parametrize("values", [[True, False], [True, None]])\ndef test_ufunc_reduce_raises(values):\n arr = pd.array(values, dtype="boolean")\n\n res = np.add.reduce(arr)\n if arr[-1] is pd.NA:\n expected = pd.NA\n else:\n expected = arr._data.sum()\n tm.assert_almost_equal(res, expected)\n\n\ndef test_value_counts_na():\n arr = pd.array([True, False, pd.NA], dtype="boolean")\n result = arr.value_counts(dropna=False)\n expected = pd.Series([1, 1, 1], index=arr, dtype="Int64", name="count")\n assert expected.index.dtype == arr.dtype\n tm.assert_series_equal(result, expected)\n\n result = arr.value_counts(dropna=True)\n expected = pd.Series([1, 1], index=arr[:-1], dtype="Int64", name="count")\n assert expected.index.dtype == arr.dtype\n tm.assert_series_equal(result, expected)\n\n\ndef test_value_counts_with_normalize():\n ser = pd.Series([True, False, pd.NA], dtype="boolean")\n result = ser.value_counts(normalize=True)\n expected = pd.Series([1, 1], index=ser[:-1], dtype="Float64", name="proportion") / 2\n assert expected.index.dtype == "boolean"\n tm.assert_series_equal(result, expected)\n\n\ndef test_diff():\n a = pd.array(\n [True, True, False, False, True, None, True, None, False], dtype="boolean"\n )\n result = pd.core.algorithms.diff(a, 1)\n expected = pd.array(\n [None, False, True, False, True, None, None, None, None], dtype="boolean"\n )\n tm.assert_extension_array_equal(result, expected)\n\n ser = pd.Series(a)\n result = ser.diff()\n expected = pd.Series(expected)\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\arrays\boolean\test_function.py | 
test_function.py | Python | 4,061 | 0.95 | 0.063492 | 0.060606 | react-lib | 36 | 2023-10-30T12:16:55.422259 | GPL-3.0 | true | 3144e0bf17a2f34723e59165fa579880 |
import numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\n\n\n@pytest.mark.parametrize("na", [None, np.nan, pd.NA])\ndef test_setitem_missing_values(na):\n arr = pd.array([True, False, None], dtype="boolean")\n expected = pd.array([True, None, None], dtype="boolean")\n arr[1] = na\n tm.assert_extension_array_equal(arr, expected)\n | .venv\Lib\site-packages\pandas\tests\arrays\boolean\test_indexing.py | test_indexing.py | Python | 361 | 0.85 | 0.076923 | 0 | awesome-app | 648 | 2023-08-05T16:30:56.518349 | GPL-3.0 | true | 789a86b052bdf938b21d6d769806c28b |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.